/*
 * Copyright (c) 2018-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* NOTE: the three header names below are assumed (upstream vnet tls.c);
 * they were not preserved in the original text. */
#include <vnet/session/application_interface.h>
#include <vppinfra/lock.h>
#include <vnet/tls/tls.h>

static tls_main_t tls_main;
static tls_engine_vft_t *tls_vfts;

/* A tls ctx handle packs the crypto engine type in the top 4 bits and the
 * engine's ctx index in the low 24 bits */
#define TLS_INVALID_HANDLE	~0
#define TLS_IDX_MASK		0x00FFFFFF
#define TLS_ENGINE_TYPE_SHIFT	28

void tls_disconnect (u32 ctx_handle, u32 thread_index);

void
tls_disconnect_transport (tls_ctx_t * ctx)
{
  vnet_disconnect_args_t a = {
    .handle = ctx->tls_session_handle,
    .app_index = tls_main.app_index,
  };

  if (vnet_disconnect_session (&a))
    clib_warning ("disconnect returned");
}

crypto_engine_type_t
tls_get_available_engine (void)
{
  int i;
  for (i = 0; i < vec_len (tls_vfts); i++)
    {
      if (tls_vfts[i].ctx_alloc)
	return i;
    }
  return CRYPTO_ENGINE_NONE;
}

int
tls_add_vpp_q_rx_evt (session_t * s)
{
  if (svm_fifo_set_event (s->rx_fifo))
    session_send_io_evt_to_thread (s->rx_fifo, SESSION_IO_EVT_RX);
  return 0;
}

int
tls_add_vpp_q_builtin_rx_evt (session_t * s)
{
  if (svm_fifo_set_event (s->rx_fifo))
    session_send_io_evt_to_thread (s->rx_fifo, SESSION_IO_EVT_BUILTIN_RX);
  return 0;
}

int
tls_add_vpp_q_tx_evt (session_t * s)
{
  if (svm_fifo_set_event (s->tx_fifo))
    session_send_io_evt_to_thread (s->tx_fifo, SESSION_IO_EVT_TX);
  return 0;
}

static inline int
tls_add_app_q_evt (app_worker_t * app, session_t * app_session)
{
  return app_worker_lock_and_send_event (app, app_session,
					 SESSION_IO_EVT_RX);
}

u32
tls_listener_ctx_alloc (void)
{
  tls_main_t *tm = &tls_main;
  tls_ctx_t *ctx;

  pool_get (tm->listener_ctx_pool, ctx);
  clib_memset (ctx, 0, sizeof (*ctx));
  return ctx - tm->listener_ctx_pool;
}

void
tls_listener_ctx_free (tls_ctx_t * ctx)
{
  if (CLIB_DEBUG)
    memset (ctx, 0xfb, sizeof (*ctx));
  pool_put (tls_main.listener_ctx_pool, ctx);
}

tls_ctx_t *
tls_listener_ctx_get (u32 ctx_index)
{
  return pool_elt_at_index (tls_main.listener_ctx_pool, ctx_index);
}

u32
tls_listener_ctx_index (tls_ctx_t * ctx)
{
  return (ctx - tls_main.listener_ctx_pool);
}

/* The half-open ctx pool is shared between the thread that allocates on
 * connect and the workers that look contexts up, hence the rwlock. The
 * writer lock is only taken when the pool may reallocate. */
u32
tls_ctx_half_open_alloc (void)
{
  tls_main_t *tm = &tls_main;
  u8 will_expand = pool_get_will_expand (tm->half_open_ctx_pool);
  tls_ctx_t *ctx;
  u32 ctx_index;

  if (PREDICT_FALSE (will_expand && vlib_num_workers ()))
    {
      clib_rwlock_writer_lock (&tm->half_open_rwlock);
      pool_get_zero (tm->half_open_ctx_pool, ctx);
      ctx->c_c_index = ctx - tm->half_open_ctx_pool;
      ctx_index = ctx->c_c_index;
      clib_rwlock_writer_unlock (&tm->half_open_rwlock);
    }
  else
    {
      /* reader lock assumption: only main thread will call pool_get */
      clib_rwlock_reader_lock (&tm->half_open_rwlock);
      pool_get_zero (tm->half_open_ctx_pool, ctx);
      ctx->c_c_index = ctx - tm->half_open_ctx_pool;
      ctx_index = ctx->c_c_index;
      clib_rwlock_reader_unlock (&tm->half_open_rwlock);
    }
  return ctx_index;
}

void
tls_ctx_half_open_free (u32 ho_index)
{
  tls_main_t *tm = &tls_main;
  clib_rwlock_writer_lock (&tm->half_open_rwlock);
  pool_put_index (tls_main.half_open_ctx_pool, ho_index);
  clib_rwlock_writer_unlock (&tm->half_open_rwlock);
}

/* Returns with the half-open reader lock held; callers must release it
 * with tls_ctx_half_open_reader_unlock () */
tls_ctx_t *
tls_ctx_half_open_get (u32 ctx_index)
{
  tls_main_t *tm = &tls_main;
  clib_rwlock_reader_lock (&tm->half_open_rwlock);
  return pool_elt_at_index (tm->half_open_ctx_pool, ctx_index);
}

void
tls_ctx_half_open_reader_unlock ()
{
  clib_rwlock_reader_unlock (&tls_main.half_open_rwlock);
}

u32
tls_ctx_half_open_index (tls_ctx_t * ctx)
{
  return (ctx - tls_main.half_open_ctx_pool);
}

void
tls_notify_app_enqueue (tls_ctx_t * ctx, session_t * app_session)
{
  app_worker_t *app_wrk;
  app_wrk = app_worker_get_if_valid (app_session->app_wrk_index);
  if (PREDICT_TRUE (app_wrk != 0))
    tls_add_app_q_evt (app_wrk, app_session);
}

int
tls_notify_app_accept (tls_ctx_t * ctx)
{
  session_t *app_listener, *app_session;
  app_worker_t *app_wrk;
  tls_ctx_t *lctx;
  int rv;

  lctx = tls_listener_ctx_get (ctx->listener_ctx_index);
  app_listener = listen_session_get_from_handle (lctx->app_session_handle);

  app_session = session_get (ctx->c_s_index, ctx->c_thread_index);
  app_session->app_wrk_index = ctx->parent_app_wrk_index;
  app_session->connection_index = ctx->tls_ctx_handle;
  app_session->session_type = app_listener->session_type;
  app_session->listener_handle = listen_session_get_handle (app_listener);
  app_session->session_state = SESSION_STATE_ACCEPTING;

  if ((rv = app_worker_init_accepted (app_session)))
    {
      TLS_DBG (1, "failed to allocate fifos");
      session_free (app_session);
      return rv;
    }
  ctx->app_session_handle = session_handle (app_session);
  ctx->parent_app_wrk_index = app_session->app_wrk_index;
  app_wrk = app_worker_get (app_session->app_wrk_index);
  return app_worker_accept_notify (app_wrk, app_session);
}

int
tls_notify_app_connected (tls_ctx_t * ctx, session_error_t err)
{
  u32 parent_app_api_ctx;
  session_t *app_session;
  app_worker_t *app_wrk;

  app_wrk = app_worker_get_if_valid (ctx->parent_app_wrk_index);
  if (!app_wrk)
    {
      if (ctx->tls_type == TRANSPORT_PROTO_TLS)
	session_free (session_get (ctx->c_s_index, ctx->c_thread_index));
      ctx->no_app_session = 1;
      return -1;
    }

  if (err)
    {
      /* Free app session pre-allocated when transport was established */
      if (ctx->tls_type == TRANSPORT_PROTO_TLS)
	session_free (session_get (ctx->c_s_index, ctx->c_thread_index));
      ctx->no_app_session = 1;
      goto send_reply;
    }

  /* For DTLS the app session is not preallocated because the underlying udp
   * session might migrate to a different worker during the handshake */
  if (ctx->tls_type == TRANSPORT_PROTO_DTLS)
    {
      session_type_t st;
      /* Cleanup half-open session as we don't get notification from udp */
      session_half_open_delete_notify (&ctx->connection);
      app_session = session_alloc (ctx->c_thread_index);
      app_session->session_state = SESSION_STATE_CREATED;
      ctx->c_s_index = app_session->session_index;
      st = session_type_from_proto_and_ip (TRANSPORT_PROTO_DTLS,
					   ctx->tcp_is_ip4);
      app_session->session_type = st;
      app_session->connection_index = ctx->tls_ctx_handle;
    }
  else
    {
      app_session = session_get (ctx->c_s_index, ctx->c_thread_index);
    }

  app_session->app_wrk_index = ctx->parent_app_wrk_index;

  if ((err = app_worker_init_connected (app_wrk, app_session)))
    goto failed;

  app_session->session_state = SESSION_STATE_READY;
  parent_app_api_ctx = ctx->parent_app_api_context;
  ctx->app_session_handle = session_handle (app_session);

  if (app_worker_connect_notify (app_wrk, app_session, SESSION_E_NONE,
				 parent_app_api_ctx))
    {
      TLS_DBG (1, "failed to notify app");
      session_free (session_get (ctx->c_s_index, ctx->c_thread_index));
      ctx->no_app_session = 1;
      return -1;
    }

  return 0;

failed:
  ctx->no_app_session = 1;
  tls_disconnect (ctx->tls_ctx_handle, vlib_get_thread_index ());
send_reply:
  return app_worker_connect_notify (app_wrk, 0, err,
				    ctx->parent_app_api_context);
}

static inline void
tls_ctx_parse_handle (u32 ctx_handle, u32 * ctx_index, u32 * engine_type)
{
  *ctx_index = ctx_handle & TLS_IDX_MASK;
  *engine_type = ctx_handle >> TLS_ENGINE_TYPE_SHIFT;
}

static inline crypto_engine_type_t
tls_get_engine_type (crypto_engine_type_t requested,
		     crypto_engine_type_t preferred)
{
  if (requested != CRYPTO_ENGINE_NONE)
    {
      if (tls_vfts[requested].ctx_alloc)
	return requested;
      return CRYPTO_ENGINE_NONE;
    }
  if (!tls_vfts[preferred].ctx_alloc)
    return tls_get_available_engine ();
  return preferred;
}

static inline u32
tls_ctx_alloc (crypto_engine_type_t engine_type)
{
  u32 ctx_index;
  ctx_index = tls_vfts[engine_type].ctx_alloc ();
  return (((u32) engine_type << TLS_ENGINE_TYPE_SHIFT) | ctx_index);
}

static inline u32
tls_ctx_alloc_w_thread (crypto_engine_type_t engine_type, u32 thread_index)
{
  u32 ctx_index;
  ctx_index = tls_vfts[engine_type].ctx_alloc_w_thread (thread_index);
  return (((u32) engine_type << TLS_ENGINE_TYPE_SHIFT) | ctx_index);
}

static inline u32
tls_ctx_attach (crypto_engine_type_t engine_type, u32 thread_index,
		void *ctx)
{
  u32 ctx_index;
  ctx_index = tls_vfts[engine_type].ctx_attach (thread_index, ctx);
  return (((u32) engine_type << TLS_ENGINE_TYPE_SHIFT) | ctx_index);
}

static inline void *
tls_ctx_detach (tls_ctx_t *ctx)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_detach (ctx);
}

static inline tls_ctx_t *
tls_ctx_get (u32 ctx_handle)
{
  u32 ctx_index, engine_type;
  tls_ctx_parse_handle (ctx_handle, &ctx_index, &engine_type);
  return tls_vfts[engine_type].ctx_get (ctx_index);
}

static inline tls_ctx_t *
tls_ctx_get_w_thread (u32 ctx_handle, u8 thread_index)
{
  u32 ctx_index, engine_type;
  tls_ctx_parse_handle (ctx_handle, &ctx_index, &engine_type);
  return tls_vfts[engine_type].ctx_get_w_thread (ctx_index, thread_index);
}

static inline int
tls_ctx_init_server (tls_ctx_t * ctx)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_init_server (ctx);
}

static inline int
tls_ctx_init_client (tls_ctx_t * ctx)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_init_client (ctx);
}

static inline int
tls_ctx_write (tls_ctx_t * ctx, session_t * app_session,
	       transport_send_params_t * sp)
{
  u32 n_wrote;

  sp->max_burst_size = sp->max_burst_size * TRANSPORT_PACER_MIN_MSS;
  n_wrote = tls_vfts[ctx->tls_ctx_engine].ctx_write (ctx, app_session, sp);
  sp->bytes_dequeued = n_wrote;
  return n_wrote > 0 ? clib_max (n_wrote / TRANSPORT_PACER_MIN_MSS, 1) : 0;
}

static inline int
tls_ctx_read (tls_ctx_t * ctx, session_t * tls_session)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_read (ctx, tls_session);
}

static inline int
tls_ctx_transport_close (tls_ctx_t * ctx)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_transport_close (ctx);
}

static inline int
tls_ctx_app_close (tls_ctx_t * ctx)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_app_close (ctx);
}

void
tls_ctx_free (tls_ctx_t * ctx)
{
  tls_vfts[ctx->tls_ctx_engine].ctx_free (ctx);
}

u8
tls_ctx_handshake_is_over (tls_ctx_t * ctx)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_handshake_is_over (ctx);
}

int
tls_reinit_ca_chain (crypto_engine_type_t tls_engine_id)
{
  return tls_vfts[tls_engine_id].ctx_reinit_cachain ();
}

void
tls_notify_app_io_error (tls_ctx_t *ctx)
{
  ASSERT (tls_ctx_handshake_is_over (ctx));

  session_transport_reset_notify (&ctx->connection);
  session_transport_closed_notify (&ctx->connection);
  tls_disconnect_transport (ctx);
}

void
tls_session_reset_callback (session_t * s)
{
  tls_ctx_t *ctx;
  transport_connection_t *tc;
  session_t *app_session;

  ctx = tls_ctx_get (s->opaque);
  ctx->is_passive_close = 1;
  tc = &ctx->connection;
  if (tls_ctx_handshake_is_over (ctx))
    {
      session_transport_reset_notify (tc);
      session_transport_closed_notify (tc);
      tls_disconnect_transport (ctx);
    }
  else if ((app_session = session_get_if_valid (ctx->c_s_index,
						ctx->c_thread_index)))
    {
      session_free (app_session);
      ctx->c_s_index = SESSION_INVALID_INDEX;
      tls_disconnect_transport (ctx);
    }
}

static void
tls_session_cleanup_ho (session_t *s)
{
  tls_ctx_t *ctx;
  u32 ho_index;

  /* session opaque stores the opaque passed on connect */
  ho_index = s->opaque;
  ctx = tls_ctx_half_open_get (ho_index);
  session_half_open_delete_notify (&ctx->connection);
  tls_ctx_half_open_reader_unlock ();
  tls_ctx_half_open_free (ho_index);
}

int
tls_add_segment_callback (u32 client_index, u64 segment_handle)
{
  /* No-op for builtin */
  return 0;
}

int
tls_del_segment_callback (u32 client_index, u64 segment_handle)
{
  return 0;
}

void
tls_session_disconnect_callback (session_t * tls_session)
{
  tls_ctx_t *ctx;

  TLS_DBG (1, "TCP disconnecting handle %x session %u", tls_session->opaque,
	   tls_session->session_index);

  ASSERT (tls_session->thread_index == vlib_get_thread_index ()
	  || vlib_thread_is_main_w_barrier ());

  ctx = tls_ctx_get_w_thread (tls_session->opaque,
			      tls_session->thread_index);
  ctx->is_passive_close = 1;
  tls_ctx_transport_close (ctx);
}

int
tls_session_accept_callback (session_t * tls_session)
{
  session_t *tls_listener, *app_session;
  tls_ctx_t *lctx, *ctx;
  u32 ctx_handle;

  tls_listener =
    listen_session_get_from_handle (tls_session->listener_handle);
  lctx = tls_listener_ctx_get (tls_listener->opaque);

  ctx_handle = tls_ctx_alloc (lctx->tls_ctx_engine);
  ctx = tls_ctx_get (ctx_handle);
  memcpy (ctx, lctx, sizeof (*lctx));
  ctx->c_thread_index = vlib_get_thread_index ();
  ctx->tls_ctx_handle = ctx_handle;
  tls_session->session_state = SESSION_STATE_READY;
  tls_session->opaque = ctx_handle;
  ctx->tls_session_handle = session_handle (tls_session);
  ctx->listener_ctx_index = tls_listener->opaque;
  ctx->c_flags |= TRANSPORT_CONNECTION_F_NO_LOOKUP;
  ctx->ckpair_index = lctx->ckpair_index;

  /* Preallocate app session. Avoids allocating a session post handshake
   * on tls_session rx and potentially invalidating the session pool */
  app_session = session_alloc (ctx->c_thread_index);
  app_session->session_state = SESSION_STATE_CREATED;
  ctx->c_s_index = app_session->session_index;

  TLS_DBG (1, "Accept on listener %u new connection [%u]%x",
	   tls_listener->opaque, vlib_get_thread_index (), ctx_handle);

  return tls_ctx_init_server (ctx);
}

int
tls_app_rx_callback (session_t * tls_session)
{
  tls_ctx_t *ctx;

  /* DTLS session migrating, wait for next notification */
  if (PREDICT_FALSE (tls_session->flags & SESSION_F_IS_MIGRATING))
    return 0;

  ctx = tls_ctx_get (tls_session->opaque);
  if (PREDICT_FALSE (ctx->no_app_session))
    {
      TLS_DBG (1, "Local App closed");
      return 0;
    }
  tls_ctx_read (ctx, tls_session);
  return 0;
}

int
tls_app_tx_callback (session_t * tls_session)
{
  tls_ctx_t *ctx;

  ctx = tls_ctx_get (tls_session->opaque);
  transport_connection_reschedule (&ctx->connection);

  return 0;
}

int
tls_session_connected_cb (u32 tls_app_index, u32 ho_ctx_index,
			  session_t *tls_session, session_error_t err)
{
  session_t *app_session;
  tls_ctx_t *ho_ctx, *ctx;
  session_type_t st;
  u32 ctx_handle;

  ho_ctx = tls_ctx_half_open_get (ho_ctx_index);

  ctx_handle = tls_ctx_alloc (ho_ctx->tls_ctx_engine);
  ctx = tls_ctx_get (ctx_handle);
  clib_memcpy_fast (ctx, ho_ctx, sizeof (*ctx));
  /* Half-open freed on tcp half-open cleanup notification */
  tls_ctx_half_open_reader_unlock ();

  ctx->c_thread_index = vlib_get_thread_index ();
  ctx->tls_ctx_handle = ctx_handle;
  ctx->c_flags |= TRANSPORT_CONNECTION_F_NO_LOOKUP;

  TLS_DBG (1, "TCP connect for %u returned %u. New connection [%u]%x",
	   ho_ctx_index, err, vlib_get_thread_index (),
	   (ctx) ? ctx_handle : ~0);

  ctx->tls_session_handle = session_handle (tls_session);
  tls_session->opaque = ctx_handle;
  tls_session->session_state = SESSION_STATE_READY;

  /* Preallocate app session. Avoids allocating a session post handshake
   * on tls_session rx and potentially invalidating the session pool */
  app_session = session_alloc (ctx->c_thread_index);
  app_session->session_state = SESSION_STATE_CREATED;
  ctx->c_s_index = app_session->session_index;
  st = session_type_from_proto_and_ip (TRANSPORT_PROTO_TLS,
				       ctx->tcp_is_ip4);
  app_session->session_type = st;
  app_session->connection_index = ctx->tls_ctx_handle;

  return tls_ctx_init_client (ctx);
}

int
dtls_session_connected_cb (u32 app_wrk_index, u32 ctx_handle, session_t *us,
			   session_error_t err)
{
  tls_ctx_t *ctx;

  ctx = tls_ctx_get_w_thread (ctx_handle, transport_cl_thread ());

  ctx->tls_session_handle = session_handle (us);
  ctx->c_flags |= TRANSPORT_CONNECTION_F_NO_LOOKUP;
  us->opaque = ctx_handle;

  /* We don't preallocate the app session because the udp session might
   * actually migrate to a different worker at the end of the handshake */

  return tls_ctx_init_client (ctx);
}

int
tls_session_connected_callback (u32 tls_app_index, u32 ho_ctx_index,
				session_t *tls_session, session_error_t err)
{
  if (err)
    {
      app_worker_t *app_wrk;
      tls_ctx_t *ho_ctx;
      u32 api_context;

      ho_ctx = tls_ctx_half_open_get (ho_ctx_index);
      app_wrk = app_worker_get_if_valid (ho_ctx->parent_app_wrk_index);
      if (app_wrk)
	{
	  api_context = ho_ctx->parent_app_api_context;
	  app_worker_connect_notify (app_wrk, 0, err, api_context);
	}
      tls_ctx_half_open_reader_unlock ();
      return 0;
    }

  if (session_get_transport_proto (tls_session) == TRANSPORT_PROTO_TCP)
    return tls_session_connected_cb (tls_app_index, ho_ctx_index,
				     tls_session, err);
  else
    return dtls_session_connected_cb (tls_app_index, ho_ctx_index,
				      tls_session, err);
}

static void
tls_app_session_cleanup (session_t * s, session_cleanup_ntf_t ntf)
{
  tls_ctx_t *ctx;

  if (ntf == SESSION_CLEANUP_TRANSPORT)
    {
      /* Allow cleanup of tcp session */
      if (s->session_state == SESSION_STATE_TRANSPORT_DELETED)
	session_close (s);
      return;
    }

  ctx = tls_ctx_get (s->opaque);
  if (!ctx->no_app_session)
    session_transport_delete_notify (&ctx->connection);
  tls_ctx_free (ctx);
}

static void
dtls_migrate_ctx (void *arg)
{
  tls_ctx_t *ctx = (tls_ctx_t *) arg;
  u32 ctx_handle, thread_index;
  session_t *us;

  thread_index = session_thread_from_handle (ctx->tls_session_handle);
  ASSERT (thread_index == vlib_get_thread_index ());

  ctx_handle = tls_ctx_attach (ctx->tls_ctx_engine, thread_index, ctx);
  ctx = tls_ctx_get_w_thread (ctx_handle, thread_index);
  ctx->tls_ctx_handle = ctx_handle;

  us = session_get_from_handle (ctx->tls_session_handle);
  us->opaque = ctx_handle;
  us->flags &= ~SESSION_F_IS_MIGRATING;

  /* Probably the app detached while the session was migrating. Cleanup */
  if (session_half_open_migrated_notify (&ctx->connection))
    {
      ctx->no_app_session = 1;
      tls_disconnect (ctx->tls_ctx_handle, vlib_get_thread_index ());
      return;
    }

  if (svm_fifo_max_dequeue (us->tx_fifo))
    session_send_io_evt_to_thread (us->tx_fifo, SESSION_IO_EVT_TX);
}

static void
dtls_session_migrate_callback (session_t *us, session_handle_t new_sh)
{
  u32 new_thread = session_thread_from_handle (new_sh);
  tls_ctx_t *ctx, *cloned_ctx;

  /* Migrate dtls context to new thread */
  ctx = tls_ctx_get_w_thread (us->opaque, us->thread_index);
  ctx->tls_session_handle = new_sh;
  cloned_ctx = tls_ctx_detach (ctx);
  ctx->is_migrated = 1;
  session_half_open_migrate_notify (&ctx->connection);

  session_send_rpc_evt_to_thread (new_thread, dtls_migrate_ctx,
				  (void *) cloned_ctx);

  tls_ctx_free (ctx);
}

/* Session layer callbacks registered by the tls transport app */
static session_cb_vft_t tls_app_cb_vft = {
  .session_accept_callback = tls_session_accept_callback,
  .session_disconnect_callback = tls_session_disconnect_callback,
  .session_connected_callback = tls_session_connected_callback,
  .session_reset_callback = tls_session_reset_callback,
  .half_open_cleanup_callback = tls_session_cleanup_ho,
  .add_segment_callback = tls_add_segment_callback,
  .del_segment_callback = tls_del_segment_callback,
  .builtin_app_rx_callback = tls_app_rx_callback,
  .builtin_app_tx_callback = tls_app_tx_callback,
  .session_migrate_callback = dtls_session_migrate_callback,
  .session_cleanup_callback = tls_app_session_cleanup,
};

int
tls_connect (transport_endpoint_cfg_t * tep)
{
  vnet_connect_args_t _cargs = { {}, }, *cargs = &_cargs;
  transport_endpt_crypto_cfg_t *ccfg;
  crypto_engine_type_t engine_type;
  session_endpoint_cfg_t *sep;
  tls_main_t *tm = &tls_main;
  app_worker_t *app_wrk;
  application_t *app;
  tls_ctx_t *ctx;
  u32 ctx_index;
  int rv;

  sep = (session_endpoint_cfg_t *) tep;
  if (!sep->ext_cfg)
    return SESSION_E_NOEXTCFG;

  app_wrk = app_worker_get (sep->app_wrk_index);
  app = application_get (app_wrk->app_index);

  ccfg = &sep->ext_cfg->crypto;
  engine_type = tls_get_engine_type (ccfg->crypto_engine, app->tls_engine);
  if (engine_type == CRYPTO_ENGINE_NONE)
    {
      clib_warning ("No tls engine_type available");
      return SESSION_E_NOCRYPTOENG;
    }

  ctx_index = tls_ctx_half_open_alloc ();
  ctx = tls_ctx_half_open_get (ctx_index);
  ctx->parent_app_wrk_index = sep->app_wrk_index;
  ctx->parent_app_api_context = sep->opaque;
  c
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/mpls/mpls.h>
#include <vnet/dpo/mpls_label_dpo.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/drop_dpo.h>

#include <vnet/fib/fib_path_ext.h>
#include <vnet/fib/fib_entry_src.h>
#include <vnet/fib/fib_path.h>
#include <vnet/fib/fib_path_list.h>
#include <vnet/fib/fib_internal.h>
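
/*
 * A path extension (fib_path_ext_t) augments a path with per-entry state:
 * an MPLS out-label stack for FIB_PATH_EXT_MPLS extensions, or a set of
 * adjacency flags for FIB_PATH_EXT_ADJ extensions.
 */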

const char *fib_path_ext_adj_flags_names[] = FIB_PATH_EXT_ADJ_ATTR_NAMES;
const char *fib_path_ext_mpls_flags_names[] = FIB_PATH_EXT_MPLS_ATTR_NAMES;

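/*
 * Format one path extension for show/debug output. Invoked through vlib's
 * %U format mechanism; a minimal usage sketch (path_ext assumed to be a
 * valid fib_path_ext_t pointer):
 *
 *   s = format (s, "%U", format_fib_path_ext, path_ext);
 */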
u8 *
format_fib_path_ext (u8 * s, va_list * args)
{
    fib_path_ext_t *path_ext;
    u32 ii;

    path_ext = va_arg (*args, fib_path_ext_t *);

    s = format(s, "path:%d ", path_ext->fpe_path_index);

    switch (path_ext->fpe_type)
    {
    case FIB_PATH_EXT_MPLS: {
        fib_path_ext_mpls_attr_t attr;

        if (path_ext->fpe_mpls_flags)
        {
            s = format(s, "mpls-flags:[");

            FOR_EACH_PATH_EXT_MPLS_ATTR(attr)
            {
                if ((1<<attr) & path_ext->fpe_mpls_flags)
                {
                    s = format(s, "%s", fib_path_ext_mpls_flags_names[attr]);
                }
            }
            s = format(s, "]");
        }
        s = format(s, " labels:[");
        for (ii = 0; ii < vec_len(path_ext->fpe_path.frp_label_stack); ii++)
        {
            s = format(s, "[%U]",
                       format_fib_mpls_label,
                       &path_ext->fpe_path.frp_label_stack[ii]);
        }
        s = format(s, "]");
        break;
    }
    case FIB_PATH_EXT_ADJ: {
        fib_path_ext_adj_attr_t attr;

        if (path_ext->fpe_adj_flags)
        {
            s = format(s, "adj-flags:[");
            FOR_EACH_PATH_EXT_ADJ_ATTR(attr)
            {
                if ((1<<attr) & path_ext->fpe_adj_flags)
                {
                    s = format(s, "%s", fib_path_ext_adj_flags_names[attr]);
                }
            }
            s = for