/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * NOTE(review): the header names after these #include directives were lost
 * during extraction (angle-bracketed paths stripped). TODO: restore the
 * original include list (presumably the vnet BIER/FIB/MPLS headers this
 * file's symbols come from) before building.
 */
#include #include #include #include #include #include #include #include #include #include

/**
 * Memory pool of all the allocated tables
 */
bier_table_t *bier_table_pool;

/**
 * DB store of all BIER tables indexed by SD/set/hdr-len
 * (key built by bier_table_mk_key() below)
 */
static uword *bier_tables_by_key;

/**
 * The magic number of BIER ECMP tables to create.
 * The load-balance distribution algorithm will use a power of 2
 * for the number of buckets, which constrains the choice.
 */
#define BIER_N_ECMP_TABLES 16

/**
 * Return the pool index of a table from its pointer.
 */
static inline index_t bier_table_get_index (const bier_table_t *bt)
{
    return (bt - bier_table_pool);
}

/**
 * True if this is a 'main' table, i.e. its ECMP id is the reserved
 * main-table value (as opposed to one of the derived ECMP tables).
 */
int bier_table_is_main (const bier_table_t *bt)
{
    return (BIER_ECMP_TABLE_ID_MAIN == bt->bt_id.bti_ecmp);
}

/*
 * Construct the key to use to find a BIER table
 * in the global hash map
 */
static u32 bier_table_mk_key (const bier_table_id_t *id)
{
    /*
     * the set and sub-domain Ids are 8 bit values.
     * we have space for ECMP table ID and table type (SPF/TE)
     * for later
     */
    u32 key = ((id->bti_sub_domain << 24) | (id->bti_set << 16) | (id->bti_ecmp << 8) | (id->bti_hdr_len << 4) | (id->bti_type));
    return (key);
}

/**
 * Initialise a freshly allocated table: record its id and local label,
 * then size its lookup vector from the header-length id.
 * A main table stores entry indices; an ECMP table stores fmask indices.
 */
static void bier_table_init (bier_table_t *bt, const bier_table_id_t *id, mpls_label_t ll)
{
    u32 num_entries;
    bt->bt_lfei = FIB_NODE_INDEX_INVALID;
    bt->bt_id = *id;
    bt->bt_ll = ll;
    num_entries = bier_hdr_len_id_to_num_bits(bt->bt_id.bti_hdr_len);
    /*
     * create the lookup table of entries.
     */
    if (bier_table_is_main(bt)) {
        vec_validate_init_empty_aligned(bt->bt_entries, num_entries, INDEX_INVALID, CLIB_CACHE_LINE_BYTES);
    } else {
        vec_validate_init_empty_aligned(bt->bt_fmasks, num_entries, INDEX_INVALID, CLIB_CACHE_LINE_BYTES);
    }
}

/**
 * Remove this table's entry from the BIFT table.
 * Only valid for tables that have no local MPLS label (see ASSERT).
 */
static void bier_table_rm_bift (bier_table_t *bt)
{
    ASSERT(MPLS_LABEL_INVALID == bt->bt_ll);
    bier_bift_table_entry_remove(bier_bift_id_encode(bt->bt_id.bti_set, bt->bt_id.bti_sub_domain, bt->bt_id.bti_hdr_len));
}

/**
 * Add this table to the BIFT table, keyed by its encoded
 * set/sub-domain/hdr-len id, pointing at the table's forwarding DPO.
 * Only valid for tables that have no local MPLS label.
 */
static void bier_table_mk_bift (bier_table_t *bt)
{
    dpo_id_t dpo = DPO_INVALID;
    ASSERT(MPLS_LABEL_INVALID == bt->bt_ll);
    bier_table_contribute_forwarding(bier_table_get_index(bt), &dpo);
    bier_bift_table_entry_add(bier_bift_id_encode(bt->bt_id.bti_set, bt->bt_id.bti_sub_domain, bt->bt_id.bti_hdr_len), &dpo);
    /* the BIFT entry holds its own reference; drop ours */
    dpo_reset(&dpo);
}

/**
 * Remove the MPLS lfib entry created by bier_table_mk_lfib() (if any)
 * and drop the lock this table held on the default MPLS FIB.
 */
static void bier_table_rm_lfib (bier_table_t *bt)
{
    if (FIB_NODE_INDEX_INVALID != bt->bt_lfei) {
        fib_table_entry_delete_index(bt->bt_lfei, FIB_SOURCE_BIER);
        fib_table_unlock(MPLS_FIB_DEFAULT_TABLE_ID, FIB_PROTOCOL_MPLS, FIB_SOURCE_BIER);
    }
    bt->bt_lfei = FIB_NODE_INDEX_INVALID;
}

/**
 * Tear down a table once its last lock is released:
 *  - main table: remove its lfib entry (labelled) or BIFT entry
 *    (unlabelled), unlock its path-list, then delete all its entries.
 *  - ECMP table: unlock all the fmasks it references.
 * Finally remove it from the key DB and return it to the pool.
 */
static void bier_table_destroy (bier_table_t *bt)
{
    if (bier_table_is_main(bt)) {
        index_t *bei;
        if (MPLS_LABEL_INVALID != bt->bt_ll) {
            bier_table_rm_lfib(bt);
        } else {
            bier_table_rm_bift(bt);
        }
        fib_path_list_unlock(bt->bt_pl);
        bt->bt_pl = FIB_NODE_INDEX_INVALID;
        /*
         * unresolve/remove all entries from the table
         */
        vec_foreach (bei, bt->bt_entries) {
            if (INDEX_INVALID != *bei) {
                bier_entry_delete(*bei);
            }
        }
        vec_free (bt->bt_entries);
    } else {
        index_t *bfmi;
        /*
         * unlock any fmasks
         */
        vec_foreach (bfmi, bt->bt_fmasks) {
            bier_fmask_unlock(*bfmi);
        }
        vec_free(bt->bt_fmasks);
    }
    hash_unset(bier_tables_by_key, bier_table_mk_key(&bt->bt_id));
    pool_put(bier_table_pool, bt);
}

/* Take a reference on the table. */
static void bier_table_lock_i (bier_table_t *bt)
{
    bt->bt_locks++;
}

/* Release a reference; the last release destroys the table. */
static void bier_table_unlock_i (bier_table_t *bt)
{
    bt->bt_locks--;
    if (0 == bt->bt_locks) {
        bier_table_destroy(bt);
    }
}

/**
 * Public unlock by table-id: look the table up in the key DB and
 * release one reference. A no-op if no such table exists.
 */
void bier_table_unlock (const bier_table_id_t *bti)
{
    uword *p;
    u32 key;
    key = bier_table_mk_key(bti);
    p = hash_get (bier_tables_by_key, key);
    if (NULL != p) {
        bier_table_unlock_i(bier_table_get(p[0]));
    }
}

/**
 * If the table has a local MPLS label, install an exclusive entry for it
 * in the default MPLS FIB (locking that FIB), stacked on the forwarding
 * contributed by the table's ECMP path-list.
 * fp_len is 21: the label (20 bits) plus the EOS bit.
 */
static void bier_table_mk_lfib (bier_table_t *bt)
{
    /*
     * Add a new MPLS lfib entry
     */
    if (MPLS_LABEL_INVALID != bt->bt_ll) {
        fib_prefix_t pfx = {
            .fp_proto = FIB_PROTOCOL_MPLS,
            .fp_len = 21,
            .fp_label = bt->bt_ll,
            .fp_eos = MPLS_EOS,
            .fp_payload_proto = DPO_PROTO_BIER,
        };
        u32 mpls_fib_index;
        dpo_id_t dpo = DPO_INVALID;
        fib_table_find_or_create_and_lock(FIB_PROTOCOL_MPLS, MPLS_FIB_DEFAULT_TABLE_ID, FIB_SOURCE_BIER);
        /*
         * stack the entry on the forwarding chain produced by the
         * path-list via the ECMP tables.
         */
        fib_path_list_contribute_forwarding(bt->bt_pl, FIB_FORW_CHAIN_TYPE_BIER, FIB_PATH_LIST_FWD_FLAG_COLLAPSE, &dpo);
        mpls_fib_index = fib_table_find(FIB_PROTOCOL_MPLS, MPLS_FIB_DEFAULT_TABLE_ID);
        bt->bt_lfei = fib_table_entry_special_dpo_add(mpls_fib_index, &pfx, FIB_SOURCE_BIER, FIB_ENTRY_FLAG_EXCLUSIVE, &dpo);
        dpo_reset(&dpo);
    }
}

/**
 * Look a table up by its id in the key DB.
 * Returns NULL if not present.
 */
static bier_table_t * bier_table_find (const bier_table_id_t *bti)
{
    uword *p;
    u32 key;
    key = bier_table_mk_key(bti);
    p = hash_get(bier_tables_by_key, key);
    if (NULL != p) {
        return (bier_table_get(p[0]));
    }
    return (NULL);
}

/*
 * NOTE(review): this function is truncated in the visible chunk — it ends
 * mid-statement below. It appears to build BIER_N_ECMP_TABLES route-paths
 * for the table's ECMP set; the remainder is outside this view and is left
 * exactly as found.
 */
static bier_table_t * bier_table_mk_ecmp (index_t bti)
{
    fib_route_path_t *rpaths;
    fib_node_index_t pli;
    bier_table_t *bt;
    int ii;
    rpaths = NULL;
    bt = bier_table_get(bti);
    vec_validate(rpaths, BIER_N_ECMP_TABLES-1
;;; Copyright (c) 2016 Cisco and/or its affiliates.
;;; Licensed under the Apache License, Version 2.0 (the "License");
;;; you may not use this file except in compliance with the License.
;;; You may obtain a copy of the License at:
;;;
;;;     http://www.apache.org/licenses/LICENSE-2.0
;;;
;;; Unless required by applicable law or agreed to in writing, software
;;; distributed under the License is distributed on an "AS IS" BASIS,
;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
;;; See the License for the specific language governing permissions and
;;; limitations under the License.

;;; pipe-skel.el - pipelined graph node skeleton

(require 'skeleton)

(define-skeleton skel-periodic
"Insert a skeleton periodic process node"
nil
;; Prompt for the node name; uc-node-name is the upper-cased form used in
;; the generated macro names (e.g. FOO_POLL_PERIOD).
'(setq node-name (skeleton-read "Name: "))
'(setq uc-node-name (upcase node-name))
;; Poll period, inserted verbatim as an f64 literal in the generated C.
'(setq poll-period (skeleton-read "Poll period (f64 seconds, e.g. 10.0): "))

;; The remainder is the literal C template (string segments interleaved with
;; the variables above); left byte-for-byte untouched since any change here
;; alters the generated code.
;; NOTE(review): in the template, handle_event1 is called with `mm' while
;; handle_event2 is called with `vm' — presumably both should receive the
;; same pointer; confirm before relying on the generated code.
;; NOTE(review): the template appears truncated in this chunk (the closing
;; "}" of the generated function and the skeleton's closing paren are not
;; visible).
"
#define " uc-node-name "_POLL_PERIOD " poll-period "

static uword
" node-name "_process (vlib_main_t * vm,
                       vlib_node_runtime_t * rt,
                       vlib_frame_t * f)
{
    f64 poll_time_remaining;
    uword event_type, * event_data = 0;

    poll_time_remaining = " uc-node-name "_POLL_PERIOD;
    while (1) {
        int i;

        /* 
         * Sleep until next periodic call due, or until we receive event(s) 
         */
        poll_time_remaining = 
            vlib_process_wait_for_event_or_clock (vm, poll_time_remaining);
        
        event_type = vlib_process_get_events (vm, &event_data);
        switch (event_type) {
        case ~0:                /* no events => timeout */
            break;

        /* 
         * $$$$ FIXME: add cases / handlers for each event type 
         */
        case EVENT1:
            for (i = 0; i < vec_len (event_data); i++) 
                handle_event1 (mm, event_data[i]);
            break;

        case EVENT2:
            for (i = 0; i < vec_len (event_data); i++) 
                handle_event2 (vm, event_data[i]);
	    break;

        /* ... and so forth for each event type */

        default:
            /* This should never happen... */
            clib_warning (\"BUG: unhandled event type %d\", event_type);
            break;
        }
        if (event_data)
            _vec_len (event_data) = 0;

        /* Timer expired, call periodic function */
        if (vlib_process_suspend_time_is_zero (poll_time_remaining)) {
            " node-name "_periodic (vm);
            poll_time_remaining = " uc-node-name "_POLL_PERIOD;
        }
    }

    return 0;