/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * main.c: main vector processing loop
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <math.h>
#include <vppinfra/format.h>
#include <vlib/vlib.h>
#include <vlib/threads.h>
#include <vppinfra/tw_timer_1t_3w_1024sl_ov.h>
#include <vlib/unix/unix.h>

/* Actually allocate a few extra slots of vector data to support
   speculative vector enqueues which overflow vector data in next frame. */
#define VLIB_FRAME_SIZE_ALLOC (VLIB_FRAME_SIZE + 4)

always_inline u32
vlib_frame_bytes (u32 n_scalar_bytes, u32 n_vector_bytes)
{
  u32 n_bytes;

  /* Make room for vlib_frame_t plus scalar arguments. */
  n_bytes = vlib_frame_vector_byte_offset (n_scalar_bytes);

  /* Make room for vector arguments.
     Allocate a few extra slots of vector data to support
     speculative vector enqueues which overflow vector data in next frame. */
#define VLIB_FRAME_SIZE_EXTRA 4
  n_bytes += (VLIB_FRAME_SIZE + VLIB_FRAME_SIZE_EXTRA) * n_vector_bytes;

  /* Magic number is first 32bit number after vector data.
     Used to make sure that vector data is never overrun. */
#define VLIB_FRAME_MAGIC (0xabadc0ed)
  n_bytes += sizeof (u32);

  /* Pad to cache line. */
  n_bytes = round_pow2 (n_bytes, CLIB_CACHE_LINE_BYTES);

  return n_bytes;
}
always_inline u32 *
vlib_frame_find_magic (vlib_frame_t * f, vlib_node_t * node)
{
  void *p = f;

  p += vlib_frame_vector_byte_offset (node->scalar_size);

  p += (VLIB_FRAME_SIZE + VLIB_FRAME_SIZE_EXTRA) * node->vector_size;

  return p;
}

static inline vlib_frame_size_t *
get_frame_size_info (vlib_node_main_t * nm,
		     u32 n_scalar_bytes, u32 n_vector_bytes)
{
#ifdef VLIB_SUPPORTS_ARBITRARY_SCALAR_SIZES
  uword key = (n_scalar_bytes << 16) | n_vector_bytes;
  uword *p, i;

  p = hash_get (nm->frame_size_hash, key);
  if (p)
    i = p[0];
  else
    {
      i = vec_len (nm->frame_sizes);
      vec_validate (nm->frame_sizes, i);
      hash_set (nm->frame_size_hash, key, i);
    }

  return vec_elt_at_index (nm->frame_sizes, i);
#else
  ASSERT (vlib_frame_bytes (n_scalar_bytes, n_vector_bytes)
	  == (vlib_frame_bytes (0, 4)));
  return vec_elt_at_index (nm->frame_sizes, 0);
#endif
}

static vlib_frame_t *
vlib_frame_alloc_to_node (vlib_main_t * vm, u32 to_node_index,
			  u32 frame_flags)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_frame_size_t *fs;
  vlib_node_t *to_node;
  vlib_frame_t *f;
  u32 l, n, scalar_size, vector_size;

  ASSERT (vm == vlib_get_main ());

  to_node = vlib_get_node (vm, to_node_index);

  scalar_size = to_node->scalar_size;
  vector_size = to_node->vector_size;

  fs = get_frame_size_info (nm, scalar_size, vector_size);
  n = vlib_frame_bytes (scalar_size, vector_size);
  if ((l = vec_len (fs->free_frames)) > 0)
    {
      /* Allocate from end of free list. */
      f = fs->free_frames[l - 1];
      _vec_len (fs->free_frames) = l - 1;
    }
  else
    {
      f = clib_mem_alloc_aligned_no_fail (n, VLIB_FRAME_ALIGN);
    }

  /* Poison frame when debugging. */
  if (CLIB_DEBUG > 0)
    clib_memset (f, 0xfe, n);

  /* Insert magic number. */
  {
    u32 *magic;

    magic = vlib_frame_find_magic (f, to_node);
    *magic = VLIB_FRAME_MAGIC;
  }

  f->frame_flags = VLIB_FRAME_IS_ALLOCATED | frame_flags;
  f->n_vectors = 0;
  f->scalar_size = scalar_size;
  f->vector_size = vector_size;
  f->flags = 0;

  fs->n_alloc_frames += 1;

  return f;
}
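/* A minimal self-check of the layout computed by vlib_frame_bytes,
   assuming a next node with no scalar arguments and 4-byte vector
   elements (buffer indices).  This helper is illustrative only and is
   not part of the original dispatch path. */
static void __attribute__ ((unused))
vlib_frame_bytes_example (void)
{
  u32 n = vlib_frame_bytes (/* n_scalar_bytes */ 0, /* n_vector_bytes */ 4);

  /* Header + VLIB_FRAME_SIZE vector slots + 4 overflow slots + 32-bit
     magic, rounded up to a whole number of cache lines. */
  ASSERT (n % CLIB_CACHE_LINE_BYTES == 0);
  ASSERT (n >= vlib_frame_vector_byte_offset (0)
	  + (VLIB_FRAME_SIZE + VLIB_FRAME_SIZE_EXTRA) * sizeof (u32)
	  + sizeof (u32));
}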
/* Allocate a frame from FROM_NODE to TO_NODE via TO_NEXT_INDEX.
   Returns a pointer to the allocated frame. */
static vlib_frame_t *
vlib_frame_alloc (vlib_main_t * vm, vlib_node_runtime_t * from_node_runtime,
		  u32 to_next_index)
{
  vlib_node_t *from_node;

  from_node = vlib_get_node (vm, from_node_runtime->node_index);
  ASSERT (to_next_index < vec_len (from_node->next_nodes));

  return vlib_frame_alloc_to_node (vm, from_node->next_nodes[to_next_index],
				   /* frame_flags */ 0);
}

vlib_frame_t *
vlib_get_frame_to_node (vlib_main_t * vm, u32 to_node_index)
{
  vlib_frame_t *f = vlib_frame_alloc_to_node (vm, to_node_index,
					      /* frame_flags */
					      VLIB_FRAME_FREE_AFTER_DISPATCH);
  return vlib_get_frame (vm, f);
}

static inline void
vlib_validate_frame_indices (vlib_frame_t * f)
{
  if (CLIB_DEBUG > 0)
    {
      int i;
      u32 *from = vlib_frame_vector_args (f);

      /* Check for bad buffer index values */
      for (i = 0; i < f->n_vectors; i++)
	{
	  if (from[i] == 0)
	    {
	      clib_warning ("BUG: buffer index 0 at index %d", i);
	      ASSERT (0);
	    }
	  else if (from[i] == 0xfefefefe)
	    {
	      clib_warning ("BUG: frame poison pattern at index %d", i);
	      ASSERT (0);
	    }
	}
    }
}

void
vlib_put_frame_to_node (vlib_main_t * vm, u32 to_node_index, vlib_frame_t * f)
{
  vlib_pending_frame_t *p;
  vlib_node_t *to_node;

  if (f->n_vectors == 0)
    return;

  ASSERT (vm == vlib_get_main ());

  vlib_validate_frame_indices (f);

  to_node = vlib_get_node (vm, to_node_index);

  vec_add2 (vm->node_main.pending_frames, p, 1);

  f->frame_flags |= VLIB_FRAME_PENDING;
  p->frame = vlib_get_frame (vm, f);
  p->node_runtime_index = to_node->runtime_index;
  p->next_frame_index = VLIB_PENDING_FRAME_NO_NEXT_FRAME;
}

/* Free given frame. */
void
vlib_frame_free (vlib_main_t * vm, vlib_node_runtime_t * r, vlib_frame_t * f)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_node_t *node;
  vlib_frame_size_t *fs;

  ASSERT (vm == vlib_get_main ());
  ASSERT (f->frame_flags & VLIB_FRAME_IS_ALLOCATED);

  node = vlib_get_node (vm, r->node_index);
  fs = get_frame_size_info (nm, node->scalar_size, node->vector_size);

  ASSERT (f->frame_flags & VLIB_FRAME_IS_ALLOCATED);

  /* No next frames may point to freed frame. */
  if (CLIB_DEBUG > 0)
    {
      vlib_next_frame_t *nf;
      vec_foreach (nf, vm->node_main.next_frames) ASSERT (nf->frame != f);
    }

  f->frame_flags &= ~(VLIB_FRAME_IS_ALLOCATED | VLIB_FRAME_NO_APPEND);

  vec_add1 (fs->free_frames, f);
  ASSERT (fs->n_alloc_frames > 0);
  fs->n_alloc_frames -= 1;
}

static clib_error_t *
show_frame_stats (vlib_main_t * vm,
		  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_frame_size_t *fs;

  vlib_cli_output (vm, "%=6s%=12s%=12s", "Size", "# Alloc", "# Free");
  vec_foreach (fs, nm->frame_sizes)
  {
    u32 n_alloc = fs->n_alloc_frames;
    u32 n_free = vec_len (fs->free_frames);

    if (n_alloc + n_free > 0)
      vlib_cli_output (vm, "%=6d%=12d%=12d",
		       fs - nm->frame_sizes, n_alloc, n_free);
  }

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_frame_stats_cli, static) = {
  .path = "show vlib frame-allocation",
  .short_help = "Show node dispatch frame statistics",
  .function = show_frame_stats,
};
/* *INDENT-ON* */
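/* Usage sketch (illustrative, not part of the original file): enqueue a
   single buffer index bi to an arbitrary graph node.  The frame comes
   back with VLIB_FRAME_FREE_AFTER_DISPATCH set, so it is reclaimed once
   the target node has run. */
static void __attribute__ ((unused))
example_send_buffer_to_node (vlib_main_t * vm, u32 to_node_index, u32 bi)
{
  vlib_frame_t *f = vlib_get_frame_to_node (vm, to_node_index);
  u32 *to_next = vlib_frame_vector_args (f);

  to_next[0] = bi;
  f->n_vectors = 1;

  /* Queues the frame on the pending vector for dispatch. */
  vlib_put_frame_to_node (vm, to_node_index, f);
}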
/* Change ownership of enqueue rights to given next node. */
static void
vlib_next_frame_change_ownership (vlib_main_t * vm,
				  vlib_node_runtime_t * node_runtime,
				  u32 next_index)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *next_frame;
  vlib_node_t *node, *next_node;

  node = vec_elt (nm->nodes, node_runtime->node_index);

  /* Only internal, input & process nodes are allowed to call other nodes. */
  ASSERT (node->type == VLIB_NODE_TYPE_INTERNAL
	  || node->type == VLIB_NODE_TYPE_INPUT
	  || node->type == VLIB_NODE_TYPE_PROCESS);

  ASSERT (vec_len (node->next_nodes) == node_runtime->n_next_nodes);

  next_frame =
    vlib_node_runtime_get_next_frame (vm, node_runtime, next_index);
  next_node = vec_elt (nm->nodes, node->next_nodes[next_index]);

  if (next_node->owner_node_index != VLIB_INVALID_NODE_INDEX)
    {
      /* Get frame from previous owner. */
      vlib_next_frame_t *owner_next_frame;
      vlib_next_frame_t tmp;

      owner_next_frame =
	vlib_node_get_next_frame (vm,
				  next_node->owner_node_index,
				  next_node->owner_next_index);

      /* Swap target next frame with owner's. */
      tmp = owner_next_frame[0];
      owner_next_frame[0] = next_frame[0];
      next_frame[0] = tmp;

      /*
       * If next_frame is already pending, we have to track down
       * all pending frames and fix their next_frame_index fields.
       */
      if (next_frame->flags & VLIB_FRAME_PENDING)
	{
	  vlib_pending_frame_t *p;
	  if (next_frame->frame != NULL)
	    {
	      vec_foreach (p, nm->pending_frames)
	      {
		if (p->frame == next_frame->frame)
		  {
		    p->next_frame_index =
		      next_frame - vm->node_main.next_frames;
		  }
	      }
	    }
	}
    }
  else
    {
      /* No previous owner. Take ownership. */
      next_frame->flags |= VLIB_FRAME_OWNER;
    }

  /* Record new owner. */
  next_node->owner_node_index = node->index;
  next_node->owner_next_index = next_index;

  /* Now we should be owner. */
  ASSERT (next_frame->flags & VLIB_FRAME_OWNER);
}

/* Make sure that magic number is still there.
   Otherwise, it is likely that caller has overrun frame arguments. */
always_inline void
validate_frame_magic (vlib_main_t * vm,
		      vlib_frame_t * f, vlib_node_t * n, uword next_index)
{
  vlib_node_t *next_node = vlib_get_node (vm, n->next_nodes[next_index]);
  u32 *magic = vlib_frame_find_magic (f, next_node);
  ASSERT (VLIB_FRAME_MAGIC == magic[0]);
}
vlib_frame_t *
vlib_get_next_frame_internal (vlib_main_t * vm,
			      vlib_node_runtime_t * node,
			      u32 next_index, u32 allocate_new_next_frame)
{
  vlib_frame_t *f;
  vlib_next_frame_t *nf;
  u32 n_used;

  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);

  /* Make sure this next frame owns right to enqueue to destination frame. */
  if (PREDICT_FALSE (!(nf->flags & VLIB_FRAME_OWNER)))
    vlib_next_frame_change_ownership (vm, node, next_index);

  /* ??? Don't need valid flag: can use frame_index == ~0 */
  if (PREDICT_FALSE (!(nf->flags & VLIB_FRAME_IS_ALLOCATED)))
    {
      nf->frame = vlib_frame_alloc (vm, node, next_index);
      nf->flags |= VLIB_FRAME_IS_ALLOCATED;
    }

  f = nf->frame;

  /* Has frame been removed from pending vector (e.g. finished dispatching)?
     If so we can reuse frame. */
  if ((nf->flags & VLIB_FRAME_PENDING)
      && !(f->frame_flags & VLIB_FRAME_PENDING))
    {
      nf->flags &= ~VLIB_FRAME_PENDING;
      f->n_vectors = 0;
      f->flags = 0;
    }

  /* Allocate new frame if current one is marked as no-append or
     it is already full. */
  n_used = f->n_vectors;
  if (n_used >= VLIB_FRAME_SIZE || (allocate_new_next_frame && n_used > 0) ||
      (f->frame_flags & VLIB_FRAME_NO_APPEND))
    {
      /* Old frame may need to be freed after dispatch, since we'll have
         two redundant frames from node -> next node. */
      if (!(nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH))
	{
	  vlib_frame_t *f_old = vlib_get_frame (vm, nf->frame);
	  f_old->frame_flags |= VLIB_FRAME_FREE_AFTER_DISPATCH;
	}

      /* Allocate new frame to replace full one. */
      f = nf->frame = vlib_frame_alloc (vm, node, next_index);
      n_used = f->n_vectors;
    }

  /* Should have free vectors in frame now. */
  ASSERT (n_used < VLIB_FRAME_SIZE);

  if (CLIB_DEBUG > 0)
    {
      validate_frame_magic (vm, f,
			    vlib_get_node (vm, node->node_index), next_index);
    }

  return f;
}

static void
vlib_put_next_frame_validate (vlib_main_t * vm,
			      vlib_node_runtime_t * rt,
			      u32 next_index, u32 n_vectors_left)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  vlib_node_runtime_t *next_rt;
  vlib_node_t *next_node;
  u32 n_before, n_after;

  nf = vlib_node_runtime_get_next_frame (vm, rt, next_index);
  f = vlib_get_frame (vm, nf->frame);

  ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);

  vlib_validate_frame_indices (f);

  n_after = VLIB_FRAME_SIZE - n_vectors_left;
  n_before = f->n_vectors;

  ASSERT (n_after >= n_before);

  next_rt = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
			      nf->node_runtime_index);
  next_node = vlib_get_node (vm, next_rt->node_index);
  if (n_after > 0 && next_node->validate_frame)
    {
      u8 *msg = next_node->validate_frame (vm, rt, f);
      if (msg)
	{
	  clib_warning ("%v", msg);
	  ASSERT (0);
	}
      vec_free (msg);
    }
}

void
vlib_put_next_frame (vlib_main_t * vm,
		     vlib_node_runtime_t * r,
		     u32 next_index, u32 n_vectors_left)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  u32 n_vectors_in_frame;

  if (CLIB_DEBUG > 0)
    vlib_put_next_frame_validate (vm, r, next_index, n_vectors_left);

  nf = vlib_node_runtime_get_next_frame (vm, r, next_index);
  f = vlib_get_frame (vm, nf->frame);

  /* Make sure that magic number is still there.  Otherwise, caller
     has overrun frame meta data. */
  if (CLIB_DEBUG > 0)
    {
      vlib_node_t *node = vlib_get_node (vm, r->node_index);
      validate_frame_magic (vm, f, node, next_index);
    }

  /* Convert # of vectors left -> number of vectors there. */
  ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);
  n_vectors_in_frame = VLIB_FRAME_SIZE - n_vectors_left;

  f->n_vectors = n_vectors_in_frame;

  /* If vectors were added to frame, add to pending vector. */
  if (PREDICT_TRUE (n_vectors_in_frame > 0))
    {
      vlib_pending_frame_t *p;
      u32 v0, v1;

      r->cached_next_index = next_index;

      if (!(f->frame_flags & VLIB_FRAME_PENDING))
	{
	  __attribute__ ((unused)) vlib_node_t *node;
	  vlib_node_t *next_node;
	  vlib_node_runtime_t *next_runtime;

	  node = vlib_get_node (vm, r->node_index);
	  next_node = vlib_get_next_node (vm, r->node_index, next_index);
	  next_runtime = vlib_node_get_runtime (vm, next_node->index);

	  vec_add2 (nm->pending_frames, p, 1);

	  p->frame = nf->frame;
	  p->node_runtime_index = nf->node_runtime_index;
	  p->next_frame_index = nf - nm->next_frames;
	  nf->flags |= VLIB_FRAME_PENDING;
	  f->frame_flags |= VLIB_FRAME_PENDING;

	  /*
	   * If we're going to dispatch this frame on another thread,
	   * force allocation of a new frame. Otherwise, we create
	   * a dangling frame reference. Each thread has its own copy of
	   * the next_frames vector.
	   */
	  if (0 && r->thread_index != next_runtime->thread_index)
	    {
	      nf->frame = NULL;
	      nf->flags &= ~(VLIB_FRAME_PENDING | VLIB_FRAME_IS_ALLOCATED);
	    }
	}

      /* Copy trace flag from next_frame and from runtime. */
      nf->flags |=
	(nf->flags & VLIB_NODE_FLAG_TRACE)
	| (r->flags & VLIB_NODE_FLAG_TRACE);

      v0 = nf->vectors_since_last_overflow;
      v1 = v0 + n_vectors_in_frame;
      nf->vectors_since_last_overflow = v1;

      if (PREDICT_FALSE (v1 < v0))
	{
	  vlib_node_t *node = vlib_get_node (vm, r->node_index);
	  vec_elt (node->n_vectors_by_next_node, next_index) += v0;
	}
    }
}
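/* Sketch of the calling convention vlib_get_next_frame_internal and
   vlib_put_next_frame implement.  Node functions normally use the
   vlib_get_next_frame / vlib_put_next_frame macro pair (see
   vlib/node_funcs.h) to bracket their enqueue loops; the node body
   below (a pure pass-through) is illustrative. */
static uword __attribute__ ((unused))
example_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
		 vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  u32 next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 *to_next, n_left_to_next;

      /* Grab (at most) a full frame's worth of slots for next_index. */
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  /* Pass each buffer index through unchanged. */
	  to_next[0] = from[0];
	  from += 1, to_next += 1;
	  n_left_from -= 1, n_left_to_next -= 1;
	}

      /* Tell vlib how many slots were left unused. */
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}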
/* Sync up runtime (32 bit counters) and main node stats (64 bit counters). */
never_inline void
vlib_node_runtime_sync_stats (vlib_main_t * vm,
			      vlib_node_runtime_t * r,
			      uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_t *n = vlib_get_node (vm, r->node_index);

  n->stats_total.calls += n_calls + r->calls_since_last_overflow;
  n->stats_total.vectors += n_vectors + r->vectors_since_last_overflow;
  n->stats_total.clocks += n_clocks + r->clocks_since_last_overflow;
  n->stats_total.max_clock = r->max_clock;
  n->stats_total.max_clock_n = r->max_clock_n;

  r->calls_since_last_overflow = 0;
  r->vectors_since_last_overflow = 0;
  r->clocks_since_last_overflow = 0;
}

always_inline void __attribute__ ((unused))
vlib_process_sync_stats (vlib_main_t * vm,
			 vlib_process_t * p,
			 uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_runtime_t *rt = &p->node_runtime;
  vlib_node_t *n = vlib_get_node (vm, rt->node_index);
  vlib_node_runtime_sync_stats (vm, rt, n_calls, n_vectors, n_clocks);
  n->stats_total.suspends += p->n_suspends;
  p->n_suspends = 0;
}

void
vlib_node_sync_stats (vlib_main_t * vm, vlib_node_t * n)
{
  vlib_node_runtime_t *rt;

  if (n->type == VLIB_NODE_TYPE_PROCESS)
    {
      /* Nothing to do for PROCESS nodes except in main thread */
      if (vm != &vlib_global_main)
	return;

      vlib_process_t *p = vlib_get_process_from_node (vm, n);
      n->stats_total.suspends += p->n_suspends;
      p->n_suspends = 0;
      rt = &p->node_runtime;
    }
  else
    rt = vec_elt_at_index (vm->node_main.nodes_by_type[n->type],
			   n->runtime_index);

  vlib_node_runtime_sync_stats (vm, rt, 0, 0, 0);

  /* Sync up runtime next frame vector counters with main node structure. */
  {
    vlib_next_frame_t *nf;
    uword i;
    for (i = 0; i < rt->n_next_nodes; i++)
      {
	nf = vlib_node_runtime_get_next_frame (vm, rt, i);
	vec_elt (n->n_vectors_by_next_node, i) +=
	  nf->vectors_since_last_overflow;
	nf->vectors_since_last_overflow = 0;
      }
  }
}

always_inline u32
vlib_node_runtime_update_stats (vlib_main_t * vm,
				vlib_node_runtime_t * node,
				uword n_calls,
				uword n_vectors, uword n_clocks)
{
  u32 ca0, ca1, v0, v1, cl0, cl1, r;

  cl0 = cl1 = node->clocks_since_last_overflow;
  ca0 = ca1 = node->calls_since_last_overflow;
  v0 = v1 = node->vectors_since_last_overflow;

  ca1 = ca0 + n_calls;
  v1 = v0 + n_vectors;
  cl1 = cl0 + n_clocks;

  node->calls_since_last_overflow = ca1;
  node->clocks_since_last_overflow = cl1;
  node->vectors_since_last_overflow = v1;

  node->max_clock_n = node->max_clock > n_clocks ?
    node->max_clock_n : n_vectors;
  node->max_clock = node->max_clock > n_clocks ? node->max_clock : n_clocks;
  r = vlib_node_runtime_update_main_loop_vector_stats (vm, node, n_vectors);

  if (PREDICT_FALSE (ca1 < ca0 || v1 < v0 || cl1 < cl0))
    {
      node->calls_since_last_overflow = ca0;
      node->clocks_since_last_overflow = cl0;
      node->vectors_since_last_overflow = v0;

      vlib_node_runtime_sync_stats (vm, node, n_calls, n_vectors, n_clocks);
    }

  return r;
}

always_inline void
vlib_process_update_stats (vlib_main_t * vm,
			   vlib_process_t * p,
			   uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_runtime_update_stats (vm, &p->node_runtime,
				  n_calls, n_vectors, n_clocks);
}
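/* Worked example of the overflow handling above (illustrative numbers):
   if vectors_since_last_overflow is 0xffffff00 and n_vectors is 0x200,
   the 32-bit sum wraps (v1 < v0).  The code then restores the saved
   32-bit values and calls vlib_node_runtime_sync_stats, which folds the
   accumulated residue plus the new deltas into the 64-bit stats_total
   counters and zeroes the 32-bit ones, so nothing is lost or double
   counted. */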
static clib_error_t *
vlib_cli_elog_clear (vlib_main_t * vm,
		     unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_reset_buffer (&vm->elog_main);
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_clear_cli, static) = {
  .path = "event-logger clear",
  .short_help = "Clear the event log",
  .function = vlib_cli_elog_clear,
};
/* *INDENT-ON* */

#ifdef CLIB_UNIX
static clib_error_t *
elog_save_buffer (vlib_main_t * vm,
		  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vm->elog_main;
  char *file, *chroot_file;
  clib_error_t *error = 0;

  if (!unformat (input, "%s", &file))
    {
      vlib_cli_output (vm, "expected file name, got `%U'",
		       format_unformat_error, input);
      return 0;
    }

  /* It's fairly hard to get "../oopsie" through unformat; just in case */
  if (strstr (file, "..") || index (file, '/'))
    {
      vlib_cli_output (vm, "illegal characters in filename '%s'", file);
      return 0;
    }

  chroot_file = (char *) format (0, "/tmp/%s%c", file, 0);

  vec_free (file);

  vlib_cli_output (vm, "Saving %wd of %wd events to %s",
		   elog_n_events_in_buffer (em),
		   elog_buffer_capacity (em), chroot_file);

  vlib_worker_thread_barrier_sync (vm);
  error = elog_write_file (em, chroot_file, 1 /* flush ring */ );
  vlib_worker_thread_barrier_release (vm);
  vec_free (chroot_file);
  return error;
}

void
vlib_post_mortem_dump (void)
{
  vlib_main_t *vm = &vlib_global_main;
  elog_main_t *em = &vm->elog_main;

  u8 *filename;
  clib_error_t *error;

  if ((vm->elog_post_mortem_dump + vm->dispatch_pcap_postmortem) == 0)
    return;

  if (vm->dispatch_pcap_postmortem)
    {
      clib_error_t *error;
      pcap_main_t *pm = &vm->dispatch_pcap_main;

      pm->n_packets_to_capture = pm->n_packets_captured;
      pm->file_name = (char *) format (0, "/tmp/dispatch_post_mortem.%d%c",
				       getpid (), 0);
      error = pcap_write (pm);
      pcap_close (pm);
      if (error)
	clib_error_report (error);
      /*
       * We're in the middle of crashing. Don't try to free the filename.
       */
    }

  if (vm->elog_post_mortem_dump)
    {
      filename = format (0, "/tmp/elog_post_mortem.%d%c", getpid (), 0);
      error = elog_write_file (em, (char *) filename, 1 /* flush ring */ );
      if (error)
	clib_error_report (error);
      /*
       * We're in the middle of crashing. Don't try to free the filename.
       */
    }
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_save_cli, static) = {
  .path = "event-logger save",
  .short_help = "event-logger save <filename> (saves log in /tmp/<filename>)",
  .function = elog_save_buffer,
};
/* *INDENT-ON* */

static clib_error_t *
elog_stop (vlib_main_t * vm,
	   unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vm->elog_main;

  em->n_total_events_disable_limit = em->n_total_events;

  vlib_cli_output (vm, "Stopped the event logger...");
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_stop_cli, static) = {
  .path = "event-logger stop",
  .short_help = "Stop the event-logger",
  .function = elog_stop,
};
/* *INDENT-ON* */

static clib_error_t *
elog_restart (vlib_main_t * vm,
	      unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vm->elog_main;

  em->n_total_events_disable_limit = ~0;

  vlib_cli_output (vm, "Restarted the event logger...");
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_restart_cli, static) = {
  .path = "event-logger restart",
  .short_help = "Restart the event-logger",
  .function = elog_restart,
};
/* *INDENT-ON* */

static clib_error_t *
elog_resize_command_fn (vlib_main_t * vm,
			unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vm->elog_main;
  u32 tmp;

  /* Stop the parade */
  elog_reset_buffer (&vm->elog_main);

  if (unformat (input, "%d", &tmp))
    {
      elog_alloc (em, tmp);
      em->n_total_events_disable_limit = ~0;
    }
  else
    return clib_error_return (0, "Must specify how many events in the ring");

  vlib_cli_output (vm, "Resized ring and restarted the event logger...");
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_resize_cli, static) = {
  .path = "event-logger resize",
  .short_help = "event-logger resize <nnn>",
  .function = elog_resize_command_fn,
};
/* *INDENT-ON* */

#endif /* CLIB_UNIX */
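/* Typical CLI session against the commands above (illustrative):

     vpp# event-logger resize 10000
     Resized ring and restarted the event logger...
     vpp# event-logger stop
     Stopped the event logger...
     vpp# event-logger save mylog
     Saving 212 of 10000 events to /tmp/mylog

   The event count (212 here) is whatever elog_n_events_in_buffer
   returns at save time. */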
"running" : "stopped"); vec_foreach (e, es) { vlib_cli_output (vm, "%18.9f: %U", e->time + dt, format_elog_event, em, e); n_events_to_show--; if (n_events_to_show == 0) break; } vec_free (es); } static clib_error_t * elog_show_buffer (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { u32 n_events_to_show; clib_error_t *error = 0; n_events_to_show = 250; while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { if (unformat (input, "%d", &n_events_to_show)) ; else if (unformat (input, "all")) n_events_to_show = ~0; else return unformat_parse_error (input); } elog_show_buffer_internal (vm, n_events_to_show); return error; } /* *INDENT-OFF* */ VLIB_CLI_COMMAND (elog_show_cli, static) = { .path = "show event-logger", .short_help = "Show event logger info", .function = elog_show_buffer, }; /* *INDENT-ON* */ void vlib_gdb_show_event_log (void) { elog_show_buffer_internal (vlib_get_main (), (u32) ~ 0); } static inline void vlib_elog_main_loop_event (vlib_main_t * vm, u32 node_index, u64 time, u32 n_vectors, u32 is_return) { vlib_main_t *evm = &vlib_global_main; elog_main_t *em = &evm->elog_main; int enabled = evm->elog_trace_graph_dispatch | evm->elog_trace_graph_circuit; if (PREDICT_FALSE (enabled && n_vectors)) { if (PREDICT_FALSE (!elog_is_enabled (em))) { evm->elog_trace_graph_dispatch = 0; evm->elog_trace_graph_circuit = 0; return; } if (PREDICT_TRUE (evm->elog_trace_graph_dispatch || (evm->elog_trace_graph_circuit && node_index == evm->elog_trace_graph_circuit_node_index))) { elog_track (em, /* event type */ vec_elt_at_index (is_return ? evm->node_return_elog_event_types : evm->node_call_elog_event_types, node_index), /* track */ (vm->thread_index ? &vlib_worker_threads[vm->thread_index].elog_track : &em->default_track), /* data to log */ n_vectors); } } } #if VLIB_BUFFER_TRACE_TRAJECTORY > 0 void (*vlib_buffer_trace_trajectory_cb) (vlib_buffer_t * b, u32 node_index); void (*vlib_buffer_trace_trajectory_init_cb) (vlib_buffer_t * b); void vlib_buffer_trace_trajectory_init (vlib_buffer_t * b) { if (PREDICT_TRUE (vlib_buffer_trace_trajectory_init_cb != 0)) { (*vlib_buffer_trace_trajectory_init_cb) (b); } } #endif static inline void add_trajectory_trace (vlib_buffer_t * b, u32 node_index) { #if VLIB_BUFFER_TRACE_TRAJECTORY > 0 if (PREDICT_TRUE (vlib_buffer_trace_trajectory_cb != 0)) { (*vlib_buffer_trace_trajectory_cb) (b, node_index); } #endif } u8 *format_vnet_buffer_flags (u8 * s, va_list * args) __attribute__ ((weak)); u8 * format_vnet_buffer_flags (u8 * s, va_list * args) { s = format (s, "BUG STUB %s", __FUNCTION__); return s; } u8 *format_vnet_buffer_opaque (u8 * s, va_list * args) __attribute__ ((weak)); u8 * format_vnet_buffer_opaque (u8 * s, va_list * args) { s = format (s, "BUG STUB %s", __FUNCTION__); return s; } u8 *format_vnet_buffer_opaque2 (u8 * s, va_list * args) __attribute__ ((weak)); u8 * format_vnet_buffer_opaque2 (u8 * s, va_list * args) { s = format (s, "BUG STUB %s", __FUNCTION__); return s; } static u8 * format_buffer_metadata (u8 * s, va_list * args) { vlib_buffer_t *b = va_arg (*args, vlib_buffer_t *); s = format (s, "flags: %U\n", format_vnet_buffer_flags, b); s = format (s, "current_data: %d, current_length: %d\n", (i32) (b->current_data), (i32) (b->current_length)); s = format (s, "current_config_index/punt_reason: %d, flow_id: %x, next_buffer: %x\n", b->current_config_index, b->flow_id, b->next_buffer); s = format (s, "error: %d, ref_count: %d, buffer_pool_index: %d\n", (u32) (b->error), (u32) (b->ref_count), (u32) 
static u8 *
format_buffer_metadata (u8 * s, va_list * args)
{
  vlib_buffer_t *b = va_arg (*args, vlib_buffer_t *);

  s = format (s, "flags: %U\n", format_vnet_buffer_flags, b);
  s = format (s, "current_data: %d, current_length: %d\n",
	      (i32) (b->current_data), (i32) (b->current_length));
  s = format
    (s,
     "current_config_index/punt_reason: %d, flow_id: %x, next_buffer: %x\n",
     b->current_config_index, b->flow_id, b->next_buffer);
  s = format (s, "error: %d, ref_count: %d, buffer_pool_index: %d\n",
	      (u32) (b->error), (u32) (b->ref_count),
	      (u32) (b->buffer_pool_index));
  s = format (s, "trace_handle: 0x%x, len_not_first_buf: %d\n",
	      b->trace_handle, b->total_length_not_including_first_buffer);
  return s;
}

#define A(x) vec_add1(vm->pcap_buffer, (x))

static void
dispatch_pcap_trace (vlib_main_t * vm,
		     vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  int i;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **bufp, *b;
  pcap_main_t *pm = &vlib_global_main.dispatch_pcap_main;
  vlib_trace_main_t *tm = &vm->trace_main;
  u32 capture_size;
  vlib_node_t *n;
  i32 n_left;
  f64 time_now = vlib_time_now (vm);
  u32 *from;
  u8 *d;
  u8 string_count;

  /* Input nodes don't have frames yet */
  if (frame == 0 || frame->n_vectors == 0)
    return;

  from = vlib_frame_vector_args (frame);
  vlib_get_buffers (vm, from, bufs, frame->n_vectors);
  bufp = bufs;

  n = vlib_get_node (vm, node->node_index);

  for (i = 0; i < frame->n_vectors; i++)
    {
      if (PREDICT_TRUE (pm->n_packets_captured < pm->n_packets_to_capture))
	{
	  b = bufp[i];

	  vec_reset_length (vm->pcap_buffer);
	  string_count = 0;

	  /* Version, flags */
	  A ((u8) VLIB_PCAP_MAJOR_VERSION);
	  A ((u8) VLIB_PCAP_MINOR_VERSION);
	  A (0 /* string_count */ );
	  A (n->protocol_hint);

	  /* Buffer index (big endian) */
	  A ((from[i] >> 24) & 0xff);
	  A ((from[i] >> 16) & 0xff);
	  A ((from[i] >> 8) & 0xff);
	  A ((from[i] >> 0) & 0xff);

	  /* Node name, NULL-terminated ASCII */
	  vm->pcap_buffer = format (vm->pcap_buffer, "%v%c", n->name, 0);
	  string_count++;

	  vm->pcap_buffer = format (vm->pcap_buffer, "%U%c",
				    format_buffer_metadata, b, 0);
	  string_count++;

	  vm->pcap_buffer = format (vm->pcap_buffer, "%U%c",
				    format_vnet_buffer_opaque, b, 0);
	  string_count++;

	  vm->pcap_buffer = format (vm->pcap_buffer, "%U%c",
				    format_vnet_buffer_opaque2, b, 0);
	  string_count++;

	  /* Is this packet traced? */
	  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      vlib_trace_header_t **h
		= pool_elt_at_index (tm->trace_buffer_pool,
				     vlib_buffer_get_trace_index (b));

	      vm->pcap_buffer = format (vm->pcap_buffer, "%U%c",
					format_vlib_trace, vm, h[0], 0);
	      string_count++;
	    }

	  /* Save the string count */
	  vm->pcap_buffer[2] = string_count;

	  /* Figure out how many bytes in the pcap trace */
	  capture_size = vec_len (vm->pcap_buffer)
	    + vlib_buffer_length_in_chain (vm, b);

	  clib_spinlock_lock_if_init (&pm->lock);
	  n_left = clib_min (capture_size, 16384);
	  d = pcap_add_packet (pm, time_now, n_left, capture_size);

	  /* Copy the header */
	  clib_memcpy_fast (d, vm->pcap_buffer, vec_len (vm->pcap_buffer));
	  d += vec_len (vm->pcap_buffer);

	  n_left = clib_min (vlib_buffer_length_in_chain (vm, b),
			     (16384 - vec_len (vm->pcap_buffer)));
	  /* Copy the packet data */
	  while (1)
	    {
	      u32 copy_length = clib_min ((u32) n_left, b->current_length);
	      clib_memcpy_fast (d, b->data + b->current_data, copy_length);
	      n_left -= b->current_length;
	      if (n_left <= 0)
		break;
	      d += b->current_length;
	      ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);
	      b = vlib_get_buffer (vm, b->next_buffer);
	    }
	  clib_spinlock_unlock_if_init (&pm->lock);
	}
    }
}
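/* For reference, the layout of one dispatch-pcap record as written by
   dispatch_pcap_trace above:

     u8  major version, u8 minor version
     u8  string_count (patched in once the strings are formatted)
     u8  protocol_hint
     u32 buffer index, big endian
     string_count NUL-terminated strings: node name, buffer metadata,
       opaque, opaque2, and (if the buffer is traced) the packet trace
     packet data, with the whole record capped at 16384 bytes */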
static_always_inline u64
dispatch_node (vlib_main_t * vm,
	       vlib_node_runtime_t * node,
	       vlib_node_type_t type,
	       vlib_node_state_t dispatch_state,
	       vlib_frame_t * frame, u64 last_time_stamp)
{
  uword n, v;
  u64 t;
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;

  if (CLIB_DEBUG > 0)
    {
      vlib_node_t *n = vlib_get_node (vm, node->node_index);
      ASSERT (n->type == type);
    }

  /* Only non-internal nodes may be disabled. */
  if (type != VLIB_NODE_TYPE_INTERNAL && node->state != dispatch_state)
    {
      ASSERT (type != VLIB_NODE_TYPE_INTERNAL);
      return last_time_stamp;
    }

  if ((type == VLIB_NODE_TYPE_PRE_INPUT || type == VLIB_NODE_TYPE_INPUT)
      && dispatch_state != VLIB_NODE_STATE_INTERRUPT)
    {
      u32 c = node->input_main_loops_per_call;
      /* Only call node when count reaches zero. */
      if (c)
	{
	  node->input_main_loops_per_call = c - 1;
	  return last_time_stamp;
	}
    }

  /* Speculatively prefetch next frames. */
  if (node->n_next_nodes > 0)
    {
      nf = vec_elt_at_index (nm->next_frames, node->next_frame_index);
      CLIB_PREFETCH (nf, 4 * sizeof (nf[0]), WRITE);
    }

  vm->cpu_time_last_node_dispatch = last_time_stamp;

  vlib_elog_main_loop_event (vm, node->node_index,
			     last_time_stamp,
			     frame ? frame->n_vectors : 0,
			     /* is_return */ 0);

  vlib_node_runtime_perf_counter (vm, node, frame, 0, last_time_stamp,
				  VLIB_NODE_RUNTIME_PERF_BEFORE);

  /*
   * Turn this on if you run into
   * "bad monkey" contexts, and you want to know exactly
   * which nodes they've visited... See ixge.c...
   */
  if (VLIB_BUFFER_TRACE_TRAJECTO
/*
 *------------------------------------------------------------------
 * l2_api.c - layer 2 forwarding api
 *
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vnet/vnet.h>
#include <vlibmemory/api.h>

#include <vnet/interface.h>
#include <vnet/api_errno.h>
#include <vnet/l2/l2_input.h>
#include <vnet/l2/l2_fib.h>
#include <vnet/l2/l2_vtr.h>
#include <vnet/l2/l2_learn.h>

#include <vnet/vnet_msg_enum.h>

#define vl_typedefs		/* define message structures */
#include <vnet/vnet_all_api_h.h>
#undef vl_typedefs

#define vl_endianfun		/* define endian-swap functions */
#include <vnet/vnet_all_api_h.h>
#undef vl_endianfun

#define vl_api_bridge_domain_details_t_endian vl_noop_handler
#define vl_api_bridge_domain_details_t_print vl_noop_handler

/* instantiate all the print functions we know about */
#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
#define vl_printfun
#include <vnet/vnet_all_api_h.h>
#undef vl_printfun

#include <vlibapi/api_helper_macros.h>

#define foreach_vpe_api_msg                                 \
_(L2_XCONNECT_DUMP, l2_xconnect_dump)                       \
_(L2_FIB_CLEAR_TABLE, l2_fib_clear_table)                   \
_(L2_FIB_TABLE_DUMP, l2_fib_table_dump)                     \
_(L2FIB_FLUSH_ALL, l2fib_flush_all)                         \
_(L2FIB_FLUSH_INT, l2fib_flush_int)                         \
_(L2FIB_FLUSH_BD, l2fib_flush_bd)                           \
_(L2FIB_ADD_DEL, l2fib_add_del)                             \
_(WANT_L2_MACS_EVENTS, want_l2_macs_events)		    \
_(L2_FLAGS, l2_flags)                                       \
_(SW_INTERFACE_SET_L2_XCONNECT, sw_interface_set_l2_xconnect)   \
_(SW_INTERFACE_SET_L2_BRIDGE, sw_interface_set_l2_bridge)       \
_(L2_PATCH_ADD_DEL, l2_patch_add_del)				\
_(L2_INTERFACE_EFP_FILTER, l2_interface_efp_filter)             \
_(BD_IP_MAC_ADD_DEL, bd_ip_mac_add_del)                         \
_(BRIDGE_DOMAIN_ADD_DEL, bridge_domain_add_del)                 \
_(BRIDGE_DOMAIN_DUMP, bridge_domain_dump)                       \
_(BRIDGE_FLAGS, bridge_flags)                                   \
_(L2_INTERFACE_VLAN_TAG_REWRITE, l2_interface_vlan_tag_rewrite) \
_(L2_INTERFACE_PBB_TAG_REWRITE, l2_interface_pbb_tag_rewrite)   \
_(BRIDGE_DOMAIN_SET_MAC_AGE, bridge_domain_set_mac_age)         \
_(SW_INTERFACE_SET_VPATH, sw_interface_set_vpath)
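
/* For reference, each (ID, name) pair above expands into a message
   handler registration.  A sketch of the hookup (the real function
   lives at the bottom of the original file, beyond this excerpt; the
   shape shown here follows the usual VPP convention and is
   illustrative):

     #define _(N,n)                                          \
       vl_msg_api_set_handlers(VL_API_##N, #n,               \
                               vl_api_##n##_t_handler,       \
                               vl_noop_handler,              \
                               vl_api_##n##_t_endian,        \
                               vl_api_##n##_t_print,         \
                               sizeof(vl_api_##n##_t), 1);
       foreach_vpe_api_msg;
     #undef _
*/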

static void
send_l2_xconnect_details (vl_api_registration_t * reg, u32 context,
			  u32 rx_sw_if_index, u32 tx_sw_if_index)
{
  vl_api_l2_xconnect_details_t *mp;

  mp = vl_msg_api_alloc (sizeof (*mp));
  clib_memset (mp, 0, sizeof (*mp));
  mp->_vl_msg_id = ntohs (VL_API_L2_XCONNECT_DETAILS);
  mp->context = context;
  mp->rx_sw_if_index = htonl (rx_sw_if_index);
  mp->tx_sw_if_index = htonl (tx_sw_if_index);

  vl_api_send_msg (reg, (u8 *) mp);
}

static void
vl_api_l2_xconnect_dump_t_handler (vl_api_l2_xconnect_dump_t * mp)
{
  vl_api_registration_t *reg;
  vnet_main_t *vnm = vnet_get_main ();
  vnet_interface_main_t *im = &vnm->interface_main;
  l2input_main_t *l2im = &l2input_main;
  vnet_sw_interface_t *swif;
  l2_input_config_t *config;

  reg = vl_api_client_index_to_registration (mp->client_index);
  if (!reg)
    return;

  /* *INDENT-OFF* */
  pool_foreach (swif, im->sw_interfaces,
  ({
    config = vec_elt_at_index (l2im->configs, swif->sw_if_index);
    if (config->xconnect)
      send_l2_xconnect_details (reg, mp->context, swif->sw_if_index,
                                config->output_sw_if_index);
  }));
  /* *INDENT-ON* */
}

static void
vl_api_l2_fib_clear_table_t_handler (vl_api_l2_fib_clear_table_t * mp)
{
  int rv = 0;
  vl_api_l2_fib_clear_table_reply_t *rmp;

  /* Clear all MACs including static MACs  */
  l2fib_clear_table ();

  REPLY_MACRO (VL_API_L2_FIB_CLEAR_TABLE_REPLY);
}
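
/* For reference, REPLY_MACRO (from api_helper_macros.h) expands roughly
   to the following (simplified sketch; the real macro also locates the
   client registration and bails out if it is gone):

     rmp = vl_msg_api_alloc (sizeof (*rmp));
     rmp->_vl_msg_id = ntohs (VL_API_L2_FIB_CLEAR_TABLE_REPLY);
     rmp->context = mp->context;
     rmp->retval = ntohl (rv);
     vl_api_send_msg (reg, (u8 *) rmp);
*/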

static void
send_l2fib_table_entry (vpe_api_main_t * am,
			vl_api_registration_t * reg,
			l2fib_entry_key_t * l2fe_key,
			l2fib_entry_result_t * l2fe_res, u32 context)
{
  vl_api_l2_fib_table_details_t *mp;

  mp = vl_msg_api_alloc (sizeof (*mp));
  memset (mp, 0, sizeof (*mp));
  clib_memset (mp, 0, sizeof (*mp));
  mp->_vl_msg_id = ntohs (VL_API_L2_FIB_TABLE_DETAILS);

  mp->bd_id =
    ntohl (l2input_main.bd_configs[l2fe_key->fields.bd_index].bd_id);

  clib_memcpy (mp->mac, l2fe_key->fields.mac, 6);
  mp->sw_if_index = ntohl (l2fe_res->fields.sw_if_index);
  mp->static_mac = l2fe_res->fields.static_mac;
  mp->filter_mac = l2fe_res->fields.filter;
  mp->bvi_mac = l2fe_res->fields.bvi;
  mp->context = context;

  vl_api_send_msg (reg, (u8 *) mp);
}

static void
vl_api_l2_fib_table_dump_t_handler (vl_api_l2_fib_table_dump_t * mp)
{
  vpe_api_main_t *am = &vpe_api_main;
  bd_main_t *bdm = &bd_main;
  l2fib_entry_key_t *l2fe_key = NULL;
  l2fib_entry_result_t *l2fe_res = NULL;
  u32 ni, bd_id = ntohl (mp->bd_id);
  u32 bd_index;
  vl_api_registration_t *reg;
  uword *p;

  reg = vl_api_client_index_to_registration (mp->client_index);
  if (!reg)
    return;

  /* see l2fib_table_dump: ~0 means "any" */
  if (bd_id == ~0)
    bd_index = ~0;
  else
    {
      p = hash_get (bdm->bd_index_by_bd_id, bd_id);
      if (p == 0)
	return;

      bd_index = p[0];
    }

  l2fib_table_dump (bd_index, &l2fe_key, &l2fe_res);

  vec_foreach_index (ni, l2fe_key)
  {
    send_l2fib_table_entry (am, reg, vec_elt_at_index (l2fe_key, ni),
			    vec_elt_at_index (l2fe_res, ni), mp->context);
  }
  vec_free (l2fe_key);
  vec_free (l2fe_res);
}

static void
vl_api_l2fib_add_del_t_handler (vl_api_l2fib_add_del_t * mp)
{
  bd_main_t *bdm = &bd_main;
  l2input_main_t *l2im = &l2input_main;
  vl_api_l2fib_add_del_reply_t *rmp;
  int rv = 0;
  u32 bd_id = ntohl (mp->bd_id);
  uword *p = hash_get (bdm->bd_index_by_bd_id, bd_id);

  if (!p)
    {
      rv = VNET_API_ERROR_NO_SUCH_ENTRY;
      goto bad_sw_if_index;
    }
  u32 bd_index = p[0];

  u8 mac[6];

  clib_memcpy (mac, mp->mac, 6);
  if (mp->is_add)
    {
      if (mp->filter_mac)
	l2fib_add_filter_entry (mac, bd_index);
      else
	{
	  u32 sw_if_index = ntohl (mp->sw_if_index);
	  VALIDATE_SW_IF_INDEX (mp);
	  if (vec_len (l2im->configs) <= sw_if_index)
	    {
	      rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
	      goto bad_sw_if_index;
	    }
	  else
	    {
	      l2_input_config_t *config;
	      config = vec_elt_at_index (l2im->configs, sw_if_index);
	      if (config->bridge == 0)
		{
		  rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
		  goto bad_sw_if_index;
		}
	    }
	  u8 static_mac = mp->static_mac ? 1 : 0;
	  u8 bvi_mac = mp->bvi_mac ? 1 : 0;
	  l2fib_add_fwd_entry (mac, bd_index, sw_if_index, static_mac,
			       bvi_mac);
	}
    }
  else
    {
      l2fib_del_entry (mac, bd_index);
    }

  BAD_SW_IF_INDEX_LABEL;

  REPLY_MACRO (VL_API_L2FIB_ADD_DEL_REPLY);
}
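
/* Client-side sketch for the handler above (illustrative values): the
   handler expects bd_id and sw_if_index in network byte order and a raw
   6-byte MAC.  Field names come from vl_api_l2fib_add_del_t; the values
   are hypothetical. */
static void __attribute__ ((unused))
example_fill_l2fib_add_del (vl_api_l2fib_add_del_t * mp)
{
  u8 mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

  mp->bd_id = htonl (13);	/* hypothetical bridge domain */
  mp->sw_if_index = htonl (1);	/* hypothetical interface */
  mp->is_add = 1;
  mp->static_mac = 1;		/* don't age this entry */
  mp->filter_mac = 0;
  mp->bvi_mac = 0;
  clib_memcpy (mp->mac, mac, 6);
}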

static void
vl_api_want_l2_macs_events_t_handler (vl_api_want_l2_macs_events_t * mp)
{
  int rv = 0;
  vl_api_want_l2_macs_events_reply_t *rmp;
  l2learn_main_t *lm = &l2learn_main;
  l2fib_main_t *fm = &l2fib_main;
  u32 pid = ntohl (mp->pid);
  u32 learn_limit = ntohl (mp->learn_limit);

  if (mp->enable_disable)
    {
      if (lm->client_pid == 0)
	{
	  lm->client_pid = pid;
	  lm->client_index = mp->client_index;

	  if (mp->max_macs_in_event)
	    fm->max_macs_in_event = mp->max_macs_in_event * 10;
	  else
	    fm->max_macs_in_event = L2FIB_EVENT_MAX_MACS_DEFAULT;

	  if (mp->scan_delay)
	    fm->event_scan_delay = (f64) (mp->scan_delay) * 10e-3;
	  else
	    fm->event_scan_delay = L2FIB_EVENT_SCAN_DELAY_DEFAULT;

	  /* change learn limit and flush all learned MACs */
	  if (learn_limit && (learn_limit < L2LEARN_DEFAULT_LIMIT))
	    lm->global_learn_limit = learn_limit;
	  else
	    lm->global_learn_limit = L2FIB_EVENT_LEARN_LIMIT_DEFAULT;

	  l2fib_flush_all_mac (vlib_get_main ());
	}
      else if (lm->client_pid != pid)
	{
	  rv = VNET_API_ERROR_L2_MACS_EVENT_CLINET_PRESENT;
	  goto exit;
	}
    }
  else if (lm->client_pid)
    {
      lm->client_pid = 0;
      lm->client_index = 0;
      if (learn_limit && (learn_limit < L2LEARN_DEFAULT_LIMIT))
	lm->global_learn_limit = learn_limit;
      else
	lm->global_learn_limit = L2LEARN_DEFAULT_LIMIT;
    }

exit:
  REPLY_MACRO (VL_API_WANT_L2_MACS_EVENTS_REPLY);
}
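
/* Units note for the handler above: scan_delay arrives in units of
   10 ms (e.g. scan_delay = 5 yields fm->event_scan_delay = 0.05 s via
   the 10e-3 factor), and max_macs_in_event arrives in units of 10 MACs
   (hence the * 10). */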

static void
vl_api_l2fib_flush_int_t_handler (vl_api_l2fib_flush_int_t * mp)
{
  int rv = 0;
  vlib_main_t *vm = vlib_get_main ();
  vl_api_l2fib_flush_int_reply_t *rmp;

  VALIDATE_SW_IF_INDEX (mp);

  u32 sw_if_index = ntohl (mp->sw_if_index);
  l2fib_flush_int_mac (vm, sw_if_index);