From 7cd468a3d7dee7d6c92f69a0bb7061ae208ec727 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Mon, 19 Dec 2016 23:05:39 +0100 Subject: Reorganize source tree to use single autotools instance Change-Id: I7b51f88292e057c6443b12224486f2d0c9f8ae23 Signed-off-by: Damjan Marion --- src/vpp/stats/stats.c | 987 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 987 insertions(+) create mode 100644 src/vpp/stats/stats.c (limited to 'src/vpp/stats/stats.c') diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c new file mode 100644 index 00000000..391e02f6 --- /dev/null +++ b/src/vpp/stats/stats.c @@ -0,0 +1,987 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include + +#define STATS_DEBUG 0 + +stats_main_t stats_main; + +#include + +#include + +#define f64_endian(a) +#define f64_print(a,b) + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include +#undef vl_printfun + +#define foreach_stats_msg \ +_(WANT_STATS, want_stats) \ +_(WANT_STATS_REPLY, want_stats_reply) \ +_(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \ +_(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \ +_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) + +/* These constants ensure msg sizes <= 1024, aka ring allocation */ +#define SIMPLE_COUNTER_BATCH_SIZE 126 +#define COMBINED_COUNTER_BATCH_SIZE 63 +#define IP4_FIB_COUNTER_BATCH_SIZE 48 +#define IP6_FIB_COUNTER_BATCH_SIZE 30 + +/* 5ms */ +#define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5) +/* ns/us us/ms */ + +void +dslock (stats_main_t * sm, int release_hint, int tag) +{ + u32 thread_id; + data_structure_lock_t *l = sm->data_structure_lock; + + if (PREDICT_FALSE (l == 0)) + return; + + thread_id = os_get_cpu_number (); + if (l->lock && l->thread_id == thread_id) + { + l->count++; + return; + } + + if (release_hint) + l->release_hint++; + + while (__sync_lock_test_and_set (&l->lock, 1)) + /* zzzz */ ; + l->tag = tag; + l->thread_id = thread_id; + l->count = 1; +} + +void +stats_dslock_with_hint (int hint, int tag) +{ + stats_main_t *sm = &stats_main; + dslock (sm, hint, tag); +} + +void +dsunlock (stats_main_t * sm) +{ + u32 thread_id; + data_structure_lock_t *l = sm->data_structure_lock; + + if (PREDICT_FALSE (l == 0)) + return; + + thread_id = os_get_cpu_number (); + ASSERT (l->lock && l->thread_id == thread_id); + l->count--; + if (l->count == 0) + { + l->tag = -l->tag; + l->release_hint = 0; + CLIB_MEMORY_BARRIER (); + l->lock = 0; + } +} + +void +stats_dsunlock (int hint, int tag) +{ + stats_main_t *sm = &stats_main; + dsunlock (sm); +} + +static void +do_simple_interface_counters (stats_main_t * sm) +{ + vl_api_vnet_interface_counters_t *mp = 0; + vnet_interface_main_t 
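/* Batching: SIMPLE_COUNTER_BATCH_SIZE (126) keeps each message within
+   * the 1024-byte ring allocation noted above: 126 * sizeof (u64) = 1008
+   * bytes of counter data plus the message header. */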
*im = sm->interface_main; + api_main_t *am = sm->api_main; + vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr; + unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue; + vlib_simple_counter_main_t *cm; + u32 items_this_message = 0; + u64 v, *vp = 0; + int i; + + /* + * Prevent interface registration from expanding / moving the vectors... + * That tends never to happen, so we can hold this lock for a while. + */ + vnet_interface_counter_lock (im); + + vec_foreach (cm, im->sw_if_counters) + { + + for (i = 0; i < vec_len (cm->maxi); i++) + { + if (mp == 0) + { + items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE, + vec_len (cm->maxi) - i); + + mp = vl_msg_api_alloc_as_if_client + (sizeof (*mp) + items_this_message * sizeof (v)); + mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COUNTERS); + mp->vnet_counter_type = cm - im->sw_if_counters; + mp->is_combined = 0; + mp->first_sw_if_index = htonl (i); + mp->count = 0; + vp = (u64 *) mp->data; + } + v = vlib_get_simple_counter (cm, i); + clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v); + vp++; + mp->count++; + if (mp->count == items_this_message) + { + mp->count = htonl (items_this_message); + /* Send to the main thread... */ + vl_msg_api_send_shmem (q, (u8 *) & mp); + mp = 0; + } + } + ASSERT (mp == 0); + } + vnet_interface_counter_unlock (im); +} + +static void +do_combined_interface_counters (stats_main_t * sm) +{ + vl_api_vnet_interface_counters_t *mp = 0; + vnet_interface_main_t *im = sm->interface_main; + api_main_t *am = sm->api_main; + vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr; + unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue; + vlib_combined_counter_main_t *cm; + u32 items_this_message = 0; + vlib_counter_t v, *vp = 0; + int i; + + vnet_interface_counter_lock (im); + + vec_foreach (cm, im->combined_sw_if_counters) + { + + for (i = 0; i < vec_len (cm->maxi); i++) + { + if (mp == 0) + { + items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE, + vec_len (cm->maxi) - i); + + mp = vl_msg_api_alloc_as_if_client + (sizeof (*mp) + items_this_message * sizeof (v)); + mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COUNTERS); + mp->vnet_counter_type = cm - im->combined_sw_if_counters; + mp->is_combined = 1; + mp->first_sw_if_index = htonl (i); + mp->count = 0; + vp = (vlib_counter_t *) mp->data; + } + vlib_get_combined_counter (cm, i, &v); + clib_mem_unaligned (&vp->packets, u64) + = clib_host_to_net_u64 (v.packets); + clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes); + vp++; + mp->count++; + if (mp->count == items_this_message) + { + mp->count = htonl (items_this_message); + /* Send to the main thread... */ + vl_msg_api_send_shmem (q, (u8 *) & mp); + mp = 0; + } + } + ASSERT (mp == 0); + } + vnet_interface_counter_unlock (im); +} + +/* from .../vnet/vnet/ip/lookup.c. 
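The packed struct below squeezes the prefix length (6 bits) and the
   lookup result index (26 bits) into a single u32 next to the address.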
Yuck */ +typedef CLIB_PACKED (struct + { + ip4_address_t address; +u32 address_length: 6; +u32 index: 26; + }) ip4_route_t; + +static void +ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec) +{ + struct timespec _req, *req = &_req; + struct timespec _rem, *rem = &_rem; + + req->tv_sec = sec; + req->tv_nsec = nsec; + while (1) + { + if (nanosleep (req, rem) == 0) + break; + *req = *rem; + if (errno == EINTR) + continue; + clib_unix_warning ("nanosleep"); + break; + } +} + +static void +do_ip4_fibs (stats_main_t * sm) +{ + ip4_main_t *im4 = &ip4_main; + api_main_t *am = sm->api_main; + vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr; + unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue; + static ip4_route_t *routes; + ip4_route_t *r; + fib_table_t *fib; + ip_lookup_main_t *lm = &im4->lookup_main; + static uword *results; + vl_api_vnet_ip4_fib_counters_t *mp = 0; + u32 items_this_message; + vl_api_ip4_fib_counter_t *ctrp = 0; + u32 start_at_fib_index = 0; + int i; + +again: + /* *INDENT-OFF* */ + pool_foreach (fib, im4->fibs, + ({ + /* We may have bailed out due to control-plane activity */ + while ((fib - im4->fibs) < start_at_fib_index) + continue; + + if (mp == 0) + { + items_this_message = IP4_FIB_COUNTER_BATCH_SIZE; + mp = vl_msg_api_alloc_as_if_client + (sizeof (*mp) + + items_this_message * sizeof (vl_api_ip4_fib_counter_t)); + mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS); + mp->count = 0; + mp->vrf_id = ntohl (fib->ft_table_id); + ctrp = (vl_api_ip4_fib_counter_t *) mp->c; + } + else + { + /* happens if the last FIB was empty... */ + ASSERT (mp->count == 0); + mp->vrf_id = ntohl (fib->ft_table_id); + } + + dslock (sm, 0 /* release hint */ , 1 /* tag */ ); + + vec_reset_length (routes); + vec_reset_length (results); + + for (i = 0; i < ARRAY_LEN (fib->v4.fib_entry_by_dst_address); i++) + { + uword *hash = fib->v4.fib_entry_by_dst_address[i]; + hash_pair_t *p; + ip4_route_t x; + + x.address_length = i; + + hash_foreach_pair (p, hash, + ({ + x.address.data_u32 = p->key; + if (lm->fib_result_n_words > 1) + { + x.index = vec_len (results); + vec_add (results, p->value, lm->fib_result_n_words); + } + else + x.index = p->value[0]; + + vec_add1 (routes, x); + if (sm->data_structure_lock->release_hint) + { + start_at_fib_index = fib - im4->fibs; + dsunlock (sm); + ip46_fib_stats_delay (sm, 0 /* sec */, + STATS_RELEASE_DELAY_NS); + mp->count = 0; + ctrp = (vl_api_ip4_fib_counter_t *)mp->c; + goto again; + } + })); + } + + vec_foreach (r, routes) + { + vlib_counter_t c; + + vlib_get_combined_counter (&load_balance_main.lbm_to_counters, + r->index, &c); + /* + * If it has actually + * seen at least one packet, send it. + */ + if (c.packets > 0) + { + + /* already in net byte order */ + ctrp->address = r->address.as_u32; + ctrp->address_length = r->address_length; + ctrp->packets = clib_host_to_net_u64 (c.packets); + ctrp->bytes = clib_host_to_net_u64 (c.bytes); + mp->count++; + ctrp++; + + if (mp->count == items_this_message) + { + mp->count = htonl (items_this_message); + /* + * If the main thread's input queue is stuffed, + * drop the data structure lock (which the main thread + * may want), and take a pause. 
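+		   * After the pause the scan resumes from
+		   * start_at_fib_index via "goto again" below.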
+ */ + unix_shared_memory_queue_lock (q); + if (unix_shared_memory_queue_is_full (q)) + { + dsunlock (sm); + vl_msg_api_send_shmem_nolock (q, (u8 *) & mp); + unix_shared_memory_queue_unlock (q); + mp = 0; + ip46_fib_stats_delay (sm, 0 /* sec */ , + STATS_RELEASE_DELAY_NS); + goto again; + } + vl_msg_api_send_shmem_nolock (q, (u8 *) & mp); + unix_shared_memory_queue_unlock (q); + + items_this_message = IP4_FIB_COUNTER_BATCH_SIZE; + mp = vl_msg_api_alloc_as_if_client + (sizeof (*mp) + + items_this_message * sizeof (vl_api_ip4_fib_counter_t)); + mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS); + mp->count = 0; + mp->vrf_id = ntohl (fib->ft_table_id); + ctrp = (vl_api_ip4_fib_counter_t *) mp->c; + } + } /* for each (mp or single) adj */ + if (sm->data_structure_lock->release_hint) + { + start_at_fib_index = fib - im4->fibs; + dsunlock (sm); + ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS); + mp->count = 0; + ctrp = (vl_api_ip4_fib_counter_t *) mp->c; + goto again; + } + } /* vec_foreach (routes) */ + + dsunlock (sm); + + /* Flush any data from this fib */ + if (mp->count) + { + mp->count = htonl (mp->count); + vl_msg_api_send_shmem (q, (u8 *) & mp); + mp = 0; + } + })); + /* *INDENT-ON* */ + + /* If e.g. the last FIB had no reportable routes, free the buffer */ + if (mp) + vl_msg_api_free (mp); +} + +typedef struct +{ + ip6_address_t address; + u32 address_length; + u32 index; +} ip6_route_t; + +typedef struct +{ + u32 fib_index; + ip6_route_t **routep; + stats_main_t *sm; +} add_routes_in_fib_arg_t; + +static void +add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg) +{ + add_routes_in_fib_arg_t *ap = arg; + stats_main_t *sm = ap->sm; + + if (sm->data_structure_lock->release_hint) + clib_longjmp (&sm->jmp_buf, 1); + + if (kvp->key[2] >> 32 == ap->fib_index) + { + ip6_address_t *addr; + ip6_route_t *r; + addr = (ip6_address_t *) kvp; + vec_add2 (*ap->routep, r, 1); + r->address = addr[0]; + r->address_length = kvp->key[2] & 0xFF; + r->index = kvp->value; + } +} + +static void +do_ip6_fibs (stats_main_t * sm) +{ + ip6_main_t *im6 = &ip6_main; + api_main_t *am = sm->api_main; + vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr; + unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue; + static ip6_route_t *routes; + ip6_route_t *r; + fib_table_t *fib; + static uword *results; + vl_api_vnet_ip6_fib_counters_t *mp = 0; + u32 items_this_message; + vl_api_ip6_fib_counter_t *ctrp = 0; + u32 start_at_fib_index = 0; + BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash; + add_routes_in_fib_arg_t _a, *a = &_a; + +again: + /* *INDENT-OFF* */ + pool_foreach (fib, im6->fibs, + ({ + /* We may have bailed out due to control-plane activity */ + while ((fib - im6->fibs) < start_at_fib_index) + continue; + + if (mp == 0) + { + items_this_message = IP6_FIB_COUNTER_BATCH_SIZE; + mp = vl_msg_api_alloc_as_if_client + (sizeof (*mp) + + items_this_message * sizeof (vl_api_ip6_fib_counter_t)); + mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS); + mp->count = 0; + mp->vrf_id = ntohl (fib->ft_table_id); + ctrp = (vl_api_ip6_fib_counter_t *) mp->c; + } + + dslock (sm, 0 /* release hint */ , 1 /* tag */ ); + + vec_reset_length (routes); + vec_reset_length (results); + + a->fib_index = fib - im6->fibs; + a->routep = &routes; + a->sm = sm; + + if (clib_setjmp (&sm->jmp_buf, 0) == 0) + { + start_at_fib_index = fib - im6->fibs; + BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a); + } + else + { + dsunlock (sm); + ip46_fib_stats_delay (sm, 0 /* sec */ , + 
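/* reached via clib_longjmp () from add_routes_in_fib ()
				 * when the control plane wants the lock:
				 * back off 5 ms, then rescan this FIB */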
STATS_RELEASE_DELAY_NS); + mp->count = 0; + ctrp = (vl_api_ip6_fib_counter_t *) mp->c; + goto again; + } + + vec_foreach (r, routes) + { + vlib_counter_t c; + + vlib_get_combined_counter (&load_balance_main.lbm_to_counters, + r->index, &c); + /* + * If it has actually + * seen at least one packet, send it. + */ + if (c.packets > 0) + { + /* already in net byte order */ + ctrp->address[0] = r->address.as_u64[0]; + ctrp->address[1] = r->address.as_u64[1]; + ctrp->address_length = (u8) r->address_length; + ctrp->packets = clib_host_to_net_u64 (c.packets); + ctrp->bytes = clib_host_to_net_u64 (c.bytes); + mp->count++; + ctrp++; + + if (mp->count == items_this_message) + { + mp->count = htonl (items_this_message); + /* + * If the main thread's input queue is stuffed, + * drop the data structure lock (which the main thread + * may want), and take a pause. + */ + unix_shared_memory_queue_lock (q); + if (unix_shared_memory_queue_is_full (q)) + { + dsunlock (sm); + vl_msg_api_send_shmem_nolock (q, (u8 *) & mp); + unix_shared_memory_queue_unlock (q); + mp = 0; + ip46_fib_stats_delay (sm, 0 /* sec */ , + STATS_RELEASE_DELAY_NS); + goto again; + } + vl_msg_api_send_shmem_nolock (q, (u8 *) & mp); + unix_shared_memory_queue_unlock (q); + + items_this_message = IP6_FIB_COUNTER_BATCH_SIZE; + mp = vl_msg_api_alloc_as_if_client + (sizeof (*mp) + + items_this_message * sizeof (vl_api_ip6_fib_counter_t)); + mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS); + mp->count = 0; + mp->vrf_id = ntohl (fib->ft_table_id); + ctrp = (vl_api_ip6_fib_counter_t *) mp->c; + } + } + + if (sm->data_structure_lock->release_hint) + { + start_at_fib_index = fib - im6->fibs; + dsunlock (sm); + ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS); + mp->count = 0; + ctrp = (vl_api_ip6_fib_counter_t *) mp->c; + goto again; + } + } /* vec_foreach (routes) */ + + dsunlock (sm); + + /* Flush any data from this fib */ + if (mp->count) + { + mp->count = htonl (mp->count); + vl_msg_api_send_shmem (q, (u8 *) & mp); + mp = 0; + } + })); + /* *INDENT-ON* */ + + /* If e.g. the last FIB had no reportable routes, free the buffer */ + if (mp) + vl_msg_api_free (mp); +} + +static void +stats_thread_fn (void *arg) +{ + stats_main_t *sm = &stats_main; + vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg; + vlib_thread_main_t *tm = vlib_get_thread_main (); + + /* stats thread wants no signals. */ + { + sigset_t s; + sigfillset (&s); + pthread_sigmask (SIG_SETMASK, &s, 0); + } + + if (vec_len (tm->thread_prefix)) + vlib_set_thread_name ((char *) + format (0, "%v_stats%c", tm->thread_prefix, '\0')); + + clib_mem_set_heap (w->thread_mheap); + + while (1) + { + /* 10 second poll interval */ + ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ ); + + if (!(sm->enable_poller)) + continue; + do_simple_interface_counters (sm); + do_combined_interface_counters (sm); + do_ip4_fibs (sm); + do_ip6_fibs (sm); + } +} + +static void +vl_api_vnet_interface_counters_t_handler (vl_api_vnet_interface_counters_t * + mp) +{ + vpe_client_registration_t *reg; + stats_main_t *sm = &stats_main; + unix_shared_memory_queue_t *q, *q_prev = NULL; + vl_api_vnet_interface_counters_t *mp_copy = NULL; + u32 mp_size; + +#if STATS_DEBUG > 0 + char *counter_name; + u32 count, sw_if_index; + int i; +#endif + + mp_size = sizeof (*mp) + (ntohl (mp->count) * + (mp->is_combined ? 
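/* combined counters are {packets, bytes} pairs,
+			      * simple counters a single u64 each */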
sizeof (vlib_counter_t) :
+			     sizeof (u64)));
+
+  /* *INDENT-OFF* */
+  pool_foreach(reg, sm->stats_registrations,
+  ({
+    q = vl_api_client_index_to_input_queue (reg->client_index);
+    if (q)
+      {
+        if (q_prev && (q_prev->cursize < q_prev->maxsize))
+          {
+            mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
+            clib_memcpy(mp_copy, mp, mp_size);
+            vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
+            mp = mp_copy;
+          }
+        q_prev = q;
+      }
+  }));
+  /* *INDENT-ON* */
+
+#if STATS_DEBUG > 0
+  count = ntohl (mp->count);
+  sw_if_index = ntohl (mp->first_sw_if_index);
+  if (mp->is_combined == 0)
+    {
+      u64 *vp, v;
+      vp = (u64 *) mp->data;
+
+      switch (mp->vnet_counter_type)
+	{
+	case VNET_INTERFACE_COUNTER_DROP:
+	  counter_name = "drop";
+	  break;
+	case VNET_INTERFACE_COUNTER_PUNT:
+	  counter_name = "punt";
+	  break;
+	case VNET_INTERFACE_COUNTER_IP4:
+	  counter_name = "ip4";
+	  break;
+	case VNET_INTERFACE_COUNTER_IP6:
+	  counter_name = "ip6";
+	  break;
+	case VNET_INTERFACE_COUNTER_RX_NO_BUF:
+	  counter_name = "rx-no-buff";
+	  break;
+	case VNET_INTERFACE_COUNTER_RX_MISS:
+	  counter_name = "rx-miss";
+	  break;
+	case VNET_INTERFACE_COUNTER_RX_ERROR:
+	  counter_name = "rx-error (fifo-full)";
+	  break;
+	case VNET_INTERFACE_COUNTER_TX_ERROR:
+	  counter_name = "tx-error (fifo-full)";
+	  break;
+	default:
+	  counter_name = "bogus";
+	  break;
+	}
+      for (i = 0; i < count; i++)
+	{
+	  v = clib_mem_unaligned (vp, u64);
+	  v = clib_net_to_host_u64 (v);
+	  vp++;
+	  fformat (stdout, "%U.%s %lld\n", format_vnet_sw_if_index_name,
+		   sm->vnet_main, sw_if_index, counter_name, v);
+	  sw_if_index++;
+	}
+    }
+  else
+    {
+      vlib_counter_t *vp;
+      u64 packets, bytes;
+      vp = (vlib_counter_t *) mp->data;
+
+      switch (mp->vnet_counter_type)
+	{
+	case VNET_INTERFACE_COUNTER_RX:
+	  counter_name = "rx";
+	  break;
+	case VNET_INTERFACE_COUNTER_TX:
+	  counter_name = "tx";
+	  break;
+	default:
+	  counter_name = "bogus";
+	  break;
+	}
+      for (i = 0; i < count; i++)
+	{
+	  packets = clib_mem_unaligned (&vp->packets, u64);
+	  packets = clib_net_to_host_u64 (packets);
+	  bytes = clib_mem_unaligned (&vp->bytes, u64);
+	  bytes = clib_net_to_host_u64 (bytes);
+	  vp++;
+	  fformat (stdout, "%U.%s.packets %lld\n",
+		   format_vnet_sw_if_index_name,
+		   sm->vnet_main, sw_if_index, counter_name, packets);
+	  fformat (stdout, "%U.%s.bytes %lld\n",
+		   format_vnet_sw_if_index_name,
+		   sm->vnet_main, sw_if_index, counter_name, bytes);
+	  sw_if_index++;
+	}
+    }
+#endif
+  if (q_prev && (q_prev->cursize < q_prev->maxsize))
+    {
+      vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+    }
+  else
+    {
+      vl_msg_api_free (mp);
+    }
+}
+
+static void
+vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
+{
+  vpe_client_registration_t *reg;
+  stats_main_t *sm = &stats_main;
+  unix_shared_memory_queue_t *q, *q_prev = NULL;
+  vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
+  u32 mp_size;
+
+  mp_size = sizeof (*mp_copy) +
+    ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
+
+  /* *INDENT-OFF* */
+  pool_foreach(reg, sm->stats_registrations,
+  ({
+    q = vl_api_client_index_to_input_queue (reg->client_index);
+    if (q)
+      {
+        if (q_prev && (q_prev->cursize < q_prev->maxsize))
+          {
+            mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
+            clib_memcpy(mp_copy, mp, mp_size);
+            vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
+            mp = mp_copy;
+          }
+        q_prev = q;
+      }
+  }));
+  /* *INDENT-ON* */
+  if (q_prev && (q_prev->cursize < q_prev->maxsize))
+    {
+      vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+    }
+  else
+    {
+      vl_msg_api_free (mp);
+    }
+}
+
+static void
+vl_api_vnet_ip6_fib_counters_t_handler
(vl_api_vnet_ip6_fib_counters_t * mp) +{ + vpe_client_registration_t *reg; + stats_main_t *sm = &stats_main; + unix_shared_memory_queue_t *q, *q_prev = NULL; + vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL; + u32 mp_size; + + mp_size = sizeof (*mp_copy) + + ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t); + + /* *INDENT-OFF* */ + pool_foreach(reg, sm->stats_registrations, + ({ + q = vl_api_client_index_to_input_queue (reg->client_index); + if (q) + { + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + mp_copy = vl_msg_api_alloc_as_if_client(mp_size); + clib_memcpy(mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *)&mp); + mp = mp_copy; + } + q_prev = q; + } + })); + /* *INDENT-ON* */ + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + vl_msg_api_send_shmem (q_prev, (u8 *) & mp); + } + else + { + vl_msg_api_free (mp); + } +} + +static void +vl_api_want_stats_reply_t_handler (vl_api_want_stats_reply_t * mp) +{ + clib_warning ("BUG"); +} + +static void +vl_api_want_stats_t_handler (vl_api_want_stats_t * mp) +{ + stats_main_t *sm = &stats_main; + vpe_client_registration_t *rp; + vl_api_want_stats_reply_t *rmp; + uword *p; + i32 retval = 0; + unix_shared_memory_queue_t *q; + + p = hash_get (sm->stats_registration_hash, mp->client_index); + if (p) + { + if (mp->enable_disable) + { + clib_warning ("pid %d: already enabled...", mp->pid); + retval = -2; + goto reply; + } + else + { + rp = pool_elt_at_index (sm->stats_registrations, p[0]); + pool_put (sm->stats_registrations, rp); + hash_unset (sm->stats_registration_hash, mp->client_index); + goto reply; + } + } + if (mp->enable_disable == 0) + { + clib_warning ("pid %d: already disabled...", mp->pid); + retval = -3; + goto reply; + } + pool_get (sm->stats_registrations, rp); + rp->client_index = mp->client_index; + rp->client_pid = mp->pid; + hash_set (sm->stats_registration_hash, rp->client_index, + rp - sm->stats_registrations); + +reply: + if (pool_elts (sm->stats_registrations)) + sm->enable_poller = 1; + else + sm->enable_poller = 0; + + q = vl_api_client_index_to_input_queue (mp->client_index); + + if (!q) + return; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY); + rmp->context = mp->context; + rmp->retval = retval; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +int +stats_memclnt_delete_callback (u32 client_index) +{ + vpe_client_registration_t *rp; + stats_main_t *sm = &stats_main; + uword *p; + + p = hash_get (sm->stats_registration_hash, client_index); + if (p) + { + rp = pool_elt_at_index (sm->stats_registrations, p[0]); + pool_put (sm->stats_registrations, rp); + hash_unset (sm->stats_registration_hash, client_index); + } + + return 0; +} + +#define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler +#define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler +#define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler +#define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler + +static clib_error_t * +stats_init (vlib_main_t * vm) +{ + stats_main_t *sm = &stats_main; + api_main_t *am = &api_main; + void *vlib_worker_thread_bootstrap_fn (void *arg); + + sm->vlib_main = vm; + sm->vnet_main = vnet_get_main (); + sm->interface_main = &vnet_get_main ()->interface_main; + sm->api_main = am; + sm->stats_poll_interval_in_seconds = 10; + sm->data_structure_lock = + clib_mem_alloc_aligned (sizeof (data_structure_lock_t), + CLIB_CACHE_LINE_BYTES); + memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock)); + +#define _(N,n) \ + 
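+  /* Register handler, endian and print functions for each stats      \
+     message; the trailing 0 argument below disables API tracing,     \
+     presumably so periodic stats bursts do not flood the trace. */   \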
vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 0 /* do NOT trace! */); + foreach_stats_msg; +#undef _ + + /* tell the msg infra not to free these messages... */ + am->message_bounce[VL_API_VNET_INTERFACE_COUNTERS] = 1; + am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1; + am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1; + + return 0; +} + +VLIB_INIT_FUNCTION (stats_init); + +/* *INDENT-OFF* */ +VLIB_REGISTER_THREAD (stats_thread_reg, static) = { + .name = "stats", + .function = stats_thread_fn, + .fixed_count = 1, + .count = 1, + .no_data_structure_clone = 1, + .use_pthreads = 1, +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ -- cgit 1.2.3-korg From 044183faacc5eb8a055658f9deebefb56b254adc Mon Sep 17 00:00:00 2001 From: Neale Ranns Date: Tue, 24 Jan 2017 01:34:25 -0800 Subject: [re]Enable per-Adjacency/neighbour counters Change-Id: I953b3888bbc6d8a5f53f684a5edc8742b382f323 Signed-off-by: Neale Ranns --- src/scripts/vnet/ip6 | 18 +- src/vat/api_format.c | 124 ++++++++++++++ src/vat/vat.h | 18 ++ src/vlib/counter.h | 14 ++ src/vnet/adj/adj.c | 4 + src/vnet/adj/adj_l2.c | 10 +- src/vnet/ip/ip4_forward.c | 42 +++-- src/vnet/ip/ip6_forward.c | 24 +-- src/vnet/mpls/mpls_output.c | 37 ++-- src/vpp/api/vpe.api | 41 +++++ src/vpp/stats/stats.c | 405 +++++++++++++++++++++++++++++++++++++++++++- 11 files changed, 675 insertions(+), 62 deletions(-) (limited to 'src/vpp/stats/stats.c') diff --git a/src/scripts/vnet/ip6 b/src/scripts/vnet/ip6 index 4f9f3ee5..adb27225 100644 --- a/src/scripts/vnet/ip6 +++ b/src/scripts/vnet/ip6 @@ -6,10 +6,24 @@ packet-generator new { no-recycle data { IP6: 1.2.3 -> 4.5.6 - ICMP: ::1 -> ::2 + ICMP: 3002::2 -> 3001::2 ICMP echo_request incrementing 100 } } -tr add pg-input 100 + +loop create +loop create +set int state loop0 up +set int state loop1 up + +set int ip address loop0 2001:1::1/64 +set int ip address loop1 2001:2::1/64 + +set ip6 neighbor loop0 2001:1::2 00:00:DD:EE:AA:DD +set ip6 neighbor loop1 2001:2::2 00:00:DD:EE:AA:EE + +ip route add 3001::/64 via 2001:2::2 loop1 + +trace add pg-input 100 diff --git a/src/vat/api_format.c b/src/vat/api_format.c index 653cf79f..839bcdaa 100644 --- a/src/vat/api_format.c +++ b/src/vat/api_format.c @@ -2042,6 +2042,42 @@ static void vl_api_vnet_ip4_fib_counters_t_handler_json } } +static void vl_api_vnet_ip4_nbr_counters_t_handler + (vl_api_vnet_ip4_nbr_counters_t * mp) +{ + /* not supported */ +} + +static void vl_api_vnet_ip4_nbr_counters_t_handler_json + (vl_api_vnet_ip4_nbr_counters_t * mp) +{ + vat_main_t *vam = &vat_main; + vl_api_ip4_nbr_counter_t *v; + ip4_nbr_counter_t *counter; + u32 sw_if_index; + u32 count; + int i; + + sw_if_index = ntohl (mp->sw_if_index); + count = ntohl (mp->count); + vec_validate (vam->ip4_nbr_counters, sw_if_index); + + if (mp->begin) + vec_free (vam->ip4_nbr_counters[sw_if_index]); + + v = (vl_api_ip4_nbr_counter_t *) & mp->c; + for (i = 0; i < count; i++) + { + vec_validate (vam->ip4_nbr_counters[sw_if_index], i); + counter = &vam->ip4_nbr_counters[sw_if_index][i]; + counter->address.s_addr = v->address; + counter->packets = clib_net_to_host_u64 (v->packets); + counter->bytes = clib_net_to_host_u64 (v->bytes); + counter->linkt = v->link_type; + v++; + } +} + static void vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp) { @@ -2087,6 
+2123,43 @@ static void vl_api_vnet_ip6_fib_counters_t_handler_json } } +static void vl_api_vnet_ip6_nbr_counters_t_handler + (vl_api_vnet_ip6_nbr_counters_t * mp) +{ + /* not supported */ +} + +static void vl_api_vnet_ip6_nbr_counters_t_handler_json + (vl_api_vnet_ip6_nbr_counters_t * mp) +{ + vat_main_t *vam = &vat_main; + vl_api_ip6_nbr_counter_t *v; + ip6_nbr_counter_t *counter; + struct in6_addr ip6; + u32 sw_if_index; + u32 count; + int i; + + sw_if_index = ntohl (mp->sw_if_index); + count = ntohl (mp->count); + vec_validate (vam->ip6_nbr_counters, sw_if_index); + + if (mp->begin) + vec_free (vam->ip6_nbr_counters[sw_if_index]); + + v = (vl_api_ip6_nbr_counter_t *) & mp->c; + for (i = 0; i < count; i++) + { + vec_validate (vam->ip6_nbr_counters[sw_if_index], i); + counter = &vam->ip6_nbr_counters[sw_if_index][i]; + clib_memcpy (&ip6, &v->address, sizeof (ip6)); + counter->address = ip6; + counter->packets = clib_net_to_host_u64 (v->packets); + counter->bytes = clib_net_to_host_u64 (v->bytes); + v++; + } +} + static void vl_api_get_first_msg_id_reply_t_handler (vl_api_get_first_msg_id_reply_t * mp) { @@ -3490,6 +3563,10 @@ static void vl_api_flow_classify_details_t_handler_json #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler +#define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler +#define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler +#define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler +#define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler #define vl_api_lisp_adjacencies_get_reply_t_endian vl_noop_handler #define vl_api_lisp_adjacencies_get_reply_t_print vl_noop_handler @@ -3799,6 +3876,8 @@ _(DHCP_COMPL_EVENT, dhcp_compl_event) \ _(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \ _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \ _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \ +_(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \ +_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \ _(MAP_ADD_DOMAIN_REPLY, map_add_domain_reply) \ _(MAP_DEL_DOMAIN_REPLY, map_del_domain_reply) \ _(MAP_ADD_DEL_RULE_REPLY, map_add_del_rule_reply) \ @@ -4131,6 +4210,8 @@ dump_stats_table (vat_main_t * vam) u64 packets; ip4_fib_counter_t *c4; ip6_fib_counter_t *c6; + ip4_nbr_counter_t *n4; + ip6_nbr_counter_t *n6; int i, j; if (!vam->json_output) @@ -4226,6 +4307,49 @@ dump_stats_table (vat_main_t * vam) } } + /* ip4 nbr counters */ + msg_array = vat_json_object_add (&node, "ip4_nbr_counters"); + vat_json_init_array (msg_array); + for (i = 0; i < vec_len (vam->ip4_nbr_counters); i++) + { + msg = vat_json_array_add (msg_array); + vat_json_init_object (msg); + vat_json_object_add_uint (msg, "sw_if_index", i); + counter_array = vat_json_object_add (msg, "c"); + vat_json_init_array (counter_array); + for (j = 0; j < vec_len (vam->ip4_nbr_counters[i]); j++) + { + counter = vat_json_array_add (counter_array); + vat_json_init_object (counter); + n4 = &vam->ip4_nbr_counters[i][j]; + vat_json_object_add_ip4 (counter, "address", n4->address); + vat_json_object_add_uint (counter, "link-type", n4->linkt); + vat_json_object_add_uint (counter, "packets", n4->packets); + vat_json_object_add_uint (counter, "bytes", n4->bytes); + } + } + + /* ip6 nbr counters */ + msg_array = vat_json_object_add (&node, "ip6_nbr_counters"); + vat_json_init_array (msg_array); + for (i = 0; i < vec_len (vam->ip6_nbr_counters); i++) + { + msg = vat_json_array_add (msg_array); + 
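/* Each entry renders along these lines (illustrative shape only;
       * the exact output depends on the JSON writer):
       *   { "sw_if_index": 4,
       *     "c": [ { "address": "2001:1::2",
       *              "packets": 9, "bytes": 937 } ] }
       */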
vat_json_init_object (msg); + vat_json_object_add_uint (msg, "sw_if_index", i); + counter_array = vat_json_object_add (msg, "c"); + vat_json_init_array (counter_array); + for (j = 0; j < vec_len (vam->ip6_nbr_counters[i]); j++) + { + counter = vat_json_array_add (counter_array); + vat_json_init_object (counter); + n6 = &vam->ip6_nbr_counters[i][j]; + vat_json_object_add_ip6 (counter, "address", n6->address); + vat_json_object_add_uint (counter, "packets", n6->packets); + vat_json_object_add_uint (counter, "bytes", n6->bytes); + } + } + vat_json_print (vam->ofp, &node); vat_json_free (&node); diff --git a/src/vat/vat.h b/src/vat/vat.h index 3d7d96ae..831bdf50 100644 --- a/src/vat/vat.h +++ b/src/vat/vat.h @@ -95,6 +95,22 @@ typedef struct u64 bytes; } ip6_fib_counter_t; +typedef struct +{ + struct in_addr address; + vnet_link_t linkt; + u64 packets; + u64 bytes; +} ip4_nbr_counter_t; + +typedef struct +{ + struct in6_addr address; + vnet_link_t linkt; + u64 packets; + u64 bytes; +} ip6_nbr_counter_t; + typedef struct { /* vpe input queue */ @@ -185,6 +201,8 @@ typedef struct u32 *ip4_fib_counters_vrf_id_by_index; ip6_fib_counter_t **ip6_fib_counters; u32 *ip6_fib_counters_vrf_id_by_index; + ip4_nbr_counter_t **ip4_nbr_counters; + ip6_nbr_counter_t **ip6_nbr_counters; /* Convenience */ vlib_main_t *vlib_main; diff --git a/src/vlib/counter.h b/src/vlib/counter.h index a7903206..abfa89ee 100644 --- a/src/vlib/counter.h +++ b/src/vlib/counter.h @@ -273,6 +273,20 @@ vlib_increment_combined_counter (vlib_combined_counter_main_t * cm, } } +#define vlib_prefetch_combined_counter(_cm, _cpu_index, _index) \ +{ \ + vlib_mini_counter_t *_cpu_minis; \ + \ + /* \ + * This CPU's mini index is assumed to already be in cache \ + */ \ + _cpu_minis = (_cm)->minis[(_cpu_index)]; \ + CLIB_PREFETCH(_cpu_minis + (_index), \ + sizeof(*_cpu_minis), \ + STORE); \ +} + + /** Get the value of a combined counter, never called in the speed path Scrapes the entire set of mini counters. Innacurate unless worker threads which might increment the counter are diff --git a/src/vnet/adj/adj.c b/src/vnet/adj/adj.c index e740c4cb..d0be0f0e 100644 --- a/src/vnet/adj/adj.c +++ b/src/vnet/adj/adj.c @@ -122,6 +122,10 @@ format_ip_adjacency (u8 * s, va_list * args) if (fiaf & FORMAT_IP_ADJACENCY_DETAIL) { + vlib_counter_t counts; + + vlib_get_combined_counter(&adjacency_counters, adj_index, &counts); + s = format (s, "\n counts:[%Ld:%Ld]", counts.packets, counts.bytes); s = format (s, "\n locks:%d", adj->ia_node.fn_locks); s = format (s, " node:[%d]:%U", adj->rewrite_header.node_index, diff --git a/src/vnet/adj/adj_l2.c b/src/vnet/adj/adj_l2.c index 4d2dd708..5a083643 100644 --- a/src/vnet/adj/adj_l2.c +++ b/src/vnet/adj/adj_l2.c @@ -94,11 +94,11 @@ adj_l2_rewrite_inline (vlib_main_t * vm, rw_len0 = adj0[0].rewrite_header.data_bytes; vnet_buffer(p0)->ip.save_rewrite_length = rw_len0; - vlib_increment_combined_counter - (&adjacency_counters, - cpu_index, adj_index0, - /* packet increment */ 0, - /* byte increment */ rw_len0-sizeof(ethernet_header_t)); + vlib_increment_combined_counter(&adjacency_counters, + cpu_index, + adj_index0, + /* packet increment */ 0, + /* byte increment */ rw_len0); /* Check MTU of outgoing interface. 
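(The byte increment above now covers the whole L2 rewrite; the
	 ethernet header is no longer subtracted.)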
*/ if (PREDICT_TRUE((vlib_buffer_length_in_chain (vm, p0) <= diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c index 6e91b9e9..87b345bd 100644 --- a/src/vnet/ip/ip4_forward.c +++ b/src/vnet/ip/ip4_forward.c @@ -2402,19 +2402,12 @@ ip4_rewrite_inline (vlib_main_t * vm, error1); /* - * We've already accounted for an ethernet_header_t elsewhere + * pre-fetch the per-adjacency counters */ - if (PREDICT_FALSE (rw_len0 > sizeof (ethernet_header_t))) - vlib_increment_combined_counter - (&adjacency_counters, cpu_index, adj_index0, - /* packet increment */ 0, - /* byte increment */ rw_len0 - sizeof (ethernet_header_t)); - - if (PREDICT_FALSE (rw_len1 > sizeof (ethernet_header_t))) - vlib_increment_combined_counter - (&adjacency_counters, cpu_index, adj_index1, - /* packet increment */ 0, - /* byte increment */ rw_len1 - sizeof (ethernet_header_t)); + vlib_prefetch_combined_counter (&adjacency_counters, + cpu_index, adj_index0); + vlib_prefetch_combined_counter (&adjacency_counters, + cpu_index, adj_index1); /* Don't adjust the buffer for ttl issue; icmp-error node wants * to see the IP headerr */ @@ -2446,6 +2439,19 @@ ip4_rewrite_inline (vlib_main_t * vm, vnet_rewrite_two_headers (adj0[0], adj1[0], ip0, ip1, sizeof (ethernet_header_t)); + /* + * Bump the per-adjacency counters + */ + vlib_increment_combined_counter + (&adjacency_counters, + cpu_index, + adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); + + vlib_increment_combined_counter + (&adjacency_counters, + cpu_index, + adj_index1, 1, vlib_buffer_length_in_chain (vm, p1) + rw_len1); + if (is_midchain) { adj0->sub_type.midchain.fixup_func (vm, adj0, p0); @@ -2519,6 +2525,9 @@ ip4_rewrite_inline (vlib_main_t * vm, p0->flags &= ~VNET_BUFFER_LOCALLY_ORIGINATED; } + vlib_prefetch_combined_counter (&adjacency_counters, + cpu_index, adj_index0); + /* Guess we are only writing on simple Ethernet header. */ vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t)); @@ -2526,11 +2535,10 @@ ip4_rewrite_inline (vlib_main_t * vm, rw_len0 = adj0[0].rewrite_header.data_bytes; vnet_buffer (p0)->ip.save_rewrite_length = rw_len0; - if (PREDICT_FALSE (rw_len0 > sizeof (ethernet_header_t))) - vlib_increment_combined_counter - (&adjacency_counters, cpu_index, adj_index0, - /* packet increment */ 0, - /* byte increment */ rw_len0 - sizeof (ethernet_header_t)); + vlib_increment_combined_counter + (&adjacency_counters, + cpu_index, + adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); /* Check MTU of outgoing interface. */ error0 = (vlib_buffer_length_in_chain (vm, p0) diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c index 197a9b79..232f7283 100644 --- a/src/vnet/ip/ip6_forward.c +++ b/src/vnet/ip/ip6_forward.c @@ -2108,14 +2108,14 @@ ip6_rewrite_inline (vlib_main_t * vm, vnet_buffer (p0)->ip.save_rewrite_length = rw_len0; vnet_buffer (p1)->ip.save_rewrite_length = rw_len1; - vlib_increment_combined_counter (&adjacency_counters, - cpu_index, adj_index0, - /* packet increment */ 0, - /* byte increment */ rw_len0); - vlib_increment_combined_counter (&adjacency_counters, - cpu_index, adj_index1, - /* packet increment */ 0, - /* byte increment */ rw_len1); + vlib_increment_combined_counter + (&adjacency_counters, + cpu_index, + adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); + vlib_increment_combined_counter + (&adjacency_counters, + cpu_index, adj_index1, + 1, vlib_buffer_length_in_chain (vm, p1) + rw_len1); /* Check MTU of outgoing interface. 
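(The adjacency counter above now counts the packet itself and its full
	     wire length: vlib_buffer_length_in_chain () plus rewrite bytes.)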
*/
 	  error0 =
@@ -2233,10 +2233,10 @@ ip6_rewrite_inline (vlib_main_t * vm,
 	  rw_len0 = adj0[0].rewrite_header.data_bytes;
 	  vnet_buffer (p0)->ip.save_rewrite_length = rw_len0;
 
-	  vlib_increment_combined_counter (&adjacency_counters,
-					   cpu_index, adj_index0,
-					   /* packet increment */ 0,
-					   /* byte increment */ rw_len0);
+	  vlib_increment_combined_counter
+	    (&adjacency_counters,
+	     cpu_index,
+	     adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0);
 
 	  /* Check MTU of outgoing interface. */
 	  error0 =
diff --git a/src/vnet/mpls/mpls_output.c b/src/vnet/mpls/mpls_output.c
index 8292a0cb..c06cf917 100644
--- a/src/vnet/mpls/mpls_output.c
+++ b/src/vnet/mpls/mpls_output.c
@@ -128,18 +128,19 @@ mpls_output_inline (vlib_main_t * vm,
           rw_len0 = adj0[0].rewrite_header.data_bytes;
           rw_len1 = adj1[0].rewrite_header.data_bytes;
 
-          if (PREDICT_FALSE (rw_len0 > sizeof(ethernet_header_t)))
-            vlib_increment_combined_counter
-              (&adjacency_counters,
-               cpu_index, adj_index0,
-               /* packet increment */ 0,
-               /* byte increment */ rw_len0-sizeof(ethernet_header_t));
-          if (PREDICT_FALSE (rw_len1 > sizeof(ethernet_header_t)))
-            vlib_increment_combined_counter
-              (&adjacency_counters,
-               cpu_index, adj_index1,
-               /* packet increment */ 0,
-               /* byte increment */ rw_len1-sizeof(ethernet_header_t));
+          /* Bump the adj counters for packet and bytes */
+          vlib_increment_combined_counter
+            (&adjacency_counters,
+             cpu_index,
+             adj_index0,
+             1,
+             vlib_buffer_length_in_chain (vm, p0) + rw_len0);
+          vlib_increment_combined_counter
+            (&adjacency_counters,
+             cpu_index,
+             adj_index1,
+             1,
+             vlib_buffer_length_in_chain (vm, p1) + rw_len1);
 
           /* Check MTU of outgoing interface. */
           if (PREDICT_TRUE(vlib_buffer_length_in_chain (vm, p0) <=
@@ -234,12 +235,12 @@ mpls_output_inline (vlib_main_t * vm,
           /* Update packet buffer attributes/set output interface. */
           rw_len0 = adj0[0].rewrite_header.data_bytes;
 
-          if (PREDICT_FALSE (rw_len0 > sizeof(ethernet_header_t)))
-            vlib_increment_combined_counter
-              (&adjacency_counters,
-               cpu_index, adj_index0,
-               /* packet increment */ 0,
-               /* byte increment */ rw_len0-sizeof(ethernet_header_t));
+          vlib_increment_combined_counter
+            (&adjacency_counters,
+             cpu_index,
+             adj_index0,
+             1,
+             vlib_buffer_length_in_chain (vm, p0) + rw_len0);
 
           /* Check MTU of outgoing interface. */
           if (PREDICT_TRUE(vlib_buffer_length_in_chain (vm, p0) <=
diff --git a/src/vpp/api/vpe.api b/src/vpp/api/vpe.api
index f32ba670..a00033c5 100644
--- a/src/vpp/api/vpe.api
+++ b/src/vpp/api/vpe.api
@@ -230,6 +230,31 @@ manual_print manual_endian define vnet_ip4_fib_counters
   vl_api_ip4_fib_counter_t c[count];
 };
 
+typeonly manual_print manual_endian define ip4_nbr_counter
+{
+  u32 address;
+  u8 link_type;
+  u64 packets;
+  u64 bytes;
+};
+
+/**
+ * @brief Per-neighbour (i.e. per-adjacency) counters
+ * @param count The size of the array of counters
+ * @param sw_if_index The interface the adjacency is on
+ * @param begin Flag to indicate this is the first set of stats for this
+ *        interface.
If this flag is not set then it is a continuation of
+ *        stats for this interface
+ * @param c counters
+ */
+manual_print manual_endian define vnet_ip4_nbr_counters
+{
+  u32 count;
+  u32 sw_if_index;
+  u8 begin;
+  vl_api_ip4_nbr_counter_t c[count];
+};
+
 typeonly manual_print manual_endian define ip6_fib_counter
 {
   u64 address[2];
@@ -245,6 +270,22 @@ manual_print manual_endian define vnet_ip6_fib_counters
   vl_api_ip6_fib_counter_t c[count];
 };
 
+typeonly manual_print manual_endian define ip6_nbr_counter
+{
+  u64 address[2];
+  u8 link_type;
+  u64 packets;
+  u64 bytes;
+};
+
+manual_print manual_endian define vnet_ip6_nbr_counters
+{
+  u32 count;
+  u32 sw_if_index;
+  u8 begin;
+  vl_api_ip6_nbr_counter_t c[count];
+};
+
 /** \brief Request for a single block of summary stats
     @param client_index - opaque cookie to identify the sender
     @param context - sender context, to match reply w/ request
diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c
index 391e02f6..5e9b0d69 100644
--- a/src/vpp/stats/stats.c
+++ b/src/vpp/stats/stats.c
@@ -49,7 +49,9 @@ _(WANT_STATS, want_stats)                               \
 _(WANT_STATS_REPLY, want_stats_reply)                   \
 _(VNET_INTERFACE_COUNTERS, vnet_interface_counters)     \
 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters)         \
-_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters)
+_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters)         \
+_(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters)         \
+_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters)
 
 /* These constants ensure msg sizes <= 1024, aka ring allocation */
 #define SIMPLE_COUNTER_BATCH_SIZE 126
@@ -258,6 +260,313 @@ ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
     }
 }
 
+/**
+ * @brief The context passed when collecting adjacency counters
+ */
+typedef struct ip4_nbr_stats_ctx_t_
+{
+  /**
+   * The SW IF index all these adjs belong to
+   */
+  u32 sw_if_index;
+
+  /**
+   * A vector of ip4 nbr counters
+   */
+  vl_api_ip4_nbr_counter_t *counters;
+} ip4_nbr_stats_ctx_t;
+
+static adj_walk_rc_t
+ip4_nbr_stats_cb (adj_index_t ai, void *arg)
+{
+  vl_api_ip4_nbr_counter_t *vl_counter;
+  vlib_counter_t adj_counter;
+  ip4_nbr_stats_ctx_t *ctx;
+  ip_adjacency_t *adj;
+
+  ctx = arg;
+  vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
+
+  if (0 != adj_counter.packets)
+    {
+      vec_add2 (ctx->counters, vl_counter, 1);
+      adj = adj_get (ai);
+
+      vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
+      vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
+      vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
+      vl_counter->link_type = adj->ia_link;
+    }
+  return (ADJ_WALK_RC_CONTINUE);
+}
+
+#define MIN(x,y) (((x)<(y))?(x):(y))
+
+static void
+ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
+{
+  api_main_t *am = sm->api_main;
+  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+  unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+  vl_api_vnet_ip4_nbr_counters_t *mp = 0;
+  int first = 0;
+
+  /*
+   * If the walk context has counters, which may be left over from the last
+   * suspend, then we continue from there.
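+   * Each pass ships at most IP4_FIB_COUNTER_BATCH_SIZE counters,
+   * drains them from the back of the vector, and drops the data
+   * structure lock between batches, pausing when the shared-memory
+   * queue reports full.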
+ */ + while (0 != vec_len (ctx->counters)) + { + u32 n_items = MIN (vec_len (ctx->counters), + IP4_FIB_COUNTER_BATCH_SIZE); + u8 pause = 0; + + dslock (sm, 0 /* release hint */ , 1 /* tag */ ); + + mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + + (n_items * + sizeof + (vl_api_ip4_nbr_counter_t))); + mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS); + mp->count = ntohl (n_items); + mp->sw_if_index = ntohl (ctx->sw_if_index); + mp->begin = first; + first = 0; + + /* + * copy the counters from the back of the context, then we can easily + * 'erase' them by resetting the vector length. + * The order we push the stats to the caller is not important. + */ + clib_memcpy (mp->c, + &ctx->counters[vec_len (ctx->counters) - n_items], + n_items * sizeof (*ctx->counters)); + + _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items; + + /* + * send to the shm q + */ + unix_shared_memory_queue_lock (q); + pause = unix_shared_memory_queue_is_full (q); + + vl_msg_api_send_shmem_nolock (q, (u8 *) & mp); + unix_shared_memory_queue_unlock (q); + dsunlock (sm); + + if (pause) + ip46_fib_stats_delay (sm, 0 /* sec */ , + STATS_RELEASE_DELAY_NS); + } +} + +static void +do_ip4_nbrs (stats_main_t * sm) +{ + vnet_main_t *vnm = vnet_get_main (); + vnet_interface_main_t *im = &vnm->interface_main; + vnet_sw_interface_t *si; + + ip4_nbr_stats_ctx_t ctx = { + .sw_if_index = 0, + .counters = NULL, + }; + + /* *INDENT-OFF* */ + pool_foreach (si, im->sw_interfaces, + ({ + /* + * update the interface we are now concerned with + */ + ctx.sw_if_index = si->sw_if_index; + + /* + * we are about to walk another interface, so we shouldn't have any pending + * stats to export. + */ + ASSERT(ctx.counters == NULL); + + /* + * visit each neighbour adjacency on the interface and collect + * its current stats. + * Because we hold the lock the walk is synchronous, so safe to routing + * updates. It's limited in work by the number of adjacenies on an + * interface, which is typically not huge. + */ + dslock (sm, 0 /* release hint */ , 1 /* tag */ ); + adj_nbr_walk (si->sw_if_index, + FIB_PROTOCOL_IP4, + ip4_nbr_stats_cb, + &ctx); + dsunlock (sm); + + /* + * if this interface has some adjacencies with counters then ship them, + * else continue to the next interface. 
+ */ + if (NULL != ctx.counters) + { + ip4_nbr_ship(sm, &ctx); + } + })); + /* *INDENT-OFF* */ +} + +/** + * @brief The context passed when collecting adjacency counters + */ +typedef struct ip6_nbr_stats_ctx_t_ +{ + /** + * The SW IF index all these adjs belong to + */ + u32 sw_if_index; + + /** + * A vector of ip6 nbr counters + */ + vl_api_ip6_nbr_counter_t *counters; +} ip6_nbr_stats_ctx_t; + +static adj_walk_rc_t +ip6_nbr_stats_cb (adj_index_t ai, + void *arg) +{ + vl_api_ip6_nbr_counter_t *vl_counter; + vlib_counter_t adj_counter; + ip6_nbr_stats_ctx_t *ctx; + ip_adjacency_t *adj; + + ctx = arg; + vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter); + + if (0 != adj_counter.packets) + { + vec_add2(ctx->counters, vl_counter, 1); + adj = adj_get(ai); + + vl_counter->packets = clib_host_to_net_u64(adj_counter.packets); + vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes); + vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0]; + vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1]; + vl_counter->link_type = adj->ia_link; + } + return (ADJ_WALK_RC_CONTINUE); +} + +#define MIN(x,y) (((x)<(y))?(x):(y)) + +static void +ip6_nbr_ship (stats_main_t * sm, + ip6_nbr_stats_ctx_t *ctx) +{ + api_main_t *am = sm->api_main; + vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr; + unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue; + vl_api_vnet_ip6_nbr_counters_t *mp = 0; + int first = 0; + + /* + * If the walk context has counters, which may be left over from the last + * suspend, then we continue from there. + */ + while (0 != vec_len(ctx->counters)) + { + u32 n_items = MIN (vec_len (ctx->counters), + IP6_FIB_COUNTER_BATCH_SIZE); + u8 pause = 0; + + dslock (sm, 0 /* release hint */ , 1 /* tag */ ); + + mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + + (n_items * + sizeof + (vl_api_ip6_nbr_counter_t))); + mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS); + mp->count = ntohl (n_items); + mp->sw_if_index = ntohl (ctx->sw_if_index); + mp->begin = first; + first = 0; + + /* + * copy the counters from the back of the context, then we can easily + * 'erase' them by resetting the vector length. + * The order we push the stats to the caller is not important. + */ + clib_memcpy (mp->c, + &ctx->counters[vec_len (ctx->counters) - n_items], + n_items * sizeof (*ctx->counters)); + + _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items; + + /* + * send to the shm q + */ + unix_shared_memory_queue_lock (q); + pause = unix_shared_memory_queue_is_full (q); + + vl_msg_api_send_shmem_nolock (q, (u8 *) & mp); + unix_shared_memory_queue_unlock (q); + dsunlock (sm); + + if (pause) + ip46_fib_stats_delay (sm, 0 /* sec */ , + STATS_RELEASE_DELAY_NS); + } +} + +static void +do_ip6_nbrs (stats_main_t * sm) +{ + vnet_main_t *vnm = vnet_get_main (); + vnet_interface_main_t *im = &vnm->interface_main; + vnet_sw_interface_t *si; + + ip6_nbr_stats_ctx_t ctx = { + .sw_if_index = 0, + .counters = NULL, + }; + + /* *INDENT-OFF* */ + pool_foreach (si, im->sw_interfaces, + ({ + /* + * update the interface we are now concerned with + */ + ctx.sw_if_index = si->sw_if_index; + + /* + * we are about to walk another interface, so we shouldn't have any pending + * stats to export. + */ + ASSERT(ctx.counters == NULL); + + /* + * visit each neighbour adjacency on the interface and collect + * its current stats. + * Because we hold the lock the walk is synchronous, so safe to routing + * updates. 
It's limited in work by the number of adjacenies on an + * interface, which is typically not huge. + */ + dslock (sm, 0 /* release hint */ , 1 /* tag */ ); + adj_nbr_walk (si->sw_if_index, + FIB_PROTOCOL_IP6, + ip6_nbr_stats_cb, + &ctx); + dsunlock (sm); + + /* + * if this interface has some adjacencies with counters then ship them, + * else continue to the next interface. + */ + if (NULL != ctx.counters) + { + ip6_nbr_ship(sm, &ctx); + } + })); + /* *INDENT-OFF* */ +} + static void do_ip4_fibs (stats_main_t * sm) { @@ -318,13 +627,7 @@ again: hash_foreach_pair (p, hash, ({ x.address.data_u32 = p->key; - if (lm->fib_result_n_words > 1) - { - x.index = vec_len (results); - vec_add (results, p->value, lm->fib_result_n_words); - } - else - x.index = p->value[0]; + x.index = p->value[0]; vec_add1 (routes, x); if (sm->data_structure_lock->release_hint) @@ -631,6 +934,8 @@ stats_thread_fn (void *arg) do_combined_interface_counters (sm); do_ip4_fibs (sm); do_ip6_fibs (sm); + do_ip4_nbrs (sm); + do_ip6_nbrs (sm); } } @@ -804,6 +1109,45 @@ vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp) } } +static void +vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp) +{ + vpe_client_registration_t *reg; + stats_main_t *sm = &stats_main; + unix_shared_memory_queue_t *q, *q_prev = NULL; + vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL; + u32 mp_size; + + mp_size = sizeof (*mp_copy) + + ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t); + + /* *INDENT-OFF* */ + pool_foreach(reg, sm->stats_registrations, + ({ + q = vl_api_client_index_to_input_queue (reg->client_index); + if (q) + { + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + mp_copy = vl_msg_api_alloc_as_if_client(mp_size); + clib_memcpy(mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *)&mp); + mp = mp_copy; + } + q_prev = q; + } + })); + /* *INDENT-ON* */ + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + vl_msg_api_send_shmem (q_prev, (u8 *) & mp); + } + else + { + vl_msg_api_free (mp); + } +} + static void vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp) { @@ -843,6 +1187,45 @@ vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp) } } +static void +vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp) +{ + vpe_client_registration_t *reg; + stats_main_t *sm = &stats_main; + unix_shared_memory_queue_t *q, *q_prev = NULL; + vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL; + u32 mp_size; + + mp_size = sizeof (*mp_copy) + + ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t); + + /* *INDENT-OFF* */ + pool_foreach(reg, sm->stats_registrations, + ({ + q = vl_api_client_index_to_input_queue (reg->client_index); + if (q) + { + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + mp_copy = vl_msg_api_alloc_as_if_client(mp_size); + clib_memcpy(mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *)&mp); + mp = mp_copy; + } + q_prev = q; + } + })); + /* *INDENT-ON* */ + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + vl_msg_api_send_shmem (q_prev, (u8 *) & mp); + } + else + { + vl_msg_api_free (mp); + } +} + static void vl_api_want_stats_reply_t_handler (vl_api_want_stats_reply_t * mp) { @@ -929,6 +1312,10 @@ stats_memclnt_delete_callback (u32 client_index) #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler +#define vl_api_vnet_ip4_nbr_counters_t_endian 
vl_noop_handler +#define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler +#define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler +#define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler static clib_error_t * stats_init (vlib_main_t * vm) @@ -961,6 +1348,8 @@ stats_init (vlib_main_t * vm) am->message_bounce[VL_API_VNET_INTERFACE_COUNTERS] = 1; am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1; am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1; + am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1; + am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1; return 0; } -- cgit 1.2.3-korg From a1a093d4e46e38503332a97ad216f80053a15f2b Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Thu, 2 Mar 2017 13:13:23 -0500 Subject: Clean up binary api message handler registration issues Removed a fair number of "BUG" message handlers, due to conflicts with actual message handlers in api_format.c. Vpp itself had no business receiving certain messages, up to the point where we started building in relevant code from vpp_api_test. Eliminated all but one duplicate registration complaint. That one needs attention from the vxlan team since the duplicated handlers have diverged. Change-Id: Iafce5429d2f906270643b4ea5f0130e20beb4d1d Signed-off-by: Dave Barach --- src/vat/api_format.c | 43 ++++++- src/vlib/unix/input.c | 31 ++++- src/vlibapi/api.h | 15 +++ src/vlibapi/api_shared.c | 4 + src/vnet/classify/classify_api.c | 8 -- src/vnet/devices/virtio/vhost_user_api.c | 10 +- src/vnet/dhcp/dhcp_api.c | 8 -- src/vnet/interface_api.c | 7 - src/vnet/ip/ip_api.c | 83 ------------ src/vnet/l2/l2_api.c | 22 ---- src/vnet/mpls/mpls_api.c | 28 +--- src/vpp/api/api.c | 211 ------------------------------- src/vpp/api/api_main.c | 1 - src/vpp/stats/stats.c | 7 - 14 files changed, 81 insertions(+), 397 deletions(-) (limited to 'src/vpp/stats/stats.c') diff --git a/src/vat/api_format.c b/src/vat/api_format.c index 1321bade..52436917 100644 --- a/src/vat/api_format.c +++ b/src/vat/api_format.c @@ -944,6 +944,7 @@ static void vl_api_sw_interface_details_t_handler_json } } +#if VPP_API_TEST_BUILTIN == 0 static void vl_api_sw_interface_set_flags_t_handler (vl_api_sw_interface_set_flags_t * mp) { @@ -954,6 +955,7 @@ static void vl_api_sw_interface_set_flags_t_handler mp->admin_up_down ? "admin-up" : "admin-down", mp->link_up_down ? 
"link-up" : "link-down"); } +#endif static void vl_api_sw_interface_set_flags_t_handler_json (vl_api_sw_interface_set_flags_t * mp) @@ -4009,7 +4011,6 @@ foreach_standard_reply_retval_handler; #define foreach_vpe_api_reply_msg \ _(CREATE_LOOPBACK_REPLY, create_loopback_reply) \ _(SW_INTERFACE_DETAILS, sw_interface_details) \ -_(SW_INTERFACE_SET_FLAGS, sw_interface_set_flags) \ _(SW_INTERFACE_SET_FLAGS_REPLY, sw_interface_set_flags_reply) \ _(CONTROL_PING_REPLY, control_ping_reply) \ _(CLI_REPLY, cli_reply) \ @@ -4126,11 +4127,6 @@ _(IKEV2_INITIATE_REKEY_CHILD_SA_REPLY, ikev2_initiate_rekey_child_sa_reply) \ _(DELETE_LOOPBACK_REPLY, delete_loopback_reply) \ _(BD_IP_MAC_ADD_DEL_REPLY, bd_ip_mac_add_del_reply) \ _(DHCP_COMPL_EVENT, dhcp_compl_event) \ -_(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \ -_(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \ -_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \ -_(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \ -_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \ _(MAP_ADD_DOMAIN_REPLY, map_add_domain_reply) \ _(MAP_DEL_DOMAIN_REPLY, map_del_domain_reply) \ _(MAP_ADD_DEL_RULE_REPLY, map_add_del_rule_reply) \ @@ -4232,6 +4228,14 @@ _(SW_INTERFACE_SET_MTU_REPLY, sw_interface_set_mtu_reply) \ _(IP_NEIGHBOR_DETAILS, ip_neighbor_details) \ _(SW_INTERFACE_GET_TABLE_REPLY, sw_interface_get_table_reply) +#define foreach_standalone_reply_msg \ +_(SW_INTERFACE_SET_FLAGS, sw_interface_set_flags) \ +_(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \ +_(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \ +_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \ +_(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \ +_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) + typedef struct { u8 *name; @@ -15425,7 +15429,15 @@ api_af_packet_create (vat_main_t * vam) vec_free (host_if_name); S (mp); - W2 (ret, fprintf (vam->ofp, " new sw_if_index = %d ", vam->sw_if_index)); + + /* *INDENT-OFF* */ + W2 (ret, + ({ + if (ret == 0) + fprintf (vam->ofp ? vam->ofp : stderr, + " new sw_if_index = %d\n", vam->sw_if_index); + })); + /* *INDENT-ON* */ return ret; } @@ -18417,6 +18429,9 @@ _(unset, "usage: unset ") } \ } foreach_vpe_api_reply_msg; +#if VPP_API_TEST_BUILTIN == 0 +foreach_standalone_reply_msg; +#endif #undef _ void @@ -18430,6 +18445,9 @@ vat_api_hookup (vat_main_t * vam) vl_api_##n##_t_print, \ sizeof(vl_api_##n##_t), 1); foreach_vpe_api_reply_msg; +#if VPP_API_TEST_BUILTIN == 0 + foreach_standalone_reply_msg; +#endif #undef _ #if (VPP_API_TEST_BUILTIN==0) @@ -18463,6 +18481,17 @@ vat_api_hookup (vat_main_t * vam) #undef _ } +#if VPP_API_TEST_BUILTIN +static clib_error_t * +vat_api_hookup_shim (vlib_main_t * vm) +{ + vat_api_hookup (&vat_main); + return 0; +} + +VLIB_API_INIT_FUNCTION (vat_api_hookup_shim); +#endif + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vlib/unix/input.c b/src/vlib/unix/input.c index 07096ed2..7b4183a4 100644 --- a/src/vlib/unix/input.c +++ b/src/vlib/unix/input.c @@ -66,6 +66,7 @@ linux_epoll_file_update (unix_file_t * f, unix_file_update_type_t update_type) unix_main_t *um = &unix_main; linux_epoll_main_t *em = &linux_epoll_main; struct epoll_event e; + int op; memset (&e, 0, sizeof (e)); @@ -76,13 +77,29 @@ linux_epoll_file_update (unix_file_t * f, unix_file_update_type_t update_type) e.events |= EPOLLET; e.data.u32 = f - um->file_pool; - if (epoll_ctl (em->epoll_fd, - (update_type == UNIX_FILE_UPDATE_ADD - ? EPOLL_CTL_ADD - : (update_type == UNIX_FILE_UPDATE_MODIFY - ? 
EPOLL_CTL_MOD - : EPOLL_CTL_DEL)), f->file_descriptor, &e) < 0) - clib_warning ("epoll_ctl"); + op = -1; + + switch (update_type) + { + case UNIX_FILE_UPDATE_ADD: + op = EPOLL_CTL_ADD; + break; + + case UNIX_FILE_UPDATE_MODIFY: + op = EPOLL_CTL_MOD; + break; + + case UNIX_FILE_UPDATE_DELETE: + op = EPOLL_CTL_DEL; + break; + + default: + clib_warning ("unknown update_type %d", update_type); + return; + } + + if (epoll_ctl (em->epoll_fd, op, f->file_descriptor, &e) < 0) + clib_unix_warning ("epoll_ctl"); } static uword diff --git a/src/vlibapi/api.h b/src/vlibapi/api.h index fcb101d7..b40ece15 100644 --- a/src/vlibapi/api.h +++ b/src/vlibapi/api.h @@ -271,6 +271,21 @@ vlib_node_t **vlib_node_unserialize (u8 * vector); #define VLIB_API_INIT_FUNCTION(x) VLIB_DECLARE_INIT_FUNCTION(x,api_init) +/* Call given init function: used for init function dependencies. */ +#define vlib_call_api_init_function(vm, x) \ + ({ \ + extern vlib_init_function_t * _VLIB_INIT_FUNCTION_SYMBOL (x,api_init); \ + vlib_init_function_t * _f = _VLIB_INIT_FUNCTION_SYMBOL (x,api_init); \ + clib_error_t * _error = 0; \ + if (! hash_get (vm->init_functions_called, _f)) \ + { \ + hash_set1 (vm->init_functions_called, _f); \ + _error = _f (vm); \ + } \ + _error; \ + }) + + #endif /* included_api_h */ /* diff --git a/src/vlibapi/api_shared.c b/src/vlibapi/api_shared.c index 1a2740e2..79921afe 100644 --- a/src/vlibapi/api_shared.c +++ b/src/vlibapi/api_shared.c @@ -667,6 +667,10 @@ vl_msg_api_config (vl_msg_api_msg_config_t * c) foreach_msg_api_vector; #undef _ + if (am->msg_names[c->id]) + clib_warning ("BUG: multiple registrations of 'vl_api_%s_t_handler'", + c->name); + am->msg_names[c->id] = c->name; am->msg_handlers[c->id] = c->handler; am->msg_cleanup_handlers[c->id] = c->cleanup; diff --git a/src/vnet/classify/classify_api.c b/src/vnet/classify/classify_api.c index 77a8b434..24c7a2b9 100644 --- a/src/vnet/classify/classify_api.c +++ b/src/vnet/classify/classify_api.c @@ -53,7 +53,6 @@ _(CLASSIFY_TABLE_IDS,classify_table_ids) \ _(CLASSIFY_TABLE_BY_INTERFACE, classify_table_by_interface) \ _(CLASSIFY_TABLE_INFO,classify_table_info) \ _(CLASSIFY_SESSION_DUMP,classify_session_dump) \ -_(CLASSIFY_SESSION_DETAILS,classify_session_details) \ _(POLICER_CLASSIFY_SET_INTERFACE, policer_classify_set_interface) \ _(POLICER_CLASSIFY_DUMP, policer_classify_dump) \ _(FLOW_CLASSIFY_SET_INTERFACE, flow_classify_set_interface) \ @@ -356,13 +355,6 @@ vl_api_classify_table_info_t_handler (vl_api_classify_table_info_t * mp) vl_msg_api_send_shmem (q, (u8 *) & rmp); } -static void -vl_api_classify_session_details_t_handler (vl_api_classify_session_details_t * - mp) -{ - clib_warning ("BUG"); -} - static void send_classify_session_details (unix_shared_memory_queue_t * q, u32 table_id, diff --git a/src/vnet/devices/virtio/vhost_user_api.c b/src/vnet/devices/virtio/vhost_user_api.c index dd517c26..8dbd032b 100644 --- a/src/vnet/devices/virtio/vhost_user_api.c +++ b/src/vnet/devices/virtio/vhost_user_api.c @@ -46,8 +46,7 @@ _(CREATE_VHOST_USER_IF, create_vhost_user_if) \ _(MODIFY_VHOST_USER_IF, modify_vhost_user_if) \ _(DELETE_VHOST_USER_IF, delete_vhost_user_if) \ -_(SW_INTERFACE_VHOST_USER_DUMP, sw_interface_vhost_user_dump) \ -_(SW_INTERFACE_VHOST_USER_DETAILS, sw_interface_vhost_user_details) +_(SW_INTERFACE_VHOST_USER_DUMP, sw_interface_vhost_user_dump) /* * WARNING: replicated pending api refactor completion @@ -148,13 +147,6 @@ vl_api_delete_vhost_user_if_t_handler (vl_api_delete_vhost_user_if_t * mp) } } -static void - 
vl_api_sw_interface_vhost_user_details_t_handler - (vl_api_sw_interface_vhost_user_details_t * mp) -{ - clib_warning ("BUG"); -} - static void send_sw_interface_vhost_user_details (vpe_api_main_t * am, unix_shared_memory_queue_t * q, diff --git a/src/vnet/dhcp/dhcp_api.c b/src/vnet/dhcp/dhcp_api.c index bdf02cae..ce34f6a4 100644 --- a/src/vnet/dhcp/dhcp_api.c +++ b/src/vnet/dhcp/dhcp_api.c @@ -46,7 +46,6 @@ #define foreach_vpe_api_msg \ _(DHCP_PROXY_CONFIG,dhcp_proxy_config) \ _(DHCP_PROXY_DUMP,dhcp_proxy_dump) \ -_(DHCP_PROXY_DETAILS,dhcp_proxy_details) \ _(DHCP_PROXY_SET_VSS,dhcp_proxy_set_vss) \ _(DHCP_CLIENT_CONFIG, dhcp_client_config) @@ -158,13 +157,6 @@ dhcp_send_details (fib_protocol_t proto, vl_msg_api_send_shmem (q, (u8 *) & mp); } - -static void -vl_api_dhcp_proxy_details_t_handler (vl_api_dhcp_proxy_details_t * mp) -{ - clib_warning ("BUG"); -} - void dhcp_compl_event_callback (u32 client_index, u32 pid, u8 * hostname, u8 is_ipv6, u8 * host_address, u8 * router_address, diff --git a/src/vnet/interface_api.c b/src/vnet/interface_api.c index 63f7cad4..60cd6d40 100644 --- a/src/vnet/interface_api.c +++ b/src/vnet/interface_api.c @@ -50,7 +50,6 @@ _(SW_INTERFACE_SET_FLAGS, sw_interface_set_flags) \ _(SW_INTERFACE_SET_MTU, sw_interface_set_mtu) \ _(WANT_INTERFACE_EVENTS, want_interface_events) \ _(SW_INTERFACE_DUMP, sw_interface_dump) \ -_(SW_INTERFACE_DETAILS, sw_interface_details) \ _(SW_INTERFACE_ADD_DEL_ADDRESS, sw_interface_add_del_address) \ _(SW_INTERFACE_SET_TABLE, sw_interface_set_table) \ _(SW_INTERFACE_GET_TABLE, sw_interface_get_table) \ @@ -684,12 +683,6 @@ out: REPLY_MACRO (VL_API_SW_INTERFACE_TAG_ADD_DEL_REPLY); } -static void -vl_api_sw_interface_details_t_handler (vl_api_sw_interface_details_t * mp) -{ - clib_warning ("BUG"); -} - /* * vpe_api_hookup * Add vpe's API message handlers to the table. 
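
The duplicate-registration guard added to vl_msg_api_config () above is what flushes these conflicts out: each message id owns exactly one handler slot, and a second registration for the same id is now reported instead of silently overwriting the first (last writer still wins, which is why the diverged vxlan duplicates still need attention). A minimal standalone sketch of that guard follows; the table size and function names here are illustrative, not the actual vlibapi layout.

#include <stdio.h>

#define N_MSG_IDS 1024

typedef void (*msg_handler_t) (void *mp);

static const char *msg_names[N_MSG_IDS];
static msg_handler_t msg_handlers[N_MSG_IDS];

/* Register a handler for a message id; warn on re-registration,
 * mirroring the check added to api_shared.c above. */
static void
register_msg_handler (unsigned id, const char *name, msg_handler_t handler)
{
  if (id >= N_MSG_IDS)
    return;
  if (msg_names[id])
    fprintf (stderr,
             "BUG: multiple registrations of 'vl_api_%s_t_handler'\n", name);
  msg_names[id] = name;
  msg_handlers[id] = handler;
}

Registering the same id twice, once from a real handler and once from a stale "BUG" stub like the ones deleted in this patch, trips the warning, which is exactly the class of conflict being removed here.
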
diff --git a/src/vnet/ip/ip_api.c b/src/vnet/ip/ip_api.c index 49d941c2..ab164a5f 100644 --- a/src/vnet/ip/ip_api.c +++ b/src/vnet/ip/ip_api.c @@ -59,17 +59,12 @@ #define foreach_ip_api_msg \ _(IP_FIB_DUMP, ip_fib_dump) \ -_(IP_FIB_DETAILS, ip_fib_details) \ _(IP6_FIB_DUMP, ip6_fib_dump) \ -_(IP6_FIB_DETAILS, ip6_fib_details) \ _(IP_MFIB_DUMP, ip_mfib_dump) \ -_(IP_MFIB_DETAILS, ip_mfib_details) \ _(IP6_MFIB_DUMP, ip6_mfib_dump) \ -_(IP6_MFIB_DETAILS, ip6_mfib_details) \ _(IP_NEIGHBOR_DUMP, ip_neighbor_dump) \ _(IP_MROUTE_ADD_DEL, ip_mroute_add_del) \ _(MFIB_SIGNAL_DUMP, mfib_signal_dump) \ -_(IP_NEIGHBOR_DETAILS, ip_neighbor_details) \ _(IP_ADDRESS_DUMP, ip_address_dump) \ _(IP_DUMP, ip_dump) \ _(IP_NEIGHBOR_ADD_DEL, ip_neighbor_add_del) \ @@ -105,12 +100,6 @@ send_ip_neighbor_details (u8 is_ipv6, vl_msg_api_send_shmem (q, (u8 *) & mp); } -static void -vl_api_ip_neighbor_details_t_handler (vl_api_ip_neighbor_details_t * mp) -{ - clib_warning ("BUG"); -} - static void vl_api_ip_neighbor_dump_t_handler (vl_api_ip_neighbor_dump_t * mp) { @@ -185,24 +174,6 @@ copy_fib_next_hop (fib_route_path_encode_t * api_rpath, void *fp_arg) sizeof (api_rpath->rpath.frp_addr.ip6)); } -static void -vl_api_ip_fib_details_t_handler (vl_api_ip_fib_details_t * mp) -{ - clib_warning ("BUG"); -} - -static void -vl_api_ip_fib_details_t_endian (vl_api_ip_fib_details_t * mp) -{ - clib_warning ("BUG"); -} - -static void -vl_api_ip_fib_details_t_print (vl_api_ip_fib_details_t * mp) -{ - clib_warning ("BUG"); -} - static void send_ip_fib_details (vpe_api_main_t * am, unix_shared_memory_queue_t * q, @@ -316,24 +287,6 @@ vl_api_ip_fib_dump_t_handler (vl_api_ip_fib_dump_t * mp) vec_free (lfeis); } -static void -vl_api_ip6_fib_details_t_handler (vl_api_ip6_fib_details_t * mp) -{ - clib_warning ("BUG"); -} - -static void -vl_api_ip6_fib_details_t_endian (vl_api_ip6_fib_details_t * mp) -{ - clib_warning ("BUG"); -} - -static void -vl_api_ip6_fib_details_t_print (vl_api_ip6_fib_details_t * mp) -{ - clib_warning ("BUG"); -} - static void send_ip6_fib_details (vpe_api_main_t * am, unix_shared_memory_queue_t * q, @@ -469,24 +422,6 @@ vl_api_ip6_fib_dump_t_handler (vl_api_ip6_fib_dump_t * mp) /* *INDENT-ON* */ } -static void -vl_api_ip_mfib_details_t_handler (vl_api_ip_mfib_details_t * mp) -{ - clib_warning ("BUG"); -} - -static void -vl_api_ip_mfib_details_t_endian (vl_api_ip_mfib_details_t * mp) -{ - clib_warning ("BUG"); -} - -static void -vl_api_ip_mfib_details_t_print (vl_api_ip_mfib_details_t * mp) -{ - clib_warning ("BUG"); -} - static void send_ip_mfib_details (vpe_api_main_t * am, unix_shared_memory_queue_t * q, @@ -591,24 +526,6 @@ vl_api_ip_mfib_dump_t_handler (vl_api_ip_mfib_dump_t * mp) vec_free (api_rpaths); } -static void -vl_api_ip6_mfib_details_t_handler (vl_api_ip6_mfib_details_t * mp) -{ - clib_warning ("BUG"); -} - -static void -vl_api_ip6_mfib_details_t_endian (vl_api_ip6_mfib_details_t * mp) -{ - clib_warning ("BUG"); -} - -static void -vl_api_ip6_mfib_details_t_print (vl_api_ip6_mfib_details_t * mp) -{ - clib_warning ("BUG"); -} - static void send_ip6_mfib_details (vpe_api_main_t * am, unix_shared_memory_queue_t * q, diff --git a/src/vnet/l2/l2_api.c b/src/vnet/l2/l2_api.c index a3cc49bf..a985852c 100644 --- a/src/vnet/l2/l2_api.c +++ b/src/vnet/l2/l2_api.c @@ -48,13 +48,10 @@ _(L2_XCONNECT_DUMP, l2_xconnect_dump) \ _(L2_FIB_CLEAR_TABLE, l2_fib_clear_table) \ _(L2_FIB_TABLE_DUMP, l2_fib_table_dump) \ -_(L2_FIB_TABLE_ENTRY, l2_fib_table_entry) \ _(L2FIB_ADD_DEL, l2fib_add_del) \ _(L2_FLAGS, l2_flags) \ 
_(BRIDGE_DOMAIN_ADD_DEL, bridge_domain_add_del) \ _(BRIDGE_DOMAIN_DUMP, bridge_domain_dump) \ -_(BRIDGE_DOMAIN_DETAILS, bridge_domain_details) \ -_(BRIDGE_DOMAIN_SW_IF_DETAILS, bridge_domain_sw_if_details) \ _(BRIDGE_FLAGS, bridge_flags) \ _(L2_INTERFACE_VLAN_TAG_REWRITE, l2_interface_vlan_tag_rewrite) \ _(L2_INTERFACE_PBB_TAG_REWRITE, l2_interface_pbb_tag_rewrite) @@ -140,12 +137,6 @@ send_l2fib_table_entry (vpe_api_main_t * am, vl_msg_api_send_shmem (q, (u8 *) & mp); } -static void -vl_api_l2_fib_table_entry_t_handler (vl_api_l2_fib_table_entry_t * mp) -{ - clib_warning ("BUG"); -} - static void vl_api_l2_fib_table_dump_t_handler (vl_api_l2_fib_table_dump_t * mp) { @@ -329,19 +320,6 @@ vl_api_bridge_domain_add_del_t_handler (vl_api_bridge_domain_add_del_t * mp) REPLY_MACRO (VL_API_BRIDGE_DOMAIN_ADD_DEL_REPLY); } -static void -vl_api_bridge_domain_details_t_handler (vl_api_bridge_domain_details_t * mp) -{ - clib_warning ("BUG"); -} - -static void - vl_api_bridge_domain_sw_if_details_t_handler - (vl_api_bridge_domain_sw_if_details_t * mp) -{ - clib_warning ("BUG"); -} - static void send_bridge_domain_details (unix_shared_memory_queue_t * q, l2_bridge_domain_t * bd_config, diff --git a/src/vnet/mpls/mpls_api.c b/src/vnet/mpls/mpls_api.c index ebbeba69..a36a5046 100644 --- a/src/vnet/mpls/mpls_api.c +++ b/src/vnet/mpls/mpls_api.c @@ -50,9 +50,7 @@ _(MPLS_IP_BIND_UNBIND, mpls_ip_bind_unbind) \ _(MPLS_ROUTE_ADD_DEL, mpls_route_add_del) \ _(MPLS_TUNNEL_ADD_DEL, mpls_tunnel_add_del) \ _(MPLS_TUNNEL_DUMP, mpls_tunnel_dump) \ -_(MPLS_TUNNEL_DETAILS, mpls_tunnel_details) \ -_(MPLS_FIB_DUMP, mpls_fib_dump) \ -_(MPLS_FIB_DETAILS, mpls_fib_details) +_(MPLS_FIB_DUMP, mpls_fib_dump) extern void stats_dslock_with_hint (int hint, int tag); extern void stats_dsunlock (void); @@ -280,12 +278,6 @@ vl_api_mpls_tunnel_add_del_t_handler (vl_api_mpls_tunnel_add_del_t * mp) /* *INDENT-ON* */ } -static void -vl_api_mpls_tunnel_details_t_handler (vl_api_mpls_tunnel_details_t * mp) -{ - clib_warning ("BUG"); -} - typedef struct mpls_tunnel_send_walk_ctx_t_ { unix_shared_memory_queue_t *q; @@ -340,24 +332,6 @@ vl_api_mpls_tunnel_dump_t_handler (vl_api_mpls_tunnel_dump_t * mp) mpls_tunnel_walk (send_mpls_tunnel_entry, &ctx); } -static void -vl_api_mpls_fib_details_t_handler (vl_api_mpls_fib_details_t * mp) -{ - clib_warning ("BUG"); -} - -static void -vl_api_mpls_fib_details_t_endian (vl_api_mpls_fib_details_t * mp) -{ - clib_warning ("BUG"); -} - -static void -vl_api_mpls_fib_details_t_print (vl_api_mpls_fib_details_t * mp) -{ - clib_warning ("BUG"); -} - static void send_mpls_fib_details (vpe_api_main_t * am, unix_shared_memory_queue_t * q, diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c index 60fd0199..a8f471e8 100644 --- a/src/vpp/api/api.c +++ b/src/vpp/api/api.c @@ -128,12 +128,8 @@ _(CLASSIFY_SET_INTERFACE_IP_TABLE, classify_set_interface_ip_table) \ _(CLASSIFY_SET_INTERFACE_L2_TABLES, classify_set_interface_l2_tables) \ _(GET_NODE_INDEX, get_node_index) \ _(ADD_NODE_NEXT, add_node_next) \ -_(VXLAN_ADD_DEL_TUNNEL, vxlan_add_del_tunnel) \ -_(VXLAN_TUNNEL_DUMP, vxlan_tunnel_dump) \ _(L2_INTERFACE_EFP_FILTER, l2_interface_efp_filter) \ _(SHOW_VERSION, show_version) \ -_(VXLAN_GPE_ADD_DEL_TUNNEL, vxlan_gpe_add_del_tunnel) \ -_(VXLAN_GPE_TUNNEL_DUMP, vxlan_gpe_tunnel_dump) \ _(INTERFACE_NAME_RENUMBER, interface_name_renumber) \ _(WANT_IP4_ARP_EVENTS, want_ip4_arp_events) \ _(WANT_IP6_ND_EVENTS, want_ip6_nd_events) \ @@ -1436,62 +1432,6 @@ out: /* *INDENT-ON* */ } -static void vl_api_vxlan_add_del_tunnel_t_handler 
- (vl_api_vxlan_add_del_tunnel_t * mp) -{ - vl_api_vxlan_add_del_tunnel_reply_t *rmp; - int rv = 0; - vnet_vxlan_add_del_tunnel_args_t _a, *a = &_a; - u32 encap_fib_index; - uword *p; - ip4_main_t *im = &ip4_main; - vnet_main_t *vnm = vnet_get_main (); - u32 sw_if_index = ~0; - - p = hash_get (im->fib_index_by_table_id, ntohl (mp->encap_vrf_id)); - if (!p) - { - rv = VNET_API_ERROR_NO_SUCH_FIB; - goto out; - } - encap_fib_index = p[0]; - memset (a, 0, sizeof (*a)); - - a->is_add = mp->is_add; - a->is_ip6 = mp->is_ipv6; - - /* ip addresses sent in network byte order */ - ip46_from_addr_buf (mp->is_ipv6, mp->dst_address, &a->dst); - ip46_from_addr_buf (mp->is_ipv6, mp->src_address, &a->src); - - /* Check src & dst are different */ - if (ip46_address_cmp (&a->dst, &a->src) == 0) - { - rv = VNET_API_ERROR_SAME_SRC_DST; - goto out; - } - a->mcast_sw_if_index = ntohl (mp->mcast_sw_if_index); - if (ip46_address_is_multicast (&a->dst) && - pool_is_free_index (vnm->interface_main.sw_interfaces, - a->mcast_sw_if_index)) - { - rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; - goto out; - } - a->encap_fib_index = encap_fib_index; - a->decap_next_index = ntohl (mp->decap_next_index); - a->vni = ntohl (mp->vni); - rv = vnet_vxlan_add_del_tunnel (a, &sw_if_index); - -out: - /* *INDENT-OFF* */ - REPLY_MACRO2(VL_API_VXLAN_ADD_DEL_TUNNEL_REPLY, - ({ - rmp->sw_if_index = ntohl (sw_if_index); - })); - /* *INDENT-ON* */ -} - static void send_vxlan_tunnel_details (vxlan_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context) { @@ -1525,43 +1465,6 @@ static void send_vxlan_tunnel_details vl_msg_api_send_shmem (q, (u8 *) & rmp); } -static void vl_api_vxlan_tunnel_dump_t_handler - (vl_api_vxlan_tunnel_dump_t * mp) -{ - unix_shared_memory_queue_t *q; - vxlan_main_t *vxm = &vxlan_main; - vxlan_tunnel_t *t; - u32 sw_if_index; - - q = vl_api_client_index_to_input_queue (mp->client_index); - if (q == 0) - { - return; - } - - sw_if_index = ntohl (mp->sw_if_index); - - if (~0 == sw_if_index) - { - /* *INDENT-OFF* */ - pool_foreach (t, vxm->tunnels, - ({ - send_vxlan_tunnel_details(t, q, mp->context); - })); - /* *INDENT-ON* */ - } - else - { - if ((sw_if_index >= vec_len (vxm->tunnel_index_by_sw_if_index)) || - (~0 == vxm->tunnel_index_by_sw_if_index[sw_if_index])) - { - return; - } - t = &vxm->tunnels[vxm->tunnel_index_by_sw_if_index[sw_if_index]]; - send_vxlan_tunnel_details (t, q, mp->context); - } -} - static void vl_api_l2_patch_add_del_t_handler (vl_api_l2_patch_add_del_t * mp) { @@ -1585,83 +1488,6 @@ vl_api_l2_patch_add_del_t_handler (vl_api_l2_patch_add_del_t * mp) REPLY_MACRO (VL_API_L2_PATCH_ADD_DEL_REPLY); } -static void - vl_api_vxlan_gpe_add_del_tunnel_t_handler - (vl_api_vxlan_gpe_add_del_tunnel_t * mp) -{ - vl_api_vxlan_gpe_add_del_tunnel_reply_t *rmp; - int rv = 0; - vnet_vxlan_gpe_add_del_tunnel_args_t _a, *a = &_a; - u32 encap_fib_index, decap_fib_index; - u8 protocol; - uword *p; - ip4_main_t *im = &ip4_main; - u32 sw_if_index = ~0; - - - p = hash_get (im->fib_index_by_table_id, ntohl (mp->encap_vrf_id)); - if (!p) - { - rv = VNET_API_ERROR_NO_SUCH_FIB; - goto out; - } - encap_fib_index = p[0]; - - protocol = mp->protocol; - - /* Interpret decap_vrf_id as an opaque if sending to other-than-ip4-input */ - if (protocol == VXLAN_GPE_INPUT_NEXT_IP4_INPUT) - { - p = hash_get (im->fib_index_by_table_id, ntohl (mp->decap_vrf_id)); - if (!p) - { - rv = VNET_API_ERROR_NO_SUCH_INNER_FIB; - goto out; - } - decap_fib_index = p[0]; - } - else - { - decap_fib_index = ntohl (mp->decap_vrf_id); - } - - /* Check src & dst 
are different */ - if ((mp->is_ipv6 && memcmp (mp->local, mp->remote, 16) == 0) || - (!mp->is_ipv6 && memcmp (mp->local, mp->remote, 4) == 0)) - { - rv = VNET_API_ERROR_SAME_SRC_DST; - goto out; - } - memset (a, 0, sizeof (*a)); - - a->is_add = mp->is_add; - a->is_ip6 = mp->is_ipv6; - /* ip addresses sent in network byte order */ - if (a->is_ip6) - { - clib_memcpy (&(a->local.ip6), mp->local, 16); - clib_memcpy (&(a->remote.ip6), mp->remote, 16); - } - else - { - clib_memcpy (&(a->local.ip4), mp->local, 4); - clib_memcpy (&(a->remote.ip4), mp->remote, 4); - } - a->encap_fib_index = encap_fib_index; - a->decap_fib_index = decap_fib_index; - a->protocol = protocol; - a->vni = ntohl (mp->vni); - rv = vnet_vxlan_gpe_add_del_tunnel (a, &sw_if_index); - -out: - /* *INDENT-OFF* */ - REPLY_MACRO2(VL_API_VXLAN_GPE_ADD_DEL_TUNNEL_REPLY, - ({ - rmp->sw_if_index = ntohl (sw_if_index); - })); - /* *INDENT-ON* */ -} - static void send_vxlan_gpe_tunnel_details (vxlan_gpe_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context) { @@ -1696,43 +1522,6 @@ static void send_vxlan_gpe_tunnel_details vl_msg_api_send_shmem (q, (u8 *) & rmp); } -static void vl_api_vxlan_gpe_tunnel_dump_t_handler - (vl_api_vxlan_gpe_tunnel_dump_t * mp) -{ - unix_shared_memory_queue_t *q; - vxlan_gpe_main_t *vgm = &vxlan_gpe_main; - vxlan_gpe_tunnel_t *t; - u32 sw_if_index; - - q = vl_api_client_index_to_input_queue (mp->client_index); - if (q == 0) - { - return; - } - - sw_if_index = ntohl (mp->sw_if_index); - - if (~0 == sw_if_index) - { - /* *INDENT-OFF* */ - pool_foreach (t, vgm->tunnels, - ({ - send_vxlan_gpe_tunnel_details(t, q, mp->context); - })); - /* *INDENT-ON* */ - } - else - { - if ((sw_if_index >= vec_len (vgm->tunnel_index_by_sw_if_index)) || - (~0 == vgm->tunnel_index_by_sw_if_index[sw_if_index])) - { - return; - } - t = &vgm->tunnels[vgm->tunnel_index_by_sw_if_index[sw_if_index]]; - send_vxlan_gpe_tunnel_details (t, q, mp->context); - } -} - static void vl_api_interface_name_renumber_t_handler (vl_api_interface_name_renumber_t * mp) diff --git a/src/vpp/api/api_main.c b/src/vpp/api/api_main.c index 97b501e0..6ae510b1 100644 --- a/src/vpp/api/api_main.c +++ b/src/vpp/api/api_main.c @@ -48,7 +48,6 @@ api_main_init (vlib_main_t * vm) vam->vlib_main = vm; vam->my_client_index = (u32) ~ 0; init_error_string_table (vam); - vat_api_hookup (vam); rv = vat_plugin_init (vam); if (rv) clib_warning ("vat_plugin_init returned %d", rv); diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c index 5e9b0d69..c46d441a 100644 --- a/src/vpp/stats/stats.c +++ b/src/vpp/stats/stats.c @@ -46,7 +46,6 @@ stats_main_t stats_main; #define foreach_stats_msg \ _(WANT_STATS, want_stats) \ -_(WANT_STATS_REPLY, want_stats_reply) \ _(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \ _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \ _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \ @@ -1226,12 +1225,6 @@ vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp) } } -static void -vl_api_want_stats_reply_t_handler (vl_api_want_stats_reply_t * mp) -{ - clib_warning ("BUG"); -} - static void vl_api_want_stats_t_handler (vl_api_want_stats_t * mp) { -- cgit 1.2.3-korg From 1bd01099a6512b6119bbf337b36222a6f0770d49 Mon Sep 17 00:00:00 2001 From: Neale Ranns Date: Wed, 15 Mar 2017 15:41:17 -0400 Subject: 64 bit per-thread counters after: TenGigabitEthernet5/0/1-output active 107522 17375708 0 7.22e0 161.60 TenGigabitEthernet5/0/1-tx active 107522 17375708 0 6.93e1 161.60 ip4-input-no-checksum active 107522 17375708 0 2.52e1 161.60 
ip4-lookup active 107522 17375708 0 3.10e1 161.60 ip4-rewrite active 107522 17375708 0 2.52e1 161.60 before TenGigabitEthernet5/0/1-output active 433575 110995200 0 6.95e0 256.00 TenGigabitEthernet5/0/1-tx active 433575 110995200 0 7.14e1 256.00 ip4-input-no-checksum active 433575 110995200 0 2.66e1 256.00 ip4-lookup active 433575 110995200 0 3.29e1 256.00 ip4-rewrite active 433575 110995200 0 2.59e1 256.00 Change-Id: I46405bd22189f48a39f06e3443bb7e13f410b539 Signed-off-by: Neale Ranns --- src/vlib/counter.c | 66 +++++++-------- src/vlib/counter.h | 207 ++++++++++++++++------------------------------ src/vnet/ip/ip4_forward.c | 36 ++++---- src/vnet/map/map.c | 2 +- src/vnet/map/map_api.c | 2 +- src/vnet/rewrite.c | 2 +- src/vpp/api/api.c | 2 +- src/vpp/stats/stats.c | 16 ++-- 8 files changed, 132 insertions(+), 201 deletions(-) (limited to 'src/vpp/stats/stats.c') diff --git a/src/vlib/counter.c b/src/vlib/counter.c index 9f66e04d..62f4bd66 100644 --- a/src/vlib/counter.c +++ b/src/vlib/counter.c @@ -42,56 +42,36 @@ void vlib_clear_simple_counters (vlib_simple_counter_main_t * cm) { + counter_t *my_counters; uword i, j; - u16 *my_minis; - for (i = 0; i < vec_len (cm->minis); i++) + for (i = 0; i < vec_len (cm->counters); i++) { - my_minis = cm->minis[i]; + my_counters = cm->counters[i]; - for (j = 0; j < vec_len (my_minis); j++) + for (j = 0; j < vec_len (my_counters); j++) { - cm->maxi[j] += my_minis[j]; - my_minis[j] = 0; + my_counters[j] = 0; } } - - j = vec_len (cm->maxi); - if (j > 0) - vec_validate (cm->value_at_last_clear, j - 1); - for (i = 0; i < j; i++) - cm->value_at_last_clear[i] = cm->maxi[i]; } void vlib_clear_combined_counters (vlib_combined_counter_main_t * cm) { + vlib_counter_t *my_counters; uword i, j; - vlib_mini_counter_t *my_minis; - for (i = 0; i < vec_len (cm->minis); i++) + for (i = 0; i < vec_len (cm->counters); i++) { - my_minis = cm->minis[i]; + my_counters = cm->counters[i]; - for (j = 0; j < vec_len (my_minis); j++) + for (j = 0; j < vec_len (my_counters); j++) { - cm->maxi[j].packets += my_minis[j].packets; - cm->maxi[j].bytes += my_minis[j].bytes; - my_minis[j].packets = 0; - my_minis[j].bytes = 0; + my_counters[j].packets = 0; + my_counters[j].bytes = 0; } } - - j = vec_len (cm->maxi); - if (j > 0) - vec_validate (cm->value_at_last_clear, j - 1); - - for (i = 0; i < j; i++) - { - vlib_counter_t *c = vec_elt_at_index (cm->value_at_last_clear, i); - - c[0] = cm->maxi[i]; - } } void @@ -100,10 +80,9 @@ vlib_validate_simple_counter (vlib_simple_counter_main_t * cm, u32 index) vlib_thread_main_t *tm = vlib_get_thread_main (); int i; - vec_validate (cm->minis, tm->n_vlib_mains - 1); + vec_validate (cm->counters, tm->n_vlib_mains - 1); for (i = 0; i < tm->n_vlib_mains; i++) - vec_validate_aligned (cm->minis[i], index, CLIB_CACHE_LINE_BYTES); - vec_validate_aligned (cm->maxi, index, CLIB_CACHE_LINE_BYTES); + vec_validate_aligned (cm->counters[i], index, CLIB_CACHE_LINE_BYTES); } void @@ -112,10 +91,23 @@ vlib_validate_combined_counter (vlib_combined_counter_main_t * cm, u32 index) vlib_thread_main_t *tm = vlib_get_thread_main (); int i; - vec_validate (cm->minis, tm->n_vlib_mains - 1); + vec_validate (cm->counters, tm->n_vlib_mains - 1); for (i = 0; i < tm->n_vlib_mains; i++) - vec_validate_aligned (cm->minis[i], index, CLIB_CACHE_LINE_BYTES); - vec_validate_aligned (cm->maxi, index, CLIB_CACHE_LINE_BYTES); + vec_validate_aligned (cm->counters[i], index, CLIB_CACHE_LINE_BYTES); +} + +u32 +vlib_combined_counter_n_counters (const vlib_combined_counter_main_t * cm) +{ 
+ ASSERT (cm->counters); + return (vec_len (cm->counters[0])); +} + +u32 +vlib_simple_counter_n_counters (const vlib_simple_counter_main_t * cm) +{ + ASSERT (cm->counters); + return (vec_len (cm->counters[0])); } void diff --git a/src/vlib/counter.h b/src/vlib/counter.h index abfa89ee..17a85217 100644 --- a/src/vlib/counter.h +++ b/src/vlib/counter.h @@ -44,59 +44,48 @@ Optimized thread-safe counters. - Each vlib_[simple|combined]_counter_main_t consists of a single - vector of thread-safe / atomically-updated u64 counters [the - "maxi" vector], and a (u16 **) per-thread vector [the "minis" - vector] of narrow, per-thread counters. - - The idea is to drastically reduce the number of atomic operations. - In the case of packet counts, we divide the number of atomic ops - by 2**16, etc. + Each vlib_[simple|combined]_counter_main_t consists of a per-thread + vector of per-object counters. + + The idea is to drastically eliminate atomic operations. */ +/** 64bit counters */ +typedef u64 counter_t; + /** A collection of simple counters */ typedef struct { - u16 **minis; /**< Per-thread u16 non-atomic counters */ - u64 *maxi; /**< Shared wide counters */ - u64 *value_at_last_clear; /**< Counter values as of last clear. */ - u64 *value_at_last_serialize; /**< Values as of last serialize. */ + counter_t **counters; /**< Per-thread u64 non-atomic counters */ + counter_t *value_at_last_serialize; /**< Values as of last serialize. */ u32 last_incremental_serialize_index; /**< Last counter index serialized incrementally. */ char *name; /**< The counter collection's name. */ } vlib_simple_counter_main_t; +/** The number of counters (not the number of per-thread counters) */ +u32 vlib_simple_counter_n_counters (const vlib_simple_counter_main_t * cm); + /** Increment a simple counter @param cm - (vlib_simple_counter_main_t *) simple counter main pointer @param cpu_index - (u32) the current cpu index @param index - (u32) index of the counter to increment - @param increment - (u32) quantitiy to add to the counter + @param increment - (u64) quantitiy to add to the counter */ always_inline void vlib_increment_simple_counter (vlib_simple_counter_main_t * cm, - u32 cpu_index, u32 index, u32 increment) + u32 cpu_index, u32 index, u64 increment) { - u16 *my_minis; - u16 *mini; - u32 old, new; - - my_minis = cm->minis[cpu_index]; - mini = vec_elt_at_index (my_minis, index); - old = mini[0]; - new = old + increment; - mini[0] = new; + counter_t *my_counters; - if (PREDICT_FALSE (mini[0] != new)) - { - __sync_fetch_and_add (&cm->maxi[index], new); - my_minis[index] = 0; - } + my_counters = cm->counters[cpu_index]; + my_counters[index] += increment; } /** Get the value of a simple counter - Scrapes the entire set of mini counters. Innacurate unless + Scrapes the entire set of per-thread counters. 
Innacurate unless worker threads which might increment the counter are barrier-synchronized @@ -104,30 +93,21 @@ vlib_increment_simple_counter (vlib_simple_counter_main_t * cm, @param index - (u32) index of the counter to fetch @returns - (u64) current counter value */ -always_inline u64 +always_inline counter_t vlib_get_simple_counter (vlib_simple_counter_main_t * cm, u32 index) { - u16 *my_minis, *mini; - u64 v; + counter_t *my_counters; + counter_t v; int i; - ASSERT (index < vec_len (cm->maxi)); + ASSERT (index < vlib_simple_counter_n_counters (cm)); v = 0; - for (i = 0; i < vec_len (cm->minis); i++) + for (i = 0; i < vec_len (cm->counters); i++) { - my_minis = cm->minis[i]; - mini = vec_elt_at_index (my_minis, index); - v += mini[0]; - } - - v += cm->maxi[index]; - - if (index < vec_len (cm->value_at_last_clear)) - { - ASSERT (v >= cm->value_at_last_clear[index]); - v -= cm->value_at_last_clear[index]; + my_counters = cm->counters[i]; + v += my_counters[index]; } return v; @@ -142,29 +122,24 @@ vlib_get_simple_counter (vlib_simple_counter_main_t * cm, u32 index) always_inline void vlib_zero_simple_counter (vlib_simple_counter_main_t * cm, u32 index) { - u16 *my_minis; + counter_t *my_counters; int i; - ASSERT (index < vec_len (cm->maxi)); + ASSERT (index < vlib_simple_counter_n_counters (cm)); - for (i = 0; i < vec_len (cm->minis); i++) + for (i = 0; i < vec_len (cm->counters); i++) { - my_minis = cm->minis[i]; - my_minis[index] = 0; + my_counters = cm->counters[i]; + my_counters[index] = 0; } - - cm->maxi[index] = 0; - - if (index < vec_len (cm->value_at_last_clear)) - cm->value_at_last_clear[index] = 0; } /** Combined counter to hold both packets and byte differences. */ typedef struct { - u64 packets; /**< packet counter */ - u64 bytes; /**< byte counter */ + counter_t packets; /**< packet counter */ + counter_t bytes; /**< byte counter */ } vlib_counter_t; /** Add two combined counters, results in the first counter @@ -201,24 +176,19 @@ vlib_counter_zero (vlib_counter_t * a) a->packets = a->bytes = 0; } -/** Mini combined counter */ -typedef struct -{ - u16 packets; /**< Packet count */ - i16 bytes; /**< Byte count */ -} vlib_mini_counter_t; - /** A collection of combined counters */ typedef struct { - vlib_mini_counter_t **minis; /**< Per-thread u16 non-atomic counter pairs */ - vlib_counter_t *maxi; /**< Shared wide counter pairs */ - vlib_counter_t *value_at_last_clear; /**< Counter values as of last clear. */ + vlib_counter_t **counters; /**< Per-thread u64 non-atomic counter pairs */ vlib_counter_t *value_at_last_serialize; /**< Counter values as of last serialize. */ u32 last_incremental_serialize_index; /**< Last counter index serialized incrementally. */ char *name; /**< The counter collection's name. 
*/ } vlib_combined_counter_main_t; +/** The number of counters (not the number of per-thread counters) */ +u32 vlib_combined_counter_n_counters (const vlib_combined_counter_main_t * + cm); + /** Clear a collection of simple counters @param cm - (vlib_simple_counter_main_t *) collection to clear */ @@ -233,62 +203,41 @@ void vlib_clear_combined_counters (vlib_combined_counter_main_t * cm); @param cm - (vlib_combined_counter_main_t *) comined counter main pointer @param cpu_index - (u32) the current cpu index @param index - (u32) index of the counter to increment - @param packet_increment - (u32) number of packets to add to the counter - @param byte_increment - (u32) number of bytes to add to the counter + @param packet_increment - (u64) number of packets to add to the counter + @param byte_increment - (u64) number of bytes to add to the counter */ always_inline void vlib_increment_combined_counter (vlib_combined_counter_main_t * cm, u32 cpu_index, - u32 index, - u32 packet_increment, u32 byte_increment) + u32 index, u64 n_packets, u64 n_bytes) { - vlib_mini_counter_t *my_minis, *mini; - u32 old_packets, new_packets; - i32 old_bytes, new_bytes; - - /* Use this CPU's mini counter array */ - my_minis = cm->minis[cpu_index]; - - mini = vec_elt_at_index (my_minis, index); - old_packets = mini->packets; - old_bytes = mini->bytes; - - new_packets = old_packets + packet_increment; - new_bytes = old_bytes + byte_increment; - - mini->packets = new_packets; - mini->bytes = new_bytes; - - /* Bytes always overflow before packets.. */ - if (PREDICT_FALSE (mini->bytes != new_bytes)) - { - vlib_counter_t *maxi = vec_elt_at_index (cm->maxi, index); + vlib_counter_t *my_counters; - __sync_fetch_and_add (&maxi->packets, new_packets); - __sync_fetch_and_add (&maxi->bytes, new_bytes); + /* Use this CPU's counter array */ + my_counters = cm->counters[cpu_index]; - mini->packets = 0; - mini->bytes = 0; - } + my_counters[index].packets += n_packets; + my_counters[index].bytes += n_bytes; } -#define vlib_prefetch_combined_counter(_cm, _cpu_index, _index) \ -{ \ - vlib_mini_counter_t *_cpu_minis; \ - \ - /* \ - * This CPU's mini index is assumed to already be in cache \ - */ \ - _cpu_minis = (_cm)->minis[(_cpu_index)]; \ - CLIB_PREFETCH(_cpu_minis + (_index), \ - sizeof(*_cpu_minis), \ - STORE); \ +/** Pre-fetch a per-thread combined counter for the given object index */ +always_inline void +vlib_prefetch_combined_counter (const vlib_combined_counter_main_t * cm, + u32 cpu_index, u32 index) +{ + vlib_counter_t *cpu_counters; + + /* + * This CPU's index is assumed to already be in cache + */ + cpu_counters = cm->counters[cpu_index]; + CLIB_PREFETCH (cpu_counters + index, CLIB_CACHE_LINE_BYTES, STORE); } /** Get the value of a combined counter, never called in the speed path - Scrapes the entire set of mini counters. Innacurate unless + Scrapes the entire set of per-thread counters. 
Innacurate unless worker threads which might increment the counter are barrier-synchronized @@ -298,35 +247,27 @@ vlib_increment_combined_counter (vlib_combined_counter_main_t * cm, */ static inline void -vlib_get_combined_counter (vlib_combined_counter_main_t * cm, +vlib_get_combined_counter (const vlib_combined_counter_main_t * cm, u32 index, vlib_counter_t * result) { - vlib_mini_counter_t *my_minis, *mini; - vlib_counter_t *maxi; + vlib_counter_t *my_counters, *counter; int i; result->packets = 0; result->bytes = 0; - for (i = 0; i < vec_len (cm->minis); i++) + for (i = 0; i < vec_len (cm->counters); i++) { - my_minis = cm->minis[i]; + my_counters = cm->counters[i]; - mini = vec_elt_at_index (my_minis, index); - result->packets += mini->packets; - result->bytes += mini->bytes; + counter = vec_elt_at_index (my_counters, index); + result->packets += counter->packets; + result->bytes += counter->bytes; } - - maxi = vec_elt_at_index (cm->maxi, index); - result->packets += maxi->packets; - result->bytes += maxi->bytes; - - if (index < vec_len (cm->value_at_last_clear)) - vlib_counter_sub (result, &cm->value_at_last_clear[index]); } /** Clear a combined counter - Clears the set of per-thread u16 counters, and the shared vlib_counter_t + Clears the set of per-thread counters. @param cm - (vlib_combined_counter_main_t *) combined counter main pointer @param index - (u32) index of the counter to clear @@ -334,21 +275,17 @@ vlib_get_combined_counter (vlib_combined_counter_main_t * cm, always_inline void vlib_zero_combined_counter (vlib_combined_counter_main_t * cm, u32 index) { - vlib_mini_counter_t *mini, *my_minis; + vlib_counter_t *my_counters, *counter; int i; - for (i = 0; i < vec_len (cm->minis); i++) + for (i = 0; i < vec_len (cm->counters); i++) { - my_minis = cm->minis[i]; + my_counters = cm->counters[i]; - mini = vec_elt_at_index (my_minis, index); - mini->packets = 0; - mini->bytes = 0; + counter = vec_elt_at_index (my_counters, index); + counter->packets = 0; + counter->bytes = 0; } - - vlib_counter_zero (&cm->maxi[index]); - if (index < vec_len (cm->value_at_last_clear)) - vlib_counter_zero (&cm->value_at_last_clear[index]); } /** validate a simple counter diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c index 0dad61d4..7352c2e7 100644 --- a/src/vnet/ip/ip4_forward.c +++ b/src/vnet/ip/ip4_forward.c @@ -2375,6 +2375,17 @@ ip4_rewrite_inline (vlib_main_t * vm, adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX]; adj_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX]; + /* + * pre-fetch the per-adjacency counters + */ + if (do_counters) + { + vlib_prefetch_combined_counter (&adjacency_counters, + cpu_index, adj_index0); + vlib_prefetch_combined_counter (&adjacency_counters, + cpu_index, adj_index1); + } + /* We should never rewrite a pkt using the MISS adjacency */ ASSERT (adj_index0 && adj_index1); @@ -2480,17 +2491,6 @@ ip4_rewrite_inline (vlib_main_t * vm, rewrite_header.max_l3_packet_bytes ? 
IP4_ERROR_MTU_EXCEEDED : error1); - /* - * pre-fetch the per-adjacency counters - */ - if (do_counters) - { - vlib_prefetch_combined_counter (&adjacency_counters, - cpu_index, adj_index0); - vlib_prefetch_combined_counter (&adjacency_counters, - cpu_index, adj_index1); - } - /* Don't adjust the buffer for ttl issue; icmp-error node wants * to see the IP headerr */ if (PREDICT_TRUE (error0 == IP4_ERROR_NONE)) @@ -2624,8 +2624,9 @@ ip4_rewrite_inline (vlib_main_t * vm, p0->flags &= ~VNET_BUFFER_LOCALLY_ORIGINATED; } - vlib_prefetch_combined_counter (&adjacency_counters, - cpu_index, adj_index0); + if (do_counters) + vlib_prefetch_combined_counter (&adjacency_counters, + cpu_index, adj_index0); /* Guess we are only writing on simple Ethernet header. */ vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t)); @@ -2641,10 +2642,11 @@ ip4_rewrite_inline (vlib_main_t * vm, rw_len0 = adj0[0].rewrite_header.data_bytes; vnet_buffer (p0)->ip.save_rewrite_length = rw_len0; - vlib_increment_combined_counter - (&adjacency_counters, - cpu_index, - adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); + if (do_counters) + vlib_increment_combined_counter + (&adjacency_counters, + cpu_index, adj_index0, 1, + vlib_buffer_length_in_chain (vm, p0) + rw_len0); /* Check MTU of outgoing interface. */ error0 = (vlib_buffer_length_in_chain (vm, p0) diff --git a/src/vnet/map/map.c b/src/vnet/map/map.c index 7006b1db..99305afa 100644 --- a/src/vnet/map/map.c +++ b/src/vnet/map/map.c @@ -1304,7 +1304,7 @@ show_map_stats_command_fn (vlib_main_t * vm, unformat_input_t * input, { which = cm - mm->domain_counters; - for (i = 0; i < vec_len (cm->maxi); i++) + for (i = 0; i < vlib_combined_counter_n_counters (cm); i++) { vlib_get_combined_counter (cm, i, &v); total_pkts[which] += v.packets; diff --git a/src/vnet/map/map_api.c b/src/vnet/map/map_api.c index 7febeb3d..d618e7a6 100644 --- a/src/vnet/map/map_api.c +++ b/src/vnet/map/map_api.c @@ -211,7 +211,7 @@ vl_api_map_summary_stats_t_handler (vl_api_map_summary_stats_t * mp) { which = cm - mm->domain_counters; - for (i = 0; i < vec_len (cm->maxi); i++) + for (i = 0; i < vlib_combined_counter_n_counters (cm); i++) { vlib_get_combined_counter (cm, i, &v); total_pkts[which] += v.packets; diff --git a/src/vnet/rewrite.c b/src/vnet/rewrite.c index c4a171c1..47fb74df 100644 --- a/src/vnet/rewrite.c +++ b/src/vnet/rewrite.c @@ -79,7 +79,7 @@ format_vnet_rewrite (u8 * s, va_list * args) if (NULL != si) s = format (s, "%U: ", format_vnet_sw_interface_name, vnm, si); else - s = format (s, "DELETED"); + s = format (s, "DELETED:%d", rw->sw_if_index); } /* Format rewrite string. */ diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c index d8301fa6..8df40406 100644 --- a/src/vpp/api/api.c +++ b/src/vpp/api/api.c @@ -849,7 +849,7 @@ vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp) { which = cm - im->combined_sw_if_counters; - for (i = 0; i < vec_len (cm->maxi); i++) + for (i = 0; i < vlib_combined_counter_n_counters (cm); i++) { vlib_get_combined_counter (cm, i, &v); total_pkts[which] += v.packets; diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c index c46d441a..1927da0b 100644 --- a/src/vpp/stats/stats.c +++ b/src/vpp/stats/stats.c @@ -134,7 +134,7 @@ do_simple_interface_counters (stats_main_t * sm) vlib_simple_counter_main_t *cm; u32 items_this_message = 0; u64 v, *vp = 0; - int i; + int i, n_counts; /* * Prevent interface registration from expanding / moving the vectors... 
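
The new layout drops the old mini/maxi split entirely: every thread owns a full-width counter per object, the hot path is a plain non-atomic add on the calling thread's own slot, and readers scrape all threads, which is exact only when workers are barrier-synchronized, as the counter.h comments note. A compact sketch of the scheme, with types and names simplified from the vlib originals:

#include <stdint.h>

typedef uint64_t counter_t;

typedef struct
{
  counter_t **counters;         /* counters[thread][object], non-atomic */
  uint32_t n_threads;
} simple_counter_main_t;

/* Hot path: one non-atomic add on this thread's slot; no
 * __sync_fetch_and_add and no overflow spill into a shared maxi. */
static inline void
counter_inc (simple_counter_main_t * cm, uint32_t thread_index,
             uint32_t object_index, uint64_t increment)
{
  cm->counters[thread_index][object_index] += increment;
}

/* Slow path: sum every thread's slot for one object. Inaccurate
 * unless worker threads are barrier-synchronized while reading. */
static inline counter_t
counter_get (const simple_counter_main_t * cm, uint32_t object_index)
{
  counter_t v = 0;
  uint32_t t;
  for (t = 0; t < cm->n_threads; t++)
    v += cm->counters[t][object_index];
  return v;
}

The per-packet saving shows up in the node timings quoted in the commit message: ip4-rewrite falls from 2.59e1 to 2.52e1 clocks per packet once the atomic spill path is gone.
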
@@ -144,13 +144,13 @@ do_simple_interface_counters (stats_main_t * sm) vec_foreach (cm, im->sw_if_counters) { - - for (i = 0; i < vec_len (cm->maxi); i++) + n_counts = vlib_simple_counter_n_counters (cm); + for (i = 0; i < n_counts; i++) { if (mp == 0) { items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE, - vec_len (cm->maxi) - i); + n_counts - i); mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + items_this_message * sizeof (v)); @@ -189,19 +189,19 @@ do_combined_interface_counters (stats_main_t * sm) vlib_combined_counter_main_t *cm; u32 items_this_message = 0; vlib_counter_t v, *vp = 0; - int i; + int i, n_counts; vnet_interface_counter_lock (im); vec_foreach (cm, im->combined_sw_if_counters) { - - for (i = 0; i < vec_len (cm->maxi); i++) + n_counts = vlib_combined_counter_n_counters (cm); + for (i = 0; i < n_counts; i++) { if (mp == 0) { items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE, - vec_len (cm->maxi) - i); + n_counts - i); mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + items_this_message * sizeof (v)); -- cgit 1.2.3-korg From a3af337e06a79f7d1dacf42a319f241c907122fc Mon Sep 17 00:00:00 2001 From: Neale Ranns Date: Tue, 28 Mar 2017 03:49:52 -0700 Subject: MTRIE Optimisations 2 1) 16-8-8 stride. Reduce trie depth walk traded with increased memory in the top PLY. 2) separate the vector of protocol-independent (PI) fib_table_t with the vector of protocol dependent (PD) FIBs. PD FIBs are large structures, we don't want to burn the memory for ech PD type 3) Go straight to the PD FIB in the data-path thus avoiding an indirection through, e.g., a PLY pool. Change-Id: I800d1ed0b2049040d5da95213f3ed6b12bdd78b7 Signed-off-by: Neale Ranns --- src/vnet/cop/ip4_whitelist.c | 9 - src/vnet/dpo/load_balance.c | 6 +- src/vnet/dpo/lookup_dpo.c | 4 - src/vnet/fib/fib.c | 2 + src/vnet/fib/fib_entry.c | 4 +- src/vnet/fib/fib_path.c | 1 + src/vnet/fib/fib_table.c | 24 +- src/vnet/fib/fib_table.h | 12 - src/vnet/fib/fib_test.c | 6 +- src/vnet/fib/ip4_fib.c | 60 +++- src/vnet/fib/ip4_fib.h | 35 ++- src/vnet/fib/ip6_fib.c | 18 +- src/vnet/fib/ip6_fib.h | 2 +- src/vnet/fib/mpls_fib.c | 21 +- src/vnet/fib/mpls_fib.h | 28 +- src/vnet/ip/ip4.h | 29 +- src/vnet/ip/ip4_forward.c | 21 -- src/vnet/ip/ip4_mtrie.c | 611 +++++++++++++++++++++++++++-------------- src/vnet/ip/ip4_mtrie.h | 106 +++++-- src/vnet/ip/ip4_packet.h | 1 + src/vnet/ip/ip4_source_check.c | 8 - src/vnet/ip/ip6.h | 3 + src/vnet/ip/ip_api.c | 43 ++- src/vnet/mpls/interface.c | 1 + src/vnet/mpls/mpls.h | 26 +- src/vnet/mpls/mpls_api.c | 38 ++- src/vpp/api/api.c | 6 +- src/vpp/stats/stats.c | 8 +- 28 files changed, 720 insertions(+), 413 deletions(-) (limited to 'src/vpp/stats/stats.c') diff --git a/src/vnet/cop/ip4_whitelist.c b/src/vnet/cop/ip4_whitelist.c index ccb9dc03..6ef3d7d7 100644 --- a/src/vnet/cop/ip4_whitelist.c +++ b/src/vnet/cop/ip4_whitelist.c @@ -127,9 +127,6 @@ ip4_cop_whitelist_node_fn (vlib_main_t * vm, leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, &ip0->src_address); - leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, - &ip0->src_address, 1); - leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 2); @@ -166,9 +163,6 @@ ip4_cop_whitelist_node_fn (vlib_main_t * vm, leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, &ip1->src_address); - leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, - &ip1->src_address, 1); - leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, &ip1->src_address, 2); @@ -263,9 +257,6 @@ ip4_cop_whitelist_node_fn (vlib_main_t * vm, leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, 
&ip0->src_address); - leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, - &ip0->src_address, 1); - leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 2); diff --git a/src/vnet/dpo/load_balance.c b/src/vnet/dpo/load_balance.c index d5e98e4e..6b0eda0e 100644 --- a/src/vnet/dpo/load_balance.c +++ b/src/vnet/dpo/load_balance.c @@ -827,14 +827,18 @@ const static char* const * const load_balance_nodes[DPO_PROTO_NUM] = void load_balance_module_init (void) { + index_t lbi; + dpo_register(DPO_LOAD_BALANCE, &lb_vft, load_balance_nodes); /* * Special LB with index zero. we need to define this since the v4 mtrie * assumes an index of 0 implies the ply is empty. therefore all 'real' * adjs need a non-zero index. + * This should never be used, but just in case, stack it on a drop. */ - load_balance_create(0, DPO_PROTO_IP4, 0); + lbi = load_balance_create(1, DPO_PROTO_IP4, 0); + load_balance_set_bucket(lbi, 0, drop_dpo_get(DPO_PROTO_IP4)); load_balance_map_module_init(); } diff --git a/src/vnet/dpo/lookup_dpo.c b/src/vnet/dpo/lookup_dpo.c index 3726c8fe..e94e871c 100644 --- a/src/vnet/dpo/lookup_dpo.c +++ b/src/vnet/dpo/lookup_dpo.c @@ -211,7 +211,6 @@ ip4_src_fib_lookup_one (u32 src_fib_index0, mtrie0 = &ip4_fib_get (src_fib_index0)->mtrie; leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, addr0); - leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 1); leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 2); leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 3); @@ -235,9 +234,6 @@ ip4_src_fib_lookup_two (u32 src_fib_index0, leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, addr0); leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, addr1); - leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 1); - leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 1); - leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 2); leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 2); diff --git a/src/vnet/fib/fib.c b/src/vnet/fib/fib.c index 413f93e8..b430e113 100644 --- a/src/vnet/fib/fib.c +++ b/src/vnet/fib/fib.c @@ -28,6 +28,8 @@ fib_module_init (vlib_main_t * vm) return (error); if ((error = vlib_call_init_function (vm, adj_module_init))) return (error); + if ((error = vlib_call_init_function (vm, ip4_mtrie_module_init))) + return (error); fib_entry_module_init(); fib_entry_src_module_init(); diff --git a/src/vnet/fib/fib_entry.c b/src/vnet/fib/fib_entry.c index 25005e11..6ac5461d 100644 --- a/src/vnet/fib/fib_entry.c +++ b/src/vnet/fib/fib_entry.c @@ -924,10 +924,10 @@ fib_entry_path_remove (fib_node_index_t fib_entry_index, /* * no more sources left. this entry is toast. */ - fib_entry_src_action_uninstall(fib_entry); fib_entry = fib_entry_post_flag_update_actions(fib_entry, source, bflags); + fib_entry_src_action_uninstall(fib_entry); return (FIB_ENTRY_SRC_FLAG_NONE); } @@ -1014,10 +1014,10 @@ fib_entry_special_remove (fib_node_index_t fib_entry_index, /* * no more sources left. this entry is toast. 
*/ - fib_entry_src_action_uninstall(fib_entry); fib_entry = fib_entry_post_flag_update_actions(fib_entry, source, bflags); + fib_entry_src_action_uninstall(fib_entry); return (FIB_ENTRY_SRC_FLAG_NONE); } diff --git a/src/vnet/fib/fib_path.c b/src/vnet/fib/fib_path.c index 3ed309f3..928a9d43 100644 --- a/src/vnet/fib/fib_path.c +++ b/src/vnet/fib/fib_path.c @@ -32,6 +32,7 @@ #include #include #include +#include /** * Enurmeration of path types diff --git a/src/vnet/fib/fib_table.c b/src/vnet/fib/fib_table.c index 7818d02e..6c3162e7 100644 --- a/src/vnet/fib/fib_table.c +++ b/src/vnet/fib/fib_table.c @@ -47,7 +47,7 @@ fib_table_lookup_i (fib_table_t *fib_table, switch (prefix->fp_proto) { case FIB_PROTOCOL_IP4: - return (ip4_fib_table_lookup(&fib_table->v4, + return (ip4_fib_table_lookup(ip4_fib_get(fib_table->ft_index), &prefix->fp_addr.ip4, prefix->fp_len)); case FIB_PROTOCOL_IP6: @@ -55,7 +55,7 @@ fib_table_lookup_i (fib_table_t *fib_table, &prefix->fp_addr.ip6, prefix->fp_len)); case FIB_PROTOCOL_MPLS: - return (mpls_fib_table_lookup(&fib_table->mpls, + return (mpls_fib_table_lookup(mpls_fib_get(fib_table->ft_index), prefix->fp_label, prefix->fp_eos)); } @@ -76,7 +76,7 @@ fib_table_lookup_exact_match_i (const fib_table_t *fib_table, switch (prefix->fp_proto) { case FIB_PROTOCOL_IP4: - return (ip4_fib_table_lookup_exact_match(&fib_table->v4, + return (ip4_fib_table_lookup_exact_match(ip4_fib_get(fib_table->ft_index), &prefix->fp_addr.ip4, prefix->fp_len)); case FIB_PROTOCOL_IP6: @@ -84,7 +84,7 @@ fib_table_lookup_exact_match_i (const fib_table_t *fib_table, &prefix->fp_addr.ip6, prefix->fp_len)); case FIB_PROTOCOL_MPLS: - return (mpls_fib_table_lookup(&fib_table->mpls, + return (mpls_fib_table_lookup(mpls_fib_get(fib_table->ft_index), prefix->fp_label, prefix->fp_eos)); } @@ -148,7 +148,7 @@ fib_table_entry_remove (fib_table_t *fib_table, switch (prefix->fp_proto) { case FIB_PROTOCOL_IP4: - ip4_fib_table_entry_remove(&fib_table->v4, + ip4_fib_table_entry_remove(ip4_fib_get(fib_table->ft_index), &prefix->fp_addr.ip4, prefix->fp_len); break; @@ -158,7 +158,7 @@ fib_table_entry_remove (fib_table_t *fib_table, prefix->fp_len); break; case FIB_PROTOCOL_MPLS: - mpls_fib_table_entry_remove(&fib_table->mpls, + mpls_fib_table_entry_remove(mpls_fib_get(fib_table->ft_index), prefix->fp_label, prefix->fp_eos); break; @@ -208,7 +208,7 @@ fib_table_entry_insert (fib_table_t *fib_table, switch (prefix->fp_proto) { case FIB_PROTOCOL_IP4: - ip4_fib_table_entry_insert(&fib_table->v4, + ip4_fib_table_entry_insert(ip4_fib_get(fib_table->ft_index), &prefix->fp_addr.ip4, prefix->fp_len, fib_entry_index); @@ -220,7 +220,7 @@ fib_table_entry_insert (fib_table_t *fib_table, fib_entry_index); break; case FIB_PROTOCOL_MPLS: - mpls_fib_table_entry_insert(&fib_table->mpls, + mpls_fib_table_entry_insert(mpls_fib_get(fib_table->ft_index), prefix->fp_label, prefix->fp_eos, fib_entry_index); @@ -270,7 +270,9 @@ fib_table_fwding_dpo_remove (u32 fib_index, return (ip4_fib_table_fwding_dpo_remove(ip4_fib_get(fib_index), &prefix->fp_addr.ip4, prefix->fp_len, - dpo)); + dpo, + fib_table_get_less_specific(fib_index, + prefix))); case FIB_PROTOCOL_IP6: return (ip6_fib_table_fwding_dpo_remove(fib_index, &prefix->fp_addr.ip6, @@ -1034,13 +1036,13 @@ fib_table_destroy (fib_table_t *fib_table) switch (fib_table->ft_proto) { case FIB_PROTOCOL_IP4: - ip4_fib_table_destroy(&fib_table->v4); + ip4_fib_table_destroy(fib_table->ft_index); break; case FIB_PROTOCOL_IP6: ip6_fib_table_destroy(fib_table->ft_index); break; case 
FIB_PROTOCOL_MPLS: - mpls_fib_table_destroy(&fib_table->mpls); + mpls_fib_table_destroy(fib_table->ft_index); break; } } diff --git a/src/vnet/fib/fib_table.h b/src/vnet/fib/fib_table.h index e7e66acb..b310aea6 100644 --- a/src/vnet/fib/fib_table.h +++ b/src/vnet/fib/fib_table.h @@ -28,18 +28,6 @@ */ typedef struct fib_table_t_ { - /** - * A union of the protocol specific FIBs that provide the - * underlying LPM mechanism. - * This element is first in the struct so that it is in the - * first cache line. - */ - union { - ip4_fib_t v4; - ip6_fib_t v6; - mpls_fib_t mpls; - }; - /** * Which protocol this table serves. Used to switch on the union above. */ diff --git a/src/vnet/fib/fib_test.c b/src/vnet/fib/fib_test.c index 1a9cce24..92141ddf 100644 --- a/src/vnet/fib/fib_test.c +++ b/src/vnet/fib/fib_test.c @@ -40,8 +40,6 @@ fformat(stderr, "FAIL:%d: " _comment "\n", \ __LINE__, ##_args); \ } else { \ - fformat(stderr, "PASS:%d: " _comment "\n", \ - __LINE__, ##_args); \ } \ _evald; \ }) @@ -5727,7 +5725,7 @@ fib_test_label (void) &a_o_10_10_11_1, &adj_o_10_10_11_2), "1.1.1.1/32 LB 2 buckets via: " - "adj over 10.10.11.1", + "adj over 10.10.11.1, " "adj-v4 over 10.10.11.2"); fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID, @@ -5738,7 +5736,7 @@ fib_test_label (void) &a_o_10_10_11_1, &adj_o_10_10_11_2), "24001/eos LB 2 buckets via: " - "adj over 10.10.11.1", + "adj over 10.10.11.1, " "adj-v4 over 10.10.11.2"); fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID, diff --git a/src/vnet/fib/ip4_fib.c b/src/vnet/fib/ip4_fib.c index a7915620..98d4e52f 100644 --- a/src/vnet/fib/ip4_fib.c +++ b/src/vnet/fib/ip4_fib.c @@ -104,29 +104,35 @@ static u32 ip4_create_fib_with_table_id (u32 table_id) { fib_table_t *fib_table; + ip4_fib_t *v4_fib; pool_get_aligned(ip4_main.fibs, fib_table, CLIB_CACHE_LINE_BYTES); memset(fib_table, 0, sizeof(*fib_table)); + pool_get_aligned(ip4_main.v4_fibs, v4_fib, CLIB_CACHE_LINE_BYTES); + + ASSERT((fib_table - ip4_main.fibs) == + (v4_fib - ip4_main.v4_fibs)); + fib_table->ft_proto = FIB_PROTOCOL_IP4; fib_table->ft_index = - fib_table->v4.index = + v4_fib->index = (fib_table - ip4_main.fibs); hash_set (ip4_main.fib_index_by_table_id, table_id, fib_table->ft_index); fib_table->ft_table_id = - fib_table->v4.table_id = + v4_fib->table_id = table_id; fib_table->ft_flow_hash_config = - fib_table->v4.flow_hash_config = + v4_fib->flow_hash_config = IP_FLOW_HASH_DEFAULT; - fib_table->v4.fwd_classify_table_index = ~0; - fib_table->v4.rev_classify_table_index = ~0; + v4_fib->fwd_classify_table_index = ~0; + v4_fib->rev_classify_table_index = ~0; fib_table_lock(fib_table->ft_index, FIB_PROTOCOL_IP4); - ip4_mtrie_init(&fib_table->v4.mtrie); + ip4_mtrie_init(&v4_fib->mtrie); /* * add the special entries into the new FIB @@ -151,9 +157,10 @@ ip4_create_fib_with_table_id (u32 table_id) } void -ip4_fib_table_destroy (ip4_fib_t *fib) +ip4_fib_table_destroy (u32 fib_index) { - fib_table_t *fib_table = (fib_table_t*)fib; + fib_table_t *fib_table = pool_elt_at_index(ip4_main.fibs, fib_index); + ip4_fib_t *v4_fib = pool_elt_at_index(ip4_main.v4_fibs, fib_index); int ii; /* @@ -185,6 +192,10 @@ ip4_fib_table_destroy (ip4_fib_t *fib) { hash_unset (ip4_main.fib_index_by_table_id, fib_table->ft_table_id); } + + ip4_mtrie_free(&v4_fib->mtrie); + + pool_put(ip4_main.v4_fibs, v4_fib); pool_put(ip4_main.fibs, fib_table); } @@ -367,16 +378,33 @@ ip4_fib_table_fwding_dpo_update (ip4_fib_t *fib, u32 len, const dpo_id_t *dpo) { - ip4_fib_mtrie_add_del_route(fib, *addr, len, dpo->dpoi_index, 0); // ADD + 
ip4_fib_mtrie_route_add(&fib->mtrie, addr, len, dpo->dpoi_index); } void ip4_fib_table_fwding_dpo_remove (ip4_fib_t *fib, const ip4_address_t *addr, u32 len, - const dpo_id_t *dpo) + const dpo_id_t *dpo, + u32 cover_index) { - ip4_fib_mtrie_add_del_route(fib, *addr, len, dpo->dpoi_index, 1); // DELETE + fib_prefix_t cover_prefix = { + .fp_len = 0, + }; + const dpo_id_t *cover_dpo; + + /* + * We need to pass the MTRIE the LB index and address length of the + * covering prefix, so it can fill the plys with the correct replacement + * for the entry being removed + */ + fib_entry_get_prefix(cover_index, &cover_prefix); + cover_dpo = fib_entry_contribute_ip_forwarding(cover_index); + + ip4_fib_mtrie_route_del(&fib->mtrie, + addr, len, dpo->dpoi_index, + cover_prefix.fp_len, + cover_dpo->dpoi_index); } void @@ -498,7 +526,7 @@ ip4_show_fib (vlib_main_t * vm, pool_foreach (fib_table, im4->fibs, ({ - ip4_fib_t *fib = &fib_table->v4; + ip4_fib_t *fib = pool_elt_at_index(im4->v4_fibs, fib_table->ft_index); if (table_id >= 0 && table_id != (int)fib->table_id) continue; @@ -523,6 +551,11 @@ ip4_show_fib (vlib_main_t * vm, } continue; } + if (mtrie) + { + vlib_cli_output (vm, "%U", format_ip4_fib_mtrie, &fib->mtrie); + continue; + } if (!matching) { @@ -532,9 +565,6 @@ ip4_show_fib (vlib_main_t * vm, { ip4_fib_table_show_one(fib, vm, &matching_address, matching_mask); } - - if (mtrie) - vlib_cli_output (vm, "%U", format_ip4_fib_mtrie, &fib->mtrie); })); return 0; diff --git a/src/vnet/fib/ip4_fib.h b/src/vnet/fib/ip4_fib.h index 243fd77f..4cf9e58a 100644 --- a/src/vnet/fib/ip4_fib.h +++ b/src/vnet/fib/ip4_fib.h @@ -34,6 +34,33 @@ #include #include #include +#include + +typedef struct ip4_fib_t_ +{ + /** + * Mtrie for fast lookups. Hash is used to maintain overlapping prefixes. + * First member so it's in the first cacheline. + */ + ip4_fib_mtrie_t mtrie; + + /* Hash table for each prefix length mapping. */ + uword *fib_entry_by_dst_address[33]; + + /* Table ID (hash key) for this FIB. */ + u32 table_id; + + /* Index into FIB vector. 
*/ + u32 index; + + /* flow hash configuration */ + flow_hash_config_t flow_hash_config; + + /* N-tuple classifier indices */ + u32 fwd_classify_table_index; + u32 rev_classify_table_index; + +} ip4_fib_t; extern fib_node_index_t ip4_fib_table_lookup(const ip4_fib_t *fib, const ip4_address_t *addr, @@ -50,7 +77,7 @@ extern void ip4_fib_table_entry_insert(ip4_fib_t *fib, const ip4_address_t *addr, u32 len, fib_node_index_t fib_entry_index); -extern void ip4_fib_table_destroy(ip4_fib_t *fib); +extern void ip4_fib_table_destroy(u32 fib_index); extern void ip4_fib_table_fwding_dpo_update(ip4_fib_t *fib, const ip4_address_t *addr, @@ -60,7 +87,8 @@ extern void ip4_fib_table_fwding_dpo_update(ip4_fib_t *fib, extern void ip4_fib_table_fwding_dpo_remove(ip4_fib_t *fib, const ip4_address_t *addr, u32 len, - const dpo_id_t *dpo); + const dpo_id_t *dpo, + fib_node_index_t cover_index); extern u32 ip4_fib_table_lookup_lb (ip4_fib_t *fib, const ip4_address_t * dst); @@ -79,7 +107,7 @@ extern void ip4_fib_table_walk(ip4_fib_t *fib, static inline ip4_fib_t * ip4_fib_get (u32 index) { - return (&(pool_elt_at_index(ip4_main.fibs, index)->v4)); + return (pool_elt_at_index(ip4_main.v4_fibs, index)); } always_inline u32 @@ -134,7 +162,6 @@ ip4_fib_forwarding_lookup (u32 fib_index, mtrie = &ip4_fib_get(fib_index)->mtrie; leaf = ip4_fib_mtrie_lookup_step_one (mtrie, addr); - leaf = ip4_fib_mtrie_lookup_step (mtrie, leaf, addr, 1); leaf = ip4_fib_mtrie_lookup_step (mtrie, leaf, addr, 2); leaf = ip4_fib_mtrie_lookup_step (mtrie, leaf, addr, 3); diff --git a/src/vnet/fib/ip6_fib.c b/src/vnet/fib/ip6_fib.c index 343ff55e..0ee029d3 100644 --- a/src/vnet/fib/ip6_fib.c +++ b/src/vnet/fib/ip6_fib.c @@ -55,22 +55,29 @@ static u32 create_fib_with_table_id (u32 table_id) { fib_table_t *fib_table; + ip6_fib_t *v6_fib; pool_get_aligned(ip6_main.fibs, fib_table, CLIB_CACHE_LINE_BYTES); + pool_get_aligned(ip6_main.v6_fibs, v6_fib, CLIB_CACHE_LINE_BYTES); + memset(fib_table, 0, sizeof(*fib_table)); + memset(v6_fib, 0, sizeof(*v6_fib)); + ASSERT((fib_table - ip6_main.fibs) == + (v6_fib - ip6_main.v6_fibs)); + fib_table->ft_proto = FIB_PROTOCOL_IP6; fib_table->ft_index = - fib_table->v6.index = - (fib_table - ip6_main.fibs); + v6_fib->index = + (fib_table - ip6_main.fibs); hash_set(ip6_main.fib_index_by_table_id, table_id, fib_table->ft_index); fib_table->ft_table_id = - fib_table->v6.table_id = + v6_fib->table_id = table_id; fib_table->ft_flow_hash_config = - fib_table->v6.flow_hash_config = + v6_fib->flow_hash_config = IP_FLOW_HASH_DEFAULT; vnet_ip6_fib_init(fib_table->ft_index); @@ -188,6 +195,7 @@ ip6_fib_table_destroy (u32 fib_index) { hash_unset (ip6_main.fib_index_by_table_id, fib_table->ft_table_id); } + pool_put_index(ip6_main.v6_fibs, fib_table->ft_index); pool_put(ip6_main.fibs, fib_table); } @@ -620,7 +628,7 @@ ip6_show_fib (vlib_main_t * vm, pool_foreach (fib_table, im6->fibs, ({ - fib = &(fib_table->v6); + fib = pool_elt_at_index(im6->v6_fibs, fib_table->ft_index); if (table_id >= 0 && table_id != (int)fib->table_id) continue; if (fib_index != ~0 && fib_index != (int)fib->index) diff --git a/src/vnet/fib/ip6_fib.h b/src/vnet/fib/ip6_fib.h index af864a75..e2f28452 100644 --- a/src/vnet/fib/ip6_fib.h +++ b/src/vnet/fib/ip6_fib.h @@ -115,7 +115,7 @@ static inline ip6_fib_t * ip6_fib_get (fib_node_index_t index) { ASSERT(!pool_is_free_index(ip6_main.fibs, index)); - return (&pool_elt_at_index (ip6_main.fibs, index)->v6); + return (pool_elt_at_index (ip6_main.v6_fibs, index)); } static inline diff --git 
diff --git a/src/vnet/fib/mpls_fib.c b/src/vnet/fib/mpls_fib.c index 5cd0fd23..4b2b76ea 100644 --- a/src/vnet/fib/mpls_fib.c +++ b/src/vnet/fib/mpls_fib.c @@ -97,11 +97,15 @@ mpls_fib_create_with_table_id (u32 table_id) int i; pool_get_aligned(mpls_main.fibs, fib_table, CLIB_CACHE_LINE_BYTES); + pool_get_aligned(mpls_main.mpls_fibs, mf, CLIB_CACHE_LINE_BYTES); + + ASSERT((fib_table - mpls_main.fibs) == + (mf - mpls_main.mpls_fibs)); + memset(fib_table, 0, sizeof(*fib_table)); fib_table->ft_proto = FIB_PROTOCOL_MPLS; - fib_table->ft_index = - (fib_table - mpls_main.fibs); + fib_table->ft_index = (fib_table - mpls_main.fibs); hash_set (mpls_main.fib_index_by_table_id, table_id, fib_table->ft_index); @@ -109,8 +113,6 @@ mpls_fib_create_with_table_id (u32 table_id) table_id; fib_table->ft_flow_hash_config = MPLS_FLOW_HASH_DEFAULT; - fib_table->v4.fwd_classify_table_index = ~0; - fib_table->v4.rev_classify_table_index = ~0; fib_table_lock(fib_table->ft_index, FIB_PROTOCOL_MPLS); @@ -122,7 +124,6 @@ mpls_fib_create_with_table_id (u32 table_id) drop_dpo_get(DPO_PROTO_MPLS)); } - mf = &fib_table->mpls; mf->mf_entries = hash_create(0, sizeof(fib_node_index_t)); for (i = 0; i < MPLS_FIB_DB_SIZE; i++) { @@ -241,9 +242,10 @@ mpls_fib_table_create_and_lock (void) } void -mpls_fib_table_destroy (mpls_fib_t *mf) +mpls_fib_table_destroy (u32 fib_index) { - fib_table_t *fib_table = (fib_table_t*)mf; + fib_table_t *fib_table = pool_elt_at_index(mpls_main.fibs, fib_index); + mpls_fib_t *mf = pool_elt_at_index(mpls_main.mpls_fibs, fib_index); fib_prefix_t prefix = { .fp_proto = FIB_PROTOCOL_MPLS, }; @@ -274,6 +276,7 @@ mpls_fib_table_destroy (mpls_fib_t *mf) } hash_free(mf->mf_entries); + pool_put(mpls_main.mpls_fibs, mf); pool_put(mpls_main.fibs, fib_table); } @@ -436,11 +439,11 @@ mpls_fib_show (vlib_main_t * vm, if (MPLS_LABEL_INVALID == label) { - mpls_fib_table_show_all(&(fib_table->mpls), vm); + mpls_fib_table_show_all(mpls_fib_get(fib_table->ft_index), vm); } else { - mpls_fib_table_show_one(&(fib_table->mpls), label, vm); + mpls_fib_table_show_one(mpls_fib_get(fib_table->ft_index), label, vm); } })); diff --git a/src/vnet/fib/mpls_fib.h b/src/vnet/fib/mpls_fib.h index 779decaa..78a61a14 100644 --- a/src/vnet/fib/mpls_fib.h +++ b/src/vnet/fib/mpls_fib.h @@ -25,10 +25,33 @@ #include #include +#define MPLS_FIB_DEFAULT_TABLE_ID 0 + +/** + * Type exposure is to allow the DP fast/inlined access + */ +#define MPLS_FIB_KEY_SIZE 21 +#define MPLS_FIB_DB_SIZE (1 << (MPLS_FIB_KEY_SIZE-1)) + +typedef struct mpls_fib_t_ +{ + /** + * A hash table of entries. 21 bit key + * Hash table for reduced memory footprint + */ + uword * mf_entries; + + /** + * The load-balance indices keyed by 21 bit label+eos bit. + * A flat array for maximum lookup performance. 
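+ * (With MPLS_FIB_KEY_SIZE = 21, MPLS_FIB_DB_SIZE is 1<<20 slots; assuming the usual 4-byte index_t, this flat array costs roughly 4MB per MPLS FIB, traded for a single indexed load per lookup.)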
+ */ + index_t mf_lbs[MPLS_FIB_DB_SIZE]; +} mpls_fib_t; + static inline mpls_fib_t* mpls_fib_get (fib_node_index_t index) { - return (&(pool_elt_at_index(mpls_main.fibs, index)->mpls)); + return (pool_elt_at_index(mpls_main.mpls_fibs, index)); } extern u32 mpls_fib_table_find_or_create_and_lock(u32 table_id); @@ -56,8 +79,7 @@ extern void mpls_fib_table_entry_insert(mpls_fib_t *mf, mpls_label_t label, mpls_eos_bit_t eos, fib_node_index_t fei); -extern void mpls_fib_table_destroy(mpls_fib_t *mf); - +extern void mpls_fib_table_destroy(u32 fib_index); extern void mpls_fib_forwarding_table_update(mpls_fib_t *mf, diff --git a/src/vnet/ip/ip4.h b/src/vnet/ip/ip4.h index 4e075d0f..71640def 100644 --- a/src/vnet/ip/ip4.h +++ b/src/vnet/ip/ip4.h @@ -40,34 +40,10 @@ #ifndef included_ip_ip4_h #define included_ip_ip4_h -#include #include #include #include -typedef struct ip4_fib_t -{ - /* Hash table for each prefix length mapping. */ - uword *fib_entry_by_dst_address[33]; - - /* Mtrie for fast lookups. Hash is used to maintain overlapping prefixes. */ - ip4_fib_mtrie_t mtrie; - - /* Table ID (hash key) for this FIB. */ - u32 table_id; - - /* Index into FIB vector. */ - u32 index; - - /* flow hash configuration */ - flow_hash_config_t flow_hash_config; - - /* N-tuple classifier indices */ - u32 fwd_classify_table_index; - u32 rev_classify_table_index; - -} ip4_fib_t; - typedef struct ip4_mfib_t { /* Hash table for each prefix length mapping. */ @@ -111,6 +87,9 @@ typedef struct ip4_main_t /** Vector of FIBs. */ struct fib_table_t_ *fibs; + /** Vector of MTries. */ + struct ip4_fib_t_ *v4_fibs; + /** Vector of MFIBs. */ struct mfib_table_t_ *mfibs; @@ -284,8 +263,6 @@ serialize_function_t serialize_vnet_ip4_main, unserialize_vnet_ip4_main; int vnet_set_ip4_flow_hash (u32 table_id, flow_hash_config_t flow_hash_config); -void ip4_mtrie_init (ip4_fib_mtrie_t * m); - int vnet_set_ip4_classify_intfc (vlib_main_t * vm, u32 sw_if_index, u32 table_index); diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c index ef6dded5..ee1703e7 100644 --- a/src/vnet/ip/ip4_forward.c +++ b/src/vnet/ip/ip4_forward.c @@ -182,7 +182,6 @@ ip4_lookup_inline (vlib_main_t * vm, mtrie2 = &ip4_fib_get (fib_index2)->mtrie; mtrie3 = &ip4_fib_get (fib_index3)->mtrie; - leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0); leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, dst_addr1); leaf2 = ip4_fib_mtrie_lookup_step_one (mtrie2, dst_addr2); @@ -194,14 +193,6 @@ ip4_lookup_inline (vlib_main_t * vm, tcp2 = (void *) (ip2 + 1); tcp3 = (void *) (ip3 + 1); - if (!lookup_for_responses_to_locally_received_packets) - { - leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 1); - leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 1); - leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 1); - leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 1); - } - if (!lookup_for_responses_to_locally_received_packets) { leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2); @@ -363,9 +354,6 @@ ip4_lookup_inline (vlib_main_t * vm, tcp0 = (void *) (ip0 + 1); - if (!lookup_for_responses_to_locally_received_packets) - leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 1); - if (!lookup_for_responses_to_locally_received_packets) leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2); @@ -1622,11 +1610,6 @@ ip4_local_inline (vlib_main_t * vm, good_tcp_udp0 |= is_udp0 && udp0->checksum == 0; good_tcp_udp1 |= is_udp1 && udp1->checksum == 0; - leaf0 = - ip4_fib_mtrie_lookup_step 
(mtrie0, leaf0, &ip0->src_address, 1); - leaf1 = - ip4_fib_mtrie_lookup_step (mtrie1, leaf1, &ip1->src_address, 1); - /* Verify UDP length. */ ip_len0 = clib_net_to_host_u16 (ip0->length); ip_len1 = clib_net_to_host_u16 (ip1->length); @@ -1812,9 +1795,6 @@ ip4_local_inline (vlib_main_t * vm, /* Don't verify UDP checksum for packets with explicit zero checksum. */ good_tcp_udp0 |= is_udp0 && udp0->checksum == 0; - leaf0 = - ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 1); - /* Verify UDP length. */ ip_len0 = clib_net_to_host_u16 (ip0->length); udp_len0 = clib_net_to_host_u16 (udp0->length); @@ -2913,7 +2893,6 @@ ip4_lookup_validate (ip4_address_t * a, u32 fib_index0) mtrie0 = &ip4_fib_get (fib_index0)->mtrie; leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, a); - leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, a, 1); leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, a, 2); leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, a, 3); diff --git a/src/vnet/ip/ip4_mtrie.c b/src/vnet/ip/ip4_mtrie.c index 317d8f10..adc95125 100644 --- a/src/vnet/ip/ip4_mtrie.c +++ b/src/vnet/ip/ip4_mtrie.c @@ -38,10 +38,17 @@ */ #include -#include +#include +#include + + +/** + * Global pool of IPv4 8bit PLYs + */ +ip4_fib_mtrie_8_ply_t *ip4_ply_pool; always_inline u32 -ip4_fib_mtrie_leaf_is_non_empty (ip4_fib_mtrie_ply_t * p, u8 dst_byte) +ip4_fib_mtrie_leaf_is_non_empty (ip4_fib_mtrie_8_ply_t * p, u8 dst_byte) { /* * It's 'non-empty' if the length of the leaf stored is greater than the @@ -84,61 +91,83 @@ ip4_fib_mtrie_leaf_set_next_ply_index (u32 i) return l; } -static void -ply_init (ip4_fib_mtrie_ply_t * p, - ip4_fib_mtrie_leaf_t init, u32 prefix_len, u32 ply_base_len) -{ - /* - * A leaf is 'empty' if it represents a leaf from the covering PLY - * i.e. if the prefix length of the leaf is less than or equal to - * the prefix length of the PLY - */ - p->n_non_empty_leafs = (prefix_len > ply_base_len ? - ARRAY_LEN (p->leaves) : 0); - memset (p->dst_address_bits_of_leaves, prefix_len, - sizeof (p->dst_address_bits_of_leaves)); - p->dst_address_bits_base = ply_base_len; - - /* Initialize leaves. 
*/ -#ifdef CLIB_HAVE_VEC128 - { - u32x4 *l, init_x4; - #ifndef __ALTIVEC__ - init_x4 = u32x4_splat (init); +#define PLY_X4_SPLAT_INIT(init_x4, init) \ + init_x4 = u32x4_splat (init); #else - { - u32x4_union_t y; - y.as_u32[0] = init; - y.as_u32[1] = init; - y.as_u32[2] = init; - y.as_u32[3] = init; - init_x4 = y.as_u32x4; - } +#define PLY_X4_SPLAT_INIT(init_x4, init) \ +{ \ + u32x4_union_t y; \ + y.as_u32[0] = init; \ + y.as_u32[1] = init; \ + y.as_u32[2] = init; \ + y.as_u32[3] = init; \ + init_x4 = y.as_u32x4; \ +} #endif - for (l = p->leaves_as_u32x4; - l < p->leaves_as_u32x4 + ARRAY_LEN (p->leaves_as_u32x4); l += 4) - { - l[0] = init_x4; - l[1] = init_x4; - l[2] = init_x4; - l[3] = init_x4; - } - } +#ifdef CLIB_HAVE_VEC128 +#define PLY_INIT_LEAVES(p) \ +{ \ + u32x4 *l, init_x4; \ + \ + PLY_X4_SPLAT_INIT(init_x4, init); \ + for (l = p->leaves_as_u32x4; \ + l < p->leaves_as_u32x4 + ARRAY_LEN (p->leaves_as_u32x4); \ + l += 4) \ + { \ + l[0] = init_x4; \ + l[1] = init_x4; \ + l[2] = init_x4; \ + l[3] = init_x4; \ + } \ +} #else - { - u32 *l; - - for (l = p->leaves; l < p->leaves + ARRAY_LEN (p->leaves); l += 4) - { - l[0] = init; - l[1] = init; - l[2] = init; - l[3] = init; - } - } +#define PLY_INIT_LEAVES(p) \ +{ \ + u32 *l; \ + \ + for (l = p->leaves; l < p->leaves + ARRAY_LEN (p->leaves); l += 4) \ + { \ + l[0] = init; \ + l[1] = init; \ + l[2] = init; \ + l[3] = init; \ + } \ +} #endif + +#define PLY_INIT(p, init, prefix_len, ply_base_len) \ +{ \ + /* \ + * A leaf is 'empty' if it represents a leaf from the covering PLY \ + * i.e. if the prefix length of the leaf is less than or equal to \ + * the prefix length of the PLY \ + */ \ + p->n_non_empty_leafs = (prefix_len > ply_base_len ? \ + ARRAY_LEN (p->leaves) : 0); \ + memset (p->dst_address_bits_of_leaves, prefix_len, \ + sizeof (p->dst_address_bits_of_leaves)); \ + p->dst_address_bits_base = ply_base_len; \ + \ + /* Initialize leaves. */ \ + PLY_INIT_LEAVES(p); \ +} + +static void +ply_8_init (ip4_fib_mtrie_8_ply_t * p, + ip4_fib_mtrie_leaf_t init, uword prefix_len, u32 ply_base_len) +{ + PLY_INIT (p, init, prefix_len, ply_base_len); +} + +static void +ply_16_init (ip4_fib_mtrie_16_ply_t * p, + ip4_fib_mtrie_leaf_t init, uword prefix_len) +{ + memset (p->dst_address_bits_of_leaves, prefix_len, + sizeof (p->dst_address_bits_of_leaves)); + PLY_INIT_LEAVES (p); } static ip4_fib_mtrie_leaf_t @@ -146,49 +175,43 @@ ply_create (ip4_fib_mtrie_t * m, ip4_fib_mtrie_leaf_t init_leaf, u32 leaf_prefix_len, u32 ply_base_len) { - ip4_fib_mtrie_ply_t *p; + ip4_fib_mtrie_8_ply_t *p; /* Get cache aligned ply. */ - pool_get_aligned (m->ply_pool, p, sizeof (p[0])); + pool_get_aligned (ip4_ply_pool, p, CLIB_CACHE_LINE_BYTES); - ply_init (p, init_leaf, leaf_prefix_len, ply_base_len); - return ip4_fib_mtrie_leaf_set_next_ply_index (p - m->ply_pool); + ply_8_init (p, init_leaf, leaf_prefix_len, ply_base_len); + return ip4_fib_mtrie_leaf_set_next_ply_index (p - ip4_ply_pool); } -always_inline ip4_fib_mtrie_ply_t * +always_inline ip4_fib_mtrie_8_ply_t * get_next_ply_for_leaf (ip4_fib_mtrie_t * m, ip4_fib_mtrie_leaf_t l) { uword n = ip4_fib_mtrie_leaf_get_next_ply_index (l); - /* It better not be the root ply. 
*/ - ASSERT (n != 0); - return pool_elt_at_index (m->ply_pool, n); + + return pool_elt_at_index (ip4_ply_pool, n); } -static void -ply_free (ip4_fib_mtrie_t * m, ip4_fib_mtrie_ply_t * p) +void +ip4_mtrie_free (ip4_fib_mtrie_t * m) { - uword i, is_root; - - is_root = p - m->ply_pool == 0; - - for (i = 0; i < ARRAY_LEN (p->leaves); i++) + /* the root ply is embedded so there is nothing to do, + * the assumption being that the IP4 FIB table has emptied the trie + * before deletion. + */ +#if CLIB_DEBUG > 0 + int i; + for (i = 0; i < ARRAY_LEN (m->root_ply.leaves); i++) { - ip4_fib_mtrie_leaf_t l = p->leaves[i]; - if (ip4_fib_mtrie_leaf_is_next_ply (l)) - ply_free (m, get_next_ply_for_leaf (m, l)); + ASSERT (!ip4_fib_mtrie_leaf_is_next_ply (m->root_ply.leaves[i])); } - - if (is_root) - ply_init (p, IP4_FIB_MTRIE_LEAF_EMPTY, /* prefix_len */ 0, 0); - else - pool_put (m->ply_pool, p); +#endif } void -ip4_fib_free (ip4_fib_mtrie_t * m) +ip4_mtrie_init (ip4_fib_mtrie_t * m) { - ip4_fib_mtrie_ply_t *root_ply = pool_elt_at_index (m->ply_pool, 0); - ply_free (m, root_ply); + ply_16_init (&m->root_ply, IP4_FIB_MTRIE_LEAF_EMPTY, 0); } typedef struct @@ -202,7 +225,7 @@ typedef struct static void set_ply_with_more_specific_leaf (ip4_fib_mtrie_t * m, - ip4_fib_mtrie_ply_t * ply, + ip4_fib_mtrie_8_ply_t * ply, ip4_fib_mtrie_leaf_t new_leaf, uword new_leaf_dst_address_bits) { @@ -218,7 +241,8 @@ set_ply_with_more_specific_leaf (ip4_fib_mtrie_t * m, /* Recurse into sub plies. */ if (!ip4_fib_mtrie_leaf_is_terminal (old_leaf)) { - ip4_fib_mtrie_ply_t *sub_ply = get_next_ply_for_leaf (m, old_leaf); + ip4_fib_mtrie_8_ply_t *sub_ply = + get_next_ply_for_leaf (m, old_leaf); set_ply_with_more_specific_leaf (m, sub_ply, new_leaf, new_leaf_dst_address_bits); } @@ -237,16 +261,20 @@ set_ply_with_more_specific_leaf (ip4_fib_mtrie_t * m, static void set_leaf (ip4_fib_mtrie_t * m, - ip4_fib_mtrie_set_unset_leaf_args_t * a, + const ip4_fib_mtrie_set_unset_leaf_args_t * a, u32 old_ply_index, u32 dst_address_byte_index) { ip4_fib_mtrie_leaf_t old_leaf, new_leaf; i32 n_dst_bits_next_plies; u8 dst_byte; + ip4_fib_mtrie_8_ply_t *old_ply; + + old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index); ASSERT (a->dst_address_length >= 0 && a->dst_address_length <= 32); ASSERT (dst_address_byte_index < ARRAY_LEN (a->dst_address.as_u8)); + /* how many bits of the destination address are in the next PLY */ n_dst_bits_next_plies = a->dst_address_length - BITS (u8) * (dst_address_byte_index + 1); @@ -255,30 +283,36 @@ set_leaf (ip4_fib_mtrie_t * m, /* Number of bits next plies <= 0 => insert leaves this ply. */ if (n_dst_bits_next_plies <= 0) { + /* The mask length of the address to insert maps to this ply */ uword i, n_dst_bits_this_ply, old_leaf_is_terminal; + /* The number of bits, and hence slots/buckets, we will fill */ n_dst_bits_this_ply = clib_min (8, -n_dst_bits_next_plies); ASSERT ((a->dst_address.as_u8[dst_address_byte_index] & pow2_mask (n_dst_bits_this_ply)) == 0); + /* Starting at the value of the byte at this section of the v4 address + * fill the buckets/slots of the ply */ for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++) { - ip4_fib_mtrie_ply_t *old_ply, *new_ply; - - old_ply = pool_elt_at_index (m->ply_pool, old_ply_index); + ip4_fib_mtrie_8_ply_t *new_ply; old_leaf = old_ply->leaves[i]; old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf); - /* Is leaf to be inserted more specific? */
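+ /* For reference, the leaf encoding tested here (see ip4_mtrie.h): a terminal leaf is 1 + 2*load_balance_index (bit 0 set), a non-terminal leaf is 0 + 2*next_ply_index (bit 0 clear), and IP4_FIB_MTRIE_LEAF_EMPTY is the terminal leaf for adjacency index 0, the special miss adjacency. */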
if (a->dst_address_length >= old_ply->dst_address_bits_of_leaves[i]) { + /* The new leaf is at least as specific as the one currently + * occupying the slot */ new_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index); if (old_leaf_is_terminal) { + /* The current leaf is terminal, we can replace it with + * the new one */ old_ply->n_non_empty_leafs -= ip4_fib_mtrie_leaf_is_non_empty (old_ply, i); + old_ply->dst_address_bits_of_leaves[i] = a->dst_address_length; __sync_val_compare_and_swap (&old_ply->leaves[i], old_leaf, @@ -292,32 +326,42 @@ set_leaf (ip4_fib_mtrie_t * m, } else { - /* Existing leaf points to another ply. We need to place new_leaf into all - more specific slots. */ + /* Existing leaf points to another ply. We need to place + * new_leaf into all more specific slots. */ new_ply = get_next_ply_for_leaf (m, old_leaf); set_ply_with_more_specific_leaf (m, new_ply, new_leaf, a->dst_address_length); } } - else if (!old_leaf_is_terminal) { + /* The current leaf is less specific and not terminal (i.e. a ply), + * recurse on down the trie */ new_ply = get_next_ply_for_leaf (m, old_leaf); - set_leaf (m, a, new_ply - m->ply_pool, + set_leaf (m, a, new_ply - ip4_ply_pool, dst_address_byte_index + 1); } + /* + * else + * the route we are adding is less specific than the leaf currently + * occupying this slot. Leave it there. + */ } } else { - ip4_fib_mtrie_ply_t *old_ply, *new_ply; + /* The address to insert requires us to move down to a lower level of + * the trie - recurse on down */ + ip4_fib_mtrie_8_ply_t *new_ply; u8 ply_base_len; ply_base_len = 8 * (dst_address_byte_index + 1); - old_ply = pool_elt_at_index (m->ply_pool, old_ply_index); + old_leaf = old_ply->leaves[dst_byte]; + if (ip4_fib_mtrie_leaf_is_terminal (old_leaf)) { + /* There is a leaf occupying the slot. Replace it with a new ply */ old_ply->n_non_empty_leafs -= ip4_fib_mtrie_leaf_is_non_empty (old_ply, dst_byte); @@ -328,28 +372,143 @@ set_leaf (ip4_fib_mtrie_t * m, new_ply = get_next_ply_for_leaf (m, new_leaf); /* Refetch since ply_create may move pool. */ - old_ply = pool_elt_at_index (m->ply_pool, old_ply_index); + old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index); __sync_val_compare_and_swap (&old_ply->leaves[dst_byte], old_leaf, new_leaf); ASSERT (old_ply->leaves[dst_byte] == new_leaf); old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len; - /* Account for the ply we just created. */ - old_ply->n_non_empty_leafs += 1; + old_ply->n_non_empty_leafs += + ip4_fib_mtrie_leaf_is_non_empty (old_ply, dst_byte); ASSERT (old_ply->n_non_empty_leafs >= 0); } else new_ply = get_next_ply_for_leaf (m, old_leaf); - set_leaf (m, a, new_ply - m->ply_pool, dst_address_byte_index + 1); + set_leaf (m, a, new_ply - ip4_ply_pool, dst_address_byte_index + 1); + } +} + +static void +set_root_leaf (ip4_fib_mtrie_t * m, + const ip4_fib_mtrie_set_unset_leaf_args_t * a) +{ + ip4_fib_mtrie_leaf_t old_leaf, new_leaf; + ip4_fib_mtrie_16_ply_t *old_ply; + i32 n_dst_bits_next_plies; + u16 dst_byte; + + old_ply = &m->root_ply; + + ASSERT (a->dst_address_length >= 0 && a->dst_address_length <= 32); + + /* how many bits of the destination address are in the next PLY */ + n_dst_bits_next_plies = a->dst_address_length - BITS (u16); + + dst_byte = a->dst_address.as_u16[0]; + + /* Number of bits next plies <= 0 => insert leaves this ply. */
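+ /* Worked example, assuming a little-endian host: for 10.0.0.0/15 the + * loop below must fill two root-ply slots. dst_byte is as_u16[0] in + * network order, 0x000a; in host order that is 0x0a00, so i = 0,1 give + * host-order values 0x0a00 and 0x0a01, which map back to network-order + * slots 0x000a (10.0/16) and 0x010a (10.1/16). */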
+ if (n_dst_bits_next_plies <= 0) + { + /* The mask length of the address to insert maps to this ply */ + uword i, n_dst_bits_this_ply, old_leaf_is_terminal; + + /* The number of bits, and hence slots/buckets, we will fill */ + n_dst_bits_this_ply = 16 - a->dst_address_length; + ASSERT ((clib_host_to_net_u16 (a->dst_address.as_u16[0]) & + pow2_mask (n_dst_bits_this_ply)) == 0); + + /* Starting at the value of the byte at this section of the v4 address + * fill the buckets/slots of the ply */ + for (i = 0; i < (1 << n_dst_bits_this_ply); i++) + { + ip4_fib_mtrie_8_ply_t *new_ply; + u16 slot; + + slot = clib_net_to_host_u16 (dst_byte); + slot += i; + slot = clib_host_to_net_u16 (slot); + + old_leaf = old_ply->leaves[slot]; + old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf); + + if (a->dst_address_length >= + old_ply->dst_address_bits_of_leaves[slot]) + { + /* The new leaf is at least as specific as the one currently + * occupying the slot */ + new_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index); + + if (old_leaf_is_terminal) + { + /* The current leaf is terminal, we can replace it with + * the new one */ + old_ply->dst_address_bits_of_leaves[slot] = + a->dst_address_length; + __sync_val_compare_and_swap (&old_ply->leaves[slot], + old_leaf, new_leaf); + ASSERT (old_ply->leaves[slot] == new_leaf); + } + else + { + /* Existing leaf points to another ply. We need to place + * new_leaf into all more specific slots. */ + new_ply = get_next_ply_for_leaf (m, old_leaf); + set_ply_with_more_specific_leaf (m, new_ply, new_leaf, + a->dst_address_length); + } + } + else if (!old_leaf_is_terminal) + { + /* The current leaf is less specific and not terminal (i.e. a ply), + * recurse on down the trie */ + new_ply = get_next_ply_for_leaf (m, old_leaf); + set_leaf (m, a, new_ply - ip4_ply_pool, 2); + } + /* + * else + * the route we are adding is less specific than the leaf currently + * occupying this slot. Leave it there. + */ + } + } + else + { + /* The address to insert requires us to move down to a lower level of + * the trie - recurse on down */ + ip4_fib_mtrie_8_ply_t *new_ply; + u8 ply_base_len; + + ply_base_len = 16; + + old_leaf = old_ply->leaves[dst_byte]; + + if (ip4_fib_mtrie_leaf_is_terminal (old_leaf)) + { + /* There is a leaf occupying the slot. Replace it with a new ply */ + new_leaf = ply_create (m, old_leaf, + clib_max (old_ply->dst_address_bits_of_leaves + [dst_byte], ply_base_len), + ply_base_len); + new_ply = get_next_ply_for_leaf (m, new_leaf); + + __sync_val_compare_and_swap (&old_ply->leaves[dst_byte], old_leaf, + new_leaf); + ASSERT (old_ply->leaves[dst_byte] == new_leaf); + old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len; + } + else + new_ply = get_next_ply_for_leaf (m, old_leaf); + + set_leaf (m, a, new_ply - ip4_ply_pool, 2); } } static uword unset_leaf (ip4_fib_mtrie_t * m, - ip4_fib_mtrie_set_unset_leaf_args_t * a, - ip4_fib_mtrie_ply_t * old_ply, u32 dst_address_byte_index) + const ip4_fib_mtrie_set_unset_leaf_args_t * a, + ip4_fib_mtrie_8_ply_t * old_ply, u32 dst_address_byte_index) { ip4_fib_mtrie_leaf_t old_leaf, del_leaf; i32 n_dst_bits_next_plies; @@ -397,7 +556,7 @@ unset_leaf (ip4_fib_mtrie_t * m, ASSERT (old_ply->n_non_empty_leafs >= 0); if (old_ply->n_non_empty_leafs == 0 && dst_address_byte_index > 0) { - pool_put (m->ply_pool, old_ply); + pool_put (ip4_ply_pool, old_ply); /* Old ply was deleted. 
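Returning 1 tells the level above that this ply is gone; the caller then overwrites the slot that referenced it with a leaf built from the covering prefix (unset_root_leaf below shows the pattern).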
*/ return 1; } @@ -419,106 +578,120 @@ unset_leaf (ip4_fib_mtrie_t * m, return 0; } -void -ip4_mtrie_init (ip4_fib_mtrie_t * m) +static void +unset_root_leaf (ip4_fib_mtrie_t * m, + const ip4_fib_mtrie_set_unset_leaf_args_t * a) { - ip4_fib_mtrie_leaf_t root; - memset (m, 0, sizeof (m[0])); - root = ply_create (m, IP4_FIB_MTRIE_LEAF_EMPTY, 0, 0); - ASSERT (ip4_fib_mtrie_leaf_get_next_ply_index (root) == 0); -} + ip4_fib_mtrie_leaf_t old_leaf, del_leaf; + i32 n_dst_bits_next_plies; + i32 i, n_dst_bits_this_ply, old_leaf_is_terminal; + u16 dst_byte; + ip4_fib_mtrie_16_ply_t *old_ply; -void -ip4_fib_mtrie_add_del_route (ip4_fib_t * fib, - ip4_address_t dst_address, - u32 dst_address_length, - u32 adj_index, u32 is_del) -{ - ip4_fib_mtrie_t *m = &fib->mtrie; - ip4_fib_mtrie_ply_t *root_ply; - ip4_fib_mtrie_set_unset_leaf_args_t a; - ip4_main_t *im = &ip4_main; + ASSERT (a->dst_address_length >= 0 && a->dst_address_length <= 32); - ASSERT (m->ply_pool != 0); + old_ply = &m->root_ply; + n_dst_bits_next_plies = a->dst_address_length - BITS (u16); - root_ply = pool_elt_at_index (m->ply_pool, 0); + dst_byte = a->dst_address.as_u16[0]; - /* Honor dst_address_length. Fib masks are in network byte order */ - dst_address.as_u32 &= im->fib_masks[dst_address_length]; - a.dst_address = dst_address; - a.dst_address_length = dst_address_length; - a.adj_index = adj_index; + n_dst_bits_this_ply = (n_dst_bits_next_plies <= 0 ? + (16 - a->dst_address_length) : 0); - if (!is_del) - { - set_leaf (m, &a, /* ply_index */ 0, /* dst_address_byte_index */ 0); - } - else + del_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index); + + /* Starting at the value of the byte at this section of the v4 address + * fill the buckets/slots of the ply */ + for (i = 0; i < (1 << n_dst_bits_this_ply); i++) { - ip4_main_t *im = &ip4_main; + u16 slot; + + slot = clib_net_to_host_u16 (dst_byte); + slot += i; + slot = clib_host_to_net_u16 (slot); - if (dst_address_length) + old_leaf = old_ply->leaves[slot]; + old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf); + + if (old_leaf == del_leaf + || (!old_leaf_is_terminal + && unset_leaf (m, a, get_next_ply_for_leaf (m, old_leaf), 2))) { - word i; + old_ply->leaves[slot] = + ip4_fib_mtrie_leaf_set_adj_index (a->cover_adj_index); + old_ply->dst_address_bits_of_leaves[slot] = a->cover_address_length; + } + } +} - /* If the ply was not deleted, then we need to fill the - * bucket just reset will the leaf from the less specfic - * cover. - * Find next less specific route and insert into mtrie. */ - for (i = dst_address_length - 1; i >= 0; i--) - { - uword *p; - index_t lbi; - ip4_address_t key; +void +ip4_fib_mtrie_route_add (ip4_fib_mtrie_t * m, + const ip4_address_t * dst_address, + u32 dst_address_length, u32 adj_index) +{ + ip4_fib_mtrie_set_unset_leaf_args_t a; + ip4_main_t *im = &ip4_main; - if (!fib->fib_entry_by_dst_address[i]) - continue; + /* Honor dst_address_length. 
Fib masks are in network byte order */ + a.dst_address.as_u32 = (dst_address->as_u32 & + im->fib_masks[dst_address_length]); + a.dst_address_length = dst_address_length; + a.adj_index = adj_index; - key.as_u32 = dst_address.as_u32 & im->fib_masks[i]; - p = hash_get (fib->fib_entry_by_dst_address[i], key.as_u32); - if (p) - { - lbi = fib_entry_contribute_ip_forwarding (p[0])->dpoi_index; - if (INDEX_INVALID == lbi) - continue; + set_root_leaf (m, &a); +} - a.cover_adj_index = lbi; - a.cover_address_length = i; +void +ip4_fib_mtrie_route_del (ip4_fib_mtrie_t * m, + const ip4_address_t * dst_address, + u32 dst_address_length, + u32 adj_index, + u32 cover_address_length, u32 cover_adj_index) +{ + ip4_fib_mtrie_set_unset_leaf_args_t a; + ip4_main_t *im = &ip4_main; - break; - } - } - } - else - { - a.cover_adj_index = 0; - a.cover_address_length = 0; - } + /* Honor dst_address_length. Fib masks are in network byte order */ + a.dst_address.as_u32 = (dst_address->as_u32 & + im->fib_masks[dst_address_length]); + a.dst_address_length = dst_address_length; + a.adj_index = adj_index; + a.cover_adj_index = cover_adj_index; + a.cover_address_length = cover_address_length; - /* the top level ply is never removed, so we can ignore the return code */ - unset_leaf (m, &a, root_ply, 0); - } + /* the top level ply is never removed */ + unset_root_leaf (m, &a); } /* Returns number of bytes of memory used by mtrie. */ static uword -mtrie_memory_usage (ip4_fib_mtrie_t * m, ip4_fib_mtrie_ply_t * p) +mtrie_ply_memory_usage (ip4_fib_mtrie_t * m, ip4_fib_mtrie_8_ply_t * p) { uword bytes, i; - if (!p) - { - if (pool_is_free_index (m->ply_pool, 0)) - return 0; - p = pool_elt_at_index (m->ply_pool, 0); - } - bytes = sizeof (p[0]); for (i = 0; i < ARRAY_LEN (p->leaves); i++) { ip4_fib_mtrie_leaf_t l = p->leaves[i]; if (ip4_fib_mtrie_leaf_is_next_ply (l)) - bytes += mtrie_memory_usage (m, get_next_ply_for_leaf (m, l)); + bytes += mtrie_ply_memory_usage (m, get_next_ply_for_leaf (m, l)); + } + + return bytes; +} + +/* Returns number of bytes of memory used by mtrie. 
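+ Counts the embedded 16-bit root ply via sizeof (*m), then recurses into each child 8-bit ply with mtrie_ply_memory_usage().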
*/ +static uword +mtrie_memory_usage (ip4_fib_mtrie_t * m) +{ + uword bytes, i; + + bytes = sizeof (*m); + for (i = 0; i < ARRAY_LEN (m->root_ply.leaves); i++) + { + ip4_fib_mtrie_leaf_t l = m->root_ply.leaves[i]; + if (ip4_fib_mtrie_leaf_is_next_ply (l)) + bytes += mtrie_ply_memory_usage (m, get_next_ply_for_leaf (m, l)); } return bytes; @@ -536,47 +709,49 @@ format_ip4_fib_mtrie_leaf (u8 * s, va_list * va) return s; } +#define FORMAT_PLY(s, _p, _i, _base_address, _ply_max_len, _indent) \ +({ \ + u32 a, ia_length; \ + ip4_address_t ia; \ + ip4_fib_mtrie_leaf_t _l = p->leaves[(_i)]; \ + \ + a = (_base_address) + ((_i) << (32 - (_ply_max_len))); \ + ia.as_u32 = clib_host_to_net_u32 (a); \ + ia_length = (_p)->dst_address_bits_of_leaves[(_i)]; \ + s = format (s, "\n%U%20U %U", \ + format_white_space, (_indent) + 2, \ + format_ip4_address_and_length, &ia, ia_length, \ + format_ip4_fib_mtrie_leaf, _l); \ + \ + if (ip4_fib_mtrie_leaf_is_next_ply (_l)) \ + s = format (s, "\n%U%U", \ + format_white_space, (_indent) + 2, \ + format_ip4_fib_mtrie_ply, m, a, \ + ip4_fib_mtrie_leaf_get_next_ply_index (_l)); \ + s; \ +}) + static u8 * format_ip4_fib_mtrie_ply (u8 * s, va_list * va) { ip4_fib_mtrie_t *m = va_arg (*va, ip4_fib_mtrie_t *); u32 base_address = va_arg (*va, u32); u32 ply_index = va_arg (*va, u32); - u32 dst_address_byte_index = va_arg (*va, u32); - ip4_fib_mtrie_ply_t *p; - uword i, indent; + ip4_fib_mtrie_8_ply_t *p; + uword indent; + int i; - p = pool_elt_at_index (m->ply_pool, ply_index); + p = pool_elt_at_index (ip4_ply_pool, ply_index); indent = format_get_indent (s); - s = - format (s, "ply index %d, %d non-empty leaves", ply_index, - p->n_non_empty_leafs); + s = format (s, "ply index %d, %d non-empty leaves", ply_index, + p->n_non_empty_leafs); + for (i = 0; i < ARRAY_LEN (p->leaves); i++) { - ip4_fib_mtrie_leaf_t l = p->leaves[i]; - if (ip4_fib_mtrie_leaf_is_non_empty (p, i)) { - u32 a, ia_length; - ip4_address_t ia; - - a = base_address + (i << (24 - 8 * dst_address_byte_index)); - ia.as_u32 = clib_host_to_net_u32 (a); - if (ip4_fib_mtrie_leaf_is_terminal (l)) - ia_length = p->dst_address_bits_of_leaves[i]; - else - ia_length = 8 * (1 + dst_address_byte_index); - s = format (s, "\n%U%20U %U", - format_white_space, indent + 2, - format_ip4_address_and_length, &ia, ia_length, - format_ip4_fib_mtrie_leaf, l); - - if (ip4_fib_mtrie_leaf_is_next_ply (l)) - s = format (s, "\n%U%U", - format_white_space, indent + 2, - format_ip4_fib_mtrie_ply, m, a, - ip4_fib_mtrie_leaf_get_next_ply_index (l), - dst_address_byte_index + 1); + FORMAT_PLY (s, p, i, base_address, + p->dst_address_bits_base + 8, indent); } } @@ -587,22 +762,44 @@ u8 * format_ip4_fib_mtrie (u8 * s, va_list * va) { ip4_fib_mtrie_t *m = va_arg (*va, ip4_fib_mtrie_t *); + ip4_fib_mtrie_16_ply_t *p; + u32 base_address = 0; + int i; - s = format (s, "%d plies, memory usage %U", - pool_elts (m->ply_pool), - format_memory_size, mtrie_memory_usage (m, 0)); + s = format (s, "%d plies, memory usage %U\n", + pool_elts (ip4_ply_pool), + format_memory_size, mtrie_memory_usage (m)); + s = format (s, "root-ply"); + p = &m->root_ply; - if (pool_elts (m->ply_pool) > 0) + for (i = 0; i < ARRAY_LEN (p->leaves); i++) { - ip4_address_t base_address; - base_address.as_u32 = 0; - s = - format (s, "\n %U", format_ip4_fib_mtrie_ply, m, base_address, 0, 0); + u16 slot; + + slot = clib_host_to_net_u16 (i); + + if (p->dst_address_bits_of_leaves[slot] > 0) + { + FORMAT_PLY (s, p, slot, base_address, 16, 2); + } } return s; }
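+/* + * With the root ply embedded in ip4_fib_mtrie_t, pool index 0 no longer + * denotes the root. The init function below therefore reserves slot 0 of + * the global ply pool, presumably so a zeroed leaf (0 => non-terminal with + * next-ply index 0) can never point at a live ply. + */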
+static clib_error_t * +ip4_mtrie_module_init (vlib_main_t * vm) +{ + /* Burn one ply so index 0 is taken */ + CLIB_UNUSED (ip4_fib_mtrie_8_ply_t * p); + + pool_get (ip4_ply_pool, p); + + return (NULL); +} + +VLIB_INIT_FUNCTION (ip4_mtrie_module_init); + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/ip/ip4_mtrie.h b/src/vnet/ip/ip4_mtrie.h index 128195d3..be262c2c 100644 --- a/src/vnet/ip/ip4_mtrie.h +++ b/src/vnet/ip/ip4_mtrie.h @@ -47,16 +47,43 @@ /* ip4 fib leafs: 4 ply 8-8-8-8 mtrie. 1 + 2*adj_index for terminal leaves. - 0 + 2*next_ply_index for non-terminals. + 0 + 2*next_ply_index for non-terminals, i.e. PLYs 1 => empty (adjacency index of zero is special miss adjacency). */ typedef u32 ip4_fib_mtrie_leaf_t; #define IP4_FIB_MTRIE_LEAF_EMPTY (1 + 2*0) +/** + * @brief The 16-way stride that is the top PLY of the mtrie. + * We do not maintain the count of 'real' leaves in this PLY, since + * it is never removed. The FIB will destroy the mtrie and the ply once + * the FIB is destroyed. + */ +#define PLY_16_SIZE (1<<16) +typedef struct ip4_fib_mtrie_16_ply_t_ +{ + /** + * The leaves/slots/buckets to be filled with leaves + */ + union + { + ip4_fib_mtrie_leaf_t leaves[PLY_16_SIZE]; + +#ifdef CLIB_HAVE_VEC128 + u32x4 leaves_as_u32x4[PLY_16_SIZE / 4]; +#endif + }; + + /** + * Prefix length for terminal leaves. + */ + u8 dst_address_bits_of_leaves[PLY_16_SIZE]; +} ip4_fib_mtrie_16_ply_t; + /** * @brief One ply of the 4 ply mtrie fib. */ -typedef struct +typedef struct ip4_fib_mtrie_8_ply_t_ { /** * The leaves/slots/buckets to be filed with leafs @@ -90,34 +117,72 @@ typedef struct /* Pad to cache line boundary. */ u8 pad[CLIB_CACHE_LINE_BYTES - 2 * sizeof (i32)]; } -ip4_fib_mtrie_ply_t; +ip4_fib_mtrie_8_ply_t; -STATIC_ASSERT (0 == sizeof (ip4_fib_mtrie_ply_t) % CLIB_CACHE_LINE_BYTES, +STATIC_ASSERT (0 == sizeof (ip4_fib_mtrie_8_ply_t) % CLIB_CACHE_LINE_BYTES, "IP4 Mtrie ply cache line"); +/** + * @brief The multiway-TRIE. + * There is no data associated with the mtrie apart from the top PLY + */ typedef struct { - /* Pool of plies. Index zero is root ply. */ - ip4_fib_mtrie_ply_t *ply_pool; + /** + * Embed the PLY with the mtrie struct. This means that the Data-plane + * 'get me the mtrie' returns the first ply, and not an indirect 'pointer' + * to it. Therefore no cacheline misses in the data-path. + */ + ip4_fib_mtrie_16_ply_t root_ply; } ip4_fib_mtrie_t; -void ip4_fib_mtrie_init (ip4_fib_mtrie_t * m); +/** + * @brief Initialise an mtrie + */ +void ip4_mtrie_init (ip4_fib_mtrie_t * m); -struct ip4_fib_t; +/** + * @brief Free an mtrie. It must be empty when freed. + */ +void ip4_mtrie_free (ip4_fib_mtrie_t * m); -void ip4_fib_mtrie_add_del_route (struct ip4_fib_t *f, - ip4_address_t dst_address, - u32 dst_address_length, - u32 adj_index, u32 is_del); +/** + * @brief Add a route/entry to the mtrie + */ +void ip4_fib_mtrie_route_add (ip4_fib_mtrie_t * m, + const ip4_address_t * dst_address, + u32 dst_address_length, u32 adj_index); +/** + * @brief Remove a route/entry from the mtrie + */ +void ip4_fib_mtrie_route_del (ip4_fib_mtrie_t * m, + const ip4_address_t * dst_address, + u32 dst_address_length, + u32 adj_index, + u32 cover_address_length, u32 cover_adj_index); +/** + * @brief Format/display the contents of the mtrie + */ format_function_t format_ip4_fib_mtrie; +/** + * @brief A global pool of 8bit stride plies + */ +extern ip4_fib_mtrie_8_ply_t *ip4_ply_pool; + +/** + * Is the leaf terminal (i.e. an LB index) or non-terminal (i.e. 
a PLY index) + */ always_inline u32 ip4_fib_mtrie_leaf_is_terminal (ip4_fib_mtrie_leaf_t n) { return n & 1; } +/** + * From the stored slot value extract the LB index value + */ always_inline u32 ip4_fib_mtrie_leaf_get_adj_index (ip4_fib_mtrie_leaf_t n) { @@ -125,35 +190,38 @@ ip4_fib_mtrie_leaf_get_adj_index (ip4_fib_mtrie_leaf_t n) return n >> 1; } -/* Lookup step. Processes 1 byte of 4 byte ip4 address. */ +/** + * @brief Lookup step. Processes 1 byte of 4 byte ip4 address. + */ always_inline ip4_fib_mtrie_leaf_t ip4_fib_mtrie_lookup_step (const ip4_fib_mtrie_t * m, ip4_fib_mtrie_leaf_t current_leaf, const ip4_address_t * dst_address, u32 dst_address_byte_index) { - ip4_fib_mtrie_ply_t *ply; + ip4_fib_mtrie_8_ply_t *ply; + uword current_is_terminal = ip4_fib_mtrie_leaf_is_terminal (current_leaf); if (!current_is_terminal) { - ply = m->ply_pool + (current_leaf >> 1); + ply = ip4_ply_pool + (current_leaf >> 1); return (ply->leaves[dst_address->as_u8[dst_address_byte_index]]); } return current_leaf; } -/* Lookup step. Processes 1 byte of 4 byte ip4 address. */ +/** + * @brief Lookup step number 1. Processes 2 bytes of 4 byte ip4 address. + */ always_inline ip4_fib_mtrie_leaf_t ip4_fib_mtrie_lookup_step_one (const ip4_fib_mtrie_t * m, const ip4_address_t * dst_address) { ip4_fib_mtrie_leaf_t next_leaf; - ip4_fib_mtrie_ply_t *ply; - ply = m->ply_pool; - next_leaf = ply->leaves[dst_address->as_u8[0]]; + next_leaf = m->root_ply.leaves[dst_address->as_u16[0]]; return next_leaf; } diff --git a/src/vnet/ip/ip4_packet.h b/src/vnet/ip/ip4_packet.h index b2c1fcd4..1ff9fbdb 100644 --- a/src/vnet/ip/ip4_packet.h +++ b/src/vnet/ip/ip4_packet.h @@ -52,6 +52,7 @@ typedef union u32 data_u32; /* Aliases. */ u8 as_u8[4]; + u16 as_u16[2]; u32 as_u32; } ip4_address_t; diff --git a/src/vnet/ip/ip4_source_check.c b/src/vnet/ip/ip4_source_check.c index 7c2b7be8..6831066e 100644 --- a/src/vnet/ip/ip4_source_check.c +++ b/src/vnet/ip/ip4_source_check.c @@ -165,11 +165,6 @@ ip4_source_check_inline (vlib_main_t * vm, leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, &ip0->src_address); leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, &ip1->src_address); - leaf0 = - ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 1); - leaf1 = - ip4_fib_mtrie_lookup_step (mtrie1, leaf1, &ip1->src_address, 1); - leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 2); leaf1 = @@ -248,9 +243,6 @@ ip4_source_check_inline (vlib_main_t * vm, leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, &ip0->src_address); - leaf0 = - ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 1); - leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 2); diff --git a/src/vnet/ip/ip6.h b/src/vnet/ip/ip6.h index 8fa9a479..bf7ec7d5 100644 --- a/src/vnet/ip/ip6.h +++ b/src/vnet/ip/ip6.h @@ -153,6 +153,9 @@ typedef struct ip6_main_t /* Pool of FIBs. */ struct fib_table_t_ *fibs; + /* Pool of V6 FIBs. */ + ip6_fib_t *v6_fibs; + /** Vector of MFIBs. 
*/ struct mfib_table_t_ *mfibs; diff --git a/src/vnet/ip/ip_api.c b/src/vnet/ip/ip_api.c index e3a1fee8..b9f1782b 100644 --- a/src/vnet/ip/ip_api.c +++ b/src/vnet/ip/ip_api.c @@ -240,6 +240,21 @@ send_ip_fib_details (vpe_api_main_t * am, vl_msg_api_send_shmem (q, (u8 *) & mp); } +typedef struct vl_api_ip_fib_dump_walk_ctx_t_ +{ + fib_node_index_t *feis; +} vl_api_ip_fib_dump_walk_ctx_t; + +static int +vl_api_ip_fib_dump_walk (fib_node_index_t fei, void *arg) +{ + vl_api_ip_fib_dump_walk_ctx_t *ctx = arg; + + vec_add1 (ctx->feis, fei); + + return (1); +} + static void vl_api_ip_fib_dump_t_handler (vl_api_ip_fib_dump_t * mp) { @@ -247,12 +262,13 @@ vl_api_ip_fib_dump_t_handler (vl_api_ip_fib_dump_t * mp) unix_shared_memory_queue_t *q; ip4_main_t *im = &ip4_main; fib_table_t *fib_table; - fib_node_index_t lfei, *lfeip, *lfeis = NULL; - mpls_label_t key; + fib_node_index_t *lfeip; fib_prefix_t pfx; u32 fib_index; fib_route_path_encode_t *api_rpaths; - int i; + vl_api_ip_fib_dump_walk_ctx_t ctx = { + .feis = NULL, + }; q = vl_api_client_index_to_input_queue (mp->client_index); if (q == 0) @@ -261,19 +277,16 @@ vl_api_ip_fib_dump_t_handler (vl_api_ip_fib_dump_t * mp) /* *INDENT-OFF* */ pool_foreach (fib_table, im->fibs, ({ - for (i = 0; i < ARRAY_LEN (fib_table->v4.fib_entry_by_dst_address); i++) - { - hash_foreach(key, lfei, fib_table->v4.fib_entry_by_dst_address[i], - ({ - vec_add1(lfeis, lfei); - })); - } + fib_table_walk(fib_table->ft_index, + FIB_PROTOCOL_IP4, + vl_api_ip_fib_dump_walk, + &ctx); })); /* *INDENT-ON* */ - vec_sort_with_function (lfeis, fib_entry_cmp_for_sort); + vec_sort_with_function (ctx.feis, fib_entry_cmp_for_sort); - vec_foreach (lfeip, lfeis) + vec_foreach (lfeip, ctx.feis) { fib_entry_get_prefix (*lfeip, &pfx); fib_index = fib_entry_get_fib_index (*lfeip); @@ -286,7 +299,7 @@ vl_api_ip_fib_dump_t_handler (vl_api_ip_fib_dump_t * mp) vec_free (api_rpaths); } - vec_free (lfeis); + vec_free (ctx.feis); } static void @@ -377,10 +390,10 @@ api_ip6_fib_table_get_all (unix_shared_memory_queue_t * q, { vpe_api_main_t *am = &vpe_api_main; ip6_main_t *im6 = &ip6_main; - ip6_fib_t *fib = &fib_table->v6; fib_node_index_t *fib_entry_index; api_ip6_fib_show_ctx_t ctx = { - .fib_index = fib->index,.entries = NULL, + .fib_index = fib_table->ft_index, + .entries = NULL, }; fib_route_path_encode_t *api_rpaths; fib_prefix_t pfx; diff --git a/src/vnet/mpls/interface.c b/src/vnet/mpls/interface.c index f631dc76..a085aaa2 100644 --- a/src/vnet/mpls/interface.c +++ b/src/vnet/mpls/interface.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/src/vnet/mpls/mpls.h b/src/vnet/mpls/mpls.h index 300f2cfd..b0125e60 100644 --- a/src/vnet/mpls/mpls.h +++ b/src/vnet/mpls/mpls.h @@ -30,29 +30,6 @@ typedef enum { MPLS_N_ERROR, } mpls_error_t; -#define MPLS_FIB_DEFAULT_TABLE_ID 0 - -/** - * Type exposure is to allow the DP fast/inlined access - */ -#define MPLS_FIB_KEY_SIZE 21 -#define MPLS_FIB_DB_SIZE (1 << (MPLS_FIB_KEY_SIZE-1)) - -typedef struct mpls_fib_t_ -{ - /** - * A hash table of entries. 21 bit key - * Hash table for reduced memory footprint - */ - uword * mf_entries; - - /** - * The load-balance indeices keyed by 21 bit label+eos bit. - * A flat array for maximum lookup performace. 
- */ - index_t mf_lbs[MPLS_FIB_DB_SIZE]; -} mpls_fib_t; - /** * @brief Definition of a callback for receiving MPLS interface state change * notifications @@ -67,6 +44,9 @@ typedef struct { /** A pool of all the MPLS FIBs */ struct fib_table_t_ *fibs; + /** A pool of all the MPLS FIBs */ + struct mpls_fib_t_ *mpls_fibs; + /** A hash table to lookup the mpls_fib by table ID */ uword *fib_index_by_table_id; diff --git a/src/vnet/mpls/mpls_api.c b/src/vnet/mpls/mpls_api.c index a36a5046..f1aef6c9 100644 --- a/src/vnet/mpls/mpls_api.c +++ b/src/vnet/mpls/mpls_api.c @@ -26,6 +26,7 @@ #include #include #include +#include #include @@ -369,6 +370,21 @@ send_mpls_fib_details (vpe_api_main_t * am, vl_msg_api_send_shmem (q, (u8 *) & mp); } +typedef struct vl_api_mpls_fib_dump_table_walk_ctx_t_ +{ + fib_node_index_t *lfeis; +} vl_api_mpls_fib_dump_table_walk_ctx_t; + +static int +vl_api_mpls_fib_dump_table_walk (fib_node_index_t fei, void *arg) +{ + vl_api_mpls_fib_dump_table_walk_ctx_t *ctx = arg; + + vec_add1 (ctx->lfeis, fei); + + return (1); +} + static void vl_api_mpls_fib_dump_t_handler (vl_api_mpls_fib_dump_t * mp) { @@ -376,28 +392,30 @@ vl_api_mpls_fib_dump_t_handler (vl_api_mpls_fib_dump_t * mp) unix_shared_memory_queue_t *q; mpls_main_t *mm = &mpls_main; fib_table_t *fib_table; - fib_node_index_t lfei, *lfeip, *lfeis = NULL; - mpls_label_t key; + mpls_fib_t *mpls_fib; + fib_node_index_t *lfeip = NULL; fib_prefix_t pfx; u32 fib_index; fib_route_path_encode_t *api_rpaths; + vl_api_mpls_fib_dump_table_walk_ctx_t ctx = { + .lfeis = NULL, + }; q = vl_api_client_index_to_input_queue (mp->client_index); if (q == 0) return; /* *INDENT-OFF* */ - pool_foreach (fib_table, mm->fibs, + pool_foreach (mpls_fib, mm->mpls_fibs, ({ - hash_foreach(key, lfei, fib_table->mpls.mf_entries, - ({ - vec_add1(lfeis, lfei); - })); + mpls_fib_table_walk (mpls_fib, + vl_api_mpls_fib_dump_table_walk, + &ctx); })); /* *INDENT-ON* */ - vec_sort_with_function (lfeis, fib_entry_cmp_for_sort); + vec_sort_with_function (ctx.lfeis, fib_entry_cmp_for_sort); - vec_foreach (lfeip, lfeis) + vec_foreach (lfeip, ctx.lfeis) { fib_entry_get_prefix (*lfeip, &pfx); fib_index = fib_entry_get_fib_index (*lfeip); @@ -410,7 +428,7 @@ vl_api_mpls_fib_dump_t_handler (vl_api_mpls_fib_dump_t * mp) vec_free (api_rpaths); } - vec_free (lfeis); + vec_free (ctx.lfeis); } /* diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c index 14ccd864..09ae8b8f 100644 --- a/src/vpp/api/api.c +++ b/src/vpp/api/api.c @@ -896,9 +896,10 @@ ip4_reset_fib_t_handler (vl_api_reset_fib_t * mp) /* *INDENT-OFF* */ pool_foreach (fib_table, im4->fibs, ({ - fib = &fib_table->v4; vnet_sw_interface_t * si; + fib = pool_elt_at_index (im4->v4_fibs, fib_table->ft_index); + if (fib->table_id != target_fib_id) continue; @@ -964,7 +965,8 @@ ip6_reset_fib_t_handler (vl_api_reset_fib_t * mp) pool_foreach (fib_table, im6->fibs, ({ vnet_sw_interface_t * si; - fib = &(fib_table->v6); + + fib = pool_elt_at_index (im6->v6_fibs, fib_table->ft_index); if (fib->table_id != target_fib_id) continue; diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c index 1927da0b..042d02e2 100644 --- a/src/vpp/stats/stats.c +++ b/src/vpp/stats/stats.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #define STATS_DEBUG 0 @@ -576,6 +577,7 @@ do_ip4_fibs (stats_main_t * sm) static ip4_route_t *routes; ip4_route_t *r; fib_table_t *fib; + ip4_fib_t *v4_fib; ip_lookup_main_t *lm = &im4->lookup_main; static uword *results; vl_api_vnet_ip4_fib_counters_t *mp = 0; @@ -592,6 +594,8 @@ again: while 
((fib - im4->fibs) < start_at_fib_index) continue; + v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index); + if (mp == 0) { items_this_message = IP4_FIB_COUNTER_BATCH_SIZE; @@ -615,9 +619,9 @@ again: vec_reset_length (routes); vec_reset_length (results); - for (i = 0; i < ARRAY_LEN (fib->v4.fib_entry_by_dst_address); i++) + for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++) { - uword *hash = fib->v4.fib_entry_by_dst_address[i]; + uword *hash = v4_fib->fib_entry_by_dst_address[i]; hash_pair_t *p; ip4_route_t x; -- cgit 1.2.3-korg From 586afd762bfa149f5ca167bd5fd5a0cd59ce94fe Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Wed, 5 Apr 2017 19:18:20 +0200 Subject: Use thread local storage for thread index This patch deprecates stack-based thread identification and also removes the requirement that thread stacks are adjacent. Finally, possibly annoying for some folks, it renames all occurrences of cpu_index and cpu_number to thread index. Using the word "cpu" is misleading here, as a thread can be migrated to a different CPU; the value is also unrelated to the Linux CPU index. Change-Id: I68cdaf661e701d2336fc953dcb9978d10a70f7c1 Signed-off-by: Damjan Marion --- src/examples/srv6-sample-localsid/node.c | 4 +- src/plugins/dpdk/buffer.c | 2 +- src/plugins/dpdk/device/device.c | 8 +- src/plugins/dpdk/device/dpdk_priv.h | 8 +- src/plugins/dpdk/device/init.c | 2 +- src/plugins/dpdk/device/node.c | 32 +++--- src/plugins/dpdk/hqos/hqos.c | 16 +-- src/plugins/dpdk/ipsec/cli.c | 8 +- src/plugins/dpdk/ipsec/crypto_node.c | 4 +- src/plugins/dpdk/ipsec/esp.h | 4 +- src/plugins/dpdk/ipsec/esp_decrypt.c | 4 +- src/plugins/dpdk/ipsec/esp_encrypt.c | 5 +- src/plugins/dpdk/ipsec/ipsec.c | 2 +- src/plugins/dpdk/ipsec/ipsec.h | 4 +- src/plugins/dpdk/main.c | 2 +- src/plugins/flowperpkt/l2_node.c | 2 +- src/plugins/flowperpkt/node.c | 2 +- src/plugins/ioam/export-common/ioam_export.h | 6 +- .../ioam/ip6/ioam_cache_tunnel_select_node.c | 16 +-- src/plugins/ixge/ixge.c | 2 +- src/plugins/lb/lb.c | 8 +- src/plugins/lb/node.c | 22 ++-- src/plugins/lb/refcount.c | 8 +- src/plugins/lb/refcount.h | 4 +- src/plugins/memif/node.c | 35 +++--- src/plugins/snat/in2out.c | 110 +++++++++--------- src/plugins/snat/out2in.c | 102 ++++++++--------- src/plugins/snat/snat.h | 10 +- src/vlib/buffer.c | 6 +- src/vlib/buffer_funcs.h | 4 +- src/vlib/cli.c | 6 +- src/vlib/counter.h | 16 +-- src/vlib/error.c | 2 +- src/vlib/global_funcs.h | 2 +- src/vlib/main.c | 14 +-- src/vlib/main.h | 2 +- src/vlib/node.c | 2 +- src/vlib/node.h | 6 +- src/vlib/node_funcs.h | 8 +- src/vlib/threads.c | 69 ++++------ src/vlib/threads.h | 21 ++-- src/vlib/unix/cj.c | 7 +- src/vlib/unix/cj.h | 2 +- src/vlib/unix/main.c | 43 +++---- src/vnet/adj/adj_l2.c | 4 +- src/vnet/adj/adj_midchain.c | 8 +- src/vnet/adj/adj_nsh.c | 4 +- src/vnet/classify/vnet_classify.c | 16 +-- src/vnet/cop/ip4_whitelist.c | 8 +- src/vnet/cop/ip6_whitelist.c | 8 +- src/vnet/devices/af_packet/node.c | 20 ++-- src/vnet/devices/devices.c | 61 +++++----- src/vnet/devices/devices.h | 18 +-- src/vnet/devices/netmap/node.c | 24 ++-- src/vnet/devices/ssvm/node.c | 6 +- src/vnet/devices/virtio/vhost-user.c | 127 +++++++++++---------- src/vnet/dpo/lookup_dpo.c | 20 ++-- src/vnet/dpo/replicate_dpo.c | 12 +- src/vnet/ethernet/arp.c | 2 +- src/vnet/ethernet/interface.c | 7 +- src/vnet/ethernet/node.c | 14 +-- src/vnet/gre/node.c | 8 +- src/vnet/interface.h | 2 +- src/vnet/interface_output.c | 53 ++++----- src/vnet/ip/ip4_forward.c | 34 +++--- src/vnet/ip/ip4_input.c | 8 +- 
src/vnet/ip/ip6_forward.c | 24 ++-- src/vnet/ip/ip6_input.c | 8 +- src/vnet/ip/ip6_neighbor.c | 4 +- src/vnet/ipsec/esp.h | 8 +- src/vnet/ipsec/esp_decrypt.c | 13 ++- src/vnet/ipsec/esp_encrypt.c | 13 ++- src/vnet/ipsec/ikev2.c | 64 ++++++----- src/vnet/ipsec/ipsec.h | 12 +- src/vnet/ipsec/ipsec_if.c | 2 +- src/vnet/l2/l2_bvi.h | 2 +- src/vnet/l2/l2_input.c | 14 +-- src/vnet/l2/l2_output.c | 6 +- src/vnet/l2tp/decap.c | 2 +- src/vnet/l2tp/encap.c | 2 +- src/vnet/l2tp/l2tp.c | 6 +- src/vnet/lisp-gpe/decap.c | 16 +-- src/vnet/lldp/lldp_input.c | 2 +- src/vnet/map/ip4_map.c | 14 +-- src/vnet/map/ip4_map_t.c | 12 +- src/vnet/map/ip6_map.c | 19 +-- src/vnet/map/ip6_map_t.c | 12 +- src/vnet/mpls/mpls_input.c | 8 +- src/vnet/mpls/mpls_lookup.c | 20 ++-- src/vnet/mpls/mpls_output.c | 10 +- src/vnet/pg/input.c | 4 +- src/vnet/replication.c | 20 ++-- src/vnet/replication.h | 2 +- src/vnet/session/node.c | 2 +- src/vnet/sr/sr_localsid.c | 44 +++---- src/vnet/tcp/builtin_client.c | 2 +- src/vnet/tcp/tcp.c | 8 +- src/vnet/tcp/tcp_debug.h | 2 +- src/vnet/tcp/tcp_input.c | 10 +- src/vnet/tcp/tcp_output.c | 20 ++-- src/vnet/udp/udp_input.c | 2 +- src/vnet/unix/tapcli.c | 2 +- src/vnet/unix/tuntap.c | 4 +- src/vnet/vxlan-gpe/decap.c | 10 +- src/vnet/vxlan-gpe/encap.c | 12 +- src/vnet/vxlan/decap.c | 10 +- src/vnet/vxlan/encap.c | 12 +- src/vpp/stats/stats.c | 14 +-- src/vpp/stats/stats.h | 2 +- 109 files changed, 790 insertions(+), 791 deletions(-) (limited to 'src/vpp/stats/stats.c') diff --git a/src/examples/srv6-sample-localsid/node.c b/src/examples/srv6-sample-localsid/node.c index 7bae9cd7..e83e2352 100644 --- a/src/examples/srv6-sample-localsid/node.c +++ b/src/examples/srv6-sample-localsid/node.c @@ -114,7 +114,7 @@ srv6_localsid_sample_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_fram from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -168,7 +168,7 @@ srv6_localsid_sample_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_fram /* This increments the SRv6 per LocalSID counters.*/ vlib_increment_combined_counter (((next0 == SRV6_SAMPLE_LOCALSID_NEXT_ERROR) ? 
&(sm->sr_ls_invalid_counters) : &(sm->sr_ls_valid_counters)), - cpu_index, + thread_index, ls0 - sm->localsids, 1, vlib_buffer_length_in_chain (vm, b0)); diff --git a/src/plugins/dpdk/buffer.c b/src/plugins/dpdk/buffer.c index 2765c292..c80b3fa8 100644 --- a/src/plugins/dpdk/buffer.c +++ b/src/plugins/dpdk/buffer.c @@ -132,7 +132,7 @@ dpdk_buffer_delete_free_list (vlib_main_t * vm, u32 free_list_index) u32 merge_index; int i; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); f = vlib_buffer_get_free_list (vm, free_list_index); diff --git a/src/plugins/dpdk/device/device.c b/src/plugins/dpdk/device/device.c index 50b26689..91661246 100644 --- a/src/plugins/dpdk/device/device.c +++ b/src/plugins/dpdk/device/device.c @@ -243,7 +243,7 @@ static_always_inline ASSERT (ring->tx_tail == 0); n_retry = 16; - queue_id = vm->cpu_index; + queue_id = vm->thread_index; do { @@ -266,7 +266,7 @@ static_always_inline { /* no wrap, transmit in one burst */ dpdk_device_hqos_per_worker_thread_t *hqos = - &xd->hqos_wt[vm->cpu_index]; + &xd->hqos_wt[vm->thread_index]; ASSERT (hqos->swq != NULL); @@ -332,7 +332,7 @@ dpdk_buffer_recycle (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_buffer_t * b, u32 bi, struct rte_mbuf **mbp) { dpdk_main_t *dm = &dpdk_main; - u32 my_cpu = vm->cpu_index; + u32 my_cpu = vm->thread_index; struct rte_mbuf *mb_new; if (PREDICT_FALSE (b->flags & VLIB_BUFFER_RECYCLE) == 0) @@ -376,7 +376,7 @@ dpdk_interface_tx (vlib_main_t * vm, tx_ring_hdr_t *ring; u32 n_on_ring; - my_cpu = vm->cpu_index; + my_cpu = vm->thread_index; queue_id = my_cpu; diff --git a/src/plugins/dpdk/device/dpdk_priv.h b/src/plugins/dpdk/device/dpdk_priv.h index dd40ff48..52b4ca4b 100644 --- a/src/plugins/dpdk/device/dpdk_priv.h +++ b/src/plugins/dpdk/device/dpdk_priv.h @@ -79,7 +79,7 @@ dpdk_update_counters (dpdk_device_t * xd, f64 now) { vlib_simple_counter_main_t *cm; vnet_main_t *vnm = vnet_get_main (); - u32 my_cpu = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u64 rxerrors, last_rxerrors; /* only update counters for PMD interfaces */ @@ -96,7 +96,7 @@ dpdk_update_counters (dpdk_device_t * xd, f64 now) cm = vec_elt_at_index (vnm->interface_main.sw_if_counters, VNET_INTERFACE_COUNTER_RX_NO_BUF); - vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index, + vlib_increment_simple_counter (cm, thread_index, xd->vlib_sw_if_index, xd->stats.rx_nombuf - xd->last_stats.rx_nombuf); } @@ -107,7 +107,7 @@ dpdk_update_counters (dpdk_device_t * xd, f64 now) cm = vec_elt_at_index (vnm->interface_main.sw_if_counters, VNET_INTERFACE_COUNTER_RX_MISS); - vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index, + vlib_increment_simple_counter (cm, thread_index, xd->vlib_sw_if_index, xd->stats.imissed - xd->last_stats.imissed); } @@ -119,7 +119,7 @@ dpdk_update_counters (dpdk_device_t * xd, f64 now) cm = vec_elt_at_index (vnm->interface_main.sw_if_counters, VNET_INTERFACE_COUNTER_RX_ERROR); - vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index, + vlib_increment_simple_counter (cm, thread_index, xd->vlib_sw_if_index, rxerrors - last_rxerrors); } diff --git a/src/plugins/dpdk/device/init.c b/src/plugins/dpdk/device/init.c index 538db6cb..7eaf8da7 100755 --- a/src/plugins/dpdk/device/init.c +++ b/src/plugins/dpdk/device/init.c @@ -324,7 +324,7 @@ dpdk_port_setup (dpdk_main_t * dm, dpdk_device_t * xd) int rv; int j; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) { diff --git 
a/src/plugins/dpdk/device/node.c b/src/plugins/dpdk/device/node.c index e740fd18..b10e0fad 100644 --- a/src/plugins/dpdk/device/node.c +++ b/src/plugins/dpdk/device/node.c @@ -283,7 +283,7 @@ dpdk_buffer_init_from_template (void *d0, void *d1, void *d2, void *d3, */ static_always_inline u32 dpdk_device_input (dpdk_main_t * dm, dpdk_device_t * xd, - vlib_node_runtime_t * node, u32 cpu_index, u16 queue_id, + vlib_node_runtime_t * node, u32 thread_index, u16 queue_id, int maybe_multiseg) { u32 n_buffers; @@ -294,7 +294,7 @@ dpdk_device_input (dpdk_main_t * dm, dpdk_device_t * xd, uword n_rx_bytes = 0; u32 n_trace, trace_cnt __attribute__ ((unused)); vlib_buffer_free_list_t *fl; - vlib_buffer_t *bt = vec_elt_at_index (dm->buffer_templates, cpu_index); + vlib_buffer_t *bt = vec_elt_at_index (dm->buffer_templates, thread_index); if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0) return 0; @@ -306,7 +306,7 @@ dpdk_device_input (dpdk_main_t * dm, dpdk_device_t * xd, return 0; } - vec_reset_length (xd->d_trace_buffers[cpu_index]); + vec_reset_length (xd->d_trace_buffers[thread_index]); trace_cnt = n_trace = vlib_get_trace_count (vm, node); if (n_trace > 0) @@ -318,7 +318,7 @@ dpdk_device_input (dpdk_main_t * dm, dpdk_device_t * xd, { struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index++]; vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb); - vec_add1 (xd->d_trace_buffers[cpu_index], + vec_add1 (xd->d_trace_buffers[thread_index], vlib_get_buffer_index (vm, b)); } } @@ -546,20 +546,22 @@ dpdk_device_input (dpdk_main_t * dm, dpdk_device_t * xd, vlib_put_next_frame (vm, node, next_index, n_left_to_next); } - if (PREDICT_FALSE (vec_len (xd->d_trace_buffers[cpu_index]) > 0)) + if (PREDICT_FALSE (vec_len (xd->d_trace_buffers[thread_index]) > 0)) { - dpdk_rx_trace (dm, node, xd, queue_id, xd->d_trace_buffers[cpu_index], - vec_len (xd->d_trace_buffers[cpu_index])); - vlib_set_trace_count (vm, node, n_trace - - vec_len (xd->d_trace_buffers[cpu_index])); + dpdk_rx_trace (dm, node, xd, queue_id, + xd->d_trace_buffers[thread_index], + vec_len (xd->d_trace_buffers[thread_index])); + vlib_set_trace_count (vm, node, + n_trace - + vec_len (xd->d_trace_buffers[thread_index])); } vlib_increment_combined_counter (vnet_get_main ()->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, xd->vlib_sw_if_index, mb_index, n_rx_bytes); + thread_index, xd->vlib_sw_if_index, mb_index, n_rx_bytes); - vnet_device_increment_rx_packets (cpu_index, mb_index); + vnet_device_increment_rx_packets (thread_index, mb_index); return mb_index; } @@ -630,19 +632,19 @@ dpdk_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f) dpdk_device_t *xd; uword n_rx_packets = 0; dpdk_device_and_queue_t *dq; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); /* * Poll all devices on this cpu for input/interrupts. 
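 * ('This cpu' really means this worker thread: devices_by_cpu is indexed
 * by thread_index, which after this series comes from thread-local storage
 * rather than being derived from the stack address. Conceptually, with the
 * exact names assumed:
 *
 *   __thread uword __os_thread_index;
 *   static inline uword vlib_get_thread_index (void)
 *   { return __os_thread_index; }
 *
 * so worker stacks no longer need to be adjacent, and the device-to-thread
 * mapping survives the kernel migrating a thread to another CPU.)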
*/ /* *INDENT-OFF* */ - vec_foreach (dq, dm->devices_by_cpu[cpu_index]) + vec_foreach (dq, dm->devices_by_cpu[thread_index]) { xd = vec_elt_at_index(dm->devices, dq->device); if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG) - n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id, /* maybe_multiseg */ 1); + n_rx_packets += dpdk_device_input (dm, xd, node, thread_index, dq->queue_id, /* maybe_multiseg */ 1); else - n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id, /* maybe_multiseg */ 0); + n_rx_packets += dpdk_device_input (dm, xd, node, thread_index, dq->queue_id, /* maybe_multiseg */ 0); } /* *INDENT-ON* */ diff --git a/src/plugins/dpdk/hqos/hqos.c b/src/plugins/dpdk/hqos/hqos.c index a288fca7..8b251beb 100644 --- a/src/plugins/dpdk/hqos/hqos.c +++ b/src/plugins/dpdk/hqos/hqos.c @@ -397,7 +397,7 @@ static_always_inline void dpdk_hqos_thread_internal_hqos_dbg_bypass (vlib_main_t * vm) { dpdk_main_t *dm = &dpdk_main; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; u32 dev_pos; dev_pos = 0; @@ -405,12 +405,12 @@ dpdk_hqos_thread_internal_hqos_dbg_bypass (vlib_main_t * vm) { vlib_worker_thread_barrier_check (); - u32 n_devs = vec_len (dm->devices_by_hqos_cpu[cpu_index]); + u32 n_devs = vec_len (dm->devices_by_hqos_cpu[thread_index]); if (dev_pos >= n_devs) dev_pos = 0; dpdk_device_and_queue_t *dq = - vec_elt_at_index (dm->devices_by_hqos_cpu[cpu_index], dev_pos); + vec_elt_at_index (dm->devices_by_hqos_cpu[thread_index], dev_pos); dpdk_device_t *xd = vec_elt_at_index (dm->devices, dq->device); dpdk_device_hqos_per_hqos_thread_t *hqos = xd->hqos_ht; @@ -479,7 +479,7 @@ static_always_inline void dpdk_hqos_thread_internal (vlib_main_t * vm) { dpdk_main_t *dm = &dpdk_main; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; u32 dev_pos; dev_pos = 0; @@ -487,7 +487,7 @@ dpdk_hqos_thread_internal (vlib_main_t * vm) { vlib_worker_thread_barrier_check (); - u32 n_devs = vec_len (dm->devices_by_hqos_cpu[cpu_index]); + u32 n_devs = vec_len (dm->devices_by_hqos_cpu[thread_index]); if (PREDICT_FALSE (n_devs == 0)) { dev_pos = 0; @@ -497,7 +497,7 @@ dpdk_hqos_thread_internal (vlib_main_t * vm) dev_pos = 0; dpdk_device_and_queue_t *dq = - vec_elt_at_index (dm->devices_by_hqos_cpu[cpu_index], dev_pos); + vec_elt_at_index (dm->devices_by_hqos_cpu[thread_index], dev_pos); dpdk_device_t *xd = vec_elt_at_index (dm->devices, dq->device); dpdk_device_hqos_per_hqos_thread_t *hqos = xd->hqos_ht; @@ -586,7 +586,7 @@ dpdk_hqos_thread (vlib_worker_thread_t * w) vm = vlib_get_main (); - ASSERT (vm->cpu_index == os_get_cpu_number ()); + ASSERT (vm->thread_index == vlib_get_thread_index ()); clib_time_init (&vm->clib_time); clib_mem_set_heap (w->thread_mheap); @@ -595,7 +595,7 @@ dpdk_hqos_thread (vlib_worker_thread_t * w) while (tm->worker_thread_release == 0) vlib_worker_thread_barrier_check (); - if (vec_len (dm->devices_by_hqos_cpu[vm->cpu_index]) == 0) + if (vec_len (dm->devices_by_hqos_cpu[vm->thread_index]) == 0) return clib_error ("current I/O TX thread does not have any devices assigned to it"); diff --git a/src/plugins/dpdk/ipsec/cli.c b/src/plugins/dpdk/ipsec/cli.c index cd0a6037..3ae8c9b8 100644 --- a/src/plugins/dpdk/ipsec/cli.c +++ b/src/plugins/dpdk/ipsec/cli.c @@ -42,8 +42,8 @@ dpdk_ipsec_show_mapping (vlib_main_t * vm, u16 detail_display) for (i = 0; i < tm->n_vlib_mains; i++) { uword key, data; - u32 cpu_index = vlib_mains[i]->cpu_index; - crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index]; + u32 thread_index = 
vlib_mains[i]->thread_index; + crypto_worker_main_t *cwm = &dcm->workers_main[thread_index]; u8 *s = 0; if (skip_master) @@ -57,7 +57,7 @@ dpdk_ipsec_show_mapping (vlib_main_t * vm, u16 detail_display) i32 last_cdev = -1; crypto_qp_data_t *qpd; - s = format (s, "%u\t", cpu_index); + s = format (s, "%u\t", thread_index); /* *INDENT-OFF* */ vec_foreach (qpd, cwm->qp_data) @@ -95,7 +95,7 @@ dpdk_ipsec_show_mapping (vlib_main_t * vm, u16 detail_display) cap.sym.auth.algo = p_key->auth_algo; check_algo_is_supported (&cap, auth_str); vlib_cli_output (vm, "%u\t%10s\t%15s\t%3s\t%u\t%u\n", - vlib_mains[i]->cpu_index, cipher_str, auth_str, + vlib_mains[i]->thread_index, cipher_str, auth_str, p_key->is_outbound ? "out" : "in", cwm->qp_data[data].dev_id, cwm->qp_data[data].qp_id); diff --git a/src/plugins/dpdk/ipsec/crypto_node.c b/src/plugins/dpdk/ipsec/crypto_node.c index dc3452b2..a3c45902 100644 --- a/src/plugins/dpdk/ipsec/crypto_node.c +++ b/src/plugins/dpdk/ipsec/crypto_node.c @@ -171,9 +171,9 @@ static uword dpdk_crypto_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index]; + crypto_worker_main_t *cwm = &dcm->workers_main[thread_index]; crypto_qp_data_t *qpd; u32 n_deq = 0; diff --git a/src/plugins/dpdk/ipsec/esp.h b/src/plugins/dpdk/ipsec/esp.h index 320295b1..56f0c756 100644 --- a/src/plugins/dpdk/ipsec/esp.h +++ b/src/plugins/dpdk/ipsec/esp.h @@ -170,9 +170,9 @@ static_always_inline int create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess, u8 is_outbound) { - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index]; + crypto_worker_main_t *cwm = &dcm->workers_main[thread_index]; struct rte_crypto_sym_xform cipher_xform = { 0 }; struct rte_crypto_sym_xform auth_xform = { 0 }; struct rte_crypto_sym_xform *xfs; diff --git a/src/plugins/dpdk/ipsec/esp_decrypt.c b/src/plugins/dpdk/ipsec/esp_decrypt.c index 286e03f8..bab76e3b 100644 --- a/src/plugins/dpdk/ipsec/esp_decrypt.c +++ b/src/plugins/dpdk/ipsec/esp_decrypt.c @@ -88,7 +88,7 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm, { u32 n_left_from, *from, *to_next, next_index; ipsec_main_t *im = &ipsec_main; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); dpdk_crypto_main_t * dcm = &dpdk_crypto_main; dpdk_esp_main_t * em = &dpdk_esp_main; u32 i; @@ -104,7 +104,7 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm, return n_left_from; } - crypto_worker_main_t *cwm = vec_elt_at_index(dcm->workers_main, cpu_index); + crypto_worker_main_t *cwm = vec_elt_at_index(dcm->workers_main, thread_index); u32 n_qps = vec_len(cwm->qp_data); struct rte_crypto_op ** cops_to_enq[n_qps]; u32 n_cop_qp[n_qps], * bi_to_enq[n_qps]; diff --git a/src/plugins/dpdk/ipsec/esp_encrypt.c b/src/plugins/dpdk/ipsec/esp_encrypt.c index 5b03de73..f996d7df 100644 --- a/src/plugins/dpdk/ipsec/esp_encrypt.c +++ b/src/plugins/dpdk/ipsec/esp_encrypt.c @@ -93,7 +93,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, { u32 n_left_from, *from, *to_next, next_index; ipsec_main_t *im = &ipsec_main; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); dpdk_crypto_main_t *dcm = &dpdk_crypto_main; dpdk_esp_main_t *em = &dpdk_esp_main; u32 i; @@ -111,7 +111,8 @@ 
dpdk_esp_encrypt_node_fn (vlib_main_t * vm, return n_left_from; } - crypto_worker_main_t *cwm = vec_elt_at_index (dcm->workers_main, cpu_index); + crypto_worker_main_t *cwm = + vec_elt_at_index (dcm->workers_main, thread_index); u32 n_qps = vec_len (cwm->qp_data); struct rte_crypto_op **cops_to_enq[n_qps]; u32 n_cop_qp[n_qps], *bi_to_enq[n_qps]; diff --git a/src/plugins/dpdk/ipsec/ipsec.c b/src/plugins/dpdk/ipsec/ipsec.c index b0aaaaec..5d8f4fba 100644 --- a/src/plugins/dpdk/ipsec/ipsec.c +++ b/src/plugins/dpdk/ipsec/ipsec.c @@ -289,7 +289,7 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt, if (!map) { clib_warning ("unable to create hash table for worker %u", - vlib_mains[i]->cpu_index); + vlib_mains[i]->thread_index); goto error; } cwm->algo_qp_map = map; diff --git a/src/plugins/dpdk/ipsec/ipsec.h b/src/plugins/dpdk/ipsec/ipsec.h index 28bffc80..f0f793c0 100644 --- a/src/plugins/dpdk/ipsec/ipsec.h +++ b/src/plugins/dpdk/ipsec/ipsec.h @@ -95,8 +95,8 @@ static_always_inline void crypto_alloc_cops () { dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - u32 cpu_index = os_get_cpu_number (); - crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index]; + u32 thread_index = vlib_get_thread_index (); + crypto_worker_main_t *cwm = &dcm->workers_main[thread_index]; unsigned socket_id = rte_socket_id (); crypto_qp_data_t *qpd; diff --git a/src/plugins/dpdk/main.c b/src/plugins/dpdk/main.c index 7ee2a785..942b8b2d 100644 --- a/src/plugins/dpdk/main.c +++ b/src/plugins/dpdk/main.c @@ -39,7 +39,7 @@ rte_delay_us_override (unsigned us) * thread then do not intercept. (Must not be called from an * independent pthread). */ - if (os_get_cpu_number () == 0) + if (vlib_get_thread_index () == 0) { /* * We're in the vlib main thread or a vlib process. Make sure diff --git a/src/plugins/flowperpkt/l2_node.c b/src/plugins/flowperpkt/l2_node.c index 1c2f681e..fdaf81d1 100644 --- a/src/plugins/flowperpkt/l2_node.c +++ b/src/plugins/flowperpkt/l2_node.c @@ -102,7 +102,7 @@ add_to_flow_record_l2 (vlib_main_t * vm, u8 * src_mac, u8 * dst_mac, u16 ethertype, u64 timestamp, u16 length, int do_flush) { - u32 my_cpu_number = vm->cpu_index; + u32 my_cpu_number = vm->thread_index; flow_report_main_t *frm = &flow_report_main; ip4_header_t *ip; udp_header_t *udp; diff --git a/src/plugins/flowperpkt/node.c b/src/plugins/flowperpkt/node.c index f77f087d..0277682d 100644 --- a/src/plugins/flowperpkt/node.c +++ b/src/plugins/flowperpkt/node.c @@ -101,7 +101,7 @@ add_to_flow_record_ipv4 (vlib_main_t * vm, u32 src_address, u32 dst_address, u8 tos, u64 timestamp, u16 length, int do_flush) { - u32 my_cpu_number = vm->cpu_index; + u32 my_cpu_number = vm->thread_index; flow_report_main_t *frm = &flow_report_main; ip4_header_t *ip; udp_header_t *udp; diff --git a/src/plugins/ioam/export-common/ioam_export.h b/src/plugins/ioam/export-common/ioam_export.h index 2bf3fd54..9de0d13b 100644 --- a/src/plugins/ioam/export-common/ioam_export.h +++ b/src/plugins/ioam/export-common/ioam_export.h @@ -477,8 +477,8 @@ do { \ from = vlib_frame_vector_args (F); \ n_left_from = (F)->n_vectors; \ next_index = (N)->cached_next_index; \ - while (__sync_lock_test_and_set ((EM)->lockp[(VM)->cpu_index], 1)); \ - my_buf = ioam_export_get_my_buffer (EM, (VM)->cpu_index); \ + while (__sync_lock_test_and_set ((EM)->lockp[(VM)->thread_index], 1)); \ + my_buf = ioam_export_get_my_buffer (EM, (VM)->thread_index); \ my_buf->touched_at = vlib_time_now (VM); \ while (n_left_from > 0) \ { \ @@ -620,7 +620,7 @@ do { \ } \ vlib_node_increment_counter (VM, 
export_node.index, \ EXPORT_ERROR_RECORDED, pkts_recorded); \ - *(EM)->lockp[(VM)->cpu_index] = 0; \ + *(EM)->lockp[(VM)->thread_index] = 0; \ } while(0) #endif /* __included_ioam_export_h__ */ diff --git a/src/plugins/ioam/ip6/ioam_cache_tunnel_select_node.c b/src/plugins/ioam/ip6/ioam_cache_tunnel_select_node.c index a56dc040..0cf742c9 100644 --- a/src/plugins/ioam/ip6/ioam_cache_tunnel_select_node.c +++ b/src/plugins/ioam/ip6/ioam_cache_tunnel_select_node.c @@ -396,7 +396,7 @@ ip6_reset_ts_hbh_node_fn (vlib_main_t * vm, clib_net_to_host_u32 (tcp0->seq_number) + 1, no_of_responses, now, - vm->cpu_index, &pool_index0)) + vm->thread_index, &pool_index0)) { cache_ts_added++; } @@ -419,7 +419,7 @@ ip6_reset_ts_hbh_node_fn (vlib_main_t * vm, e2e = (ioam_e2e_cache_option_t *) ((u8 *) hbh0 + cm->rewrite_pool_index_offset); - e2e->pool_id = (u8) vm->cpu_index; + e2e->pool_id = (u8) vm->thread_index; e2e->pool_index = pool_index0; ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *) ((u8 *) e2e + @@ -455,7 +455,7 @@ ip6_reset_ts_hbh_node_fn (vlib_main_t * vm, clib_net_to_host_u32 (tcp1->seq_number) + 1, no_of_responses, now, - vm->cpu_index, &pool_index1)) + vm->thread_index, &pool_index1)) { cache_ts_added++; } @@ -479,7 +479,7 @@ ip6_reset_ts_hbh_node_fn (vlib_main_t * vm, e2e = (ioam_e2e_cache_option_t *) ((u8 *) hbh1 + cm->rewrite_pool_index_offset); - e2e->pool_id = (u8) vm->cpu_index; + e2e->pool_id = (u8) vm->thread_index; e2e->pool_index = pool_index1; ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *) ((u8 *) e2e + @@ -562,7 +562,7 @@ ip6_reset_ts_hbh_node_fn (vlib_main_t * vm, clib_net_to_host_u32 (tcp0->seq_number) + 1, no_of_responses, now, - vm->cpu_index, &pool_index0)) + vm->thread_index, &pool_index0)) { cache_ts_added++; } @@ -585,7 +585,7 @@ ip6_reset_ts_hbh_node_fn (vlib_main_t * vm, e2e = (ioam_e2e_cache_option_t *) ((u8 *) hbh0 + cm->rewrite_pool_index_offset); - e2e->pool_id = (u8) vm->cpu_index; + e2e->pool_id = (u8) vm->thread_index; e2e->pool_index = pool_index0; ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *) ((u8 *) e2e + @@ -701,7 +701,7 @@ expired_cache_ts_timer_callback (u32 * expired_timers) ioam_cache_main_t *cm = &ioam_cache_main; int i; u32 pool_index; - u32 thread_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 count = 0; for (i = 0; i < vec_len (expired_timers); i++) @@ -724,7 +724,7 @@ ioam_cache_ts_timer_tick_node_fn (vlib_main_t * vm, vlib_frame_t * f) { ioam_cache_main_t *cm = &ioam_cache_main; - u32 my_thread_index = os_get_cpu_number (); + u32 my_thread_index = vlib_get_thread_index (); struct timespec ts, tsrem; tw_timer_expire_timers_16t_2w_512sl (&cm->timer_wheels[my_thread_index], diff --git a/src/plugins/ixge/ixge.c b/src/plugins/ixge/ixge.c index f3c5cc09..08f5b692 100644 --- a/src/plugins/ixge/ixge.c +++ b/src/plugins/ixge/ixge.c @@ -1887,7 +1887,7 @@ done: vlib_increment_combined_counter (vnet_main. 
interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - 0 /* cpu_index */ , + 0 /* thread_index */ , xd->vlib_sw_if_index, n_packets, dq->rx.n_bytes); diff --git a/src/plugins/lb/lb.c b/src/plugins/lb/lb.c index add81236..addc2a42 100644 --- a/src/plugins/lb/lb.c +++ b/src/plugins/lb/lb.c @@ -63,11 +63,11 @@ u8 *format_lb_main (u8 * s, va_list * args) s = format(s, " #vips: %u\n", pool_elts(lbm->vips)); s = format(s, " #ass: %u\n", pool_elts(lbm->ass) - 1); - u32 cpu_index; - for(cpu_index = 0; cpu_index < tm->n_vlib_mains; cpu_index++ ) { - lb_hash_t *h = lbm->per_cpu[cpu_index].sticky_ht; + u32 thread_index; + for(thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++ ) { + lb_hash_t *h = lbm->per_cpu[thread_index].sticky_ht; if (h) { - s = format(s, "core %d\n", cpu_index); + s = format(s, "core %d\n", thread_index); s = format(s, " timeout: %ds\n", h->timeout); s = format(s, " usage: %d / %d\n", lb_hash_elts(h, lb_hash_time_now(vlib_get_main())), lb_hash_size(h)); } diff --git a/src/plugins/lb/node.c b/src/plugins/lb/node.c index 8b763c53..3171148b 100644 --- a/src/plugins/lb/node.c +++ b/src/plugins/lb/node.c @@ -60,10 +60,10 @@ format_lb_trace (u8 * s, va_list * args) return s; } -lb_hash_t *lb_get_sticky_table(u32 cpu_index) +lb_hash_t *lb_get_sticky_table(u32 thread_index) { lb_main_t *lbm = &lb_main; - lb_hash_t *sticky_ht = lbm->per_cpu[cpu_index].sticky_ht; + lb_hash_t *sticky_ht = lbm->per_cpu[thread_index].sticky_ht; //Check if size changed if (PREDICT_FALSE(sticky_ht && (lbm->per_cpu_sticky_buckets != lb_hash_nbuckets(sticky_ht)))) { @@ -71,8 +71,8 @@ lb_hash_t *lb_get_sticky_table(u32 cpu_index) lb_hash_bucket_t *b; u32 i; lb_hash_foreach_entry(sticky_ht, b, i) { - vlib_refcount_add(&lbm->as_refcount, cpu_index, b->value[i], -1); - vlib_refcount_add(&lbm->as_refcount, cpu_index, 0, 1); + vlib_refcount_add(&lbm->as_refcount, thread_index, b->value[i], -1); + vlib_refcount_add(&lbm->as_refcount, thread_index, 0, 1); } lb_hash_free(sticky_ht); @@ -81,8 +81,8 @@ lb_hash_t *lb_get_sticky_table(u32 cpu_index) //Create if necessary if (PREDICT_FALSE(sticky_ht == NULL)) { - lbm->per_cpu[cpu_index].sticky_ht = lb_hash_alloc(lbm->per_cpu_sticky_buckets, lbm->flow_timeout); - sticky_ht = lbm->per_cpu[cpu_index].sticky_ht; + lbm->per_cpu[thread_index].sticky_ht = lb_hash_alloc(lbm->per_cpu_sticky_buckets, lbm->flow_timeout); + sticky_ht = lbm->per_cpu[thread_index].sticky_ht; clib_warning("Regenerated sticky table %p", sticky_ht); } @@ -153,10 +153,10 @@ lb_node_fn (vlib_main_t * vm, { lb_main_t *lbm = &lb_main; u32 n_left_from, *from, next_index, *to_next, n_left_to_next; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); u32 lb_time = lb_hash_time_now(vm); - lb_hash_t *sticky_ht = lb_get_sticky_table(cpu_index); + lb_hash_t *sticky_ht = lb_get_sticky_table(thread_index); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; @@ -240,9 +240,9 @@ lb_node_fn (vlib_main_t * vm, //Configuration may be changed, vectors resized, etc... 
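/*
 * Aside (illustrative sketch, not part of the patch): the counters and
 * refcounts renamed in this lb hunk are per-thread arrays, so each
 * worker only ever touches slot [thread_index] and the adds below need
 * no atomics.  A reader reconciles by summing every thread's slot, as
 * vlib_refcount_get() does in src/plugins/lb/refcount.c further down.
 * The names N_THREADS, refcount_t and refcount_add are hypothetical
 * stand-ins, not VPP API; allocation of the per-object arrays is elided.
 */
#define N_THREADS 8		/* hypothetical fixed thread count */

typedef struct
{
  long *counters;		/* per-object counters owned by one thread */
} refcount_per_thread_t;

typedef struct
{
  refcount_per_thread_t per_thread[N_THREADS];
} refcount_t;

static inline void
refcount_add (refcount_t * r, unsigned thread_index, unsigned index, long v)
{
  /* lock-free: this slot is written by exactly one thread */
  r->per_thread[thread_index].counters[index] += v;
}

static inline long
refcount_get (refcount_t * r, unsigned index)
{
  long sum = 0;
  unsigned t;
  for (t = 0; t < N_THREADS; t++)	/* a reader sums all threads' slots */
    sum += r->per_thread[t].counters[index];
  return sum;
}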
//Dereference previously used - vlib_refcount_add(&lbm->as_refcount, cpu_index, + vlib_refcount_add(&lbm->as_refcount, thread_index, lb_hash_available_value(sticky_ht, hash0, available_index0), -1); - vlib_refcount_add(&lbm->as_refcount, cpu_index, + vlib_refcount_add(&lbm->as_refcount, thread_index, asindex0, 1); //Add sticky entry @@ -260,7 +260,7 @@ lb_node_fn (vlib_main_t * vm, } vlib_increment_simple_counter(&lbm->vip_counters[counter], - cpu_index, + thread_index, vnet_buffer (p0)->ip.adj_index[VLIB_TX], 1); diff --git a/src/plugins/lb/refcount.c b/src/plugins/lb/refcount.c index 22415c88..6f01ab5a 100644 --- a/src/plugins/lb/refcount.c +++ b/src/plugins/lb/refcount.c @@ -31,10 +31,10 @@ u64 vlib_refcount_get(vlib_refcount_t *r, u32 index) { u64 count = 0; vlib_thread_main_t *tm = vlib_get_thread_main (); - u32 cpu_index; - for (cpu_index = 0; cpu_index < tm->n_vlib_mains; cpu_index++) { - if (r->per_cpu[cpu_index].length > index) - count += r->per_cpu[cpu_index].counters[index]; + u32 thread_index; + for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++) { + if (r->per_cpu[thread_index].length > index) + count += r->per_cpu[thread_index].counters[index]; } return count; } diff --git a/src/plugins/lb/refcount.h b/src/plugins/lb/refcount.h index 8c26e7be..dcfcb3fe 100644 --- a/src/plugins/lb/refcount.h +++ b/src/plugins/lb/refcount.h @@ -45,9 +45,9 @@ typedef struct { void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size); static_always_inline -void vlib_refcount_add(vlib_refcount_t *r, u32 cpu_index, u32 counter_index, i32 v) +void vlib_refcount_add(vlib_refcount_t *r, u32 thread_index, u32 counter_index, i32 v) { - vlib_refcount_per_cpu_t *per_cpu = &r->per_cpu[cpu_index]; + vlib_refcount_per_cpu_t *per_cpu = &r->per_cpu[thread_index]; if (PREDICT_FALSE(counter_index >= per_cpu->length)) __vlib_refcount_resize(per_cpu, clib_max(counter_index + 16, per_cpu->length * 2)); diff --git a/src/plugins/memif/node.c b/src/plugins/memif/node.c index 659d5dfb..cee1f3d1 100644 --- a/src/plugins/memif/node.c +++ b/src/plugins/memif/node.c @@ -94,7 +94,7 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, u32 n_rx_bytes = 0; u32 *to_next = 0; u32 n_free_bufs; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 bi0, bi1; vlib_buffer_t *b0, *b1; u16 ring_size = 1 << mif->log2_ring_size; @@ -105,14 +105,15 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (mif->per_interface_next_index != ~0) next_index = mif->per_interface_next_index; - n_free_bufs = vec_len (nm->rx_buffers[cpu_index]); + n_free_bufs = vec_len (nm->rx_buffers[thread_index]); if (PREDICT_FALSE (n_free_bufs < ring_size)) { - vec_validate (nm->rx_buffers[cpu_index], ring_size + n_free_bufs - 1); + vec_validate (nm->rx_buffers[thread_index], + ring_size + n_free_bufs - 1); n_free_bufs += - vlib_buffer_alloc (vm, &nm->rx_buffers[cpu_index][n_free_bufs], + vlib_buffer_alloc (vm, &nm->rx_buffers[thread_index][n_free_bufs], ring_size); - _vec_len (nm->rx_buffers[cpu_index]) = n_free_bufs; + _vec_len (nm->rx_buffers[thread_index]) = n_free_bufs; } head = ring->head; @@ -158,15 +159,15 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, CLIB_CACHE_LINE_BYTES, LOAD); } /* get empty buffer */ - u32 last_buf = vec_len (nm->rx_buffers[cpu_index]) - 1; - bi0 = nm->rx_buffers[cpu_index][last_buf]; - bi1 = nm->rx_buffers[cpu_index][last_buf - 1]; - _vec_len (nm->rx_buffers[cpu_index]) -= 2; + u32 
last_buf = vec_len (nm->rx_buffers[thread_index]) - 1; + bi0 = nm->rx_buffers[thread_index][last_buf]; + bi1 = nm->rx_buffers[thread_index][last_buf - 1]; + _vec_len (nm->rx_buffers[thread_index]) -= 2; if (last_buf > 4) { - memif_prefetch (vm, nm->rx_buffers[cpu_index][last_buf - 2]); - memif_prefetch (vm, nm->rx_buffers[cpu_index][last_buf - 3]); + memif_prefetch (vm, nm->rx_buffers[thread_index][last_buf - 2]); + memif_prefetch (vm, nm->rx_buffers[thread_index][last_buf - 3]); } /* enqueue buffer */ @@ -256,9 +257,9 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, while (num_slots && n_left_to_next) { /* get empty buffer */ - u32 last_buf = vec_len (nm->rx_buffers[cpu_index]) - 1; - bi0 = nm->rx_buffers[cpu_index][last_buf]; - _vec_len (nm->rx_buffers[cpu_index]) = last_buf; + u32 last_buf = vec_len (nm->rx_buffers[thread_index]) - 1; + bi0 = nm->rx_buffers[thread_index][last_buf]; + _vec_len (nm->rx_buffers[thread_index]) = last_buf; /* enqueue buffer */ to_next[0] = bi0; @@ -315,7 +316,7 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, ring->tail = head; vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters - + VNET_INTERFACE_COUNTER_RX, cpu_index, + + VNET_INTERFACE_COUNTER_RX, thread_index, mif->hw_if_index, n_rx_packets, n_rx_bytes); @@ -327,7 +328,7 @@ memif_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { u32 n_rx_packets = 0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); memif_main_t *nm = &memif_main; memif_if_t *mif; @@ -337,7 +338,7 @@ memif_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, if (mif->flags & MEMIF_IF_FLAG_ADMIN_UP && mif->flags & MEMIF_IF_FLAG_CONNECTED && (mif->if_index % nm->input_cpu_count) == - (cpu_index - nm->input_cpu_first_index)) + (thread_index - nm->input_cpu_first_index)) { if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE) n_rx_packets += diff --git a/src/plugins/snat/in2out.c b/src/plugins/snat/in2out.c index b4961365..e5ee965f 100644 --- a/src/plugins/snat/in2out.c +++ b/src/plugins/snat/in2out.c @@ -212,7 +212,7 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0, snat_session_t ** sessionp, vlib_node_runtime_t * node, u32 next0, - u32 cpu_index) + u32 thread_index) { snat_user_t *u; snat_user_key_t user_key; @@ -246,27 +246,27 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0, if (clib_bihash_search_8_8 (&sm->user_hash, &kv0, &value0)) { /* no, make a new one */ - pool_get (sm->per_thread_data[cpu_index].users, u); + pool_get (sm->per_thread_data[thread_index].users, u); memset (u, 0, sizeof (*u)); u->addr = ip0->src_address; u->fib_index = rx_fib_index0; - pool_get (sm->per_thread_data[cpu_index].list_pool, per_user_list_head_elt); + pool_get (sm->per_thread_data[thread_index].list_pool, per_user_list_head_elt); u->sessions_per_user_list_head_index = per_user_list_head_elt - - sm->per_thread_data[cpu_index].list_pool; + sm->per_thread_data[thread_index].list_pool; - clib_dlist_init (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_init (sm->per_thread_data[thread_index].list_pool, u->sessions_per_user_list_head_index); - kv0.value = u - sm->per_thread_data[cpu_index].users; + kv0.value = u - sm->per_thread_data[thread_index].users; /* add user */ clib_bihash_add_del_8_8 (&sm->user_hash, &kv0, 1 /* is_add */); } else { - u = pool_elt_at_index (sm->per_thread_data[cpu_index].users, + u = pool_elt_at_index (sm->per_thread_data[thread_index].users, value0.value); } @@ -276,25 
+276,25 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0, /* Remove the oldest dynamic translation */ do { oldest_per_user_translation_list_index = - clib_dlist_remove_head (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove_head (sm->per_thread_data[thread_index].list_pool, u->sessions_per_user_list_head_index); ASSERT (oldest_per_user_translation_list_index != ~0); /* add it back to the end of the LRU list */ - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, u->sessions_per_user_list_head_index, oldest_per_user_translation_list_index); /* Get the list element */ oldest_per_user_translation_list_elt = - pool_elt_at_index (sm->per_thread_data[cpu_index].list_pool, + pool_elt_at_index (sm->per_thread_data[thread_index].list_pool, oldest_per_user_translation_list_index); /* Get the session index from the list element */ session_index = oldest_per_user_translation_list_elt->value; /* Get the session */ - s = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, session_index); } while (snat_is_session_static (s)); @@ -346,7 +346,7 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0, } /* Create a new session */ - pool_get (sm->per_thread_data[cpu_index].sessions, s); + pool_get (sm->per_thread_data[thread_index].sessions, s); memset (s, 0, sizeof (*s)); s->outside_address_index = address_index; @@ -362,22 +362,22 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0, } /* Create list elts */ - pool_get (sm->per_thread_data[cpu_index].list_pool, + pool_get (sm->per_thread_data[thread_index].list_pool, per_user_translation_list_elt); - clib_dlist_init (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_init (sm->per_thread_data[thread_index].list_pool, per_user_translation_list_elt - - sm->per_thread_data[cpu_index].list_pool); + sm->per_thread_data[thread_index].list_pool); per_user_translation_list_elt->value = - s - sm->per_thread_data[cpu_index].sessions; + s - sm->per_thread_data[thread_index].sessions; s->per_user_index = per_user_translation_list_elt - - sm->per_thread_data[cpu_index].list_pool; + sm->per_thread_data[thread_index].list_pool; s->per_user_list_head_index = u->sessions_per_user_list_head_index; - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s->per_user_list_head_index, per_user_translation_list_elt - - sm->per_thread_data[cpu_index].list_pool); + sm->per_thread_data[thread_index].list_pool); } s->in2out = *key0; @@ -388,12 +388,12 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0, /* Add to translation hashes */ kv0.key = s->in2out.as_u64; - kv0.value = s - sm->per_thread_data[cpu_index].sessions; + kv0.value = s - sm->per_thread_data[thread_index].sessions; if (clib_bihash_add_del_8_8 (&sm->in2out, &kv0, 1 /* is_add */)) clib_warning ("in2out key add failed"); kv0.key = s->out2in.as_u64; - kv0.value = s - sm->per_thread_data[cpu_index].sessions; + kv0.value = s - sm->per_thread_data[thread_index].sessions; if (clib_bihash_add_del_8_8 (&sm->out2in, &kv0, 1 /* is_add */)) clib_warning ("out2in key add failed"); @@ -403,7 +403,7 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0, worker_by_out_key.port = s->out2in.port; worker_by_out_key.fib_index = s->out2in.fib_index; kv0.key = worker_by_out_key.as_u64; - kv0.value = cpu_index; + kv0.value = thread_index; clib_bihash_add_del_8_8 
(&sm->worker_by_out, &kv0, 1); /* log NAT event */ @@ -465,7 +465,7 @@ snat_in2out_error_t icmp_get_key(icmp46_header_t *icmp0, * * @param[in,out] sm SNAT main * @param[in,out] node SNAT node runtime - * @param[in] cpu_index CPU index + * @param[in] thread_index thread index * @param[in,out] b0 buffer containing packet to be translated * @param[out] p_key address and port before NAT translation * @param[out] p_value address and port after NAT translation @@ -473,7 +473,7 @@ snat_in2out_error_t icmp_get_key(icmp46_header_t *icmp0, * @param d optional parameter */ u32 icmp_match_in2out_slow(snat_main_t *sm, vlib_node_runtime_t *node, - u32 cpu_index, vlib_buffer_t *b0, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, u8 *p_dont_translate, void *d) @@ -524,13 +524,13 @@ u32 icmp_match_in2out_slow(snat_main_t *sm, vlib_node_runtime_t *node, } next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0, - &s0, node, next0, cpu_index); + &s0, node, next0, thread_index); if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP)) goto out; } else - s0 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s0 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, value0.value); out: @@ -548,7 +548,7 @@ out: * * @param[in] sm SNAT main * @param[in,out] node SNAT node runtime - * @param[in] cpu_index CPU index + * @param[in] thread_index thread index * @param[in,out] b0 buffer containing packet to be translated * @param[out] p_key address and port before NAT translation * @param[out] p_value address and port after NAT translation @@ -556,7 +556,7 @@ out: * @param d optional parameter */ u32 icmp_match_in2out_fast(snat_main_t *sm, vlib_node_runtime_t *node, - u32 cpu_index, vlib_buffer_t *b0, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, u8 *p_dont_translate, void *d) @@ -624,7 +624,7 @@ static inline u32 icmp_in2out (snat_main_t *sm, u32 rx_fib_index0, vlib_node_runtime_t * node, u32 next0, - u32 cpu_index, + u32 thread_index, void *d) { snat_session_key_t key0, sm0; @@ -641,7 +641,7 @@ static inline u32 icmp_in2out (snat_main_t *sm, echo0 = (icmp_echo_header_t *)(icmp0+1); - next0_tmp = sm->icmp_match_in2out_cb(sm, node, cpu_index, b0, + next0_tmp = sm->icmp_match_in2out_cb(sm, node, thread_index, b0, &key0, &sm0, &dont_translate, d); if (next0_tmp != ~0) next0 = next0_tmp; @@ -847,11 +847,11 @@ static inline u32 icmp_in2out_slow_path (snat_main_t *sm, vlib_node_runtime_t * node, u32 next0, f64 now, - u32 cpu_index, + u32 thread_index, snat_session_t ** p_s0) { next0 = icmp_in2out(sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, - next0, cpu_index, p_s0); + next0, thread_index, p_s0); snat_session_t * s0 = *p_s0; if (PREDICT_TRUE(next0 != SNAT_IN2OUT_NEXT_DROP && s0)) { @@ -862,9 +862,9 @@ static inline u32 icmp_in2out_slow_path (snat_main_t *sm, /* Per-user LRU list maintenance for dynamic translations */ if (!snat_is_session_static (s0)) { - clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, s0->per_user_index); - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s0->per_user_list_head_index, s0->per_user_index); } @@ -884,7 +884,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, snat_runtime_t * rt = (snat_runtime_t *)node->runtime_data; f64 now = vlib_time_now (vm); u32 stats_node_index; - u32 cpu_index = os_get_cpu_number (); + u32 
thread_index = vlib_get_thread_index (); stats_node_index = is_slow_path ? snat_in2out_slowpath_node.index : snat_in2out_node.index; @@ -977,7 +977,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, { next0 = icmp_in2out_slow_path (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, - node, next0, now, cpu_index, &s0); + node, next0, now, thread_index, &s0); goto trace00; } } @@ -1006,7 +1006,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, goto trace00; next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0, - &s0, node, next0, cpu_index); + &s0, node, next0, thread_index); if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP)) goto trace00; } @@ -1017,7 +1017,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, } } else - s0 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s0 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, value0.value); old_addr0 = ip0->src_address.as_u32; @@ -1063,9 +1063,9 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, /* Per-user LRU list maintenance for dynamic translation */ if (!snat_is_session_static (s0)) { - clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, s0->per_user_index); - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s0->per_user_list_head_index, s0->per_user_index); } @@ -1081,7 +1081,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, t->next_index = next0; t->session_index = ~0; if (s0) - t->session_index = s0 - sm->per_thread_data[cpu_index].sessions; + t->session_index = s0 - sm->per_thread_data[thread_index].sessions; } pkts_processed += next0 != SNAT_IN2OUT_NEXT_DROP; @@ -1117,7 +1117,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, { next1 = icmp_in2out_slow_path (sm, b1, ip1, icmp1, sw_if_index1, rx_fib_index1, node, - next1, now, cpu_index, &s1); + next1, now, thread_index, &s1); goto trace01; } } @@ -1146,7 +1146,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, goto trace01; next1 = slow_path (sm, b1, ip1, rx_fib_index1, &key1, - &s1, node, next1, cpu_index); + &s1, node, next1, thread_index); if (PREDICT_FALSE (next1 == SNAT_IN2OUT_NEXT_DROP)) goto trace01; } @@ -1157,7 +1157,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, } } else - s1 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s1 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, value1.value); old_addr1 = ip1->src_address.as_u32; @@ -1203,9 +1203,9 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, /* Per-user LRU list maintenance for dynamic translation */ if (!snat_is_session_static (s1)) { - clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, s1->per_user_index); - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s1->per_user_list_head_index, s1->per_user_index); } @@ -1220,7 +1220,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, t->next_index = next1; t->session_index = ~0; if (s1) - t->session_index = s1 - sm->per_thread_data[cpu_index].sessions; + t->session_index = s1 - sm->per_thread_data[thread_index].sessions; } pkts_processed += next1 != SNAT_IN2OUT_NEXT_DROP; @@ -1292,7 +1292,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, { next0 = icmp_in2out_slow_path (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, - next0, now, cpu_index, &s0); + next0, now, thread_index, &s0); goto trace0; } } @@ 
-1321,7 +1321,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, goto trace0; next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0, - &s0, node, next0, cpu_index); + &s0, node, next0, thread_index); if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP)) goto trace0; @@ -1333,7 +1333,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, } } else - s0 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s0 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, value0.value); old_addr0 = ip0->src_address.as_u32; @@ -1379,9 +1379,9 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, /* Per-user LRU list maintenance for dynamic translation */ if (!snat_is_session_static (s0)) { - clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, s0->per_user_index); - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s0->per_user_list_head_index, s0->per_user_index); } @@ -1397,7 +1397,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, t->next_index = next0; t->session_index = ~0; if (s0) - t->session_index = s0 - sm->per_thread_data[cpu_index].sessions; + t->session_index = s0 - sm->per_thread_data[thread_index].sessions; } pkts_processed += next0 != SNAT_IN2OUT_NEXT_DROP; @@ -2010,7 +2010,7 @@ snat_in2out_worker_handoff_fn (vlib_main_t * vm, u32 n_left_to_next_worker = 0, *to_next_worker = 0; u32 next_worker_index = 0; u32 current_worker_index = ~0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); ASSERT (vec_len (sm->workers)); @@ -2048,7 +2048,7 @@ snat_in2out_worker_handoff_fn (vlib_main_t * vm, next_worker_index = sm->worker_in2out_cb(ip0, rx_fib_index0); - if (PREDICT_FALSE (next_worker_index != cpu_index)) + if (PREDICT_FALSE (next_worker_index != thread_index)) { do_handoff = 1; diff --git a/src/plugins/snat/out2in.c b/src/plugins/snat/out2in.c index 656e42db..5d308d78 100644 --- a/src/plugins/snat/out2in.c +++ b/src/plugins/snat/out2in.c @@ -129,7 +129,7 @@ create_session_for_static_mapping (snat_main_t *sm, snat_session_key_t in2out, snat_session_key_t out2in, vlib_node_runtime_t * node, - u32 cpu_index) + u32 thread_index) { snat_user_t *u; snat_user_key_t user_key; @@ -146,36 +146,36 @@ create_session_for_static_mapping (snat_main_t *sm, if (clib_bihash_search_8_8 (&sm->user_hash, &kv0, &value0)) { /* no, make a new one */ - pool_get (sm->per_thread_data[cpu_index].users, u); + pool_get (sm->per_thread_data[thread_index].users, u); memset (u, 0, sizeof (*u)); u->addr = in2out.addr; u->fib_index = in2out.fib_index; - pool_get (sm->per_thread_data[cpu_index].list_pool, + pool_get (sm->per_thread_data[thread_index].list_pool, per_user_list_head_elt); u->sessions_per_user_list_head_index = per_user_list_head_elt - - sm->per_thread_data[cpu_index].list_pool; + sm->per_thread_data[thread_index].list_pool; - clib_dlist_init (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_init (sm->per_thread_data[thread_index].list_pool, u->sessions_per_user_list_head_index); - kv0.value = u - sm->per_thread_data[cpu_index].users; + kv0.value = u - sm->per_thread_data[thread_index].users; /* add user */ clib_bihash_add_del_8_8 (&sm->user_hash, &kv0, 1 /* is_add */); /* add non-traslated packets worker lookup */ - kv0.value = cpu_index; + kv0.value = thread_index; clib_bihash_add_del_8_8 (&sm->worker_by_in, &kv0, 1); } else { - u = pool_elt_at_index (sm->per_thread_data[cpu_index].users, + u = pool_elt_at_index 
(sm->per_thread_data[thread_index].users, value0.value); } - pool_get (sm->per_thread_data[cpu_index].sessions, s); + pool_get (sm->per_thread_data[thread_index].sessions, s); memset (s, 0, sizeof (*s)); s->outside_address_index = ~0; @@ -183,22 +183,22 @@ create_session_for_static_mapping (snat_main_t *sm, u->nstaticsessions++; /* Create list elts */ - pool_get (sm->per_thread_data[cpu_index].list_pool, + pool_get (sm->per_thread_data[thread_index].list_pool, per_user_translation_list_elt); - clib_dlist_init (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_init (sm->per_thread_data[thread_index].list_pool, per_user_translation_list_elt - - sm->per_thread_data[cpu_index].list_pool); + sm->per_thread_data[thread_index].list_pool); per_user_translation_list_elt->value = - s - sm->per_thread_data[cpu_index].sessions; + s - sm->per_thread_data[thread_index].sessions; s->per_user_index = - per_user_translation_list_elt - sm->per_thread_data[cpu_index].list_pool; + per_user_translation_list_elt - sm->per_thread_data[thread_index].list_pool; s->per_user_list_head_index = u->sessions_per_user_list_head_index; - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s->per_user_list_head_index, per_user_translation_list_elt - - sm->per_thread_data[cpu_index].list_pool); + sm->per_thread_data[thread_index].list_pool); s->in2out = in2out; s->out2in = out2in; @@ -206,12 +206,12 @@ create_session_for_static_mapping (snat_main_t *sm, /* Add to translation hashes */ kv0.key = s->in2out.as_u64; - kv0.value = s - sm->per_thread_data[cpu_index].sessions; + kv0.value = s - sm->per_thread_data[thread_index].sessions; if (clib_bihash_add_del_8_8 (&sm->in2out, &kv0, 1 /* is_add */)) clib_warning ("in2out key add failed"); kv0.key = s->out2in.as_u64; - kv0.value = s - sm->per_thread_data[cpu_index].sessions; + kv0.value = s - sm->per_thread_data[thread_index].sessions; if (clib_bihash_add_del_8_8 (&sm->out2in, &kv0, 1 /* is_add */)) clib_warning ("out2in key add failed"); @@ -298,7 +298,7 @@ is_interface_addr(snat_main_t *sm, vlib_node_runtime_t *node, u32 sw_if_index0, * * @param[in,out] sm SNAT main * @param[in,out] node SNAT node runtime - * @param[in] cpu_index CPU index + * @param[in] thread_index thread index * @param[in,out] b0 buffer containing packet to be translated * @param[out] p_key address and port before NAT translation * @param[out] p_value address and port after NAT translation @@ -306,7 +306,7 @@ is_interface_addr(snat_main_t *sm, vlib_node_runtime_t *node, u32 sw_if_index0, * @param d optional parameter */ u32 icmp_match_out2in_slow(snat_main_t *sm, vlib_node_runtime_t *node, - u32 cpu_index, vlib_buffer_t *b0, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, u8 *p_dont_translate, void *d) @@ -366,7 +366,7 @@ u32 icmp_match_out2in_slow(snat_main_t *sm, vlib_node_runtime_t *node, /* Create session initiated by host from external network */ s0 = create_session_for_static_mapping(sm, b0, sm0, key0, - node, cpu_index); + node, thread_index); if (!s0) { @@ -375,7 +375,7 @@ u32 icmp_match_out2in_slow(snat_main_t *sm, vlib_node_runtime_t *node, } } else - s0 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s0 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, value0.value); out: @@ -393,7 +393,7 @@ out: * * @param[in] sm SNAT main * @param[in,out] node SNAT node runtime - * @param[in] cpu_index CPU index + * @param[in] thread_index thread index * 
@param[in,out] b0 buffer containing packet to be translated * @param[out] p_key address and port before NAT translation * @param[out] p_value address and port after NAT translation @@ -401,7 +401,7 @@ out: * @param d optional parameter */ u32 icmp_match_out2in_fast(snat_main_t *sm, vlib_node_runtime_t *node, - u32 cpu_index, vlib_buffer_t *b0, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, u8 *p_dont_translate, void *d) @@ -460,7 +460,7 @@ static inline u32 icmp_out2in (snat_main_t *sm, u32 rx_fib_index0, vlib_node_runtime_t * node, u32 next0, - u32 cpu_index, + u32 thread_index, void *d) { snat_session_key_t key0, sm0; @@ -477,7 +477,7 @@ static inline u32 icmp_out2in (snat_main_t *sm, echo0 = (icmp_echo_header_t *)(icmp0+1); - next0_tmp = sm->icmp_match_out2in_cb(sm, node, cpu_index, b0, + next0_tmp = sm->icmp_match_out2in_cb(sm, node, thread_index, b0, &key0, &sm0, &dont_translate, d); if (next0_tmp != ~0) next0 = next0_tmp; @@ -589,11 +589,11 @@ static inline u32 icmp_out2in_slow_path (snat_main_t *sm, u32 rx_fib_index0, vlib_node_runtime_t * node, u32 next0, f64 now, - u32 cpu_index, + u32 thread_index, snat_session_t ** p_s0) { next0 = icmp_out2in(sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, - next0, cpu_index, p_s0); + next0, thread_index, p_s0); snat_session_t * s0 = *p_s0; if (PREDICT_TRUE(next0 != SNAT_OUT2IN_NEXT_DROP && s0)) { @@ -604,9 +604,9 @@ static inline u32 icmp_out2in_slow_path (snat_main_t *sm, /* Per-user LRU list maintenance for dynamic translation */ if (!snat_is_session_static (s0)) { - clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, s0->per_user_index); - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s0->per_user_list_head_index, s0->per_user_index); } @@ -624,7 +624,7 @@ snat_out2in_node_fn (vlib_main_t * vm, u32 pkts_processed = 0; snat_main_t * sm = &snat_main; f64 now = vlib_time_now (vm); - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -712,7 +712,7 @@ snat_out2in_node_fn (vlib_main_t * vm, { next0 = icmp_out2in_slow_path (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, - next0, now, cpu_index, &s0); + next0, now, thread_index, &s0); goto trace0; } @@ -743,7 +743,7 @@ snat_out2in_node_fn (vlib_main_t * vm, /* Create session initiated by host from external network */ s0 = create_session_for_static_mapping(sm, b0, sm0, key0, node, - cpu_index); + thread_index); if (!s0) { b0->error = node->errors[SNAT_OUT2IN_ERROR_NO_TRANSLATION]; @@ -752,7 +752,7 @@ snat_out2in_node_fn (vlib_main_t * vm, } } else - s0 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s0 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, value0.value); old_addr0 = ip0->dst_address.as_u32; @@ -796,9 +796,9 @@ snat_out2in_node_fn (vlib_main_t * vm, /* Per-user LRU list maintenance for dynamic translation */ if (!snat_is_session_static (s0)) { - clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, s0->per_user_index); - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s0->per_user_list_head_index, s0->per_user_index); } @@ -813,7 +813,7 @@ snat_out2in_node_fn (vlib_main_t * vm, 
t->next_index = next0; t->session_index = ~0; if (s0) - t->session_index = s0 - sm->per_thread_data[cpu_index].sessions; + t->session_index = s0 - sm->per_thread_data[thread_index].sessions; } pkts_processed += next0 != SNAT_OUT2IN_NEXT_DROP; @@ -847,7 +847,7 @@ snat_out2in_node_fn (vlib_main_t * vm, { next1 = icmp_out2in_slow_path (sm, b1, ip1, icmp1, sw_if_index1, rx_fib_index1, node, - next1, now, cpu_index, &s1); + next1, now, thread_index, &s1); goto trace1; } @@ -878,7 +878,7 @@ snat_out2in_node_fn (vlib_main_t * vm, /* Create session initiated by host from external network */ s1 = create_session_for_static_mapping(sm, b1, sm1, key1, node, - cpu_index); + thread_index); if (!s1) { b1->error = node->errors[SNAT_OUT2IN_ERROR_NO_TRANSLATION]; @@ -887,7 +887,7 @@ snat_out2in_node_fn (vlib_main_t * vm, } } else - s1 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s1 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, value1.value); old_addr1 = ip1->dst_address.as_u32; @@ -931,9 +931,9 @@ snat_out2in_node_fn (vlib_main_t * vm, /* Per-user LRU list maintenance for dynamic translation */ if (!snat_is_session_static (s1)) { - clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, s1->per_user_index); - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s1->per_user_list_head_index, s1->per_user_index); } @@ -948,7 +948,7 @@ snat_out2in_node_fn (vlib_main_t * vm, t->next_index = next1; t->session_index = ~0; if (s1) - t->session_index = s1 - sm->per_thread_data[cpu_index].sessions; + t->session_index = s1 - sm->per_thread_data[thread_index].sessions; } pkts_processed += next1 != SNAT_OUT2IN_NEXT_DROP; @@ -1016,7 +1016,7 @@ snat_out2in_node_fn (vlib_main_t * vm, { next0 = icmp_out2in_slow_path (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, - next0, now, cpu_index, &s0); + next0, now, thread_index, &s0); goto trace00; } @@ -1048,7 +1048,7 @@ snat_out2in_node_fn (vlib_main_t * vm, /* Create session initiated by host from external network */ s0 = create_session_for_static_mapping(sm, b0, sm0, key0, node, - cpu_index); + thread_index); if (!s0) { b0->error = node->errors[SNAT_OUT2IN_ERROR_NO_TRANSLATION]; @@ -1057,7 +1057,7 @@ snat_out2in_node_fn (vlib_main_t * vm, } } else - s0 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s0 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, value0.value); old_addr0 = ip0->dst_address.as_u32; @@ -1101,9 +1101,9 @@ snat_out2in_node_fn (vlib_main_t * vm, /* Per-user LRU list maintenance for dynamic translation */ if (!snat_is_session_static (s0)) { - clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, s0->per_user_index); - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s0->per_user_list_head_index, s0->per_user_index); } @@ -1118,7 +1118,7 @@ snat_out2in_node_fn (vlib_main_t * vm, t->next_index = next0; t->session_index = ~0; if (s0) - t->session_index = s0 - sm->per_thread_data[cpu_index].sessions; + t->session_index = s0 - sm->per_thread_data[thread_index].sessions; } pkts_processed += next0 != SNAT_OUT2IN_NEXT_DROP; @@ -1599,7 +1599,7 @@ snat_out2in_worker_handoff_fn (vlib_main_t * vm, u32 n_left_to_next_worker = 0, *to_next_worker = 0; u32 next_worker_index = 0; u32 current_worker_index = 
~0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); ASSERT (vec_len (sm->workers)); @@ -1637,7 +1637,7 @@ snat_out2in_worker_handoff_fn (vlib_main_t * vm, next_worker_index = sm->worker_out2in_cb(ip0, rx_fib_index0); - if (PREDICT_FALSE (next_worker_index != cpu_index)) + if (PREDICT_FALSE (next_worker_index != thread_index)) { do_handoff = 1; diff --git a/src/plugins/snat/snat.h b/src/plugins/snat/snat.h index 017825c0..f4e1c5c0 100644 --- a/src/plugins/snat/snat.h +++ b/src/plugins/snat/snat.h @@ -221,7 +221,7 @@ struct snat_main_s; typedef u32 snat_icmp_match_function_t (struct snat_main_s *sm, vlib_node_runtime_t *node, - u32 cpu_index, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, @@ -402,22 +402,22 @@ typedef struct { } tcp_udp_header_t; u32 icmp_match_in2out_fast(snat_main_t *sm, vlib_node_runtime_t *node, - u32 cpu_index, vlib_buffer_t *b0, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, u8 *p_dont_translate, void *d); u32 icmp_match_in2out_slow(snat_main_t *sm, vlib_node_runtime_t *node, - u32 cpu_index, vlib_buffer_t *b0, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, u8 *p_dont_translate, void *d); u32 icmp_match_out2in_fast(snat_main_t *sm, vlib_node_runtime_t *node, - u32 cpu_index, vlib_buffer_t *b0, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, u8 *p_dont_translate, void *d); u32 icmp_match_out2in_slow(snat_main_t *sm, vlib_node_runtime_t *node, - u32 cpu_index, vlib_buffer_t *b0, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, u8 *p_dont_translate, void *d); diff --git a/src/vlib/buffer.c b/src/vlib/buffer.c index a517a597..be3b41ef 100644 --- a/src/vlib/buffer.c +++ b/src/vlib/buffer.c @@ -299,7 +299,7 @@ vlib_buffer_validate_alloc_free (vlib_main_t * vm, if (CLIB_DEBUG == 0) return; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); /* smp disaster check */ if (vec_len (vlib_mains) > 1) @@ -355,7 +355,7 @@ vlib_buffer_create_free_list_helper (vlib_main_t * vm, vlib_buffer_free_list_t *f; int i; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); if (!is_default && pool_elts (bm->buffer_free_list_pool) == 0) { @@ -474,7 +474,7 @@ vlib_buffer_delete_free_list_internal (vlib_main_t * vm, u32 free_list_index) u32 merge_index; int i; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); f = vlib_buffer_get_free_list (vm, free_list_index); diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h index 394c336a..328660a3 100644 --- a/src/vlib/buffer_funcs.h +++ b/src/vlib/buffer_funcs.h @@ -209,7 +209,7 @@ always_inline vlib_buffer_known_state_t vlib_buffer_is_known (vlib_main_t * vm, u32 buffer_index) { vlib_buffer_main_t *bm = vm->buffer_main; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); uword *p = hash_get (bm->buffer_known_hash, buffer_index); return p ? 
 p[0] : VLIB_BUFFER_UNKNOWN;
@@ -221,7 +221,7 @@ vlib_buffer_set_known_state (vlib_main_t * vm,
 			     vlib_buffer_known_state_t state)
 {
   vlib_buffer_main_t *bm = vm->buffer_main;
-  ASSERT (os_get_cpu_number () == 0);
+  ASSERT (vlib_get_thread_index () == 0);
   hash_set (bm->buffer_known_hash, buffer_index, state);
 }
diff --git a/src/vlib/cli.c b/src/vlib/cli.c
index f853f655..3cc95076 100644
--- a/src/vlib/cli.c
+++ b/src/vlib/cli.c
@@ -709,7 +709,7 @@ test_heap_validate (vlib_main_t * vm, unformat_input_t * input,
     {
       /* *INDENT-OFF* */
       foreach_vlib_main({
-        heap = clib_per_cpu_mheaps[this_vlib_main->cpu_index];
+        heap = clib_per_cpu_mheaps[this_vlib_main->thread_index];
         mheap = mheap_header(heap);
         mheap->flags |= MHEAP_FLAG_VALIDATE;
         // Turn off small object cache because it delays detection of errors
@@ -722,7 +722,7 @@ test_heap_validate (vlib_main_t * vm, unformat_input_t * input,
     {
       /* *INDENT-OFF* */
       foreach_vlib_main({
-        heap = clib_per_cpu_mheaps[this_vlib_main->cpu_index];
+        heap = clib_per_cpu_mheaps[this_vlib_main->thread_index];
         mheap = mheap_header(heap);
         mheap->flags &= ~MHEAP_FLAG_VALIDATE;
         mheap->flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE;
@@ -733,7 +733,7 @@ test_heap_validate (vlib_main_t * vm, unformat_input_t * input,
     {
       /* *INDENT-OFF* */
       foreach_vlib_main({
-        heap = clib_per_cpu_mheaps[this_vlib_main->cpu_index];
+        heap = clib_per_cpu_mheaps[this_vlib_main->thread_index];
         mheap = mheap_header(heap);
         mheap_validate(heap);
       });
diff --git a/src/vlib/counter.h b/src/vlib/counter.h
index 17a85217..60e2055d 100644
--- a/src/vlib/counter.h
+++ b/src/vlib/counter.h
@@ -70,17 +70,17 @@ u32 vlib_simple_counter_n_counters (const vlib_simple_counter_main_t * cm);
 
 /** Increment a simple counter
     @param cm - (vlib_simple_counter_main_t *) simple counter main pointer
-    @param cpu_index - (u32) the current cpu index
+    @param thread_index - (u32) the current thread index
     @param index - (u32) index of the counter to increment
     @param increment - (u64) quantity to add to the counter
 */
 always_inline void
 vlib_increment_simple_counter (vlib_simple_counter_main_t * cm,
-			       u32 cpu_index, u32 index, u64 increment)
+			       u32 thread_index, u32 index, u64 increment)
 {
   counter_t *my_counters;
 
-  my_counters = cm->counters[cpu_index];
+  my_counters = cm->counters[thread_index];
   my_counters[index] += increment;
 }
 
@@ -201,7 +201,7 @@ void vlib_clear_combined_counters (vlib_combined_counter_main_t * cm);
 
 /** Increment a combined counter
     @param cm - (vlib_combined_counter_main_t *) combined counter main pointer
-    @param cpu_index - (u32) the current cpu index
+    @param thread_index - (u32) the current thread index
     @param index - (u32) index of the counter to increment
     @param packet_increment - (u64) number of packets to add to the counter
     @param byte_increment - (u64) number of bytes to add to the counter
@@ -209,13 +209,13 @@ void vlib_clear_combined_counters (vlib_combined_counter_main_t * cm);
 always_inline void
 vlib_increment_combined_counter (vlib_combined_counter_main_t * cm,
-				 u32 cpu_index,
+				 u32 thread_index,
 				 u32 index, u64 n_packets, u64 n_bytes)
 {
   vlib_counter_t *my_counters;
 
   /* Use this CPU's counter array */
-  my_counters = cm->counters[cpu_index];
+  my_counters = cm->counters[thread_index];
 
   my_counters[index].packets += n_packets;
   my_counters[index].bytes += n_bytes;
@@ -224,14 +224,14 @@ vlib_increment_combined_counter (vlib_combined_counter_main_t * cm,
 
 /** Pre-fetch a per-thread combined counter for the given object index */
 always_inline void
 vlib_prefetch_combined_counter (const vlib_combined_counter_main_t *
cm, - u32 cpu_index, u32 index) + u32 thread_index, u32 index) { vlib_counter_t *cpu_counters; /* * This CPU's index is assumed to already be in cache */ - cpu_counters = cm->counters[cpu_index]; + cpu_counters = cm->counters[thread_index]; CLIB_PREFETCH (cpu_counters + index, CLIB_CACHE_LINE_BYTES, STORE); } diff --git a/src/vlib/error.c b/src/vlib/error.c index a2c23176..e4ed4ee3 100644 --- a/src/vlib/error.c +++ b/src/vlib/error.c @@ -149,7 +149,7 @@ vlib_register_errors (vlib_main_t * vm, vlib_node_t *n = vlib_get_node (vm, node_index); uword l; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); /* Free up any previous error strings. */ if (n->n_errors > 0) diff --git a/src/vlib/global_funcs.h b/src/vlib/global_funcs.h index f51ec381..9dd01fbf 100644 --- a/src/vlib/global_funcs.h +++ b/src/vlib/global_funcs.h @@ -23,7 +23,7 @@ always_inline vlib_main_t * vlib_get_main (void) { vlib_main_t *vm; - vm = vlib_mains[os_get_cpu_number ()]; + vm = vlib_mains[vlib_get_thread_index ()]; ASSERT (vm); return vm; } diff --git a/src/vlib/main.c b/src/vlib/main.c index b22203f0..422d3e26 100644 --- a/src/vlib/main.c +++ b/src/vlib/main.c @@ -136,18 +136,18 @@ vlib_frame_alloc_to_node (vlib_main_t * vm, u32 to_node_index, else { f = clib_mem_alloc_aligned_no_fail (n, VLIB_FRAME_ALIGN); - f->cpu_index = vm->cpu_index; + f->thread_index = vm->thread_index; fi = vlib_frame_index_no_check (vm, f); } /* Poison frame when debugging. */ if (CLIB_DEBUG > 0) { - u32 save_cpu_index = f->cpu_index; + u32 save_thread_index = f->thread_index; memset (f, 0xfe, n); - f->cpu_index = save_cpu_index; + f->thread_index = save_thread_index; } /* Insert magic number. */ @@ -517,7 +517,7 @@ vlib_put_next_frame (vlib_main_t * vm, * a dangling frame reference. Each thread has its own copy of * the next_frames vector. */ - if (0 && r->cpu_index != next_runtime->cpu_index) + if (0 && r->thread_index != next_runtime->thread_index) { nf->frame_index = ~0; nf->flags &= ~(VLIB_FRAME_PENDING | VLIB_FRAME_IS_ALLOCATED); @@ -866,7 +866,7 @@ vlib_elog_main_loop_event (vlib_main_t * vm, : evm->node_call_elog_event_types, node_index), /* track */ - (vm->cpu_index ? &vlib_worker_threads[vm->cpu_index]. + (vm->thread_index ? &vlib_worker_threads[vm->thread_index]. 
elog_track : &em->default_track), /* data to log */ n_vectors); } @@ -963,7 +963,7 @@ dispatch_node (vlib_main_t * vm, vm->cpu_time_last_node_dispatch = last_time_stamp; - if (1 /* || vm->cpu_index == node->cpu_index */ ) + if (1 /* || vm->thread_index == node->thread_index */ ) { vlib_main_t *stat_vm; @@ -1029,7 +1029,7 @@ dispatch_node (vlib_main_t * vm, { u32 node_name, vector_length, is_polling; } *ed; - vlib_worker_thread_t *w = vlib_worker_threads + vm->cpu_index; + vlib_worker_thread_t *w = vlib_worker_threads + vm->thread_index; #endif if ((dispatch_state == VLIB_NODE_STATE_INTERRUPT diff --git a/src/vlib/main.h b/src/vlib/main.h index 0197b4f3..329bf073 100644 --- a/src/vlib/main.h +++ b/src/vlib/main.h @@ -156,7 +156,7 @@ typedef struct vlib_main_t uword *init_functions_called; /* to compare with node runtime */ - u32 cpu_index; + u32 thread_index; void **mbuf_alloc_list; diff --git a/src/vlib/node.c b/src/vlib/node.c index dc0a4de5..bbd3a42e 100644 --- a/src/vlib/node.c +++ b/src/vlib/node.c @@ -99,7 +99,7 @@ vlib_node_runtime_update (vlib_main_t * vm, u32 node_index, u32 next_index) vlib_pending_frame_t *pf; i32 i, j, n_insert; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); vlib_worker_thread_barrier_sync (vm); diff --git a/src/vlib/node.h b/src/vlib/node.h index fc7e7da2..1e2f4c38 100644 --- a/src/vlib/node.h +++ b/src/vlib/node.h @@ -344,8 +344,8 @@ typedef struct vlib_frame_t /* Number of vector elements currently in frame. */ u16 n_vectors; - /* Owner cpuid / heap id */ - u16 cpu_index; + /* Owner thread / heap id */ + u16 thread_index; /* Scalar and vector arguments to next node. */ u8 arguments[0]; @@ -459,7 +459,7 @@ typedef struct vlib_node_runtime_t zero before first run of this node. */ - u16 cpu_index; /**< CPU this node runs on */ + u16 thread_index; /**< thread this node runs on */ u8 runtime_data[0]; /**< Function dependent node-runtime data. This data is diff --git a/src/vlib/node_funcs.h b/src/vlib/node_funcs.h index 1f7d94e1..54e36874 100644 --- a/src/vlib/node_funcs.h +++ b/src/vlib/node_funcs.h @@ -201,9 +201,9 @@ always_inline vlib_frame_t * vlib_get_frame_no_check (vlib_main_t * vm, uword frame_index) { vlib_frame_t *f; - u32 cpu_index = frame_index & VLIB_CPU_MASK; + u32 thread_index = frame_index & VLIB_CPU_MASK; u32 offset = frame_index & VLIB_OFFSET_MASK; - vm = vlib_mains[cpu_index]; + vm = vlib_mains[thread_index]; f = vm->heap_base + offset; return f; } @@ -215,10 +215,10 @@ vlib_frame_index_no_check (vlib_main_t * vm, vlib_frame_t * f) ASSERT (((uword) f & VLIB_CPU_MASK) == 0); - vm = vlib_mains[f->cpu_index]; + vm = vlib_mains[f->thread_index]; i = ((u8 *) f - (u8 *) vm->heap_base); - return i | f->cpu_index; + return i | f->thread_index; } always_inline vlib_frame_t * diff --git a/src/vlib/threads.c b/src/vlib/threads.c index ef3a24d3..4a111f8d 100644 --- a/src/vlib/threads.c +++ b/src/vlib/threads.c @@ -35,27 +35,12 @@ vl (void *p) vlib_worker_thread_t *vlib_worker_threads; vlib_thread_main_t vlib_thread_main; +__thread uword vlib_thread_index = 0; + uword os_get_cpu_number (void) { - void *sp; - uword n; - u32 len; - - len = vec_len (vlib_thread_stacks); - if (len == 0) - return 0; - - /* Get any old stack address. */ - sp = &sp; - - n = ((uword) sp - (uword) vlib_thread_stacks[0]) - >> VLIB_LOG2_THREAD_STACK_SIZE; - - /* "processes" have their own stacks, and they always run in thread 0 */ - n = n >= len ? 
0 : n; - - return n; + return vlib_thread_index; } uword @@ -275,21 +260,6 @@ vlib_thread_init (vlib_main_t * vm) return 0; } -vlib_worker_thread_t * -vlib_alloc_thread (vlib_main_t * vm) -{ - vlib_worker_thread_t *w; - - if (vec_len (vlib_worker_threads) >= vec_len (vlib_thread_stacks)) - { - clib_warning ("out of worker threads... Quitting..."); - exit (1); - } - vec_add2 (vlib_worker_threads, w, 1); - w->thread_stack = vlib_thread_stacks[w - vlib_worker_threads]; - return w; -} - vlib_frame_queue_t * vlib_frame_queue_alloc (int nelts) { @@ -427,7 +397,7 @@ vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index, f64 b4 = vlib_time_now_ticks (vm, before); vlib_worker_thread_barrier_check (vm, b4); /* Bad idea. Dequeue -> enqueue -> dequeue -> trouble */ - // vlib_frame_queue_dequeue (vm->cpu_index, vm, nm); + // vlib_frame_queue_dequeue (vm->thread_index, vm, nm); } elt = fq->elts + (new_tail & (fq->nelts - 1)); @@ -497,6 +467,8 @@ vlib_worker_thread_bootstrap_fn (void *arg) w->lwp = syscall (SYS_gettid); w->thread_id = pthread_self (); + vlib_thread_index = w - vlib_worker_threads; + rv = (void *) clib_calljmp ((uword (*)(uword)) w->thread_function, (uword) arg, w->thread_stack + VLIB_THREAD_STACK_SIZE); @@ -610,7 +582,9 @@ start_workers (vlib_main_t * vm) mheap_alloc (0 /* use VM */ , tr->mheap_size); else w->thread_mheap = main_heap; - w->thread_stack = vlib_thread_stacks[w - vlib_worker_threads]; + + w->thread_stack = + vlib_thread_stack_init (w - vlib_worker_threads); w->thread_function = tr->function; w->thread_function_arg = w; w->instance_id = k; @@ -630,7 +604,7 @@ start_workers (vlib_main_t * vm) vm_clone = clib_mem_alloc (sizeof (*vm_clone)); clib_memcpy (vm_clone, vlib_mains[0], sizeof (*vm_clone)); - vm_clone->cpu_index = worker_thread_index; + vm_clone->thread_index = worker_thread_index; vm_clone->heap_base = w->thread_mheap; vm_clone->mbuf_alloc_list = 0; vm_clone->init_functions_called = @@ -679,7 +653,7 @@ start_workers (vlib_main_t * vm) vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT]) { vlib_node_t *n = vlib_get_node (vm, rt->node_index); - rt->cpu_index = vm_clone->cpu_index; + rt->thread_index = vm_clone->thread_index; /* copy initial runtime_data from node */ if (n->runtime_data && n->runtime_data_bytes > 0) clib_memcpy (rt->runtime_data, n->runtime_data, @@ -692,7 +666,7 @@ start_workers (vlib_main_t * vm) vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT]) { vlib_node_t *n = vlib_get_node (vm, rt->node_index); - rt->cpu_index = vm_clone->cpu_index; + rt->thread_index = vm_clone->thread_index; /* copy initial runtime_data from node */ if (n->runtime_data && n->runtime_data_bytes > 0) clib_memcpy (rt->runtime_data, n->runtime_data, @@ -756,7 +730,8 @@ start_workers (vlib_main_t * vm) mheap_alloc (0 /* use VM */ , tr->mheap_size); else w->thread_mheap = main_heap; - w->thread_stack = vlib_thread_stacks[w - vlib_worker_threads]; + w->thread_stack = + vlib_thread_stack_init (w - vlib_worker_threads); w->thread_function = tr->function; w->thread_function_arg = w; w->instance_id = j; @@ -827,7 +802,7 @@ vlib_worker_thread_node_runtime_update (void) uword n_calls, uword n_vectors, uword n_clocks); - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); if (vec_len (vlib_mains) == 1) return; @@ -835,7 +810,7 @@ vlib_worker_thread_node_runtime_update (void) vm = vlib_mains[0]; nm = &vm->node_main; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); ASSERT 
(*vlib_worker_threads->wait_at_barrier == 1); /* @@ -955,7 +930,7 @@ vlib_worker_thread_node_runtime_update (void) vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL]) { vlib_node_t *n = vlib_get_node (vm, rt->node_index); - rt->cpu_index = vm_clone->cpu_index; + rt->thread_index = vm_clone->thread_index; /* copy runtime_data, will be overwritten later for existing rt */ if (n->runtime_data && n->runtime_data_bytes > 0) clib_memcpy (rt->runtime_data, n->runtime_data, @@ -981,7 +956,7 @@ vlib_worker_thread_node_runtime_update (void) vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT]) { vlib_node_t *n = vlib_get_node (vm, rt->node_index); - rt->cpu_index = vm_clone->cpu_index; + rt->thread_index = vm_clone->thread_index; /* copy runtime_data, will be overwritten later for existing rt */ if (n->runtime_data && n->runtime_data_bytes > 0) clib_memcpy (rt->runtime_data, n->runtime_data, @@ -1180,7 +1155,7 @@ vlib_worker_thread_fork_fixup (vlib_fork_fixup_t which) if (vlib_mains == 0) return; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); vlib_worker_thread_barrier_sync (vm); switch (which) @@ -1212,7 +1187,7 @@ vlib_worker_thread_barrier_sync (vlib_main_t * vm) vlib_worker_threads[0].barrier_sync_count++; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); deadline = vlib_time_now (vm) + BARRIER_SYNC_TIMEOUT; @@ -1260,7 +1235,7 @@ vlib_worker_thread_barrier_release (vlib_main_t * vm) int vlib_frame_queue_dequeue (vlib_main_t * vm, vlib_frame_queue_main_t * fqm) { - u32 thread_id = vm->cpu_index; + u32 thread_id = vm->thread_index; vlib_frame_queue_t *fq = fqm->vlib_frame_queues[thread_id]; vlib_frame_queue_elt_t *elt; u32 *from, *to; @@ -1393,7 +1368,7 @@ vlib_worker_thread_fn (void *arg) vlib_main_t *vm = vlib_get_main (); clib_error_t *e; - ASSERT (vm->cpu_index == os_get_cpu_number ()); + ASSERT (vm->thread_index == vlib_get_thread_index ()); vlib_worker_thread_init (w); clib_time_init (&vm->clib_time); diff --git a/src/vlib/threads.h b/src/vlib/threads.h index eca4fc26..101d3d4a 100644 --- a/src/vlib/threads.h +++ b/src/vlib/threads.h @@ -153,8 +153,6 @@ typedef struct /* Called early, in thread 0's context */ clib_error_t *vlib_thread_init (vlib_main_t * vm); -vlib_worker_thread_t *vlib_alloc_thread (vlib_main_t * vm); - int vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index, u32 frame_queue_index, vlib_frame_t * frame, vlib_frame_queue_msg_type_t type); @@ -183,12 +181,19 @@ u32 vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts); void vlib_worker_thread_barrier_sync (vlib_main_t * vm); void vlib_worker_thread_barrier_release (vlib_main_t * vm); +extern __thread uword vlib_thread_index; +static_always_inline uword +vlib_get_thread_index (void) +{ + return vlib_thread_index; +} + always_inline void vlib_smp_unsafe_warning (void) { if (CLIB_DEBUG > 0) { - if (os_get_cpu_number ()) + if (vlib_get_thread_index ()) fformat (stderr, "%s: SMP unsafe warning...\n", __FUNCTION__); } } @@ -331,21 +336,21 @@ vlib_num_workers () } always_inline u32 -vlib_get_worker_cpu_index (u32 worker_index) +vlib_get_worker_thread_index (u32 worker_index) { return worker_index + 1; } always_inline u32 -vlib_get_worker_index (u32 cpu_index) +vlib_get_worker_index (u32 thread_index) { - return cpu_index - 1; + return thread_index - 1; } always_inline u32 vlib_get_current_worker_index () { - return os_get_cpu_number () - 1; + return vlib_get_thread_index () - 1; } static inline void @@ -467,6 +472,8 @@ 
vlib_get_worker_handoff_queue_elt (u32 frame_queue_index, return elt; } +u8 *vlib_thread_stack_init (uword thread_index); + int vlib_thread_cb_register (struct vlib_main_t *vm, vlib_thread_callbacks_t * cb); diff --git a/src/vlib/unix/cj.c b/src/vlib/unix/cj.c index 33ba163a..7c1e9475 100644 --- a/src/vlib/unix/cj.c +++ b/src/vlib/unix/cj.c @@ -48,7 +48,7 @@ cj_log (u32 type, void *data0, void *data1) r = (cj_record_t *) & (cjm->records[new_tail & (cjm->num_records - 1)]); r->time = vlib_time_now (cjm->vlib_main); - r->cpu = os_get_cpu_number (); + r->thread_index = vlib_get_thread_index (); r->type = type; r->data[0] = pointer_to_uword (data0); r->data[1] = pointer_to_uword (data1); @@ -133,7 +133,8 @@ static inline void cj_dump_one_record (cj_record_t * r) { fprintf (stderr, "[%d]: %10.6f T%02d %llx %llx\n", - r->cpu, r->time, r->type, (long long unsigned int) r->data[0], + r->thread_index, r->time, r->type, + (long long unsigned int) r->data[0], (long long unsigned int) r->data[1]); } @@ -161,7 +162,7 @@ cj_dump_internal (u8 filter0_enable, u64 filter0, index = (cjm->tail + 1) & (cjm->num_records - 1); r = &(cjm->records[index]); - if (r->cpu != (u32) ~ 0) + if (r->thread_index != (u32) ~ 0) { /* Yes, dump from tail + 1 to the end */ for (i = index; i < cjm->num_records; i++) diff --git a/src/vlib/unix/cj.h b/src/vlib/unix/cj.h index 67626afe..d0a1d46e 100644 --- a/src/vlib/unix/cj.h +++ b/src/vlib/unix/cj.h @@ -23,7 +23,7 @@ typedef struct { f64 time; - u32 cpu; + u32 thread_index; u32 type; u64 data[2]; } cj_record_t; diff --git a/src/vlib/unix/main.c b/src/vlib/unix/main.c index 6b96cc0d..db5ddd64 100644 --- a/src/vlib/unix/main.c +++ b/src/vlib/unix/main.c @@ -510,13 +510,28 @@ thread0 (uword arg) return i; } +u8 * +vlib_thread_stack_init (uword thread_index) +{ + vec_validate (vlib_thread_stacks, thread_index); + vlib_thread_stacks[thread_index] = clib_mem_alloc_aligned + (VLIB_THREAD_STACK_SIZE, VLIB_THREAD_STACK_SIZE); + + /* + * Disallow writes to the bottom page of the stack, to + * catch stack overflows. + */ + if (mprotect (vlib_thread_stacks[thread_index], + clib_mem_get_page_size (), PROT_READ) < 0) + clib_unix_warning ("thread stack"); + return vlib_thread_stacks[thread_index]; +} + int vlib_unix_main (int argc, char *argv[]) { vlib_main_t *vm = &vlib_global_main; /* one and only time for this! */ - vlib_thread_main_t *tm = &vlib_thread_main; unformat_input_t input; - u8 *thread_stacks; clib_error_t *e; int i; @@ -548,29 +563,9 @@ vlib_unix_main (int argc, char *argv[]) } unformat_free (&input); - /* - * allocate n x VLIB_THREAD_STACK_SIZE stacks, aligned to a - * VLIB_THREAD_STACK_SIZE boundary - * See also: os_get_cpu_number() in vlib/vlib/threads.c - */ - thread_stacks = clib_mem_alloc_aligned - ((uword) tm->n_thread_stacks * VLIB_THREAD_STACK_SIZE, - VLIB_THREAD_STACK_SIZE); - - vec_validate (vlib_thread_stacks, tm->n_thread_stacks - 1); - for (i = 0; i < vec_len (vlib_thread_stacks); i++) - { - vlib_thread_stacks[i] = thread_stacks; - - /* - * Disallow writes to the bottom page of the stack, to - * catch stack overflows. 
- */ - if (mprotect (thread_stacks, clib_mem_get_page_size (), PROT_READ) < 0) - clib_unix_warning ("thread stack"); + vlib_thread_stack_init (0); - thread_stacks += VLIB_THREAD_STACK_SIZE; - } + vlib_thread_index = 0; i = clib_calljmp (thread0, (uword) vm, (void *) (vlib_thread_stacks[0] + diff --git a/src/vnet/adj/adj_l2.c b/src/vnet/adj/adj_l2.c index f68e54e0..20d70dd4 100644 --- a/src/vnet/adj/adj_l2.c +++ b/src/vnet/adj/adj_l2.c @@ -52,7 +52,7 @@ adj_l2_rewrite_inline (vlib_main_t * vm, { u32 * from = vlib_frame_vector_args (frame); u32 n_left_from, n_left_to_next, * to_next, next_index; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); ethernet_main_t * em = &ethernet_main; n_left_from = frame->n_vectors; @@ -93,7 +93,7 @@ adj_l2_rewrite_inline (vlib_main_t * vm, vnet_buffer(p0)->sw_if_index[VLIB_TX] = adj0->rewrite_header.sw_if_index; vlib_increment_combined_counter(&adjacency_counters, - cpu_index, + thread_index, adj_index0, /* packet increment */ 0, /* byte increment */ rw_len0); diff --git a/src/vnet/adj/adj_midchain.c b/src/vnet/adj/adj_midchain.c index e8087f08..5756de43 100644 --- a/src/vnet/adj/adj_midchain.c +++ b/src/vnet/adj/adj_midchain.c @@ -49,7 +49,7 @@ adj_midchain_tx_inline (vlib_main_t * vm, u32 next_index; vnet_main_t *vnm = vnet_get_main (); vnet_interface_main_t *im = &vnm->interface_main; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; /* Vector of buffer / pkt indices we're supposed to process */ from = vlib_frame_vector_args (frame); @@ -124,13 +124,13 @@ adj_midchain_tx_inline (vlib_main_t * vm, { vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, + thread_index, adj0->rewrite_header.sw_if_index, 1, vlib_buffer_length_in_chain (vm, b0)); vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, + thread_index, adj1->rewrite_header.sw_if_index, 1, vlib_buffer_length_in_chain (vm, b1)); @@ -181,7 +181,7 @@ adj_midchain_tx_inline (vlib_main_t * vm, { vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, + thread_index, adj0->rewrite_header.sw_if_index, 1, vlib_buffer_length_in_chain (vm, b0)); diff --git a/src/vnet/adj/adj_nsh.c b/src/vnet/adj/adj_nsh.c index 9a0f9d8b..128570b0 100644 --- a/src/vnet/adj/adj_nsh.c +++ b/src/vnet/adj/adj_nsh.c @@ -53,7 +53,7 @@ adj_nsh_rewrite_inline (vlib_main_t * vm, { u32 * from = vlib_frame_vector_args (frame); u32 n_left_from, n_left_to_next, * to_next, next_index; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); n_left_from = frame->n_vectors; next_index = node->cached_next_index; @@ -94,7 +94,7 @@ adj_nsh_rewrite_inline (vlib_main_t * vm, vnet_buffer(p0)->ip.save_rewrite_length = rw_len0; vlib_increment_combined_counter(&adjacency_counters, - cpu_index, + thread_index, adj_index0, /* packet increment */ 0, /* byte increment */ rw_len0); diff --git a/src/vnet/classify/vnet_classify.c b/src/vnet/classify/vnet_classify.c index 98842a48..70a189b0 100644 --- a/src/vnet/classify/vnet_classify.c +++ b/src/vnet/classify/vnet_classify.c @@ -251,12 +251,12 @@ static inline void make_working_copy vnet_classify_entry_##size##_t * working_copy##size = 0; foreach_size_in_u32x4; #undef _ - u32 cpu_number = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); - if (cpu_number >= vec_len (t->working_copies)) + if (thread_index >= vec_len (t->working_copies)) { oldheap = clib_mem_set_heap
(t->mheap); - vec_validate (t->working_copies, cpu_number); + vec_validate (t->working_copies, thread_index); clib_mem_set_heap (oldheap); } @@ -265,7 +265,7 @@ static inline void make_working_copy * updates from multiple threads will not result in sporadic, spurious * lookup failures. */ - working_copy = t->working_copies[cpu_number]; + working_copy = t->working_copies[thread_index]; t->saved_bucket.as_u64 = b->as_u64; oldheap = clib_mem_set_heap (t->mheap); @@ -290,7 +290,7 @@ static inline void make_working_copy default: abort(); } - t->working_copies[cpu_number] = working_copy; + t->working_copies[thread_index] = working_copy; } _vec_len(working_copy) = (1<<b->log2_pages)*t->entries_per_page; @@ -318,7 +318,7 @@ static inline void make_working_copy working_bucket.offset = vnet_classify_get_offset (t, working_copy); CLIB_MEMORY_BARRIER(); b->as_u64 = working_bucket.as_u64; - t->working_copies[cpu_number] = working_copy; + t->working_copies[thread_index] = working_copy; } static vnet_classify_entry_t * @@ -387,7 +387,7 @@ int vnet_classify_add_del (vnet_classify_table_t * t, int i; u64 hash, new_hash; u32 new_log2_pages; - u32 cpu_number = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); u8 * key_minus_skip; ASSERT ((add_v->flags & VNET_CLASSIFY_ENTRY_FREE) == 0); @@ -498,7 +498,7 @@ int vnet_classify_add_del (vnet_classify_table_t * t, new_log2_pages = t->saved_bucket.log2_pages + 1; expand_again: - working_copy = t->working_copies[cpu_number]; + working_copy = t->working_copies[thread_index]; new_v = split_and_rehash (t, working_copy, new_log2_pages); if (new_v == 0) diff --git a/src/vnet/cop/ip4_whitelist.c b/src/vnet/cop/ip4_whitelist.c index 6ef3d7d7..1b5e336b 100644 --- a/src/vnet/cop/ip4_whitelist.c +++ b/src/vnet/cop/ip4_whitelist.c @@ -60,7 +60,7 @@ ip4_cop_whitelist_node_fn (vlib_main_t * vm, cop_feature_type_t next_index; cop_main_t *cm = &cop_main; vlib_combined_counter_main_t * vcm = &load_balance_main.lbm_via_counters; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -177,12 +177,12 @@ ip4_cop_whitelist_node_fn (vlib_main_t * vm, dpo1 = load_balance_get_bucket_i(lb1, 0); vlib_increment_combined_counter - (vcm, cpu_index, lb_index0, 1, + (vcm, thread_index, lb_index0, 1, vlib_buffer_length_in_chain (vm, b0) + sizeof(ethernet_header_t)); vlib_increment_combined_counter - (vcm, cpu_index, lb_index1, 1, + (vcm, thread_index, lb_index1, 1, vlib_buffer_length_in_chain (vm, b1) + sizeof(ethernet_header_t)); @@ -273,7 +273,7 @@ ip4_cop_whitelist_node_fn (vlib_main_t * vm, dpo0 = load_balance_get_bucket_i(lb0, 0); vlib_increment_combined_counter - (vcm, cpu_index, lb_index0, 1, + (vcm, thread_index, lb_index0, 1, vlib_buffer_length_in_chain (vm, b0) + sizeof(ethernet_header_t)); diff --git a/src/vnet/cop/ip6_whitelist.c b/src/vnet/cop/ip6_whitelist.c index c2e16ccf..f3fe62e3 100644 --- a/src/vnet/cop/ip6_whitelist.c +++ b/src/vnet/cop/ip6_whitelist.c @@ -61,7 +61,7 @@ ip6_cop_whitelist_node_fn (vlib_main_t * vm, cop_main_t *cm = &cop_main; ip6_main_t * im6 = &ip6_main; vlib_combined_counter_main_t * vcm = &load_balance_main.lbm_via_counters; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -153,12 +153,12 @@ ip6_cop_whitelist_node_fn (vlib_main_t * vm, dpo1 = load_balance_get_bucket_i(lb1, 0); vlib_increment_combined_counter - (vcm, cpu_index, lb_index0, 1, + (vcm, thread_index,
lb_index0, 1, vlib_buffer_length_in_chain (vm, b0) + sizeof(ethernet_header_t)); vlib_increment_combined_counter - (vcm, cpu_index, lb_index1, 1, + (vcm, thread_index, lb_index1, 1, vlib_buffer_length_in_chain (vm, b1) + sizeof(ethernet_header_t)); @@ -233,7 +233,7 @@ ip6_cop_whitelist_node_fn (vlib_main_t * vm, dpo0 = load_balance_get_bucket_i(lb0, 0); vlib_increment_combined_counter - (vcm, cpu_index, lb_index0, 1, + (vcm, thread_index, lb_index0, 1, vlib_buffer_length_in_chain (vm, b0) + sizeof(ethernet_header_t)); diff --git a/src/vnet/devices/af_packet/node.c b/src/vnet/devices/af_packet/node.c index ba337f3f..76980102 100644 --- a/src/vnet/devices/af_packet/node.c +++ b/src/vnet/devices/af_packet/node.c @@ -124,7 +124,7 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, u32 frame_num = apif->rx_req->tp_frame_nr; u8 *block_start = apif->rx_ring + block * block_size; uword n_trace = vlib_get_trace_count (vm, node); - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); u32 min_bufs = apif->rx_req->tp_frame_size / n_buffer_bytes; @@ -132,15 +132,15 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, if (apif->per_interface_next_index != ~0) next_index = apif->per_interface_next_index; - n_free_bufs = vec_len (apm->rx_buffers[cpu_index]); + n_free_bufs = vec_len (apm->rx_buffers[thread_index]); if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE)) { - vec_validate (apm->rx_buffers[cpu_index], + vec_validate (apm->rx_buffers[thread_index], VLIB_FRAME_SIZE + n_free_bufs - 1); n_free_bufs += - vlib_buffer_alloc (vm, &apm->rx_buffers[cpu_index][n_free_bufs], + vlib_buffer_alloc (vm, &apm->rx_buffers[thread_index][n_free_bufs], VLIB_FRAME_SIZE); - _vec_len (apm->rx_buffers[cpu_index]) = n_free_bufs; + _vec_len (apm->rx_buffers[thread_index]) = n_free_bufs; } rx_frame = apif->next_rx_frame; @@ -163,11 +163,11 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, { /* grab free buffer */ u32 last_empty_buffer = - vec_len (apm->rx_buffers[cpu_index]) - 1; + vec_len (apm->rx_buffers[thread_index]) - 1; prev_bi0 = bi0; - bi0 = apm->rx_buffers[cpu_index][last_empty_buffer]; + bi0 = apm->rx_buffers[thread_index][last_empty_buffer]; b0 = vlib_get_buffer (vm, bi0); - _vec_len (apm->rx_buffers[cpu_index]) = last_empty_buffer; + _vec_len (apm->rx_buffers[thread_index]) = last_empty_buffer; n_free_bufs--; /* copy data */ @@ -236,9 +236,9 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_increment_combined_counter (vnet_get_main ()->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - os_get_cpu_number (), apif->hw_if_index, n_rx_packets, n_rx_bytes); + vlib_get_thread_index (), apif->hw_if_index, n_rx_packets, n_rx_bytes); - vnet_device_increment_rx_packets (cpu_index, n_rx_packets); + vnet_device_increment_rx_packets (thread_index, n_rx_packets); return n_rx_packets; } diff --git a/src/vnet/devices/devices.c b/src/vnet/devices/devices.c index 41645220..5e5e812c 100644 --- a/src/vnet/devices/devices.c +++ b/src/vnet/devices/devices.c @@ -104,7 +104,7 @@ vnet_device_queue_sort (void *a1, void *a2) void vnet_device_input_assign_thread (u32 hw_if_index, - u16 queue_id, uword cpu_index) + u16 queue_id, uword thread_index) { vnet_main_t *vnm = vnet_get_main (); vnet_device_main_t *vdm = &vnet_device_main; @@ -115,19 +115,19 @@ vnet_device_input_assign_thread (u32 
hw_if_index, ASSERT (hw->input_node_index > 0); - if (vdm->first_worker_cpu_index == 0) - cpu_index = 0; + if (vdm->first_worker_thread_index == 0) + thread_index = 0; - if (cpu_index != 0 && - (cpu_index < vdm->first_worker_cpu_index || - cpu_index > vdm->last_worker_cpu_index)) + if (thread_index != 0 && + (thread_index < vdm->first_worker_thread_index || + thread_index > vdm->last_worker_thread_index)) { - cpu_index = vdm->next_worker_cpu_index++; - if (vdm->next_worker_cpu_index > vdm->last_worker_cpu_index) - vdm->next_worker_cpu_index = vdm->first_worker_cpu_index; + thread_index = vdm->next_worker_thread_index++; + if (vdm->next_worker_thread_index > vdm->last_worker_thread_index) + vdm->next_worker_thread_index = vdm->first_worker_thread_index; } - vm = vlib_mains[cpu_index]; + vm = vlib_mains[thread_index]; rt = vlib_node_get_runtime_data (vm, hw->input_node_index); vec_add2 (rt->devices_and_queues, dq, 1); @@ -136,33 +136,33 @@ vnet_device_input_assign_thread (u32 hw_if_index, dq->queue_id = queue_id; vec_sort_with_function (rt->devices_and_queues, vnet_device_queue_sort); - vec_validate (hw->input_node_cpu_index_by_queue, queue_id); - hw->input_node_cpu_index_by_queue[queue_id] = cpu_index; + vec_validate (hw->input_node_thread_index_by_queue, queue_id); + hw->input_node_thread_index_by_queue[queue_id] = thread_index; } static int vnet_device_input_unassign_thread (u32 hw_if_index, u16 queue_id, - uword cpu_index) + uword thread_index) { vnet_main_t *vnm = vnet_get_main (); vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index); vnet_device_input_runtime_t *rt; vnet_device_and_queue_t *dq; - uword old_cpu_index; + uword old_thread_index; - if (hw->input_node_cpu_index_by_queue == 0) + if (hw->input_node_thread_index_by_queue == 0) return VNET_API_ERROR_INVALID_INTERFACE; - if (vec_len (hw->input_node_cpu_index_by_queue) < queue_id + 1) + if (vec_len (hw->input_node_thread_index_by_queue) < queue_id + 1) return VNET_API_ERROR_INVALID_INTERFACE; - old_cpu_index = hw->input_node_cpu_index_by_queue[queue_id]; + old_thread_index = hw->input_node_thread_index_by_queue[queue_id]; - if (old_cpu_index == cpu_index) + if (old_thread_index == thread_index) return 0; rt = - vlib_node_get_runtime_data (vlib_mains[old_cpu_index], + vlib_node_get_runtime_data (vlib_mains[old_thread_index], hw->input_node_index); vec_foreach (dq, rt->devices_and_queues) @@ -240,7 +240,7 @@ set_device_placement (vlib_main_t * vm, unformat_input_t * input, vnet_device_main_t *vdm = &vnet_device_main; u32 hw_if_index = (u32) ~ 0; u32 queue_id = (u32) 0; - u32 cpu_index = (u32) ~ 0; + u32 thread_index = (u32) ~ 0; int rv; if (!unformat_user (input, unformat_line_input, line_input)) @@ -253,10 +253,10 @@ set_device_placement (vlib_main_t * vm, unformat_input_t * input, ; else if (unformat (line_input, "queue %d", &queue_id)) ; - else if (unformat (line_input, "main", &cpu_index)) - cpu_index = 0; - else if (unformat (line_input, "worker %d", &cpu_index)) - cpu_index += vdm->first_worker_cpu_index; + else if (unformat (line_input, "main", &thread_index)) + thread_index = 0; + else if (unformat (line_input, "worker %d", &thread_index)) + thread_index += vdm->first_worker_thread_index; else { error = clib_error_return (0, "parse error: '%U'", @@ -271,16 +271,17 @@ set_device_placement (vlib_main_t * vm, unformat_input_t * input, if (hw_if_index == (u32) ~ 0) return clib_error_return (0, "please specify valid interface name"); - if (cpu_index > vdm->last_worker_cpu_index) + if (thread_index > 
vdm->last_worker_thread_index) return clib_error_return (0, "please specify valid worker thread or main"); - rv = vnet_device_input_unassign_thread (hw_if_index, queue_id, cpu_index); + rv = + vnet_device_input_unassign_thread (hw_if_index, queue_id, thread_index); if (rv) return clib_error_return (0, "not found"); - vnet_device_input_assign_thread (hw_if_index, queue_id, cpu_index); + vnet_device_input_assign_thread (hw_if_index, queue_id, thread_index); return 0; } @@ -326,9 +327,9 @@ vnet_device_init (vlib_main_t * vm) tr = p ? (vlib_thread_registration_t *) p[0] : 0; if (tr && tr->count > 0) { - vdm->first_worker_cpu_index = tr->first_index; - vdm->next_worker_cpu_index = tr->first_index; - vdm->last_worker_cpu_index = tr->first_index + tr->count - 1; + vdm->first_worker_thread_index = tr->first_index; + vdm->next_worker_thread_index = tr->first_index; + vdm->last_worker_thread_index = tr->first_index + tr->count - 1; } return 0; } diff --git a/src/vnet/devices/devices.h b/src/vnet/devices/devices.h index bbb29fe3..966f8302 100644 --- a/src/vnet/devices/devices.h +++ b/src/vnet/devices/devices.h @@ -50,9 +50,9 @@ typedef struct typedef struct { vnet_device_per_worker_data_t *workers; - uword first_worker_cpu_index; - uword last_worker_cpu_index; - uword next_worker_cpu_index; + uword first_worker_thread_index; + uword last_worker_thread_index; + uword next_worker_thread_index; } vnet_device_main_t; typedef struct @@ -80,7 +80,7 @@ vnet_set_device_input_node (u32 hw_if_index, u32 node_index) } void vnet_device_input_assign_thread (u32 hw_if_index, u16 queue_id, - uword cpu_index); + uword thread_index); static inline u64 vnet_get_aggregate_rx_packets (void) @@ -95,12 +95,12 @@ vnet_get_aggregate_rx_packets (void) } static inline void -vnet_device_increment_rx_packets (u32 cpu_index, u64 count) +vnet_device_increment_rx_packets (u32 thread_index, u64 count) { vnet_device_main_t *vdm = &vnet_device_main; vnet_device_per_worker_data_t *pwd; - pwd = vec_elt_at_index (vdm->workers, cpu_index); + pwd = vec_elt_at_index (vdm->workers, thread_index); pwd->aggregate_rx_packets += count; } @@ -117,9 +117,9 @@ vnet_device_input_set_interrupt_pending (vnet_main_t * vnm, u32 hw_if_index, { vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index); - ASSERT (queue_id < vec_len (hw->input_node_cpu_index_by_queue)); - u32 cpu_index = hw->input_node_cpu_index_by_queue[queue_id]; - vlib_node_set_interrupt_pending (vlib_mains[cpu_index], + ASSERT (queue_id < vec_len (hw->input_node_thread_index_by_queue)); + u32 thread_index = hw->input_node_thread_index_by_queue[queue_id]; + vlib_node_set_interrupt_pending (vlib_mains[thread_index], hw->input_node_index); } diff --git a/src/vnet/devices/netmap/node.c b/src/vnet/devices/netmap/node.c index 68ea7832..e120eeae 100644 --- a/src/vnet/devices/netmap/node.c +++ b/src/vnet/devices/netmap/node.c @@ -98,22 +98,22 @@ netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, u32 n_free_bufs; struct netmap_ring *ring; int cur_ring; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); if (nif->per_interface_next_index != ~0) next_index = nif->per_interface_next_index; - n_free_bufs = vec_len (nm->rx_buffers[cpu_index]); + n_free_bufs = vec_len (nm->rx_buffers[thread_index]); if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE)) { - vec_validate (nm->rx_buffers[cpu_index], + vec_validate (nm->rx_buffers[thread_index], 
VLIB_FRAME_SIZE + n_free_bufs - 1); n_free_bufs += - vlib_buffer_alloc (vm, &nm->rx_buffers[cpu_index][n_free_bufs], + vlib_buffer_alloc (vm, &nm->rx_buffers[thread_index][n_free_bufs], VLIB_FRAME_SIZE); - _vec_len (nm->rx_buffers[cpu_index]) = n_free_bufs; + _vec_len (nm->rx_buffers[thread_index]) = n_free_bufs; } cur_ring = nif->first_rx_ring; @@ -163,11 +163,11 @@ netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_buffer_t *b0; /* grab free buffer */ u32 last_empty_buffer = - vec_len (nm->rx_buffers[cpu_index]) - 1; + vec_len (nm->rx_buffers[thread_index]) - 1; prev_bi0 = bi0; - bi0 = nm->rx_buffers[cpu_index][last_empty_buffer]; + bi0 = nm->rx_buffers[thread_index][last_empty_buffer]; b0 = vlib_get_buffer (vm, bi0); - _vec_len (nm->rx_buffers[cpu_index]) = last_empty_buffer; + _vec_len (nm->rx_buffers[thread_index]) = last_empty_buffer; n_free_bufs--; /* copy data */ @@ -247,9 +247,9 @@ netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_increment_combined_counter (vnet_get_main ()->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - os_get_cpu_number (), nif->hw_if_index, n_rx_packets, n_rx_bytes); + vlib_get_thread_index (), nif->hw_if_index, n_rx_packets, n_rx_bytes); - vnet_device_increment_rx_packets (cpu_index, n_rx_packets); + vnet_device_increment_rx_packets (thread_index, n_rx_packets); return n_rx_packets; } @@ -260,7 +260,7 @@ netmap_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, { int i; u32 n_rx_packets = 0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); netmap_main_t *nm = &netmap_main; netmap_if_t *nmi; @@ -269,7 +269,7 @@ netmap_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, nmi = vec_elt_at_index (nm->interfaces, i); if (nmi->is_admin_up && (i % nm->input_cpu_count) == - (cpu_index - nm->input_cpu_first_index)) + (thread_index - nm->input_cpu_first_index)) n_rx_packets += netmap_device_input_fn (vm, node, frame, nmi); } diff --git a/src/vnet/devices/ssvm/node.c b/src/vnet/devices/ssvm/node.c index a6c9dfd7..539b4161 100644 --- a/src/vnet/devices/ssvm/node.c +++ b/src/vnet/devices/ssvm/node.c @@ -89,7 +89,7 @@ ssvm_eth_device_input (ssvm_eth_main_t * em, ethernet_header_t *eh0; u16 type0; u32 n_rx_bytes = 0, l3_offset0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 trace_cnt __attribute__ ((unused)) = vlib_get_trace_count (vm, node); volatile u32 *lock; u32 *elt_indices; @@ -284,10 +284,10 @@ out: vlib_increment_combined_counter (vnet_get_main ()->interface_main.combined_sw_if_counters - + VNET_INTERFACE_COUNTER_RX, cpu_index, + + VNET_INTERFACE_COUNTER_RX, thread_index, intfc->vlib_hw_if_index, rx_queue_index, n_rx_bytes); - vnet_device_increment_rx_packets (cpu_index, rx_queue_index); + vnet_device_increment_rx_packets (thread_index, rx_queue_index); return rx_queue_index; } diff --git a/src/vnet/devices/virtio/vhost-user.c b/src/vnet/devices/virtio/vhost-user.c index 00807dc0..5e720f65 100644 --- a/src/vnet/devices/virtio/vhost-user.c +++ b/src/vnet/devices/virtio/vhost-user.c @@ -331,7 +331,7 @@ vhost_user_tx_thread_placement (vhost_user_intf_t * vui) { //Let's try to assign one queue to each thread u32 qid = 0; - u32 cpu_index = 0; + u32 thread_index = 0; vui->use_tx_spinlock = 0; while (1) { @@ -341,20 +341,21 @@ vhost_user_tx_thread_placement (vhost_user_intf_t * vui) if (!rxvq->started || !rxvq->enabled) continue; - vui->per_cpu_tx_qid[cpu_index] = qid; - cpu_index++; - if (cpu_index == 
vlib_get_thread_main ()->n_vlib_mains) + vui->per_cpu_tx_qid[thread_index] = qid; + thread_index++; + if (thread_index == vlib_get_thread_main ()->n_vlib_mains) return; } //We need to loop, meaning the spinlock has to be used vui->use_tx_spinlock = 1; - if (cpu_index == 0) + if (thread_index == 0) { //Could not find a single valid one - for (cpu_index = 0; - cpu_index < vlib_get_thread_main ()->n_vlib_mains; cpu_index++) + for (thread_index = 0; + thread_index < vlib_get_thread_main ()->n_vlib_mains; + thread_index++) { - vui->per_cpu_tx_qid[cpu_index] = 0; + vui->per_cpu_tx_qid[thread_index] = 0; } return; } @@ -368,7 +369,7 @@ vhost_user_rx_thread_placement () vhost_user_intf_t *vui; vhost_cpu_t *vhc; u32 *workers = 0; - u32 cpu_index; + u32 thread_index; vlib_main_t *vm; //Let's list all workers cpu indexes @@ -400,9 +401,9 @@ vhost_user_rx_thread_placement () continue; i %= vec_len (vui_workers); - cpu_index = vui_workers[i]; + thread_index = vui_workers[i]; i++; - vhc = &vum->cpus[cpu_index]; + vhc = &vum->cpus[thread_index]; iaq.qid = qid; iaq.vhost_iface_index = vui - vum->vhost_user_interfaces; @@ -429,14 +430,14 @@ vhost_user_rx_thread_placement () vhc->operation_mode = mode; } - for (cpu_index = vum->input_cpu_first_index; - cpu_index < vum->input_cpu_first_index + vum->input_cpu_count; - cpu_index++) + for (thread_index = vum->input_cpu_first_index; + thread_index < vum->input_cpu_first_index + vum->input_cpu_count; + thread_index++) { vlib_node_state_t state = VLIB_NODE_STATE_POLLING; - vhc = &vum->cpus[cpu_index]; - vm = vlib_mains ? vlib_mains[cpu_index] : &vlib_global_main; + vhc = &vum->cpus[thread_index]; + vm = vlib_mains ? vlib_mains[thread_index] : &vlib_global_main; switch (vhc->operation_mode) { case VHOST_USER_INTERRUPT_MODE: @@ -532,7 +533,7 @@ vhost_user_set_interrupt_pending (vhost_user_intf_t * vui, u32 ifq) { vhost_user_main_t *vum = &vhost_user_main; vhost_cpu_t *vhc; - u32 cpu_index; + u32 thread_index; vhost_iface_and_queue_t *vhiq; vlib_main_t *vm; u32 ifq2; @@ -553,8 +554,8 @@ vhost_user_set_interrupt_pending (vhost_user_intf_t * vui, u32 ifq) if ((vhiq->vhost_iface_index == (ifq >> 8)) && (VHOST_VRING_IDX_TX (vhiq->qid) == (ifq & 0xff))) { - cpu_index = vhc - vum->cpus; - vm = vlib_mains ? vlib_mains[cpu_index] : &vlib_global_main; + thread_index = vhc - vum->cpus; + vm = vlib_mains ? vlib_mains[thread_index] : &vlib_global_main; /* * Convert RX virtqueue number in the lower byte to vring * queue index for the input node process. Top bytes contain @@ -1592,7 +1593,7 @@ vhost_user_if_input (vlib_main_t * vm, u32 n_trace = vlib_get_trace_count (vm, node); u16 qsz_mask; u32 map_hint = 0; - u16 cpu_index = os_get_cpu_number (); + u16 thread_index = vlib_get_thread_index (); u16 copy_len = 0; { @@ -1651,32 +1652,32 @@ vhost_user_if_input (vlib_main_t * vm, * in the loop and come back later. This is not an issue as for big packet, * processing cost really comes from the memory copy. 
*/ - if (PREDICT_FALSE (vum->cpus[cpu_index].rx_buffers_len < n_left + 1)) + if (PREDICT_FALSE (vum->cpus[thread_index].rx_buffers_len < n_left + 1)) { - u32 curr_len = vum->cpus[cpu_index].rx_buffers_len; - vum->cpus[cpu_index].rx_buffers_len += + u32 curr_len = vum->cpus[thread_index].rx_buffers_len; + vum->cpus[thread_index].rx_buffers_len += vlib_buffer_alloc_from_free_list (vm, - vum->cpus[cpu_index].rx_buffers + + vum->cpus[thread_index].rx_buffers + curr_len, VHOST_USER_RX_BUFFERS_N - curr_len, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); if (PREDICT_FALSE - (vum->cpus[cpu_index].rx_buffers_len < + (vum->cpus[thread_index].rx_buffers_len < VHOST_USER_RX_BUFFER_STARVATION)) { /* In case of buffer starvation, discard some packets from the queue * and log the event. * We keep doing best effort for the remaining packets. */ - u32 flush = (n_left + 1 > vum->cpus[cpu_index].rx_buffers_len) ? - n_left + 1 - vum->cpus[cpu_index].rx_buffers_len : 1; + u32 flush = (n_left + 1 > vum->cpus[thread_index].rx_buffers_len) ? + n_left + 1 - vum->cpus[thread_index].rx_buffers_len : 1; flush = vhost_user_rx_discard_packet (vm, vui, txvq, flush); n_left -= flush; vlib_increment_simple_counter (vnet_main. interface_main.sw_if_counters + VNET_INTERFACE_COUNTER_DROP, - os_get_cpu_number (), + vlib_get_thread_index (), vui->sw_if_index, flush); vlib_error_count (vm, vhost_user_input_node.index, @@ -1696,7 +1697,7 @@ vhost_user_if_input (vlib_main_t * vm, u32 desc_data_offset; vring_desc_t *desc_table = txvq->desc; - if (PREDICT_FALSE (vum->cpus[cpu_index].rx_buffers_len <= 1)) + if (PREDICT_FALSE (vum->cpus[thread_index].rx_buffers_len <= 1)) { /* Not enough rx_buffers * Note: We yeld on 1 so we don't need to do an additional @@ -1707,17 +1708,18 @@ vhost_user_if_input (vlib_main_t * vm, } desc_current = txvq->avail->ring[txvq->last_avail_idx & qsz_mask]; - vum->cpus[cpu_index].rx_buffers_len--; - bi_current = (vum->cpus[cpu_index].rx_buffers) - [vum->cpus[cpu_index].rx_buffers_len]; + vum->cpus[thread_index].rx_buffers_len--; + bi_current = (vum->cpus[thread_index].rx_buffers) + [vum->cpus[thread_index].rx_buffers_len]; b_head = b_current = vlib_get_buffer (vm, bi_current); to_next[0] = bi_current; //We do that now so we can forget about bi_current to_next++; n_left_to_next--; vlib_prefetch_buffer_with_index (vm, - (vum->cpus[cpu_index].rx_buffers) - [vum->cpus[cpu_index]. + (vum-> + cpus[thread_index].rx_buffers) + [vum->cpus[thread_index]. rx_buffers_len - 1], LOAD); /* Just preset the used descriptor id and length for later */ @@ -1791,7 +1793,7 @@ vhost_user_if_input (vlib_main_t * vm, (b_current->current_length == VLIB_BUFFER_DATA_SIZE)) { if (PREDICT_FALSE - (vum->cpus[cpu_index].rx_buffers_len == 0)) + (vum->cpus[thread_index].rx_buffers_len == 0)) { /* Cancel speculation */ to_next--; @@ -1805,17 +1807,18 @@ vhost_user_if_input (vlib_main_t * vm, * but valid. 
*/ vhost_user_input_rewind_buffers (vm, - &vum->cpus[cpu_index], + &vum->cpus + [thread_index], b_head); n_left = 0; goto stop; } /* Get next output */ - vum->cpus[cpu_index].rx_buffers_len--; + vum->cpus[thread_index].rx_buffers_len--; u32 bi_next = - (vum->cpus[cpu_index].rx_buffers)[vum->cpus - [cpu_index].rx_buffers_len]; + (vum->cpus[thread_index].rx_buffers)[vum->cpus + [thread_index].rx_buffers_len]; b_current->next_buffer = bi_next; b_current->flags |= VLIB_BUFFER_NEXT_PRESENT; bi_current = bi_next; @@ -1823,7 +1826,7 @@ vhost_user_if_input (vlib_main_t * vm, } /* Prepare a copy order executed later for the data */ - vhost_copy_t *cpy = &vum->cpus[cpu_index].copy[copy_len]; + vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len]; copy_len++; u32 desc_data_l = desc_table[desc_current].len - desc_data_offset; @@ -1880,7 +1883,7 @@ vhost_user_if_input (vlib_main_t * vm, if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD)) { if (PREDICT_FALSE - (vhost_user_input_copy (vui, vum->cpus[cpu_index].copy, + (vhost_user_input_copy (vui, vum->cpus[thread_index].copy, copy_len, &map_hint))) { clib_warning @@ -1905,7 +1908,7 @@ vhost_user_if_input (vlib_main_t * vm, /* Do the memory copies */ if (PREDICT_FALSE - (vhost_user_input_copy (vui, vum->cpus[cpu_index].copy, + (vhost_user_input_copy (vui, vum->cpus[thread_index].copy, copy_len, &map_hint))) { clib_warning ("Memory mapping error on interface hw_if_index=%d " @@ -1933,9 +1936,9 @@ vhost_user_if_input (vlib_main_t * vm, vlib_increment_combined_counter (vnet_main.interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - os_get_cpu_number (), vui->sw_if_index, n_rx_packets, n_rx_bytes); + vlib_get_thread_index (), vui->sw_if_index, n_rx_packets, n_rx_bytes); - vnet_device_increment_rx_packets (cpu_index, n_rx_packets); + vnet_device_increment_rx_packets (thread_index, n_rx_packets); return n_rx_packets; } @@ -1946,15 +1949,15 @@ vhost_user_input (vlib_main_t * vm, { vhost_user_main_t *vum = &vhost_user_main; uword n_rx_packets = 0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); vhost_iface_and_queue_t *vhiq; vhost_user_intf_t *vui; vhost_cpu_t *vhc; - vhc = &vum->cpus[cpu_index]; + vhc = &vum->cpus[thread_index]; if (PREDICT_TRUE (vhc->operation_mode == VHOST_USER_POLLING_MODE)) { - vec_foreach (vhiq, vum->cpus[cpu_index].rx_queues) + vec_foreach (vhiq, vum->cpus[thread_index].rx_queues) { vui = &vum->vhost_user_interfaces[vhiq->vhost_iface_index]; n_rx_packets += vhost_user_if_input (vm, vum, vui, vhiq->qid, node); @@ -2096,7 +2099,7 @@ vhost_user_tx (vlib_main_t * vm, vhost_user_vring_t *rxvq; u16 qsz_mask; u8 error; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 map_hint = 0; u8 retry = 8; u16 copy_len; @@ -2116,7 +2119,7 @@ vhost_user_tx (vlib_main_t * vm, qid = VHOST_VRING_IDX_RX (*vec_elt_at_index - (vui->per_cpu_tx_qid, os_get_cpu_number ())); + (vui->per_cpu_tx_qid, vlib_get_thread_index ())); rxvq = &vui->vrings[qid]; if (PREDICT_FALSE (vui->use_tx_spinlock)) vhost_user_vring_lock (vui, qid); @@ -2143,10 +2146,10 @@ retry: if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { - vum->cpus[cpu_index].current_trace = + vum->cpus[thread_index].current_trace = vlib_add_trace (vm, node, b0, - sizeof (*vum->cpus[cpu_index].current_trace)); - vhost_user_tx_trace (vum->cpus[cpu_index].current_trace, + sizeof (*vum->cpus[thread_index].current_trace)); + vhost_user_tx_trace (vum->cpus[thread_index].current_trace, vui, qid / 2, b0, 
rxvq); } @@ -2188,14 +2191,14 @@ retry: { // Get a header from the header array virtio_net_hdr_mrg_rxbuf_t *hdr = - &vum->cpus[cpu_index].tx_headers[tx_headers_len]; + &vum->cpus[thread_index].tx_headers[tx_headers_len]; tx_headers_len++; hdr->hdr.flags = 0; hdr->hdr.gso_type = 0; hdr->num_buffers = 1; //This is local, no need to check // Prepare a copy order executed later for the header - vhost_copy_t *cpy = &vum->cpus[cpu_index].copy[copy_len]; + vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len]; copy_len++; cpy->len = vui->virtio_net_hdr_sz; cpy->dst = buffer_map_addr; @@ -2220,7 +2223,7 @@ retry: else if (vui->virtio_net_hdr_sz == 12) //MRG is available { virtio_net_hdr_mrg_rxbuf_t *hdr = - &vum->cpus[cpu_index].tx_headers[tx_headers_len - 1]; + &vum->cpus[thread_index].tx_headers[tx_headers_len - 1]; //Move from available to used buffer rxvq->used->ring[rxvq->last_used_idx & qsz_mask].id = @@ -2282,7 +2285,7 @@ retry: } { - vhost_copy_t *cpy = &vum->cpus[cpu_index].copy[copy_len]; + vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len]; copy_len++; cpy->len = bytes_left; cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len; @@ -2325,8 +2328,8 @@ retry: if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { - vum->cpus[cpu_index].current_trace->hdr = - vum->cpus[cpu_index].tx_headers[tx_headers_len - 1]; + vum->cpus[thread_index].current_trace->hdr = + vum->cpus[thread_index].tx_headers[tx_headers_len - 1]; } n_left--; //At the end for error counting when 'goto done' is invoked @@ -2336,7 +2339,7 @@ retry: done: //Do the memory copies if (PREDICT_FALSE - (vhost_user_tx_copy (vui, vum->cpus[cpu_index].copy, + (vhost_user_tx_copy (vui, vum->cpus[thread_index].copy, copy_len, &map_hint))) { clib_warning ("Memory mapping error on interface hw_if_index=%d " @@ -2386,7 +2389,7 @@ done3: vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters + VNET_INTERFACE_COUNTER_DROP, - os_get_cpu_number (), vui->sw_if_index, n_left); + vlib_get_thread_index (), vui->sw_if_index, n_left); } vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors); @@ -2773,11 +2776,11 @@ vhost_user_send_interrupt_process (vlib_main_t * vm, case ~0: vec_foreach (vhc, vum->cpus) { - u32 cpu_index = vhc - vum->cpus; + u32 thread_index = vhc - vum->cpus; f64 next_timeout; next_timeout = timeout; - vec_foreach (vhiq, vum->cpus[cpu_index].rx_queues) + vec_foreach (vhiq, vum->cpus[thread_index].rx_queues) { vui = &vum->vhost_user_interfaces[vhiq->vhost_iface_index]; vhost_user_vring_t *rxvq = diff --git a/src/vnet/dpo/lookup_dpo.c b/src/vnet/dpo/lookup_dpo.c index e94e871c..97ad0a44 100644 --- a/src/vnet/dpo/lookup_dpo.c +++ b/src/vnet/dpo/lookup_dpo.c @@ -266,7 +266,7 @@ lookup_dpo_ip4_inline (vlib_main_t * vm, int table_from_interface) { u32 n_left_from, next_index, * from, * to_next; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters; from = vlib_frame_vector_args (from_frame); @@ -407,10 +407,10 @@ lookup_dpo_ip4_inline (vlib_main_t * vm, vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b0)); vlib_increment_combined_counter - (cm, cpu_index, lbi1, 1, + (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, b1)); if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) @@ -511,7 +511,7 @@ lookup_dpo_ip4_inline (vlib_main_t * vm, 
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b0)); if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) @@ -606,7 +606,7 @@ lookup_dpo_ip6_inline (vlib_main_t * vm, { vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters; u32 n_left_from, next_index, * from, * to_next; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -749,10 +749,10 @@ lookup_dpo_ip6_inline (vlib_main_t * vm, vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b0)); vlib_increment_combined_counter - (cm, cpu_index, lbi1, 1, + (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, b1)); if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) @@ -853,7 +853,7 @@ lookup_dpo_ip6_inline (vlib_main_t * vm, vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b0)); if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) @@ -930,7 +930,7 @@ lookup_dpo_mpls_inline (vlib_main_t * vm, int table_from_interface) { u32 n_left_from, next_index, * from, * to_next; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters; from = vlib_frame_vector_args (from_frame); @@ -994,7 +994,7 @@ lookup_dpo_mpls_inline (vlib_main_t * vm, vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b0)); if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) diff --git a/src/vnet/dpo/replicate_dpo.c b/src/vnet/dpo/replicate_dpo.c index a9f334be..e25ceae9 100644 --- a/src/vnet/dpo/replicate_dpo.c +++ b/src/vnet/dpo/replicate_dpo.c @@ -627,7 +627,7 @@ replicate_inline (vlib_main_t * vm, vlib_combined_counter_main_t * cm = &replicate_main.repm_counters; replicate_main_t * rm = &replicate_main; u32 n_left_from, * from, * to_next, next_index; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -657,12 +657,12 @@ replicate_inline (vlib_main_t * vm, rep0 = replicate_get(repi0); vlib_increment_combined_counter( - cm, cpu_index, repi0, 1, + cm, thread_index, repi0, 1, vlib_buffer_length_in_chain(vm, b0)); - vec_validate (rm->clones[cpu_index], rep0->rep_n_buckets - 1); + vec_validate (rm->clones[thread_index], rep0->rep_n_buckets - 1); - num_cloned = vlib_buffer_clone (vm, bi0, rm->clones[cpu_index], rep0->rep_n_buckets, 128); + num_cloned = vlib_buffer_clone (vm, bi0, rm->clones[thread_index], rep0->rep_n_buckets, 128); if (num_cloned != rep0->rep_n_buckets) { @@ -673,7 +673,7 @@ replicate_inline (vlib_main_t * vm, for (bucket = 0; bucket < num_cloned; bucket++) { - ci0 = rm->clones[cpu_index][bucket]; + ci0 = rm->clones[thread_index][bucket]; c0 = vlib_get_buffer(vm, ci0); to_next[0] = ci0; @@ -700,7 +700,7 @@ replicate_inline (vlib_main_t * vm, vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); } } - vec_reset_length (rm->clones[cpu_index]); + vec_reset_length (rm->clones[thread_index]); } 
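The hunks above and below all apply one pattern: state that used to be indexed by a CPU number derived from the current stack address is now indexed by an explicit per-thread index, read from the new __thread variable through vlib_get_thread_index (). A minimal self-contained sketch of that pattern, with illustrative names only (my_thread_index, scratch and MAX_THREADS are not VPP symbols):

/* Each worker owns one slot, as with rm->clones[thread_index] above. */
#define MAX_THREADS 64
static unsigned int *scratch[MAX_THREADS];

/* Thread-local index, assigned once when the worker starts; this is
   the role vlib_thread_index plays in this patch. */
static __thread unsigned int my_thread_index;

static inline unsigned int
get_thread_index (void)
{
  return my_thread_index;
}

/* No lock needed: a worker only ever touches its own slot. */
static inline unsigned int *
get_my_scratch (void)
{
  return scratch[get_thread_index ()];
}

Unlike the deleted os_get_cpu_number (), which recovered the index by dividing the current stack address by the thread-stack size, the thread-local variable also gives a correct answer on threads that do not run on a VLIB-allocated stack.
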
vlib_put_next_frame (vm, node, next_index, n_left_to_next); diff --git a/src/vnet/ethernet/arp.c b/src/vnet/ethernet/arp.c index ee757505..c74a097e 100644 --- a/src/vnet/ethernet/arp.c +++ b/src/vnet/ethernet/arp.c @@ -1771,7 +1771,7 @@ set_ip4_over_ethernet_rpc_callback (vnet_arp_set_ip4_over_ethernet_rpc_args_t * a) { vnet_main_t *vm = vnet_get_main (); - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); if (a->flags & ETHERNET_ARP_ARGS_REMOVE) vnet_arp_unset_ip4_over_ethernet_internal (vm, a); diff --git a/src/vnet/ethernet/interface.c b/src/vnet/ethernet/interface.c index 9894e3c8..335e3f9f 100644 --- a/src/vnet/ethernet/interface.c +++ b/src/vnet/ethernet/interface.c @@ -362,7 +362,7 @@ simulated_ethernet_interface_tx (vlib_main_t * vm, u32 next_index = VNET_SIMULATED_ETHERNET_TX_NEXT_ETHERNET_INPUT; u32 i, next_node_index, bvi_flag, sw_if_index; u32 n_pkts = 0, n_bytes = 0; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; vnet_main_t *vnm = vnet_get_main (); vnet_interface_main_t *im = &vnm->interface_main; vlib_node_main_t *nm = &vm->node_main; @@ -420,8 +420,9 @@ simulated_ethernet_interface_tx (vlib_main_t * vm, /* increment TX interface stat */ vlib_increment_combined_counter (im->combined_sw_if_counters + - VNET_INTERFACE_COUNTER_TX, cpu_index, - sw_if_index, n_pkts, n_bytes); + VNET_INTERFACE_COUNTER_TX, + thread_index, sw_if_index, n_pkts, + n_bytes); } return n_left_from; diff --git a/src/vnet/ethernet/node.c b/src/vnet/ethernet/node.c index b699e381..f7787ed2 100755 --- a/src/vnet/ethernet/node.c +++ b/src/vnet/ethernet/node.c @@ -291,7 +291,7 @@ ethernet_input_inline (vlib_main_t * vm, vlib_node_runtime_t *error_node; u32 n_left_from, next_index, *from, *to_next; u32 stats_sw_if_index, stats_n_packets, stats_n_bytes; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 cached_sw_if_index = ~0; u32 cached_is_l2 = 0; /* shut up gcc */ vnet_hw_interface_t *hi = NULL; /* used for main interface only */ @@ -510,7 +510,7 @@ ethernet_input_inline (vlib_main_t * vm, interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, + thread_index, new_sw_if_index0, 1, len0); if (new_sw_if_index1 != old_sw_if_index1 @@ -519,7 +519,7 @@ ethernet_input_inline (vlib_main_t * vm, interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, + thread_index, new_sw_if_index1, 1, len1); @@ -530,7 +530,7 @@ ethernet_input_inline (vlib_main_t * vm, vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = stats_n_bytes = 0; @@ -696,13 +696,13 @@ ethernet_input_inline (vlib_main_t * vm, vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, new_sw_if_index0, 1, len0); + thread_index, new_sw_if_index0, 1, len0); if (stats_n_packets > 0) { vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = stats_n_bytes = 0; } @@ -734,7 +734,7 @@ ethernet_input_inline (vlib_main_t * vm, vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); 
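Every vlib_increment_combined_counter call in these hunks uses thread_index as the bank selector: each thread updates a private copy of the counters, and readers sum the banks. A rough sketch of why this layout wants a thread index rather than a lock; the layout and names here are illustrative, not VPP's vlib_combined_counter_main_t:

#include <stdint.h>

#define N_THREADS 4
#define N_COUNTERS 16

typedef struct { uint64_t packets, bytes; } combined_counter_t;

/* One row per thread; writers never share a row, so the hot path
   needs no atomics and no cache-line bouncing between cores. */
static combined_counter_t counters[N_THREADS][N_COUNTERS];

static inline void
increment_combined (uint32_t thread_index, uint32_t index,
                    uint64_t n_packets, uint64_t n_bytes)
{
  counters[thread_index][index].packets += n_packets;
  counters[thread_index][index].bytes += n_bytes;
}

/* A reader (e.g. a stats collector) folds the per-thread rows. */
static inline combined_counter_t
read_combined (uint32_t index)
{
  combined_counter_t sum = { 0, 0 };
  for (uint32_t t = 0; t < N_THREADS; t++)
    {
      sum.packets += counters[t][index].packets;
      sum.bytes += counters[t][index].bytes;
    }
  return sum;
}
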
node->runtime_data[0] = stats_sw_if_index; } diff --git a/src/vnet/gre/node.c b/src/vnet/gre/node.c index 2683586e..acf15f24 100644 --- a/src/vnet/gre/node.c +++ b/src/vnet/gre/node.c @@ -75,7 +75,7 @@ gre_input (vlib_main_t * vm, u64 cached_tunnel_key6[4]; u32 cached_tunnel_sw_if_index = 0, tunnel_sw_if_index = 0; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); u32 len; vnet_interface_main_t *im = &gm->vnet_main->interface_main; @@ -257,7 +257,7 @@ gre_input (vlib_main_t * vm, len = vlib_buffer_length_in_chain (vm, b0); vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, + thread_index, tunnel_sw_if_index, 1 /* packets */, len /* bytes */); @@ -324,7 +324,7 @@ drop0: len = vlib_buffer_length_in_chain (vm, b1); vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, + thread_index, tunnel_sw_if_index, 1 /* packets */, len /* bytes */); @@ -502,7 +502,7 @@ drop1: len = vlib_buffer_length_in_chain (vm, b0); vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, + thread_index, tunnel_sw_if_index, 1 /* packets */, len /* bytes */); diff --git a/src/vnet/interface.h b/src/vnet/interface.h index a1ea2d61..08f08b10 100644 --- a/src/vnet/interface.h +++ b/src/vnet/interface.h @@ -468,7 +468,7 @@ typedef struct vnet_hw_interface_t u32 input_node_index; /* input node cpu index by queue */ - u32 *input_node_cpu_index_by_queue; + u32 *input_node_thread_index_by_queue; } vnet_hw_interface_t; diff --git a/src/vnet/interface_output.c b/src/vnet/interface_output.c index 03f2cdca..663dc309 100644 --- a/src/vnet/interface_output.c +++ b/src/vnet/interface_output.c @@ -196,7 +196,7 @@ slow_path (vlib_main_t * vm, */ static_always_inline void incr_output_stats (vnet_main_t * vnm, - u32 cpu_index, + u32 thread_index, u32 length, u32 sw_if_index, u32 * last_sw_if_index, u32 * n_packets, u32 * n_bytes) @@ -216,7 +216,7 @@ incr_output_stats (vnet_main_t * vnm, vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, + thread_index, *last_sw_if_index, *n_packets, *n_bytes); } @@ -240,7 +240,7 @@ vnet_interface_output_node_flatten (vlib_main_t * vm, u32 n_left_to_tx, *from, *from_end, *to_tx; u32 n_bytes, n_buffers, n_packets; u32 last_sw_if_index; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; n_buffers = frame->n_vectors; @@ -266,7 +266,7 @@ vnet_interface_output_node_flatten (vlib_main_t * vm, cm = vec_elt_at_index (vnm->interface_main.sw_if_counters, VNET_INTERFACE_COUNTER_TX_ERROR); - vlib_increment_simple_counter (cm, cpu_index, + vlib_increment_simple_counter (cm, thread_index, rt->sw_if_index, n_buffers); return vlib_error_drop_buffers (vm, node, from, /* buffer stride */ 1, @@ -341,18 +341,18 @@ vnet_interface_output_node_flatten (vlib_main_t * vm, from += 1; to_tx += n_buffers; n_left_to_tx -= n_buffers; - incr_output_stats (vnm, cpu_index, n_slow_bytes, + incr_output_stats (vnm, thread_index, n_slow_bytes, vnet_buffer (b)->sw_if_index[VLIB_TX], &last_sw_if_index, &n_packets, &n_bytes); } } else { - incr_output_stats (vnm, cpu_index, + incr_output_stats (vnm, thread_index, vlib_buffer_length_in_chain (vm, b0), vnet_buffer (b0)->sw_if_index[VLIB_TX], &last_sw_if_index, &n_packets, &n_bytes); - incr_output_stats (vnm, cpu_index, + incr_output_stats (vnm, thread_index, vlib_buffer_length_in_chain (vm, b0), vnet_buffer (b1)->sw_if_index[VLIB_TX], 
&last_sw_if_index, &n_packets, &n_bytes); @@ -396,7 +396,7 @@ vnet_interface_output_node_flatten (vlib_main_t * vm, to_tx += n_buffers; n_left_to_tx -= n_buffers; } - incr_output_stats (vnm, cpu_index, + incr_output_stats (vnm, thread_index, vlib_buffer_length_in_chain (vm, b0), vnet_buffer (b0)->sw_if_index[VLIB_TX], &last_sw_if_index, &n_packets, &n_bytes); @@ -408,7 +408,7 @@ vnet_interface_output_node_flatten (vlib_main_t * vm, } /* Final update of interface stats. */ - incr_output_stats (vnm, cpu_index, 0, ~0, /* ~0 will flush stats */ + incr_output_stats (vnm, thread_index, 0, ~0, /* ~0 will flush stats */ &last_sw_if_index, &n_packets, &n_bytes); return n_buffers; @@ -428,7 +428,7 @@ vnet_interface_output_node (vlib_main_t * vm, u32 n_left_to_tx, *from, *from_end, *to_tx; u32 n_bytes, n_buffers, n_packets; u32 n_bytes_b0, n_bytes_b1, n_bytes_b2, n_bytes_b3; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; vnet_interface_main_t *im = &vnm->interface_main; u32 next_index = VNET_INTERFACE_OUTPUT_NEXT_TX; u32 current_config_index = ~0; @@ -458,7 +458,7 @@ vnet_interface_output_node (vlib_main_t * vm, cm = vec_elt_at_index (vnm->interface_main.sw_if_counters, VNET_INTERFACE_COUNTER_TX_ERROR); - vlib_increment_simple_counter (cm, cpu_index, + vlib_increment_simple_counter (cm, thread_index, rt->sw_if_index, n_buffers); return vlib_error_drop_buffers (vm, node, from, @@ -558,7 +558,7 @@ vnet_interface_output_node (vlib_main_t * vm, { vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, tx_swif0, 1, + thread_index, tx_swif0, 1, n_bytes_b0); } @@ -567,7 +567,7 @@ vnet_interface_output_node (vlib_main_t * vm, vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, tx_swif1, 1, + thread_index, tx_swif1, 1, n_bytes_b1); } @@ -576,7 +576,7 @@ vnet_interface_output_node (vlib_main_t * vm, vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, tx_swif2, 1, + thread_index, tx_swif2, 1, n_bytes_b2); } if (PREDICT_FALSE (tx_swif3 != rt->sw_if_index)) @@ -584,7 +584,7 @@ vnet_interface_output_node (vlib_main_t * vm, vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, tx_swif3, 1, + thread_index, tx_swif3, 1, n_bytes_b3); } } @@ -623,7 +623,7 @@ vnet_interface_output_node (vlib_main_t * vm, vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, tx_swif0, 1, + thread_index, tx_swif0, 1, n_bytes_b0); } } @@ -634,7 +634,7 @@ vnet_interface_output_node (vlib_main_t * vm, /* Update main interface stats. 
*/ vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, + thread_index, rt->sw_if_index, n_packets, n_bytes); return n_buffers; } @@ -893,7 +893,7 @@ process_drop_punt (vlib_main_t * vm, u32 current_sw_if_index, n_errors_current_sw_if_index; u64 current_counter; vlib_simple_counter_main_t *cm; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; static vlib_error_t memory[VNET_ERROR_N_DISPOSITION]; static char memory_init[VNET_ERROR_N_DISPOSITION]; @@ -965,19 +965,19 @@ process_drop_punt (vlib_main_t * vm, current_counter -= 2; n_errors_current_sw_if_index -= 2; - vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index1, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index1, 1); /* Increment super-interface drop/punt counters for sub-interfaces. */ sw_if0 = vnet_get_sw_interface (vnm, sw_if_index0); vlib_increment_simple_counter - (cm, cpu_index, sw_if0->sup_sw_if_index, + (cm, thread_index, sw_if0->sup_sw_if_index, sw_if0->sup_sw_if_index != sw_if_index0); sw_if1 = vnet_get_sw_interface (vnm, sw_if_index1); vlib_increment_simple_counter - (cm, cpu_index, sw_if1->sup_sw_if_index, + (cm, thread_index, sw_if1->sup_sw_if_index, sw_if1->sup_sw_if_index != sw_if_index1); em->counters[current_counter_index] = current_counter; @@ -1013,11 +1013,12 @@ process_drop_punt (vlib_main_t * vm, sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX]; /* Increment drop/punt counters. */ - vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1); /* Increment super-interface drop/punt counters for sub-interfaces. 
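The increment that follows is branch-free: the comparison result is passed as the count, so the super-interface counter is bumped by 1 only when the packet arrived on a sub-interface and by 0 otherwise. A minimal restatement of the idiom, assuming sw_if0 has already been looked up:

     u32 is_sub = sw_if0->sup_sw_if_index != sw_if_index0;  // 1 on a sub-interface
     vlib_increment_simple_counter (cm, thread_index,
                                    sw_if0->sup_sw_if_index, is_sub);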
*/ sw_if0 = vnet_get_sw_interface (vnm, sw_if_index0); - vlib_increment_simple_counter (cm, cpu_index, sw_if0->sup_sw_if_index, + vlib_increment_simple_counter (cm, thread_index, + sw_if0->sup_sw_if_index, sw_if0->sup_sw_if_index != sw_if_index0); if (PREDICT_FALSE (e0 != current_error)) @@ -1041,12 +1042,12 @@ process_drop_punt (vlib_main_t * vm, { vnet_sw_interface_t *si; - vlib_increment_simple_counter (cm, cpu_index, current_sw_if_index, + vlib_increment_simple_counter (cm, thread_index, current_sw_if_index, n_errors_current_sw_if_index); si = vnet_get_sw_interface (vnm, current_sw_if_index); if (si->sup_sw_if_index != current_sw_if_index) - vlib_increment_simple_counter (cm, cpu_index, si->sup_sw_if_index, + vlib_increment_simple_counter (cm, thread_index, si->sup_sw_if_index, n_errors_current_sw_if_index); } diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c index ee1703e7..fdfe7f63 100644 --- a/src/vnet/ip/ip4_forward.c +++ b/src/vnet/ip/ip4_forward.c @@ -75,7 +75,7 @@ ip4_lookup_inline (vlib_main_t * vm, vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters; u32 n_left_from, n_left_to_next, *from, *to_next; ip_lookup_next_t next; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -292,19 +292,19 @@ ip4_lookup_inline (vlib_main_t * vm, vnet_buffer (p3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lb_index0, 1, + (cm, thread_index, lb_index0, 1, vlib_buffer_length_in_chain (vm, p0) + sizeof (ethernet_header_t)); vlib_increment_combined_counter - (cm, cpu_index, lb_index1, 1, + (cm, thread_index, lb_index1, 1, vlib_buffer_length_in_chain (vm, p1) + sizeof (ethernet_header_t)); vlib_increment_combined_counter - (cm, cpu_index, lb_index2, 1, + (cm, thread_index, lb_index2, 1, vlib_buffer_length_in_chain (vm, p2) + sizeof (ethernet_header_t)); vlib_increment_combined_counter - (cm, cpu_index, lb_index3, 1, + (cm, thread_index, lb_index3, 1, vlib_buffer_length_in_chain (vm, p3) + sizeof (ethernet_header_t)); @@ -392,7 +392,7 @@ ip4_lookup_inline (vlib_main_t * vm, vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); from += 1; to_next += 1; @@ -479,7 +479,7 @@ ip4_load_balance (vlib_main_t * vm, vlib_combined_counter_main_t *cm = &load_balance_main.lbm_via_counters; u32 n_left_from, n_left_to_next, *from, *to_next; ip_lookup_next_t next; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -584,9 +584,9 @@ ip4_load_balance (vlib_main_t * vm, vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); vlib_increment_combined_counter - (cm, cpu_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1)); + (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1)); vlib_validate_buffer_enqueue_x2 (vm, node, next, to_next, n_left_to_next, @@ -639,7 +639,7 @@ ip4_load_balance (vlib_main_t * vm, vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + (cm, 
thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); vlib_validate_buffer_enqueue_x1 (vm, node, next, to_next, n_left_to_next, @@ -2330,7 +2330,7 @@ ip4_rewrite_inline (vlib_main_t * vm, n_left_from = frame->n_vectors; next_index = node->cached_next_index; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -2379,9 +2379,9 @@ ip4_rewrite_inline (vlib_main_t * vm, if (do_counters) { vlib_prefetch_combined_counter (&adjacency_counters, - cpu_index, adj_index0); + thread_index, adj_index0); vlib_prefetch_combined_counter (&adjacency_counters, - cpu_index, adj_index1); + thread_index, adj_index1); } ip0 = vlib_buffer_get_current (p0); @@ -2527,13 +2527,13 @@ ip4_rewrite_inline (vlib_main_t * vm, { vlib_increment_combined_counter (&adjacency_counters, - cpu_index, + thread_index, adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); vlib_increment_combined_counter (&adjacency_counters, - cpu_index, + thread_index, adj_index1, 1, vlib_buffer_length_in_chain (vm, p1) + rw_len1); } @@ -2618,7 +2618,7 @@ ip4_rewrite_inline (vlib_main_t * vm, if (do_counters) vlib_prefetch_combined_counter (&adjacency_counters, - cpu_index, adj_index0); + thread_index, adj_index0); /* Guess we are only writing on simple Ethernet header. */ vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t)); @@ -2637,7 +2637,7 @@ ip4_rewrite_inline (vlib_main_t * vm, if (do_counters) vlib_increment_combined_counter (&adjacency_counters, - cpu_index, adj_index0, 1, + thread_index, adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); /* Check MTU of outgoing interface. */ diff --git a/src/vnet/ip/ip4_input.c b/src/vnet/ip/ip4_input.c index ba200a9f..3b08f4b0 100644 --- a/src/vnet/ip/ip4_input.c +++ b/src/vnet/ip/ip4_input.c @@ -85,7 +85,7 @@ ip4_input_inline (vlib_main_t * vm, vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, ip4_input_node.index); vlib_simple_counter_main_t *cm; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -178,8 +178,8 @@ ip4_input_inline (vlib_main_t * vm, vnet_feature_arc_start (arc0, sw_if_index0, &next0, p0); vnet_feature_arc_start (arc1, sw_if_index1, &next1, p1); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index1, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index1, 1); /* Punt packets with options or wrong version. */ if (PREDICT_FALSE (ip0->ip_version_and_header_length != 0x45)) @@ -299,7 +299,7 @@ ip4_input_inline (vlib_main_t * vm, vnet_buffer (p0)->ip.adj_index[VLIB_RX] = ~0; vnet_feature_arc_start (arc0, sw_if_index0, &next0, p0); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1); /* Punt packets with options or wrong version. 
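The counter updates above illustrate the accessor migration applied throughout this patch: the per-thread counter slot is now selected by the worker thread index rather than the OS CPU number. A minimal equivalence sketch, assuming the code runs inside a graph node on a worker thread:

     u32 thread_index = vlib_get_thread_index ();  // formerly os_get_cpu_number ()
     vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1);

Inside a node function the cached copy is vm->thread_index (formerly vm->cpu_index).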
*/ if (PREDICT_FALSE (ip0->ip_version_and_header_length != 0x45)) diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c index c120f12c..c2fc4f87 100644 --- a/src/vnet/ip/ip6_forward.c +++ b/src/vnet/ip/ip6_forward.c @@ -74,7 +74,7 @@ ip6_lookup_inline (vlib_main_t * vm, vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters; u32 n_left_from, n_left_to_next, *from, *to_next; ip_lookup_next_t next; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -185,9 +185,9 @@ ip6_lookup_inline (vlib_main_t * vm, vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); vlib_increment_combined_counter - (cm, cpu_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1)); + (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1)); from += 2; to_next += 2; @@ -291,7 +291,7 @@ ip6_lookup_inline (vlib_main_t * vm, vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); from += 1; to_next += 1; @@ -703,7 +703,7 @@ ip6_load_balance (vlib_main_t * vm, vlib_combined_counter_main_t *cm = &load_balance_main.lbm_via_counters; u32 n_left_from, n_left_to_next, *from, *to_next; ip_lookup_next_t next; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); ip6_main_t *im = &ip6_main; from = vlib_frame_vector_args (frame); @@ -824,9 +824,9 @@ ip6_load_balance (vlib_main_t * vm, vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); vlib_increment_combined_counter - (cm, cpu_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1)); + (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1)); vlib_validate_buffer_enqueue_x2 (vm, node, next, to_next, n_left_to_next, @@ -886,7 +886,7 @@ ip6_load_balance (vlib_main_t * vm, } vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); vlib_validate_buffer_enqueue_x1 (vm, node, next, to_next, n_left_to_next, @@ -1897,7 +1897,7 @@ ip6_rewrite_inline (vlib_main_t * vm, n_left_from = frame->n_vectors; next_index = node->cached_next_index; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -2019,11 +2019,11 @@ ip6_rewrite_inline (vlib_main_t * vm, { vlib_increment_combined_counter (&adjacency_counters, - cpu_index, adj_index0, 1, + thread_index, adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); vlib_increment_combined_counter (&adjacency_counters, - cpu_index, adj_index1, 1, + thread_index, adj_index1, 1, vlib_buffer_length_in_chain (vm, p1) + rw_len1); } @@ -2156,7 +2156,7 @@ ip6_rewrite_inline (vlib_main_t * vm, { vlib_increment_combined_counter (&adjacency_counters, - cpu_index, adj_index0, 1, + thread_index, adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); } diff --git a/src/vnet/ip/ip6_input.c b/src/vnet/ip/ip6_input.c index 20306088..ffdc4727 100644 --- a/src/vnet/ip/ip6_input.c +++ 
b/src/vnet/ip/ip6_input.c @@ -82,7 +82,7 @@ ip6_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, ip6_input_node.index); vlib_simple_counter_main_t *cm; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -171,8 +171,8 @@ ip6_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) vnet_feature_arc_start (arc0, sw_if_index0, &next0, p0); vnet_feature_arc_start (arc1, sw_if_index1, &next1, p1); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index1, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index1, 1); error0 = error1 = IP6_ERROR_NONE; @@ -270,7 +270,7 @@ ip6_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) vnet_buffer (p0)->ip.adj_index[VLIB_RX] = ~0; vnet_feature_arc_start (arc0, sw_if_index0, &next0, p0); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1); error0 = IP6_ERROR_NONE; /* Version != 6? Drop it. */ diff --git a/src/vnet/ip/ip6_neighbor.c b/src/vnet/ip/ip6_neighbor.c index 5d1fb6f8..2af546df 100644 --- a/src/vnet/ip/ip6_neighbor.c +++ b/src/vnet/ip/ip6_neighbor.c @@ -581,7 +581,7 @@ vnet_set_ip6_ethernet_neighbor (vlib_main_t * vm, u32 next_index; pending_resolution_t *pr, *mc; - if (os_get_cpu_number ()) + if (vlib_get_thread_index ()) { set_unset_ip6_neighbor_rpc (vm, sw_if_index, a, link_layer_address, 1 /* set new neighbor */ , is_static, @@ -722,7 +722,7 @@ vnet_unset_ip6_ethernet_neighbor (vlib_main_t * vm, uword *p; int rv = 0; - if (os_get_cpu_number ()) + if (vlib_get_thread_index ()) { set_unset_ip6_neighbor_rpc (vm, sw_if_index, a, link_layer_address, 0 /* unset */ , 0, 0); diff --git a/src/vnet/ipsec/esp.h b/src/vnet/ipsec/esp.h index 50cac806..799003b9 100644 --- a/src/vnet/ipsec/esp.h +++ b/src/vnet/ipsec/esp.h @@ -282,8 +282,8 @@ hmac_calc (ipsec_integ_alg_t alg, u8 * data, int data_len, u8 * signature, u8 use_esn, u32 seq_hi) { esp_main_t *em = &esp_main; - u32 cpu_index = os_get_cpu_number (); - HMAC_CTX *ctx = &(em->per_thread_data[cpu_index].hmac_ctx); + u32 thread_index = vlib_get_thread_index (); + HMAC_CTX *ctx = &(em->per_thread_data[thread_index].hmac_ctx); const EVP_MD *md = NULL; unsigned int len; @@ -292,10 +292,10 @@ hmac_calc (ipsec_integ_alg_t alg, if (PREDICT_FALSE (em->esp_integ_algs[alg].md == 0)) return 0; - if (PREDICT_FALSE (alg != em->per_thread_data[cpu_index].last_integ_alg)) + if (PREDICT_FALSE (alg != em->per_thread_data[thread_index].last_integ_alg)) { md = em->esp_integ_algs[alg].md; - em->per_thread_data[cpu_index].last_integ_alg = alg; + em->per_thread_data[thread_index].last_integ_alg = alg; } HMAC_Init (ctx, key, key_len, md); diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c index 7289b260..925d2b45 100644 --- a/src/vnet/ipsec/esp_decrypt.c +++ b/src/vnet/ipsec/esp_decrypt.c @@ -85,8 +85,8 @@ esp_decrypt_aes_cbc (ipsec_crypto_alg_t alg, u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv) { esp_main_t *em = &esp_main; - u32 cpu_index = os_get_cpu_number (); - EVP_CIPHER_CTX *ctx = &(em->per_thread_data[cpu_index].decrypt_ctx); + u32 thread_index = vlib_get_thread_index (); + EVP_CIPHER_CTX *ctx = 
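/* A minimal sketch of the per-thread cipher-context caching this hunk
   converts to thread indices: each worker owns one EVP context, and
   EVP_DecryptInit_ex is re-armed only when the algorithm changes (all
   names below are the ones used in this file):

     u32 thread_index = vlib_get_thread_index ();
     if (PREDICT_FALSE
         (alg != em->per_thread_data[thread_index].last_decrypt_alg))
       {
         cipher = em->esp_crypto_algs[alg].type;
         em->per_thread_data[thread_index].last_decrypt_alg = alg;
       }
     EVP_DecryptInit_ex (ctx, cipher, NULL, key, iv);                    */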
&(em->per_thread_data[thread_index].decrypt_ctx); const EVP_CIPHER *cipher = NULL; int out_len; @@ -95,10 +95,11 @@ esp_decrypt_aes_cbc (ipsec_crypto_alg_t alg, if (PREDICT_FALSE (em->esp_crypto_algs[alg].type == 0)) return; - if (PREDICT_FALSE (alg != em->per_thread_data[cpu_index].last_decrypt_alg)) + if (PREDICT_FALSE + (alg != em->per_thread_data[thread_index].last_decrypt_alg)) { cipher = em->esp_crypto_algs[alg].type; - em->per_thread_data[cpu_index].last_decrypt_alg = alg; + em->per_thread_data[thread_index].last_decrypt_alg = alg; } EVP_DecryptInit_ex (ctx, cipher, NULL, key, iv); @@ -117,11 +118,11 @@ esp_decrypt_node_fn (vlib_main_t * vm, u32 *recycle = 0; from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); ipsec_alloc_empty_buffers (vm, im); - u32 *empty_buffers = im->empty_buffers[cpu_index]; + u32 *empty_buffers = im->empty_buffers[thread_index]; if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from)) { diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c index 44ae2297..b2bc4e0b 100644 --- a/src/vnet/ipsec/esp_encrypt.c +++ b/src/vnet/ipsec/esp_encrypt.c @@ -88,8 +88,8 @@ esp_encrypt_aes_cbc (ipsec_crypto_alg_t alg, u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv) { esp_main_t *em = &esp_main; - u32 cpu_index = os_get_cpu_number (); - EVP_CIPHER_CTX *ctx = &(em->per_thread_data[cpu_index].encrypt_ctx); + u32 thread_index = vlib_get_thread_index (); + EVP_CIPHER_CTX *ctx = &(em->per_thread_data[thread_index].encrypt_ctx); const EVP_CIPHER *cipher = NULL; int out_len; @@ -98,10 +98,11 @@ esp_encrypt_aes_cbc (ipsec_crypto_alg_t alg, if (PREDICT_FALSE (em->esp_crypto_algs[alg].type == IPSEC_CRYPTO_ALG_NONE)) return; - if (PREDICT_FALSE (alg != em->per_thread_data[cpu_index].last_encrypt_alg)) + if (PREDICT_FALSE + (alg != em->per_thread_data[thread_index].last_encrypt_alg)) { cipher = em->esp_crypto_algs[alg].type; - em->per_thread_data[cpu_index].last_encrypt_alg = alg; + em->per_thread_data[thread_index].last_encrypt_alg = alg; } EVP_EncryptInit_ex (ctx, cipher, NULL, key, iv); @@ -119,11 +120,11 @@ esp_encrypt_node_fn (vlib_main_t * vm, n_left_from = from_frame->n_vectors; ipsec_main_t *im = &ipsec_main; u32 *recycle = 0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); ipsec_alloc_empty_buffers (vm, im); - u32 *empty_buffers = im->empty_buffers[cpu_index]; + u32 *empty_buffers = im->empty_buffers[thread_index]; if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from)) { diff --git a/src/vnet/ipsec/ikev2.c b/src/vnet/ipsec/ikev2.c index 2c1074d8..3f9978a7 100644 --- a/src/vnet/ipsec/ikev2.c +++ b/src/vnet/ipsec/ikev2.c @@ -303,16 +303,16 @@ static void ikev2_delete_sa (ikev2_sa_t * sa) { ikev2_main_t *km = &ikev2_main; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); uword *p; ikev2_sa_free_all_vec (sa); - p = hash_get (km->per_thread_data[cpu_index].sa_by_rspi, sa->rspi); + p = hash_get (km->per_thread_data[thread_index].sa_by_rspi, sa->rspi); if (p) { - hash_unset (km->per_thread_data[cpu_index].sa_by_rspi, sa->rspi); - pool_put (km->per_thread_data[cpu_index].sas, sa); + hash_unset (km->per_thread_data[thread_index].sa_by_rspi, sa->rspi); + pool_put (km->per_thread_data[thread_index].sas, sa); } } @@ -776,29 +776,31 @@ ikev2_initial_contact_cleanup (ikev2_sa_t * sa) ikev2_sa_t *tmp; u32 i, *delete = 0; ikev2_child_sa_t *c; - u32 cpu_index = os_get_cpu_number 
(); + u32 thread_index = vlib_get_thread_index (); if (!sa->initial_contact) return; /* find old IKE SAs with the same authenticated identity */ /* *INDENT-OFF* */ - pool_foreach (tmp, km->per_thread_data[cpu_index].sas, ({ + pool_foreach (tmp, km->per_thread_data[thread_index].sas, ({ if (tmp->i_id.type != sa->i_id.type || vec_len(tmp->i_id.data) != vec_len(sa->i_id.data) || memcmp(sa->i_id.data, tmp->i_id.data, vec_len(sa->i_id.data))) continue; if (sa->rspi != tmp->rspi) - vec_add1(delete, tmp - km->per_thread_data[cpu_index].sas); + vec_add1(delete, tmp - km->per_thread_data[thread_index].sas); })); /* *INDENT-ON* */ for (i = 0; i < vec_len (delete); i++) { - tmp = pool_elt_at_index (km->per_thread_data[cpu_index].sas, delete[i]); - vec_foreach (c, tmp->childs) - ikev2_delete_tunnel_interface (km->vnet_main, tmp, c); + tmp = + pool_elt_at_index (km->per_thread_data[thread_index].sas, delete[i]); + vec_foreach (c, + tmp->childs) ikev2_delete_tunnel_interface (km->vnet_main, + tmp, c); ikev2_delete_sa (tmp); } @@ -1922,10 +1924,10 @@ ikev2_retransmit_sa_init (ike_header_t * ike, { ikev2_main_t *km = &ikev2_main; ikev2_sa_t *sa; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); /* *INDENT-OFF* */ - pool_foreach (sa, km->per_thread_data[cpu_index].sas, ({ + pool_foreach (sa, km->per_thread_data[thread_index].sas, ({ if (sa->ispi == clib_net_to_host_u64(ike->ispi) && sa->iaddr.as_u32 == iaddr.as_u32 && sa->raddr.as_u32 == raddr.as_u32) @@ -2036,7 +2038,7 @@ ikev2_node_fn (vlib_main_t * vm, u32 n_left_from, *from, *to_next; ikev2_next_t next_index; ikev2_main_t *km = &ikev2_main; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -2134,11 +2136,14 @@ ikev2_node_fn (vlib_main_t * vm, if (sa0->state == IKEV2_STATE_SA_INIT) { /* add SA to the pool */ - pool_get (km->per_thread_data[cpu_index].sas, sa0); + pool_get (km->per_thread_data[thread_index].sas, + sa0); clib_memcpy (sa0, &sa, sizeof (*sa0)); - hash_set (km->per_thread_data[cpu_index].sa_by_rspi, + hash_set (km-> + per_thread_data[thread_index].sa_by_rspi, sa0->rspi, - sa0 - km->per_thread_data[cpu_index].sas); + sa0 - + km->per_thread_data[thread_index].sas); } else { @@ -2169,11 +2174,11 @@ ikev2_node_fn (vlib_main_t * vm, if (sa0->state == IKEV2_STATE_SA_INIT) { /* add SA to the pool */ - pool_get (km->per_thread_data[cpu_index].sas, sa0); + pool_get (km->per_thread_data[thread_index].sas, sa0); clib_memcpy (sa0, &sa, sizeof (*sa0)); - hash_set (km->per_thread_data[cpu_index].sa_by_rspi, + hash_set (km->per_thread_data[thread_index].sa_by_rspi, sa0->rspi, - sa0 - km->per_thread_data[cpu_index].sas); + sa0 - km->per_thread_data[thread_index].sas); } else { @@ -2184,12 +2189,13 @@ ikev2_node_fn (vlib_main_t * vm, else if (ike0->exchange == IKEV2_EXCHANGE_IKE_AUTH) { uword *p; - p = hash_get (km->per_thread_data[cpu_index].sa_by_rspi, + p = hash_get (km->per_thread_data[thread_index].sa_by_rspi, clib_net_to_host_u64 (ike0->rspi)); if (p) { - sa0 = pool_elt_at_index (km->per_thread_data[cpu_index].sas, - p[0]); + sa0 = + pool_elt_at_index (km->per_thread_data[thread_index].sas, + p[0]); r = ikev2_retransmit_resp (sa0, ike0); if (r == 1) @@ -2240,12 +2246,13 @@ ikev2_node_fn (vlib_main_t * vm, else if (ike0->exchange == IKEV2_EXCHANGE_INFORMATIONAL) { uword *p; - p = hash_get (km->per_thread_data[cpu_index].sa_by_rspi, + p = hash_get (km->per_thread_data[thread_index].sa_by_rspi, 
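/* Each worker keeps its own SA pool plus a hash from responder SPI to
   pool index, so an IKE exchange is always handled against thread-local
   state; a minimal sketch of the insert side, with names as in this file:

     pool_get (km->per_thread_data[thread_index].sas, sa0);
     clib_memcpy (sa0, &sa, sizeof (*sa0));
     hash_set (km->per_thread_data[thread_index].sa_by_rspi, sa0->rspi,
               sa0 - km->per_thread_data[thread_index].sas);             */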
clib_net_to_host_u64 (ike0->rspi)); if (p) { - sa0 = pool_elt_at_index (km->per_thread_data[cpu_index].sas, - p[0]); + sa0 = + pool_elt_at_index (km->per_thread_data[thread_index].sas, + p[0]); r = ikev2_retransmit_resp (sa0, ike0); if (r == 1) @@ -2305,12 +2312,13 @@ ikev2_node_fn (vlib_main_t * vm, else if (ike0->exchange == IKEV2_EXCHANGE_CREATE_CHILD_SA) { uword *p; - p = hash_get (km->per_thread_data[cpu_index].sa_by_rspi, + p = hash_get (km->per_thread_data[thread_index].sa_by_rspi, clib_net_to_host_u64 (ike0->rspi)); if (p) { - sa0 = pool_elt_at_index (km->per_thread_data[cpu_index].sas, - p[0]); + sa0 = + pool_elt_at_index (km->per_thread_data[thread_index].sas, + p[0]); r = ikev2_retransmit_resp (sa0, ike0); if (r == 1) diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h index 58f0f145..c884e360 100644 --- a/src/vnet/ipsec/ipsec.h +++ b/src/vnet/ipsec/ipsec.h @@ -324,21 +324,21 @@ int ipsec_set_interface_key (vnet_main_t * vnm, u32 hw_if_index, always_inline void ipsec_alloc_empty_buffers (vlib_main_t * vm, ipsec_main_t * im) { - u32 cpu_index = os_get_cpu_number (); - uword l = vec_len (im->empty_buffers[cpu_index]); + u32 thread_index = vlib_get_thread_index (); + uword l = vec_len (im->empty_buffers[thread_index]); uword n_alloc = 0; if (PREDICT_FALSE (l < VLIB_FRAME_SIZE)) { - if (!im->empty_buffers[cpu_index]) + if (!im->empty_buffers[thread_index]) { - vec_alloc (im->empty_buffers[cpu_index], 2 * VLIB_FRAME_SIZE); + vec_alloc (im->empty_buffers[thread_index], 2 * VLIB_FRAME_SIZE); } - n_alloc = vlib_buffer_alloc (vm, im->empty_buffers[cpu_index] + l, + n_alloc = vlib_buffer_alloc (vm, im->empty_buffers[thread_index] + l, 2 * VLIB_FRAME_SIZE - l); - _vec_len (im->empty_buffers[cpu_index]) = l + n_alloc; + _vec_len (im->empty_buffers[thread_index]) = l + n_alloc; } } diff --git a/src/vnet/ipsec/ipsec_if.c b/src/vnet/ipsec/ipsec_if.c index dc882004..ed124894 100644 --- a/src/vnet/ipsec/ipsec_if.c +++ b/src/vnet/ipsec/ipsec_if.c @@ -99,7 +99,7 @@ static int ipsec_add_del_tunnel_if_rpc_callback (ipsec_add_del_tunnel_args_t * a) { vnet_main_t *vnm = vnet_get_main (); - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); return ipsec_add_del_tunnel_if_internal (vnm, a); } diff --git a/src/vnet/l2/l2_bvi.h b/src/vnet/l2/l2_bvi.h index dd1130a6..e21a1616 100644 --- a/src/vnet/l2/l2_bvi.h +++ b/src/vnet/l2/l2_bvi.h @@ -97,7 +97,7 @@ l2_to_bvi (vlib_main_t * vlib_main, vlib_increment_combined_counter (vnet_main->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - vlib_main->cpu_index, + vlib_main->thread_index, vnet_buffer (b0)->sw_if_index[VLIB_RX], 1, vlib_buffer_length_in_chain (vlib_main, b0)); return TO_BVI_ERR_OK; diff --git a/src/vnet/l2/l2_input.c b/src/vnet/l2/l2_input.c index 041ff38d..e5d6878a 100644 --- a/src/vnet/l2/l2_input.c +++ b/src/vnet/l2/l2_input.c @@ -117,7 +117,7 @@ typedef enum static_always_inline void classify_and_dispatch (vlib_main_t * vm, vlib_node_runtime_t * node, - u32 cpu_index, + u32 thread_index, l2input_main_t * msm, vlib_buffer_t * b0, u32 * next0) { /* @@ -237,7 +237,7 @@ l2input_node_inline (vlib_main_t * vm, u32 n_left_from, *from, *to_next; l2input_next_t next_index; l2input_main_t *msm = &l2input_main; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; /* number of packets to process */ @@ -350,10 +350,10 @@ l2input_node_inline (vlib_main_t * vm, vlib_node_increment_counter (vm, 
l2input_node.index, L2INPUT_ERROR_L2INPUT, 4); - classify_and_dispatch (vm, node, cpu_index, msm, b0, &next0); - classify_and_dispatch (vm, node, cpu_index, msm, b1, &next1); - classify_and_dispatch (vm, node, cpu_index, msm, b2, &next2); - classify_and_dispatch (vm, node, cpu_index, msm, b3, &next3); + classify_and_dispatch (vm, node, thread_index, msm, b0, &next0); + classify_and_dispatch (vm, node, thread_index, msm, b1, &next1); + classify_and_dispatch (vm, node, thread_index, msm, b2, &next2); + classify_and_dispatch (vm, node, thread_index, msm, b3, &next3); /* verify speculative enqueues, maybe switch current next frame */ /* if next0==next1==next_index then nothing special needs to be done */ @@ -393,7 +393,7 @@ l2input_node_inline (vlib_main_t * vm, vlib_node_increment_counter (vm, l2input_node.index, L2INPUT_ERROR_L2INPUT, 1); - classify_and_dispatch (vm, node, cpu_index, msm, b0, &next0); + classify_and_dispatch (vm, node, thread_index, msm, b0, &next0); /* verify speculative enqueue, maybe switch current next frame */ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, diff --git a/src/vnet/l2/l2_output.c b/src/vnet/l2/l2_output.c index 00f22571..e17b2a16 100644 --- a/src/vnet/l2/l2_output.c +++ b/src/vnet/l2/l2_output.c @@ -643,11 +643,11 @@ l2output_create_output_node_mapping (vlib_main_t * vlib_main, vnet_main_t * vnet hw0 = vnet_get_sup_hw_interface (vnet_main, sw_if_index); - uword cpu_number; + uword thread_index; - cpu_number = os_get_cpu_number (); + thread_index = vlib_get_thread_index (); - if (cpu_number) + if (thread_index) { u32 oldflags; diff --git a/src/vnet/l2tp/decap.c b/src/vnet/l2tp/decap.c index e8986935..46104129 100644 --- a/src/vnet/l2tp/decap.c +++ b/src/vnet/l2tp/decap.c @@ -149,7 +149,7 @@ last_stage (vlib_main_t * vm, vlib_node_runtime_t * node, u32 bi) /* per-mapping byte stats include the ethernet header */ vlib_increment_combined_counter (&lm->counter_main, - os_get_cpu_number (), + vlib_get_thread_index (), counter_index, 1 /* packet_increment */ , vlib_buffer_length_in_chain (vm, b) + sizeof (ethernet_header_t)); diff --git a/src/vnet/l2tp/encap.c b/src/vnet/l2tp/encap.c index ed7a9580..dcdfde4b 100644 --- a/src/vnet/l2tp/encap.c +++ b/src/vnet/l2tp/encap.c @@ -124,7 +124,7 @@ last_stage (vlib_main_t * vm, vlib_node_runtime_t * node, u32 bi) /* per-mapping byte stats include the ethernet header */ vlib_increment_combined_counter (&lm->counter_main, - os_get_cpu_number (), + vlib_get_thread_index (), counter_index, 1 /* packet_increment */ , vlib_buffer_length_in_chain (vm, b)); diff --git a/src/vnet/l2tp/l2tp.c b/src/vnet/l2tp/l2tp.c index cb94d7e7..3dedc447 100644 --- a/src/vnet/l2tp/l2tp.c +++ b/src/vnet/l2tp/l2tp.c @@ -157,7 +157,7 @@ test_counters_command_fn (vlib_main_t * vm, u32 session_index; u32 counter_index; u32 nincr = 0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); /* *INDENT-OFF* */ pool_foreach (session, lm->sessions, @@ -167,11 +167,11 @@ test_counters_command_fn (vlib_main_t * vm, session_index_to_counter_index (session_index, SESSION_COUNTER_USER_TO_NETWORK); vlib_increment_combined_counter (&lm->counter_main, - cpu_index, + thread_index, counter_index, 1/*pkt*/, 1111 /*bytes*/); vlib_increment_combined_counter (&lm->counter_main, - cpu_index, + thread_index, counter_index+1, 1/*pkt*/, 2222 /*bytes*/); nincr++; diff --git a/src/vnet/lisp-gpe/decap.c b/src/vnet/lisp-gpe/decap.c index d887a95f..68769710 100644 --- a/src/vnet/lisp-gpe/decap.c +++ b/src/vnet/lisp-gpe/decap.c @@ -103,7 
+103,7 @@ next_index_to_iface (lisp_gpe_main_t * lgm, u32 next_index) } static_always_inline void -incr_decap_stats (vnet_main_t * vnm, u32 cpu_index, u32 length, +incr_decap_stats (vnet_main_t * vnm, u32 thread_index, u32 length, u32 sw_if_index, u32 * last_sw_if_index, u32 * n_packets, u32 * n_bytes) { @@ -122,7 +122,7 @@ incr_decap_stats (vnet_main_t * vnm, u32 cpu_index, u32 length, vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, *last_sw_if_index, + thread_index, *last_sw_if_index, *n_packets, *n_bytes); } *last_sw_if_index = sw_if_index; @@ -150,11 +150,11 @@ static uword lisp_gpe_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame, u8 is_v4) { - u32 n_left_from, next_index, *from, *to_next, cpu_index; + u32 n_left_from, next_index, *from, *to_next, thread_index; u32 n_bytes = 0, n_packets = 0, last_sw_if_index = ~0, drops = 0; lisp_gpe_main_t *lgm = vnet_lisp_gpe_get_main (); - cpu_index = os_get_cpu_number (); + thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -267,7 +267,7 @@ lisp_gpe_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (si0) { - incr_decap_stats (lgm->vnet_main, cpu_index, + incr_decap_stats (lgm->vnet_main, thread_index, vlib_buffer_length_in_chain (vm, b0), si0[0], &last_sw_if_index, &n_packets, &n_bytes); vnet_buffer (b0)->sw_if_index[VLIB_RX] = si0[0]; @@ -282,7 +282,7 @@ lisp_gpe_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (si1) { - incr_decap_stats (lgm->vnet_main, cpu_index, + incr_decap_stats (lgm->vnet_main, thread_index, vlib_buffer_length_in_chain (vm, b1), si1[0], &last_sw_if_index, &n_packets, &n_bytes); vnet_buffer (b1)->sw_if_index[VLIB_RX] = si1[0]; @@ -397,7 +397,7 @@ lisp_gpe_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (si0) { - incr_decap_stats (lgm->vnet_main, cpu_index, + incr_decap_stats (lgm->vnet_main, thread_index, vlib_buffer_length_in_chain (vm, b0), si0[0], &last_sw_if_index, &n_packets, &n_bytes); vnet_buffer (b0)->sw_if_index[VLIB_RX] = si0[0]; @@ -430,7 +430,7 @@ lisp_gpe_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, } /* flush iface stats */ - incr_decap_stats (lgm->vnet_main, cpu_index, 0, ~0, &last_sw_if_index, + incr_decap_stats (lgm->vnet_main, thread_index, 0, ~0, &last_sw_if_index, &n_packets, &n_bytes); vlib_node_increment_counter (vm, lisp_gpe_ip4_input_node.index, LISP_GPE_ERROR_NO_TUNNEL, drops); diff --git a/src/vnet/lldp/lldp_input.c b/src/vnet/lldp/lldp_input.c index 762743d0..e88f6fdb 100644 --- a/src/vnet/lldp/lldp_input.c +++ b/src/vnet/lldp/lldp_input.c @@ -35,7 +35,7 @@ typedef struct static void lldp_rpc_update_peer_cb (const lldp_intf_update_t * a) { - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); lldp_intf_t *n = lldp_get_intf (&lldp_main, a->hw_if_index); if (!n) diff --git a/src/vnet/map/ip4_map.c b/src/vnet/map/ip4_map.c index 1a20d704..e39b6f14 100644 --- a/src/vnet/map/ip4_map.c +++ b/src/vnet/map/ip4_map.c @@ -248,7 +248,7 @@ ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) next_index = node->cached_next_index; map_main_t *mm = &map_main; vlib_combined_counter_main_t *cm = mm->domain_counters; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -377,7 +377,7 @@ ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) ip40) ? 
IP4_MAP_NEXT_IP6_REWRITE : next0; vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, - cpu_index, + thread_index, map_domain_index0, 1, clib_net_to_host_u16 (ip6h0->payload_length) + @@ -409,7 +409,7 @@ ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) ip41) ? IP4_MAP_NEXT_IP6_REWRITE : next1; vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, - cpu_index, + thread_index, map_domain_index1, 1, clib_net_to_host_u16 (ip6h1->payload_length) + @@ -520,7 +520,7 @@ ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) ip40) ? IP4_MAP_NEXT_IP6_REWRITE : next0; vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, - cpu_index, + thread_index, map_domain_index0, 1, clib_net_to_host_u16 (ip6h0->payload_length) + @@ -564,7 +564,7 @@ ip4_map_reass (vlib_main_t * vm, next_index = node->cached_next_index; map_main_t *mm = &map_main; vlib_combined_counter_main_t *cm = mm->domain_counters; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 *fragments_to_drop = NULL; u32 *fragments_to_loopback = NULL; @@ -694,8 +694,8 @@ ip4_map_reass (vlib_main_t * vm, { if (error0 == MAP_ERROR_NONE) vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, - cpu_index, map_domain_index0, - 1, + thread_index, + map_domain_index0, 1, clib_net_to_host_u16 (ip60->payload_length) + 40); next0 = diff --git a/src/vnet/map/ip4_map_t.c b/src/vnet/map/ip4_map_t.c index b63d76bf..5f2bcbf9 100644 --- a/src/vnet/map/ip4_map_t.c +++ b/src/vnet/map/ip4_map_t.c @@ -477,7 +477,7 @@ ip4_map_t_icmp (vlib_main_t * vm, n_left_from = frame->n_vectors; next_index = node->cached_next_index; vlib_combined_counter_main_t *cm = map_main.domain_counters; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -520,7 +520,7 @@ ip4_map_t_icmp (vlib_main_t * vm, if (PREDICT_TRUE (error0 == MAP_ERROR_NONE)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, - cpu_index, + thread_index, vnet_buffer (p0)->map_t. map_domain_index, 1, len0); } @@ -1051,7 +1051,7 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) n_left_from = frame->n_vectors; next_index = node->cached_next_index; vlib_combined_counter_main_t *cm = map_main.domain_counters; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -1158,7 +1158,7 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, - cpu_index, + thread_index, vnet_buffer (p0)->map_t. map_domain_index, 1, clib_net_to_host_u16 (ip40-> @@ -1169,7 +1169,7 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) (error1 == MAP_ERROR_NONE && next1 != IP4_MAPT_NEXT_MAPT_ICMP)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, - cpu_index, + thread_index, vnet_buffer (p1)->map_t. map_domain_index, 1, clib_net_to_host_u16 (ip41-> @@ -1252,7 +1252,7 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, - cpu_index, + thread_index, vnet_buffer (p0)->map_t. 
map_domain_index, 1, clib_net_to_host_u16 (ip40-> diff --git a/src/vnet/map/ip6_map.c b/src/vnet/map/ip6_map.c index f7eb768f..63ada962 100644 --- a/src/vnet/map/ip6_map.c +++ b/src/vnet/map/ip6_map.c @@ -172,7 +172,7 @@ ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) vlib_node_get_runtime (vm, ip6_map_node.index); map_main_t *mm = &map_main; vlib_combined_counter_main_t *cm = mm->domain_counters; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -319,7 +319,7 @@ ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) IP6_MAP_NEXT_IP4_REWRITE : next0; } vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - cpu_index, + thread_index, map_domain_index0, 1, clib_net_to_host_u16 (ip40->length)); @@ -352,7 +352,7 @@ ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) IP6_MAP_NEXT_IP4_REWRITE : next1; } vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - cpu_index, + thread_index, map_domain_index1, 1, clib_net_to_host_u16 (ip41->length)); @@ -505,7 +505,7 @@ ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) IP6_MAP_NEXT_IP4_REWRITE : next0; } vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - cpu_index, + thread_index, map_domain_index0, 1, clib_net_to_host_u16 (ip40->length)); @@ -820,7 +820,7 @@ ip6_map_ip4_reass (vlib_main_t * vm, vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index); map_main_t *mm = &map_main; vlib_combined_counter_main_t *cm = mm->domain_counters; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 *fragments_to_drop = NULL; u32 *fragments_to_loopback = NULL; @@ -958,8 +958,8 @@ ip6_map_ip4_reass (vlib_main_t * vm, { if (error0 == MAP_ERROR_NONE) vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - cpu_index, map_domain_index0, - 1, + thread_index, + map_domain_index0, 1, clib_net_to_host_u16 (ip40->length)); next0 = @@ -1015,7 +1015,7 @@ ip6_map_icmp_relay (vlib_main_t * vm, vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, ip6_map_icmp_relay_node.index); map_main_t *mm = &map_main; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u16 *fragment_ids, *fid; from = vlib_frame_vector_args (frame); @@ -1143,7 +1143,8 @@ ip6_map_icmp_relay (vlib_main_t * vm, ip_csum_t sum = ip_incremental_checksum (0, new_icmp40, nlen - 20); new_icmp40->checksum = ~ip_csum_fold (sum); - vlib_increment_simple_counter (&mm->icmp_relayed, cpu_index, 0, 1); + vlib_increment_simple_counter (&mm->icmp_relayed, thread_index, 0, + 1); error: if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED)) diff --git a/src/vnet/map/ip6_map_t.c b/src/vnet/map/ip6_map_t.c index eb3996c2..99151678 100644 --- a/src/vnet/map/ip6_map_t.c +++ b/src/vnet/map/ip6_map_t.c @@ -448,7 +448,7 @@ ip6_map_t_icmp (vlib_main_t * vm, n_left_from = frame->n_vectors; next_index = node->cached_next_index; vlib_combined_counter_main_t *cm = map_main.domain_counters; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -493,7 +493,7 @@ ip6_map_t_icmp (vlib_main_t * vm, if (PREDICT_TRUE (error0 == MAP_ERROR_NONE)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - cpu_index, + thread_index, vnet_buffer (p0)-> map_t.map_domain_index, 1, len0); @@ -1051,7 +1051,7 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, 
vlib_frame_t * frame) vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, ip6_map_t_node.index); vlib_combined_counter_main_t *cm = map_main.domain_counters; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -1218,7 +1218,7 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - cpu_index, + thread_index, vnet_buffer (p0)-> map_t.map_domain_index, 1, clib_net_to_host_u16 @@ -1229,7 +1229,7 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) (error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - cpu_index, + thread_index, vnet_buffer (p1)-> map_t.map_domain_index, 1, clib_net_to_host_u16 @@ -1403,7 +1403,7 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - cpu_index, + thread_index, vnet_buffer (p0)-> map_t.map_domain_index, 1, clib_net_to_host_u16 diff --git a/src/vnet/mpls/mpls_input.c b/src/vnet/mpls/mpls_input.c index 893c4511..1b9bdd05 100644 --- a/src/vnet/mpls/mpls_input.c +++ b/src/vnet/mpls/mpls_input.c @@ -76,7 +76,7 @@ mpls_input_inline (vlib_main_t * vm, u32 n_left_from, next_index, * from, * to_next; mpls_input_runtime_t * rt; mpls_main_t * mm; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); vlib_simple_counter_main_t * cm; vnet_main_t * vnm = vnet_get_main(); @@ -151,7 +151,7 @@ mpls_input_inline (vlib_main_t * vm, next0 = MPLS_INPUT_NEXT_LOOKUP; vnet_feature_arc_start(mm->input_feature_arc_index, sw_if_index0, &next0, b0); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1); } if (PREDICT_FALSE(h1[3] == 0)) @@ -164,7 +164,7 @@ mpls_input_inline (vlib_main_t * vm, next1 = MPLS_INPUT_NEXT_LOOKUP; vnet_feature_arc_start(mm->input_feature_arc_index, sw_if_index1, &next1, b1); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index1, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index1, 1); } if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) @@ -215,7 +215,7 @@ mpls_input_inline (vlib_main_t * vm, { next0 = MPLS_INPUT_NEXT_LOOKUP; vnet_feature_arc_start(mm->input_feature_arc_index, sw_if_index0, &next0, b0); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1); } if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) diff --git a/src/vnet/mpls/mpls_lookup.c b/src/vnet/mpls/mpls_lookup.c index 475bb204..ace6a70f 100644 --- a/src/vnet/mpls/mpls_lookup.c +++ b/src/vnet/mpls/mpls_lookup.c @@ -67,7 +67,7 @@ mpls_lookup (vlib_main_t * vm, vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters; u32 n_left_from, next_index, * from, * to_next; mpls_main_t * mm = &mpls_main; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -220,16 +220,16 @@ mpls_lookup (vlib_main_t * vm, vnet_buffer (b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, 
lbi0, 1, vlib_buffer_length_in_chain (vm, b0)); vlib_increment_combined_counter - (cm, cpu_index, lbi1, 1, + (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, b1)); vlib_increment_combined_counter - (cm, cpu_index, lbi2, 1, + (cm, thread_index, lbi2, 1, vlib_buffer_length_in_chain (vm, b2)); vlib_increment_combined_counter - (cm, cpu_index, lbi3, 1, + (cm, thread_index, lbi3, 1, vlib_buffer_length_in_chain (vm, b3)); /* @@ -351,7 +351,7 @@ mpls_lookup (vlib_main_t * vm, vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b0)); /* @@ -440,7 +440,7 @@ mpls_load_balance (vlib_main_t * vm, { vlib_combined_counter_main_t * cm = &load_balance_main.lbm_via_counters; u32 n_left_from, n_left_to_next, * from, * to_next; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); u32 next; from = vlib_frame_vector_args (frame); @@ -536,10 +536,10 @@ mpls_load_balance (vlib_main_t * vm, vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); vlib_increment_combined_counter - (cm, cpu_index, lbi1, 1, + (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1)); if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) @@ -597,7 +597,7 @@ mpls_load_balance (vlib_main_t * vm, vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); vlib_validate_buffer_enqueue_x1 (vm, node, next, diff --git a/src/vnet/mpls/mpls_output.c b/src/vnet/mpls/mpls_output.c index 08018fd1..d90dec21 100644 --- a/src/vnet/mpls/mpls_output.c +++ b/src/vnet/mpls/mpls_output.c @@ -64,12 +64,12 @@ mpls_output_inline (vlib_main_t * vm, vlib_frame_t * from_frame, int is_midchain) { - u32 n_left_from, next_index, * from, * to_next, cpu_index; + u32 n_left_from, next_index, * from, * to_next, thread_index; vlib_node_runtime_t * error_node; u32 n_left_to_next; mpls_main_t *mm; - cpu_index = os_get_cpu_number(); + thread_index = vlib_get_thread_index(); error_node = vlib_node_get_runtime (vm, mpls_output_node.index); from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -137,13 +137,13 @@ mpls_output_inline (vlib_main_t * vm, /* Bump the adj counters for packet and bytes */ vlib_increment_combined_counter (&adjacency_counters, - cpu_index, + thread_index, adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); vlib_increment_combined_counter (&adjacency_counters, - cpu_index, + thread_index, adj_index1, 1, vlib_buffer_length_in_chain (vm, p1) + rw_len1); @@ -245,7 +245,7 @@ mpls_output_inline (vlib_main_t * vm, vlib_increment_combined_counter (&adjacency_counters, - cpu_index, + thread_index, adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); diff --git a/src/vnet/pg/input.c b/src/vnet/pg/input.c index 2649798b..597ae060 100644 --- a/src/vnet/pg/input.c +++ b/src/vnet/pg/input.c @@ -893,7 +893,7 @@ pg_generate_set_lengths (pg_main_t * pg, vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - os_get_cpu_number (), + vlib_get_thread_index (), si->sw_if_index, n_buffers, length_sum); } @@ -1266,7 +1266,7 @@ pg_stream_fill_helper (pg_main_t * pg, l += vlib_buffer_index_length_in_chain (vm, buffers[i]); vlib_increment_combined_counter 
(im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - os_get_cpu_number (), + vlib_get_thread_index (), si->sw_if_index, n_alloc, l); s->current_replay_packet_index += n_alloc; s->current_replay_packet_index %= diff --git a/src/vnet/replication.c b/src/vnet/replication.c index 86d922b5..233a8c2f 100644 --- a/src/vnet/replication.c +++ b/src/vnet/replication.c @@ -31,16 +31,16 @@ replication_prep (vlib_main_t * vm, { replication_main_t *rm = &replication_main; replication_context_t *ctx; - uword cpu_number = vm->cpu_index; + uword thread_index = vm->thread_index; ip4_header_t *ip; u32 ctx_id; /* Allocate a context, reserve context 0 */ - if (PREDICT_FALSE (rm->contexts[cpu_number] == 0)) - pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES); + if (PREDICT_FALSE (rm->contexts[thread_index] == 0)) + pool_get_aligned (rm->contexts[thread_index], ctx, CLIB_CACHE_LINE_BYTES); - pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES); - ctx_id = ctx - rm->contexts[cpu_number]; + pool_get_aligned (rm->contexts[thread_index], ctx, CLIB_CACHE_LINE_BYTES); + ctx_id = ctx - rm->contexts[thread_index]; /* Save state from vlib buffer */ ctx->saved_free_list_index = b0->free_list_index; @@ -94,11 +94,11 @@ replication_recycle (vlib_main_t * vm, vlib_buffer_t * b0, u32 is_last) { replication_main_t *rm = &replication_main; replication_context_t *ctx; - uword cpu_number = vm->cpu_index; + uword thread_index = vm->thread_index; ip4_header_t *ip; /* Get access to the replication context */ - ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count); + ctx = pool_elt_at_index (rm->contexts[thread_index], b0->recycle_count); /* Restore vnet buffer state */ clib_memcpy (vnet_buffer (b0), ctx->vnet_buffer, @@ -133,7 +133,7 @@ replication_recycle (vlib_main_t * vm, vlib_buffer_t * b0, u32 is_last) b0->flags &= ~VLIB_BUFFER_RECYCLE; /* Free context back to its pool */ - pool_put (rm->contexts[cpu_number], ctx); + pool_put (rm->contexts[thread_index], ctx); } return ctx; @@ -160,7 +160,7 @@ replication_recycle_callback (vlib_main_t * vm, vlib_buffer_free_list_t * fl) replication_main_t *rm = &replication_main; replication_context_t *ctx; u32 feature_node_index = 0; - uword cpu_number = vm->cpu_index; + uword thread_index = vm->thread_index; /* * All buffers in the list are destined to the same recycle node. @@ -172,7 +172,7 @@ replication_recycle_callback (vlib_main_t * vm, vlib_buffer_free_list_t * fl) { bi0 = fl->buffers[0]; b0 = vlib_get_buffer (vm, bi0); - ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count); + ctx = pool_elt_at_index (rm->contexts[thread_index], b0->recycle_count); feature_node_index = ctx->recycle_node_index; } diff --git a/src/vnet/replication.h b/src/vnet/replication.h index 5dc554c9..ce4b3ff1 100644 --- a/src/vnet/replication.h +++ b/src/vnet/replication.h @@ -100,7 +100,7 @@ replication_get_ctx (vlib_buffer_t * b0) replication_main_t *rm = &replication_main; return replication_is_recycled (b0) ? 
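/* Replication contexts are pooled per thread and addressed by the
   buffer's recycle_count, so a context must be recycled on the thread
   that allocated it; a minimal sketch, assuming vm belongs to the
   current worker:

     uword thread_index = vm->thread_index;
     ctx = pool_elt_at_index (rm->contexts[thread_index],
                              b0->recycle_count);                        */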
- pool_elt_at_index (rm->contexts[os_get_cpu_number ()], + pool_elt_at_index (rm->contexts[vlib_get_thread_index ()], b0->recycle_count) : 0; } diff --git a/src/vnet/session/node.c b/src/vnet/session/node.c index b86e87d9..dd211c51 100644 --- a/src/vnet/session/node.c +++ b/src/vnet/session/node.c @@ -311,7 +311,7 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, unix_shared_memory_queue_t *q; application_t *app; int n_tx_packets = 0; - u32 my_thread_index = vm->cpu_index; + u32 my_thread_index = vm->thread_index; int i, rv; f64 now = vlib_time_now (vm); diff --git a/src/vnet/sr/sr_localsid.c b/src/vnet/sr/sr_localsid.c index 2e3d56de..6d72a506 100755 --- a/src/vnet/sr/sr_localsid.c +++ b/src/vnet/sr/sr_localsid.c @@ -887,7 +887,7 @@ sr_localsid_d_fn (vlib_main_t * vm, vlib_node_runtime_t * node, from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; next_index = node->cached_next_index; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -974,26 +974,26 @@ sr_localsid_d_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_increment_combined_counter (((next0 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls0 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b0)); + &(sm->sr_ls_valid_counters)), thread_index, ls0 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b0)); vlib_increment_combined_counter (((next1 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls1 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b1)); + &(sm->sr_ls_valid_counters)), thread_index, ls1 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b1)); vlib_increment_combined_counter (((next2 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls2 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b2)); + &(sm->sr_ls_valid_counters)), thread_index, ls2 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b2)); vlib_increment_combined_counter (((next3 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls3 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b3)); + &(sm->sr_ls_valid_counters)), thread_index, ls3 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b3)); vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next, n_left_to_next, bi0, bi1, bi2, bi3, @@ -1062,8 +1062,8 @@ sr_localsid_d_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_increment_combined_counter (((next0 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls0 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b0)); + &(sm->sr_ls_valid_counters)), thread_index, ls0 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b0)); vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); @@ -1103,7 +1103,7 @@ sr_localsid_fn (vlib_main_t * vm, vlib_node_runtime_t * node, from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; next_index = node->cached_next_index; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -1205,26 +1205,26 @@ sr_localsid_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_increment_combined_counter (((next0 == SR_LOCALSID_NEXT_ERROR) ? 
&(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls0 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b0)); + &(sm->sr_ls_valid_counters)), thread_index, ls0 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b0)); vlib_increment_combined_counter (((next1 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls1 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b1)); + &(sm->sr_ls_valid_counters)), thread_index, ls1 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b1)); vlib_increment_combined_counter (((next2 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls2 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b2)); + &(sm->sr_ls_valid_counters)), thread_index, ls2 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b2)); vlib_increment_combined_counter (((next3 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls3 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b3)); + &(sm->sr_ls_valid_counters)), thread_index, ls3 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b3)); vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next, n_left_to_next, bi0, bi1, bi2, bi3, @@ -1295,8 +1295,8 @@ sr_localsid_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_increment_combined_counter (((next0 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls0 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b0)); + &(sm->sr_ls_valid_counters)), thread_index, ls0 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b0)); vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index e3705060..c1567aa0 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -174,7 +174,7 @@ tclient_thread_fn (void *arg) pthread_sigmask (SIG_SETMASK, &s, 0); } - clib_per_cpu_mheaps[os_get_cpu_number ()] = clib_per_cpu_mheaps[0]; + clib_per_cpu_mheaps[vlib_get_thread_index ()] = clib_per_cpu_mheaps[0]; while (1) { diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index b2a371e2..b6c34828 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -646,10 +646,10 @@ const static transport_proto_vft_t tcp6_proto = { void tcp_timer_keep_handler (u32 conn_index) { - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); tcp_connection_t *tc; - tc = tcp_connection_get (conn_index, cpu_index); + tc = tcp_connection_get (conn_index, thread_index); tc->timers[TCP_TIMER_KEEP] = TCP_TIMER_HANDLE_INVALID; tcp_connection_close (tc); @@ -675,10 +675,10 @@ tcp_timer_establish_handler (u32 conn_index) void tcp_timer_waitclose_handler (u32 conn_index) { - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); tcp_connection_t *tc; - tc = tcp_connection_get (conn_index, cpu_index); + tc = tcp_connection_get (conn_index, thread_index); tc->timers[TCP_TIMER_WAITCLOSE] = TCP_TIMER_HANDLE_INVALID; /* Session didn't come back with a close(). 
Send FIN either way diff --git a/src/vnet/tcp/tcp_debug.h b/src/vnet/tcp/tcp_debug.h index 0090e15e..eaca672c 100644 --- a/src/vnet/tcp/tcp_debug.h +++ b/src/vnet/tcp/tcp_debug.h @@ -343,7 +343,7 @@ typedef enum _tcp_dbg_evt } \ else \ { \ - u32 _thread_index = os_get_cpu_number (); \ + u32 _thread_index = vlib_get_thread_index (); \ _tc = tcp_connection_get (_tc_index, _thread_index); \ } \ ELOG_TYPE_DECLARE (_e) = \ diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index a8224dc2..7e9fa47b 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -1142,7 +1142,7 @@ tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame, int is_ip4) { u32 n_left_from, next_index, *from, *to_next; - u32 my_thread_index = vm->cpu_index, errors = 0; + u32 my_thread_index = vm->thread_index, errors = 0; tcp_main_t *tm = vnet_get_tcp_main (); from = vlib_frame_vector_args (from_frame); @@ -1332,7 +1332,7 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, { tcp_main_t *tm = vnet_get_tcp_main (); u32 n_left_from, next_index, *from, *to_next; - u32 my_thread_index = vm->cpu_index, errors = 0; + u32 my_thread_index = vm->thread_index, errors = 0; u8 sst = is_ip4 ? SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP; from = vlib_frame_vector_args (from_frame); @@ -1634,7 +1634,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, { tcp_main_t *tm = vnet_get_tcp_main (); u32 n_left_from, next_index, *from, *to_next; - u32 my_thread_index = vm->cpu_index, errors = 0; + u32 my_thread_index = vm->thread_index, errors = 0; from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -1989,7 +1989,7 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame, int is_ip4) { u32 n_left_from, next_index, *from, *to_next; - u32 my_thread_index = vm->cpu_index; + u32 my_thread_index = vm->thread_index; tcp_main_t *tm = vnet_get_tcp_main (); u8 sst = is_ip4 ? 
SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP; @@ -2243,7 +2243,7 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame, int is_ip4) { u32 n_left_from, next_index, *from, *to_next; - u32 my_thread_index = vm->cpu_index; + u32 my_thread_index = vm->thread_index; tcp_main_t *tm = vnet_get_tcp_main (); from = vlib_frame_vector_args (from_frame); diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index ea157bd7..e18bfad7 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -387,8 +387,8 @@ tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts, #define tcp_get_free_buffer_index(tm, bidx) \ do { \ u32 *my_tx_buffers, n_free_buffers; \ - u32 cpu_index = os_get_cpu_number(); \ - my_tx_buffers = tm->tx_buffers[cpu_index]; \ + u32 thread_index = vlib_get_thread_index(); \ + my_tx_buffers = tm->tx_buffers[thread_index]; \ if (PREDICT_FALSE(vec_len (my_tx_buffers) == 0)) \ { \ n_free_buffers = 32; /* TODO config or macro */ \ @@ -396,7 +396,7 @@ do { \ _vec_len(my_tx_buffers) = vlib_buffer_alloc_from_free_list ( \ tm->vlib_main, my_tx_buffers, n_free_buffers, \ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); \ - tm->tx_buffers[cpu_index] = my_tx_buffers; \ + tm->tx_buffers[thread_index] = my_tx_buffers; \ } \ /* buffer shortage */ \ if (PREDICT_FALSE (vec_len (my_tx_buffers) == 0)) \ @@ -408,8 +408,8 @@ do { \ #define tcp_return_buffer(tm) \ do { \ u32 *my_tx_buffers; \ - u32 cpu_index = os_get_cpu_number(); \ - my_tx_buffers = tm->tx_buffers[cpu_index]; \ + u32 thread_index = vlib_get_thread_index(); \ + my_tx_buffers = tm->tx_buffers[thread_index]; \ _vec_len (my_tx_buffers) +=1; \ } while (0) @@ -942,7 +942,7 @@ tcp_send_ack (tcp_connection_t * tc) void tcp_timer_delack_handler (u32 index) { - u32 thread_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); tcp_connection_t *tc; tc = tcp_connection_get (index, thread_index); @@ -1022,7 +1022,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) { tcp_main_t *tm = vnet_get_tcp_main (); vlib_main_t *vm = vlib_get_main (); - u32 thread_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); tcp_connection_t *tc; vlib_buffer_t *b; u32 bi, snd_space, n_bytes; @@ -1152,7 +1152,7 @@ tcp_timer_persist_handler (u32 index) { tcp_main_t *tm = vnet_get_tcp_main (); vlib_main_t *vm = vlib_get_main (); - u32 thread_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); tcp_connection_t *tc; vlib_buffer_t *b; u32 bi, n_bytes; @@ -1313,7 +1313,7 @@ tcp46_output_inline (vlib_main_t * vm, vlib_frame_t * from_frame, int is_ip4) { u32 n_left_from, next_index, *from, *to_next; - u32 my_thread_index = vm->cpu_index; + u32 my_thread_index = vm->thread_index; from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -1524,7 +1524,7 @@ tcp46_send_reset_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame, u8 is_ip4) { u32 n_left_from, next_index, *from, *to_next; - u32 my_thread_index = vm->cpu_index; + u32 my_thread_index = vm->thread_index; from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; diff --git a/src/vnet/udp/udp_input.c b/src/vnet/udp/udp_input.c index 4b22109b..810278e6 100644 --- a/src/vnet/udp/udp_input.c +++ b/src/vnet/udp/udp_input.c @@ -70,7 +70,7 @@ udp4_uri_input_node_fn (vlib_main_t * vm, udp4_uri_input_next_t next_index; udp_uri_main_t *um = vnet_get_udp_main (); session_manager_main_t *smm = vnet_get_session_manager_main (); - 
u32 my_thread_index = vm->cpu_index; + u32 my_thread_index = vm->thread_index; u8 my_enqueue_epoch; u32 *session_indices_to_enqueue; static u32 serial_number; diff --git a/src/vnet/unix/tapcli.c b/src/vnet/unix/tapcli.c index fb1a8bac..0fc62f6c 100644 --- a/src/vnet/unix/tapcli.c +++ b/src/vnet/unix/tapcli.c @@ -366,7 +366,7 @@ static uword tapcli_rx_iface(vlib_main_t * vm, vlib_increment_combined_counter ( vnet_main.interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - os_get_cpu_number(), ti->sw_if_index, + vlib_get_thread_index(), ti->sw_if_index, 1, n_bytes_in_packet); if (PREDICT_FALSE(n_trace > 0)) { diff --git a/src/vnet/unix/tuntap.c b/src/vnet/unix/tuntap.c index 2cfcc92f..ac674653 100644 --- a/src/vnet/unix/tuntap.c +++ b/src/vnet/unix/tuntap.c @@ -189,7 +189,7 @@ tuntap_tx (vlib_main_t * vm, /* Update tuntap interface output stats. */ vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - vm->cpu_index, + vm->thread_index, tm->sw_if_index, n_packets, n_bytes); @@ -297,7 +297,7 @@ tuntap_rx (vlib_main_t * vm, vlib_increment_combined_counter (vnet_main.interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - os_get_cpu_number(), + vlib_get_thread_index(), tm->sw_if_index, 1, n_bytes_in_packet); diff --git a/src/vnet/vxlan-gpe/decap.c b/src/vnet/vxlan-gpe/decap.c index 22ab4b62..d4fe4231 100644 --- a/src/vnet/vxlan-gpe/decap.c +++ b/src/vnet/vxlan-gpe/decap.c @@ -115,7 +115,7 @@ vxlan_gpe_input (vlib_main_t * vm, vxlan4_gpe_tunnel_key_t last_key4; vxlan6_gpe_tunnel_key_t last_key6; u32 pkts_decapsulated = 0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 stats_sw_if_index, stats_n_packets, stats_n_bytes; if (is_ip4) @@ -342,7 +342,7 @@ vxlan_gpe_input (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter ( im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = 1; stats_n_bytes = len0; stats_sw_if_index = sw_if_index0; @@ -427,7 +427,7 @@ vxlan_gpe_input (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter ( im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = 1; stats_n_bytes = len1; stats_sw_if_index = sw_if_index1; @@ -588,7 +588,7 @@ vxlan_gpe_input (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter ( im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = 1; stats_n_bytes = len0; stats_sw_if_index = sw_if_index0; @@ -615,7 +615,7 @@ vxlan_gpe_input (vlib_main_t * vm, if (stats_n_packets) { vlib_increment_combined_counter ( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, cpu_index, + im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); node->runtime_data[0] = stats_sw_if_index; } diff --git a/src/vnet/vxlan-gpe/encap.c b/src/vnet/vxlan-gpe/encap.c index 3a486e56..67ed94b4 100644 --- a/src/vnet/vxlan-gpe/encap.c +++ b/src/vnet/vxlan-gpe/encap.c @@ -151,7 +151,7 @@ vxlan_gpe_encap (vlib_main_t * vm, vnet_main_t * vnm = ngm->vnet_main; vnet_interface_main_t * im = &vnm->interface_main; 
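/* The change repeated across all of these hunks: VPP counters and
 * per-thread data are sharded per worker thread, and the rename from
 * os_get_cpu_number () / vm->cpu_index to vlib_get_thread_index () /
 * vm->thread_index makes explicit that the index is VPP's own worker
 * thread index, not an OS CPU number.  A minimal sketch of the resulting
 * idiom, reusing the hunks' own names (`im` for the vnet interface main,
 * `b0` for a buffer seen on `sw_if_index0`; RX shown, the TX sites are
 * symmetric): */
u32 thread_index = vlib_get_thread_index ();
vlib_increment_combined_counter
  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
   thread_index, sw_if_index0, 1 /* packets */,
   vlib_buffer_length_in_chain (vm, b0));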
u32 pkts_encapsulated = 0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 stats_sw_if_index, stats_n_packets, stats_n_bytes; from = vlib_frame_vector_args (from_frame); @@ -253,7 +253,7 @@ vxlan_gpe_encap (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter ( im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_sw_if_index = sw_if_index0; stats_n_packets = 2; stats_n_bytes = len0 + len1; @@ -262,10 +262,10 @@ vxlan_gpe_encap (vlib_main_t * vm, { vlib_increment_combined_counter ( im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, sw_if_index0, 1, len0); + thread_index, sw_if_index0, 1, len0); vlib_increment_combined_counter ( im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, sw_if_index1, 1, len1); + thread_index, sw_if_index1, 1, len1); } } @@ -335,7 +335,7 @@ vxlan_gpe_encap (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter ( im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = 1; stats_n_bytes = len0; stats_sw_if_index = sw_if_index0; @@ -359,7 +359,7 @@ vxlan_gpe_encap (vlib_main_t * vm, if (stats_n_packets) { vlib_increment_combined_counter ( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, cpu_index, + im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); node->runtime_data[0] = stats_sw_if_index; } diff --git a/src/vnet/vxlan/decap.c b/src/vnet/vxlan/decap.c index 514b2c99..2acb1f6f 100644 --- a/src/vnet/vxlan/decap.c +++ b/src/vnet/vxlan/decap.c @@ -81,7 +81,7 @@ vxlan_input (vlib_main_t * vm, vxlan4_tunnel_key_t last_key4; vxlan6_tunnel_key_t last_key6; u32 pkts_decapsulated = 0; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); u32 stats_sw_if_index, stats_n_packets, stats_n_bytes; if (is_ip4) @@ -314,7 +314,7 @@ vxlan_input (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = 1; stats_n_bytes = len0; @@ -468,7 +468,7 @@ vxlan_input (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = 1; stats_n_bytes = len1; @@ -674,7 +674,7 @@ vxlan_input (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = 1; stats_n_bytes = len0; @@ -711,7 +711,7 @@ vxlan_input (vlib_main_t * vm, { vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); node->runtime_data[0] = stats_sw_if_index; } diff --git a/src/vnet/vxlan/encap.c b/src/vnet/vxlan/encap.c index 5b63064a..4cfbbc23 100644 --- a/src/vnet/vxlan/encap.c +++ b/src/vnet/vxlan/encap.c 
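Every VXLAN decap/encap hunk above rewrites the same accumulate-and-flush
idiom, now keyed by thread_index: packet and byte counts are batched in
locals while consecutive packets hit the same sw_if_index, and the batch is
flushed into the per-thread combined counter whenever the interface changes
(and once more at the end of the frame, stashing the last sw_if_index in
node->runtime_data[0]).  In outline, a sketch using the hunks' own variable
names:

    if (sw_if_index0 == stats_sw_if_index)
      {
        /* Same interface as the previous packet: just accumulate. */
        stats_n_packets += 1;
        stats_n_bytes += len0;
      }
    else
      {
        /* Interface changed: flush the pending batch under the old index. */
        if (stats_n_packets)
          vlib_increment_combined_counter
            (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
             thread_index, stats_sw_if_index, stats_n_packets,
             stats_n_bytes);
        stats_sw_if_index = sw_if_index0;
        stats_n_packets = 1;
        stats_n_bytes = len0;
      }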
@@ -77,7 +77,7 @@ vxlan_encap_inline (vlib_main_t * vm, vnet_interface_main_t * im = &vnm->interface_main; u32 pkts_encapsulated = 0; u16 old_l0 = 0, old_l1 = 0; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); u32 stats_sw_if_index, stats_n_packets, stats_n_bytes; u32 sw_if_index0 = 0, sw_if_index1 = 0; u32 next0 = 0, next1 = 0; @@ -301,7 +301,7 @@ vxlan_encap_inline (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, stats_sw_if_index, + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_sw_if_index = sw_if_index0; stats_n_packets = 2; @@ -311,10 +311,10 @@ vxlan_encap_inline (vlib_main_t * vm, { vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, sw_if_index0, 1, len0); + thread_index, sw_if_index0, 1, len0); vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, sw_if_index1, 1, len1); + thread_index, sw_if_index1, 1, len1); } } @@ -464,7 +464,7 @@ vxlan_encap_inline (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, stats_sw_if_index, + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = 1; stats_n_bytes = len0; @@ -496,7 +496,7 @@ vxlan_encap_inline (vlib_main_t * vm, { vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); node->runtime_data[0] = stats_sw_if_index; } diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c index 042d02e2..4309cd51 100644 --- a/src/vpp/stats/stats.c +++ b/src/vpp/stats/stats.c @@ -66,14 +66,14 @@ _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) void dslock (stats_main_t * sm, int release_hint, int tag) { - u32 thread_id; + u32 thread_index; data_structure_lock_t *l = sm->data_structure_lock; if (PREDICT_FALSE (l == 0)) return; - thread_id = os_get_cpu_number (); - if (l->lock && l->thread_id == thread_id) + thread_index = vlib_get_thread_index (); + if (l->lock && l->thread_index == thread_index) { l->count++; return; @@ -85,7 +85,7 @@ dslock (stats_main_t * sm, int release_hint, int tag) while (__sync_lock_test_and_set (&l->lock, 1)) /* zzzz */ ; l->tag = tag; - l->thread_id = thread_id; + l->thread_index = thread_index; l->count = 1; } @@ -99,14 +99,14 @@ stats_dslock_with_hint (int hint, int tag) void dsunlock (stats_main_t * sm) { - u32 thread_id; + u32 thread_index; data_structure_lock_t *l = sm->data_structure_lock; if (PREDICT_FALSE (l == 0)) return; - thread_id = os_get_cpu_number (); - ASSERT (l->lock && l->thread_id == thread_id); + thread_index = vlib_get_thread_index (); + ASSERT (l->lock && l->thread_index == thread_index); l->count--; if (l->count == 0) { diff --git a/src/vpp/stats/stats.h b/src/vpp/stats/stats.h index 118115be..024dc78e 100644 --- a/src/vpp/stats/stats.h +++ b/src/vpp/stats/stats.h @@ -30,7 +30,7 @@ typedef struct { volatile u32 lock; volatile u32 release_hint; - u32 thread_id; + u32 thread_index; u32 count; int tag; } data_structure_lock_t; -- cgit 1.2.3-korg From ee551988bcce08ad7a15f71fb9d2239b4239ab08 Mon Sep 17 00:00:00 2001 From: Aloys Augustin Date: Fri, 17 Feb 2017 14:55:29 +0100 Subject: Fix vnet_interface_counters API definition The api specification had u8 as data type, which 
caused the python binding to fail. Fixes VPP-642 Change-Id: I9ba97959740d44c8f4a12db9356d0d1bcd709a73 Signed-off-by: Aloys Augustin Signed-off-by: Ole Troan --- src/vat/api_format.c | 76 +++++++++------ src/vnet/interface.api | 35 +++++-- src/vpp/stats/stats.c | 260 +++++++++++++++++++++++++++++-------------------- 3 files changed, 227 insertions(+), 144 deletions(-) (limited to 'src/vpp/stats/stats.c') diff --git a/src/vat/api_format.c b/src/vat/api_format.c index 495b660e..efb71ef6 100644 --- a/src/vat/api_format.c +++ b/src/vat/api_format.c @@ -2098,17 +2098,21 @@ set_combined_interface_counter (u8 vnet_counter_type, u32 sw_if_index, vam->combined_interface_counters[vnet_counter_type][sw_if_index] = counter; } -static void vl_api_vnet_interface_counters_t_handler - (vl_api_vnet_interface_counters_t * mp) +static void vl_api_vnet_interface_simple_counters_t_handler + (vl_api_vnet_interface_simple_counters_t * mp) { /* not supported */ } -static void vl_api_vnet_interface_counters_t_handler_json - (vl_api_vnet_interface_counters_t * mp) +static void vl_api_vnet_interface_combined_counters_t_handler + (vl_api_vnet_interface_combined_counters_t * mp) +{ + /* not supported */ +} + +static void vl_api_vnet_interface_simple_counters_t_handler_json + (vl_api_vnet_interface_simple_counters_t * mp) { - interface_counter_t counter; - vlib_counter_t *v; u64 *v_packets; u64 packets; u32 count; @@ -2118,31 +2122,38 @@ static void vl_api_vnet_interface_counters_t_handler_json count = ntohl (mp->count); first_sw_if_index = ntohl (mp->first_sw_if_index); - if (!mp->is_combined) + v_packets = (u64 *) & mp->data; + for (i = 0; i < count; i++) { - v_packets = (u64 *) & mp->data; - for (i = 0; i < count; i++) - { - packets = - clib_net_to_host_u64 (clib_mem_unaligned (v_packets, u64)); - set_simple_interface_counter (mp->vnet_counter_type, - first_sw_if_index + i, packets); - v_packets++; - } + packets = clib_net_to_host_u64 (clib_mem_unaligned (v_packets, u64)); + set_simple_interface_counter (mp->vnet_counter_type, + first_sw_if_index + i, packets); + v_packets++; } - else +} + +static void vl_api_vnet_interface_combined_counters_t_handler_json + (vl_api_vnet_interface_combined_counters_t * mp) +{ + interface_counter_t counter; + vlib_counter_t *v; + u32 first_sw_if_index; + int i; + u32 count; + + count = ntohl (mp->count); + first_sw_if_index = ntohl (mp->first_sw_if_index); + + v = (vlib_counter_t *) & mp->data; + for (i = 0; i < count; i++) { - v = (vlib_counter_t *) & mp->data; - for (i = 0; i < count; i++) - { - counter.packets = - clib_net_to_host_u64 (clib_mem_unaligned (&v->packets, u64)); - counter.bytes = - clib_net_to_host_u64 (clib_mem_unaligned (&v->bytes, u64)); - set_combined_interface_counter (mp->vnet_counter_type, - first_sw_if_index + i, counter); - v++; - } + counter.packets = + clib_net_to_host_u64 (clib_mem_unaligned (&v->packets, u64)); + counter.bytes = + clib_net_to_host_u64 (clib_mem_unaligned (&v->bytes, u64)); + set_combined_interface_counter (mp->vnet_counter_type, + first_sw_if_index + i, counter); + v++; } } @@ -4118,8 +4129,10 @@ static void vl_api_flow_classify_details_t_handler_json vat_json_object_add_uint (node, "table_index", ntohl (mp->table_index)); } - - +#define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler +#define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler +#define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler +#define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler #define 
vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler @@ -4539,7 +4552,8 @@ _(SW_INTERFACE_GET_TABLE_REPLY, sw_interface_get_table_reply) #define foreach_standalone_reply_msg \ _(SW_INTERFACE_SET_FLAGS, sw_interface_set_flags) \ -_(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \ +_(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \ +_(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \ _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \ _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \ _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \ diff --git a/src/vnet/interface.api b/src/vnet/interface.api index 9df63f18..14ff6d5a 100644 --- a/src/vnet/interface.api +++ b/src/vnet/interface.api @@ -206,21 +206,42 @@ define sw_interface_get_table_reply u32 vrf_id; }; -/** \brief Stats counters structure +typeonly manual_print manual_endian define vlib_counter +{ + u64 packets; /**< packet counter */ + u64 bytes; /**< byte counter */ +}; + +/** \brief Simple stats counters structure + @param vnet_counter_type- such as ip4, ip6, punts, etc + @param first_sw_if_index - first sw index in block of index, counts + @param count - number of counters, equal to the number of interfaces in + this stats block + @param data - contiguous block of u64 counters +*/ +manual_print manual_endian define vnet_interface_simple_counters +{ + /* enums - plural - in vnet/interface.h */ + u8 vnet_counter_type; + u32 first_sw_if_index; + u32 count; + u64 data[count]; +}; + +/** \brief Combined stats counters structure @param vnet_counter_type- such as ip4, ip6, punts, etc - @param is_combined - rx & tx total (all types) counts @param first_sw_if_index - first sw index in block of index, counts - @param count - number of interfaces this stats block includes counters for - @param data - contiguous block of vlib_counter_t structures + @param count - number of counters, equal to the number of interfaces in + this stats block + @param data - contiguous block of vlib_counter_t structures */ -define vnet_interface_counters +manual_print manual_endian define vnet_interface_combined_counters { /* enums - plural - in vnet/interface.h */ u8 vnet_counter_type; - u8 is_combined; u32 first_sw_if_index; u32 count; - u8 data[count]; + vl_api_vlib_counter_t data[count]; }; /** \brief Set unnumbered interface add / del request diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c index 4309cd51..38821da7 100644 --- a/src/vpp/stats/stats.c +++ b/src/vpp/stats/stats.c @@ -45,12 +45,13 @@ stats_main_t stats_main; #include #undef vl_printfun -#define foreach_stats_msg \ -_(WANT_STATS, want_stats) \ -_(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \ -_(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \ -_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \ -_(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \ +#define foreach_stats_msg \ +_(WANT_STATS, want_stats) \ +_(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \ +_(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \ +_(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \ +_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \ +_(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \ _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) /* These constants ensure msg sizes <= 1024, aka ring allocation */ @@ -127,7 +128,7 @@ stats_dsunlock (int hint, int tag) static void do_simple_interface_counters (stats_main_t * sm) { - 
vl_api_vnet_interface_counters_t *mp = 0; + vl_api_vnet_interface_simple_counters_t *mp = 0; vnet_interface_main_t *im = sm->interface_main; api_main_t *am = sm->api_main; vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr; @@ -155,9 +156,8 @@ do_simple_interface_counters (stats_main_t * sm) mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + items_this_message * sizeof (v)); - mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COUNTERS); + mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS); mp->vnet_counter_type = cm - im->sw_if_counters; - mp->is_combined = 0; mp->first_sw_if_index = htonl (i); mp->count = 0; vp = (u64 *) mp->data; @@ -182,7 +182,7 @@ do_simple_interface_counters (stats_main_t * sm) static void do_combined_interface_counters (stats_main_t * sm) { - vl_api_vnet_interface_counters_t *mp = 0; + vl_api_vnet_interface_combined_counters_t *mp = 0; vnet_interface_main_t *im = sm->interface_main; api_main_t *am = sm->api_main; vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr; @@ -206,9 +206,8 @@ do_combined_interface_counters (stats_main_t * sm) mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + items_this_message * sizeof (v)); - mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COUNTERS); + mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS); mp->vnet_counter_type = cm - im->combined_sw_if_counters; - mp->is_combined = 1; mp->first_sw_if_index = htonl (i); mp->count = 0; vp = (vlib_counter_t *) mp->data; @@ -943,13 +942,13 @@ stats_thread_fn (void *arg) } static void -vl_api_vnet_interface_counters_t_handler (vl_api_vnet_interface_counters_t * - mp) + vl_api_vnet_interface_simple_counters_t_handler + (vl_api_vnet_interface_simple_counters_t * mp) { vpe_client_registration_t *reg; stats_main_t *sm = &stats_main; unix_shared_memory_queue_t *q, *q_prev = NULL; - vl_api_vnet_interface_counters_t *mp_copy = NULL; + vl_api_vnet_interface_simple_counters_t *mp_copy = NULL; u32 mp_size; #if STATS_DEBUG > 0 @@ -958,110 +957,154 @@ vl_api_vnet_interface_counters_t_handler (vl_api_vnet_interface_counters_t * int i; #endif - mp_size = sizeof (*mp) + (ntohl (mp->count) * - (mp->is_combined ? 
sizeof (vlib_counter_t) : - sizeof (u64))); + mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64)); /* *INDENT-OFF* */ pool_foreach(reg, sm->stats_registrations, - ({ - q = vl_api_client_index_to_input_queue (reg->client_index); - if (q) - { - if (q_prev && (q_prev->cursize < q_prev->maxsize)) - { - mp_copy = vl_msg_api_alloc_as_if_client(mp_size); - clib_memcpy(mp_copy, mp, mp_size); - vl_msg_api_send_shmem (q_prev, (u8 *)&mp); - mp = mp_copy; - } - q_prev = q; - } - })); + ({ + q = vl_api_client_index_to_input_queue (reg->client_index); + if (q) + { + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + mp_copy = vl_msg_api_alloc_as_if_client(mp_size); + clib_memcpy(mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *)&mp); + mp = mp_copy; + } + q_prev = q; + } + })); /* *INDENT-ON* */ #if STATS_DEBUG > 0 count = ntohl (mp->count); sw_if_index = ntohl (mp->first_sw_if_index); - if (mp->is_combined == 0) - { - u64 *vp, v; - vp = (u64 *) mp->data; + u64 *vp, v; + vp = (u64 *) mp->data; - switch (mp->vnet_counter_type) - { - case VNET_INTERFACE_COUNTER_DROP: - counter_name = "drop"; - break; - case VNET_INTERFACE_COUNTER_PUNT: - counter_name = "punt"; - break; - case VNET_INTERFACE_COUNTER_IP4: - counter_name = "ip4"; - break; - case VNET_INTERFACE_COUNTER_IP6: - counter_name = "ip6"; - break; - case VNET_INTERFACE_COUNTER_RX_NO_BUF: - counter_name = "rx-no-buff"; - break; - case VNET_INTERFACE_COUNTER_RX_MISS: - , counter_name = "rx-miss"; - break; - case VNET_INTERFACE_COUNTER_RX_ERROR: - , counter_name = "rx-error (fifo-full)"; - break; - case VNET_INTERFACE_COUNTER_TX_ERROR: - , counter_name = "tx-error (fifo-full)"; - break; - default: - counter_name = "bogus"; - break; - } - for (i = 0; i < count; i++) - { - v = clib_mem_unaligned (vp, u64); - v = clib_net_to_host_u64 (v); - vp++; - fformat (stdout, "%U.%s %lld\n", format_vnet_sw_if_index_name, - sm->vnet_main, sw_if_index, counter_name, v); - sw_if_index++; - } + switch (mp->vnet_counter_type) + { + case VNET_INTERFACE_COUNTER_DROP: + counter_name = "drop"; + break; + case VNET_INTERFACE_COUNTER_PUNT: + counter_name = "punt"; + break; + case VNET_INTERFACE_COUNTER_IP4: + counter_name = "ip4"; + break; + case VNET_INTERFACE_COUNTER_IP6: + counter_name = "ip6"; + break; + case VNET_INTERFACE_COUNTER_RX_NO_BUF: + counter_name = "rx-no-buff"; + break; + case VNET_INTERFACE_COUNTER_RX_MISS: + , counter_name = "rx-miss"; + break; + case VNET_INTERFACE_COUNTER_RX_ERROR: + , counter_name = "rx-error (fifo-full)"; + break; + case VNET_INTERFACE_COUNTER_TX_ERROR: + , counter_name = "tx-error (fifo-full)"; + break; + default: + counter_name = "bogus"; + break; + } + for (i = 0; i < count; i++) + { + v = clib_mem_unaligned (vp, u64); + v = clib_net_to_host_u64 (v); + vp++; + fformat (stdout, "%U.%s %lld\n", format_vnet_sw_if_index_name, + sm->vnet_main, sw_if_index, counter_name, v); + sw_if_index++; + } +#endif + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + vl_msg_api_send_shmem (q_prev, (u8 *) & mp); } else { - vlib_counter_t *vp; - u64 packets, bytes; - vp = (vlib_counter_t *) mp->data; + vl_msg_api_free (mp); + } +} - switch (mp->vnet_counter_type) - { - case VNET_INTERFACE_COUNTER_RX: - counter_name = "rx"; - break; - case VNET_INTERFACE_COUNTER_TX: - counter_name = "tx"; - break; - default: - counter_name = "bogus"; - break; - } - for (i = 0; i < count; i++) - { - packets = clib_mem_unaligned (&vp->packets, u64); - packets = clib_net_to_host_u64 (packets); - bytes = clib_mem_unaligned (&vp->bytes, u64); - 
bytes = clib_net_to_host_u64 (bytes); - vp++; - fformat (stdout, "%U.%s.packets %lld\n", - format_vnet_sw_if_index_name, - sm->vnet_main, sw_if_index, counter_name, packets); - fformat (stdout, "%U.%s.bytes %lld\n", - format_vnet_sw_if_index_name, - sm->vnet_main, sw_if_index, counter_name, bytes); - sw_if_index++; - } +static void + vl_api_vnet_interface_combined_counters_t_handler + (vl_api_vnet_interface_combined_counters_t * mp) +{ + vpe_client_registration_t *reg; + stats_main_t *sm = &stats_main; + unix_shared_memory_queue_t *q, *q_prev = NULL; + vl_api_vnet_interface_combined_counters_t *mp_copy = NULL; + u32 mp_size; + +#if STATS_DEBUG > 0 + char *counter_name; + u32 count, sw_if_index; + int i; +#endif + + mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t)); + + /* *INDENT-OFF* */ + pool_foreach(reg, sm->stats_registrations, + ({ + q = vl_api_client_index_to_input_queue (reg->client_index); + if (q) + { + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + mp_copy = vl_msg_api_alloc_as_if_client(mp_size); + clib_memcpy(mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *)&mp); + mp = mp_copy; + } + q_prev = q; + } + })); + /* *INDENT-ON* */ + +#if STATS_DEBUG > 0 + count = ntohl (mp->count); + sw_if_index = ntohl (mp->first_sw_if_index); + + vlib_counter_t *vp; + u64 packets, bytes; + vp = (vlib_counter_t *) mp->data; + + switch (mp->vnet_counter_type) + { + case VNET_INTERFACE_COUNTER_RX: + counter_name = "rx"; + break; + case VNET_INTERFACE_COUNTER_TX: + counter_name = "tx"; + break; + default: + counter_name = "bogus"; + break; + } + for (i = 0; i < count; i++) + { + packets = clib_mem_unaligned (&vp->packets, u64); + packets = clib_net_to_host_u64 (packets); + bytes = clib_mem_unaligned (&vp->bytes, u64); + bytes = clib_net_to_host_u64 (bytes); + vp++; + fformat (stdout, "%U.%s.packets %lld\n", + format_vnet_sw_if_index_name, + sm->vnet_main, sw_if_index, counter_name, packets); + fformat (stdout, "%U.%s.bytes %lld\n", + format_vnet_sw_if_index_name, + sm->vnet_main, sw_if_index, counter_name, bytes); + sw_if_index++; } + #endif if (q_prev && (q_prev->cursize < q_prev->maxsize)) { @@ -1305,6 +1348,10 @@ stats_memclnt_delete_callback (u32 client_index) return 0; } +#define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler +#define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler +#define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler +#define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler @@ -1342,7 +1389,8 @@ stats_init (vlib_main_t * vm) #undef _ /* tell the msg infra not to free these messages... */ - am->message_bounce[VL_API_VNET_INTERFACE_COUNTERS] = 1; + am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1; + am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1; am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1; am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1; am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1; -- cgit 1.2.3-korg From d756b35032cdf7fdaaf0d6611388a54d32d72e92 Mon Sep 17 00:00:00 2001 From: Dave Wallace Date: Mon, 3 Jul 2017 13:11:38 -0400 Subject: Fix unlinking of /dev/shm files. 
- api-segment prefix not used when unlinking shm files - unlink root region on exit if no clients referenced - stale reference to freed segment name - don't add fake client to /db unless CLIB_DEBUG > 2 - turn off the gmond plugin - clean up unused vars in vpp/api Change-Id: I66451fcfd6ee64a12466c2d6c209050e3cdb74b7 Signed-off-by: Dave Wallace Signed-off-by: Dave Barach --- Makefile | 9 +++++++-- build-data/platforms/vpp.mk | 2 +- src/svm/ssvm.c | 11 ++++++++++- src/svm/svm.c | 38 +++++++++++++++++++++++++++++++------- src/svm/svm_fifo_segment.c | 2 +- src/svm/svmdb.c | 15 ++++++++++----- src/vlib/buffer.h | 18 +++++++++--------- src/vpp.am | 6 +++--- src/vpp/api/api.c | 1 - src/vpp/api/api_main.c | 2 -- src/vpp/api/custom_dump.c | 8 +------- src/vpp/stats/stats.c | 1 - 12 files changed, 73 insertions(+), 40 deletions(-) (limited to 'src/vpp/stats/stats.c') diff --git a/Makefile b/Makefile index 0d21f335..46c51dd8 100644 --- a/Makefile +++ b/Makefile @@ -55,8 +55,10 @@ else ifeq ($(filter rhel centos fedora opensuse,$(OS_ID)),$(OS_ID)) PKG=rpm endif +# +libganglia1-dev if building the gmond plugin + DEB_DEPENDS = curl build-essential autoconf automake bison libssl-dev ccache -DEB_DEPENDS += debhelper dkms git libtool libganglia1-dev libapr1-dev dh-systemd +DEB_DEPENDS += debhelper dkms git libtool libapr1-dev dh-systemd DEB_DEPENDS += libconfuse-dev git-review exuberant-ctags cscope pkg-config DEB_DEPENDS += lcov chrpath autoconf nasm indent DEB_DEPENDS += python-all python-dev python-virtualenv python-pip libffi6 @@ -79,9 +81,12 @@ else RPM_DEPENDS += python-virtualenv RPM_DEPENDS_GROUPS = 'Development Tools' endif + +# +ganglia-devel if building the ganglia plugin + RPM_DEPENDS += chrpath libffi-devel rpm-build RPM_DEPENDS += https://kojipkgs.fedoraproject.org//packages/nasm/2.12.02/2.fc26/x86_64/nasm-2.12.02-2.fc26.x86_64.rpm -EPEL_DEPENDS = libconfuse-devel ganglia-devel epel-rpm-macros +EPEL_DEPENDS = libconfuse-devel epel-rpm-macros ifeq ($(filter rhel centos,$(OS_ID)),$(OS_ID)) EPEL_DEPENDS += lcov else diff --git a/build-data/platforms/vpp.mk b/build-data/platforms/vpp.mk index 4577fa2e..acbe0e7f 100644 --- a/build-data/platforms/vpp.mk +++ b/build-data/platforms/vpp.mk @@ -36,7 +36,7 @@ vpp_uses_dpdk = yes # Uncoment to enable building unit tests # vpp_enable_tests = yes -vpp_root_packages = vpp gmod +vpp_root_packages = vpp # DPDK configuration parameters # vpp_uses_dpdk_cryptodev_sw = yes diff --git a/src/svm/ssvm.c b/src/svm/ssvm.c index 6cda1f27..23e3cf44 100644 --- a/src/svm/ssvm.c +++ b/src/svm/ssvm.c @@ -29,6 +29,9 @@ ssvm_master_init (ssvm_private_t * ssvm, u32 master_index) if (ssvm->ssvm_size == 0) return SSVM_API_ERROR_NO_SIZE; + if (CLIB_DEBUG > 1) + clib_warning ("[%d] creating segment '%s'", getpid (), ssvm->name); + ssvm_filename = format (0, "/dev/shm/%s%c", ssvm->name, 0); unlink ((char *) ssvm_filename); @@ -176,12 +179,18 @@ ssvm_delete (ssvm_private_t * ssvm) fn = format (0, "/dev/shm/%s%c", ssvm->name, 0); + if (CLIB_DEBUG > 1) + clib_warning ("[%d] unlinking ssvm (%s) backing file '%s'", getpid (), + ssvm->name, fn); + /* Throw away the backing file */ if (unlink ((char *) fn) < 0) clib_unix_warning ("unlink segment '%s'", ssvm->name); - munmap ((void *) ssvm->requested_va, ssvm->ssvm_size); vec_free (fn); + vec_free (ssvm->name); + + munmap ((void *) ssvm->requested_va, ssvm->ssvm_size); } diff --git a/src/svm/svm.c b/src/svm/svm.c index c96135cf..600fa744 100644 --- a/src/svm/svm.c +++ b/src/svm/svm.c @@ -458,14 +458,15 @@ svm_map_region 
(svm_map_region_args_t * a) struct stat stat; struct timespec ts, tsrem; - if (CLIB_DEBUG > 1) - clib_warning ("[%d] map region %s", getpid (), a->name); - ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size); ASSERT (a->name); shm_name = shm_name_from_svm_map_region_args (a); + if (CLIB_DEBUG > 1) + clib_warning ("[%d] map region %s: shm_open (%s)", + getpid (), a->name, shm_name); + svm_fd = shm_open ((char *) shm_name, O_RDWR | O_CREAT | O_EXCL, 0777); if (svm_fd >= 0) @@ -947,6 +948,29 @@ svm_region_find_or_create (svm_map_region_args_t * a) return (rp); } +void +svm_region_unlink (svm_region_t * rp) +{ + svm_map_region_args_t _a, *a = &_a; + svm_main_region_t *mp; + u8 *shm_name; + + ASSERT (root_rp); + ASSERT (rp); + ASSERT (vec_c_string_is_terminated (rp->region_name)); + + mp = root_rp->data_base; + ASSERT (mp); + + a->root_path = (char *) mp->root_path; + a->name = rp->region_name; + shm_name = shm_name_from_svm_map_region_args (a); + if (CLIB_DEBUG > 1) + clib_warning ("[%d] shm_unlink (%s)", getpid (), shm_name); + shm_unlink ((const char *) shm_name); + vec_free (shm_name); +} + /* * svm_region_unmap * @@ -1056,7 +1080,7 @@ found: vec_free (name); region_unlock (rp); - shm_unlink (rp->region_name); + svm_region_unlink (rp); munmap ((void *) virtual_base, virtual_size); region_unlock (root_rp); svm_pop_heap (oldheap); @@ -1071,9 +1095,6 @@ found: /* * svm_region_exit - * There is no clean way to unlink the - * root region when all clients go away, - * so remove the pid entry and call it a day. */ void svm_region_exit () @@ -1116,6 +1137,9 @@ svm_region_exit () found: + if (vec_len (root_rp->client_pids) == 0) + svm_region_unlink (root_rp); + region_unlock (root_rp); svm_pop_heap (oldheap); diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index 69d4ecb9..c80374a7 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -105,7 +105,7 @@ svm_fifo_segment_create (svm_fifo_segment_create_args_t * a) s->ssvm.ssvm_size = a->segment_size; s->ssvm.i_am_master = 1; s->ssvm.my_pid = getpid (); - s->ssvm.name = (u8 *) a->segment_name; + s->ssvm.name = format (0, "%s", a->segment_name); s->ssvm.requested_va = sm->next_baseva; rv = ssvm_master_init (&s->ssvm, s - sm->segments); diff --git a/src/svm/svmdb.c b/src/svm/svmdb.c index 03dfe7c3..043b0924 100644 --- a/src/svm/svmdb.c +++ b/src/svm/svmdb.c @@ -106,11 +106,16 @@ svmdb_map (svmdb_map_args_t * dba) } /* Nope, it's our problem... */ - /* Add a bogus client (pid=0) so the svm won't be deallocated */ - oldheap = svm_push_pvt_heap (db_rp); - vec_add1 (client->db_rp->client_pids, 0); - svm_pop_heap (oldheap); - + if (CLIB_DEBUG > 2) + { + /* Add a bogus client (pid=0) so the svm won't be deallocated */ + clib_warning + ("[%d] adding fake client (pid=0) so '%s' won't be unlinked", + getpid (), db_rp->region_name); + oldheap = svm_push_pvt_heap (db_rp); + vec_add1 (client->db_rp->client_pids, 0); + svm_pop_heap (oldheap); + } oldheap = svm_push_data_heap (db_rp); vec_validate (hp, 0); diff --git a/src/vlib/buffer.h b/src/vlib/buffer.h index c810db4e..77528e77 100644 --- a/src/vlib/buffer.h +++ b/src/vlib/buffer.h @@ -87,17 +87,17 @@ typedef struct /* any change to the following line requres update of * vlib_buffer_get_free_list_index(...) and * vlib_buffer_set_free_list_index(...) 
functions */ -#define VLIB_BUFFER_FREE_LIST_INDEX_MASK ((1 << 4) - 1) +#define VLIB_BUFFER_FREE_LIST_INDEX_MASK ((1 << 5) - 1) -#define VLIB_BUFFER_IS_TRACED (1 << 4) -#define VLIB_BUFFER_LOG2_NEXT_PRESENT (5) +#define VLIB_BUFFER_IS_TRACED (1 << 5) +#define VLIB_BUFFER_LOG2_NEXT_PRESENT (6) #define VLIB_BUFFER_NEXT_PRESENT (1 << VLIB_BUFFER_LOG2_NEXT_PRESENT) -#define VLIB_BUFFER_IS_RECYCLED (1 << 6) -#define VLIB_BUFFER_TOTAL_LENGTH_VALID (1 << 7) -#define VLIB_BUFFER_REPL_FAIL (1 << 8) -#define VLIB_BUFFER_RECYCLE (1 << 9) -#define VLIB_BUFFER_FLOW_REPORT (1 << 10) -#define VLIB_BUFFER_EXT_HDR_VALID (1 << 11) +#define VLIB_BUFFER_IS_RECYCLED (1 << 7) +#define VLIB_BUFFER_TOTAL_LENGTH_VALID (1 << 8) +#define VLIB_BUFFER_REPL_FAIL (1 << 9) +#define VLIB_BUFFER_RECYCLE (1 << 10) +#define VLIB_BUFFER_FLOW_REPORT (1 << 11) +#define VLIB_BUFFER_EXT_HDR_VALID (1 << 12) /* User defined buffer flags. */ #define LOG2_VLIB_BUFFER_FLAG_USER(n) (32 - (n)) diff --git a/src/vpp.am b/src/vpp.am index 614bd26a..10a4e311 100644 --- a/src/vpp.am +++ b/src/vpp.am @@ -33,11 +33,11 @@ if WITH_APICLI vpp/api/plugin.h endif -# comment out to disable stats upload to gmond +# uncomment to enable stats upload to gmond +# bin_vpp_SOURCES += \ +# vpp/api/gmon.c bin_vpp_CFLAGS = @APICLI@ -bin_vpp_SOURCES += \ - vpp/api/gmon.c nobase_include_HEADERS += \ vpp/api/vpe_all_api_h.h \ diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c index de9a2477..4e892431 100644 --- a/src/vpp/api/api.c +++ b/src/vpp/api/api.c @@ -686,7 +686,6 @@ static void BAD_SW_IF_INDEX_LABEL; -out: REPLY_MACRO (VL_API_PROXY_ARP_INTFC_ENABLE_DISABLE_REPLY); } diff --git a/src/vpp/api/api_main.c b/src/vpp/api/api_main.c index ac09cd15..c355a5fd 100644 --- a/src/vpp/api/api_main.c +++ b/src/vpp/api/api_main.c @@ -232,8 +232,6 @@ unformat_sw_if_index (unformat_input_t * input, va_list * args) u32 *result = va_arg (*args, u32 *); vnet_main_t *vnm = vnet_get_main (); u32 sw_if_index = ~0; - u8 *if_name; - uword *p; if (unformat (input, "%U", unformat_vnet_sw_interface, vnm, &sw_if_index)) { diff --git a/src/vpp/api/custom_dump.c b/src/vpp/api/custom_dump.c index 3ac8874e..7f3a58d9 100644 --- a/src/vpp/api/custom_dump.c +++ b/src/vpp/api/custom_dump.c @@ -1174,7 +1174,6 @@ static void *vl_api_sr_policy_mod_t_print (vl_api_sr_policy_mod_t * mp, void *handle) { u8 *s; - u32 weight; ip6_address_t *segments = 0, *seg; ip6_address_t *this_address = (ip6_address_t *) mp->segments; @@ -1216,8 +1215,6 @@ static void *vl_api_sr_policy_del_t_print u8 *s; s = format (0, "SCRIPT: sr_policy_del "); - u8 bsid_addr[16]; - u32 sr_policy_index; s = format (s, "To be delivered. 
Good luck."); FINISH; } @@ -2432,7 +2429,7 @@ static void *vl_api_lisp_add_del_remote_mapping_t_print (vl_api_lisp_add_del_remote_mapping_t * mp, void *handle) { u8 *s; - u32 i, rloc_num = 0; + u32 rloc_num = 0; s = format (0, "SCRIPT: lisp_add_del_remote_mapping "); @@ -2574,7 +2571,6 @@ static void *vl_api_lisp_add_del_locator_set_t_print (vl_api_lisp_add_del_locator_set_t * mp, void *handle) { u8 *s; - u32 loc_num = 0, i; s = format (0, "SCRIPT: lisp_add_del_locator_set "); @@ -2583,8 +2579,6 @@ static void *vl_api_lisp_add_del_locator_set_t_print s = format (s, "locator-set %s ", mp->locator_set_name); - loc_num = clib_net_to_host_u32 (mp->locator_num); - FINISH; } diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c index 38821da7..422b7b3b 100644 --- a/src/vpp/stats/stats.c +++ b/src/vpp/stats/stats.c @@ -577,7 +577,6 @@ do_ip4_fibs (stats_main_t * sm) ip4_route_t *r; fib_table_t *fib; ip4_fib_t *v4_fib; - ip_lookup_main_t *lm = &im4->lookup_main; static uword *results; vl_api_vnet_ip4_fib_counters_t *mp = 0; u32 items_this_message; -- cgit 1.2.3-korg From 8ea8916bb8fa85b298e0e5700ba15e4455235015 Mon Sep 17 00:00:00 2001 From: "Keith Burns (alagalah)" Date: Thu, 17 Aug 2017 13:33:15 -0700 Subject: IP4_FIB stats API not indexing after FIB changes - VPP-951 Change-Id: I9ec36ee82ddd8f2f7a551b458e19b2fbae4aa7e7 Signed-off-by: Keith Burns (alagalah) --- src/vpp/stats/stats.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'src/vpp/stats/stats.c') diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c index 422b7b3b..b0fac73d 100644 --- a/src/vpp/stats/stats.c +++ b/src/vpp/stats/stats.c @@ -647,9 +647,11 @@ again: vec_foreach (r, routes) { vlib_counter_t c; + const dpo_id_t *dpo_id; + dpo_id = fib_entry_contribute_ip_forwarding(r->index); vlib_get_combined_counter (&load_balance_main.lbm_to_counters, - r->index, &c); + (u32)dpo_id->dpoi_index, &c); /* * If it has actually * seen at least one packet, send it. -- cgit 1.2.3-korg From 8a19f12a0cfe6d611f6e266931af691fb69a74ad Mon Sep 17 00:00:00 2001 From: "Keith Burns (alagalah)" Date: Sun, 6 Aug 2017 08:26:29 -0700 Subject: Allow individual stats API and introduce stats.api - want_interface_simple_stats - want_interface_combined_stats - want_ip4|6_fib|nbr_stats Change-Id: I4e97461def508958b3e429c3fe8859b36fef2d18 Signed-off-by: Keith Burns (alagalah) --- .gitignore | 1 + src/uri.am | 4 +- src/vlibapi/api_helper_macros.h | 1 - src/vpp.am | 12 +- src/vpp/api/api.c | 59 -- src/vpp/api/vpe.api | 112 +--- src/vpp/api/vpe_all_api_h.h | 3 + src/vpp/stats/stats.api | 222 +++++++ src/vpp/stats/stats.c | 1265 ++++++++++++++++++++++++++------------- src/vpp/stats/stats.h | 45 +- 10 files changed, 1132 insertions(+), 592 deletions(-) create mode 100644 src/vpp/stats/stats.api (limited to 'src/vpp/stats/stats.c') diff --git a/.gitignore b/.gitignore index 10aab98c..dabb31e8 100644 --- a/.gitignore +++ b/.gitignore @@ -55,6 +55,7 @@ test-driver *.iml .bootstrap.ok .settings +.autotools # stop autotools ignore # OSX and some IDE diff --git a/src/uri.am b/src/uri.am index ae6feb6f..660f897d 100644 --- a/src/uri.am +++ b/src/uri.am @@ -1,4 +1,4 @@ -# Copyright (c) 2016 Cisco and/or its affiliates. +# Copyright (c) 2017 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at: @@ -58,7 +58,7 @@ vcl_test_server_SOURCES = uri/vcl_test_server.c vcl_test_server_LDADD = libvppcom.la vcl_test_client_SOURCES = uri/vcl_test_client.c -vcl_test_client_LDADD = libvppcom.la +vcl_test_client_LDADD = libvppcom.la sock_test_server_SOURCES = uri/sock_test_server.c sock_test_client_SOURCES = uri/sock_test_client.c diff --git a/src/vlibapi/api_helper_macros.h b/src/vlibapi/api_helper_macros.h index 2e29d4d7..b8a9978a 100644 --- a/src/vlibapi/api_helper_macros.h +++ b/src/vlibapi/api_helper_macros.h @@ -217,7 +217,6 @@ _(from_netconf_client) \ _(oam_events) \ _(bfd_events) -/* WARNING: replicated in vpp/stats.h */ typedef struct { u32 client_index; /* in memclnt registration pool */ diff --git a/src/vpp.am b/src/vpp.am index 10a4e311..9a07cefe 100644 --- a/src/vpp.am +++ b/src/vpp.am @@ -42,9 +42,11 @@ bin_vpp_CFLAGS = @APICLI@ nobase_include_HEADERS += \ vpp/api/vpe_all_api_h.h \ vpp/api/vpe_msg_enum.h \ + vpp/stats/stats.api.h \ vpp/api/vpe.api.h API_FILES += vpp/api/vpe.api +API_FILES += vpp/stats/stats.api BUILT_SOURCES += .version @@ -116,13 +118,13 @@ endif noinst_PROGRAMS += bin/summary_stats_client bin_summary_stats_client_SOURCES = \ - vpp/api/summary_stats_client.c + vpp/api/summary_stats_client.c bin_summary_stats_client_LDADD = \ - libvlibmemoryclient.la \ - libsvm.la \ - libvppinfra.la \ - -lpthread -lm -lrt + libvlibmemoryclient.la \ + libsvm.la \ + libvppinfra.la \ + -lpthread -lm -lrt bin_PROGRAMS += bin/vpp_get_metrics diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c index e8bc22ae..f9c3129c 100644 --- a/src/vpp/api/api.c +++ b/src/vpp/api/api.c @@ -110,7 +110,6 @@ _(CREATE_VLAN_SUBIF, create_vlan_subif) \ _(CREATE_SUBIF, create_subif) \ _(PROXY_ARP_ADD_DEL, proxy_arp_add_del) \ _(PROXY_ARP_INTFC_ENABLE_DISABLE, proxy_arp_intfc_enable_disable) \ -_(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \ _(RESET_FIB, reset_fib) \ _(CREATE_LOOPBACK, create_loopback) \ _(CREATE_LOOPBACK_INSTANCE, create_loopback_instance) \ @@ -745,64 +744,6 @@ vl_api_oam_add_del_t_handler (vl_api_oam_add_del_t * mp) REPLY_MACRO (VL_API_OAM_ADD_DEL_REPLY); } -static void -vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp) -{ - stats_main_t *sm = &stats_main; - vnet_interface_main_t *im = sm->interface_main; - vl_api_vnet_get_summary_stats_reply_t *rmp; - vlib_combined_counter_main_t *cm; - vlib_counter_t v; - int i, which; - u64 total_pkts[VLIB_N_RX_TX]; - u64 total_bytes[VLIB_N_RX_TX]; - - unix_shared_memory_queue_t *q = - vl_api_client_index_to_input_queue (mp->client_index); - - if (!q) - return; - - rmp = vl_msg_api_alloc (sizeof (*rmp)); - rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY); - rmp->context = mp->context; - rmp->retval = 0; - - memset (total_pkts, 0, sizeof (total_pkts)); - memset (total_bytes, 0, sizeof (total_bytes)); - - vnet_interface_counter_lock (im); - - vec_foreach (cm, im->combined_sw_if_counters) - { - which = cm - im->combined_sw_if_counters; - - for (i = 0; i < vlib_combined_counter_n_counters (cm); i++) - { - vlib_get_combined_counter (cm, i, &v); - total_pkts[which] += v.packets; - total_bytes[which] += v.bytes; - } - } - vnet_interface_counter_unlock (im); - - rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]); - rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]); - rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]); - rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]); - rmp->vector_rate = - 
clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main)); - - vl_msg_api_send_shmem (q, (u8 *) & rmp); -} - -/* *INDENT-OFF* */ -typedef CLIB_PACKED (struct { - ip4_address_t address; - u32 address_length: 6; - u32 index:26; -}) ip4_route_t; -/* *INDENT-ON* */ static int ip4_reset_fib_t_handler (vl_api_reset_fib_t * mp) diff --git a/src/vpp/api/vpe.api b/src/vpp/api/vpe.api index eda95ce5..d68beae1 100644 --- a/src/vpp/api/vpe.api +++ b/src/vpp/api/vpe.api @@ -47,6 +47,7 @@ * DHCP APIs: see ... /src/vnet/dhcp/{dhcpk.api, dhcp_api.c} * COP APIs: see ... /src/vnet/cop/{cop.api, cop_api.c} * POLICER APIs: see ... /src/vnet/policer/{policer.api, policer_api.c} + * STATS APIs: see .../src/vpp/stats/{stats.api, stats.c} */ /** \brief Create a new subinterface with the given vlan id @@ -136,117 +137,6 @@ autoreply define reset_vrf u32 vrf_id; }; -/** \brief Want Stats, register for stats updates - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param enable_disable - 1 = enable stats, 0 = disable - @param pid - pid of process requesting stats updates -*/ -autoreply define want_stats -{ - u32 client_index; - u32 context; - u32 enable_disable; - u32 pid; -}; - -typeonly manual_print manual_endian define ip4_fib_counter -{ - u32 address; - u8 address_length; - u64 packets; - u64 bytes; -}; - -manual_print manual_endian define vnet_ip4_fib_counters -{ - u32 vrf_id; - u32 count; - vl_api_ip4_fib_counter_t c[count]; -}; - -typeonly manual_print manual_endian define ip4_nbr_counter -{ - u32 address; - u8 link_type; - u64 packets; - u64 bytes; -}; - -/** - * @brief Per-neighbour (i.e. per-adjacency) coutners - * @param count The size of the array of counters - * @param sw_if_index The interface the adjacency is on - * @param begin Flag to indicate this is the first set of stats for this - * interface. 
If this flag is not set the it is a continuation of - * stats for this interface - * @param c counters - */ -manual_print manual_endian define vnet_ip4_nbr_counters -{ - u32 count; - u32 sw_if_index; - u8 begin; - vl_api_ip4_nbr_counter_t c[count]; -}; - -typeonly manual_print manual_endian define ip6_fib_counter -{ - u64 address[2]; - u8 address_length; - u64 packets; - u64 bytes; -}; - -manual_print manual_endian define vnet_ip6_fib_counters -{ - u32 vrf_id; - u32 count; - vl_api_ip6_fib_counter_t c[count]; -}; - -typeonly manual_print manual_endian define ip6_nbr_counter -{ - u64 address[2]; - u8 link_type; - u64 packets; - u64 bytes; -}; - -manual_print manual_endian define vnet_ip6_nbr_counters -{ - u32 count; - u32 sw_if_index; - u8 begin; - vl_api_ip6_nbr_counter_t c[count]; -}; - -/** \brief Request for a single block of summary stats - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request -*/ -define vnet_get_summary_stats -{ - u32 client_index; - u32 context; -}; - -/** \brief Reply for vnet_get_summary_stats request - @param context - sender context, to match reply w/ request - @param retval - return code for request - @param total_pkts - - @param total_bytes - - @param vector_rate - -*/ -define vnet_get_summary_stats_reply -{ - u32 context; - i32 retval; - u64 total_pkts[2]; - u64 total_bytes[2]; - f64 vector_rate; -}; - /** \brief OAM event structure @param dst_address[] - @param state diff --git a/src/vpp/api/vpe_all_api_h.h b/src/vpp/api/vpe_all_api_h.h index 397cd807..d35a0535 100644 --- a/src/vpp/api/vpe_all_api_h.h +++ b/src/vpp/api/vpe_all_api_h.h @@ -28,6 +28,9 @@ /* Include the current layer (third) vpp API definition layer */ #include +/* Include stats APIs */ +#include + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vpp/stats/stats.api b/src/vpp/stats/stats.api new file mode 100644 index 00000000..7f745859 --- /dev/null +++ b/src/vpp/stats/stats.api @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2015-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** \file + + This file defines the stats API +*/ + + +/** \brief Want Stats, enable/disable ALL stats updates + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param enable_disable - 1 = enable stats, 0 = disable + @param pid - pid of process requesting stats updates +*/ +autoreply define want_stats +{ + u32 client_index; + u32 context; + u32 enable_disable; + u32 pid; +}; + +/** \brief Want Interface Simple Stats, register for detailed interface stats + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param enable_disable - 1 = enable stats, 0 = disable + @param pid - pid of process requesting stats updates +*/ +autoreply define want_interface_simple_stats +{ + u32 client_index; + u32 context; + u32 enable_disable; + u32 pid; +}; + +/** \brief Want Interface Combined Stats, register for continuous stats + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param enable_disable - 1 = enable stats, 0 = disable + @param pid - pid of process requesting stats updates +*/ +autoreply define want_interface_combined_stats +{ + u32 client_index; + u32 context; + u32 enable_disable; + u32 pid; +}; + +/** \brief Want IP4 FIB Stats, register for continuous stats + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param enable_disable - 1 = enable stats, 0 = disable + @param pid - pid of process requesting stats updates +*/ +autoreply define want_ip4_fib_stats +{ + u32 client_index; + u32 context; + u32 enable_disable; + u32 pid; +}; + +/** \brief Want IP6 FIB Stats, register for continuous stats + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param enable_disable - 1 = enable stats, 0 = disable + @param pid - pid of process requesting stats updates +*/ +autoreply define want_ip6_fib_stats +{ + u32 client_index; + u32 context; + u32 enable_disable; + u32 pid; +}; + +/** \brief Want IP4 NBR Stats, register for continuous stats + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param enable_disable - 1 = enable stats, 0 = disable + @param pid - pid of process requesting stats updates +*/ +autoreply define want_ip4_nbr_stats +{ + u32 client_index; + u32 context; + u32 enable_disable; + u32 pid; +}; + +/** \brief Want IP6 NBR Stats, register for continuous stats + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param enable_disable - 1 = enable stats, 0 = disable + @param pid - pid of process requesting stats updates +*/ +autoreply define want_ip6_nbr_stats +{ + u32 client_index; + u32 context; + u32 enable_disable; + u32 pid; +}; + +typeonly manual_print manual_endian define ip4_fib_counter +{ + u32 address; + u8 address_length; + u64 packets; + u64 bytes; +}; + +manual_print manual_endian define vnet_ip4_fib_counters +{ + u32 vrf_id; + u32 count; + vl_api_ip4_fib_counter_t c[count]; +}; + +typeonly manual_print manual_endian define ip4_nbr_counter +{ + u32 address; + u8 link_type; + u64 packets; + u64 bytes; +}; + +/** + * @brief Per-neighbour (i.e. 
per-adjacency) coutners + * @param count The size of the array of counters + * @param sw_if_index The interface the adjacency is on + * @param begin Flag to indicate this is the first set of stats for this + * interface. If this flag is not set the it is a continuation of + * stats for this interface + * @param c counters + */ +manual_print manual_endian define vnet_ip4_nbr_counters +{ + u32 count; + u32 sw_if_index; + u8 begin; + vl_api_ip4_nbr_counter_t c[count]; +}; + +typeonly manual_print manual_endian define ip6_fib_counter +{ + u64 address[2]; + u8 address_length; + u64 packets; + u64 bytes; +}; + +manual_print manual_endian define vnet_ip6_fib_counters +{ + u32 vrf_id; + u32 count; + vl_api_ip6_fib_counter_t c[count]; +}; + +typeonly manual_print manual_endian define ip6_nbr_counter +{ + u64 address[2]; + u8 link_type; + u64 packets; + u64 bytes; +}; + +manual_print manual_endian define vnet_ip6_nbr_counters +{ + u32 count; + u32 sw_if_index; + u8 begin; + vl_api_ip6_nbr_counter_t c[count]; +}; + + +/** \brief Request for a single block of summary stats + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define vnet_get_summary_stats +{ + u32 client_index; + u32 context; +}; + +/** \brief Reply for vnet_get_summary_stats request + @param context - sender context, to match reply w/ request + @param retval - return code for request + @param total_pkts - + @param total_bytes - + @param vector_rate - +*/ +define vnet_get_summary_stats_reply +{ + u32 context; + i32 retval; + u64 total_pkts[2]; + u64 total_bytes[2]; + f64 vector_rate; +}; + +/* + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c index b0fac73d..c78a8aef 100644 --- a/src/vpp/stats/stats.c +++ b/src/vpp/stats/stats.c @@ -14,10 +14,8 @@ */ #include #include -#include -#include -#include #include +#include #include #define STATS_DEBUG 0 @@ -48,11 +46,19 @@ stats_main_t stats_main; #define foreach_stats_msg \ _(WANT_STATS, want_stats) \ _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \ +_(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \ _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \ +_(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \ _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \ +_(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \ _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \ +_(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \ _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \ -_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) +_(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \ +_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \ +_(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \ +_(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) + /* These constants ensure msg sizes <= 1024, aka ring allocation */ #define SIMPLE_COUNTER_BATCH_SIZE 126 @@ -64,6 +70,110 @@ _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5) /* ns/us us/ms */ +u8 * +format_vnet_interface_combined_counters (u8 * s, va_list * args) +{ + stats_main_t *sm = &stats_main; + vl_api_vnet_interface_combined_counters_t *mp = + va_arg (*args, vl_api_vnet_interface_combined_counters_t *); + + char *counter_name; + u32 count, sw_if_index; + int i; + count = ntohl (mp->count); + sw_if_index = ntohl (mp->first_sw_if_index); + + vlib_counter_t *vp; + u64 packets, bytes; + vp = (vlib_counter_t *) mp->data; + + switch (mp->vnet_counter_type) + { + 
case VNET_INTERFACE_COUNTER_RX: + counter_name = "rx"; + break; + case VNET_INTERFACE_COUNTER_TX: + counter_name = "tx"; + break; + default: + counter_name = "bogus"; + break; + } + for (i = 0; i < count; i++) + { + packets = clib_mem_unaligned (&vp->packets, u64); + packets = clib_net_to_host_u64 (packets); + bytes = clib_mem_unaligned (&vp->bytes, u64); + bytes = clib_net_to_host_u64 (bytes); + vp++; + s = format (s, "%U.%s.packets %lld\n", + format_vnet_sw_if_index_name, + sm->vnet_main, sw_if_index, counter_name, packets); + s = format (s, "%U.%s.bytes %lld\n", + format_vnet_sw_if_index_name, + sm->vnet_main, sw_if_index, counter_name, bytes); + sw_if_index++; + } + return s; +} + +u8 * +format_vnet_interface_simple_counters (u8 * s, va_list * args) +{ + stats_main_t *sm = &stats_main; + vl_api_vnet_interface_simple_counters_t *mp = + va_arg (*args, vl_api_vnet_interface_simple_counters_t *); + char *counter_name; + u32 count, sw_if_index; + count = ntohl (mp->count); + sw_if_index = ntohl (mp->first_sw_if_index); + u64 *vp, v; + vp = (u64 *) mp->data; + int i; + + switch (mp->vnet_counter_type) + { + case VNET_INTERFACE_COUNTER_DROP: + counter_name = "drop"; + break; + case VNET_INTERFACE_COUNTER_PUNT: + counter_name = "punt"; + break; + case VNET_INTERFACE_COUNTER_IP4: + counter_name = "ip4"; + break; + case VNET_INTERFACE_COUNTER_IP6: + counter_name = "ip6"; + break; + case VNET_INTERFACE_COUNTER_RX_NO_BUF: + counter_name = "rx-no-buff"; + break; + case VNET_INTERFACE_COUNTER_RX_MISS: + counter_name = "rx-miss"; + break; + case VNET_INTERFACE_COUNTER_RX_ERROR: + counter_name = "rx-error (fifo-full)"; + break; + case VNET_INTERFACE_COUNTER_TX_ERROR: + counter_name = "tx-error (fifo-full)"; + break; + default: + counter_name = "bogus"; + break; + } + for (i = 0; i < count; i++) + { + v = clib_mem_unaligned (vp, u64); + v = clib_net_to_host_u64 (v); + vp++; + s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name, + sm->vnet_main, sw_if_index, counter_name, v); + sw_if_index++; + } + + return s; +} + void dslock (stats_main_t * sm, int release_hint, int tag) { @@ -231,14 +341,6 @@ do_combined_interface_counters (stats_main_t * sm) vnet_interface_counter_unlock (im); } -/* from .../vnet/vnet/ip/lookup.c. 
Yuck */ -typedef CLIB_PACKED (struct - { - ip4_address_t address; -u32 address_length: 6; -u32 index: 26; - }) ip4_route_t; - static void ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec) { @@ -573,167 +675,177 @@ do_ip4_fibs (stats_main_t * sm) api_main_t *am = sm->api_main; vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr; unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue; - static ip4_route_t *routes; ip4_route_t *r; fib_table_t *fib; ip4_fib_t *v4_fib; - static uword *results; + do_ip46_fibs_t *do_fibs; vl_api_vnet_ip4_fib_counters_t *mp = 0; u32 items_this_message; vl_api_ip4_fib_counter_t *ctrp = 0; u32 start_at_fib_index = 0; - int i; + int i, j, k; + + do_fibs = &sm->do_ip46_fibs; again: + vec_reset_length (do_fibs->fibs); /* *INDENT-OFF* */ pool_foreach (fib, im4->fibs, - ({ - /* We may have bailed out due to control-plane activity */ - while ((fib - im4->fibs) < start_at_fib_index) - continue; + ({vec_add1(do_fibs->fibs,fib);})); - v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index); + /* *INDENT-ON* */ - if (mp == 0) - { - items_this_message = IP4_FIB_COUNTER_BATCH_SIZE; - mp = vl_msg_api_alloc_as_if_client - (sizeof (*mp) + - items_this_message * sizeof (vl_api_ip4_fib_counter_t)); - mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS); - mp->count = 0; - mp->vrf_id = ntohl (fib->ft_table_id); - ctrp = (vl_api_ip4_fib_counter_t *) mp->c; - } - else - { - /* happens if the last FIB was empty... */ - ASSERT (mp->count == 0); - mp->vrf_id = ntohl (fib->ft_table_id); - } + for (j = 0; j < vec_len (do_fibs->fibs); j++) + { + fib = do_fibs->fibs[j]; + /* We may have bailed out due to control-plane activity */ + while ((fib - im4->fibs) < start_at_fib_index) + continue; - dslock (sm, 0 /* release hint */ , 1 /* tag */ ); + v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index); - vec_reset_length (routes); - vec_reset_length (results); + if (mp == 0) + { + items_this_message = IP4_FIB_COUNTER_BATCH_SIZE; + mp = vl_msg_api_alloc_as_if_client + (sizeof (*mp) + + items_this_message * sizeof (vl_api_ip4_fib_counter_t)); + mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS); + mp->count = 0; + mp->vrf_id = ntohl (fib->ft_table_id); + ctrp = (vl_api_ip4_fib_counter_t *) mp->c; + } + else + { + /* happens if the last FIB was empty... */ + ASSERT (mp->count == 0); + mp->vrf_id = ntohl (fib->ft_table_id); + } - for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++) - { - uword *hash = v4_fib->fib_entry_by_dst_address[i]; - hash_pair_t *p; - ip4_route_t x; - - x.address_length = i; - - hash_foreach_pair (p, hash, - ({ - x.address.data_u32 = p->key; - x.index = p->value[0]; - - vec_add1 (routes, x); - if (sm->data_structure_lock->release_hint) - { - start_at_fib_index = fib - im4->fibs; - dsunlock (sm); - ip46_fib_stats_delay (sm, 0 /* sec */, - STATS_RELEASE_DELAY_NS); - mp->count = 0; - ctrp = (vl_api_ip4_fib_counter_t *)mp->c; - goto again; - } - })); - } + dslock (sm, 0 /* release hint */ , 1 /* tag */ ); - vec_foreach (r, routes) - { - vlib_counter_t c; - const dpo_id_t *dpo_id; - - dpo_id = fib_entry_contribute_ip_forwarding(r->index); - vlib_get_combined_counter (&load_balance_main.lbm_to_counters, - (u32)dpo_id->dpoi_index, &c); - /* - * If it has actually - * seen at least one packet, send it. 
- */ - if (c.packets > 0) - { + vec_reset_length (do_fibs->ip4routes); + vec_reset_length (do_fibs->results); - /* already in net byte order */ - ctrp->address = r->address.as_u32; - ctrp->address_length = r->address_length; - ctrp->packets = clib_host_to_net_u64 (c.packets); - ctrp->bytes = clib_host_to_net_u64 (c.bytes); - mp->count++; - ctrp++; + for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++) + { + uword *hash = v4_fib->fib_entry_by_dst_address[i]; + hash_pair_t *p; + ip4_route_t x; + + vec_reset_length (do_fibs->pvec); + + x.address_length = i; + + hash_foreach_pair (p, hash, ( + { + vec_add1 (do_fibs->pvec, p);} + )); + for (k = 0; k < vec_len (do_fibs->pvec); k++) + { + p = do_fibs->pvec[k]; + x.address.data_u32 = p->key; + x.index = p->value[0]; + + vec_add1 (do_fibs->ip4routes, x); + if (sm->data_structure_lock->release_hint) + { + start_at_fib_index = fib - im4->fibs; + dsunlock (sm); + ip46_fib_stats_delay (sm, 0 /* sec */ , + STATS_RELEASE_DELAY_NS); + mp->count = 0; + ctrp = (vl_api_ip4_fib_counter_t *) mp->c; + goto again; + } + } + } - if (mp->count == items_this_message) - { - mp->count = htonl (items_this_message); - /* - * If the main thread's input queue is stuffed, - * drop the data structure lock (which the main thread - * may want), and take a pause. - */ - unix_shared_memory_queue_lock (q); - if (unix_shared_memory_queue_is_full (q)) - { - dsunlock (sm); - vl_msg_api_send_shmem_nolock (q, (u8 *) & mp); - unix_shared_memory_queue_unlock (q); - mp = 0; - ip46_fib_stats_delay (sm, 0 /* sec */ , - STATS_RELEASE_DELAY_NS); - goto again; - } - vl_msg_api_send_shmem_nolock (q, (u8 *) & mp); - unix_shared_memory_queue_unlock (q); - - items_this_message = IP4_FIB_COUNTER_BATCH_SIZE; - mp = vl_msg_api_alloc_as_if_client - (sizeof (*mp) + - items_this_message * sizeof (vl_api_ip4_fib_counter_t)); - mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS); - mp->count = 0; - mp->vrf_id = ntohl (fib->ft_table_id); - ctrp = (vl_api_ip4_fib_counter_t *) mp->c; - } - } /* for each (mp or single) adj */ - if (sm->data_structure_lock->release_hint) - { - start_at_fib_index = fib - im4->fibs; - dsunlock (sm); - ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS); - mp->count = 0; - ctrp = (vl_api_ip4_fib_counter_t *) mp->c; - goto again; - } + vec_foreach (r, do_fibs->ip4routes) + { + vlib_counter_t c; + const dpo_id_t *dpo_id; + u32 index; + + dpo_id = fib_entry_contribute_ip_forwarding (r->index); + index = (u32) dpo_id->dpoi_index; + + vlib_get_combined_counter (&load_balance_main.lbm_to_counters, + index, &c); + /* + * If it has actually + * seen at least one packet, send it. + */ + if (c.packets > 0) + { + + /* already in net byte order */ + ctrp->address = r->address.as_u32; + ctrp->address_length = r->address_length; + ctrp->packets = clib_host_to_net_u64 (c.packets); + ctrp->bytes = clib_host_to_net_u64 (c.bytes); + mp->count++; + ctrp++; + + if (mp->count == items_this_message) + { + mp->count = htonl (items_this_message); + /* + * If the main thread's input queue is stuffed, + * drop the data structure lock (which the main thread + * may want), and take a pause. 
+ */ + unix_shared_memory_queue_lock (q); + if (unix_shared_memory_queue_is_full (q)) + { + dsunlock (sm); + vl_msg_api_send_shmem_nolock (q, (u8 *) & mp); + unix_shared_memory_queue_unlock (q); + mp = 0; + ip46_fib_stats_delay (sm, 0 /* sec */ , + STATS_RELEASE_DELAY_NS); + goto again; + } + vl_msg_api_send_shmem_nolock (q, (u8 *) & mp); + unix_shared_memory_queue_unlock (q); + + items_this_message = IP4_FIB_COUNTER_BATCH_SIZE; + mp = vl_msg_api_alloc_as_if_client + (sizeof (*mp) + + items_this_message * sizeof (vl_api_ip4_fib_counter_t)); + mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS); + mp->count = 0; + mp->vrf_id = ntohl (fib->ft_table_id); + ctrp = (vl_api_ip4_fib_counter_t *) mp->c; + } + } /* for each (mp or single) adj */ + if (sm->data_structure_lock->release_hint) + { + start_at_fib_index = fib - im4->fibs; + dsunlock (sm); + ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS); + mp->count = 0; + ctrp = (vl_api_ip4_fib_counter_t *) mp->c; + goto again; + } } /* vec_foreach (routes) */ - dsunlock (sm); + dsunlock (sm); - /* Flush any data from this fib */ - if (mp->count) - { - mp->count = htonl (mp->count); - vl_msg_api_send_shmem (q, (u8 *) & mp); - mp = 0; - } - })); - /* *INDENT-ON* */ + /* Flush any data from this fib */ + if (mp->count) + { + mp->count = htonl (mp->count); + vl_msg_api_send_shmem (q, (u8 *) & mp); + mp = 0; + } + } /* If e.g. the last FIB had no reportable routes, free the buffer */ if (mp) vl_msg_api_free (mp); } -typedef struct -{ - ip6_address_t address; - u32 address_length; - u32 index; -} ip6_route_t; - typedef struct { u32 fib_index; @@ -769,137 +881,144 @@ do_ip6_fibs (stats_main_t * sm) api_main_t *am = sm->api_main; vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr; unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue; - static ip6_route_t *routes; ip6_route_t *r; fib_table_t *fib; - static uword *results; + do_ip46_fibs_t *do_fibs; vl_api_vnet_ip6_fib_counters_t *mp = 0; u32 items_this_message; vl_api_ip6_fib_counter_t *ctrp = 0; u32 start_at_fib_index = 0; BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash; add_routes_in_fib_arg_t _a, *a = &_a; + int i; + do_fibs = &sm->do_ip46_fibs; again: + vec_reset_length (do_fibs->fibs); /* *INDENT-OFF* */ pool_foreach (fib, im6->fibs, - ({ - /* We may have bailed out due to control-plane activity */ - while ((fib - im6->fibs) < start_at_fib_index) - continue; - - if (mp == 0) - { - items_this_message = IP6_FIB_COUNTER_BATCH_SIZE; - mp = vl_msg_api_alloc_as_if_client - (sizeof (*mp) + - items_this_message * sizeof (vl_api_ip6_fib_counter_t)); - mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS); - mp->count = 0; - mp->vrf_id = ntohl (fib->ft_table_id); - ctrp = (vl_api_ip6_fib_counter_t *) mp->c; - } + ({vec_add1(do_fibs->fibs,fib);})); + /* *INDENT-ON* */ - dslock (sm, 0 /* release hint */ , 1 /* tag */ ); - vec_reset_length (routes); - vec_reset_length (results); + for (i = 0; i < vec_len (do_fibs->fibs); i++) + { + fib = do_fibs->fibs[i]; + /* We may have bailed out due to control-plane activity */ + while ((fib - im6->fibs) < start_at_fib_index) + continue; - a->fib_index = fib - im6->fibs; - a->routep = &routes; - a->sm = sm; + if (mp == 0) + { + items_this_message = IP6_FIB_COUNTER_BATCH_SIZE; + mp = vl_msg_api_alloc_as_if_client + (sizeof (*mp) + + items_this_message * sizeof (vl_api_ip6_fib_counter_t)); + mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS); + mp->count = 0; + mp->vrf_id = ntohl (fib->ft_table_id); + ctrp = (vl_api_ip6_fib_counter_t *) mp->c; 
+ } - if (clib_setjmp (&sm->jmp_buf, 0) == 0) - { - start_at_fib_index = fib - im6->fibs; - BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a); - } - else - { - dsunlock (sm); - ip46_fib_stats_delay (sm, 0 /* sec */ , - STATS_RELEASE_DELAY_NS); - mp->count = 0; - ctrp = (vl_api_ip6_fib_counter_t *) mp->c; - goto again; - } + dslock (sm, 0 /* release hint */ , 1 /* tag */ ); - vec_foreach (r, routes) - { - vlib_counter_t c; - - vlib_get_combined_counter (&load_balance_main.lbm_to_counters, - r->index, &c); - /* - * If it has actually - * seen at least one packet, send it. - */ - if (c.packets > 0) - { - /* already in net byte order */ - ctrp->address[0] = r->address.as_u64[0]; - ctrp->address[1] = r->address.as_u64[1]; - ctrp->address_length = (u8) r->address_length; - ctrp->packets = clib_host_to_net_u64 (c.packets); - ctrp->bytes = clib_host_to_net_u64 (c.bytes); - mp->count++; - ctrp++; - - if (mp->count == items_this_message) - { - mp->count = htonl (items_this_message); - /* - * If the main thread's input queue is stuffed, - * drop the data structure lock (which the main thread - * may want), and take a pause. - */ - unix_shared_memory_queue_lock (q); - if (unix_shared_memory_queue_is_full (q)) - { - dsunlock (sm); - vl_msg_api_send_shmem_nolock (q, (u8 *) & mp); - unix_shared_memory_queue_unlock (q); - mp = 0; - ip46_fib_stats_delay (sm, 0 /* sec */ , - STATS_RELEASE_DELAY_NS); - goto again; - } - vl_msg_api_send_shmem_nolock (q, (u8 *) & mp); - unix_shared_memory_queue_unlock (q); - - items_this_message = IP6_FIB_COUNTER_BATCH_SIZE; - mp = vl_msg_api_alloc_as_if_client - (sizeof (*mp) + - items_this_message * sizeof (vl_api_ip6_fib_counter_t)); - mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS); - mp->count = 0; - mp->vrf_id = ntohl (fib->ft_table_id); - ctrp = (vl_api_ip6_fib_counter_t *) mp->c; - } - } + vec_reset_length (do_fibs->ip6routes); + vec_reset_length (do_fibs->results); - if (sm->data_structure_lock->release_hint) - { - start_at_fib_index = fib - im6->fibs; - dsunlock (sm); - ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS); - mp->count = 0; - ctrp = (vl_api_ip6_fib_counter_t *) mp->c; - goto again; - } - } /* vec_foreach (routes) */ + a->fib_index = fib - im6->fibs; + a->routep = &do_fibs->ip6routes; + a->sm = sm; - dsunlock (sm); + if (clib_setjmp (&sm->jmp_buf, 0) == 0) + { + start_at_fib_index = fib - im6->fibs; + BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a); + } + else + { + dsunlock (sm); + ip46_fib_stats_delay (sm, 0 /* sec */ , + STATS_RELEASE_DELAY_NS); + mp->count = 0; + ctrp = (vl_api_ip6_fib_counter_t *) mp->c; + goto again; + } - /* Flush any data from this fib */ - if (mp->count) + vec_foreach (r, do_fibs->ip6routes) { - mp->count = htonl (mp->count); - vl_msg_api_send_shmem (q, (u8 *) & mp); - mp = 0; - } - })); - /* *INDENT-ON* */ + vlib_counter_t c; + + vlib_get_combined_counter (&load_balance_main.lbm_to_counters, + r->index, &c); + /* + * If it has actually + * seen at least one packet, send it. 
+ */ + if (c.packets > 0) + { + /* already in net byte order */ + ctrp->address[0] = r->address.as_u64[0]; + ctrp->address[1] = r->address.as_u64[1]; + ctrp->address_length = (u8) r->address_length; + ctrp->packets = clib_host_to_net_u64 (c.packets); + ctrp->bytes = clib_host_to_net_u64 (c.bytes); + mp->count++; + ctrp++; + + if (mp->count == items_this_message) + { + mp->count = htonl (items_this_message); + /* + * If the main thread's input queue is stuffed, + * drop the data structure lock (which the main thread + * may want), and take a pause. + */ + unix_shared_memory_queue_lock (q); + if (unix_shared_memory_queue_is_full (q)) + { + dsunlock (sm); + vl_msg_api_send_shmem_nolock (q, (u8 *) & mp); + unix_shared_memory_queue_unlock (q); + mp = 0; + ip46_fib_stats_delay (sm, 0 /* sec */ , + STATS_RELEASE_DELAY_NS); + goto again; + } + vl_msg_api_send_shmem_nolock (q, (u8 *) & mp); + unix_shared_memory_queue_unlock (q); + + items_this_message = IP6_FIB_COUNTER_BATCH_SIZE; + mp = vl_msg_api_alloc_as_if_client + (sizeof (*mp) + + items_this_message * sizeof (vl_api_ip6_fib_counter_t)); + mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS); + mp->count = 0; + mp->vrf_id = ntohl (fib->ft_table_id); + ctrp = (vl_api_ip6_fib_counter_t *) mp->c; + } + } + + if (sm->data_structure_lock->release_hint) + { + start_at_fib_index = fib - im6->fibs; + dsunlock (sm); + ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS); + mp->count = 0; + ctrp = (vl_api_ip6_fib_counter_t *) mp->c; + goto again; + } + } /* vec_foreach (routes) */ + + dsunlock (sm); + + /* Flush any data from this fib */ + if (mp->count) + { + mp->count = htonl (mp->count); + vl_msg_api_send_shmem (q, (u8 *) & mp); + mp = 0; + } + } /* If e.g. the last FIB had no reportable routes, free the buffer */ if (mp) @@ -946,84 +1065,45 @@ static void vl_api_vnet_interface_simple_counters_t_handler (vl_api_vnet_interface_simple_counters_t * mp) { - vpe_client_registration_t *reg; + vpe_client_stats_registration_t *reg; stats_main_t *sm = &stats_main; unix_shared_memory_queue_t *q, *q_prev = NULL; vl_api_vnet_interface_simple_counters_t *mp_copy = NULL; u32 mp_size; - -#if STATS_DEBUG > 0 - char *counter_name; - u32 count, sw_if_index; int i; -#endif mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64)); /* *INDENT-OFF* */ + vec_reset_length(sm->regs); pool_foreach(reg, sm->stats_registrations, ({ - q = vl_api_client_index_to_input_queue (reg->client_index); - if (q) - { - if (q_prev && (q_prev->cursize < q_prev->maxsize)) - { - mp_copy = vl_msg_api_alloc_as_if_client(mp_size); - clib_memcpy(mp_copy, mp, mp_size); - vl_msg_api_send_shmem (q_prev, (u8 *)&mp); - mp = mp_copy; - } - q_prev = q; - } + vec_add1(sm->regs,reg); })); /* *INDENT-ON* */ - -#if STATS_DEBUG > 0 - count = ntohl (mp->count); - sw_if_index = ntohl (mp->first_sw_if_index); - u64 *vp, v; - vp = (u64 *) mp->data; - - switch (mp->vnet_counter_type) - { - case VNET_INTERFACE_COUNTER_DROP: - counter_name = "drop"; - break; - case VNET_INTERFACE_COUNTER_PUNT: - counter_name = "punt"; - break; - case VNET_INTERFACE_COUNTER_IP4: - counter_name = "ip4"; - break; - case VNET_INTERFACE_COUNTER_IP6: - counter_name = "ip6"; - break; - case VNET_INTERFACE_COUNTER_RX_NO_BUF: - counter_name = "rx-no-buff"; - break; - case VNET_INTERFACE_COUNTER_RX_MISS: - , counter_name = "rx-miss"; - break; - case VNET_INTERFACE_COUNTER_RX_ERROR: - , counter_name = "rx-error (fifo-full)"; - break; - case VNET_INTERFACE_COUNTER_TX_ERROR: - , counter_name = "tx-error (fifo-full)"; - break; - 
default: - counter_name = "bogus"; - break; - } - for (i = 0; i < count; i++) + for (i = 0; i < vec_len (sm->regs); i++) { - v = clib_mem_unaligned (vp, u64); - v = clib_net_to_host_u64 (v); - vp++; - fformat (stdout, "%U.%s %lld\n", format_vnet_sw_if_index_name, - sm->vnet_main, sw_if_index, counter_name, v); - sw_if_index++; + reg = sm->regs[i]; + if (reg->stats_registrations & INTERFACE_SIMPLE_COUNTERS) + { + q = vl_api_client_index_to_input_queue (reg->client.client_index); + if (q) + { + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + mp_copy = vl_msg_api_alloc_as_if_client (mp_size); + clib_memcpy (mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *) & mp); + mp = mp_copy; + } + q_prev = q; + } + } } +#if STATS_DEBUG > 0 + fformat (stdout, "%U\n", format_vnet_simple_counters, mp); #endif + if (q_prev && (q_prev->cursize < q_prev->maxsize)) { vl_msg_api_send_shmem (q_prev, (u8 *) & mp); @@ -1038,75 +1118,39 @@ static void vl_api_vnet_interface_combined_counters_t_handler (vl_api_vnet_interface_combined_counters_t * mp) { - vpe_client_registration_t *reg; + vpe_client_stats_registration_t *reg; stats_main_t *sm = &stats_main; unix_shared_memory_queue_t *q, *q_prev = NULL; vl_api_vnet_interface_combined_counters_t *mp_copy = NULL; u32 mp_size; -#if STATS_DEBUG > 0 - char *counter_name; - u32 count, sw_if_index; - int i; -#endif - mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t)); /* *INDENT-OFF* */ pool_foreach(reg, sm->stats_registrations, ({ - q = vl_api_client_index_to_input_queue (reg->client_index); - if (q) - { - if (q_prev && (q_prev->cursize < q_prev->maxsize)) - { - mp_copy = vl_msg_api_alloc_as_if_client(mp_size); - clib_memcpy(mp_copy, mp, mp_size); - vl_msg_api_send_shmem (q_prev, (u8 *)&mp); - mp = mp_copy; - } - q_prev = q; - } + if (reg->stats_registrations & INTERFACE_COMBINED_COUNTERS) + { + q = vl_api_client_index_to_input_queue (reg->client.client_index); + if (q) + { + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + mp_copy = vl_msg_api_alloc_as_if_client(mp_size); + clib_memcpy(mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *)&mp); + mp = mp_copy; + } + q_prev = q; + } + } })); /* *INDENT-ON* */ #if STATS_DEBUG > 0 - count = ntohl (mp->count); - sw_if_index = ntohl (mp->first_sw_if_index); - - vlib_counter_t *vp; - u64 packets, bytes; - vp = (vlib_counter_t *) mp->data; - - switch (mp->vnet_counter_type) - { - case VNET_INTERFACE_COUNTER_RX: - counter_name = "rx"; - break; - case VNET_INTERFACE_COUNTER_TX: - counter_name = "tx"; - break; - default: - counter_name = "bogus"; - break; - } - for (i = 0; i < count; i++) - { - packets = clib_mem_unaligned (&vp->packets, u64); - packets = clib_net_to_host_u64 (packets); - bytes = clib_mem_unaligned (&vp->bytes, u64); - bytes = clib_net_to_host_u64 (bytes); - vp++; - fformat (stdout, "%U.%s.packets %lld\n", - format_vnet_sw_if_index_name, - sm->vnet_main, sw_if_index, counter_name, packets); - fformat (stdout, "%U.%s.bytes %lld\n", - format_vnet_sw_if_index_name, - sm->vnet_main, sw_if_index, counter_name, bytes); - sw_if_index++; - } - + fformat (stdout, "%U\n", format_vnet_combined_counters, mp); #endif + if (q_prev && (q_prev->cursize < q_prev->maxsize)) { vl_msg_api_send_shmem (q_prev, (u8 *) & mp); @@ -1120,7 +1164,7 @@ static void static void vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp) { - vpe_client_registration_t *reg; + vpe_client_stats_registration_t *reg; stats_main_t *sm = &stats_main; unix_shared_memory_queue_t *q, 
*q_prev = NULL; vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL; @@ -1132,17 +1176,20 @@ vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp) /* *INDENT-OFF* */ pool_foreach(reg, sm->stats_registrations, ({ - q = vl_api_client_index_to_input_queue (reg->client_index); - if (q) + if (reg->stats_registrations & IP4_FIB_COUNTERS) { - if (q_prev && (q_prev->cursize < q_prev->maxsize)) + q = vl_api_client_index_to_input_queue (reg->client.client_index); + if (q) { - mp_copy = vl_msg_api_alloc_as_if_client(mp_size); - clib_memcpy(mp_copy, mp, mp_size); - vl_msg_api_send_shmem (q_prev, (u8 *)&mp); - mp = mp_copy; + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + mp_copy = vl_msg_api_alloc_as_if_client(mp_size); + clib_memcpy(mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *)&mp); + mp = mp_copy; + } + q_prev = q; } - q_prev = q; } })); /* *INDENT-ON* */ @@ -1159,7 +1206,7 @@ vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp) static void vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp) { - vpe_client_registration_t *reg; + vpe_client_stats_registration_t *reg; stats_main_t *sm = &stats_main; unix_shared_memory_queue_t *q, *q_prev = NULL; vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL; @@ -1171,17 +1218,20 @@ vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp) /* *INDENT-OFF* */ pool_foreach(reg, sm->stats_registrations, ({ - q = vl_api_client_index_to_input_queue (reg->client_index); - if (q) + if (reg->stats_registrations & IP4_NBR_COUNTERS) { - if (q_prev && (q_prev->cursize < q_prev->maxsize)) + q = vl_api_client_index_to_input_queue (reg->client.client_index); + if (q) { - mp_copy = vl_msg_api_alloc_as_if_client(mp_size); - clib_memcpy(mp_copy, mp, mp_size); - vl_msg_api_send_shmem (q_prev, (u8 *)&mp); - mp = mp_copy; + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + mp_copy = vl_msg_api_alloc_as_if_client(mp_size); + clib_memcpy(mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *)&mp); + mp = mp_copy; + } + q_prev = q; } - q_prev = q; } })); /* *INDENT-ON* */ @@ -1198,7 +1248,7 @@ vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp) static void vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp) { - vpe_client_registration_t *reg; + vpe_client_stats_registration_t *reg; stats_main_t *sm = &stats_main; unix_shared_memory_queue_t *q, *q_prev = NULL; vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL; @@ -1210,19 +1260,22 @@ vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp) /* *INDENT-OFF* */ pool_foreach(reg, sm->stats_registrations, ({ - q = vl_api_client_index_to_input_queue (reg->client_index); - if (q) + if (reg->stats_registrations & IP6_FIB_COUNTERS) { - if (q_prev && (q_prev->cursize < q_prev->maxsize)) + q = vl_api_client_index_to_input_queue (reg->client.client_index); + if (q) { - mp_copy = vl_msg_api_alloc_as_if_client(mp_size); - clib_memcpy(mp_copy, mp, mp_size); - vl_msg_api_send_shmem (q_prev, (u8 *)&mp); - mp = mp_copy; + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + mp_copy = vl_msg_api_alloc_as_if_client(mp_size); + clib_memcpy(mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *)&mp); + mp = mp_copy; + } + q_prev = q; } - q_prev = q; } - })); + })); /* *INDENT-ON* */ if (q_prev && (q_prev->cursize < q_prev->maxsize)) { @@ -1237,7 +1290,7 @@ vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp) static void 
vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp) { - vpe_client_registration_t *reg; + vpe_client_stats_registration_t *reg; stats_main_t *sm = &stats_main; unix_shared_memory_queue_t *q, *q_prev = NULL; vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL; @@ -1249,17 +1302,20 @@ vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp) /* *INDENT-OFF* */ pool_foreach(reg, sm->stats_registrations, ({ - q = vl_api_client_index_to_input_queue (reg->client_index); - if (q) + if (reg->stats_registrations & IP6_NBR_COUNTERS) { - if (q_prev && (q_prev->cursize < q_prev->maxsize)) + q = vl_api_client_index_to_input_queue (reg->client.client_index); + if (q) { - mp_copy = vl_msg_api_alloc_as_if_client(mp_size); - clib_memcpy(mp_copy, mp, mp_size); - vl_msg_api_send_shmem (q_prev, (u8 *)&mp); - mp = mp_copy; + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + mp_copy = vl_msg_api_alloc_as_if_client(mp_size); + clib_memcpy(mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *)&mp); + mp = mp_copy; + } + q_prev = q; } - q_prev = q; } })); /* *INDENT-ON* */ @@ -1277,7 +1333,7 @@ static void vl_api_want_stats_t_handler (vl_api_want_stats_t * mp) { stats_main_t *sm = &stats_main; - vpe_client_registration_t *rp; + vpe_client_stats_registration_t *rp; vl_api_want_stats_reply_t *rmp; uword *p; i32 retval = 0; @@ -1307,9 +1363,16 @@ vl_api_want_stats_t_handler (vl_api_want_stats_t * mp) goto reply; } pool_get (sm->stats_registrations, rp); - rp->client_index = mp->client_index; - rp->client_pid = mp->pid; - hash_set (sm->stats_registration_hash, rp->client_index, + rp->client.client_index = mp->client_index; + rp->client.client_pid = mp->pid; + rp->stats_registrations |= INTERFACE_SIMPLE_COUNTERS; + rp->stats_registrations |= INTERFACE_COMBINED_COUNTERS; + rp->stats_registrations |= IP4_FIB_COUNTERS; + rp->stats_registrations |= IP4_NBR_COUNTERS; + rp->stats_registrations |= IP6_FIB_COUNTERS; + rp->stats_registrations |= IP6_NBR_COUNTERS; + + hash_set (sm->stats_registration_hash, rp->client.client_index, rp - sm->stats_registrations); reply: @@ -1331,10 +1394,386 @@ reply: vl_msg_api_send_shmem (q, (u8 *) & rmp); } +static void + vl_api_want_interface_simple_stats_t_handler + (vl_api_want_interface_simple_stats_t * mp) +{ + stats_main_t *sm = &stats_main; + vpe_client_stats_registration_t *rp; + vl_api_want_interface_simple_stats_reply_t *rmp; + uword *p; + i32 retval = 0; + unix_shared_memory_queue_t *q; + + p = hash_get (sm->stats_registration_hash, mp->client_index); + + /* Disable case */ + if (mp->enable_disable == 0) + { + if (!p) // No client to disable + { + clib_warning ("pid %d: already disabled for stats...", mp->pid); + retval = 0; + goto reply; + } + + rp = pool_elt_at_index (sm->stats_registrations, p[0]); + if (!rp->stats_registrations & INTERFACE_SIMPLE_COUNTERS) // Client but doesn't want this. 
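+ /* NB: '!' binds tighter than '&', so this actually tests (!rp->stats_registrations) & INTERFACE_SIMPLE_COUNTERS; the intended check is !(rp->stats_registrations & INTERFACE_SIMPLE_COUNTERS), the parenthesized form that the VPP-972 coverity fix later in this log applies to the sibling handlers. */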
+ { + clib_warning + ("pid %d: already disabled for interface simple stats...", + mp->pid); + retval = 0; + goto reply; + } + else + { + rp->stats_registrations &= ~(INTERFACE_SIMPLE_COUNTERS); // Clear flag + if (rp->stats_registrations == 0) // Client isn't listening to anything else + { + pool_put (sm->stats_registrations, rp); + hash_unset (sm->stats_registration_hash, mp->client_index); + } + goto reply; + } + } + /* Enable case */ + /* Get client from pool */ + if (p) + rp = pool_elt_at_index (sm->stats_registrations, p[0]); + + if (!p || !rp) // Doesn't exist, make a new entry + { + pool_get (sm->stats_registrations, rp); + rp->client.client_index = mp->client_index; + rp->client.client_pid = mp->pid; + } + rp->stats_registrations |= INTERFACE_SIMPLE_COUNTERS; + hash_set (sm->stats_registration_hash, rp->client.client_index, + rp - sm->stats_registrations); + +reply: + if (pool_elts (sm->stats_registrations)) // Someone wants something, somewhere so enable globally for now. + sm->enable_poller = 1; + else + sm->enable_poller = 0; + + q = vl_api_client_index_to_input_queue (mp->client_index); + + if (!q) + return; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY); + rmp->context = mp->context; + rmp->retval = retval; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void + vl_api_want_interface_combined_stats_t_handler + (vl_api_want_interface_combined_stats_t * mp) +{ + stats_main_t *sm = &stats_main; + vpe_client_stats_registration_t *rp; + vl_api_want_interface_combined_stats_reply_t *rmp; + uword *p; + i32 retval = 0; + unix_shared_memory_queue_t *q; + + p = hash_get (sm->stats_registration_hash, mp->client_index); + + /* Disable case */ + if (mp->enable_disable == 0) + { + if (!p) // No client to disable + { + clib_warning ("pid %d: already disabled for stats...", mp->pid); + retval = 0; + goto reply; + } + + rp = pool_elt_at_index (sm->stats_registrations, p[0]); + if (!rp->stats_registrations & INTERFACE_COMBINED_COUNTERS) // Client but doesn't want this. + { + clib_warning + ("pid %d: already disabled for interface COMBINED stats...", + mp->pid); + retval = 0; + goto reply; + } + else + { + rp->stats_registrations &= ~(INTERFACE_COMBINED_COUNTERS); // Clear flag + if (rp->stats_registrations == 0) // Client isn't listening to anything else + { + pool_put (sm->stats_registrations, rp); + hash_unset (sm->stats_registration_hash, mp->client_index); + } + goto reply; + } + } + /* Enable case */ + /* Get client from pool */ + if (p) + rp = pool_elt_at_index (sm->stats_registrations, p[0]); + + if (!p || !rp) // Doesn't exist, make a new entry + { + pool_get (sm->stats_registrations, rp); + rp->client.client_index = mp->client_index; + rp->client.client_pid = mp->pid; + } + rp->stats_registrations |= INTERFACE_COMBINED_COUNTERS; + hash_set (sm->stats_registration_hash, rp->client.client_index, + rp - sm->stats_registrations); + +reply: + if (pool_elts (sm->stats_registrations)) // Someone wants something, somewhere so enable globally for now. 
+ sm->enable_poller = 1; + else + sm->enable_poller = 0; + + q = vl_api_client_index_to_input_queue (mp->client_index); + + if (!q) + return; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY); + rmp->context = mp->context; + rmp->retval = retval; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp) +{ + stats_main_t *sm = &stats_main; + vpe_client_stats_registration_t *rp; + vl_api_want_ip4_fib_stats_reply_t *rmp; + uword *p; + i32 retval = 0; + unix_shared_memory_queue_t *q; + + p = hash_get (sm->stats_registration_hash, mp->client_index); + + /* Disable case */ + /* + $$$ FIXME: need std return codes. Still undecided if enabling already + enabled (and similar for disabled) is really a -'ve error condition or + if 0 is sufficient + */ + if (mp->enable_disable == 0) + { + if (!p) // No client to disable + { + clib_warning ("pid %d: already disabled for stats...", mp->pid); + retval = -3; + goto reply; + } + + rp = pool_elt_at_index (sm->stats_registrations, p[0]); + if (!rp->stats_registrations & IP4_FIB_COUNTERS) // Client but doesn't want this. + { + clib_warning ("pid %d: already disabled for interface ip4 fib...", + mp->pid); + retval = -2; + goto reply; + } + else + { + rp->stats_registrations &= ~(IP4_FIB_COUNTERS); // Clear flag + if (rp->stats_registrations == 0) // Client isn't listening to anything else + { + pool_put (sm->stats_registrations, rp); + hash_unset (sm->stats_registration_hash, mp->client_index); + } + goto reply; + } + } + /* Enable case */ + /* Get client from pool */ + if (p) + rp = pool_elt_at_index (sm->stats_registrations, p[0]); + + if (!p || !rp) // Doesn't exist, make a new entry + { + pool_get (sm->stats_registrations, rp); + rp->client.client_index = mp->client_index; + rp->client.client_pid = mp->pid; + } + rp->stats_registrations |= IP4_FIB_COUNTERS; + hash_set (sm->stats_registration_hash, rp->client.client_index, + rp - sm->stats_registrations); + +reply: + if (pool_elts (sm->stats_registrations)) // Someone wants something, somewhere so enable globally for now. + sm->enable_poller = 1; + else + sm->enable_poller = 0; + + q = vl_api_client_index_to_input_queue (mp->client_index); + + if (!q) + return; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY); + rmp->context = mp->context; + rmp->retval = retval; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp) +{ + stats_main_t *sm = &stats_main; + vpe_client_stats_registration_t *rp; + vl_api_want_ip6_fib_stats_reply_t *rmp; + uword *p; + i32 retval = 0; + unix_shared_memory_queue_t *q; + + p = hash_get (sm->stats_registration_hash, mp->client_index); + + /* Disable case */ + /* + $$$ FIXME: need std return codes. Still undecided if enabling already + enabled (and similar for disabled) is really a -'ve error condition or + if 0 is sufficient + */ + if (mp->enable_disable == 0) + { + if (!p) // No client to disable + { + clib_warning ("pid %d: already disabled for stats...", mp->pid); + retval = -3; + goto reply; + } + + rp = pool_elt_at_index (sm->stats_registrations, p[0]); + if (!rp->stats_registrations & IP6_FIB_COUNTERS) // Client but doesn't want this. 
+ { + clib_warning ("pid %d: already disabled for interface ip6 fib...", + mp->pid); + retval = -2; + goto reply; + } + else + { + rp->stats_registrations &= ~(IP6_FIB_COUNTERS); // Clear flag + if (rp->stats_registrations == 0) // Client isn't listening to anything else + { + pool_put (sm->stats_registrations, rp); + hash_unset (sm->stats_registration_hash, mp->client_index); + } + goto reply; + } + } + /* Enable case */ + /* Get client from pool */ + if (p) + rp = pool_elt_at_index (sm->stats_registrations, p[0]); + + if (!p || !rp) // Doesn't exist, make a new entry + { + pool_get (sm->stats_registrations, rp); + rp->client.client_index = mp->client_index; + rp->client.client_pid = mp->pid; + } + rp->stats_registrations |= IP6_FIB_COUNTERS; + hash_set (sm->stats_registration_hash, rp->client.client_index, + rp - sm->stats_registrations); + +reply: + if (pool_elts (sm->stats_registrations)) // Someone wants something, somewhere so enable globally for now. + sm->enable_poller = 1; + else + sm->enable_poller = 0; + + q = vl_api_client_index_to_input_queue (mp->client_index); + + if (!q) + return; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY); + rmp->context = mp->context; + rmp->retval = retval; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +/* FIXME - NBR stats broken - this will be fixed in subsequent patch */ +static void +vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp) +{ +} + +static void +vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp) +{ +} + +static void +vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp) +{ + stats_main_t *sm = &stats_main; + vnet_interface_main_t *im = sm->interface_main; + vl_api_vnet_get_summary_stats_reply_t *rmp; + vlib_combined_counter_main_t *cm; + vlib_counter_t v; + int i, which; + u64 total_pkts[VLIB_N_RX_TX]; + u64 total_bytes[VLIB_N_RX_TX]; + + unix_shared_memory_queue_t *q = + vl_api_client_index_to_input_queue (mp->client_index); + + if (!q) + return; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY); + rmp->context = mp->context; + rmp->retval = 0; + + memset (total_pkts, 0, sizeof (total_pkts)); + memset (total_bytes, 0, sizeof (total_bytes)); + + vnet_interface_counter_lock (im); + + vec_foreach (cm, im->combined_sw_if_counters) + { + which = cm - im->combined_sw_if_counters; + + for (i = 0; i < vlib_combined_counter_n_counters (cm); i++) + { + vlib_get_combined_counter (cm, i, &v); + total_pkts[which] += v.packets; + total_bytes[which] += v.bytes; + } + } + vnet_interface_counter_unlock (im); + + rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]); + rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]); + rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]); + rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]); + rmp->vector_rate = + clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main)); + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + int stats_memclnt_delete_callback (u32 client_index) { - vpe_client_registration_t *rp; + vpe_client_stats_registration_t *rp; stats_main_t *sm = &stats_main; uword *p; diff --git a/src/vpp/stats/stats.h b/src/vpp/stats/stats.h index 024dc78e..167b09bd 100644 --- a/src/vpp/stats/stats.h +++ b/src/vpp/stats/stats.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -35,6 +36,44 @@ typedef struct int 
tag; } data_structure_lock_t; +typedef struct +{ + vpe_client_registration_t client; + u8 stats_registrations; +#define INTERFACE_SIMPLE_COUNTERS (1 << 0) +#define INTERFACE_COMBINED_COUNTERS (1 << 1) +#define IP4_FIB_COUNTERS (1 << 2) +#define IP4_NBR_COUNTERS (1 << 3) +#define IP6_FIB_COUNTERS (1 << 4) +#define IP6_NBR_COUNTERS (1 << 5) + +} vpe_client_stats_registration_t; + +/* from .../vnet/vnet/ip/lookup.c. Yuck */ +typedef CLIB_PACKED (struct + { + ip4_address_t address; +u32 address_length: 6; +u32 index: 26; + }) ip4_route_t; + +typedef struct +{ + ip6_address_t address; + u32 address_length; + u32 index; +} ip6_route_t; + + +typedef struct +{ + ip4_route_t *ip4routes; + ip6_route_t *ip6routes; + fib_table_t **fibs; + hash_pair_t **pvec; + uword *results; +} do_ip46_fibs_t; + typedef struct { void *mheap; @@ -45,7 +84,8 @@ typedef struct u32 enable_poller; uword *stats_registration_hash; - vpe_client_registration_t *stats_registrations; + vpe_client_stats_registration_t *stats_registrations; + vpe_client_stats_registration_t **regs; /* control-plane data structure lock */ data_structure_lock_t *data_structure_lock; @@ -53,6 +93,9 @@ typedef struct /* bail out of FIB walk if set */ clib_longjmp_t jmp_buf; + /* Vectors for Distribution funcs: do_ip4_fibs and do_ip6_fibs. */ + do_ip46_fibs_t do_ip46_fibs; + /* convenience */ vlib_main_t *vlib_main; vnet_main_t *vnet_main; -- cgit 1.2.3-korg From 0eacfa32687730ae6d7dbc67201165fac26682d6 Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Wed, 13 Sep 2017 09:18:08 -0400 Subject: VPP-972: fix coverity warnings in the stats scraper Change-Id: I8843e57105c6c29bcdaf1e6e3bf8e9caf6bfd2ec Signed-off-by: Dave Barach --- src/vpp/stats/stats.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'src/vpp/stats/stats.c') diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c index c78a8aef..ff52c4db 100644 --- a/src/vpp/stats/stats.c +++ b/src/vpp/stats/stats.c @@ -1495,7 +1495,7 @@ static void } rp = pool_elt_at_index (sm->stats_registrations, p[0]); - if (!rp->stats_registrations & INTERFACE_COMBINED_COUNTERS) // Client but doesn't want this. + if (!(rp->stats_registrations & INTERFACE_COMBINED_COUNTERS)) // Client but doesn't want this. { clib_warning ("pid %d: already disabled for interface COMBINED stats...", @@ -1576,7 +1576,7 @@ vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp) } rp = pool_elt_at_index (sm->stats_registrations, p[0]); - if (!rp->stats_registrations & IP4_FIB_COUNTERS) // Client but doesn't want this. + if (!(rp->stats_registrations & IP4_FIB_COUNTERS)) // Client but doesn't want this. { clib_warning ("pid %d: already disabled for interface ip4 fib...", mp->pid); @@ -1656,7 +1656,7 @@ vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp) } rp = pool_elt_at_index (sm->stats_registrations, p[0]); - if (!rp->stats_registrations & IP6_FIB_COUNTERS) // Client but doesn't want this. + if (!(rp->stats_registrations & IP6_FIB_COUNTERS)) // Client but doesn't want this. { clib_warning ("pid %d: already disabled for interface ip6 fib...", mp->pid); -- cgit 1.2.3-korg From aeedf2665cd6173a17ca844db34ecaf8997dcf62 Mon Sep 17 00:00:00 2001 From: Ole Troan Date: Wed, 13 Sep 2017 09:28:48 +0200 Subject: STATS: Refactor missed adding messages to CRC dictionary. 
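Registering each (message name, CRC) pair lets API clients resolve message ids at run time and detect stale message definitions, instead of trusting compile-time numbering. A minimal client-side sketch of the lookup; the "_deadbeef" suffix is a made-up placeholder for a generated CRC, not a real value:

    /* Resolve a stats message id from its "name_crc" key; real keys come
       from the generated foreach_vl_msg_name_crc_stats list. */
    u32 msg_id = vl_msg_api_get_msg_index ((u8 *) "want_stats_reply_deadbeef");
    if (msg_id == (u32) ~0)
      clib_warning ("want_stats_reply: name/CRC unknown to this vpp image");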
Change-Id: I0d74856d4fd999dd9ca5886b8375ab8dd700b8a9 Signed-off-by: Ole Troan --- src/vpp/stats/stats.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'src/vpp/stats/stats.c') diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c index ff52c4db..b07dc17e 100644 --- a/src/vpp/stats/stats.c +++ b/src/vpp/stats/stats.c @@ -60,6 +60,19 @@ _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \ _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) +#define vl_msg_name_crc_list +#include +#undef vl_msg_name_crc_list + +static void +setup_message_id_table (api_main_t * am) +{ +#define _(id,n,crc) \ + vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id); + foreach_vl_msg_name_crc_stats; +#undef _ +} + /* These constants ensure msg sizes <= 1024, aka ring allocation */ #define SIMPLE_COUNTER_BATCH_SIZE 126 #define COMBINED_COUNTER_BATCH_SIZE 63 @@ -1836,6 +1849,11 @@ stats_init (vlib_main_t * vm) am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1; am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1; + /* + * Set up the (msg_name, crc, message-id) table + */ + setup_message_id_table (am); + return 0; } -- cgit 1.2.3-korg From 831fb59f2eba1abbd1e49fc7dce58172f0842258 Mon Sep 17 00:00:00 2001 From: "Keith Burns (alagalah)" Date: Tue, 12 Sep 2017 15:12:17 -0700 Subject: Stats refactor - added per-interface simple/combined counters - refactored fib/nbr API to use common registration scheme - refactored "want_stats" and "want_interface" to use per interface registration scheme - fixed issues with SEGV when client disconnects abruptly. Change-Id: Ib701bd8e4105d03548259217bfc809bd738b7c72 Signed-off-by: Keith Burns (alagalah) --- src/vnet/interface.api | 76 ++- src/vpp/stats/stats.api | 43 ++ src/vpp/stats/stats.c | 1425 ++++++++++++++++++++++++++++++++--------------- src/vpp/stats/stats.h | 113 +++- src/vpp/stats/stats.reg | 42 ++ 5 files changed, 1234 insertions(+), 465 deletions(-) create mode 100644 src/vpp/stats/stats.reg (limited to 'src/vpp/stats/stats.c') diff --git a/src/vnet/interface.api b/src/vnet/interface.api index 7a3743dd..94ecdd9f 100644 --- a/src/vnet/interface.api +++ b/src/vnet/interface.api @@ -227,16 +227,61 @@ typeonly manual_print manual_endian define vlib_counter u64 bytes; /**< byte counter */ }; +/** \brief Combined interface counter data type for vnet_interface_combined_counters + @param sw_if_index - interface indexes for counters + @param rx_packets - received packet count + @param rx_bytes - received byte count + @param tx_packets - transmitted packet count + @param tx_bytes - transmitted byte count + +*/ +typeonly manual_print manual_endian define vnet_combined_counter +{ + u32 sw_if_index; + u64 rx_packets; /**< packet counter */ + u64 rx_bytes; /**< byte counter */ + u64 tx_packets; /**< packet counter */ + u64 tx_bytes; /**< byte counter */ +}; + +/** \brief Simple interface counter data type for vnet_interface_simple_counters + @param sw_if_index - interface indexes for counters + @param drop - RX or TX drops due to buffer starvation + @param punt - used with VNET "punt" disposition + @param rx_ip4 - received IP4 packets + @param rx_ip6 - received IP6 packets + @param rx_no_buffer - no RX buffers available + @param rx_miss - receive misses + @param rx_error - receive errors + @param tx_error - transmit errors + @param rx_mpls - received MPLS packet + +*/ +typeonly manual_print manual_endian define vnet_simple_counter +{ + u32 sw_if_index; + u64 drop; + u64 punt; + u64 rx_ip4; + u64 rx_ip6; + u64 rx_no_buffer; + u64 rx_miss; + u64 rx_error; + u64 tx_error; + 
u64 rx_mpls; +}; + /** \brief Simple stats counters structure @param vnet_counter_type- such as ip4, ip6, punts, etc @param first_sw_if_index - first sw index in block of index, counts @param count - number of counters, equal to the number of interfaces in this stats block @param data - contiguous block of u64 counters + + vnet_counter_type defined in enums - plural - in vnet/interface.h */ manual_print manual_endian define vnet_interface_simple_counters { - /* enums - plural - in vnet/interface.h */ u8 vnet_counter_type; u32 first_sw_if_index; u32 count; @@ -249,16 +294,43 @@ manual_print manual_endian define vnet_interface_simple_counters @param count - number of counters, equal to the number of interfaces in this stats block @param data - contiguous block of vlib_counter_t structures + + vnet_counter_type defined in enums - plural - in vnet/interface.h */ manual_print manual_endian define vnet_interface_combined_counters { - /* enums - plural - in vnet/interface.h */ u8 vnet_counter_type; u32 first_sw_if_index; u32 count; vl_api_vlib_counter_t data[count]; }; + +/** \brief Simple per interface stats counters structure + @param count - number of elements in message + @param timestamp - u32 vlib timestamp for control plane + @param data[count] - vl_api_vnet_simple_counter_t + +*/ +manual_print manual_endian define vnet_per_interface_simple_counters +{ + u32 count; + u32 timestamp; + vl_api_vnet_simple_counter_t data[count]; +}; + +/** \brief Combined stats counters structure per interface + @param count - number of elements in message + @param timestamp - u32 vlib timestamp for control plane + @param data[count] - vl_api_vnet_combined_counter_t +*/ +manual_print manual_endian define vnet_per_interface_combined_counters +{ + u32 count; + u32 timestamp; + vl_api_vnet_combined_counter_t data[count]; +}; + /** \brief Set unnumbered interface add / del request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request diff --git a/src/vpp/stats/stats.api b/src/vpp/stats/stats.api index 7f745859..caf8f515 100644 --- a/src/vpp/stats/stats.api +++ b/src/vpp/stats/stats.api @@ -38,6 +38,8 @@ autoreply define want_stats @param context - sender context, to match reply w/ request @param enable_disable - 1 = enable stats, 0 = disable @param pid - pid of process requesting stats updates + + Please consider using want_per_interface_simple_stats with sw_if_index=~0 */ autoreply define want_interface_simple_stats { @@ -47,11 +49,33 @@ autoreply define want_interface_simple_stats u32 pid; }; +/** \brief Want Per Interface simple Stats, register for continuous stats + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param enable_disable - 1 = enable stats, 0 = disable + @param pid - pid of process requesting stats updates + @param num - number of sw_if_indexes + @param sw_ifs - array of sw_if_index +*/ +autoreply define want_per_interface_simple_stats +{ + u32 client_index; + u32 context; + u32 enable_disable; + u32 pid; + u32 num; + u32 sw_ifs[num]; + +}; + /** \brief Want Interface Combined Stats, register for continuous stats @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param enable_disable - 1 = enable stats, 0 = disable @param pid - pid of process requesting stats updates + + Please consider using want_per_interface_combined_stats with sw_if_index=~0 + */ autoreply define 
want_interface_combined_stats { @@ -61,6 +85,25 @@ autoreply define want_interface_combined_stats u32 pid; }; +/** \brief Want Per Interface Combined Stats, register for continuous stats + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param enable_disable - 1 = enable stats, 0 = disable + @param pid - pid of process requesting stats updates + @param num - number of sw_if_indexes + @param sw_ifs - array of sw_if_index +*/ +autoreply define want_per_interface_combined_stats +{ + u32 client_index; + u32 context; + u32 enable_disable; + u32 pid; + u32 num; + u32 sw_ifs[num]; + +}; + /** \brief Want IP4 FIB Stats, register for continuous stats @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c index b07dc17e..ac364e88 100644 --- a/src/vpp/stats/stats.c +++ b/src/vpp/stats/stats.c @@ -49,6 +49,8 @@ _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \ _(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \ _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \ _(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \ +_(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \ +_(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \ _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \ _(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \ _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \ @@ -248,6 +250,192 @@ stats_dsunlock (int hint, int tag) dsunlock (sm); } +static vpe_client_registration_t * +get_client_for_stat (u32 reg, u32 item, u32 client_index) +{ + stats_main_t *sm = &stats_main; + vpe_client_stats_registration_t *registration; + uword *p; + + /* Is there anything listening for item in that reg */ + p = hash_get (sm->stats_registration_hash[reg], item); + + if (!p) + return 0; // Fail + + /* If there is, is our client_index one of them */ + registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]); + p = hash_get (registration->client_hash, client_index); + + if (!p) + return 0; // Fail + + return pool_elt_at_index (registration->clients, p[0]); + +} + +static int +set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t * client) +{ + stats_main_t *sm = &stats_main; + vpe_client_stats_registration_t *registration; + vpe_client_registration_t *cr; + uword *p; + + /* Is there anything listening for item in that reg */ + p = hash_get (sm->stats_registration_hash[reg], item); + + if (!p) + { + pool_get (sm->stats_registrations[reg], registration); + registration->item = item; + hash_set (sm->stats_registration_hash[reg], item, + registration - sm->stats_registrations[reg]); + } + else + { + registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]); + } + + p = hash_get (registration->client_hash, client->client_index); + + if (!p) + { + pool_get (registration->clients, cr); + cr->client_index = client->client_index; + cr->client_pid = client->client_pid; + hash_set (registration->client_hash, cr->client_index, + cr - registration->clients); + } + + return 1; //At least one client is doing something ... 
poll +} + +int +clear_client_for_stat (u32 reg, u32 item, u32 client_index) +{ + stats_main_t *sm = &stats_main; + vpe_client_stats_registration_t *registration; + vpe_client_registration_t *client; + uword *p; + int i, elts; + + /* Clear the client first */ + /* Is there anything listening for item in that reg */ + p = hash_get (sm->stats_registration_hash[reg], item); + + if (!p) + goto exit; + + /* If there is, is our client_index one of them */ + registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]); + p = hash_get (registration->client_hash, client_index); + + if (!p) + goto exit; + + client = pool_elt_at_index (registration->clients, p[0]); + hash_unset (registration->client_hash, client->client_index); + pool_put (registration->clients, client); + + /* Now check if that was the last client for that item */ + if (0 == pool_elts (registration->clients)) + { + hash_unset (sm->stats_registration_hash[reg], item); + pool_put (sm->stats_registrations[reg], registration); + } + +exit: + elts = 0; + /* Now check if that was the last item in any of the listened to stats */ + for (i = 0; i < STATS_REG_N_IDX; i++) + { + elts += pool_elts (sm->stats_registrations[i]); + } + return elts; +} + +vpe_client_registration_t * +get_clients_for_stat (u32 reg, u32 item) +{ + stats_main_t *sm = &stats_main; + vpe_client_registration_t *client, *clients = 0; + vpe_client_stats_registration_t *registration; + uword *p; + + /* Is there anything listening for item in that reg */ + p = hash_get (sm->stats_registration_hash[reg], item); + + if (!p) + return 0; // Fail + + /* If there is, is our client_index one of them */ + registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]); + + vec_reset_length (clients); + pool_foreach (client, registration->clients, ( + { + vec_add1 (clients, *client);} + )); + return clients; +} + + +static void +clear_client_reg (u32 ** registrations) +{ + /* When registrations[x] is a vector of pool indices + here is a good place to clean up the pools + */ +#define stats_reg(n) vec_free(registrations[IDX_##n]); +#include +#undef stats_reg + + vec_free (registrations); +} + +u32 ** +init_client_reg (u32 ** registrations) +{ + + /* + Initialise the stats registrations for each + type of stat a client can register for as well as + a vector of "interested" indexes. + Initially this is a u32 of either sw_if_index or fib_index + but eventually this should migrate to a pool_index (u32) + with a type specific pool that can include more complex things + such as timing and structured events. + */ + vec_validate (registrations, STATS_REG_N_IDX); +#define stats_reg(n) \ + vec_reset_length(registrations[IDX_##n]); +#include +#undef stats_reg + + /* + When registrations[x] is a vector of pool indices, here + is a good place to init the pools. + */ + return registrations; +} + +u32 ** +enable_all_client_reg (u32 ** registrations) +{ + + /* + Enable all stats known by adding + ~0 to the index vector. Eventually this + should be deprecated. 
+ */ +#define stats_reg(n) \ + vec_add1(registrations[IDX_##n], ~0); +#include +#undef stats_reg + return registrations; +} + static void do_simple_interface_counters (stats_main_t * sm) { @@ -302,6 +490,131 @@ do_simple_interface_counters (stats_main_t * sm) vnet_interface_counter_unlock (im); } +void +handle_client_registration (vpe_client_registration_t * client, u32 stat, + u32 item, int enable_disable) +{ + stats_main_t *sm = &stats_main; + vpe_client_registration_t *rp, _rp; + + rp = get_client_for_stat (stat, item, client->client_index); + + /* Disable case */ + if (enable_disable == 0) + { + if (!rp) // No client to disable + { + clib_warning ("pid %d: already disabled for stats...", + client->client_pid); + return; + } + sm->enable_poller = + clear_client_for_stat (stat, item, client->client_index); + return; + } + /* Enable case */ + if (!rp) + { + rp = &_rp; + rp->client_index = client->client_index; + rp->client_pid = client->client_pid; + sm->enable_poller = set_client_for_stat (stat, item, rp); + } +} + + +/********************************** + * ALL Interface Combined stats - to be deprecated + **********************************/ + +/* + * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index. + */ +static void + vl_api_want_interface_combined_stats_t_handler + (vl_api_want_interface_combined_stats_t * mp) +{ + stats_main_t *sm = &stats_main; + vpe_client_registration_t rp; + vl_api_want_interface_combined_stats_reply_t *rmp; + uword *p; + i32 retval = 0; + unix_shared_memory_queue_t *q; + u32 swif; + + swif = ~0; //Using same mechanism as _per_interface_ + rp.client_index = mp->client_index; + rp.client_pid = mp->pid; + + handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif, + mp->enable_disable); + +reply: + q = vl_api_client_index_to_input_queue (mp->client_index); + + if (!q) + { + sm->enable_poller = + clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif, + mp->client_index); + return; + } + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY); + rmp->context = mp->context; + rmp->retval = retval; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void + vl_api_vnet_interface_combined_counters_t_handler + (vl_api_vnet_interface_combined_counters_t * mp) +{ + vpe_client_registration_t *clients, client; + stats_main_t *sm = &stats_main; + unix_shared_memory_queue_t *q, *q_prev = NULL; + vl_api_vnet_interface_combined_counters_t *mp_copy = NULL; + u32 mp_size; + int i; + + mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t)); + + clients = + get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, + ~0 /*flag for all */ ); + + for (i = 0; i < vec_len (clients); i++) + { + client = clients[i]; + q = vl_api_client_index_to_input_queue (client.client_index); + if (q) + { + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + mp_copy = vl_msg_api_alloc_as_if_client (mp_size); + clib_memcpy (mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *) & mp); + mp = mp_copy; + } + q_prev = q; + } + } +#if STATS_DEBUG > 0 + fformat (stdout, "%U\n", format_vnet_combined_counters, mp); +#endif + + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + vl_msg_api_send_shmem (q_prev, (u8 *) & mp); + } + else + { + vl_msg_api_free (mp); + } +} + static void do_combined_interface_counters (stats_main_t * sm) { @@ -354,6 +667,431 @@ do_combined_interface_counters (stats_main_t * sm) vnet_interface_counter_unlock (im); } 
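All of the *_counters_t_handler distribution functions in this file share one fan-out idiom: walk the registered clients, clone the message for every queue except the last, and send the previous queue its buffer as soon as the next live queue is found. A condensed sketch of the pattern, with the clients vector, mp, mp_copy and mp_size as in the handlers and error paths elided:

    /* Fan one stats message out to N client queues using N-1 copies. */
    unix_shared_memory_queue_t *q, *q_prev = NULL;
    int i;
    for (i = 0; i < vec_len (clients); i++)
      {
        q = vl_api_client_index_to_input_queue (clients[i].client_index);
        if (q == NULL)
          continue;                 /* client vanished; nothing to send */
        if (q_prev && (q_prev->cursize < q_prev->maxsize))
          {
            mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
            clib_memcpy (mp_copy, mp, mp_size);
            vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
            mp = mp_copy;           /* keep a buffer for the next queue */
          }
        q_prev = q;
      }
    /* The last live queue gets the original; free it if no queue had room. */
    if (q_prev && (q_prev->cursize < q_prev->maxsize))
      vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
    else
      vl_msg_api_free (mp);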
+/**********************************
+ * Per Interface Combined stats
+ **********************************/
+
+/* Request from client registering interfaces it wants */
+static void
+  vl_api_want_per_interface_combined_stats_t_handler
+  (vl_api_want_per_interface_combined_stats_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_registration_t rp;
+  vl_api_want_per_interface_combined_stats_reply_t *rmp;
+  vlib_combined_counter_main_t *cm;
+  uword *p;
+  i32 retval = 0;
+  unix_shared_memory_queue_t *q;
+  int i;
+  u32 swif;
+
+  // Validate we have good sw_if_indexes before registering
+  for (i = 0; i < mp->num; i++)
+    {
+      swif = mp->sw_ifs[i];
+
+      /* Check it's a real sw_if_index that the client is allowed to see */
+      if (swif != ~0)
+        {
+          if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
+            {
+              retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+              goto reply;
+            }
+        }
+    }
+
+  for (i = 0; i < mp->num; i++)
+    {
+      swif = mp->sw_ifs[i];
+
+      rp.client_index = mp->client_index;
+      rp.client_pid = mp->pid;
+      handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
+                                  swif, mp->enable_disable);
+    }
+
+reply:
+  q = vl_api_client_index_to_input_queue (mp->client_index);
+
+  if (!q)
+    {
+      for (i = 0; i < mp->num; i++)
+        {
+          swif = mp->sw_ifs[i];
+          sm->enable_poller =
+            clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
+                                   mp->client_index);
+        }
+      return;
+    }
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = retval;
+
+  vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+/* Per Interface Combined distribution to client */
+static void
+do_combined_per_interface_counters (stats_main_t * sm)
+{
+  vl_api_vnet_per_interface_combined_counters_t *mp = 0;
+  vnet_interface_main_t *im = sm->interface_main;
+  api_main_t *am = sm->api_main;
+  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+  unix_shared_memory_queue_t *q = NULL;
+  vlib_combined_counter_main_t *cm;
+  /*
+   * items_this_message will eventually be used to optimise the batching
+   * of per-client messages for each stat. For now set this to 1 and
+   * iterate. This does not affect the API.
+   *
+   * FIXME instead of enqueueing here, this should be sent to a batch
+   * storer for per-client transmission. Each "mp" sent would be a single entry
+   * and if a client is listening to other sw_if_indexes for same, it would be
+   * appended to that *mp
+   */
+  u32 items_this_message = 1;
+  vnet_combined_counter_t *vp = 0;
+  vlib_counter_t v;
+  int i, j;
+  u32 timestamp;
+  vpe_client_stats_registration_t *reg;
+  vpe_client_registration_t *client;
+  u32 *sw_if_index = 0;
+
+  /*
+     FIXME(s):
+     - capturing the timestamp of the counters "when VPP knew them" is important.
+     Less so is that the timing of the delivery to the control plane be in the
+     same timescale.
+
+     i.e. As long as the control plane can delta messages from VPP and work out
+     velocity etc. based on the timestamp, it can do so in a more "batch mode".
+
+     It would be beneficial to keep a "per-client" message queue, and then
+     batch all the stat messages for a client into one message, with
+     discrete timestamps.
+
+     Given this particular API is "per interface", one assumes that the scale
+     is less than the ~0 case, which the prior API is suited for.
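+
+     For example, given two samples carrying VPP timestamps t1 and t2,
+     a client can compute a rate that is independent of delivery delay:
+
+       rate_pps = (rx_packets@t2 - rx_packets@t1) / (t2 - t1)
+
+     using the embedded timestamps rather than message arrival times.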
+   */
+  vnet_interface_counter_lock (im);
+
+  timestamp = vlib_time_now (sm->vlib_main);
+
+  vec_reset_length (sm->regs_tmp);
+  pool_foreach (reg,
+                sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
+    ({
+      vec_add1 (sm->regs_tmp, reg);
+    }));
+
+  for (i = 0; i < vec_len (sm->regs_tmp); i++)
+    {
+      reg = sm->regs_tmp[i];
+      if (reg->item == ~0)
+        {
+          vnet_interface_counter_unlock (im);
+          do_combined_interface_counters (sm);
+          vnet_interface_counter_lock (im);
+          continue;
+        }
+      vec_reset_length (sm->clients_tmp);
+      pool_foreach (client, reg->clients,
+        ({
+          vec_add1 (sm->clients_tmp, client);
+        }));
+
+      //FIXME - should be doing non-variant part of mp here and managing
+      // any alloc per client in that vec_foreach
+      for (j = 0; j < vec_len (sm->clients_tmp); j++)
+        {
+          client = sm->clients_tmp[j];
+          q = vl_api_client_index_to_input_queue (client->client_index);
+
+          //Client may have disconnected abruptly, clean up so we don't
+          //keep polling for it.
+          if (!q)
+            {
+              sm->enable_poller =
+                clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
                                       reg->item, client->client_index);
+              continue;
+            }
+
+          mp = vl_msg_api_alloc (sizeof (*mp) +
+                                 (items_this_message *
+                                  (sizeof (*vp) /* rx */ )));
+
+          // FIXME when optimising for items_this_message > 1 need to include a
+          // SIMPLE_INTERFACE_BATCH_SIZE check.
+          mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
+
+          mp->count = items_this_message;
+          mp->timestamp = timestamp;
+          vp = (vnet_combined_counter_t *) mp->data;
+
+          vp->sw_if_index = htonl (reg->item);
+
+          cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
+          vlib_get_combined_counter (cm, reg->item, &v);
+          clib_mem_unaligned (&vp->rx_packets, u64)
+            = clib_host_to_net_u64 (v.packets);
+          clib_mem_unaligned (&vp->rx_bytes, u64) =
+            clib_host_to_net_u64 (v.bytes);
+
+
+          /* TX vlib_counter_t packets/bytes */
+          cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
+          vlib_get_combined_counter (cm, reg->item, &v);
+          clib_mem_unaligned (&vp->tx_packets, u64)
+            = clib_host_to_net_u64 (v.packets);
+          clib_mem_unaligned (&vp->tx_bytes, u64) =
+            clib_host_to_net_u64 (v.bytes);
+
+          vl_msg_api_send_shmem (q, (u8 *) & mp);
+        }
+    }
+
+  vnet_interface_counter_unlock (im);
+}
+
+/**********************************
+ * Per Interface simple stats
+ **********************************/
+
+/* Request from client registering interfaces it wants */
+static void
+  vl_api_want_per_interface_simple_stats_t_handler
+  (vl_api_want_per_interface_simple_stats_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_registration_t rp;
+  vl_api_want_per_interface_simple_stats_reply_t *rmp;
+  vlib_simple_counter_main_t *cm;
+  uword *p;
+  i32 retval = 0;
+  unix_shared_memory_queue_t *q;
+  int i;
+  u32 swif;
+
+  for (i = 0; i < mp->num; i++)
+    {
+      swif = mp->sw_ifs[i];
+
+      /* Check it's a real sw_if_index that the client is allowed to see */
+      if (swif != ~0)
+        {
+          if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
+            {
+              retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+              goto reply;
+            }
+        }
+    }
+
+  for (i = 0; i < mp->num; i++)
+    {
+      swif = mp->sw_ifs[i];
+
+      rp.client_index = mp->client_index;
+      rp.client_pid = mp->pid;
+      handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
+                                  swif, mp->enable_disable);
+    }
+
+reply:
+  q = vl_api_client_index_to_input_queue (mp->client_index);
+
+  //Client may have disconnected abruptly, clean up so we don't
+  //keep polling for it.
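+  // (clear_client_for_stat () returns the number of registrations
+  //  remaining across all stat types; assigning its result to
+  //  sm->enable_poller turns the poller off once the last
+  //  registration is gone.)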
+  if (!q)
+    {
+      for (i = 0; i < mp->num; i++)
+        {
+          swif = mp->sw_ifs[i];
+          sm->enable_poller =
+            clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
+                                   mp->client_index);
+        }
+
+      return;
+    }
+
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = retval;
+
+  vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+/* Per Interface Simple distribution to client */
+static void
+do_simple_per_interface_counters (stats_main_t * sm)
+{
+  vl_api_vnet_per_interface_simple_counters_t *mp = 0;
+  vnet_interface_main_t *im = sm->interface_main;
+  api_main_t *am = sm->api_main;
+  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+  unix_shared_memory_queue_t *q = NULL;
+  vlib_simple_counter_main_t *cm;
+  /*
+   * items_this_message will eventually be used to optimise the batching
+   * of per-client messages for each stat. For now set this to 1 and
+   * iterate. This does not affect the API.
+   *
+   * FIXME instead of enqueueing here, this should be sent to a batch
+   * storer for per-client transmission. Each "mp" sent would be a single entry
+   * and if a client is listening to other sw_if_indexes for same, it would be
+   * appended to that *mp
+   */
+  u32 items_this_message = 1;
+  int i, j, size;
+  vpe_client_stats_registration_t *reg;
+  vpe_client_registration_t *client;
+  u32 timestamp;
+  u32 count;
+  vnet_simple_counter_t *vp = 0;
+  counter_t v;
+
+  /*
+     FIXME(s):
+     - capturing the timestamp of the counters "when VPP knew them" is important.
+     Less so is that the timing of the delivery to the control plane be in the
+     same timescale.
+
+     i.e. As long as the control plane can delta messages from VPP and work out
+     velocity etc. based on the timestamp, it can do so in a more "batch mode".
+
+     It would be beneficial to keep a "per-client" message queue, and then
+     batch all the stat messages for a client into one message, with
+     discrete timestamps.
+
+     Given this particular API is "per interface", one assumes that the scale
+     is less than the ~0 case, which the prior API is suited for.
+   */
+  vnet_interface_counter_lock (im);
+
+  timestamp = vlib_time_now (sm->vlib_main);
+
+  vec_reset_length (sm->regs_tmp);
+  pool_foreach (reg,
+                sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS],
+    ({
+      vec_add1 (sm->regs_tmp, reg);
+    }));
+
+  for (i = 0; i < vec_len (sm->regs_tmp); i++)
+    {
+      reg = sm->regs_tmp[i];
+      if (reg->item == ~0)
+        {
+          vnet_interface_counter_unlock (im);
+          do_simple_interface_counters (sm);
+          vnet_interface_counter_lock (im);
+          continue;
+        }
+      vec_reset_length (sm->clients_tmp);
+      pool_foreach (client, reg->clients,
+        ({
+          vec_add1 (sm->clients_tmp, client);
+        }));
+
+      //FIXME - should be doing non-variant part of mp here and managing
+      // any alloc per client in that vec_foreach
+      for (j = 0; j < vec_len (sm->clients_tmp); j++)
+        {
+          client = sm->clients_tmp[j];
+          q = vl_api_client_index_to_input_queue (client->client_index);
+
+          //Client may have disconnected abruptly, clean up so we don't
+          //keep polling for it.
+          if (!q)
+            {
+              sm->enable_poller =
+                clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
+                                       reg->item, client->client_index);
+              continue;
+            }
+
+          size = (sizeof (*mp) + (items_this_message * (sizeof (u64) * 10)));
+          mp = vl_msg_api_alloc (size);
+          // FIXME when optimising for items_this_message > 1 need to include a
+          // SIMPLE_INTERFACE_BATCH_SIZE check.
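+          // (With items_this_message = 1, the 10 * sizeof (u64) per-item
+          //  slot covers one vnet_simple_counter_t: the nine u64 counters
+          //  filled in below -- drop, punt, rx_ip4, rx_ip6, rx_no_buffer,
+          //  rx_miss, rx_error, tx_error, rx_mpls -- plus its sw_if_index.)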
+ mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS); + + mp->count = items_this_message; + mp->timestamp = timestamp; + vp = (vnet_simple_counter_t *) mp->data; + + vp->sw_if_index = htonl (reg->item); + + //FIXME will be simpler with a preprocessor macro + // VNET_INTERFACE_COUNTER_DROP + cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_DROP; + v = vlib_get_simple_counter (cm, reg->item); + clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v); + + // VNET_INTERFACE_COUNTER_PUNT + cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_PUNT; + v = vlib_get_simple_counter (cm, reg->item); + clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v); + + // VNET_INTERFACE_COUNTER_IP4 + cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP4; + v = vlib_get_simple_counter (cm, reg->item); + clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v); + + //VNET_INTERFACE_COUNTER_IP6 + cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP6; + v = vlib_get_simple_counter (cm, reg->item); + clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v); + + //VNET_INTERFACE_COUNTER_RX_NO_BUF + cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_NO_BUF; + v = vlib_get_simple_counter (cm, reg->item); + clib_mem_unaligned (&vp->rx_no_buffer, u64) = + clib_host_to_net_u64 (v); + + //VNET_INTERFACE_COUNTER_RX_MISS + cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_MISS; + v = vlib_get_simple_counter (cm, reg->item); + clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v); + + //VNET_INTERFACE_COUNTER_RX_ERROR + cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_ERROR; + v = vlib_get_simple_counter (cm, reg->item); + clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v); + + //VNET_INTERFACE_COUNTER_TX_ERROR + cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_TX_ERROR; + v = vlib_get_simple_counter (cm, reg->item); + clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v); + + //VNET_INTERFACE_COUNTER_MPLS + cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_MPLS; + v = vlib_get_simple_counter (cm, reg->item); + clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v); + + vl_msg_api_send_shmem (q, (u8 *) & mp); + } + } + + vnet_interface_counter_unlock (im); +} + +/********************************** + * Per FIB IP4 stats + **********************************/ + static void ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec) { @@ -475,7 +1213,7 @@ ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx) } static void -do_ip4_nbrs (stats_main_t * sm) +do_ip4_nbr_counters (stats_main_t * sm) { vnet_main_t *vnm = vnet_get_main (); vnet_interface_main_t *im = &vnm->interface_main; @@ -630,7 +1368,7 @@ ip6_nbr_ship (stats_main_t * sm, } static void -do_ip6_nbrs (stats_main_t * sm) +do_ip6_nbr_counters (stats_main_t * sm) { vnet_main_t *vnm = vnet_get_main (); vnet_interface_main_t *im = &vnm->interface_main; @@ -682,7 +1420,7 @@ do_ip6_nbrs (stats_main_t * sm) } static void -do_ip4_fibs (stats_main_t * sm) +do_ip4_fib_counters (stats_main_t * sm) { ip4_main_t *im4 = &ip4_main; api_main_t *am = sm->api_main; @@ -888,7 +1626,7 @@ add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg) } static void -do_ip6_fibs (stats_main_t * sm) +do_ip6_fib_counters (stats_main_t * sm) { ip6_main_t *im6 = &ip6_main; api_main_t *am = sm->api_main; @@ -1063,14 +1801,29 @@ stats_thread_fn (void *arg) /* 10 second poll interval */ ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ ); - if (!(sm->enable_poller)) - continue; - 
do_simple_interface_counters (sm); - do_combined_interface_counters (sm); - do_ip4_fibs (sm); - do_ip6_fibs (sm); - do_ip4_nbrs (sm); - do_ip6_nbrs (sm); + if (!(sm->enable_poller)) + { + continue; + } + if (pool_elts + (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS])) + do_combined_per_interface_counters (sm); + + if (pool_elts + (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS])) + do_simple_per_interface_counters (sm); + + if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS])) + do_ip4_fib_counters (sm); + + if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS])) + do_ip6_fib_counters (sm); + + if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS])) + do_ip4_nbr_counters (sm); + + if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS])) + do_ip6_nbr_counters (sm); } } @@ -1078,7 +1831,7 @@ static void vl_api_vnet_interface_simple_counters_t_handler (vl_api_vnet_interface_simple_counters_t * mp) { - vpe_client_stats_registration_t *reg; + vpe_client_registration_t *clients, client; stats_main_t *sm = &stats_main; unix_shared_memory_queue_t *q, *q_prev = NULL; vl_api_vnet_interface_simple_counters_t *mp_copy = NULL; @@ -1087,32 +1840,34 @@ static void mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64)); - /* *INDENT-OFF* */ - vec_reset_length(sm->regs); - pool_foreach(reg, sm->stats_registrations, - ({ - vec_add1(sm->regs,reg); - })); - /* *INDENT-ON* */ - for (i = 0; i < vec_len (sm->regs); i++) + clients = + get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, + ~0 /*flag for all */ ); + + for (i = 0; i < vec_len (clients); i++) { - reg = sm->regs[i]; - if (reg->stats_registrations & INTERFACE_SIMPLE_COUNTERS) + client = clients[i]; + q = vl_api_client_index_to_input_queue (client.client_index); + if (q) { - q = vl_api_client_index_to_input_queue (reg->client.client_index); - if (q) + if (q_prev && (q_prev->cursize < q_prev->maxsize)) { - if (q_prev && (q_prev->cursize < q_prev->maxsize)) - { - mp_copy = vl_msg_api_alloc_as_if_client (mp_size); - clib_memcpy (mp_copy, mp, mp_size); - vl_msg_api_send_shmem (q_prev, (u8 *) & mp); - mp = mp_copy; - } - q_prev = q; + mp_copy = vl_msg_api_alloc_as_if_client (mp_size); + clib_memcpy (mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *) & mp); + mp = mp_copy; } + q_prev = q; + } + else + { + sm->enable_poller = + clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0, + client.client_index); + continue; } } + #if STATS_DEBUG > 0 fformat (stdout, "%U\n", format_vnet_simple_counters, mp); #endif @@ -1127,85 +1882,49 @@ static void } } -static void - vl_api_vnet_interface_combined_counters_t_handler - (vl_api_vnet_interface_combined_counters_t * mp) -{ - vpe_client_stats_registration_t *reg; - stats_main_t *sm = &stats_main; - unix_shared_memory_queue_t *q, *q_prev = NULL; - vl_api_vnet_interface_combined_counters_t *mp_copy = NULL; - u32 mp_size; - - mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t)); - /* *INDENT-OFF* */ - pool_foreach(reg, sm->stats_registrations, - ({ - if (reg->stats_registrations & INTERFACE_COMBINED_COUNTERS) - { - q = vl_api_client_index_to_input_queue (reg->client.client_index); - if (q) - { - if (q_prev && (q_prev->cursize < q_prev->maxsize)) - { - mp_copy = vl_msg_api_alloc_as_if_client(mp_size); - clib_memcpy(mp_copy, mp, mp_size); - vl_msg_api_send_shmem (q_prev, (u8 *)&mp); - mp = mp_copy; - } - q_prev = q; - } - } - })); - /* *INDENT-ON* */ -#if STATS_DEBUG > 0 - fformat (stdout, "%U\n", 
format_vnet_combined_counters, mp); -#endif - if (q_prev && (q_prev->cursize < q_prev->maxsize)) - { - vl_msg_api_send_shmem (q_prev, (u8 *) & mp); - } - else - { - vl_msg_api_free (mp); - } -} static void vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp) { - vpe_client_stats_registration_t *reg; stats_main_t *sm = &stats_main; unix_shared_memory_queue_t *q, *q_prev = NULL; vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL; u32 mp_size; + vpe_client_registration_t *clients, client; + int i; mp_size = sizeof (*mp_copy) + ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t); - /* *INDENT-OFF* */ - pool_foreach(reg, sm->stats_registrations, - ({ - if (reg->stats_registrations & IP4_FIB_COUNTERS) - { - q = vl_api_client_index_to_input_queue (reg->client.client_index); - if (q) - { - if (q_prev && (q_prev->cursize < q_prev->maxsize)) - { - mp_copy = vl_msg_api_alloc_as_if_client(mp_size); - clib_memcpy(mp_copy, mp, mp_size); - vl_msg_api_send_shmem (q_prev, (u8 *)&mp); - mp = mp_copy; - } - q_prev = q; - } - } - })); - /* *INDENT-ON* */ + clients = + get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ ); + + for (i = 0; i < vec_len (clients); i++) + { + client = clients[i]; + q = vl_api_client_index_to_input_queue (client.client_index); + if (q) + { + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + mp_copy = vl_msg_api_alloc_as_if_client (mp_size); + clib_memcpy (mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *) & mp); + mp = mp_copy; + } + q_prev = q; + } + else + { + sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS, + ~0, client.client_index); + continue; + } + } + if (q_prev && (q_prev->cursize < q_prev->maxsize)) { vl_msg_api_send_shmem (q_prev, (u8 *) & mp); @@ -1219,34 +1938,42 @@ vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp) static void vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp) { - vpe_client_stats_registration_t *reg; stats_main_t *sm = &stats_main; unix_shared_memory_queue_t *q, *q_prev = NULL; vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL; u32 mp_size; + vpe_client_registration_t *clients, client; + int i; mp_size = sizeof (*mp_copy) + ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t); - /* *INDENT-OFF* */ - pool_foreach(reg, sm->stats_registrations, - ({ - if (reg->stats_registrations & IP4_NBR_COUNTERS) - { - q = vl_api_client_index_to_input_queue (reg->client.client_index); - if (q) - { - if (q_prev && (q_prev->cursize < q_prev->maxsize)) - { - mp_copy = vl_msg_api_alloc_as_if_client(mp_size); - clib_memcpy(mp_copy, mp, mp_size); - vl_msg_api_send_shmem (q_prev, (u8 *)&mp); - mp = mp_copy; - } - q_prev = q; - } - } - })); + clients = + get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ ); + + for (i = 0; i < vec_len (clients); i++) + { + client = clients[i]; + q = vl_api_client_index_to_input_queue (client.client_index); + if (q) + { + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + mp_copy = vl_msg_api_alloc_as_if_client (mp_size); + clib_memcpy (mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *) & mp); + mp = mp_copy; + } + q_prev = q; + } + else + { + sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS, + ~0, client.client_index); + continue; + } + } + /* *INDENT-ON* */ if (q_prev && (q_prev->cursize < q_prev->maxsize)) { @@ -1261,34 +1988,41 @@ vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp) static void vl_api_vnet_ip6_fib_counters_t_handler 
(vl_api_vnet_ip6_fib_counters_t * mp) { - vpe_client_stats_registration_t *reg; stats_main_t *sm = &stats_main; unix_shared_memory_queue_t *q, *q_prev = NULL; vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL; u32 mp_size; + vpe_client_registration_t *clients, client; + int i; mp_size = sizeof (*mp_copy) + ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t); - /* *INDENT-OFF* */ - pool_foreach(reg, sm->stats_registrations, - ({ - if (reg->stats_registrations & IP6_FIB_COUNTERS) - { - q = vl_api_client_index_to_input_queue (reg->client.client_index); - if (q) - { - if (q_prev && (q_prev->cursize < q_prev->maxsize)) - { - mp_copy = vl_msg_api_alloc_as_if_client(mp_size); - clib_memcpy(mp_copy, mp, mp_size); - vl_msg_api_send_shmem (q_prev, (u8 *)&mp); - mp = mp_copy; - } - q_prev = q; - } - } - })); + clients = + get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ ); + + for (i = 0; i < vec_len (clients); i++) + { + client = clients[i]; + q = vl_api_client_index_to_input_queue (client.client_index); + if (q) + { + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + mp_copy = vl_msg_api_alloc_as_if_client (mp_size); + clib_memcpy (mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *) & mp); + mp = mp_copy; + } + q_prev = q; + } + else + { + sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS, + ~0, client.client_index); + continue; + } + } /* *INDENT-ON* */ if (q_prev && (q_prev->cursize < q_prev->maxsize)) { @@ -1303,34 +2037,41 @@ vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp) static void vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp) { - vpe_client_stats_registration_t *reg; stats_main_t *sm = &stats_main; unix_shared_memory_queue_t *q, *q_prev = NULL; vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL; u32 mp_size; + vpe_client_registration_t *clients, client; + int i; mp_size = sizeof (*mp_copy) + ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t); - /* *INDENT-OFF* */ - pool_foreach(reg, sm->stats_registrations, - ({ - if (reg->stats_registrations & IP6_NBR_COUNTERS) - { - q = vl_api_client_index_to_input_queue (reg->client.client_index); - if (q) - { - if (q_prev && (q_prev->cursize < q_prev->maxsize)) - { - mp_copy = vl_msg_api_alloc_as_if_client(mp_size); - clib_memcpy(mp_copy, mp, mp_size); - vl_msg_api_send_shmem (q_prev, (u8 *)&mp); - mp = mp_copy; - } - q_prev = q; - } - } - })); + clients = + get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ ); + + for (i = 0; i < vec_len (clients); i++) + { + client = clients[i]; + q = vl_api_client_index_to_input_queue (client.client_index); + if (q) + { + if (q_prev && (q_prev->cursize < q_prev->maxsize)) + { + mp_copy = vl_msg_api_alloc_as_if_client (mp_size); + clib_memcpy (mp_copy, mp, mp_size); + vl_msg_api_send_shmem (q_prev, (u8 *) & mp); + mp = mp_copy; + } + q_prev = q; + } + else + { + sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS, + ~0, client.client_index); + continue; + } + } /* *INDENT-ON* */ if (q_prev && (q_prev->cursize < q_prev->maxsize)) { @@ -1346,138 +2087,43 @@ static void vl_api_want_stats_t_handler (vl_api_want_stats_t * mp) { stats_main_t *sm = &stats_main; - vpe_client_stats_registration_t *rp; + vpe_client_registration_t rp; vl_api_want_stats_reply_t *rmp; uword *p; i32 retval = 0; + u32 item; unix_shared_memory_queue_t *q; - p = hash_get (sm->stats_registration_hash, mp->client_index); - if (p) - { - if (mp->enable_disable) - { - clib_warning ("pid %d: already enabled...", mp->pid); - 
retval = -2;
-	  goto reply;
-	}
-      else
-	{
-	  rp = pool_elt_at_index (sm->stats_registrations, p[0]);
-	  pool_put (sm->stats_registrations, rp);
-	  hash_unset (sm->stats_registration_hash, mp->client_index);
-	  goto reply;
-	}
-    }
-  if (mp->enable_disable == 0)
-    {
-      clib_warning ("pid %d: already disabled...", mp->pid);
-      retval = -3;
-      goto reply;
-    }
-  pool_get (sm->stats_registrations, rp);
-  rp->client.client_index = mp->client_index;
-  rp->client.client_pid = mp->pid;
-  rp->stats_registrations |= INTERFACE_SIMPLE_COUNTERS;
-  rp->stats_registrations |= INTERFACE_COMBINED_COUNTERS;
-  rp->stats_registrations |= IP4_FIB_COUNTERS;
-  rp->stats_registrations |= IP4_NBR_COUNTERS;
-  rp->stats_registrations |= IP6_FIB_COUNTERS;
-  rp->stats_registrations |= IP6_NBR_COUNTERS;
-
-  hash_set (sm->stats_registration_hash, rp->client.client_index,
-	    rp - sm->stats_registrations);
-
-reply:
-  if (pool_elts (sm->stats_registrations))
-    sm->enable_poller = 1;
-  else
-    sm->enable_poller = 0;
-
-  q = vl_api_client_index_to_input_queue (mp->client_index);
-
-  if (!q)
-    return;
-
-  rmp = vl_msg_api_alloc (sizeof (*rmp));
-  rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
-  rmp->context = mp->context;
-  rmp->retval = retval;
+  item = ~0;			/* ~0 => every item, for every stat type below */
+  rp.client_index = mp->client_index;
+  rp.client_pid = mp->pid;
 
-  vl_msg_api_send_shmem (q, (u8 *) & rmp);
-}
+  handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
+			      item, mp->enable_disable);
 
-static void
-  vl_api_want_interface_simple_stats_t_handler
-  (vl_api_want_interface_simple_stats_t * mp)
-{
-  stats_main_t *sm = &stats_main;
-  vpe_client_stats_registration_t *rp;
-  vl_api_want_interface_simple_stats_reply_t *rmp;
-  uword *p;
-  i32 retval = 0;
-  unix_shared_memory_queue_t *q;
+  handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
+			      item, mp->enable_disable);
 
-  p = hash_get (sm->stats_registration_hash, mp->client_index);
+  handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
+			      item, mp->enable_disable);
 
-  /* Disable case */
-  if (mp->enable_disable == 0)
-    {
-      if (!p)			// No client to disable
-	{
-	  clib_warning ("pid %d: already disabled for stats...", mp->pid);
-	  retval = 0;
-	  goto reply;
-	}
+  handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
+			      item, mp->enable_disable);
 
-      rp = pool_elt_at_index (sm->stats_registrations, p[0]);
-      if (!rp->stats_registrations & INTERFACE_SIMPLE_COUNTERS)	// Client but doesn't want this.
- { - clib_warning - ("pid %d: already disabled for interface simple stats...", - mp->pid); - retval = 0; - goto reply; - } - else - { - rp->stats_registrations &= ~(INTERFACE_SIMPLE_COUNTERS); // Clear flag - if (rp->stats_registrations == 0) // Client isn't listening to anything else - { - pool_put (sm->stats_registrations, rp); - hash_unset (sm->stats_registration_hash, mp->client_index); - } - goto reply; - } - } - /* Enable case */ - /* Get client from pool */ - if (p) - rp = pool_elt_at_index (sm->stats_registrations, p[0]); + handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, + item, mp->enable_disable); - if (!p || !rp) // Doesn't exist, make a new entry - { - pool_get (sm->stats_registrations, rp); - rp->client.client_index = mp->client_index; - rp->client.client_pid = mp->pid; - } - rp->stats_registrations |= INTERFACE_SIMPLE_COUNTERS; - hash_set (sm->stats_registration_hash, rp->client.client_index, - rp - sm->stats_registrations); + handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS, + item, mp->enable_disable); reply: - if (pool_elts (sm->stats_registrations)) // Someone wants something, somewhere so enable globally for now. - sm->enable_poller = 1; - else - sm->enable_poller = 0; - q = vl_api_client_index_to_input_queue (mp->client_index); if (!q) return; rmp = vl_msg_api_alloc (sizeof (*rmp)); - rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY); + rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY); rmp->context = mp->context; rmp->retval = retval; @@ -1485,153 +2131,71 @@ reply: } static void - vl_api_want_interface_combined_stats_t_handler - (vl_api_want_interface_combined_stats_t * mp) + vl_api_want_interface_simple_stats_t_handler + (vl_api_want_interface_simple_stats_t * mp) { stats_main_t *sm = &stats_main; - vpe_client_stats_registration_t *rp; - vl_api_want_interface_combined_stats_reply_t *rmp; + vpe_client_registration_t rp; + vl_api_want_interface_simple_stats_reply_t *rmp; uword *p; i32 retval = 0; + u32 swif; unix_shared_memory_queue_t *q; - p = hash_get (sm->stats_registration_hash, mp->client_index); - - /* Disable case */ - if (mp->enable_disable == 0) - { - if (!p) // No client to disable - { - clib_warning ("pid %d: already disabled for stats...", mp->pid); - retval = 0; - goto reply; - } - - rp = pool_elt_at_index (sm->stats_registrations, p[0]); - if (!(rp->stats_registrations & INTERFACE_COMBINED_COUNTERS)) // Client but doesn't want this. 
- { - clib_warning - ("pid %d: already disabled for interface COMBINED stats...", - mp->pid); - retval = 0; - goto reply; - } - else - { - rp->stats_registrations &= ~(INTERFACE_COMBINED_COUNTERS); // Clear flag - if (rp->stats_registrations == 0) // Client isn't listening to anything else - { - pool_put (sm->stats_registrations, rp); - hash_unset (sm->stats_registration_hash, mp->client_index); - } - goto reply; - } - } - /* Enable case */ - /* Get client from pool */ - if (p) - rp = pool_elt_at_index (sm->stats_registrations, p[0]); + swif = ~0; //Using same mechanism as _per_interface_ + rp.client_index = mp->client_index; + rp.client_pid = mp->pid; - if (!p || !rp) // Doesn't exist, make a new entry - { - pool_get (sm->stats_registrations, rp); - rp->client.client_index = mp->client_index; - rp->client.client_pid = mp->pid; - } - rp->stats_registrations |= INTERFACE_COMBINED_COUNTERS; - hash_set (sm->stats_registration_hash, rp->client.client_index, - rp - sm->stats_registrations); + handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif, + mp->enable_disable); reply: - if (pool_elts (sm->stats_registrations)) // Someone wants something, somewhere so enable globally for now. - sm->enable_poller = 1; - else - sm->enable_poller = 0; - q = vl_api_client_index_to_input_queue (mp->client_index); if (!q) - return; + { + sm->enable_poller = + clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif, + mp->client_index); + return; + } rmp = vl_msg_api_alloc (sizeof (*rmp)); - rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY); + rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY); rmp->context = mp->context; rmp->retval = retval; vl_msg_api_send_shmem (q, (u8 *) & rmp); } + static void vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp) { stats_main_t *sm = &stats_main; - vpe_client_stats_registration_t *rp; + vpe_client_registration_t rp; vl_api_want_ip4_fib_stats_reply_t *rmp; uword *p; i32 retval = 0; unix_shared_memory_queue_t *q; + u32 fib; - p = hash_get (sm->stats_registration_hash, mp->client_index); - - /* Disable case */ - /* - $$$ FIXME: need std return codes. Still undecided if enabling already - enabled (and similar for disabled) is really a -'ve error condition or - if 0 is sufficient - */ - if (mp->enable_disable == 0) - { - if (!p) // No client to disable - { - clib_warning ("pid %d: already disabled for stats...", mp->pid); - retval = -3; - goto reply; - } - - rp = pool_elt_at_index (sm->stats_registrations, p[0]); - if (!(rp->stats_registrations & IP4_FIB_COUNTERS)) // Client but doesn't want this. 
-	{
-	  clib_warning ("pid %d: already disabled for interface ip4 fib...",
-			mp->pid);
-	  retval = -2;
-	  goto reply;
-	}
-      else
-	{
-	  rp->stats_registrations &= ~(IP4_FIB_COUNTERS);	// Clear flag
-	  if (rp->stats_registrations == 0)	// Client isn't listening to anything else
-	    {
-	      pool_put (sm->stats_registrations, rp);
-	      hash_unset (sm->stats_registration_hash, mp->client_index);
-	    }
-	  goto reply;
-	}
-    }
-  /* Enable case */
-  /* Get client from pool */
-  if (p)
-    rp = pool_elt_at_index (sm->stats_registrations, p[0]);
+  fib = ~0;			//Using same mechanism as _per_interface_
+  rp.client_index = mp->client_index;
+  rp.client_pid = mp->pid;
 
-  if (!p || !rp)		// Doesn't exist, make a new entry
-    {
-      pool_get (sm->stats_registrations, rp);
-      rp->client.client_index = mp->client_index;
-      rp->client.client_pid = mp->pid;
-    }
-  rp->stats_registrations |= IP4_FIB_COUNTERS;
-  hash_set (sm->stats_registration_hash, rp->client.client_index,
-	    rp - sm->stats_registrations);
+  handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
+			      mp->enable_disable);
 
 reply:
-  if (pool_elts (sm->stats_registrations))	// Someone wants something, somewhere so enable globally for now.
-    sm->enable_poller = 1;
-  else
-    sm->enable_poller = 0;
-
   q = vl_api_client_index_to_input_queue (mp->client_index);
 
   if (!q)
-    return;
+    {
+      sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
+						 fib, mp->client_index);
+      return;
+    }
 
   rmp = vl_msg_api_alloc (sizeof (*rmp));
   rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
@@ -1645,73 +2209,29 @@ static void
 vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
 {
   stats_main_t *sm = &stats_main;
-  vpe_client_stats_registration_t *rp;
+  vpe_client_registration_t rp;
   vl_api_want_ip6_fib_stats_reply_t *rmp;
   uword *p;
   i32 retval = 0;
   unix_shared_memory_queue_t *q;
+  u32 fib;
 
-  p = hash_get (sm->stats_registration_hash, mp->client_index);
+  fib = ~0;			//Using same mechanism as _per_interface_
+  rp.client_index = mp->client_index;
+  rp.client_pid = mp->pid;
 
-  /* Disable case */
-  /*
-     $$$ FIXME: need std return codes. Still undecided if enabling already
-     enabled (and similar for disabled) is really a -'ve error condition or
-     if 0 is sufficient
-   */
-  if (mp->enable_disable == 0)
-    {
-      if (!p)			// No client to disable
-	{
-	  clib_warning ("pid %d: already disabled for stats...", mp->pid);
-	  retval = -3;
-	  goto reply;
-	}
-
-      rp = pool_elt_at_index (sm->stats_registrations, p[0]);
-      if (!(rp->stats_registrations & IP6_FIB_COUNTERS))	// Client but doesn't want this.
-	{
-	  clib_warning ("pid %d: already disabled for interface ip6 fib...",
-			mp->pid);
-	  retval = -2;
-	  goto reply;
-	}
-      else
-	{
-	  rp->stats_registrations &= ~(IP6_FIB_COUNTERS);	// Clear flag
-	  if (rp->stats_registrations == 0)	// Client isn't listening to anything else
-	    {
-	      pool_put (sm->stats_registrations, rp);
-	      hash_unset (sm->stats_registration_hash, mp->client_index);
-	    }
-	  goto reply;
-	}
-    }
-  /* Enable case */
-  /* Get client from pool */
-  if (p)
-    rp = pool_elt_at_index (sm->stats_registrations, p[0]);
-
-  if (!p || !rp)		// Doesn't exist, make a new entry
-    {
-      pool_get (sm->stats_registrations, rp);
-      rp->client.client_index = mp->client_index;
-      rp->client.client_pid = mp->pid;
-    }
-  rp->stats_registrations |= IP6_FIB_COUNTERS;
-  hash_set (sm->stats_registration_hash, rp->client.client_index,
-	    rp - sm->stats_registrations);
+  handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
+			      mp->enable_disable);
 
 reply:
-  if (pool_elts (sm->stats_registrations))	// Someone wants something, somewhere so enable globally for now.
-    sm->enable_poller = 1;
-  else
-    sm->enable_poller = 0;
-
   q = vl_api_client_index_to_input_queue (mp->client_index);
 
   if (!q)
-    return;
+    {
+      sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
						 fib, mp->client_index);
+      return;
+    }
 
   rmp = vl_msg_api_alloc (sizeof (*rmp));
   rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
@@ -1748,7 +2268,9 @@ vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
     vl_api_client_index_to_input_queue (mp->client_index);
 
   if (!q)
-    return;
+    {
+      return;
+    }
 
   rmp = vl_msg_api_alloc (sizeof (*rmp));
   rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
@@ -1790,13 +2312,14 @@ stats_memclnt_delete_callback (u32 client_index)
   stats_main_t *sm = &stats_main;
   uword *p;
 
-  p = hash_get (sm->stats_registration_hash, client_index);
-  if (p)
-    {
-      rp = pool_elt_at_index (sm->stats_registrations, p[0]);
-      pool_put (sm->stats_registrations, rp);
-      hash_unset (sm->stats_registration_hash, client_index);
-    }
+  // FIXME
+  /* p = hash_get (sm->stats_registration_hash, client_index); */
+  /* if (p) */
+  /*   { */
+  /*     rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
+  /*     pool_put (sm->stats_registrations, rp); */
+  /*     hash_unset (sm->stats_registration_hash, client_index); */
+  /*   } */
 
   return 0;
 }
@@ -1854,6 +2377,14 @@ stats_init (vlib_main_t * vm)
    */
   setup_message_id_table (am);
 
+  vec_validate (sm->stats_registrations, STATS_REG_N_IDX);
+  vec_validate (sm->stats_registration_hash, STATS_REG_N_IDX);
+#define stats_reg(n) \
+  sm->stats_registrations[IDX_##n] = 0; \
+  sm->stats_registration_hash[IDX_##n] = 0;
+#include <vpp/stats/stats.reg>
+#undef stats_reg
+
   return 0;
 }
 
diff --git a/src/vpp/stats/stats.h b/src/vpp/stats/stats.h
index 167b09bd..042bcb65 100644
--- a/src/vpp/stats/stats.h
+++ b/src/vpp/stats/stats.h
@@ -36,19 +36,10 @@ typedef struct
   int tag;
 } data_structure_lock_t;
 
-typedef struct
-{
-  vpe_client_registration_t client;
-  u8 stats_registrations;
-#define INTERFACE_SIMPLE_COUNTERS (1 << 0)
-#define INTERFACE_COMBINED_COUNTERS (1 << 1)
-#define IP4_FIB_COUNTERS (1 << 2)
-#define IP4_NBR_COUNTERS (1 << 3)
-#define IP6_FIB_COUNTERS (1 << 4)
-#define IP6_NBR_COUNTERS (1 << 5)
-
-} vpe_client_stats_registration_t;
-
+/**
+ * @brief stats request registration indexes
+ *
+ */
 /* from .../vnet/vnet/ip/lookup.c.
   Yuck */
 typedef CLIB_PACKED (struct
 {
@@ -57,6 +48,30 @@ u32 address_length: 6;
 u32 index: 26;
 }) ip4_route_t;
 
+/* see interface.api */
+typedef struct
+{
+  u32 sw_if_index;
+  u64 drop;
+  u64 punt;
+  u64 rx_ip4;
+  u64 rx_ip6;
+  u64 rx_no_buffer;
+  u64 rx_miss;
+  u64 rx_error;
+  u64 tx_error;
+  u64 rx_mpls;
+} vnet_simple_counter_t;
+
+typedef struct
+{
+  u32 sw_if_index;
+  u64 rx_packets;		/**< packet counter */
+  u64 rx_bytes;			/**< byte counter */
+  u64 tx_packets;		/**< packet counter */
+  u64 tx_bytes;			/**< byte counter */
+} vnet_combined_counter_t;
+
 typedef struct
 {
   ip6_address_t address;
@@ -74,6 +89,33 @@ typedef struct
   uword *results;
 } do_ip46_fibs_t;
 
+typedef struct
+{
+  u16 msg_id;
+  u32 size;
+  u32 client_index;
+  u32 context;
+  i32 retval;
+} client_registration_reply_t;
+
+typedef enum
+{
+#define stats_reg(n) IDX_##n,
+#include <vpp/stats/stats.reg>
+#undef stats_reg
+  STATS_REG_N_IDX,
+} stats_reg_index_t;
+
+typedef struct
+{
+  //Standard client information
+  uword *client_hash;
+  vpe_client_registration_t *clients;
+  u32 item;
+
+} vpe_client_stats_registration_t;
+
+
 typedef struct
 {
   void *mheap;
@@ -83,9 +125,41 @@ typedef struct
   u32 stats_poll_interval_in_seconds;
   u32 enable_poller;
 
-  uword *stats_registration_hash;
-  vpe_client_stats_registration_t *stats_registrations;
-  vpe_client_stats_registration_t **regs;
+  /*
+   * stats_registrations is a vector, indexed by the
+   * IDX_xxxx_COUNTER generated for each streaming
+   * stat a client can register for (see stats.reg).
+   *
+   * The values in the vector refer to pools.
+   *
+   * Each pool is of type vpe_client_stats_registration_t,
+   * which consists of:
+   *
+   * u32 item: the instance of the IDX_xxxx_COUNTER a
+   * client is interested in.
+   * vpe_client_registration_t *clients: the list of interested clients.
+   *
+   * e.g.
+   * stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS] refers to a pool
+   * containing elements such as:
+   *
+   * u32 item = sw_if_index1
+   * clients = ["clienta","clientb"]
+   *
+   * When clients == NULL the pool element is freed. When the pool is
+   * empty, i.e.
+   *
+   * 0 == pool_elts (stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]),
+   *
+   * there is no need to process PER_INTERFACE_SIMPLE_COUNTERS at all.
+   *
+   * Note that u32 item = ~0 is the simple case for ALL interfaces or fibs.
+   *
+   */
+
+  uword **stats_registration_hash;
+  vpe_client_stats_registration_t **stats_registrations;
 
   /* control-plane data structure lock */
   data_structure_lock_t *data_structure_lock;
@@ -96,6 +170,13 @@ typedef struct
   /* Vectors for Distribution funcs: do_ip4_fibs and do_ip6_fibs. */
   do_ip46_fibs_t do_ip46_fibs;
 
+  /*
+     Working vectors, retained between calls so as not to thrash the
+     memory allocator (they behave as if they were "static").
+   */
+  vpe_client_stats_registration_t **regs_tmp;
+  vpe_client_registration_t **clients_tmp;
+
   /* convenience */
   vlib_main_t *vlib_main;
   vnet_main_t *vnet_main;
diff --git a/src/vpp/stats/stats.reg b/src/vpp/stats/stats.reg
new file mode 100644
index 00000000..d76443c4
--- /dev/null
+++ b/src/vpp/stats/stats.reg
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief Client stats registrations
+ *
+ * Each entry is a specific REGISTRATION as
+ * opposed to an API call.
+ *
+ * For instance:
+ *   want_stats()
+ * enables/disables ALL stats entries below for all
+ * ITEMs.
+ *
+ * An item is an instance of the thing we want stats for,
+ * such as a FIB, or the sw_if_index of an interface. In each case the
+ * value ~0 is treated as ALL ITEMs.
+ *
+ * As such want_interface_simple_counters() is translated to
+ * want_per_interface_simple_counters(item=~0)
+ */
+
+
+stats_reg (IP4_FIB_COUNTERS)
+stats_reg (IP4_NBR_COUNTERS)
+stats_reg (IP6_FIB_COUNTERS)
+stats_reg (IP6_NBR_COUNTERS)
+stats_reg (PER_INTERFACE_COMBINED_COUNTERS)
+stats_reg (PER_INTERFACE_SIMPLE_COUNTERS)
+
-- cgit 1.2.3-korg