/* * Copyright (c) 2015 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #define _GNU_SOURCE #include #include #include #include #include #include DECLARE_CJ_GLOBAL_LOG; #define FRAME_QUEUE_NELTS 32 u32 vl (void *p) { return vec_len (p); } vlib_worker_thread_t *vlib_worker_threads; vlib_thread_main_t vlib_thread_main; /* * Barrier tracing can be enabled on a normal build to collect information * on barrier use, including timings and call stacks. Deliberately not * keyed off CLIB_DEBUG, because that can add significant overhead which * imapacts observed timings. 
*/ #ifdef BARRIER_TRACING /* * Output of barrier tracing can be to syslog or elog as suits */ #ifdef BARRIER_TRACING_ELOG static u32 elog_id_for_msg_name (const char *msg_name) { uword *p, r; static uword *h; u8 *name_copy; if (!h) h = hash_create_string (0, sizeof (uword)); p = hash_get_mem (h, msg_name); if (p) return p[0]; r = elog_string (&vlib_global_main.elog_main, "%s", msg_name); name_copy = format (0, "%s%c", msg_name, 0); hash_set_mem (h, name_copy, r); return r; } /* * elog Barrier trace functions, which are nulled out if BARRIER_TRACING isn't * defined */ static inline void barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed) { /* *INDENT-OFF* */ ELOG_TYPE_DECLARE (e) = { .format = "barrier <%d#%s(O:%dus:%dus)(%dus)", .format_args = "i4T4i4i4i4", }; /* *INDENT-ON* */ struct { u32 count, caller, t_entry, t_open, t_closed; } *ed = 0; ed = ELOG_DATA (&vlib_global_main.elog_main, e); ed->count = (int) vlib_worker_threads[0].barrier_sync_count; ed->caller = elog_id_for_msg_name (vlib_worker_threads[0].barrier_caller); ed->t_entry = (int) (1000000.0 * t_entry); ed->t_open = (int) (1000000.0 * t_open); ed->t_closed = (int) (1000000.0 * t_closed); } static inline void barrier_trace_sync_rec (f64 t_entry) { /* *INDENT-OFF* */ ELOG_TYPE_DECLARE (e) = { .format = "barrier <%d(%dus)%s", .format_args = "i4i4T4", }; /* *INDENT-ON* */ struct { u32 depth, t_entry, caller; } *ed = 0; ed = ELOG_DATA (&vlib_global_main.elog_main, e); ed->depth = (int) vlib_worker_threads[0].recursion_level - 1; ed->t_entry = (int) (1000000.0 * t_entry); ed->caller = elog_id_for_msg_name (vlib_worker_threads[0].barrier_caller); } static inline void barrier_trace_release_rec (f64 t_entry) { /* *INDENT-OFF* */ ELOG_TYPE_DECLARE (e) = { .format = "barrier (%dus)%d>", .format_args = "i4i4", }; /* *INDENT-ON* */ struct { u32 t_entry, depth; } *ed = 0; ed = ELOG_DATA (&vlib_global_main.elog_main, e); ed->t_entry = (int) (1000000.0 * t_entry); ed->depth = (int) 
vlib_worker_threads[0].recursion_level; } static inline void barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main) { /* *INDENT-OFF* */ ELOG_TYPE_DECLARE (e) = { .format = "barrier (%dus){%d}(C:%dus)#%d>", .format_args = "i4i4i4i4", }; /* *INDENT-ON* */ struct { u32 t_entry, t_update_main, t_closed_total, count; } *ed = 0; ed = ELOG_DATA (&vlib_global_main.elog_main, e); ed->t_entry = (int) (1000000.0 * t_entry); ed->t_update_main = (int) (1000000.0 * t_update_main); ed->t_closed_total = (int) (1000000.0 * t_closed_total); ed->count = (int) vlib_worker_threads[0].barrier_sync_count; /* Reset context for next trace */ vlib_worker_threads[0].barrier_context = NULL; } #else char barrier_trace[65536]; char *btp = barrier_trace; /* * syslog Barrier trace functions, which are nulled out if BARRIER_TRACING * isn't defined */ static inline void barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed) { btp += sprintf (btp, "<%u#%s", (unsigned int) vlib_worker_threads[0].barrier_sync_count, vlib_worker_threads[0].barrier_caller); if (vlib_worker_threads[0].barrier_context) { btp += sprintf (btp, "[%s]", vlib_worker_threads[0].barrier_context); } btp += sprintf (btp, "(O:%dus:%dus)(%dus):", (int) (1000000.0 * t_entry), (int) (1000000.0 * t_open), (int) (1000000.0 * t_closed)); } static inline void barrier_trace_sync_rec (f64 t_entry) { btp += sprintf (btp, "<%u(%dus)%s:", (int) vlib_worker_threads[0].recursion_level - 1, (int) (1000000.0 * t_entry), vlib_worker_threads[0].barrier_caller); } static inline void barrier_trace_release_rec (f64 t_entry) { btp += sprintf (btp, ":(%dus)%u>", (int) (1000000.0 * t_entry), (int) vlib_worker_threads[0].recursion_level); } static inline void barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main) { btp += sprintf (btp, ":(%dus)", (int) (1000000.0 * t_entry)); if (t_update_main > 0) { btp += sprintf (btp, "{%dus}", (int) (1000000.0 * t_update_main)); } btp += sprintf (btp, "(C:%dus)#%u>", (int) 
(1000000.0 * t_closed_total), (int) vlib_worker_threads[0].barrier_sync_count); /* Dump buffer to syslog, and reset for next trace */ fformat (stderr, "BTRC %s\n", barrier_trace); btp = barrier_trace; vlib_worker_threads[0].barrier_context = NULL; } #endif #else /* Null functions for default case where barrier tracing isn't used */ static inline void barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed) { } static inline void barrier_trace_sync_rec (f64 t_entry) { } static inline void barrier_trace_release_rec (f64 t_entry) { } static inline void barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main) { } #endif uword os_get_nthreads (void) { u32 len; len = vec_len (vlib_thread_stacks); if (len == 0) return 1; else return len; } void vlib_set_thread_name (char *name) { int pthread_setname_np (pthread_t __target_thread, const char *__name); int rv; pthread_t thread = pthread_self (); if (thread) { rv = pthread_setname_np (thread, name); if (rv) clib_warning ("pthread_setname_np returned %d", rv); } } static int sort_registrations_by_no_clone (void *a0, void *a1) { vlib_thread_registration_t **tr0 = a0; vlib_thread_registration_t **tr1 = a1; return ((i32) ((*tr0)->no_data_structure_clone) - ((i32) ((*tr1)->no_data_structure_clone))); } static uword * clib_sysfs_list_to_bitmap (char *filename) { FILE *fp; uword *r = 0; fp = fopen (filename, "r"); if (fp != NULL) { u8 *buffer = 0; vec_validate (buffer, 256 - 1); if (fgets ((char *) buffer, 256, fp)) { unformat_input_t in; unformat_init_string (&in, (char *) buffer, strlen ((char *) buffer)); if (unformat (&in, "%U", unformat_bitmap_list, &r) != 1) clib_warning ("unformat_bitmap_list failed"); unformat_free (&in); } vec_free (buffer); fclose (fp); } return r; } /* Called early in the init sequence */ clib_error_t * vlib_thread_init (vlib_main_t * vm) { vlib_thread_main_t *tm = &vlib_thread_main; vlib_worker_thread_t *w; vlib_thread_registration_t *tr; u32 n_vlib_mains = 1; u32 first_index = 1; 
u32 i; uword *avail_cpu; /* get bitmaps of active cpu cores and sockets */ tm->cpu_core_bitmap = clib_sysfs_list_to_bitmap ("/sys/devices/system/cpu/online"); tm->cpu_socket_bitmap = clib_sysfs_list_to_bitmap ("/sys/devices/system/node/online"); avail_cpu = clib_bitmap_dup (tm->cpu_core_bitmap); /* skip cores */ for (i = 0; i < tm->skip_cores; i++) { uword c = clib_bitmap_first_set (avail_cpu); if (c == ~0) return clib_error_return (0, "no available cpus to skip"); avail_cpu = clib_bitmap_set (avail_cpu, c, 0); } /* grab cpu for main thread */ if (!tm->main_lcore) { tm->main_lcore = clib_bitmap_first_set (avail_cpu); if (tm->main_lcore == (u8) ~ 0) return clib_error_return (0, "no available cpus to be used for the" " main thread"); } else { if (clib_bitmap_get (avail_cpu, tm->main_lcore) == 0) return clib_error_return (0, "cpu %u is not available to be used" " for the main thread", tm->main_lcore); } avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0); /* assume that there is socket 0 only if there is no data from sysfs */ if (!tm->cpu_socket_bitmap) tm->cpu_socket_bitmap = clib_bitmap_set (0, 0, 1); /* pin main thread to main_lcore */ if (tm->cb.vlib_thread_set_lcore_cb) { tm->cb.vlib_thread_set_lcore_cb (0, tm->main_lcore); } else { cpu_set_t cpuset; CPU_ZERO (&cpuset); CPU_SET (tm->main_lcore, &cpuset); pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t), &cpuset); } /* as many threads as stacks... */ vec_validate_aligned (vlib_worker_threads, vec_len (vlib_thread_stacks) - 1, CLIB_CACHE_LINE_BYTES); /* Preallocate thread 0 */ _vec_len (vlib_worker_threads) = 1; w = vlib_worker_threads; w->thread_mheap = clib_mem_get_heap (); w->thread_stac
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>

/* TCP port the sockclnt API server listens on (arbitrarily chosen). */
#define SOCKCLNT_SERVER_PORT 32741	/* whatever */

/* Fixed-width integer aliases matching the VPP API naming convention.
 * NOTE(review): assumes short=16, int=32, long long=64 bits -- true on
 * common LP64/LLP64 targets, but not guaranteed by the C standard. */
typedef signed char i8;
typedef signed short i16;
typedef signed int i32;
typedef signed long long i64;
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
typedef unsigned long long u64;
typedef unsigned long uword;

/* Mark a struct as packed (no padding) so it matches the wire layout
 * of the API messages exchanged with the server. */
#define VL_API_PACKED(x) x __attribute__ ((packed))

/* sockclnt_create request: registers a named client with the API server.
 * 'context' is an opaque value echoed back in the reply for correlation. */
typedef VL_API_PACKED (struct _vl_api_sockclnt_create
		       {
		       u16 _vl_msg_id; u8 name[64];
		       u32 context;
		       }) vl_api_sockclnt_create_t;

/* sockclnt_create reply: 'response' is the server's status code,
 * 'handle'/'index' identify the new registration, and 'context' is the
 * value supplied in the matching request. */
typedef VL_API_PACKED (struct _vl_api_sockclnt_create_reply
		       {
		       u16 _vl_msg_id;
		       i32 response; u64 handle; u32 index; u32 context;
		       }) vl_api_sockclnt_create_reply_t;

/* sockclnt_delete request: tears down a registration previously created
 * via sockclnt_create, identified by its 'index' and 'handle'. */
typedef VL_API_PACKED (struct _vl_api_sockclnt_delete
		       {
		       u16 _vl_msg_id; u32 index;
		       u64 handle;
		       }) vl_api_sockclnt_delete_t;

/* sockclnt_delete reply: status code plus the handle that was deleted. */
typedef VL_API_PACKED (struct _vl_api_sockclnt_delete_reply
		       {
		       u16 _vl_msg_id; i32 response; u64 handle;
		       }) vl_api_sockclnt_delete_reply_t;

/*
 * Print 'msg' followed by the current errno description to stderr,
 * then terminate the process.  Never returns.
 *
 * Fix: the original called exit (0), which reported *success* to the
 * shell on every error path; use a non-zero failure status instead.
 */
void
error (char *msg)
{
  perror (msg);
  exit (1);
}

/*
 * Connect to the sockclnt API server on localhost, send one framed
 * sockclnt_create request, read back the framed reply, print it, and
 * disconnect.  Returns 0 on success; exits non-zero on any failure.
 *
 * Wire format: each message is preceded by a 4-byte network-order
 * length that counts the message body plus the length field itself.
 */
int
main (int argc, char *argv[])
{
  int sockfd, portno, n;
  struct sockaddr_in serv_addr;
  struct hostent *server;
  char buffer[256];
  int i;
  u32 nbytes;
  vl_api_sockclnt_create_t *mp;
  vl_api_sockclnt_create_reply_t *rp;
  char *rdptr;
  int total_bytes;

  /* Single iteration today; the loop makes it easy to stress-test
     repeated connect/disconnect cycles by raising the bound. */
  for (i = 0; i < 1; i++)
    {
      portno = SOCKCLNT_SERVER_PORT;
      sockfd = socket (AF_INET, SOCK_STREAM, 0);
      if (sockfd < 0)
	error ("ERROR opening socket");
      server = gethostbyname ("localhost");
      if (server == NULL)
	{
	  fprintf (stderr, "ERROR, no such host\n");
	  /* Fix: was exit (0), which reported success on failure */
	  exit (1);
	}
      /* memset/memcpy instead of the legacy bzero/bcopy */
      memset (&serv_addr, 0, sizeof (serv_addr));
      serv_addr.sin_family = AF_INET;
      memcpy (&serv_addr.sin_addr.s_addr, server->h_addr, server->h_length);
      serv_addr.sin_port = htons (portno);
      if (connect (sockfd, (struct sockaddr *) &serv_addr,
		   sizeof (serv_addr)) < 0)
	error ("ERROR connecting");

      memset (buffer, 0, sizeof (buffer));

      mp = (vl_api_sockclnt_create_t *) buffer;
      /* Fix: host-to-network conversion is htons, not ntohs (identical
         on current platforms, but semantically reversed) */
      mp->_vl_msg_id = htons (8);	/* VL_API_SOCKCLNT_CREATE */
      strncpy ((char *) mp->name, "socket-test", sizeof (mp->name) - 1);
      mp->name[sizeof (mp->name) - 1] = 0;
      mp->context = 0xfeedface;
      /* length of the message, including the length itself */
      nbytes = sizeof (*mp) + sizeof (nbytes);
      nbytes = htonl (nbytes);
      /* Fix: detect short writes, not just write errors */
      n = write (sockfd, &nbytes, sizeof (nbytes));
      if (n != (int) sizeof (nbytes))
	error ("ERROR writing len to socket");
      n = write (sockfd, mp, sizeof (*mp));
      if (n != (int) sizeof (*mp))
	error ("ERROR writing msg to socket");

      memset (buffer, 0, sizeof (buffer));

      /* Reassemble the reply from possibly-short reads. */
      total_bytes = 0;
      rdptr = buffer;
      do
	{
	  n = read (sockfd, rdptr, sizeof (buffer) - (rdptr - buffer));
	  if (n < 0)
	    error ("ERROR reading from socket");
	  if (n == 0)
	    {
	      /* Fix: read()==0 means the peer closed the connection;
	         the original looped forever in that case. */
	      fprintf (stderr, "ERROR: connection closed by peer\n");
	      exit (1);
	    }
	  printf ("read %d bytes\n", n);
	  total_bytes += n;
	  rdptr += n;
	}
      while (total_bytes <
	     (int) (sizeof (vl_api_sockclnt_create_reply_t) + 4));

      /* Skip the 4-byte length prefix to reach the reply message */
      rp = (vl_api_sockclnt_create_reply_t *) (buffer + 4);
      /* VL_API_SOCKCLNT_CREATE_REPLY */
      if (ntohs (rp->_vl_msg_id) != 9)
	{
	  printf ("WARNING: msg id %d\n", ntohs (rp->_vl_msg_id));
	}

      /* NOTE(review): handle/index/context are printed without
         network-to-host conversion -- confirm intended byte order */
      printf ("response %d, handle 0x%llx, index %d, context 0x%x\n",
	      ntohl (rp->response), rp->handle, rp->index, rp->context);
      close (sockfd);
    }
  return 0;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
e */ now = vlib_time_now (vm); t_entry = now - vm->barrier_epoch; /* Tolerate recursive calls */ if (++vlib_worker_threads[0].recursion_level > 1) { barrier_trace_sync_rec (t_entry); return; } vlib_worker_threads[0].barrier_sync_count++; /* Enforce minimum barrier open time to minimize packet loss */ ASSERT (vm->barrier_no_close_before <= (now + BARRIER_MINIMUM_OPEN_LIMIT)); while ((now = vlib_time_now (vm)) < vm->barrier_no_close_before) ; /* Record time of closure */ t_open = now - vm->barrier_epoch; vm->barrier_epoch = now; deadline = now + BARRIER_SYNC_TIMEOUT; *vlib_worker_threads->wait_at_barrier = 1; while (*vlib_worker_threads->workers_at_barrier != count) { if ((now = vlib_time_now (vm)) > deadline) { fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__); os_panic (); } } t_closed = now - vm->barrier_epoch; barrier_trace_sync (t_entry, t_open, t_closed); } void vlib_stat_segment_lock (void) __attribute__ ((weak)); void vlib_stat_segment_lock (void) { } void vlib_stat_segment_unlock (void) __attribute__ ((weak)); void vlib_stat_segment_unlock (void) { } void vlib_worker_thread_barrier_release (vlib_main_t * vm) { f64 deadline; f64 now; f64 minimum_open; f64 t_entry; f64 t_closed_total; f64 t_update_main = 0.0; int refork_needed = 0; if (vec_len (vlib_mains) < 2) return; ASSERT (vlib_get_thread_index () == 0); now = vlib_time_now (vm); t_entry = now - vm->barrier_epoch; if (--vlib_worker_threads[0].recursion_level > 0) { barrier_trace_release_rec (t_entry); return; } /* Update (all) node runtimes before releasing the barrier, if needed */ if (vm->need_vlib_worker_thread_node_runtime_update) { /* * Lock stat segment here, so we's safe when * rebuilding the stat segment node clones from the * stat thread... 
*/ vlib_stat_segment_lock (); /* Do stats elements on main thread */ worker_thread_node_runtime_update_internal (); vm->need_vlib_worker_thread_node_runtime_update = 0; /* Do per thread rebuilds in parallel */ refork_needed = 1; clib_smp_atomic_add (vlib_worker_threads->node_reforks_required, (vec_len (vlib_mains) - 1)); now = vlib_time_now (vm); t_update_main = now - vm->barrier_epoch; } deadline = now + BARRIER_SYNC_TIMEOUT; *vlib_worker_threads->wait_at_barrier = 0; while (*vlib_worker_threads->workers_at_barrier > 0) { if ((now = vlib_time_now (vm)) > deadline) { fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__); os_panic (); } } /* Wait for reforks before continuing */ if (refork_needed) { now = vlib_time_now (vm); deadline = now + BARRIER_SYNC_TIMEOUT; while (*vlib_worker_threads->node_reforks_required > 0) { if ((now = vlib_time_now (vm)) > deadline) { fformat (stderr, "%s: worker thread refork deadlock\n", __FUNCTION__); os_panic (); } } vlib_stat_segment_unlock (); } t_closed_total = now - vm->barrier_epoch; minimum_open = t_closed_total * BARRIER_MINIMUM_OPEN_FACTOR; if (minimum_open > BARRIER_MINIMUM_OPEN_LIMIT) { minimum_open = BARRIER_MINIMUM_OPEN_LIMIT; } vm->barrier_no_close_before = now + minimum_open; /* Record barrier epoch (used to enforce minimum open time) */ vm->barrier_epoch = now; barrier_trace_release (t_entry, t_closed_total, t_update_main); } /* * Check the frame queue to see if any frames are available. * If so, pull the packets off the frames and put them to * the handoff node. 
*/ int vlib_frame_queue_dequeue (vlib_main_t * vm, vlib_frame_queue_main_t * fqm) { u32 thread_id = vm->thread_index; vlib_frame_queue_t *fq = fqm->vlib_frame_queues[thread_id]; vlib_frame_queue_elt_t *elt; u32 *from, *to; vlib_frame_t *f; int msg_type; int processed = 0; u32 n_left_to_node; u32 vectors = 0; ASSERT (fq); ASSERT (vm == vlib_mains[thread_id]); if (PREDICT_FALSE (fqm->node_index == ~0)) return 0; /* * Gather trace data for frame queues */ if (PREDICT_FALSE (fq->trace)) { frame_queue_trace_t *fqt; frame_queue_nelt_counter_t *fqh; u32 elix; fqt = &fqm->frame_queue_traces[thread_id]; fqt->nelts = fq->nelts; fqt->head = fq->head; fqt->head_hint = fq->head_hint; fqt->tail = fq->tail; fqt->threshold = fq->vector_threshold; fqt->n_in_use = fqt->tail - fqt->head; if (fqt->n_in_use >= fqt->nelts) { // if beyond max then use max fqt->n_in_use = fqt->nelts - 1; } /* Record the number of elements in use in the histogram */ fqh = &fqm->frame_queue_histogram[thread_id]; fqh->count[fqt->n_in_use]++; /* Record a snapshot of the elements in use */ for (elix = 0; elix < fqt->nelts; elix++) { elt = fq->elts + ((fq->head + 1 + elix) & (fq->nelts - 1)); if (1 || elt->valid) { fqt->n_vectors[elix] = elt->n_vectors; } } fqt->written = 1; } while (1) { if (fq->head == fq->tail) { fq->head_hint = fq->head; return processed; } elt = fq->elts + ((fq->head + 1) & (fq->nelts - 1)); if (!elt->valid) { fq->head_hint = fq->head; return processed; } from = elt->buffer_index; msg_type = elt->msg_type; ASSERT (msg_type == VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME); ASSERT (elt->n_vectors <= VLIB_FRAME_SIZE); f = vlib_get_frame_to_node (vm, fqm->node_index); to = vlib_frame_vector_args (f); n_left_to_node = elt->n_vectors; while (n_left_to_node >= 4) { to[0] = from[0]; to[1] = from[1]; to[2] = from[2]; to[3] = from[3]; to += 4; from += 4; n_left_to_node -= 4; } while (n_left_to_node > 0) { to[0] = from[0]; to++; from++; n_left_to_node--; } vectors += elt->n_vectors; f->n_vectors = 
elt->n_vectors; vlib_put_frame_to_node (vm, fqm->node_index, f); elt->valid = 0; elt->n_vectors = 0; elt->msg_type = 0xfefefefe; CLIB_MEMORY_BARRIER (); fq->head++; processed++; /* * Limit the number of packets pushed into the graph */ if (vectors >= fq->vector_threshold) { fq->head_hint = fq->head; return processed; } } ASSERT (0); return processed; } void vlib_worker_thread_fn (void *arg) { vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg; vlib_thread_main_t *tm = vlib_get_thread_main (); vlib_main_t *vm = vlib_get_main (); clib_error_t *e; ASSERT (vm->thread_index == vlib_get_thread_index ()); vlib_worker_thread_init (w); clib_time_init (&vm->clib_time); clib_mem_set_heap (w->thread_mheap); /* Wait until the dpdk init sequence is complete */ while (tm->extern_thread_mgmt && tm->worker_thread_release == 0) vlib_worker_thread_barrier_check (); e = vlib_call_init_exit_functions (vm, vm->worker_init_function_registrations, 1 /* call_once */ ); if (e) clib_error_report (e); vlib_worker_loop (vm); } /* *INDENT-OFF* */ VLIB_REGISTER_THREAD (worker_thread_reg, static) = { .name = "workers", .short_name = "wk", .function = vlib_worker_thread_fn, }; /* *INDENT-ON* */ u32 vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts) { vlib_thread_main_t *tm = vlib_get_thread_main (); vlib_frame_queue_main_t *fqm; vlib_frame_queue_t *fq; int i; if (frame_queue_nelts == 0) frame_queue_nelts = FRAME_QUEUE_NELTS; vec_add2 (tm->frame_queue_mains, fqm, 1); fqm->node_index = node_index; vec_validate (fqm->vlib_frame_queues, tm->n_vlib_mains - 1); _vec_len (fqm->vlib_frame_queues) = 0; for (i = 0; i < tm->n_vlib_mains; i++) { fq = vlib_frame_queue_alloc (frame_queue_nelts); vec_add1 (fqm->vlib_frame_queues, fq); } return (fqm - tm->frame_queue_mains); } int vlib_thread_cb_register (struct vlib_main_t *vm, vlib_thread_callbacks_t * cb) { vlib_thread_main_t *tm = vlib_get_thread_main (); if (tm->extern_thread_mgmt) return -1; tm->cb.vlib_launch_thread_cb = 
cb->vlib_launch_thread_cb; tm->extern_thread_mgmt = 1; return 0; } void vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t * args) { ASSERT (vlib_get_thread_index () == 0); vlib_process_signal_event (vlib_get_main (), args->node_index, args->type_opaque, args->data); } void *rpc_call_main_thread_cb_fn; void vlib_rpc_call_main_thread (void *callback, u8 * args, u32 arg_size) { if (rpc_call_main_thread_cb_fn) { void (*fp) (void *, u8 *, u32) = rpc_call_main_thread_cb_fn; (*fp) (callback, args, arg_size); } else clib_warning ("BUG: rpc_call_main_thread_cb_fn NULL!"); } clib_error_t * threads_init (vlib_main_t * vm) { return 0; } VLIB_INIT_FUNCTION (threads_init); /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */