/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define _GNU_SOURCE

#include <signal.h>
#include <math.h>
#include <vppinfra/format.h>
#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>

#include <vlib/threads.h>
#include <vlib/unix/cj.h>

DECLARE_CJ_GLOBAL_LOG;

#define FRAME_QUEUE_NELTS 64

u32
vl (void *p)
{
  return vec_len (p);
}

vlib_worker_thread_t *vlib_worker_threads;
vlib_thread_main_t vlib_thread_main;

/*
 * Barrier tracing can be enabled on a normal build to collect information
 * on barrier use, including timings and call stacks. Deliberately not
 * keyed off CLIB_DEBUG, because that can add significant overhead which
 * impacts observed timings.
 */

u32
elog_global_id_for_msg_name (const char *msg_name)
{
  uword *p, r;
  static uword *h;
  u8 *name_copy;

  if (!h)
    h = hash_create_string (0, sizeof (uword));

  p = hash_get_mem (h, msg_name);
  if (p)
    return p[0];
  r = elog_string (&vlib_global_main.elog_main, "%s", msg_name);

  name_copy = format (0, "%s%c", msg_name, 0);

  hash_set_mem (h, name_copy, r);

  return r;
}

static inline void
barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  /* *INDENT-OFF* */
  ELOG_TYPE_DECLARE (e) =
    {
      .format = "bar-trace-%s-#%d",
      .format_args = "T4i4",
    };
  /* *INDENT-ON* */
  struct
  {
    u32 caller, count, t_entry, t_open, t_closed;
  } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
  ed->caller = elog_global_id_for_msg_name
    (vlib_worker_threads[0].barrier_caller);
  ed->t_entry = (int) (1000000.0 * t_entry);
  ed->t_open = (int) (1000000.0 * t_open);
  ed->t_closed = (int) (1000000.0 * t_closed);
}

static inline void
barrier_trace_sync_rec (f64 t_entry)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  /* *INDENT-OFF* */
  ELOG_TYPE_DECLARE (e) =
    {
      .format = "bar-syncrec-%s-#%d",
      .format_args = "T4i4",
    };
  /* *INDENT-ON* */
  struct
  {
    u32 caller, depth;
  } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->depth = (int) vlib_worker_threads[0].recursion_level - 1;
  ed->caller = elog_global_id_for_msg_name
    (vlib_worker_threads[0].barrier_caller);
}

static inline void
barrier_trace_release_rec (f64 t_entry)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  /* *INDENT-OFF* */
  ELOG_TYPE_DECLARE (e) =
    {
      .format = "bar-relrrec-#%d",
      .format_args = "i4",
    };
  /* *INDENT-ON* */
  struct
  {
    u32 depth;
  } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->depth = (int) vlib_worker_threads[0].recursion_level;
}

static inline void
barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  /* *INDENT-OFF* */
  ELOG_TYPE_DECLARE (e) =
    {
      .format = "bar-rel-#%d-e%d-u%d-t%d",
      .format_args = "i4i4i4i4",
    };
  /* *INDENT-ON* */
  struct
  {
    u32 count, t_entry, t_update_main, t_closed_total;
  } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->t_entry = (int) (1000000.0 * t_entry);
  ed->t_update_main = (int) (1000000.0 * t_update_main);
  ed->t_closed_total = (int) (1000000.0 * t_closed_total);
  ed->count = (int)
    vlib_worker_threads[0].barrier_sync_count;

  /* Reset context for next trace */
  vlib_worker_threads[0].barrier_context = NULL;
}

uword
os_get_nthreads (void)
{
  u32 len;

  len = vec_len (vlib_thread_stacks);
  if (len == 0)
    return 1;
  else
    return len;
}

void
vlib_set_thread_name (char *name)
{
  int pthread_setname_np (pthread_t __target_thread, const char *__name);
  int rv;
  pthread_t thread = pthread_self ();

  if (thread)
    {
      rv = pthread_setname_np (thread, name);
      if (rv)
        clib_warning ("pthread_setname_np returned %d", rv);
    }
}

static int
sort_registrations_by_no_clone (void *a0, void *a1)
{
  vlib_thread_registration_t **tr0 = a0;
  vlib_thread_registration_t **tr1 = a1;

  return ((i32) ((*tr0)->no_data_structure_clone)
          - ((i32) ((*tr1)->no_data_structure_clone)));
}

static uword *
clib_sysfs_list_to_bitmap (char *filename)
{
  FILE *fp;
  uword *r = 0;

  fp = fopen (filename, "r");

  if (fp != NULL)
    {
      u8 *buffer = 0;
      vec_validate (buffer, 256 - 1);
      if (fgets ((char *) buffer, 256, fp))
        {
          unformat_input_t in;
          unformat_init_string (&in, (char *) buffer,
                                strlen ((char *) buffer));
          if (unformat (&in, "%U", unformat_bitmap_list, &r) != 1)
            clib_warning ("unformat_bitmap_list failed");
          unformat_free (&in);
        }
      vec_free (buffer);
      fclose (fp);
    }
  return r;
}

/* Called early in the init sequence */

clib_error_t *
vlib_thread_init (vlib_main_t * vm)
{
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_worker_thread_t *w;
  vlib_thread_registration_t *tr;
  u32 n_vlib_mains = 1;
  u32 first_index = 1;
  u32 i;
  uword *avail_cpu;

  /* get bitmaps of active cpu cores and sockets */
  tm->cpu_core_bitmap =
    clib_sysfs_list_to_bitmap ("/sys/devices/system/cpu/online");
  tm->cpu_socket_bitmap =
    clib_sysfs_list_to_bitmap ("/sys/devices/system/node/online");

  avail_cpu = clib_bitmap_dup (tm->cpu_core_bitmap);

  /* skip cores */
  for (i = 0; i < tm->skip_cores; i++)
    {
      uword c = clib_bitmap_first_set (avail_cpu);
      if (c == ~0)
        return clib_error_return (0, "no available cpus to skip");

      avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
    }

  /* grab cpu for main thread */
  if (tm->main_lcore == ~0)
    {
      /* if main-lcore is not set, we try to use lcore 1 */
      if (clib_bitmap_get (avail_cpu, 1))
        tm->main_lcore = 1;
      else
        tm->main_lcore = clib_bitmap_first_set (avail_cpu);
      if (tm->main_lcore == (u8) ~ 0)
        return clib_error_return (0, "no available cpus to be used for the"
                                  " main thread");
    }
  else
    {
      if (clib_bitmap_get (avail_cpu, tm->main_lcore) == 0)
        return clib_error_return (0, "cpu %u is not available to be used"
                                  " for the main thread", tm->main_lcore);
    }
  avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0);

  /* assume that there is socket 0 only if there is no data from sysfs */
  if (!tm->cpu_socket_bitmap)
    tm->cpu_socket_bitmap = clib_bitmap_set (0, 0, 1);

  /* pin main thread to main_lcore  */
  if (tm->cb.vlib_thread_set_lcore_cb)
    {
      tm->cb.vlib_thread_set_lcore_cb (0, tm->main_lcore);
    }
  else
    {
      cpu_set_t cpuset;
      CPU_ZERO (&cpuset);
      CPU_SET (tm->main_lcore, &cpuset);
      pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t), &cpuset);
    }

  /* as many threads as stacks... */
  vec_validate_aligned (vlib_worker_threads, vec_len (vlib_thread_stacks) - 1,
                        CLIB_CACHE_LINE_BYTES);

  /* Preallocate thread 0 */
  _vec_len (vlib_worker_threads) = 1;
  w = vlib_worker_threads;
  w->thread_mheap = clib_mem_get_heap ();
  w->thread_stack = vlib_thread_stacks[0];
  w->cpu_id = tm->main_lcore;
  w->lwp = syscall (SYS_gettid);
  w->thread_id = pthread_self ();
  tm->n_vlib_mains = 1;

  if (tm->sched_policy != ~0)
    {
      struct sched_param sched_param;
      if (!sched_getparam (w->lwp, &sched_param))
        {
          if (tm->sched_priority != ~0)
            sched_param.sched_priority = tm->sched_priority;
          sched_setscheduler (w->lwp, tm->sched_policy, &sched_param);
        }
    }

  /* assign threads to cores and set n_vlib_mains */
  tr = tm->next;

  while (tr)
    {
      vec_add1 (tm->registrations, tr);
      tr = tr->next;
    }

  vec_sort_with_function (tm->registrations, sort_registrations_by_no_clone);

  for (i = 0; i < vec_len (tm->registrations); i++)
    {
      int j;
      tr = tm->registrations[i];
      tr->first_index = first_index;
      first_index += tr->count;
      n_vlib_mains += (tr->no_data_structure_clone == 0) ? tr->count : 0;

      /* construct coremask */
      if (tr->use_pthreads || !tr->count)
        continue;

      if (tr->coremask)
        {
          uword c;
          /* *INDENT-OFF* */
          clib_bitmap_foreach (c, tr->coremask, ({
            if (clib_bitmap_get(avail_cpu, c) == 0)
              return clib_error_return (0, "cpu %u is not available to be used"
                " for the '%s' thread",c, tr->name);

            avail_cpu = clib_bitmap_set(avail_cpu, c, 0);
          }));
          /* *INDENT-ON* */
        }
      else
        {
          for (j = 0; j < tr->count; j++)
            {
              uword c = clib_bitmap_first_set (avail_cpu);
              if (c == ~0)
                return clib_error_return (0,
                                          "no available cpus to be used for"
                                          " the '%s' thread", tr->name);

              avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
              tr->coremask = clib_bitmap_set (tr->coremask, c, 1);
            }
        }
    }

  clib_bitmap_free (avail_cpu);

  tm->n_vlib_mains = n_vlib_mains;

  vec_validate_aligned (vlib_worker_threads, first_index - 1,
                        CLIB_CACHE_LINE_BYTES);

  return 0;
}

vlib_frame_queue_t *
vlib_frame_queue_alloc (int nelts)
{
  vlib_frame_queue_t *fq;

  fq = clib_mem_alloc_aligned (sizeof (*fq), CLIB_CACHE_LINE_BYTES);
  clib_memset (fq, 0, sizeof (*fq));
  fq->nelts = nelts;
  fq->vector_threshold = 128;   // packets
  vec_validate_aligned (fq->elts, nelts - 1, CLIB_CACHE_LINE_BYTES);

  if (1)
    {
      if (((uword) & fq->tail) & (CLIB_CACHE_LINE_BYTES - 1))
        fformat (stderr, "WARNING: fq->tail unaligned\n");
      if (((uword) & fq->head) & (CLIB_CACHE_LINE_BYTES - 1))
        fformat (stderr, "WARNING: fq->head unaligned\n");
      if (((uword) fq->elts) & (CLIB_CACHE_LINE_BYTES - 1))
        fformat (stderr, "WARNING: fq->elts unaligned\n");

      if (sizeof (fq->elts[0]) % CLIB_CACHE_LINE_BYTES)
        fformat (stderr, "WARNING: fq->elts[0] size %d\n",
                 sizeof (fq->elts[0]));
      if (nelts & (nelts - 1))
        {
          fformat (stderr, "FATAL: nelts MUST be a power of 2\n");
          abort ();
        }
    }

  return (fq);
}

void vl_msg_api_handler_no_free (void *) __attribute__ ((weak));
void
vl_msg_api_handler_no_free (void *v)
{
}

/* Turned off, save as reference material... */
#if 0
static inline int
vlib_frame_queue_dequeue_internal (int thread_id,
                                   vlib_main_t * vm,
                                   vlib_node_main_t * nm)
{
  vlib_frame_queue_t *fq = vlib_frame_queues[thread_id];
  vlib_frame_queue_elt_t *elt;
  vlib_frame_t *f;
  vlib_pending_frame_t *p;
  vlib_node_runtime_t *r;
  u32 node_runtime_index;
  int msg_type;
  u64 before;
  int processed = 0;

  ASSERT (vm == vlib_mains[thread_id]);

  while (1)
    {
      if (fq->head == fq->tail)
        return processed;

      elt = fq->elts + ((fq->head + 1) & (fq->nelts - 1));

      if (!elt->valid)
        return processed;

      before = clib_cpu_time_now ();

      f = elt->frame;
      node_runtime_index = elt->node_runtime_index;
      msg_type = elt->msg_type;

      switch (msg_type)
        {
        case VLIB_FRAME_QUEUE_ELT_FREE_BUFFERS:
          vlib_buffer_free (vm, vlib_frame_vector_args (f), f->n_vectors);
          /* note fallthrough... */
        case VLIB_FRAME_QUEUE_ELT_FREE_FRAME:
          r = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
                                node_runtime_index);
          vlib_frame_free (vm, r, f);
          break;
        case VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME:
          vec_add2 (vm->node_main.pending_frames, p, 1);
          f->flags |= (VLIB_FRAME_PENDING | VLIB_FRAME_FREE_AFTER_DISPATCH);
          p->node_runtime_index = elt->node_runtime_index;
          p->frame_index = vlib_frame_index (vm, f);
          p->next_frame_index = VLIB_PENDING_FRAME_NO_NEXT_FRAME;
          fq->dequeue_vectors += (u64) f->n_vectors;
          break;
        case VLIB_FRAME_QUEUE_ELT_API_MSG:
          vl_msg_api_handler_no_free (f);
          break;
        default:
          clib_warning ("bogus frame queue message, type %d", msg_type);
          break;
        }
      elt->valid = 0;
      fq->dequeues++;
      fq->dequeue_ticks += clib_cpu_time_now () - before;
      CLIB_MEMORY_BARRIER ();
      fq->head++;
      processed++;
    }
  ASSERT (0);
  return processed;
}

int
vlib_frame_queue_dequeue (int thread_id,
                          vlib_main_t * vm, vlib_node_main_t * nm)
{
  return vlib_frame_queue_dequeue_internal (thread_id, vm, nm);
}

int
vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,
                          u32 frame_queue_index, vlib_frame_t * frame,
                          vlib_frame_queue_msg_type_t type)
{
  vlib_frame_queue_t *fq = vlib_frame_queues[frame_queue_index];
  vlib_frame_queue_elt_t *elt;
  u32 save_count;
  u64 new_tail;
  u64 before = clib_cpu_time_now ();

  ASSERT (fq);

  new_tail = clib_atomic_add_fetch (&fq->tail, 1);

  /* Wait until a ring slot is available */
  while (new_tail >= fq->head + fq->nelts)
    {
      f64 b4 = vlib_time_now_ticks (vm, before);
      vlib_worker_thread_barrier_check (vm, b4);
      /* Bad idea. Dequeue -> enqueue -> dequeue -> trouble */
      // vlib_frame_queue_dequeue (vm->thread_index, vm, nm);
    }

  elt = fq->elts + (new_tail & (fq->nelts - 1));

  /* this would be very bad... */
  while (elt->valid)
    {
    }

  /* Once we enqueue the frame, frame->n_vectors is owned elsewhere... */
  save_count = frame->n_vectors;

  elt->frame = frame;
  elt->node_runtime_index = node_runtime_index;
  elt->msg_type = type;
  CLIB_MEMORY_BARRIER ();
  elt->valid = 1;

  return save_count;
}
#endif /* 0 */

/* To be called by vlib worker threads upon startup */
void
vlib_worker_thread_init (vlib_worker_thread_t * w)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  /*
   * Note: disabling signals in worker threads as follows
   * prevents the api post-mortem dump scheme from working
   * {
   *    sigset_t s;
   *    sigfillset (&s);
   *    pthread_sigmask (SIG_SETMASK, &s, 0);
   * }
   */

  clib_mem_set_heap (w->thread_mheap);

  if (vec_len (tm->thread_prefix) && w->registration->short_name)
    {
      w->name = format (0, "%v_%s_%d%c", tm->thread_prefix,
                        w->registration->short_name, w->instance_id, '\0');
      vlib_set_thread_name ((char *) w->name);
    }

  if (!w->registration->use_pthreads)
    {

      /* Initial barrier sync, for both worker and i/o threads */
      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);

      while (*vlib_worker_threads->wait_at_barrier)
        ;

      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
    }
}

void *
vlib_worker_thread_bootstrap_fn (void *arg)
{
  void *rv;
  vlib_worker_thread_t *w = arg;

  w->lwp = syscall (SYS_gettid);
  w->thread_id = pthread_self ();

  __os_thread_index = w - vlib_worker_threads;

  rv = (void *) clib_calljmp
    ((uword (*)(uword)) w->thread_function,
     (uword) arg, w->thread_stack + VLIB_THREAD_STACK_SIZE);
  /* NOTREACHED, we hope */
  return rv;
}

static void
vlib_get_thread_core_socket (vlib_worker_thread_t * w, unsigned cpu_id)
{
  const char *sys_cpu_path = "/sys/devices/system/cpu/cpu";
  u8 *p = 0;
  int core_id = -1, socket_id = -1;

  p = format (p, "%s%u/topology/core_id%c", sys_cpu_path, cpu_id, 0);
  clib_sysfs_read ((char *) p, "%d", &core_id);
  vec_reset_length (p);
  p = format (p, "%s%u/topology/physical_package_id%c", sys_cpu_path,
              cpu_id, 0);
  clib_sysfs_read ((char *) p, "%d", &socket_id);
  vec_free (p);

  w->core_id = core_id;
  w->socket_id = socket_id;
}

static clib_error_t *
vlib_launch_thread_int (void *fp, vlib_worker_thread_t * w, unsigned cpu_id)
{
  vlib_thread_main_t *tm = &vlib_thread_main;
  void *(*fp_arg) (void *) = fp;

  w->cpu_id = cpu_id;
  vlib_get_thread_core_socket (w, cpu_id);
  if (tm->cb.vlib_launch_thread_cb && !w->registration->use_pthreads)
    return tm->cb.vlib_launch_thread_cb (fp, (void *) w, cpu_id);
  else
    {
      pthread_t worker;
      cpu_set_t cpuset;
      CPU_ZERO (&cpuset);
      CPU_SET (cpu_id, &cpuset);

      if (pthread_create (&worker, NULL /* attr */ , fp_arg, (void *) w))
        return clib_error_return_unix (0, "pthread_create");

      if (pthread_setaffinity_np (worker, sizeof (cpu_set_t), &cpuset))
        return clib_error_return_unix (0, "pthread_setaffinity_np");

      return 0;
    }
}

static clib_error_t *
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlibmemory/api.h>
#include <vlib/vlib.h>

#include <vppinfra/hash.h>
#include <vppinfra/error.h>
#include <vppinfra/elog.h>

#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vnet/ip/ip.h>
#include <vnet/udp/udp.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/session/session.h>

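/* Error counter strings, generated from the entries in udp_error.def */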
static char *udp_error_strings[] = {
#define udp_error(n,s) s,
#include "udp_error.def"
#undef udp_error
};

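/* Per-packet trace record, captured when packet tracing is enabled */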
typedef struct
{
  u32 connection;
  u32 disposition;
  u32 thread_index;
} udp_input_trace_t;

/* packet trace format function */
static u8 *
format_udp_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  udp_input_trace_t *t = va_arg (*args, udp_input_trace_t *);

  s = format (s, "UDP_INPUT: connection %d, disposition %d, thread %d",
	      t->connection, t->disposition, t->thread_index);
  return s;
}

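/* Next nodes for udp{4,6}-input; datagram payload is consumed into session
 * fifos rather than forwarded, so the only disposition is error-drop */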
#define foreach_udp_input_next			\
  _ (DROP, "error-drop")

typedef enum
{
#define _(s, n) UDP_INPUT_NEXT_##s,
  foreach_udp_input_next
#undef _
    UDP_INPUT_N_NEXT,
} udp_input_next_t;

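/* Bump a per-node error counter on the ip4 or ip6 flavour of the node */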
always_inline void
udp_input_inc_counter (vlib_main_t * vm, u8 is_ip4, u8 evt, u8 val)
{
  if (PREDICT_TRUE (!val))
    return;

  if (is_ip4)
    vlib_node_increment_counter (vm, udp4_input_node.index, evt, val);
  else
    vlib_node_increment_counter (vm, udp6_input_node.index, evt, val);
}

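/* Shared ip4/ip6 worker: for each datagram, find the owning session and
 * hand its payload to the session layer */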
always_inline uword
udp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		    vlib_frame_t * frame, u8 is_ip4)
{
  u32 n_left_from, *from, *to_next;
  u32 next_index, errors;
  u32 my_thread_index = vm->thread_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0, fib_index0;
	  vlib_buffer_t *b0;
	  u32 next0 = UDP_INPUT_NEXT_DROP;
	  u32 error0 = UDP_ERROR_ENQUEUED;
	  udp_header_t *udp0;
	  ip4_header_t *ip40;
	  ip6_header_t *ip60;
	  u8 *data0;
	  stream_session_t *s0;
	  udp_connection_t *uc0, *child0, *new_uc0;
	  transport_connection_t *tc0;
	  int wrote0;
	  void *rmt_addr, *lcl_addr;
	  session_dgram_hdr_t hdr0;

	  /* speculatively enqueue b0 to the current next frame */
	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);

	  /* udp_local hands us a pointer to the udp data */
	  data0 = vlib_buffer_get_current (b0);
	  udp0 = (udp_header_t *) (data0 - sizeof (*udp0));
	  fib_index0 = vnet_buffer (b0)->ip.fib_index;

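          /* Look up the session (or listener) that owns this connection tuple */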
	  if (is_ip4)
	    {
	      /* TODO: must fix once udp_local does ip options correctly */
	      ip40 = (ip4_header_t *) (((u8 *) udp0) - sizeof (*ip40));
	      s0 = session_lookup_safe4 (fib_index0, &ip40->dst_address,
					 &ip40->src_address, udp0->dst_port,
					 udp0->src_port, TRANSPORT_PROTO_UDP);
	      lcl_addr = &ip40->dst_address;
	      rmt_addr = &ip40->src_address;

	    }
	  else
	    {
	      ip60 = (ip6_header_t *) (((u8 *) udp0) - sizeof (*ip60));
	      s0 = session_lookup_safe6 (fib_index0, &ip60->dst_address,
					 &ip60->src_address, udp0->dst_port,
					 udp0->src_port, TRANSPORT_PROTO_UDP);
	      lcl_addr = &ip60->dst_address;
	      rmt_addr = &ip60->src_address;
	    }

	  if (PREDICT_FALSE (!s0))
	    {
	      error0 = UDP_ERROR_NO_LISTENER;
	      goto trace0;
	    }

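          /* First datagram back on a session opened via connect(); finish
           * setting up its transport */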
	  if (s0->session_state == SESSION_STATE_OPENED)
	    {
	      /* TODO optimization: move cl session to right thread
	       * However, since such a move would affect the session handle,
	       * which we pass 'raw' to the app, we'd also have to notify the
	       * app of the change or change the way we pass handles to apps.
	       */
	      tc0 = session_get_transport (s0);
	      uc0 = udp_get_connection_from_transport (tc0);
	      if (uc0->is_connected)
		{
		  /*
		   * Clone the transport. It will be cleaned up with the
		   * session once we notify the session layer.
		   */
		  new_uc0 = udp_connection_clone_safe (s0->connection_index,
						       s0->thread_index);
		  ASSERT (s0->session_index == new_uc0->c_s_index);

		  /*
		   * Drop the 'lock' on pool resize
		   */
		  session_pool_remove_peeker (s0->thread_index);
		  session_dgram_connect_notify (&new_uc0->connection,
						s0->thread_index, &s0);
		  tc0 = &new_uc0->connection;
		}
	    }
	  else if (s0->session_state == SESSION_STATE_READY)
	    {
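	      /* Established connection: use the session's transport as is */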
	      tc0 = session_get_transport (s0);
	      uc0 = udp_get_connection_from_transport (tc0);
	    }
	  else if (s0->session_state == SESSION_STATE_LISTENING)
	    {
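	      /* Datagram for a listener; a connected listener spawns a
	       * child connection for the new peer */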
	      tc0 = listen_session_get_transport (s0);
	      uc0 = udp_get_connection_from_transport (tc0);
	      if (uc0->is_connected)
		{
		  child0 = udp_connection_alloc (my_thread_index);
		  if (is_ip4)
		    {
		      ip_set (&child0->c_lcl_ip, &ip40->dst_address, 1);
		      ip_set (&child0->c_rmt_ip, &ip40->