aboutsummaryrefslogtreecommitdiffstats
path: root/src/vnet/udp/udp_pg.c
blob: 621beb792770e0cf2f653723ca4814e3e50304c6 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
/*
 * Copyright (c) 2015-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ip/udp_pg: UDP packet-generator interface
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 *  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 *  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 *  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 *  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 *  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 *  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <vnet/pg/pg.h>
#include <vnet/ip/ip.h>		/* for unformat_udp_udp_port */
#include <vnet/udp/udp.h>

#define UDP_PG_EDIT_LENGTH (1 << 0)
#define UDP_PG_EDIT_CHECKSUM (1 << 1)

/**
 * Fix up UDP length and/or checksum fields at packet-generation time.
 *
 * Called per packet batch for fields the stream left unspecified.
 * @param pg        packet-generator main struct (unused here, kept for the
 *                  pg edit-function signature)
 * @param s         stream being generated (unused here)
 * @param g         UDP edit group; (g - 1) is assumed to be the IP4 group
 * @param packets   vector of buffer indices to edit
 * @param n_packets number of entries in @a packets
 * @param flags     any combination of UDP_PG_EDIT_LENGTH / UDP_PG_EDIT_CHECKSUM;
 *                  passed as a compile-time constant by the dispatcher so the
 *                  compiler can specialize each variant
 */
always_inline void
udp_pg_edit_function_inline (pg_main_t * pg,
			     pg_stream_t * s,
			     pg_edit_group_t * g,
			     u32 * packets, u32 n_packets, u32 flags)
{
  vlib_main_t *vm = vlib_get_main ();
  u32 ip_offset, udp_offset;

  /* The UDP header group directly follows the IP4 header group. */
  udp_offset = g->start_byte_offset;
  ip_offset = (g - 1)->start_byte_offset;

  while (n_packets >= 1)
    {
      vlib_buffer_t *p0;
      ip4_header_t *ip0;
      udp_header_t *udp0;
      u32 udp_len0;

      p0 = vlib_get_buffer (vm, packets[0]);
      n_packets -= 1;
      packets += 1;

      ip0 = (void *) (p0->data + ip_offset);
      udp0 = (void *) (p0->data + udp_offset);
      /* UDP length covers header + payload: everything past udp_offset. */
      udp_len0 = vlib_buffer_length_in_chain (vm, p0) - udp_offset;

      if (flags & UDP_PG_EDIT_LENGTH)
	udp0->length = clib_host_to_net_u16 (udp_len0);
      else
	/* Length was fixed by the stream: read it back into host order.
	   (Was clib_host_to_net_u16; equivalent bytes-wise since the u16
	   swap is involutive, but net-to-host is the correct direction.) */
	udp_len0 = clib_net_to_host_u16 (udp0->length);

      /* Initialize checksum with the IP pseudo-header. */
      if (flags & UDP_PG_EDIT_CHECKSUM)
	{
	  ip_csum_t sum0;

	  /* Sum src + dst addresses in one unaligned 64-bit load. */
	  sum0 = clib_mem_unaligned (&ip0->src_address, u64);

	  /* Add protocol and UDP length (pseudo-header remainder). */
	  sum0 = ip_csum_with_carry
	    (sum0, clib_host_to_net_u32 (udp_len0 + (ip0->protocol << 16)));

	  /* Invalidate possibly old checksum. */
	  udp0->checksum = 0;

	  sum0 =
	    ip_incremental_checksum_buffer (vm, p0, udp_offset, udp_len0,
					    sum0);

	  sum0 = ~ip_csum_fold (sum0);

	  /* Zero checksum means checksumming disabled (RFC 768):
	     transmit 0xffff instead. */
	  sum0 = sum0 != 0 ? sum0 : 0xffff;

	  udp0->checksum = sum0;
	}
    }
}

/* Dispatch to the inline worker with compile-time-constant flags so the
 * compiler emits a specialized copy for each flag combination recorded
 * at stream-parse time in g->edit_function_opaque. */
static void
udp_pg_edit_function (pg_main_t * pg,
		      pg_stream_t * s,
		      pg_edit_group_t * g, u32 * packets, u32 n_packets)
{
  u32 opaque = g->edit_function_opaque;

  if (opaque == UDP_PG_EDIT_LENGTH)
    udp_pg_edit_function_inline (pg, s, g, packets, n_packets,
				 UDP_PG_EDIT_LENGTH);
  else if (opaque == UDP_PG_EDIT_CHECKSUM)
    udp_pg_edit_function_inline (pg, s, g, packets, n_packets,
				 UDP_PG_EDIT_CHECKSUM);
  else if (opaque == (UDP_PG_EDIT_CHECKSUM | UDP_PG_EDIT_LENGTH))
    udp_pg_edit_function_inline (pg, s, g, packets, n_packets,
				 UDP_PG_EDIT_CHECKSUM | UDP_PG_EDIT_LENGTH);
  else
    /* Opaque value set anywhere else would be a programming error. */
    ASSERT (0);
}

/* Editable view of a UDP header for the packet generator.
 * Field order must mirror udp_header_t exactly. */
typedef struct
{
  pg_edit_t src_port, dst_port;
  pg_edit_t length;
  pg_edit_t checksum;
} pg_udp_header_t;

/* Record the byte offset of each UDP header field in its pg_edit_t. */
static inline void
pg_udp_header_init (pg_udp_header_t * p)
{
  pg_edit_init (&p->src_port, udp_header_t, src_port);
  pg_edit_init (&p->dst_port, udp_header_t, dst_port);
  pg_edit_init (&p->length, udp_header_t, length);
  pg_edit_init (&p->checksum, udp_header_t, checksum);
}

/**
 * unformat function for pg streams of the form
 *   "UDP: <src-port> -> <dst-port> [length <n>] [checksum <n>] ...",
 * then hands the remaining input to the next-level protocol parser
 * (looked up by fixed destination port) or to the generic payload parser.
 *
 * @param input  unformat input stream positioned at the UDP spec
 * @param args   single vararg: pg_stream_t * being built
 * @return 1 on successful parse, 0 on failure (edit group freed)
 */
uword
unformat_pg_udp_header (unformat_input_t * input, va_list * args)
{
  pg_stream_t *s = va_arg (*args, pg_stream_t *);
  pg_udp_header_t *p;
  u32 group_index;

  /* Allocate an edit group covering the fixed-size UDP header. */
  p = pg_create_edit_group (s, sizeof (p[0]), sizeof (udp_header_t),
			    &group_index);
  pg_udp_header_init (p);

  /* Defaults. */
  /* UNSPECIFIED here means: compute at generation time via
     udp_pg_edit_function (see the flag logic at the bottom). */
  p->checksum.type = PG_EDIT_UNSPECIFIED;
  p->length.type = PG_EDIT_UNSPECIFIED;

  if (!unformat (input, "UDP: %U -> %U",
		 unformat_pg_edit,
		 unformat_tcp_udp_port, &p->src_port,
		 unformat_pg_edit, unformat_tcp_udp_port, &p->dst_port))
    goto error;

  /* Parse options. */
  while (1)
    {
      if (unformat (input, "length %U",
		    unformat_pg_edit, unformat_pg_number, &p->length))
	;

      else if (unformat (input, "checksum %U",
			 unformat_pg_edit, unformat_pg_number, &p->checksum))
	;

      /* Can't parse input: try next protocol level. */
      else
	break;
    }

  {
    ip_main_t *im = &ip_main;
    u16 dst_port;
    tcp_udp_port_info_t *pi;

    /* For the pg format of applications over UDP local */
    udp_dst_port_info_t *pi2 = NULL;

    pi = 0;
    /* Only a fixed (non-random/non-incrementing) destination port can
       select a next-level protocol parser. */
    if (p->dst_port.type == PG_EDIT_FIXED)
      {
	dst_port = pg_edit_get_value (&p->dst_port, PG_EDIT_LO);
	pi = ip_get_tcp_udp_port_info (im, dst_port);
	/* Try the UDP-local registry for IP4 first, then IP6. */
	pi2 = udp_get_dst_port_info (&udp_main, dst_port, UDP_IP4);
	if (!pi2)
	  pi2 = udp_get_dst_port_info (&udp_main, dst_port, UDP_IP6);
      }

    if (pi && pi->unformat_pg_edit
	&& unformat_user (input, pi->unformat_pg_edit, s))
      ;
    else if (pi2 && pi2->unformat_pg_edit
	     && unformat_user (input, pi2->unformat_pg_edit, s))
      ;
    else if (!unformat_user (input, unformat_pg_payload, s))
      goto error;

    /* Inner parsers may have grown the edit vector: re-fetch our group
       pointer; the old `p` may be stale. */
    p = pg_get_edit_group (s, group_index);
    if (p->checksum.type == PG_EDIT_UNSPECIFIED
	|| p->length.type == PG_EDIT_UNSPECIFIED)
      {
	/* Install the runtime fixup for whichever fields were left
	   unspecified; the opaque value carries the flag set. */
	pg_edit_group_t *g = pg_stream_get_group (s, group_index);
	g->edit_function = udp_pg_edit_function;
	g->edit_function_opaque = 0;
	if (p->checksum.type == PG_EDIT_UNSPECIFIED)
	  g->edit_function_opaque |= UDP_PG_EDIT_CHECKSUM;
	if (p->length.type == PG_EDIT_UNSPECIFIED)
	  g->edit_function_opaque |= UDP_PG_EDIT_LENGTH;
      }

    return 1;
  }

error:
  /* Free up any edits we may have added. */
  pg_free_edit_group (s);
  return 0;
}


/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
class="p">) { /* yes, loser; try next larger pool */ ap[i].misses++; if (pool == 0) pthread_mutex_unlock (&q->mutex); continue; } /* OK, we have a winner */ ap[i].hits++; /* * Remember the source queue, although we * don't need to know the queue to free the item. */ rv->q = q; q->head++; if (q->head == q->maxsize) q->head = 0; if (pool == 0) pthread_mutex_unlock (&q->mutex); goto out; } /* * Request too big, or head element of all size-compatible rings * still in use. Fall back to shared-memory malloc. */ am->ring_misses++; pthread_mutex_lock (&am->vlib_rp->mutex); oldheap = svm_push_data_heap (am->vlib_rp); rv = clib_mem_alloc (nbytes); rv->q = 0; svm_pop_heap (oldheap); pthread_mutex_unlock (&am->vlib_rp->mutex); out: rv->data_len = htonl (nbytes - sizeof (msgbuf_t)); return (rv->data); } void * vl_msg_api_alloc (int nbytes) { int pool; api_main_t *am = &api_main; vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr; /* * Clients use pool-0, vlib proc uses pool 1 */ pool = (am->our_pid == shmem_hdr->vl_pid); return vl_msg_api_alloc_internal (nbytes, pool); } void * vl_msg_api_alloc_as_if_client (int nbytes) { return vl_msg_api_alloc_internal (nbytes, 0); } void vl_msg_api_free (void *a) { msgbuf_t *rv; void *oldheap; api_main_t *am = &api_main; rv = (msgbuf_t *) (((u8 *) a) - offsetof (msgbuf_t, data)); /* * Here's the beauty of the scheme. Only one proc/thread has * control of a given message buffer. To free a buffer, we just clear the * queue field, and leave. No locks, no hits, no errors... */ if (rv->q) { rv->q = 0; return; } pthread_mutex_lock (&am->vlib_rp->mutex); oldheap = svm_push_data_heap (am->vlib_rp); clib_mem_free (rv); svm_pop_heap (oldheap); pthread_mutex_unlock (&am->vlib_rp->mutex); } static void vl_msg_api_free_nolock (void *a) { msgbuf_t *rv; void *oldheap; api_main_t *am = &api_main; rv = (msgbuf_t *) (((u8 *) a) - offsetof (msgbuf_t, data)); /* * Here's the beauty of the scheme. Only one proc/thread has * control of a given message buffer. 
To free a buffer, we just clear the * queue field, and leave. No locks, no hits, no errors... */ if (rv->q) { rv->q = 0; return; } oldheap = svm_push_data_heap (am->vlib_rp); clib_mem_free (rv); svm_pop_heap (oldheap); } void vl_set_memory_root_path (char *name) { api_main_t *am = &api_main; am->root_path = name; } void vl_set_memory_uid (int uid) { api_main_t *am = &api_main; am->api_uid = uid; } void vl_set_memory_gid (int gid) { api_main_t *am = &api_main; am->api_gid = gid; } void vl_set_global_memory_baseva (u64 baseva) { api_main_t *am = &api_main; am->global_baseva = baseva; } void vl_set_global_memory_size (u64 size) { api_main_t *am = &api_main; am->global_size = size; } void vl_set_api_memory_size (u64 size) { api_main_t *am = &api_main; am->api_size = size; } void vl_set_global_pvt_heap_size (u64 size) { api_main_t *am = &api_main; am->global_pvt_heap_size = size; } void vl_set_api_pvt_heap_size (u64 size) { api_main_t *am = &api_main; am->api_pvt_heap_size = size; } int vl_map_shmem (char *region_name, int is_vlib) { svm_map_region_args_t _a, *a = &_a; svm_region_t *vlib_rp, *root_rp; void *oldheap; vl_shmem_hdr_t *shmem_hdr = 0; api_main_t *am = &api_main; int i; struct timespec ts, tsrem; if (is_vlib == 0) svm_region_init_chroot (am->root_path); memset (a, 0, sizeof (*a)); a->name = region_name; a->size = am->api_size ? am->api_size : (16 << 20); a->flags = SVM_FLAGS_MHEAP; a->uid = am->api_uid; a->gid = am->api_gid; a->pvt_heap_size = am->api_pvt_heap_size; vlib_rp = svm_region_find_or_create (a); if (vlib_rp == 0) return (-2); pthread_mutex_lock (&vlib_rp->mutex); /* Has someone else set up the shared-memory variable table? */ if (vlib_rp->user_ctx) { am->shmem_hdr = (void *) vlib_rp->user_ctx; am->our_pid = getpid (); if (is_vlib) { unix_shared_memory_queue_t *q; uword old_msg; /* * application restart. Reset cached pids, API message * rings, list of clients; otherwise, various things * fail. (e.g. 
queue non-empty notification) */ /* ghosts keep the region from disappearing properly */ svm_client_scan_this_region_nolock (vlib_rp); am->shmem_hdr->application_restarts++; q = am->shmem_hdr->vl_input_queue; am->shmem_hdr->vl_pid = getpid (); q->consumer_pid = am->shmem_hdr->vl_pid; /* Drain the input queue, freeing msgs */ for (i = 0; i < 10; i++) { if (pthread_mutex_trylock (&q->mutex) == 0) { pthread_mutex_unlock (&q->mutex); goto mutex_ok; } ts.tv_sec = 0; ts.tv_nsec = 10000 * 1000; /* 10 ms */ while (nanosleep (&ts, &tsrem) < 0) ts = tsrem; } /* Mutex buggered, "fix" it */ memset (&q->mutex, 0, sizeof (q->mutex)); clib_warning ("forcibly release main input queue mutex"); mutex_ok: am->vlib_rp = vlib_rp; while (unix_shared_memory_queue_sub (q, (u8 *) & old_msg, 1 /* nowait */ ) != -2 /* queue underflow */ ) { vl_msg_api_free_nolock ((void *) old_msg); am->shmem_hdr->restart_reclaims++; } pthread_mutex_unlock (&vlib_rp->mutex); root_rp = svm_get_root_rp (); ASSERT (root_rp); /* Clean up the root region client list */ pthread_mutex_lock (&root_rp->mutex); svm_client_scan_this_region_nolock (root_rp); pthread_mutex_unlock (&root_rp->mutex); } else { pthread_mutex_unlock (&vlib_rp->mutex); } am->vlib_rp = vlib_rp; vec_add1 (am->mapped_shmem_regions, vlib_rp); return 0; } /* Clients simply have to wait... */ if (!is_vlib) { pthread_mutex_unlock (&vlib_rp->mutex); /* Wait up to 100 seconds... */ for (i = 0; i < 10000; i++) { ts.tv_sec = 0; ts.tv_nsec = 10000 * 1000; /* 10 ms */ while (nanosleep (&ts, &tsrem) < 0) ts = tsrem; if (vlib_rp->user_ctx) goto ready; } /* Clean up and leave... */ svm_region_unmap (vlib_rp); clib_warning ("region init fail"); return (-2); ready: am->shmem_hdr = (void *) vlib_rp->user_ctx; am->our_pid = getpid (); am->vlib_rp = vlib_rp; vec_add1 (am->mapped_shmem_regions, vlib_rp); return 0; } /* Nope, it's our problem... 
*/ oldheap = svm_push_data_heap (vlib_rp); vec_validate (shmem_hdr, 0); shmem_hdr->version = VL_SHM_VERSION; /* vlib main input queue */ shmem_hdr->vl_input_queue = unix_shared_memory_queue_init (1024, sizeof (uword), getpid (), am->vlib_signal); /* Set up the msg ring allocator */ #define _(sz,n) \ do { \ ring_alloc_t _rp; \ _rp.rp = unix_shared_memory_queue_init ((n), (sz), 0, 0); \ _rp.size = (sz); \ _rp.nitems = n; \ _rp.hits = 0; \ _rp.misses = 0; \ vec_add1(shmem_hdr->vl_rings, _rp); \ } while (0); foreach_vl_aring_size; #undef _ #define _(sz,n) \ do { \ ring_alloc_t _rp; \ _rp.rp = unix_shared_memory_queue_init ((n), (sz), 0, 0); \ _rp.size = (sz); \ _rp.nitems = n; \ _rp.hits = 0; \ _rp.misses = 0; \ vec_add1(shmem_hdr->client_rings, _rp); \ } while (0); foreach_clnt_aring_size; #undef _ am->shmem_hdr = shmem_hdr; am->vlib_rp = vlib_rp; am->our_pid = getpid (); if (is_vlib) am->shmem_hdr->vl_pid = am->our_pid; svm_pop_heap (oldheap); /* * After absolutely everything that a client might see is set up, * declare the shmem region valid */ vlib_rp->user_ctx = shmem_hdr; pthread_mutex_unlock (&vlib_rp->mutex); vec_add1 (am->mapped_shmem_regions, vlib_rp); return 0; } void vl_register_mapped_shmem_region (svm_region_t * rp) { api_main_t *am = &api_main; vec_add1 (am->mapped_shmem_regions, rp); } void vl_unmap_shmem (void) { svm_region_t *rp; int i; api_main_t *am = &api_main; if (!svm_get_root_rp ()) return; for (i = 0; i < vec_len (am->mapped_shmem_regions); i++) { rp = am->mapped_shmem_regions[i]; svm_region_unmap (rp); } vec_free (am->mapped_shmem_regions); am->shmem_hdr = 0; svm_region_exit (); /* $$$ more careful cleanup, valgrind run... 
*/ vec_free (am->msg_handlers); vec_free (am->msg_endian_handlers); vec_free (am->msg_print_handlers); } void vl_msg_api_send_shmem (unix_shared_memory_queue_t * q, u8 * elem) { api_main_t *am = &api_main; uword *trace = (uword *) elem; if (am->tx_trace && am->tx_trace->enabled) vl_msg_api_trace (am, am->tx_trace, (void *) trace[0]); (void) unix_shared_memory_queue_add (q, elem, 0 /* nowait */ ); } void vl_msg_api_send_shmem_nolock (unix_shared_memory_queue_t * q, u8 * elem) { api_main_t *am = &api_main; uword *trace = (uword *) elem; if (am->tx_trace && am->tx_trace->enabled) vl_msg_api_trace (am, am->tx_trace, (void *) trace[0]); (void) unix_shared_memory_queue_add_nolock (q, elem); } static void vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) { api_main_t *am = &api_main; int rv; am->my_client_index = mp->index; am->my_registration = (vl_api_registration_t *) (uword) mp->handle; rv = ntohl (mp->response); if (rv < 0) clib_warning ("WARNING: API mismatch detected"); } void vl_client_add_api_signatures (vl_api_memclnt_create_t * mp) __attribute__ ((weak)); void vl_client_add_api_signatures (vl_api_memclnt_create_t * mp) { int i; for (i = 0; i < ARRAY_LEN (mp->api_versions); i++) mp->api_versions[i] = 0; } int vl_client_connect (char *name, int ctx_quota, int input_queue_size) { svm_region_t *svm; vl_api_memclnt_create_t *mp; vl_api_memclnt_create_reply_t *rp; unix_shared_memory_queue_t *vl_input_queue; vl_shmem_hdr_t *shmem_hdr; int rv = 0; void *oldheap; api_main_t *am = &api_main; if (am->my_registration) { clib_warning ("client %s already connected...", name); return -1; } if (am->vlib_rp == 0) { clib_warning ("am->vlib_rp NULL"); return -1; } svm = am->vlib_rp; shmem_hdr = am->shmem_hdr; if (shmem_hdr == 0 || shmem_hdr->vl_input_queue == 0) { clib_warning ("shmem_hdr / input queue NULL"); return -1; } pthread_mutex_lock (&svm->mutex); oldheap = svm_push_data_heap (svm); vl_input_queue = unix_shared_memory_queue_init 
(input_queue_size, sizeof (uword), getpid (), 0); pthread_mutex_unlock (&svm->mutex); svm_pop_heap (oldheap); am->my_client_index = ~0; am->my_registration = 0; am->vl_input_queue = vl_input_queue; mp = vl_msg_api_alloc (sizeof (vl_api_memclnt_create_t)); memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_MEMCLNT_CREATE); mp->ctx_quota = ctx_quota; mp->input_queue = (uword) vl_input_queue; strncpy ((char *) mp->name, name, sizeof (mp->name) - 1); vl_client_add_api_signatures (mp); vl_msg_api_send_shmem (shmem_hdr->vl_input_queue, (u8 *) & mp); while (1) { int qstatus; struct timespec ts, tsrem; int i; /* Wait up to 10 seconds */ for (i = 0; i < 1000; i++) { qstatus = unix_shared_memory_queue_sub (vl_input_queue, (u8 *) & rp, 1 /* nowait */ ); if (qstatus == 0) goto read_one_msg; ts.tv_sec = 0; ts.tv_nsec = 10000 * 1000; /* 10 ms */ while (nanosleep (&ts, &tsrem) < 0) ts = tsrem; } /* Timeout... */ clib_warning ("memclnt_create_reply timeout"); return -1; read_one_msg: if (ntohs (rp->_vl_msg_id) != VL_API_MEMCLNT_CREATE_REPLY) { clib_warning ("unexpected reply: id %d", ntohs (rp->_vl_msg_id)); continue; } rv = clib_net_to_host_u32 (rp->response); vl_msg_api_handler ((void *) rp); break; } return (rv); } static void vl_api_memclnt_delete_reply_t_handler (vl_api_memclnt_delete_reply_t * mp) { void *oldheap; api_main_t *am = &api_main; pthread_mutex_lock (&am->vlib_rp->mutex); oldheap = svm_push_data_heap (am->vlib_rp); unix_shared_memory_queue_free (am->vl_input_queue); pthread_mutex_unlock (&am->vlib_rp->mutex); svm_pop_heap (oldheap); am->my_client_index = ~0; am->my_registration = 0; am->vl_input_queue = 0; } void vl_client_disconnect (void) { vl_api_memclnt_delete_t *mp; vl_api_memclnt_delete_reply_t *rp; unix_shared_memory_queue_t *vl_input_queue; vl_shmem_hdr_t *shmem_hdr; time_t begin; api_main_t *am = &api_main; ASSERT (am->vlib_rp); shmem_hdr = am->shmem_hdr; ASSERT (shmem_hdr && shmem_hdr->vl_input_queue); vl_input_queue = am->vl_input_queue; mp = 
vl_msg_api_alloc (sizeof (vl_api_memclnt_delete_t)); memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_MEMCLNT_DELETE); mp->index = am->my_client_index; mp->handle = (uword) am->my_registration; vl_msg_api_send_shmem (shmem_hdr->vl_input_queue, (u8 *) & mp); /* * Have to be careful here, in case the client is disconnecting * because e.g. the vlib process died, or is unresponsive. */ begin = time (0); while (1) { time_t now; now = time (0); if (now >= (begin + 2)) { clib_warning ("peer unresponsive, give up"); am->my_client_index = ~0; am->my_registration = 0; am->shmem_hdr = 0; break; } if (unix_shared_memory_queue_sub (vl_input_queue, (u8 *) & rp, 1) < 0) continue; /* drain the queue */ if (ntohs (rp->_vl_msg_id) != VL_API_MEMCLNT_DELETE_REPLY) { vl_msg_api_handler ((void *) rp); continue; } vl_msg_api_handler ((void *) rp); break; } } static inline vl_api_registration_t * vl_api_client_index_to_registration_internal (u32 handle) { vl_api_registration_t **regpp; vl_api_registration_t *regp; api_main_t *am = &api_main; u32 index; index = vl_msg_api_handle_get_index (handle); if ((am->shmem_hdr->application_restarts & VL_API_EPOCH_MASK) != vl_msg_api_handle_get_epoch (handle)) { vl_msg_api_increment_missing_client_counter (); return 0; } regpp = am->vl_clients + index; if (pool_is_free (am->vl_clients, regpp)) { vl_msg_api_increment_missing_client_counter (); return 0; } regp = *regpp; return (regp); } vl_api_registration_t * vl_api_client_index_to_registration (u32 index) { return (vl_api_client_index_to_registration_internal (index)); } unix_shared_memory_queue_t * vl_api_client_index_to_input_queue (u32 index) { vl_api_registration_t *regp; regp = vl_api_client_index_to_registration_internal (index); if (!regp) return 0; return (regp->vl_input_queue); } #define foreach_api_client_msg \ _(MEMCLNT_CREATE_REPLY, memclnt_create_reply) \ _(MEMCLNT_DELETE_REPLY, memclnt_delete_reply) int vl_client_api_map (char *region_name) { int rv; if ((rv = vl_map_shmem 
(region_name, 0 /* is_vlib */ )) < 0) { return rv; } #define _(N,n) \ vl_msg_api_set_handlers(VL_API_##N, 0 /* name */, \ vl_api_##n##_t_handler, \ 0/* cleanup */, 0/* endian */, 0/* print */, \ sizeof(vl_api_##n##_t), 1); foreach_api_client_msg; #undef _ return 0; } void vl_client_api_unmap (void) { vl_unmap_shmem (); } /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */