path: root/src/vnet/handoff.h
blob: f50b86d5c6d0166ec98a241efd5a2ca991419563 (plain)
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vnet_handoff_h
#define included_vnet_handoff_h

#include <vlib/vlib.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/mpls/packet.h>

static inline u64
ipv4_get_key (ip4_header_t * ip)
{
  u64 hash_key;

  hash_key = *((u64 *) (&ip->address_pair)) ^ ip->protocol;

  return hash_key;
}
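
/*
 * The single u64 load above relies on ip4_header_t exposing
 * src_address and dst_address as one contiguous 8-byte address_pair,
 * so one load covers both addresses before the protocol is XORed in.
 *
 * Illustrative sketch only (not part of the original header): how a
 * handoff path typically reduces such a key to a worker thread index.
 * The function and parameter names here are assumptions for the
 * example, and n_workers is assumed to be non-zero.
 */
static inline u32
handoff_worker_from_key_example (u64 hash_key, u32 first_worker_index,
				 u32 n_workers)
{
  /* Any stable reduction of the key works; modulo is the simplest. */
  return first_worker_index + (u32) (hash_key % n_workers);
}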

static inline u64
ipv6_get_key (ip6_header_t * ip)
{
  u64 hash_key;

  hash_key = ip->src_address.as_u64[0] ^
    rotate_left (ip->src_address.as_u64[1], 13) ^
    rotate_left (ip->dst_address.as_u64[0], 26) ^
    rotate_left (ip->dst_address.as_u64[1], 39) ^ ip->protocol;

  return hash_key;
}
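
/*
 * The per-word rotations make word position significant: a plain XOR
 * of the four address words would produce the same key whenever the
 * same words appeared in a different order (for example with source
 * and destination swapped), needlessly colliding unrelated flows.
 */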

#define MPLS_BOTTOM_OF_STACK_BIT_MASK   0x00000100U
#define MPLS_LABEL_MASK                 0xFFFFF000U
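
/*
 * label_exp_s_ttl is read in network byte order.  Byte-swapping both
 * operands of an AND preserves whether the result is zero, so the
 * constant masks are swapped once (clib_net_to_host_u32 of a constant
 * folds at compile time) instead of swapping every label on the fast
 * path.
 */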

static inline u64
mpls_get_key (mpls_unicast_header_t * m)
{
  u64 hash_key;
  u8 ip_ver;

  /* find the bottom of the MPLS label stack. */
  if (PREDICT_TRUE (m->label_exp_s_ttl &
		    clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK)))
    {
      goto bottom_lbl_found;
    }
  m++;

  if (PREDICT_TRUE (m->label_exp_s_ttl &
		    clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK)))
    {
      goto bottom_lbl_found;
    }
  m++;

  if (m->label_exp_s_ttl &
      clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK))
    {
      goto bottom_lbl_found;
    }
  m++;

  if (m->label_exp_s_ttl &
      clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK))
    {
      goto bottom_lbl_found;
    }
  m++;

  if (m->label_exp_s_ttl &
      clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK))
    {
      goto bottom_lbl_found;
    }

  /* the bottom label was not found - use the last label */
  hash_key = m->label_exp_s_ttl & clib_net_to_host_u32 (MPLS_LABEL_MASK);

  return hash_key;

bottom_lbl_found:
  m++;
  ip_ver = (*((u8 *) m) >> 4);

  /* find out whether it is an IPv4 or IPv6 header */
  if (PREDICT_TRUE (ip_ver == 4))
    {
      hash_key = ipv4_get_key ((ip4_header_t *) m);
    }
  else if (PREDICT_TRUE (ip_ver == 6))
    {
      hash_key = ipv6_get_key ((ip6_header_t *) m);
    }
  else
    {
      /* use the bottom label */
      hash_key =
	(m - 1)->label_exp_s_ttl & clib_net_to_host_u32 (MPLS_LABEL_MASK);
    }

  return hash_key;
}
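
/*
 * Worked example (illustrative): for a two-label stack carrying an
 * IPv4 payload, the first S-bit check misses, the second hits, and
 * the key comes from ipv4_get_key() on the header following the
 * bottom label.  Stacks deeper than five labels hash on the fifth
 * label rather than walking further.
 */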

static inline u64
eth_get_sym_key (ethernet_header_t * h0)
{
  u64 hash_key;

  if (PREDICT_TRUE (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
    {
      ip4_header_t *ip = (ip4_header_t *) (h0 + 1);
      hash_key =
	(u64) (ip->src_address.as_u32 ^
	       ip->dst_address.as_u32 ^ ip->protocol);
    }
  else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
    {
      ip6_header_t *ip = (ip6_header_t *) (h0 + 1);
      hash_key = (u64) (ip->src_address.as_u64[0] ^
			ip->src_address.as_u64[1] ^
			ip->dst_address.as_u64[0] ^
			ip->dst_address.as_u64[1] ^ ip->protocol);
    }
  else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS))
    {
      hash_key = mpls_get_key ((mpls_unicast_header_t *) (h0 + 1));
    }
  else
    if (PREDICT_FALSE
	((h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN))
	 || (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD))))
    {
      ethernet_vlan_header_t *outer = (ethernet_vlan_header_t *) (h0 + 1);

      outer = (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN)) ?
	outer + 1 : outer;
      if (PREDICT_TRUE (outer->type ==
			clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
	{
	  ip4_header_t *ip = (ip4_header_t *) (outer + 1);
	  hash_key =
	    (u64) (ip->src_address.as_u32 ^
		   ip->dst_address.as_u32 ^ ip->protocol);
	}
      else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
	{
	  ip6_header_t *ip = (ip6_header_t *) (outer + 1);
	  hash_key =
	    (u64) (ip->src_address.as_u64[0] ^ ip->src_address.as_u64[1] ^
		   ip->dst_address.as_u64[0] ^
		   ip->dst_address.as_u64[1] ^ ip->protocol);
	}
      else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS))
	{
	  hash_key = mpls_get_key ((mpls_unicast_header_t *) (outer + 1));
	}
      else
	{
	  hash_key = outer->type;
	}
    }
  else
    {
      hash_key = 0;
    }

  return hash_key;
}
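
/*
 * Note that the symmetry property (the same key for both directions
 * of a flow) holds for the IPv4 and IPv6 branches above, which XOR
 * source and destination.  The MPLS branch falls through to
 * mpls_get_key(), whose inner-IP keys are direction-sensitive, so
 * symmetric placement is not guaranteed for MPLS payloads.
 */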

static inline u64
eth_get_key (ethernet_header_t * h0)
{
  u64 hash_key;

  if (PREDICT_TRUE (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
    {
      hash_key = ipv4_get_key ((ip4_header_t *) (h0 + 1));
    }
  else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
    {
      hash_key = ipv6_get_key ((ip6_header_t *) (h0 + 1));
    }
  else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS))
    {
      hash_key = mpls_get_key ((mpls_unicast_header_t *) (h0 + 1));
    }
  else if ((h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN)) ||
	   (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD)))
    {
      ethernet_vlan_header_t *outer = (ethernet_vlan_header_t *) (h0 + 1);

      outer = (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN)) ?
	outer + 1 : outer;
      if (PREDICT_TRUE (outer->type ==
			clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
	{
	  hash_key = ipv4_get_key ((ip4_header_t *) (outer + 1));
	}
      else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
	{
	  hash_key = ipv6_get_key ((ip6_header_t *) (outer + 1));
	}
      else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS))
	{
	  hash_key = mpls_get_key ((mpls_unicast_header_t *) (outer + 1));
	}
      else
	{
	  hash_key = outer->type;
	}
    }
  else
    {
      hash_key = 0;
    }

  return hash_key;
}
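
/*
 * Illustrative usage sketch (assumptions: b0 is a vlib_buffer_t
 * holding an ethernet frame, and the reduction helper is the example
 * defined after ipv4_get_key above):
 *
 *   ethernet_header_t *eth = vlib_buffer_get_current (b0);
 *   u64 key = eth_get_key (eth);
 *   u32 worker =
 *     handoff_worker_from_key_example (key, first_worker_index,
 *                                      n_workers);
 */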

#endif /* included_vnet_handoff_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */