path: root/src/vnet/ipsec/ipsec.c
blob: e88a72e8bac846e5d8f518c39658ea87055d9469
/*
 * ipsec.c : IPSEC module functions
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/interface.h>
#include <vnet/udp/udp.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ikev2.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ah.h>

ipsec_main_t ipsec_main;

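/*
 * Seed the OpenSSL PRNG with the current time, the process id and a
 * stack address, so the generator does not start from a fixed state.
 */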
static void
ipsec_rand_seed (void)
{
  struct
  {
    time_t time;
    pid_t pid;
    void *p;
  } seed_data;

  seed_data.time = time (NULL);
  seed_data.pid = getpid ();
  seed_data.p = (void *) &seed_data;

  RAND_seed ((const void *) &seed_data, sizeof (seed_data));
}

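/*
 * Capability check for the default (openssl) AH backend: an AH SA
 * without an integrity algorithm is rejected.
 */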
static clib_error_t *
ipsec_check_ah_support (ipsec_sa_t * sa)
{
  if (sa->integ_alg == IPSEC_INTEG_ALG_NONE)
    return clib_error_return (0, "unsupported none integ-alg");
  return 0;
}

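/*
 * Capability check for the default (openssl) ESP backend: the AES-GCM
 * variants are not implemented by this backend and are rejected.
 */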
static clib_error_t *
ipsec_check_esp_support (ipsec_sa_t * sa)
{
  if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
    return clib_error_return (0, "unsupported aes-gcm-128 crypto-alg");
  if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192)
    return clib_error_return (0, "unsupported aes-gcm-192 crypto-alg");
  if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256)
    return clib_error_return (0, "unsupported aes-gcm-256 crypto-alg");

  return 0;
}

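/*
 * Notify the currently selected AH and ESP backends that an SA was
 * added or deleted so they can set up or tear down per-SA session
 * state.  The first backend error is returned to the caller.
 */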
clib_error_t *
ipsec_add_del_sa_sess_cb (ipsec_main_t * im, u32 sa_index, u8 is_add)
{
  ipsec_ah_backend_t *ah =
    pool_elt_at_index (im->ah_backends, im->ah_current_backend);
  if (ah->add_del_sa_sess_cb)
    {
      clib_error_t *err = ah->add_del_sa_sess_cb (sa_index, is_add);
      if (err)
	return err;
    }
  ipsec_esp_backend_t *esp =
    pool_elt_at_index (im->esp_backends, im->esp_current_backend);
  if (esp->add_del_sa_sess_cb)
    {
      clib_error_t *err = esp->add_del_sa_sess_cb (sa_index, is_add);
      if (err)
	return err;
    }
  return 0;
}

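/*
 * Ask the currently selected backend whether it can handle the given
 * SA: AH SAs are checked against the AH backend, all others against
 * the ESP backend.
 */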
clib_error_t *
ipsec_check_support_cb (ipsec_main_t * im, ipsec_sa_t * sa)
{
  clib_error_t *error = 0;

  if (PREDICT_FALSE (sa->protocol == IPSEC_PROTOCOL_AH))
    {
      ipsec_ah_backend_t *ah =
	pool_elt_at_index (im->ah_backends, im->ah_current_backend);
      ASSERT (ah->check_support_cb);
      error = ah->check_support_cb (sa);
    }
  else
    {
      ipsec_esp_backend_t *esp =
	pool_elt_at_index (im->esp_backends, im->esp_current_backend);
      ASSERT (esp->check_support_cb);
      error = esp->check_support_cb (sa);
    }
  return error;
}


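/*
 * Look up the named graph node and add it as a next node of
 * prev_node_name, returning both the node index and the newly
 * allocated next index.
 */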
static void
ipsec_add_node (vlib_main_t * vm, const char *node_name,
		const char *prev_node_name, u32 * out_node_index,
		u32 * out_next_index)
{
  vlib_node_t *prev_node, *node;
  prev_node = vlib_get_node_by_name (vm, (u8 *) prev_node_name);
  ASSERT (prev_node);
  node = vlib_get_node_by_name (vm, (u8 *) node_name);
  ASSERT (node);
  *out_node_index = node->index;
  *out_next_index = vlib_node_add_next (vm, prev_node->index, node->index);
}

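/*
 * Register an AH backend: remember its name, hook its IPv4/IPv6
 * encrypt nodes behind the ipsec output features and its decrypt
 * nodes behind the ipsec input features, and record its callbacks.
 * Returns the backend index, to be used with ipsec_select_ah_backend().
 */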
u32
ipsec_register_ah_backend (vlib_main_t * vm, ipsec_main_t * im,
			   const char *name,
			   const char *ah4_encrypt_node_name,
			   const char *ah4_decrypt_node_name,
			   const char *ah6_encrypt_node_name,
			   const char *ah6_decrypt_node_name,
			   check_support_cb_t ah_check_support_cb,
			   add_del_sa_sess_cb_t ah_add_del_sa_sess_cb)
{
  ipsec_ah_backend_t *b;
  pool_get (im->ah_backends, b);
  b->name = format (NULL, "%s", name);

  ipsec_add_node (vm, ah4_encrypt_node_name, "ipsec4-output-feature",
		  &b->ah4_encrypt_node_index, &b->ah4_encrypt_next_index);
  ipsec_add_node (vm, ah4_decrypt_node_name, "ipsec4-input-feature",
		  &b->ah4_decrypt_node_index, &b->ah4_decrypt_next_index);
  ipsec_add_node (vm, ah6_encrypt_node_name, "ipsec6-output-feature",
		  &b->ah6_encrypt_node_index, &b->ah6_encrypt_next_index);
  ipsec_add_node (vm, ah6_decrypt_node_name, "ipsec6-input-feature",
		  &b->ah6_decrypt_node_index, &b->ah6_decrypt_next_index);

  b->check_support_cb = ah_check_support_cb;
  b->add_del_sa_sess_cb = ah_add_del_sa_sess_cb;
  return b - im->ah_backends;
}

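/*
 * Register an ESP backend, following the same pattern as the AH
 * registration above.  A crypto plugin would typically call this from
 * its init function with the names of its own graph nodes, e.g.
 * (hypothetical names, for illustration only):
 *
 *   u32 idx = ipsec_register_esp_backend (vm, im, "my-crypto-engine",
 *               "my-esp4-encrypt", "my-esp4-decrypt",
 *               "my-esp6-encrypt", "my-esp6-decrypt",
 *               my_check_support_fn, my_sa_sess_fn);
 *
 * Returns the backend index, to be used with ipsec_select_esp_backend().
 */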
u32
ipsec_register_esp_backend (vlib_main_t * vm, ipsec_main_t * im,
			    const char *name,
			    const char *esp4_encrypt_node_name,
			    const char *esp4_decrypt_node_name,
			    const char *esp6_encrypt_node_name,
			    const char *esp6_decrypt_node_name,
			    check_support_cb_t esp_check_support_cb,
			    add_del_sa_sess_cb_t esp_add_del_sa_sess_cb)
{
  ipsec_esp_backend_t *b;
  pool_get (im->esp_backends, b);
  b->name = format (NULL, "%s", name);

  ipsec_add_node (vm, esp4_encrypt_node_name, "ipsec4-output-feature",
		  &b->esp4_encrypt_node_index, &b->esp4_encrypt_next_index);
  ipsec_add_node (vm, esp4_decrypt_node_name, "ipsec4-input-feature",
		  &b->esp4_decrypt_node_index, &b->esp4_decrypt_next_index);
  ipsec_add_node (vm, esp6_encrypt_node_name, "ipsec6-output-feature",
		  &b->esp6_encrypt_node_index, &b->esp6_encrypt_next_index);
  ipsec_add_node (vm, esp6_decrypt_node_name, "ipsec6-input-feature",
		  &b->esp6_decrypt_node_index, &b->esp6_decrypt_next_index);

  b->check_support_cb = esp_check_support_cb;
  b->add_del_sa_sess_cb = esp_add_del_sa_sess_cb;
  return b - im->esp_backends;
}

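/*
 * Make the given AH backend the active one.  Refused (returns -1) if
 * any SAs already exist or if the backend index is invalid; otherwise
 * the backend's node and next indices are copied into ipsec_main for
 * use by the data plane.
 */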
int
ipsec_select_ah_backend (ipsec_main_t * im, u32 backend_idx)
{
  if (pool_elts (im->sad) > 0
      || pool_is_free_index (im->ah_backends, backend_idx))
    {
      return -1;
    }
  ipsec_ah_backend_t *b = pool_elt_at_index (im->ah_backends, backend_idx);
  im->ah_current_backend = backend_idx;
  im->ah4_encrypt_node_index = b->ah4_encrypt_node_index;
  im->ah4_decrypt_node_index = b->ah4_decrypt_node_index;
  im->ah4_encrypt_next_index = b->ah4_encrypt_next_index;
  im->ah4_decrypt_next_index = b->ah4_decrypt_next_index;
  im->ah6_encrypt_node_index = b->ah6_encrypt_node_index;
  im->ah6_decrypt_node_index = b->ah6_decrypt_node_index;
  im->ah6_encrypt_next_index = b->ah6_encrypt_next_index;
  im->ah6_decrypt_next_index = b->ah6_decrypt_next_index;
  return 0;
}

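/*
 * Make the given ESP backend the active one, under the same
 * constraints as ipsec_select_ah_backend() above.
 */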
int
ipsec_select_esp_backend (ipsec_main_t * im, u32 backend_idx)
{
  if (pool_elts (im->sad) > 0
      || pool_is_free_index (im->esp_backends, backend_idx))
    {
      return -1;
    }
  ipsec_esp_backend_t *b = pool_elt_at_index (im->esp_backends, backend_idx);
  im->esp_current_backend = backend_idx;
  im->esp4_encrypt_node_index = b->esp4_encrypt_node_index;
  im->esp4_decrypt_node_index = b->esp4_decrypt_node_index;
  im->esp4_encrypt_next_index = b->esp4_encrypt_next_index;
  im->esp4_decrypt_next_index = b->esp4_decrypt_next_index;
  im->esp6_encrypt_node_index = b->esp6_encrypt_node_index;
  im->esp6_decrypt_node_index = b->esp6_decrypt_node_index;
  im->esp6_encrypt_next_index = b->esp6_encrypt_next_index;
  im->esp6_decrypt_next_index = b->esp6_decrypt_next_index;
  return 0;
}

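/*
 * Module initialization: seed the PRNG, create the SPD/SA lookup
 * hashes, register and select the default openssl AH and ESP backends,
 * and run the remaining IPsec sub-module initializers (CLI, tunnel
 * interface, proto, IKEv2).
 */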
static clib_error_t *
ipsec_init (vlib_main_t * vm)
{
  clib_error_t *error;
  ipsec_main_t *im = &ipsec_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  ipsec_rand_seed ();

  clib_memset (im, 0, sizeof (im[0]));

  im->vnet_main = vnet_get_main ();
  im->vlib_main = vm;

  im->spd_index_by_spd_id = hash_create (0, sizeof (uword));
  im->sa_index_by_sa_id = hash_create (0, sizeof (uword));
  im->spd_index_by_sw_if_index = hash_create (0, sizeof (uword));

  vec_validate_aligned (im->empty_buffers, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "error-drop");
  ASSERT (node);
  im->error_drop_node_index = node->index;

  u32 idx = ipsec_register_ah_backend (vm, im, "default openssl backend",
				       "ah4-encrypt",
				       "ah4-decrypt",
				       "ah6-encrypt",
				       "ah6-decrypt",
				       ipsec_check_ah_support,
				       NULL);

  im->ah_default_backend = idx;
  int rv = ipsec_select_ah_backend (im, idx);
  ASSERT (0 == rv);
  (void) (rv);			/* rv is only read by ASSERT; silence unused warning */

  idx = ipsec_register_esp_backend (vm, im, "default openssl backend",
				    "esp4-encrypt",
				    "esp4-decrypt",
				    "esp6-encrypt",
				    "esp6-decrypt",
				    ipsec_check_esp_support, NULL);
  im->esp_default_backend = idx;

  rv = ipsec_select_esp_backend (im, idx);
  ASSERT (0 == rv);
  (void) (rv);			/* rv is only read by ASSERT; silence unused warning */

  if ((error = vlib_call_init_function (vm, ipsec_cli_init)))
    return error;

  if ((error = vlib_call_init_function (vm, ipsec_tunnel_if_init)))
    return error;

  ipsec_proto_init ();

  if ((error = ikev2_init (vm)))
    return error;

  return 0;
}

VLIB_INIT_FUNCTION (ipsec_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */