summaryrefslogtreecommitdiffstats
path: root/src/vnet/ipsec/ipsec_spd.c
blob: 45a379db399ac0678d077f2174e79a71170e4c33 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_io.h>

/**
 * @brief Create or delete a Security Policy Database (SPD).
 *
 * @param vm     vlib main (passed through to ipsec_set_interface_spd on delete)
 * @param spd_id user-visible SPD identifier
 * @param is_add non-zero to create, zero to delete
 *
 * @return 0 on success;
 *         VNET_API_ERROR_ENTRY_ALREADY_EXISTS if adding an existing spd_id;
 *         VNET_API_ERROR_NO_SUCH_ENTRY if deleting an unknown spd_id.
 *
 * On delete, the SPD is first unbound from every interface that references
 * it, then its per-type policy vectors are freed and the pool slot returned.
 */
int
ipsec_add_del_spd (vlib_main_t * vm, u32 spd_id, int is_add)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_spd_t *spd = 0;
  uword *p;
  u32 spd_index, k, v;

  /* Look up by user-visible id; reject add-of-existing / delete-of-missing. */
  p = hash_get (im->spd_index_by_spd_id, spd_id);
  if (p && is_add)
    return VNET_API_ERROR_ENTRY_ALREADY_EXISTS;
  if (!p && !is_add)
    return VNET_API_ERROR_NO_SUCH_ENTRY;

  if (!is_add)			/* delete */
    {
      spd_index = p[0];
      spd = pool_elt_at_index (im->spds, spd_index);
      /* NOTE(review): pool_elt_at_index appears to always yield a valid
         pointer for an in-range index, so this guard looks unreachable —
         confirm against vppinfra pool semantics before removing. */
      if (!spd)
	return VNET_API_ERROR_INVALID_VALUE;
      /* Unbind this SPD from every interface currently using it. */
      /* *INDENT-OFF* */
      hash_foreach (k, v, im->spd_index_by_sw_if_index, ({
        if (v == spd_index)
          ipsec_set_interface_spd(vm, k, spd_id, 0);
      }));
      /* *INDENT-ON* */
      hash_unset (im->spd_index_by_spd_id, spd_id);
      /* Free the policy vector for each policy type, then release the
         pool element itself. */
#define _(s,v) vec_free(spd->policies[IPSEC_SPD_POLICY_##s]);
      foreach_ipsec_spd_policy_type
#undef _
	pool_put (im->spds, spd);
    }
  else				/* create new SPD */
    {
      pool_get (im->spds, spd);
      clib_memset (spd, 0, sizeof (*spd));
      spd_index = spd - im->spds;
      spd->id = spd_id;
      hash_set (im->spd_index_by_spd_id, spd_id, spd_index);
    }
  return 0;
}

/**
 * @brief Bind an SPD to, or unbind it from, an interface.
 *
 * @param vm          vlib main (unused here; kept for API compatibility)
 * @param sw_if_index software interface index to (un)bind
 * @param spd_id      user-visible SPD identifier
 * @param is_add      non-zero to bind, zero to unbind
 *
 * @return 0 on success;
 *         VNET_API_ERROR_SYSCALL_ERROR_1 if the spd_id is unknown, or if
 *         the interface already has an SPD bound and is_add is set.
 *
 * Binding enables the ipsec4/ipsec6 input and output feature-arc nodes on
 * the interface; unbinding disables them.
 */
int
ipsec_set_interface_spd (vlib_main_t * vm, u32 sw_if_index, u32 spd_id,
			 int is_add)
{
  ipsec_main_t *im = &ipsec_main;
  ip4_ipsec_config_t config;

  u32 spd_index;
  uword *p;

  p = hash_get (im->spd_index_by_spd_id, spd_id);
  if (!p)
    return VNET_API_ERROR_SYSCALL_ERROR_1;	/* no such spd-id */

  spd_index = p[0];

  /* Only one SPD may be bound to an interface at a time. */
  p = hash_get (im->spd_index_by_sw_if_index, sw_if_index);
  if (p && is_add)
    return VNET_API_ERROR_SYSCALL_ERROR_1;	/* spd already assigned */

  if (is_add)
    {
      hash_set (im->spd_index_by_sw_if_index, sw_if_index, spd_index);
    }
  else
    {
      /* no-op if the interface was not bound */
      hash_unset (im->spd_index_by_sw_if_index, sw_if_index);
    }

  /* enable/disable IPsec policy enforcement on TX */
  vnet_feature_enable_disable ("ip4-output", "ipsec4-output-feature",
			       sw_if_index, is_add, 0, 0);
  vnet_feature_enable_disable ("ip6-output", "ipsec6-output-feature",
			       sw_if_index, is_add, 0, 0);

  /* the RX features carry the SPD index as per-feature config */
  config.spd_index = spd_index;

  /* enable/disable IPsec policy enforcement on RX */
  vnet_feature_enable_disable ("ip4-unicast", "ipsec4-input-feature",
			       sw_if_index, is_add, &config, sizeof (config));
  vnet_feature_enable_disable ("ip6-unicast", "ipsec6-input-feature",
			       sw_if_index, is_add, &config, sizeof (config));

  return 0;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
class="nf">heap_next (heap_elt_t * e) { return e + e->next; } always_inline heap_elt_t * heap_prev (heap_elt_t * e) { return e + e->prev; } always_inline uword heap_elt_size (void *v, heap_elt_t * e) { heap_elt_t *n = heap_next (e); uword next_offset = n != e ? heap_offset (n) : vec_len (v); return next_offset - heap_offset (e); } /* Sizes are binned. Sizes 1 to 2^log2_small_bins have their own free lists. Larger sizes are grouped in powers of two. */ #define HEAP_LOG2_SMALL_BINS (5) #define HEAP_SMALL_BINS (1 << HEAP_LOG2_SMALL_BINS) #define HEAP_N_BINS (2 * HEAP_SMALL_BINS) /* Header for heaps. */ typedef struct { /* Vector of used and free elements. */ heap_elt_t *elts; /* For elt_bytes < sizeof (u32) we need some extra space per elt to store free list index. */ u32 *small_free_elt_free_index; /* Vector of free indices of elts array. */ u32 *free_elts; /* Indices of free elts indexed by size bin. */ u32 **free_lists; format_function_t *format_elt; /* Used for validation/debugging. */ uword *used_elt_bitmap; /* First and last element of doubly linked chain of elements. */ u32 head, tail; u32 used_count, max_len; /* Number of bytes in a help element. */ u32 elt_bytes; u32 flags; /* Static heaps are made from external memory given to us by user and are not re-sizable vectors. */ #define HEAP_IS_STATIC (1) } heap_header_t; /* Start of heap elements is always cache aligned. 
*/ #define HEAP_DATA_ALIGN (CLIB_CACHE_LINE_BYTES) always_inline heap_header_t * heap_header (void *v) { return vec_header (v, sizeof (heap_header_t)); } always_inline uword heap_header_bytes () { return vec_header_bytes (sizeof (heap_header_t)); } always_inline void heap_dup_header (heap_header_t * old, heap_header_t * new) { uword i; new[0] = old[0]; new->elts = vec_dup (new->elts); new->free_elts = vec_dup (new->free_elts); new->free_lists = vec_dup (new->free_lists); for (i = 0; i < vec_len (new->free_lists); i++) new->free_lists[i] = vec_dup (new->free_lists[i]); new->used_elt_bitmap = clib_bitmap_dup (new->used_elt_bitmap); new->small_free_elt_free_index = vec_dup (new->small_free_elt_free_index); } /* Make a duplicate copy of a heap. */ #define heap_dup(v) _heap_dup(v, vec_len (v) * sizeof (v[0])) always_inline void * _heap_dup (void *v_old, uword v_bytes) { heap_header_t *h_old, *h_new; void *v_new; h_old = heap_header (v_old); if (!v_old) return v_old; v_new = 0; v_new = _vec_resize (v_new, _vec_len (v_old), v_bytes, sizeof (heap_header_t), HEAP_DATA_ALIGN); h_new = heap_header (v_new); heap_dup_header (h_old, h_new); clib_memcpy_fast (v_new, v_old, v_bytes); return v_new; } always_inline uword heap_elts (void *v) { heap_header_t *h = heap_header (v); return h->used_count; } uword heap_bytes (void *v); always_inline void * _heap_new (u32 len, u32 n_elt_bytes) { void *v = _vec_resize ((void *) 0, len, (uword) len * n_elt_bytes, sizeof (heap_header_t), HEAP_DATA_ALIGN); heap_header (v)->elt_bytes = n_elt_bytes; return v; } #define heap_new(v) (v) = _heap_new (0, sizeof ((v)[0])) always_inline void heap_set_format (void *v, format_function_t * format_elt) { ASSERT (v); heap_header (v)->format_elt = format_elt; } always_inline void heap_set_max_len (void *v, uword max_len) { ASSERT (v); heap_header (v)->max_len = max_len; } always_inline uword heap_get_max_len (void *v) { return v ? 
heap_header (v)->max_len : 0; } /* Create fixed size heap with given block of memory. */ always_inline void * heap_create_from_memory (void *memory, uword max_len, uword elt_bytes) { heap_header_t *h; void *v; if (max_len * elt_bytes < sizeof (h[0])) return 0; h = memory; clib_memset (h, 0, sizeof (h[0])); h->max_len = max_len; h->elt_bytes = elt_bytes; h->flags = HEAP_IS_STATIC; v = (void *) (memory + heap_header_bytes ()); _vec_len (v) = 0; return v; } /* Execute BODY for each allocated heap element. */ #define heap_foreach(var,len,heap,body) \ do { \ if (vec_len (heap) > 0) \ { \ heap_header_t * _h = heap_header (heap); \ heap_elt_t * _e = _h->elts + _h->head; \ heap_elt_t * _end = _h->elts + _h->tail; \ while (1) \ { \ if (! heap_is_free (_e)) \ { \ (var) = (heap) + heap_offset (_e); \ (len) = heap_elt_size ((heap), _e); \ do { body; } while (0); \ } \ if (_e == _end) \ break; \ _e = heap_next (_e); \ } \ } \ } while (0) #define heap_elt_at_index(v,index) vec_elt_at_index(v,index) always_inline heap_elt_t * heap_get_elt (void *v, uword handle) { heap_header_t *h = heap_header (v); heap_elt_t *e = vec_elt_at_index (h->elts, handle); ASSERT (!heap_is_free (e)); return e; } #define heap_elt_with_handle(v,handle) \ ({ \ heap_elt_t * _e = heap_get_elt ((v), (handle)); \ (v) + heap_offset (_e); \ }) always_inline uword heap_is_free_handle (void *v, uword heap_handle) { heap_header_t *h = heap_header (v); heap_elt_t *e = vec_elt_at_index (h->elts, heap_handle); return heap_is_free (e); } extern uword heap_len (void *v, word handle); /* Low level allocation call. 
*/ extern void *_heap_alloc (void *v, uword size, uword alignment, uword elt_bytes, uword * offset, uword * handle); #define heap_alloc_aligned(v,size,align,handle) \ ({ \ uword _o, _h; \ uword _a = (align); \ uword _s = (size); \ (v) = _heap_alloc ((v), _s, _a, sizeof ((v)[0]), &_o, &_h); \ (handle) = _h; \ _o; \ }) #define heap_alloc(v,size,handle) heap_alloc_aligned((v),(size),0,(handle)) extern void heap_dealloc (void *v, uword handle); extern void heap_validate (void *v); /* Format heap internal data structures as string. */ extern u8 *format_heap (u8 * s, va_list * va); void *_heap_free (void *v); #define heap_free(v) (v)=_heap_free(v) #endif /* included_heap_h */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */