path: root/src/vnet/handoff.h
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vnet_handoff_h
#define included_vnet_handoff_h

#include <vlib/vlib.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/mpls/packet.h>

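/*
 * Next-node indices for the handoff-dispatch node: a handed-off
 * packet is forwarded to the input node matching its type, or
 * dropped.
 */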
typedef enum
{
  HANDOFF_DISPATCH_NEXT_IP4_INPUT,
  HANDOFF_DISPATCH_NEXT_IP6_INPUT,
  HANDOFF_DISPATCH_NEXT_MPLS_INPUT,
  HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT,
  HANDOFF_DISPATCH_NEXT_DROP,
  HANDOFF_DISPATCH_N_NEXT,
} handoff_dispatch_next_t;


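/*
 * Flow hash key for an IPv4 packet: the source/destination address
 * pair, read as one 64-bit word, XORed with the protocol.
 */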
static inline u64
ipv4_get_key (ip4_header_t * ip)
{
  u64 hash_key;

  hash_key = *((u64 *) (&ip->address_pair)) ^ ip->protocol;

  return hash_key;
}

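/*
 * Flow hash key for an IPv6 packet: XOR of the four 64-bit address
 * words, each rotated by a different amount so that equal words do
 * not cancel out, XORed with the protocol.
 */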
static inline u64
ipv6_get_key (ip6_header_t * ip)
{
  u64 hash_key;

  hash_key = ip->src_address.as_u64[0] ^
    rotate_left (ip->src_address.as_u64[1], 13) ^
    rotate_left (ip->dst_address.as_u64[0], 26) ^
    rotate_left (ip->dst_address.as_u64[1], 39) ^ ip->protocol;

  return hash_key;
}

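/*
 * An MPLS label stack entry, in host byte order, is laid out as:
 * label (bits 31-12) | EXP (bits 11-9) | bottom-of-stack S (bit 8) |
 * TTL (bits 7-0).  The masks below are converted with
 * clib_net_to_host_u32() so they can be ANDed directly with the
 * network-order label_exp_s_ttl field.
 */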
#define MPLS_BOTTOM_OF_STACK_BIT_MASK   0x00000100U
#define MPLS_LABEL_MASK                 0xFFFFF000U

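/*
 * Flow hash key for an MPLS packet: scan up to five label stack
 * entries for the bottom-of-stack bit, then hash the inner IPv4 or
 * IPv6 header.  If the bottom of the stack is not found, or the
 * payload is neither IPv4 nor IPv6, fall back to hashing the label
 * itself.
 */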
static inline u64
mpls_get_key (mpls_unicast_header_t * m)
{
  u64 hash_key;
  u8 ip_ver;

  /* find the bottom of the MPLS label stack. */
  if (PREDICT_TRUE (m->label_exp_s_ttl &
		    clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK)))
    {
      goto bottom_lbl_found;
    }
  m++;

  if (PREDICT_TRUE (m->label_exp_s_ttl &
		    clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK)))
    {
      goto bottom_lbl_found;
    }
  m++;

  if (m->label_exp_s_ttl &
      clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK))
    {
      goto bottom_lbl_found;
    }
  m++;

  if (m->label_exp_s_ttl &
      clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK))
    {
      goto bottom_lbl_found;
    }
  m++;

  if (m->label_exp_s_ttl &
      clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK))
    {
      goto bottom_lbl_found;
    }

  /* the bottom label was not found - use the last label */
  hash_key = m->label_exp_s_ttl & clib_net_to_host_u32 (MPLS_LABEL_MASK);

  return hash_key;

bottom_lbl_found:
  m++;
  ip_ver = (*((u8 *) m) >> 4);

  /* find out whether the payload is an IPv4 or an IPv6 header */
  if (PREDICT_TRUE (ip_ver == 4))
    {
      hash_key = ipv4_get_key ((ip4_header_t *) m);
    }
  else if (PREDICT_TRUE (ip_ver == 6))
    {
      hash_key = ipv6_get_key ((ip6_header_t *) m);
    }
  else
    {
      /* use the bottom label */
      hash_key =
	(m - 1)->label_exp_s_ttl & clib_net_to_host_u32 (MPLS_LABEL_MASK);
    }

  return hash_key;
}

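/*
 * Symmetric flow hash key for an Ethernet frame: addresses are XORed
 * together, so both directions of an IPv4/IPv6 flow yield the same
 * key.  Skips one or two VLAN tags and also handles MPLS payloads.
 */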
static inline u64
eth_get_sym_key (ethernet_header_t * h0)
{
  u64 hash_key;

  if (PREDICT_TRUE (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
    {
      ip4_header_t *ip = (ip4_header_t *) (h0 + 1);
      hash_key =
	(u64) (ip->src_address.as_u32 ^
	       ip->dst_address.as_u32 ^ ip->protocol);
    }
  else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
    {
      ip6_header_t *ip = (ip6_header_t *) (h0 + 1);
      hash_key = (u64) (ip->src_address.as_u64[0] ^
			ip->src_address.as_u64[1] ^
			ip->dst_address.as_u64[0] ^
			ip->dst_address.as_u64[1] ^ ip->protocol);
    }
  else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS))
    {
      hash_key = mpls_get_key ((mpls_unicast_header_t *) (h0 + 1));
    }
  else
    if (PREDICT_FALSE
	((h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN))
	 || (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD))))
    {
      ethernet_vlan_header_t *outer = (ethernet_vlan_header_t *) (h0 + 1);

      outer = (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN)) ?
	outer + 1 : outer;
      if (PREDICT_TRUE (outer->type ==
			clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
	{
	  ip4_header_t *ip = (ip4_header_t *) (outer + 1);
	  hash_key =
	    (u64) (ip->src_address.as_u32 ^
		   ip->dst_address.as_u32 ^ ip->protocol);
	}
      else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
	{
	  ip6_header_t *ip = (ip6_header_t *) (outer + 1);
	  hash_key =
	    (u64) (ip->src_address.as_u64[0] ^ ip->src_address.as_u64[1] ^
		   ip->dst_address.as_u64[0] ^
		   ip->dst_address.as_u64[1] ^ ip->protocol);
	}
      else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS))
	{
	  hash_key = mpls_get_key ((mpls_unicast_header_t *) (outer + 1));
	}
      else
	{
	  hash_key = outer->type;
	}
    }
  else
    {
      hash_key = 0;
    }

  return hash_key;
}

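/*
 * Flow hash key for an Ethernet frame (not symmetric): dispatch on
 * the ethertype, skipping one or two VLAN tags, to the per-protocol
 * key functions above.
 */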
static inline u64
eth_get_key (ethernet_header_t * h0)
{
  u64 hash_key;

  if (PREDICT_TRUE (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
    {
      hash_key = ipv4_get_key ((ip4_header_t *) (h0 + 1));
    }
  else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
    {
      hash_key = ipv6_get_key ((ip6_header_t *) (h0 + 1));
    }
  else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS))
    {
      hash_key = mpls_get_key ((mpls_unicast_header_t *) (h0 + 1));
    }
  else if ((h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN)) ||
	   (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD)))
    {
      ethernet_vlan_header_t *outer = (ethernet_vlan_header_t *) (h0 + 1);

      outer = (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN)) ?
	outer + 1 : outer;
      if (PREDICT_TRUE (outer->type ==
			clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
	{
	  hash_key = ipv4_get_key ((ip4_header_t *) (outer + 1));
	}
      else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
	{
	  hash_key = ipv6_get_key ((ip6_header_t *) (outer + 1));
	}
      else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS))
	{
	  hash_key = mpls_get_key ((mpls_unicast_header_t *) (outer + 1));
	}
      else
	{
	  hash_key = outer->type;
	}
    }
  else
    {
      hash_key = 0;
    }

  return hash_key;
}
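
/*
 * Illustrative sketch of how a caller might use these keys: a worker
 * handoff node would typically reduce the key to a worker thread
 * index.  'first_worker_index' and 'n_workers' are hypothetical
 * parameters here, not names defined by this header.
 */
static inline u32
handoff_example_pick_worker (ethernet_header_t * h0,
			     u32 first_worker_index, u32 n_workers)
{
  u64 key = eth_get_key (h0);
  return first_worker_index + (u32) (key % n_workers);
}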

#endif /* included_vnet_handoff_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */