aboutsummaryrefslogtreecommitdiffstats
path: root/src/vppinfra/pfhash.h
blob: 2884fa81cf91ddf180693074a5070bb828a80e23 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
/*
 * Copyright (c) 2013 Cisco and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_clib_pfhash_h
#define included_clib_pfhash_h


#include <vppinfra/clib.h>
#include <vppinfra/hash.h>
#include <vppinfra/pool.h>

#if defined(CLIB_HAVE_VEC128) && ! defined (__ALTIVEC__)

/*
 * Fixed-size bucket record for 16-byte keys: up to 3 (key, value)
 * pairs stored side by side so a single lookup touches one record.
 */
typedef struct
{
  /* 3 x 16 = 48 key bytes */
  union
  {
    u32x4 k_u32x4[3];		/* vector view, used by the SIMD compare */
    u64 k_u64[6];		/* scalar view of the same 48 bytes */
  } kb;
  /* 3 x 4 = 12 value bytes, values[i] belongs to key slot i */
  u32 values[3];
  u32 pad;			/* pad record to 64 bytes */
} pfhash_kv_16_t;

/*
 * Fixed-size bucket record for 8-byte keys with 4-byte values:
 * up to 5 (key, value) pairs per record.
 */
typedef struct
{
  /* 5 x 8 = 40 key bytes */
  union
  {
    u64 k_u64[5];
  } kb;

  /* 5 x 4 = 20 value bytes, values[i] belongs to key slot i */
  u32 values[5];
  u32 pad;			/* pad record to 64 bytes */
} pfhash_kv_8_t;

/*
 * Fixed-size bucket record for 8-byte keys with 8-byte values:
 * up to 4 (key, value) pairs per record.
 */
typedef struct
{
  /* 4 x 8 = 32 key bytes */
  union
  {
    u64 k_u64[4];
  } kb;

  /* 4 x 8 = 32 value bytes, values[i] belongs to key slot i */
  u64 values[4];
} pfhash_kv_8v8_t;

/*
 * Fixed-size bucket record for 4-byte keys: up to 8 (key, value)
 * pairs per record.
 */
typedef struct
{
  /* 8 x 4 = 32 key bytes */
  union
  {
    u32x4 k_u32x4[2];		/* vector view, used by the SIMD compare */
    u32 kb[8];			/* scalar view of the same 32 bytes */
  } kb;

  /* 8 x 4 = 32 value bytes, values[i] belongs to key slot i */
  u32 values[8];
} pfhash_kv_4_t;

/*
 * One pool element: overlays all supported record layouts.
 * Which member is valid depends on the table's key/value sizes,
 * fixed at pfhash_init time.
 */
typedef union
{
  pfhash_kv_16_t kv16;
  pfhash_kv_8_t kv8;
  pfhash_kv_8v8_t kv8v8;
  pfhash_kv_4_t kv4;
} pfhash_kv_t;

/*
 * Prefetchable bucketized hash table. Each bucket holds either the
 * index of a pfhash_kv_t record in the kvp pool, or the
 * PFHASH_BUCKET_OVERFLOW sentinel meaning "look in overflow_hash".
 */
typedef struct
{
  /* Bucket vector: kvp pool index, or the overflow sentinel below */
  u32 *buckets;
#define PFHASH_BUCKET_OVERFLOW (u32)~0

  /* Pool of key/value pairs */
  pfhash_kv_t *kvp;

  /* overflow plain-o-hash, used when a fixed-size record fills up */
  uword *overflow_hash;

  /* Pretty-print name */
  u8 *name;

  /* Key/value sizes in bytes; select which pfhash_kv_t member is used */
  u32 key_size;
  u32 value_size;

  /* Statistics */
  u32 overflow_count;		/* records spilled to overflow_hash */
  u32 nitems;			/* total items in the table */
  u32 nitems_in_overflow;	/* items currently in overflow_hash */
} pfhash_t;

void pfhash_init (pfhash_t * p, char *name, u32 key_size, u32 value_size,
		  u32 nbuckets);
void pfhash_free (pfhash_t * p);
u64 pfhash_get (pfhash_t * p, u32 bucket, void *key);
void pfhash_set (pfhash_t * p, u32 bucket, void *key, void *value);
void pfhash_unset (pfhash_t * p, u32 bucket, void *key);

format_function_t format_pfhash;

/*
 * Issue a read prefetch for the bucket word, to be called a few
 * dozen cycles ahead of pfhash_read_bucket_prefetch_kv.
 */
static inline void
pfhash_prefetch_bucket (pfhash_t * p, u32 bucket)
{
  CLIB_PREFETCH (&p->buckets[bucket], CLIB_CACHE_LINE_BYTES, LOAD);
}

/*
 * Read the bucket word (hopefully already prefetched via
 * pfhash_prefetch_bucket) and issue a prefetch for the kv record it
 * points at, so the subsequent pfhash_search_kv_* hits warm cache.
 *
 * Returns the raw bucket contents: a kvp pool index, or
 * PFHASH_BUCKET_OVERFLOW.
 */
static inline u32
pfhash_read_bucket_prefetch_kv (pfhash_t * p, u32 bucket)
{
  u32 bucket_contents = p->buckets[bucket];
  /*
   * BUGFIX: the original test was
   *   (bucket_contents & PFHASH_BUCKET_OVERFLOW) == 0
   * but PFHASH_BUCKET_OVERFLOW is (u32)~0 (all ones), so the AND is a
   * no-op and the condition held only for bucket_contents == 0 --
   * i.e. the kv prefetch (the whole point of this function) was
   * skipped for every pool index except 0. Compare against the
   * sentinel directly, as the search routines do.
   */
  if (PREDICT_TRUE (bucket_contents != PFHASH_BUCKET_OVERFLOW))
    CLIB_PREFETCH (&p->kvp[bucket_contents], CLIB_CACHE_LINE_BYTES, LOAD);
  return bucket_contents;
}

/*
 * pfhash_search_kv_16
 * See if the supplied 16-byte key matches one of three 16-byte (key,value) pairs.
 * Return the indicated value, or ~0 if no match
 *
 * Note: including the overflow test, the fast path is 35 instrs
 * on x86_64. Elves will steal your keyboard in the middle of the night if
 * you "improve" it without checking the generated code!
 */
static inline u32
pfhash_search_kv_16 (pfhash_t * p, u32 bucket_contents, u32x4 * key)
{
  u32x4 diff0, diff1, diff2;
  u32 is_equal0, is_equal1, is_equal2;
  u32 no_match;
  pfhash_kv_16_t *kv;
  u32 rv;

  /* Bucket spilled to the plain hash: fall back to a regular lookup */
  if (PREDICT_FALSE (bucket_contents == PFHASH_BUCKET_OVERFLOW))
    {
      uword *hp;
      hp = hash_get_mem (p->overflow_hash, key);
      if (hp)
	return hp[0];
      return (u32) ~ 0;
    }

  kv = &p->kvp[bucket_contents].kv16;

  /* diffN is all-zero bytes iff the key equals slot N's key */
  diff0 = u32x4_sub (kv->kb.k_u32x4[0], key[0]);
  diff1 = u32x4_sub (kv->kb.k_u32x4[1], key[0]);
  diff2 = u32x4_sub (kv->kb.k_u32x4[2], key[0]);

  /* zero_byte_mask yields 0xFFFF on a full 16-byte match; the (i16)
     cast sign-extends that to 0xFFFFFFFF so is_equalN doubles as a
     32-bit select mask for valuesN below (0 on any mismatch) */
  no_match = is_equal0 = (i16) u32x4_zero_byte_mask (diff0);
  is_equal1 = (i16) u32x4_zero_byte_mask (diff1);
  no_match |= is_equal1;
  is_equal2 = (i16) u32x4_zero_byte_mask (diff2);
  no_match |= is_equal2;
  /* If any of the three items matched, no_match will be zero after this line */
  no_match = ~no_match;

  /* Branch-free select: the matching slot's value, or ~0 if none matched */
  rv = (is_equal0 & kv->values[0])
    | (is_equal1 & kv->values[1]) | (is_equal2 & kv->values[2]) | no_match;

  return rv;
}

/*
 * Look up an 8-byte key among the five (key, value) slots of a
 * kv8 record. Returns the matching slot's 32-bit value, or (u32)~0
 * if the key is absent.
 */
static inline u32
pfhash_search_kv_8 (pfhash_t * p, u32 bucket_contents, u64 * key)
{
  pfhash_kv_8_t *slot;
  u32 result = (u32) ~ 0;
  int i;

  /* Overflowed bucket: defer to the plain hash table */
  if (PREDICT_FALSE (bucket_contents == PFHASH_BUCKET_OVERFLOW))
    {
      uword *ovf = hash_get_mem (p->overflow_hash, key);
      return ovf ? (u32) ovf[0] : (u32) ~ 0;
    }

  slot = &p->kvp[bucket_contents].kv8;

  /* Scan all 5 slots; a later match overrides an earlier one, exactly
     as the original chain of conditional assignments behaved */
  for (i = 0; i < 5; i++)
    if (slot->kb.k_u64[i] == key[0])
      result = slot->values[i];

  return result;
}

/*
 * Look up an 8-byte key among the four (key, value) slots of a
 * kv8v8 record. Returns the matching slot's 64-bit value, or (u64)~0
 * if the key is absent.
 */
static inline u64
pfhash_search_kv_8v8 (pfhash_t * p, u32 bucket_contents, u64 * key)
{
  pfhash_kv_8v8_t *slot;
  u64 result = (u64) ~ 0;
  int i;

  /* Overflowed bucket: defer to the plain hash table */
  if (PREDICT_FALSE (bucket_contents == PFHASH_BUCKET_OVERFLOW))
    {
      uword *ovf = hash_get_mem (p->overflow_hash, key);
      return ovf ? (u64) ovf[0] : (u64) ~ 0;
    }

  slot = &p->kvp[bucket_contents].kv8v8;

  /* Scan all 4 slots; a later match overrides an earlier one, exactly
     as the original chain of conditional assignments behaved */
  for (i = 0; i < 4; i++)
    if (slot->kb.k_u64[i] == key[0])
      result = slot->values[i];

  return result;
}

/*
 * Look up a 4-byte key among the eight slots of a kv4 record using
 * two 4-wide vector compares. Returns the matching slot's value, or
 * (u32)~0 if the key is absent.
 */
static inline u32
pfhash_search_kv_4 (pfhash_t * p, u32 bucket_contents, u32 * key)
{
  u32x4 vector_key;
  u32x4 is_equal[2];
  u32 zbm[2], winner_index;
  pfhash_kv_4_t *kv;

  /* Bucket spilled to the plain hash: fall back to a regular lookup */
  if (PREDICT_FALSE (bucket_contents == PFHASH_BUCKET_OVERFLOW))
    {
      uword *hp;
      hp = hash_get_mem (p->overflow_hash, key);
      if (hp)
	return hp[0];
      return (u32) ~ 0;
    }

  kv = &p->kvp[bucket_contents].kv4;

  /* Broadcast the key into all 4 lanes, compare against both halves */
  vector_key = u32x4_splat (key[0]);

  is_equal[0] = (kv->kb.k_u32x4[0] == vector_key);
  is_equal[1] = (kv->kb.k_u32x4[1] == vector_key);
  /* is_equal bytes are 0xFF on matching lanes; invert the zero-byte
     mask so zbm[h] has 4 bits set per matching lane in half h */
  zbm[0] = ~u32x4_zero_byte_mask (is_equal[0]) & 0xFFFF;
  zbm[1] = ~u32x4_zero_byte_mask (is_equal[1]) & 0xFFFF;

  if (PREDICT_FALSE ((zbm[0] == 0) && (zbm[1] == 0)))
    return (u32) ~ 0;

  /* Convert bit position to lane index (4 mask bits per lane); a hit
     in the second half (slots 4..7) takes precedence over the first */
  winner_index = min_log2 (zbm[0]) >> 2;
  winner_index = zbm[1] ? (4 + (min_log2 (zbm[1]) >> 2)) : winner_index;

  return kv->values[winner_index];
}

#endif /* CLIB_HAVE_VEC128 */

#endif /* included_clib_pfhash_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
ndex = node->cached_next_index; while (n_left_from > 0) { u32 n_left_to_next; vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); /* TODO: Dual/quad loop */ while (n_left_from > 0 && n_left_to_next > 0) { u32 bi0; vlib_buffer_t *b0; ip6_header_t *ip0 = 0; ip6_sr_header_t *sr0; ip6_sr_localsid_t *ls0; u32 next0 = SRV6_AD_LOCALSID_NEXT_ERROR; bi0 = from[0]; to_next[0] = bi0; from += 1; to_next += 1; n_left_from -= 1; n_left_to_next -= 1; b0 = vlib_get_buffer (vm, bi0); ip0 = vlib_buffer_get_current (b0); sr0 = (ip6_sr_header_t *) (ip0 + 1); /* Lookup the SR End behavior based on IP DA (adj) */ ls0 = pool_elt_at_index (sm->localsids, vnet_buffer (b0)->ip.adj_index[VLIB_TX]); /* SRH processing */ end_ad_processing (b0, ip0, sr0, ls0, &next0); if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { srv6_ad_localsid_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof *tr); tr->localsid_index = ls0 - sm->localsids; } /* This increments the SRv6 per LocalSID counters. */ vlib_increment_combined_counter (((next0 == SRV6_AD_LOCALSID_NEXT_ERROR) ? 
&(sm->sr_ls_invalid_counters) : &(sm->sr_ls_valid_counters)), vm->thread_index, ls0 - sm->localsids, 1, vlib_buffer_length_in_chain (vm, b0)); vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); cnt_packets++; } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } return frame->n_vectors; } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (srv6_ad_localsid_node) = { .function = srv6_ad_localsid_fn, .name = "srv6-ad-localsid", .vector_size = sizeof (u32), .format_trace = format_srv6_ad_localsid_trace, .type = VLIB_NODE_TYPE_INTERNAL, .n_next_nodes = SRV6_AD_LOCALSID_N_NEXT, .next_nodes = { [SRV6_AD_LOCALSID_NEXT_REWRITE4] = "ip4-rewrite", [SRV6_AD_LOCALSID_NEXT_REWRITE6] = "ip6-rewrite", [SRV6_AD_LOCALSID_NEXT_INTERFACE] = "interface-output", [SRV6_AD_LOCALSID_NEXT_ERROR] = "error-drop", }, }; /* *INDENT-ON* */ /******************************* Rewriting node *******************************/ /** * @brief Graph node for applying a SR policy into an IPv6 packet. 
Encapsulation */ static uword srv6_ad2_rewrite_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { ip6_sr_main_t *srm = &sr_main; srv6_ad_main_t *sm = &srv6_ad_main; u32 n_left_from, next_index, *from, *to_next; u32 cnt_packets = 0; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; while (n_left_from > 0) { u32 n_left_to_next; vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); /* TODO: Dual/quad loop */ while (n_left_from > 0 && n_left_to_next > 0) { u32 bi0; vlib_buffer_t *b0; ethernet_header_t *en0; ip6_header_t *ip0 = 0; ip6_sr_localsid_t *ls0; srv6_ad_localsid_t *ls0_mem; u32 next0 = SRV6_AD_REWRITE_NEXT_LOOKUP; bi0 = from[0]; to_next[0] = bi0; from += 1; to_next += 1; n_left_from -= 1; n_left_to_next -= 1; b0 = vlib_get_buffer (vm, bi0); en0 = vlib_buffer_get_current (b0); ls0 = pool_elt_at_index (srm->localsids, sm->sw_iface_localsid2[vnet_buffer (b0)->sw_if_index [VLIB_RX]]); ls0_mem = ls0->plugin_mem; if (PREDICT_FALSE (ls0_mem == NULL || ls0_mem->rewrite == NULL)) { next0 = SRV6_AD_REWRITE_NEXT_ERROR; b0->error = node->errors[SRV6_AD_REWRITE_COUNTER_NO_RW]; } else { ASSERT (VLIB_BUFFER_PRE_DATA_SIZE >= (ls0_mem->rw_len + b0->current_data)); clib_memcpy_fast (((u8 *) en0) - ls0_mem->rw_len, ls0_mem->rewrite, ls0_mem->rw_len); vlib_buffer_advance (b0, -(word) ls0_mem->rw_len); ip0 = vlib_buffer_get_current (b0); ip0->payload_length = clib_host_to_net_u16 (b0->current_length - sizeof (ip6_header_t)); } if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) && PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { srv6_ad_rewrite_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof *tr); tr->error = 0; if (next0 == SRV6_AD_REWRITE_NEXT_ERROR) { tr->error = 1; } else { clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8, sizeof tr->src.as_u8); clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8, sizeof tr->dst.as_u8); } } /* Increment per-SID AD rewrite 
counters */ vlib_increment_combined_counter (((next0 == SRV6_AD_LOCALSID_NEXT_ERROR) ? &(sm->invalid_counters) : &(sm->valid_counters)), vm->thread_index, ls0_mem->index, 1, vlib_buffer_length_in_chain (vm, b0)); vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); cnt_packets++; } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } /* Update counters */ vlib_node_increment_counter (vm, srv6_ad4_rewrite_node.index, SRV6_AD_REWRITE_COUNTER_PROCESSED, cnt_packets); return frame->n_vectors; } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (srv6_ad2_rewrite_node) = { .function = srv6_ad2_rewrite_fn, .name = "srv6-ad2-rewrite", .vector_size = sizeof (u32), .format_trace = format_srv6_ad_rewrite_trace, .type = VLIB_NODE_TYPE_INTERNAL, .n_errors = SRV6_AD_REWRITE_N_COUNTERS, .error_strings = srv6_ad_rewrite_counter_strings, .n_next_nodes = SRV6_AD_REWRITE_N_NEXT, .next_nodes = { [SRV6_AD_REWRITE_NEXT_LOOKUP] = "ip6-lookup", [SRV6_AD_REWRITE_NEXT_ERROR] = "error-drop", }, }; /* *INDENT-ON* */ /** * @brief Graph node for applying a SR policy into an IPv6 packet. 
Encapsulation */ static uword srv6_ad4_rewrite_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { ip6_sr_main_t *srm = &sr_main; srv6_ad_main_t *sm = &srv6_ad_main; u32 n_left_from, next_index, *from, *to_next; u32 cnt_packets = 0; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; while (n_left_from > 0) { u32 n_left_to_next; vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); /* TODO: Dual/quad loop */ while (n_left_from > 0 && n_left_to_next > 0) { u32 bi0; vlib_buffer_t *b0; ip4_header_t *ip0_encap = 0; ip6_header_t *ip0 = 0; ip6_sr_localsid_t *ls0; srv6_ad_localsid_t *ls0_mem; u32 next0 = SRV6_AD_REWRITE_NEXT_LOOKUP; u16 new_l0 = 0; bi0 = from[0]; to_next[0] = bi0; from += 1; to_next += 1; n_left_from -= 1; n_left_to_next -= 1; b0 = vlib_get_buffer (vm, bi0); ip0_encap = vlib_buffer_get_current (b0); ls0 = pool_elt_at_index (srm->localsids, sm->sw_iface_localsid4[vnet_buffer (b0)->sw_if_index [VLIB_RX]]); ls0_mem = ls0->plugin_mem; if (PREDICT_FALSE (ls0_mem == NULL || ls0_mem->rewrite == NULL)) { next0 = SRV6_AD_REWRITE_NEXT_ERROR; b0->error = node->errors[SRV6_AD_REWRITE_COUNTER_NO_RW]; } else { ASSERT (VLIB_BUFFER_PRE_DATA_SIZE >= (ls0_mem->rw_len + b0->current_data)); clib_memcpy_fast (((u8 *) ip0_encap) - ls0_mem->rw_len, ls0_mem->rewrite, ls0_mem->rw_len); vlib_buffer_advance (b0, -(word) ls0_mem->rw_len); ip0 = vlib_buffer_get_current (b0); /* Update inner IPv4 TTL and checksum */ u32 checksum0; ip0_encap->ttl -= 1; checksum0 = ip0_encap->checksum + clib_host_to_net_u16 (0x0100); checksum0 += checksum0 >= 0xffff; ip0_encap->checksum = checksum0; /* Update outer IPv6 length (in case it has changed) */ new_l0 = ls0_mem->rw_len - sizeof (ip6_header_t) + clib_net_to_host_u16 (ip0_encap->length); ip0->payload_length = clib_host_to_net_u16 (new_l0); } if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) && PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { 
srv6_ad_rewrite_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof *tr); tr->error = 0; if (next0 == SRV6_AD_REWRITE_NEXT_ERROR) { tr->error = 1; } else { clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8, sizeof tr->src.as_u8); clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8, sizeof tr->dst.as_u8); } } /* Increment per-SID AD rewrite counters */ vlib_increment_combined_counter (((next0 == SRV6_AD_LOCALSID_NEXT_ERROR) ? &(sm->invalid_counters) : &(sm->valid_counters)), vm->thread_index, ls0_mem->index, 1, vlib_buffer_length_in_chain (vm, b0)); vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); cnt_packets++; } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } /* Update counters */ vlib_node_increment_counter (vm, srv6_ad4_rewrite_node.index, SRV6_AD_REWRITE_COUNTER_PROCESSED, cnt_packets); return frame->n_vectors; } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (srv6_ad4_rewrite_node) = { .function = srv6_ad4_rewrite_fn, .name = "srv6-ad4-rewrite", .vector_size = sizeof (u32), .format_trace = format_srv6_ad_rewrite_trace, .type = VLIB_NODE_TYPE_INTERNAL, .n_errors = SRV6_AD_REWRITE_N_COUNTERS, .error_strings = srv6_ad_rewrite_counter_strings, .n_next_nodes = SRV6_AD_REWRITE_N_NEXT, .next_nodes = { [SRV6_AD_REWRITE_NEXT_LOOKUP] = "ip6-lookup", [SRV6_AD_REWRITE_NEXT_ERROR] = "error-drop", }, }; /* *INDENT-ON* */ /** * @brief Graph node for applying a SR policy into an IPv6 packet. 
Encapsulation */ static uword srv6_ad6_rewrite_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { ip6_sr_main_t *srm = &sr_main; srv6_ad_main_t *sm = &srv6_ad_main; u32 n_left_from, next_index, *from, *to_next; u32 cnt_packets = 0; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; while (n_left_from > 0) { u32 n_left_to_next; vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); /* TODO: Dual/quad loop */ while (n_left_from > 0 && n_left_to_next > 0) { u32 bi0; vlib_buffer_t *b0; ip6_header_t *ip0 = 0, *ip0_encap = 0; ip6_sr_localsid_t *ls0; srv6_ad_localsid_t *ls0_mem; u32 next0 = SRV6_AD_REWRITE_NEXT_LOOKUP; u16 new_l0 = 0; bi0 = from[0]; to_next[0] = bi0; from += 1; to_next += 1; n_left_from -= 1; n_left_to_next -= 1; b0 = vlib_get_buffer (vm, bi0); ip0_encap = vlib_buffer_get_current (b0); ls0 = pool_elt_at_index (srm->localsids, sm->sw_iface_localsid6[vnet_buffer (b0)->sw_if_index [VLIB_RX]]); ls0_mem = ls0->plugin_mem; if (PREDICT_FALSE (ls0_mem == NULL || ls0_mem->rewrite == NULL)) { next0 = SRV6_AD_REWRITE_NEXT_ERROR; b0->error = node->errors[SRV6_AD_REWRITE_COUNTER_NO_RW]; } else { ASSERT (VLIB_BUFFER_PRE_DATA_SIZE >= (ls0_mem->rw_len + b0->current_data)); clib_memcpy_fast (((u8 *) ip0_encap) - ls0_mem->rw_len, ls0_mem->rewrite, ls0_mem->rw_len); vlib_buffer_advance (b0, -(word) ls0_mem->rw_len); ip0 = vlib_buffer_get_current (b0); /* Update inner IPv6 hop limit */ ip0_encap->hop_limit -= 1; /* Update outer IPv6 length (in case it has changed) */ new_l0 = ls0_mem->rw_len + clib_net_to_host_u16 (ip0_encap->payload_length); ip0->payload_length = clib_host_to_net_u16 (new_l0); } if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) && PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { srv6_ad_rewrite_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof *tr); tr->error = 0; if (next0 == SRV6_AD_REWRITE_NEXT_ERROR) { tr->error = 1; } else { clib_memcpy_fast 
(tr->src.as_u8, ip0->src_address.as_u8, sizeof tr->src.as_u8); clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8, sizeof tr->dst.as_u8); } } /* Increment per-SID AD rewrite counters */ vlib_increment_combined_counter (((next0 == SRV6_AD_LOCALSID_NEXT_ERROR) ? &(sm->invalid_counters) : &(sm->valid_counters)), vm->thread_index, ls0_mem->index, 1, vlib_buffer_length_in_chain (vm, b0)); vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); cnt_packets++; } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } /* Update counters */ vlib_node_increment_counter (vm, srv6_ad6_rewrite_node.index, SRV6_AD_REWRITE_COUNTER_PROCESSED, cnt_packets); return frame->n_vectors; } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (srv6_ad6_rewrite_node) = { .function = srv6_ad6_rewrite_fn, .name = "srv6-ad6-rewrite", .vector_size = sizeof (u32), .format_trace = format_srv6_ad_rewrite_trace, .type = VLIB_NODE_TYPE_INTERNAL, .n_errors = SRV6_AD_REWRITE_N_COUNTERS, .error_strings = srv6_ad_rewrite_counter_strings, .n_next_nodes = SRV6_AD_REWRITE_N_NEXT, .next_nodes = { [SRV6_AD_REWRITE_NEXT_LOOKUP] = "ip6-lookup", [SRV6_AD_REWRITE_NEXT_ERROR] = "error-drop", }, }; /* *INDENT-ON* */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */