aboutsummaryrefslogtreecommitdiffstats
path: root/src/vnet/adj/adj.c
blob: 0966d97cc6d66982502c4f338ba06e36e6cdd0a2 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/adj/adj.h>
#include <vnet/adj/adj_internal.h>
#include <vnet/adj/adj_glean.h>
#include <vnet/adj/adj_midchain.h>
#include <vnet/adj/adj_mcast.h>
#include <vnet/adj/adj_delegate.h>
#include <vnet/fib/fib_node_list.h>

/* Adjacency packet/byte counters indexed by adjacency index.
 * Exported into the stats segment under /net/adjacency. */
vlib_combined_counter_main_t adjacency_counters = {
    .name = "adjacency",
    .stat_segment_name = "/net/adjacency",
};

/*
 * the single adj pool; all adjacency types (nbr, glean, mcast, midchain)
 * are allocated from here and share one index space
 */
ip_adjacency_t *adj_pool;

/**
 * @brief Global Config for enabling per-adjacency counters.
 * By default these are disabled.
 * Toggled at runtime via the "adjacency counters" CLI below.
 */
int adj_per_adj_counters;

/* The broadcast next-hop address: all-ones. Written via the ip6 member
 * so both 64-bit halves of the ip46 union are filled. */
const ip46_address_t ADJ_BCAST_ADDR = {
    .ip6 = {
        .as_u64[0] = 0xffffffffffffffff,
        .as_u64[1] = 0xffffffffffffffff,
    },
};

/**
 * Adj flag names, indexed by attribute (bit position); used by
 * format_adj_flags below.
 */
static const char *adj_attr_names[] = ADJ_ATTR_NAMES;

/**
 * @brief In debug images, fill an adjacency with the 0xfe poison
 * pattern so that use of an uninitialised field is easy to spot.
 * A no-op in release builds.
 */
always_inline void
adj_poison (ip_adjacency_t * adj)
{
    if (CLIB_DEBUG > 0)
        clib_memset (adj, 0xfe, sizeof (*adj));
}

/**
 * @brief Allocate a new adjacency from the global pool and initialise
 * the fields common to all adjacency types.
 *
 * @param proto the FIB protocol of the adjacency's next-hop
 * @return the new adjacency; use adj_get_index() to recover its index
 */
ip_adjacency_t *
adj_alloc (fib_protocol_t proto)
{
    ip_adjacency_t *adj;

    pool_get_aligned(adj_pool, adj, CLIB_CACHE_LINE_BYTES);

    /* debug-image poison first, then deliberately overwrite the fields
     * that must always be valid */
    adj_poison(adj);

    /* Make sure certain fields are always initialized. */
    /* Validate adjacency counters. */
    vlib_validate_combined_counter(&adjacency_counters,
                                   adj_get_index(adj));
    vlib_zero_combined_counter(&adjacency_counters,
                               adj_get_index(adj));
    fib_node_init(&adj->ia_node,
                  FIB_NODE_TYPE_ADJ);

    adj->ia_nh_proto = proto;
    adj->ia_flags = 0;
    adj->rewrite_header.sw_if_index = ~0;
    adj->rewrite_header.flags = 0;
    adj->lookup_next_index = 0;
    adj->ia_delegates = NULL;

    /* lest it become a midchain in the future */
    clib_memset(&adj->sub_type.midchain.next_dpo, 0,
           sizeof(adj->sub_type.midchain.next_dpo));

    return (adj);
}

/**
 * @brief Return non-zero if the index is 'special', i.e. not a real
 * pool index. The only special value is the invalid sentinel.
 */
static int
adj_index_is_special (adj_index_t adj_index)
{
    return (ADJ_INDEX_INVALID == adj_index);
}

/**
 * @brief Format adjacency flags as a space-separated list of attribute
 * names; prints "None" when no flags are set.
 */
u8*
format_adj_flags (u8 * s, va_list * args)
{
    adj_flags_t af;
    adj_attr_t at;

    /* flags are promoted to int through the varargs list */
    af = va_arg (*args, int);

    if (ADJ_FLAG_NONE == af)
    {
        return (format(s, "None"));
    }
    FOR_EACH_ADJ_ATTR(at)
    {
        /* each attribute is a bit position in the flags word */
        if (af & (1 << at))
        {
            s = format(s, "%s ", adj_attr_names[at]);
        }
    }
    return (s);
}

/**
 * @brief Pretty print helper function for formatting specific adjacencies.
 * Dispatches on the adjacency's lookup_next_index to the type-specific
 * formatter; with FORMAT_IP_ADJACENCY_DETAIL also prints flags,
 * counters, locks, delegates and children.
 * @param s - input string to format
 * @param args - other args passed to format function such as:
 *                 - adj_index (u32)
 *                 - format_ip_adjacency_flags_t (detail level)
 */
u8 *
format_ip_adjacency (u8 * s, va_list * args)
{
    format_ip_adjacency_flags_t fiaf;
    ip_adjacency_t * adj;
    u32 adj_index;

    adj_index = va_arg (*args, u32);
    fiaf = va_arg (*args, format_ip_adjacency_flags_t);

    /* guard against a stale/free index before dereferencing the pool */
    if (!adj_is_valid(adj_index))
      return format(s, "<invalid adjacency>");

    adj = adj_get(adj_index);

    switch (adj->lookup_next_index)
    {
    case IP_LOOKUP_NEXT_REWRITE:
    case IP_LOOKUP_NEXT_BCAST:
	s = format (s, "%U", format_adj_nbr, adj_index, 0);
	break;
    case IP_LOOKUP_NEXT_ARP:
	s = format (s, "%U", format_adj_nbr_incomplete, adj_index, 0);
	break;
    case IP_LOOKUP_NEXT_GLEAN:
	s = format (s, "%U", format_adj_glean, adj_index, 0);
	break;
    case IP_LOOKUP_NEXT_MIDCHAIN:
	s = format (s, "%U", format_adj_midchain, adj_index, 2);
	break;
    case IP_LOOKUP_NEXT_MCAST:
	s = format (s, "%U", format_adj_mcast, adj_index, 0);
	break;
    case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
	s = format (s, "%U", format_adj_mcast_midchain, adj_index, 0);
	break;
    case IP_LOOKUP_NEXT_DROP:
    case IP_LOOKUP_NEXT_PUNT:
    case IP_LOOKUP_NEXT_LOCAL:
    case IP_LOOKUP_NEXT_ICMP_ERROR:
    case IP_LOOKUP_N_NEXT:
        /* no type-specific formatter for these terminal types */
        break;
    }

    if (fiaf & FORMAT_IP_ADJACENCY_DETAIL)
    {
        vlib_counter_t counts;

        vlib_get_combined_counter(&adjacency_counters, adj_index, &counts);
        s = format (s, "\n   flags:%U", format_adj_flags, adj->ia_flags);
        s = format (s, "\n   counts:[%Ld:%Ld]", counts.packets, counts.bytes);
	s = format (s, "\n   locks:%d", adj->ia_node.fn_locks);
	s = format(s, "\n delegates:\n  ");
        s = adj_delegate_format(s, adj);

	s = format(s, "\n children:");
        if (fib_node_list_get_size(adj->ia_node.fn_children))
        {
            s = format(s, "\n  ");
            s = fib_node_children_format(adj->ia_node.fn_children, s);
        }
    }

    return s;
}

/**
 * @brief Detect a recursion loop through an adjacency.
 *
 * Only midchain adjacencies stack on further forwarding objects, so
 * only they can participate in a loop; every other adjacency type is a
 * terminal graph node.
 *
 * @param ai the adjacency to check
 * @param entry_indicies vector of FIB entries visited on this path
 * @return !0 if a loop is detected, 0 otherwise
 */
int
adj_recursive_loop_detect (adj_index_t ai,
                           fib_node_index_t **entry_indicies)
{
    ip_adjacency_t * adj;

    adj = adj_get(ai);

    switch (adj->lookup_next_index)
    {
    case IP_LOOKUP_NEXT_REWRITE:
    case IP_LOOKUP_NEXT_ARP:
    case IP_LOOKUP_NEXT_GLEAN:
    case IP_LOOKUP_NEXT_MCAST:
    case IP_LOOKUP_NEXT_BCAST:
    case IP_LOOKUP_NEXT_DROP:
    case IP_LOOKUP_NEXT_PUNT:
    case IP_LOOKUP_NEXT_LOCAL:
    case IP_LOOKUP_NEXT_ICMP_ERROR:
    case IP_LOOKUP_N_NEXT:
        /*
         * these adjacency types are terminal graph nodes, so there's no
         * possibility of a loop down here.
         */
	break;
    case IP_LOOKUP_NEXT_MIDCHAIN:
    case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
        return (adj_ndr_midchain_recursive_loop_detect(ai, entry_indicies));
    }

    return (0);
}

/*
 * adj_last_lock_gone
 *
 * last lock/reference to the adj has gone, we no longer need it.
 * Removes the adjacency from its type-specific DB and returns it to
 * the pool. The DB removal is done under the worker barrier so no
 * worker can look up the adjacency while it is being torn down.
 */
static void
adj_last_lock_gone (ip_adjacency_t *adj)
{
    vlib_main_t * vm = vlib_get_main();

    /* a locked child would have kept the adj alive */
    ASSERT(0 == fib_node_list_get_size(adj->ia_node.fn_children));
    ADJ_DBG(adj, "last-lock-gone");

    /* notify delegates before the adj disappears */
    adj_delegate_adj_deleted(adj);

    vlib_worker_thread_barrier_sync (vm);

    switch (adj->lookup_next_index)
    {
    case IP_LOOKUP_NEXT_MIDCHAIN:
        dpo_reset(&adj->sub_type.midchain.next_dpo);
        /* FALL THROUGH */
    case IP_LOOKUP_NEXT_ARP:
    case IP_LOOKUP_NEXT_REWRITE:
    case IP_LOOKUP_NEXT_BCAST:
	/*
	 * complete and incomplete nbr adjs
	 */
	adj_nbr_remove(adj_get_index(adj),
                       adj->ia_nh_proto,
		       adj->ia_link,
		       &adj->sub_type.nbr.next_hop,
		       adj->rewrite_header.sw_if_index);
	break;
    case IP_LOOKUP_NEXT_GLEAN:
	adj_glean_remove(adj->ia_nh_proto,
			 adj->rewrite_header.sw_if_index);
	break;
    case IP_LOOKUP_NEXT_MCAST:
    case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
	adj_mcast_remove(adj->ia_nh_proto,
			 adj->rewrite_header.sw_if_index);
	break;
    case IP_LOOKUP_NEXT_DROP:
    case IP_LOOKUP_NEXT_PUNT:
    case IP_LOOKUP_NEXT_LOCAL:
    case IP_LOOKUP_NEXT_ICMP_ERROR:
    case IP_LOOKUP_N_NEXT:
	/*
	 * type not stored in any DB from which we need to remove it
	 */
	break;
    }

    vlib_worker_thread_barrier_release(vm);

    fib_node_deinit(&adj->ia_node);
    /* all delegates should have removed themselves on the delete
     * notification above */
    ASSERT(0 == vec_len(adj->ia_delegates));
    vec_free(adj->ia_delegates);
    pool_put(adj_pool, adj);
}

/**
 * @brief Return the uRPF interface for an adjacency-type DPO: the
 * interface the adjacency rewrites packets onto.
 */
u32
adj_dpo_get_urpf (const dpo_id_t *dpo)
{
    const ip_adjacency_t *adj = adj_get(dpo->dpoi_index);

    return (adj->rewrite_header.sw_if_index);
}

/**
 * @brief Take a reference on an adjacency. A no-op for the special
 * (invalid) index.
 */
void
adj_lock (adj_index_t adj_index)
{
    ip_adjacency_t *adj;

    if (adj_index_is_special(adj_index))
        return;

    adj = adj_get(adj_index);
    ASSERT(adj);

    ADJ_DBG(adj, "lock");
    fib_node_lock(&adj->ia_node);
}

/**
 * @brief Release a reference on an adjacency. A no-op for the special
 * (invalid) index. When the last lock goes, the FIB node layer calls
 * back into adj_last_lock_gone to tear the adjacency down.
 */
void
adj_unlock (adj_index_t adj_index)
{
    ip_adjacency_t *adj;

    if (adj_index_is_special(adj_index))
    {
	return;
    }

    adj = adj_get(adj_index);
    ASSERT(adj);

    ADJ_DBG(adj, "unlock");

    fib_node_unlock(&adj->ia_node);
}

/**
 * @brief Add a child dependent to the adjacency so it is back-walked
 * when the adjacency changes.
 * @return the sibling index to use with adj_child_remove, or ~0 if the
 *         adjacency index is special.
 */
u32
adj_child_add (adj_index_t adj_index,
	       fib_node_type_t child_type,
	       fib_node_index_t child_index)
{
    ASSERT(ADJ_INDEX_INVALID != adj_index);

    if (adj_index_is_special(adj_index))
        return (~0);

    return (fib_node_child_add(FIB_NODE_TYPE_ADJ, adj_index,
                               child_type, child_index));
}

/**
 * @brief Remove a previously added child dependent from the adjacency.
 * A no-op for the special (invalid) index.
 */
void
adj_child_remove (adj_index_t adj_index,
		  u32 sibling_index)
{
    if (adj_index_is_special(adj_index))
        return;

    fib_node_child_remove(FIB_NODE_TYPE_ADJ, adj_index, sibling_index);
}

/*
 * Context for the walk to update the cached feature flags.
 */
typedef struct adj_feature_update_t_
{
    u8 arc;    /* the feature arc whose state changed */
    u8 enable; /* non-zero if the arc was enabled, 0 if disabled */
} adj_feature_update_ctx_t;

/**
 * @brief Walk callback: if the changed feature arc applies to this
 * adjacency's link type, update the cached 'has-features' rewrite flag.
 */
static adj_walk_rc_t
adj_feature_update_walk_cb (adj_index_t ai,
                            void *arg)
{
    adj_feature_update_ctx_t *ctx = arg;
    ip_adjacency_t *adj;

    adj = adj_get(ai);

    /*
     * this ugly mess matches the feature arc that is changing with affected
     * adjacencies: ip6/ip4/mpls output arcs map to the corresponding
     * link types
     */
    if (((ctx->arc == ip6_main.lookup_main.output_feature_arc_index) &&
         (VNET_LINK_IP6 == adj->ia_link)) ||
        ((ctx->arc == ip4_main.lookup_main.output_feature_arc_index) &&
         (VNET_LINK_IP4 == adj->ia_link)) ||
        ((ctx->arc == mpls_main.output_feature_arc_index) &&
         (VNET_LINK_MPLS == adj->ia_link)))
    {
        if (ctx->enable)
            adj->rewrite_header.flags |= VNET_REWRITE_HAS_FEATURES;
        else
            adj->rewrite_header.flags &= ~VNET_REWRITE_HAS_FEATURES;
    }
    return (ADJ_WALK_RC_CONTINUE);
}

/**
 * @brief A feature arc on an interface was enabled/disabled; refresh
 * the cached 'has-features' flag on all that interface's adjacencies.
 */
void
adj_feature_update (u32 sw_if_index,
                    u8 arc_index,
                    u8 is_enable)
{
    adj_feature_update_ctx_t walk_ctx = {
        .arc = arc_index,
        .enable = is_enable,
    };

    adj_walk (sw_if_index, adj_feature_update_walk_cb, &walk_ctx);
}

/**
 * @brief Walk callback: refresh the MTU cached in this adjacency's
 * rewrite header from the interface's current MTU.
 */
static adj_walk_rc_t
adj_mtu_update_walk_cb (adj_index_t ai,
                        void *arg)
{
    ip_adjacency_t *walked_adj = adj_get(ai);

    vnet_rewrite_update_mtu (vnet_get_main(),
                             walked_adj->ia_link,
                             &walked_adj->rewrite_header);

    return (ADJ_WALK_RC_CONTINUE);
}

/**
 * @brief Interface MTU change handler: walk every adjacency on the
 * interface and refresh its cached rewrite MTU.
 */
static clib_error_t *
adj_mtu_update (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
{
    adj_walk (sw_if_index, adj_mtu_update_walk_cb, NULL);

    return (NULL);
}

VNET_SW_INTERFACE_MTU_CHANGE_FUNCTION(adj_mtu_update);

/**
 * @brief Walk the Adjacencies on a given interface: for each IP
 * protocol, the neighbour and mcast adjacency DBs are visited.
 */
void
adj_walk (u32 sw_if_index,
          adj_walk_cb_t cb,
          void *ctx)
{
    /*
     * walk all the neighbor adjacencies
     */
    fib_protocol_t proto;

    FOR_EACH_FIB_IP_PROTOCOL(proto)
    {
        adj_nbr_walk(sw_if_index, proto, cb, ctx);
        adj_mcast_walk(sw_if_index, proto, cb, ctx);
    }
}

/**
 * @brief Return the link type of the adjacency
 */
vnet_link_t
adj_get_link_type (adj_index_t ai)
{
    return (adj_get(ai)->ia_link);
}

/**
 * @brief Return the sw interface index of the adjacency.
 */
u32
adj_get_sw_if_index (adj_index_t ai)
{
    return (adj_get(ai)->rewrite_header.sw_if_index);
}

/**
 * @brief Return true if the adjacency is 'UP', i.e. can be used for
 * forwarding; 0 is down, !0 is up. The decision is delegated to the
 * adjacency's BFD state.
 */
int
adj_is_up (adj_index_t ai)
{
    return (adj_bfd_is_up(ai));
}

/**
 * @brief Return the rewrite string of the adjacency.
 * NOTE(review): the start is computed as data - data_bytes, i.e. the
 * rewrite appears to be right-aligned in the rewrite data area —
 * confirm against vnet_rewrite_header_t's layout.
 */
const u8*
adj_get_rewrite (adj_index_t ai)
{
    vnet_rewrite_header_t *rw;
    ip_adjacency_t *adj;

    adj = adj_get(ai);
    rw = &adj->rewrite_header;

    /* 0xfefe is the debug poison pattern written by adj_poison; catch
     * reads of a never-written rewrite */
    ASSERT (rw->data_bytes != 0xfefe);

    return (rw->data - rw->data_bytes);
}

/**
 * @brief FIB node VFT: return the graph node embedded in the adjacency
 * with the given index.
 */
static fib_node_t *
adj_get_node (fib_node_index_t index)
{
    return (&(adj_get(index)->ia_node));
}

/* Recover the enclosing ip_adjacency_t from its embedded ia_node member */
#define ADJ_FROM_NODE(_node)						\
    ((ip_adjacency_t*)((char*)_node - STRUCT_OFFSET_OF(ip_adjacency_t, ia_node)))

/* FIB node VFT: last reference gone - tear down the adjacency */
static void
adj_node_last_lock_gone (fib_node_t *node)
{
    adj_last_lock_gone(ADJ_FROM_NODE(node));
}

/**
 * @brief FIB node VFT: a parent object changed; propagate to the
 * adjacency. Only midchains stack on other objects, so only they have
 * anything to restack.
 */
static fib_node_back_walk_rc_t
adj_back_walk_notify (fib_node_t *node,
		      fib_node_back_walk_ctx_t *ctx)
{
    ip_adjacency_t *adj;

    adj = ADJ_FROM_NODE(node);

    switch (adj->lookup_next_index)
    {
    case IP_LOOKUP_NEXT_MIDCHAIN:
        adj_midchain_delegate_restack(adj_get_index(adj));
        break;
    case IP_LOOKUP_NEXT_ARP:
    case IP_LOOKUP_NEXT_REWRITE:
    case IP_LOOKUP_NEXT_BCAST:
    case IP_LOOKUP_NEXT_GLEAN:
    case IP_LOOKUP_NEXT_MCAST:
    case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
    case IP_LOOKUP_NEXT_DROP:
    case IP_LOOKUP_NEXT_PUNT:
    case IP_LOOKUP_NEXT_LOCAL:
    case IP_LOOKUP_NEXT_ICMP_ERROR:
    case IP_LOOKUP_N_NEXT:
        /*
         * these types do not stack on a parent, so there is nothing to
         * back-walk us - we should never get here
         */
        ASSERT(0);
        break;
    }

    return (FIB_NODE_BACK_WALK_CONTINUE);
}

/*
 * Adjacency's graph node virtual function table, registered with the
 * FIB node layer in adj_module_init below.
 */
static const fib_node_vft_t adj_vft = {
    .fnv_get = adj_get_node,
    .fnv_last_lock = adj_node_last_lock_gone,
    .fnv_back_walk = adj_back_walk_notify,
};

/**
 * @brief Module init: register the adjacency node type with the FIB
 * graph, then initialise each adjacency sub-module.
 */
static clib_error_t *
adj_module_init (vlib_main_t * vm)
{
    fib_node_register_type(FIB_NODE_TYPE_ADJ, &adj_vft);

    adj_nbr_module_init();
    adj_glean_module_init();
    adj_midchain_module_init();
    adj_mcast_module_init();

    return (NULL);
}

VLIB_INIT_FUNCTION (adj_module_init);

/**
 * @brief CLI handler for "show adj": print one adjacency by index, all
 * adjacencies (optionally filtered by interface), or a summary.
 */
static clib_error_t *
adj_show (vlib_main_t * vm,
	  unformat_input_t * input,
	  vlib_cli_command_t * cmd)
{
    adj_index_t ai = ADJ_INDEX_INVALID;
    u32 sw_if_index = ~0;
    int summary = 0;

    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
	if (unformat (input, "%d", &ai))
	    ;
	else if (unformat (input, "sum"))
	    summary = 1;
	else if (unformat (input, "summary"))
	    summary = 1;
	else if (unformat (input, "%U",
			   unformat_vnet_sw_interface, vnet_get_main(),
			   &sw_if_index))
	    ;
	else
	    break;
    }

    if (summary)
    {
        vlib_cli_output (vm, "Number of adjacencies: %d", pool_elts(adj_pool));
        vlib_cli_output (vm, "Per-adjacency counters: %s",
                         (adj_are_counters_enabled() ?
                          "enabled":
                          "disabled"));
    }
    else
    {
        if (ADJ_INDEX_INVALID != ai)
        {
            /* a specific index was given: reject free/out-of-range ones */
            if (pool_is_free_index(adj_pool, ai))
            {
                vlib_cli_output (vm, "adjacency %d invalid", ai);
                return 0;
            }

            vlib_cli_output (vm, "[@%d] %U",
                             ai,
                             format_ip_adjacency,  ai,
                             FORMAT_IP_ADJACENCY_DETAIL);
        }
        else
        {
            /* dump all adjacencies, skipping those not on the requested
             * interface (if one was given) */
            /* *INDENT-OFF* */
            pool_foreach_index(ai, adj_pool,
            ({
                if (~0 != sw_if_index &&
                    sw_if_index != adj_get_sw_if_index(ai))
                {
                }
                else
                {
                    vlib_cli_output (vm, "[@%d] %U",
                                     ai,
                                     format_ip_adjacency, ai,
                                     FORMAT_IP_ADJACENCY_NONE);
                }
            }));
            /* *INDENT-ON* */
        }
    }
    return 0;
}

/*?
 * Show all adjacencies.
 * @cliexpar
 * @cliexstart{sh adj}
 * [@0]
 * [@1]  glean: loop0
 * [@2] ipv4 via 1.0.0.2 loop0: IP4: 00:00:22:aa:bb:cc -> 00:00:11:aa:bb:cc
 * [@3] mpls via 1.0.0.2 loop0: MPLS: 00:00:22:aa:bb:cc -> 00:00:11:aa:bb:cc
 * [@4] ipv4 via 1.0.0.3 loop0: IP4: 00:00:22:aa:bb:cc -> 00:00:11:aa:bb:cc
 * [@5] mpls via 1.0.0.3 loop0: MPLS: 00:00:22:aa:bb:cc -> 00:00:11:aa:bb:cc
 * @cliexend
 ?*/
VLIB_CLI_COMMAND (adj_show_command, static) = {
    .path = "show adj",
    .short_help = "show adj [<adj_index>] [interface] [summary]",
    .function = adj_show,
};

/**
 * @brief CLI invoked function to enable/disable per-adj counters
 */
static clib_error_t *
adj_cli_counters_set (vlib_main_t * vm,
                      unformat_input_t * input,
                      vlib_cli_command_t * cmd)
{
    clib_error_t *error = NULL;
    /* ~0 means the user made no explicit choice */
    int enable = ~0;

    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
	if (unformat (input, "enable"))
	    enable = 1;
	else if (unformat (input, "disable"))
	    enable = 0;
	else
	{
	    /* Previously, an unrecognised token silently terminated parsing,
	     * so e.g. "adjacency counters enable garbage" would succeed.
	     * Report the bad input instead, per the usual VPP CLI pattern. */
	    error = clib_error_return (0, "unknown input '%U'",
				       format_unformat_error, input);
	    goto done;
	}
    }

    if (enable != ~0)
    {
        /* user requested something sensible */
        adj_per_adj_counters = enable;
    }
    else
    {
        error = clib_error_return (0, "specify 'enable' or 'disable'");
    }

done:
    return (error);
}

/*?
 * Enable/disable per-adjacency counters. This is optional because it comes
 * with a non-negligible performance cost.
 ?*/
/* CLI registration: "adjacency counters [enable|disable]",
 * dispatched to adj_cli_counters_set() above. */
VLIB_CLI_COMMAND (adj_cli_counters_set_command, static) = {
    .path = "adjacency counters",
    .short_help = "adjacency counters [enable|disable]",
    .function = adj_cli_counters_set,
};
s="n">bfd_session_t * bs) { bs->echo_transmit_interval_clocks = clib_max (bs->effective_desired_min_tx_clocks, bs->remote_min_echo_rx_clocks); BFD_DBG ("Recalculated echo transmit interval " BFD_CLK_FMT, BFD_CLK_PRN (bs->echo_transmit_interval_clocks)); } static void bfd_calc_next_tx (bfd_main_t * bm, bfd_session_t * bs, u64 now) { if (bs->local_detect_mult > 1) { /* common case - 75-100% of transmit interval */ bs->tx_timeout_clocks = bs->last_tx_clocks + (1 - .25 * (random_f64 (&bm->random_seed))) * bs->transmit_interval_clocks; if (bs->tx_timeout_clocks < now) { /* * the timeout is in the past, which means that either remote * demand mode was set or performance/clock issues ... */ BFD_DBG ("Missed %lu transmit events (now is %lu, calc " "tx_timeout is %lu)", (now - bs->tx_timeout_clocks) / bs->transmit_interval_clocks, now, bs->tx_timeout_clocks); bs->tx_timeout_clocks = now; } } else { /* special case - 75-90% of transmit interval */ bs->tx_timeout_clocks = bs->last_tx_clocks + (.9 - .15 * (random_f64 (&bm->random_seed))) * bs->transmit_interval_clocks; if (bs->tx_timeout_clocks < now) { /* * the timeout is in the past, which means that either remote * demand mode was set or performance/clock issues ... 
*/ BFD_DBG ("Missed %lu transmit events (now is %lu, calc " "tx_timeout is %lu)", (now - bs->tx_timeout_clocks) / bs->transmit_interval_clocks, now, bs->tx_timeout_clocks); bs->tx_timeout_clocks = now; } } if (bs->tx_timeout_clocks) { BFD_DBG ("Next transmit in %lu clocks/%.02fs@%lu", bs->tx_timeout_clocks - now, (bs->tx_timeout_clocks - now) / bm->cpu_cps, bs->tx_timeout_clocks); } } static void bfd_calc_next_echo_tx (bfd_main_t * bm, bfd_session_t * bs, u64 now) { bs->echo_tx_timeout_clocks = bs->echo_last_tx_clocks + bs->echo_transmit_interval_clocks; if (bs->echo_tx_timeout_clocks < now) { /* huh, we've missed it already, transmit now */ BFD_DBG ("Missed %lu echo transmit events (now is %lu, calc tx_timeout " "is %lu)", (now - bs->echo_tx_timeout_clocks) / bs->echo_transmit_interval_clocks, now, bs->echo_tx_timeout_clocks); bs->echo_tx_timeout_clocks = now; } BFD_DBG ("Next echo transmit in %lu clocks/%.02fs@%lu", bs->echo_tx_timeout_clocks - now, (bs->echo_tx_timeout_clocks - now) / bm->cpu_cps, bs->echo_tx_timeout_clocks); } static void bfd_recalc_detection_time (bfd_main_t * bm, bfd_session_t * bs) { if (bs->local_state == BFD_STATE_init || bs->local_state == BFD_STATE_up) { bs->detection_time_clocks = bs->remote_detect_mult * clib_max (bs->effective_required_min_rx_clocks, bs->remote_desired_min_tx_clocks); BFD_DBG ("Recalculated detection time %lu clocks/%.2fs", bs->detection_time_clocks, bs->detection_time_clocks / bm->cpu_cps); } } static void bfd_set_timer (bfd_main_t * bm, bfd_session_t * bs, u64 now, int handling_wakeup) { u64 next = 0; u64 rx_timeout = 0; u64 tx_timeout = 0; if (BFD_STATE_up == bs->local_state) { rx_timeout = bs->last_rx_clocks + bs->detection_time_clocks; } if (BFD_STATE_up != bs->local_state || (!bs->remote_demand && bs->remote_min_rx_usec) || BFD_POLL_NOT_NEEDED != bs->poll_state) { tx_timeout = bs->tx_timeout_clocks; } if (tx_timeout && rx_timeout) { next = clib_min (tx_timeout, rx_timeout); } else if (tx_timeout) { next = 
tx_timeout; } else if (rx_timeout) { next = rx_timeout; } if (bs->echo && next > bs->echo_tx_timeout_clocks) { next = bs->echo_tx_timeout_clocks; } BFD_DBG ("bs_idx=%u, tx_timeout=%lu, echo_tx_timeout=%lu, rx_timeout=%lu, " "next=%s", bs->bs_idx, tx_timeout, bs->echo_tx_timeout_clocks, rx_timeout, next == tx_timeout ? "tx" : (next == bs->echo_tx_timeout_clocks ? "echo tx" : "rx")); /* sometimes the wheel expires an event a bit sooner than requested, account for that here */ if (next && (now + bm->wheel_inaccuracy > bs->wheel_time_clocks || next < bs->wheel_time_clocks || !bs->wheel_time_clocks)) { bs->wheel_time_clocks = next; BFD_DBG ("timing_wheel_insert(%p, %lu (%ld clocks/%.2fs in the " "future), %u);", &bm->wheel, bs->wheel_time_clocks, (i64) bs->wheel_time_clocks - clib_cpu_time_now (), (i64) (bs->wheel_time_clocks - clib_cpu_time_now ()) / bm->cpu_cps, bs->bs_idx); timing_wheel_insert (&bm->wheel, bs->wheel_time_clocks, bs->bs_idx); if (!handling_wakeup) { vlib_process_signal_event (bm->vlib_main, bm->bfd_process_node_index, BFD_EVENT_RESCHEDULE, bs->bs_idx); } } } static void bfd_set_effective_desired_min_tx (bfd_main_t * bm, bfd_session_t * bs, u64 now, u64 desired_min_tx_clocks) { bs->effective_desired_min_tx_clocks = desired_min_tx_clocks; BFD_DBG ("Set effective desired min tx to " BFD_CLK_FMT, BFD_CLK_PRN (bs->effective_desired_min_tx_clocks)); bfd_recalc_detection_time (bm, bs); bfd_recalc_tx_interval (bm, bs); bfd_recalc_echo_tx_interval (bm, bs); bfd_calc_next_tx (bm, bs, now); } static void bfd_set_effective_required_min_rx (bfd_main_t * bm, bfd_session_t * bs, u64 required_min_rx_clocks) { bs->effective_required_min_rx_clocks = required_min_rx_clocks; BFD_DBG ("Set effective required min rx to " BFD_CLK_FMT, BFD_CLK_PRN (bs->effective_required_min_rx_clocks)); bfd_recalc_detection_time (bm, bs); } static void bfd_set_remote_required_min_rx (bfd_main_t * bm, bfd_session_t * bs, u64 now, u32 remote_required_min_rx_usec) { if (bs->remote_min_rx_usec 
!= remote_required_min_rx_usec) { bs->remote_min_rx_usec = remote_required_min_rx_usec; bs->remote_min_rx_clocks = bfd_usec_to_clocks (bm, remote_required_min_rx_usec); BFD_DBG ("Set remote min rx to " BFD_CLK_FMT, BFD_CLK_PRN (bs->remote_min_rx_clocks)); bfd_recalc_detection_time (bm, bs); bfd_recalc_tx_interval (bm, bs); } } static void bfd_set_remote_required_min_echo_rx (bfd_main_t * bm, bfd_session_t * bs, u64 now, u32 remote_required_min_echo_rx_usec) { if (bs->remote_min_echo_rx_usec != remote_required_min_echo_rx_usec) { bs->remote_min_echo_rx_usec = remote_required_min_echo_rx_usec; bs->remote_min_echo_rx_clocks = bfd_usec_to_clocks (bm, bs->remote_min_echo_rx_usec); BFD_DBG ("Set remote min echo rx to " BFD_CLK_FMT, BFD_CLK_PRN (bs->remote_min_echo_rx_clocks)); bfd_recalc_echo_tx_interval (bm, bs); } } static void bfd_notify_listeners (bfd_main_t * bm, bfd_listen_event_e event, const bfd_session_t * bs) { bfd_notify_fn_t *fn; vec_foreach (fn, bm->listeners) { (*fn) (event, bs); } } void bfd_session_start (bfd_main_t * bm, bfd_session_t * bs) { BFD_DBG ("\nStarting session: %U", format_bfd_session, bs); vlib_log_info (bm->log_class, "start BFD session: %U", format_bfd_session_brief, bs); bfd_set_effective_required_min_rx (bm, bs, bs->config_required_min_rx_clocks); bfd_recalc_tx_interval (bm, bs); vlib_process_signal_event (bm->vlib_main, bm->bfd_process_node_index, BFD_EVENT_NEW_SESSION, bs->bs_idx); bfd_notify_listeners (bm, BFD_LISTEN_EVENT_CREATE, bs); } void bfd_session_set_flags (bfd_session_t * bs, u8 admin_up_down) { bfd_main_t *bm = &bfd_main; u64 now = clib_cpu_time_now (); if (admin_up_down) { BFD_DBG ("Session set admin-up, bs-idx=%u", bs->bs_idx); vlib_log_info (bm->log_class, "set session admin-up: %U", format_bfd_session_brief, bs); bfd_set_state (bm, bs, BFD_STATE_down, 0); bfd_set_diag (bs, BFD_DIAG_CODE_no_diag); bfd_calc_next_tx (bm, bs, now); bfd_set_timer (bm, bs, now, 0); } else { BFD_DBG ("Session set admin-down, bs-idx=%u", 
bs->bs_idx); vlib_log_info (bm->log_class, "set session admin-down: %U", format_bfd_session_brief, bs); bfd_set_diag (bs, BFD_DIAG_CODE_admin_down); bfd_set_state (bm, bs, BFD_STATE_admin_down, 0); bfd_calc_next_tx (bm, bs, now); bfd_set_timer (bm, bs, now, 0); } } u8 * bfd_input_format_trace (u8 * s, va_list * args) { CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); const bfd_input_trace_t *t = va_arg (*args, bfd_input_trace_t *); const bfd_pkt_t *pkt = (bfd_pkt_t *) t->data; if (t->len > STRUCT_SIZE_OF (bfd_pkt_t, head)) { s = format (s, "BFD v%u, diag=%u(%s), state=%u(%s),\n" " flags=(P:%u, F:%u, C:%u, A:%u, D:%u, M:%u), " "detect_mult=%u, length=%u\n", bfd_pkt_get_version (pkt), bfd_pkt_get_diag_code (pkt), bfd_diag_code_string (bfd_pkt_get_diag_code (pkt)), bfd_pkt_get_state (pkt), bfd_state_string (bfd_pkt_get_state (pkt)), bfd_pkt_get_poll (pkt), bfd_pkt_get_final (pkt), bfd_pkt_get_control_plane_independent (pkt), bfd_pkt_get_auth_present (pkt), bfd_pkt_get_demand (pkt), bfd_pkt_get_multipoint (pkt), pkt->head.detect_mult, pkt->head.length); if (t->len >= sizeof (bfd_pkt_t) && pkt->head.length >= sizeof (bfd_pkt_t)) { s = format (s, " my discriminator: %u\n", clib_net_to_host_u32 (pkt->my_disc)); s = format (s, " your discriminator: %u\n", clib_net_to_host_u32 (pkt->your_disc)); s = format (s, " desired min tx interval: %u\n", clib_net_to_host_u32 (pkt->des_min_tx)); s = format (s, " required min rx interval: %u\n", clib_net_to_host_u32 (pkt->req_min_rx)); s = format (s, " required min echo rx interval: %u", clib_net_to_host_u32 (pkt->req_min_echo_rx)); } if (t->len >= sizeof (bfd_pkt_with_common_auth_t) && pkt->head.length >= sizeof (bfd_pkt_with_common_auth_t) && bfd_pkt_get_auth_present (pkt)) { const bfd_pkt_with_common_auth_t *with_auth = (void *) pkt; const bfd_auth_common_t *common = &with_auth->common_auth; s = format (s, "\n auth len: %u\n", common->len); s = format 
(s, " auth type: %u:%s\n", common->type, bfd_auth_type_str (common->type)); if (t->len >= sizeof (bfd_pkt_with_sha1_auth_t) && pkt->head.length >= sizeof (bfd_pkt_with_sha1_auth_t) && (BFD_AUTH_TYPE_keyed_sha1 == common->type || BFD_AUTH_TYPE_meticulous_keyed_sha1 == common->type)) { const bfd_pkt_with_sha1_auth_t *with_sha1 = (void *) pkt; const bfd_auth_sha1_t *sha1 = &with_sha1->sha1_auth; s = format (s, " seq num: %u\n", clib_net_to_host_u32 (sha1->seq_num)); s = format (s, " key id: %u\n", sha1->key_id); s = format (s, " hash: %U", format_hex_bytes, sha1->hash, sizeof (sha1->hash)); } } else { s = format (s, "\n"); } } return s; } static void bfd_on_state_change (bfd_main_t * bm, bfd_session_t * bs, u64 now, int handling_wakeup) { BFD_DBG ("\nState changed: %U", format_bfd_session, bs); bfd_event (bm, bs); switch (bs->local_state) { case BFD_STATE_admin_down: bs->echo = 0; bfd_set_effective_desired_min_tx (bm, bs, now, clib_max (bs->config_desired_min_tx_clocks, bm->default_desired_min_tx_clocks)); bfd_set_effective_required_min_rx (bm, bs, bs->config_required_min_rx_clocks); bfd_set_timer (bm, bs, now, handling_wakeup); break; case BFD_STATE_down: bs->echo = 0; bfd_set_effective_desired_min_tx (bm, bs, now, clib_max (bs->config_desired_min_tx_clocks, bm->default_desired_min_tx_clocks)); bfd_set_effective_required_min_rx (bm, bs, bs->config_required_min_rx_clocks); bfd_set_timer (bm, bs, now, handling_wakeup); break; case BFD_STATE_init: bs->echo = 0; bfd_set_effective_desired_min_tx (bm, bs, now, bs->config_desired_min_tx_clocks); bfd_set_timer (bm, bs, now, handling_wakeup); break; case BFD_STATE_up: bfd_set_effective_desired_min_tx (bm, bs, now, bs->config_desired_min_tx_clocks); if (BFD_POLL_NOT_NEEDED == bs->poll_state) { bfd_set_effective_required_min_rx (bm, bs, bs->config_required_min_rx_clocks); } bfd_set_timer (bm, bs, now, handling_wakeup); break; } bfd_notify_listeners (bm, BFD_LISTEN_EVENT_UPDATE, bs); } static void bfd_on_config_change 
(vlib_main_t * vm, vlib_node_runtime_t * rt, bfd_main_t * bm, bfd_session_t * bs, u64 now) { /* * if remote demand mode is set and we need to do a poll, set the next * timeout so that the session wakes up immediately */ if (bs->remote_demand && BFD_POLL_NEEDED == bs->poll_state && bs->poll_state_start_or_timeout_clocks < now) { bs->tx_timeout_clocks = now; } bfd_recalc_detection_time (bm, bs); bfd_set_timer (bm, bs, now, 0); } static void bfd_add_transport_layer (vlib_main_t * vm, u32 bi, bfd_session_t * bs) { switch (bs->transport) { case BFD_TRANSPORT_UDP4: BFD_DBG ("Transport bfd via udp4, bs_idx=%u", bs->bs_idx); bfd_add_udp4_transport (vm, bi, bs, 0 /* is_echo */ ); break; case BFD_TRANSPORT_UDP6: BFD_DBG ("Transport bfd via udp6, bs_idx=%u", bs->bs_idx); bfd_add_udp6_transport (vm, bi, bs, 0 /* is_echo */ ); break; } } static int bfd_transport_control_frame (vlib_main_t * vm, u32 bi, bfd_session_t * bs) { switch (bs->transport) { case BFD_TRANSPORT_UDP4: BFD_DBG ("Transport bfd via udp4, bs_idx=%u", bs->bs_idx); return bfd_transport_udp4 (vm, bi, bs); break; case BFD_TRANSPORT_UDP6: BFD_DBG ("Transport bfd via udp6, bs_idx=%u", bs->bs_idx); return bfd_transport_udp6 (vm, bi, bs); break; } return 0; } static int bfd_echo_add_transport_layer (vlib_main_t * vm, u32 bi, bfd_session_t * bs) { switch (bs->transport) { case BFD_TRANSPORT_UDP4: BFD_DBG ("Transport bfd echo via udp4, bs_idx=%u", bs->bs_idx); return bfd_add_udp4_transport (vm, bi, bs, 1 /* is_echo */ ); break; case BFD_TRANSPORT_UDP6: BFD_DBG ("Transport bfd echo via udp6, bs_idx=%u", bs->bs_idx); return bfd_add_udp6_transport (vm, bi, bs, 1 /* is_echo */ ); break; } return 0; } static int bfd_transport_echo (vlib_main_t * vm, u32 bi, bfd_session_t * bs) { switch (bs->transport) { case BFD_TRANSPORT_UDP4: BFD_DBG ("Transport bfd echo via udp4, bs_idx=%u", bs->bs_idx); return bfd_transport_udp4 (vm, bi, bs); break; case BFD_TRANSPORT_UDP6: BFD_DBG ("Transport bfd echo via udp6, bs_idx=%u", bs->bs_idx); 
return bfd_transport_udp6 (vm, bi, bs); break; } return 0; } #if WITH_LIBSSL > 0 static void bfd_add_sha1_auth_section (vlib_buffer_t * b, bfd_session_t * bs) { bfd_pkt_with_sha1_auth_t *pkt = vlib_buffer_get_current (b); bfd_auth_sha1_t *auth = &pkt->sha1_auth; b->current_length += sizeof (*auth); pkt->pkt.head.length += sizeof (*auth); bfd_pkt_set_auth_present (&pkt->pkt); memset (auth, 0, sizeof (*auth)); auth->type_len.type = bs->auth.curr_key->auth_type; /* * only meticulous authentication types require incrementing seq number * for every message, but doing so doesn't violate the RFC */ ++bs->auth.local_seq_number; auth->type_len.len = sizeof (bfd_auth_sha1_t); auth->key_id = bs->auth.curr_bfd_key_id; auth->seq_num = clib_host_to_net_u32 (bs->auth.local_seq_number); /* * first copy the password into the packet, then calculate the hash * and finally replace the password with the calculated hash */ clib_memcpy (auth->hash, bs->auth.curr_key->key, sizeof (bs->auth.curr_key->key)); unsigned char hash[sizeof (auth->hash)]; SHA1 ((unsigned char *) pkt, sizeof (*pkt), hash); BFD_DBG ("hashing: %U", format_hex_bytes, pkt, sizeof (*pkt)); clib_memcpy (auth->hash, hash, sizeof (hash)); } #endif static void bfd_add_auth_section (vlib_buffer_t * b, bfd_session_t * bs) { bfd_main_t *bm = &bfd_main; if (bs->auth.curr_key) { const bfd_auth_type_e auth_type = bs->auth.curr_key->auth_type; switch (auth_type) { case BFD_AUTH_TYPE_reserved: /* fallthrough */ case BFD_AUTH_TYPE_simple_password: /* fallthrough */ case BFD_AUTH_TYPE_keyed_md5: /* fallthrough */ case BFD_AUTH_TYPE_meticulous_keyed_md5: vlib_log_crit (bm->log_class, "internal error, unexpected BFD auth type '%d'", auth_type); break; #if WITH_LIBSSL > 0 case BFD_AUTH_TYPE_keyed_sha1: /* fallthrough */ case BFD_AUTH_TYPE_meticulous_keyed_sha1: bfd_add_sha1_auth_section (b, bs); break; #else case BFD_AUTH_TYPE_keyed_sha1: /* fallthrough */ case BFD_AUTH_TYPE_meticulous_keyed_sha1: vlib_log_crit (bm->log_class, "internal 
error, unexpected BFD auth type '%d'", auth_type); break; #endif } } } static int bfd_is_echo_possible (bfd_session_t * bs) { if (BFD_STATE_up == bs->local_state && BFD_STATE_up == bs->remote_state && bs->remote_min_echo_rx_usec > 0) { switch (bs->transport) { case BFD_TRANSPORT_UDP4: return bfd_udp_is_echo_available (BFD_TRANSPORT_UDP4); case BFD_TRANSPORT_UDP6: return bfd_udp_is_echo_available (BFD_TRANSPORT_UDP6); } } return 0; } static void bfd_init_control_frame (bfd_main_t * bm, bfd_session_t * bs, vlib_buffer_t * b) { bfd_pkt_t *pkt = vlib_buffer_get_current (b); u32 bfd_length = 0; bfd_length = sizeof (bfd_pkt_t); memset (pkt, 0, sizeof (*pkt)); bfd_pkt_set_version (pkt, 1); bfd_pkt_set_diag_code (pkt, bs->local_diag); bfd_pkt_set_state (pkt, bs->local_state); pkt->head.detect_mult = bs->local_detect_mult; pkt->head.length = bfd_length; pkt->my_disc = bs->local_discr; pkt->your_disc = bs->remote_discr; pkt->des_min_tx = clib_host_to_net_u32 (bs->config_desired_min_tx_usec); if (bs->echo) { pkt->req_min_rx = clib_host_to_net_u32 (bfd_clocks_to_usec (bm, bs->effective_required_min_rx_clocks)); } else { pkt->req_min_rx = clib_host_to_net_u32 (bs->config_required_min_rx_usec); } pkt->req_min_echo_rx = clib_host_to_net_u32 (1); b->current_length = bfd_length; } static void bfd_send_echo (vlib_main_t * vm, vlib_node_runtime_t * rt, bfd_main_t * bm, bfd_session_t * bs, u64 now) { if (!bfd_is_echo_possible (bs)) { BFD_DBG ("\nSwitching off echo function: %U", format_bfd_session, bs); bs->echo = 0; return; } /* sometimes the wheel expires an event a bit sooner than requested, account for that here */ if (now + bm->wheel_inaccuracy >= bs->echo_tx_timeout_clocks) { BFD_DBG ("\nSending echo packet: %U", format_bfd_session, bs); u32 bi; if (vlib_buffer_alloc (vm, &bi, 1) != 1) { vlib_log_crit (bm->log_class, "buffer allocation failure"); return; } vlib_buffer_t *b = vlib_get_buffer (vm, bi); ASSERT (b->current_data == 0); memset (vnet_buffer (b), 0, sizeof (*vnet_buffer 
(b))); VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b); bfd_echo_pkt_t *pkt = vlib_buffer_get_current (b); memset (pkt, 0, sizeof (*pkt)); pkt->discriminator = bs->local_discr; pkt->expire_time_clocks = now + bs->echo_transmit_interval_clocks * bs->local_detect_mult; pkt->checksum = bfd_calc_echo_checksum (bs->local_discr, pkt->expire_time_clocks, bs->echo_secret); b->current_length = sizeof (*pkt); if (!bfd_echo_add_transport_layer (vm, bi, bs)) { BFD_ERR ("cannot send echo packet out, turning echo off"); bs->echo = 0; vlib_buffer_free_one (vm, bi); return; } if (!bfd_transport_echo (vm, bi, bs)) { BFD_ERR ("cannot send echo packet out, turning echo off"); bs->echo = 0; vlib_buffer_free_one (vm, bi); return; } bs->echo_last_tx_clocks = now; bfd_calc_next_echo_tx (bm, bs, now); } else { BFD_DBG ("No need to send echo packet now, now is %lu, tx_timeout is %lu", now, bs->echo_tx_timeout_clocks); } } static void bfd_send_periodic (vlib_main_t * vm, vlib_node_runtime_t * rt, bfd_main_t * bm, bfd_session_t * bs, u64 now) { if (!bs->remote_min_rx_usec && BFD_POLL_NOT_NEEDED == bs->poll_state) { BFD_DBG ("Remote min rx interval is zero, not sending periodic control " "frame"); return; } if (BFD_POLL_NOT_NEEDED == bs->poll_state && bs->remote_demand && BFD_STATE_up == bs->local_state && BFD_STATE_up == bs->remote_state) { /* * A system MUST NOT periodically transmit BFD Control packets if Demand * mode is active on the remote system (bfd.RemoteDemandMode is 1, * bfd.SessionState is Up, and bfd.RemoteSessionState is Up) and a Poll * Sequence is not being transmitted. 
*/ BFD_DBG ("Remote demand is set, not sending periodic control frame"); return; } /* * sometimes the wheel expires an event a bit sooner than requested, account * for that here */ if (now + bm->wheel_inaccuracy >= bs->tx_timeout_clocks) { BFD_DBG ("\nSending periodic control frame: %U", format_bfd_session, bs); u32 bi; if (vlib_buffer_alloc (vm, &bi, 1) != 1) { vlib_log_crit (bm->log_class, "buffer allocation failure"); return; } vlib_buffer_t *b = vlib_get_buffer (vm, bi); ASSERT (b->current_data == 0); memset (vnet_buffer (b), 0, sizeof (*vnet_buffer (b))); VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b); bfd_init_control_frame (bm, bs, b); switch (bs->poll_state) { case BFD_POLL_NEEDED: if (now < bs->poll_state_start_or_timeout_clocks) { BFD_DBG ("Cannot start a poll sequence yet, need to wait " "for " BFD_CLK_FMT, BFD_CLK_PRN (bs->poll_state_start_or_timeout_clocks - now)); break; } bs->poll_state_start_or_timeout_clocks = now; bfd_set_poll_state (bs, BFD_POLL_IN_PROGRESS); /* fallthrough */ case BFD_POLL_IN_PROGRESS: case BFD_POLL_IN_PROGRESS_AND_QUEUED: bfd_pkt_set_poll (vlib_buffer_get_current (b)); BFD_DBG ("Setting poll bit in packet, bs_idx=%u", bs->bs_idx); break; case BFD_POLL_NOT_NEEDED: /* fallthrough */ break; } bfd_add_auth_section (b, bs); bfd_add_transport_layer (vm, bi, bs); if (!bfd_transport_control_frame (vm, bi, bs)) { vlib_buffer_free_one (vm, bi); } bs->last_tx_clocks = now; bfd_calc_next_tx (bm, bs, now); } else { BFD_DBG ("No need to send control frame now, now is %lu, tx_timeout is %lu", now, bs->tx_timeout_clocks); } } void bfd_init_final_control_frame (vlib_main_t * vm, vlib_buffer_t * b, bfd_main_t * bm, bfd_session_t * bs, int is_local) { BFD_DBG ("Send final control frame for bs_idx=%lu", bs->bs_idx); bfd_init_control_frame (bm, bs, b); bfd_pkt_set_final (vlib_buffer_get_current (b)); bfd_add_auth_section (b, bs); u32 bi = vlib_get_buffer_index (vm, b); bfd_add_transport_layer (vm, bi, bs); bs->last_tx_clocks = clib_cpu_time_now (); /* * RFC 
allows to include changes in final frame, so if there were any * pending, we already did that, thus we can clear any pending poll needs */ bfd_set_poll_state (bs, BFD_POLL_NOT_NEEDED); } static void bfd_check_rx_timeout (bfd_main_t * bm, bfd_session_t * bs, u64 now, int handling_wakeup) { /* * sometimes the wheel expires an event a bit sooner than requested, account * for that here */ if (bs->last_rx_clocks + bs->detection_time_clocks <= now + bm->wheel_inaccuracy) { BFD_DBG ("Rx timeout, session goes down"); bfd_set_diag (bs, BFD_DIAG_CODE_det_time_exp); bfd_set_state (bm, bs, BFD_STATE_down, handling_wakeup); /* * If the remote system does not receive any * BFD Control packets for a Detection Time, it SHOULD reset * bfd.RemoteMinRxInterval to its initial value of 1 (per section 6.8.1, * since it is no longer required to maintain previous session state) * and then can transmit at its own rate. */ bfd_set_remote_required_min_rx (bm, bs, now, 1); } else if (bs->echo && bs->echo_last_rx_clocks + bs->echo_transmit_interval_clocks * bs->local_detect_mult <= now + bm->wheel_inaccuracy) { BFD_DBG ("Echo rx timeout, session goes down"); bfd_set_diag (bs, BFD_DIAG_CODE_echo_failed); bfd_set_state (bm, bs, BFD_STATE_down, handling_wakeup); } } void bfd_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * rt, bfd_main_t * bm, bfd_session_t * bs, u64 now) { BFD_DBG ("Timeout for bs_idx=%lu", bs->bs_idx); switch (bs->local_state) { case BFD_STATE_admin_down: bfd_send_periodic (vm, rt, bm, bs, now); break; case BFD_STATE_down: bfd_send_periodic (vm, rt, bm, bs, now); break; case BFD_STATE_init: bfd_check_rx_timeout (bm, bs, now, 1); bfd_send_periodic (vm, rt, bm, bs, now); break; case BFD_STATE_up: bfd_check_rx_timeout (bm, bs, now, 1); if (BFD_POLL_NOT_NEEDED == bs->poll_state && !bs->echo && bfd_is_echo_possible (bs)) { /* switch on echo function as main detection method now */ BFD_DBG ("Switching on echo function, bs_idx=%u", bs->bs_idx); bs->echo = 1; bs->echo_last_rx_clocks 
= now; bs->echo_tx_timeout_clocks = now; bfd_set_effective_required_min_rx (bm, bs, clib_max (bm->min_required_min_rx_while_echo_clocks, bs->config_required_min_rx_clocks)); bfd_set_poll_state (bs, BFD_POLL_NEEDED); } bfd_send_periodic (vm, rt, bm, bs, now); if (bs->echo) { bfd_send_echo (vm, rt, bm, bs, now); } break; } } /* * bfd process node function */ static uword bfd_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f) { bfd_main_t *bm = &bfd_main; u32 *expired = 0; uword event_type, *event_data = 0; /* So we can send events to the bfd process */ bm->bfd_process_node_index = bfd_process_node.index; while (1) { u64 now = clib_cpu_time_now (); u64 next_expire = timing_wheel_next_expiring_elt_time (&bm->wheel); BFD_DBG ("timing_wheel_next_expiring_elt_time(%p) returns %lu", &bm->wheel, next_expire); if ((i64) next_expire < 0) { BFD_DBG ("wait for event without timeout"); (void) vlib_process_wait_for_event (vm); event_type = vlib_process_get_events (vm, &event_data); } else { f64 timeout = ((i64) next_expire - (i64) now) / bm->cpu_cps; BFD_DBG ("wait for event with timeout %.02f", timeout); if (timeout < 0) { BFD_DBG ("negative timeout, already expired, skipping wait"); event_type = ~0; } else { (void) vlib_process_wait_for_event_or_clock (vm, timeout); event_type = vlib_process_get_events (vm, &event_data); } } now = clib_cpu_time_now (); switch (event_type) { case ~0: /* no events => timeout */ /* nothing to do here */ break; case BFD_EVENT_RESCHEDULE: /* nothing to do here - reschedule is done automatically after * each event or timeout */ break; case BFD_EVENT_NEW_SESSION: if (!pool_is_free_index (bm->sessions, *event_data)) { bfd_session_t *bs = pool_elt_at_index (bm->sessions, *event_data); bfd_send_periodic (vm, rt, bm, bs, now); bfd_set_timer (bm, bs, now, 1); } else { BFD_DBG ("Ignoring event for non-existent session index %u", (u32) * event_data); } break; case BFD_EVENT_CONFIG_CHANGED: if (!pool_is_free_index (bm->sessions, 
*event_data)) { bfd_session_t *bs = pool_elt_at_index (bm->sessions, *event_data); bfd_on_config_change (vm, rt, bm, bs, now); } else { BFD_DBG ("Ignoring event for non-existent session index %u", (u32) * event_data); } break; default: vlib_log_err (bm->log_class, "BUG: event type 0x%wx", event_type); break; } BFD_DBG ("advancing wheel, now is %lu", now); BFD_DBG ("timing_wheel_advance (%p, %lu, %p, 0);", &bm->wheel, now, expired); expired = timing_wheel_advance (&bm->wheel, now, expired, 0); BFD_DBG ("Expired %d elements", vec_len (expired)); u32 *p = NULL; vec_foreach (p, expired) { const u32 bs_idx = *p; if (!pool_is_free_index (bm->sessions, bs_idx)) { bfd_session_t *bs = pool_elt_at_index (bm->sessions, bs_idx); bfd_on_timeout (vm, rt, bm, bs, now); bfd_set_timer (bm, bs, now, 1); } } if (expired) { _vec_len (expired) = 0; } if (event_data) { _vec_len (event_data) = 0; } } return 0; } /* * bfd process node declaration */ /* *INDENT-OFF* */ VLIB_REGISTER_NODE (bfd_process_node, static) = { .function = bfd_process, .type = VLIB_NODE_TYPE_PROCESS, .name = "bfd-process", .n_next_nodes = 0, .next_nodes = {}, }; /* *INDENT-ON* */ static clib_error_t * bfd_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags) { // bfd_main_t *bm = &bfd_main; // vnet_hw_interface_t *hi = vnet_get_sup_hw_interface (vnm, sw_if_index); if (!(flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)) { /* TODO */ } return 0; } VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (bfd_sw_interface_up_down); static clib_error_t * bfd_hw_interface_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags) { // bfd_main_t *bm = &bfd_main; if (flags & VNET_HW_INTERFACE_FLAG_LINK_UP) { /* TODO */ } return 0; } VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION (bfd_hw_interface_up_down); void bfd_register_listener (bfd_notify_fn_t fn) { bfd_main_t *bm = &bfd_main; vec_add1 (bm->listeners, fn); } /* * setup function */ static clib_error_t * bfd_main_init (vlib_main_t * vm) { #if BFD_DEBUG setbuf (stdout, NULL); #endif 
bfd_main_t *bm = &bfd_main; bm->random_seed = random_default_seed (); bm->vlib_main = vm; bm->vnet_main = vnet_get_main (); memset (&bm->wheel, 0, sizeof (bm->wheel)); bm->cpu_cps = vm->clib_time.clocks_per_second; BFD_DBG ("cps is %.2f", bm->cpu_cps); bm->default_desired_min_tx_clocks = bfd_usec_to_clocks (bm, BFD_DEFAULT_DESIRED_MIN_TX_USEC); bm->min_required_min_rx_while_echo_clocks = bfd_usec_to_clocks (bm, BFD_REQUIRED_MIN_RX_USEC_WHILE_ECHO); const u64 now = clib_cpu_time_now (); timing_wheel_init (&bm->wheel, now, bm->cpu_cps); bm->wheel_inaccuracy = 2 << bm->wheel.log2_clocks_per_bin; bm->log_class = vlib_log_register_class ("bfd", 0); vlib_log_debug (bm->log_class, "initialized"); return 0; } VLIB_INIT_FUNCTION (bfd_main_init); bfd_session_t * bfd_get_session (bfd_main_t * bm, bfd_transport_e t) { bfd_session_t *result; pool_get (bm->sessions, result); memset (result, 0, sizeof (*result)); result->bs_idx = result - bm->sessions; result->transport = t; const unsigned limit = 1000; unsigned counter = 0; do { result->local_discr = random_u32 (&bm->random_seed); if (counter > limit) { vlib_log_crit (bm->log_class, "couldn't allocate unused session discriminator even " "after %u tries!", limit); pool_put (bm->sessions, result); return NULL; } ++counter; } while (hash_get (bm->session_by_disc, result->local_discr)); bfd_set_defaults (bm, result); hash_set (bm->session_by_disc, result->local_discr, result->bs_idx); return result; } void bfd_put_session (bfd_main_t * bm, bfd_session_t * bs) { vlib_log_info (bm->log_class, "delete session: %U", format_bfd_session_brief, bs); bfd_notify_listeners (bm, BFD_LISTEN_EVENT_DELETE, bs); if (bs->auth.curr_key) { --bs->auth.curr_key->use_count; } if (bs->auth.next_key) { --bs->auth.next_key->use_count; } hash_unset (bm->session_by_disc, bs->local_discr); pool_put (bm->sessions, bs); } bfd_session_t * bfd_find_session_by_idx (bfd_main_t * bm, uword bs_idx) { if (!pool_is_free_index (bm->sessions, bs_idx)) { return 
pool_elt_at_index (bm->sessions, bs_idx); } return NULL; } bfd_session_t * bfd_find_session_by_disc (bfd_main_t * bm, u32 disc) { uword *p = hash_get (bfd_main.session_by_disc, disc); if (p) { return pool_elt_at_index (bfd_main.sessions, *p); } return NULL; } /** * @brief verify bfd packet - common checks * * @param pkt * * @return 1 if bfd packet is valid */ int bfd_verify_pkt_common (const bfd_pkt_t * pkt) { if (1 != bfd_pkt_get_version (pkt)) { BFD_ERR ("BFD verification failed - unexpected version: '%d'", bfd_pkt_get_version (pkt)); return 0; } if (pkt->head.length < sizeof (bfd_pkt_t) || (bfd_pkt_get_auth_present (pkt) && pkt->head.length < sizeof (bfd_pkt_with_common_auth_t))) { BFD_ERR ("BFD verification failed - unexpected length: '%d' (auth " "present: %d)", pkt->head.length, bfd_pkt_get_auth_present (pkt)); return 0; } if (!pkt->head.detect_mult) { BFD_ERR ("BFD verification failed - unexpected detect-mult: '%d'", pkt->head.detect_mult); return 0; } if (bfd_pkt_get_multipoint (pkt)) { BFD_ERR ("BFD verification failed - unexpected multipoint: '%d'", bfd_pkt_get_multipoint (pkt)); return 0; } if (!pkt->my_disc) { BFD_ERR ("BFD verification failed - unexpected my-disc: '%d'", pkt->my_disc); return 0; } if (!pkt->your_disc) { const u8 pkt_state = bfd_pkt_get_state (pkt); if (pkt_state != BFD_STATE_down && pkt_state != BFD_STATE_admin_down) { BFD_ERR ("BFD verification failed - unexpected state: '%s' " "(your-disc is zero)", bfd_state_string (pkt_state)); return 0; } } return 1; } static void bfd_session_switch_auth_to_next (bfd_session_t * bs) { BFD_DBG ("Switching authentication key from %U to %U for bs_idx=%u", format_bfd_auth_key, bs->auth.curr_key, format_bfd_auth_key, bs->auth.next_key, bs->bs_idx); bs->auth.is_delayed = 0; if (bs->auth.curr_key) { --bs->auth.curr_key->use_count; } bs->auth.curr_key = bs->auth.next_key; bs->auth.next_key = NULL; bs->auth.curr_bfd_key_id = bs->auth.next_bfd_key_id; } static int bfd_auth_type_is_meticulous 
(bfd_auth_type_e auth_type) { if (BFD_AUTH_TYPE_meticulous_keyed_md5 == auth_type || BFD_AUTH_TYPE_meticulous_keyed_sha1 == auth_type) { return 1; } return 0; } static int bfd_verify_pkt_auth_seq_num (bfd_session_t * bs, u32 received_seq_num, int is_meticulous) { /* * RFC 5880 6.8.1: * * This variable MUST be set to zero after no packets have been * received on this session for at least twice the Detection Time. */ u64 now = clib_cpu_time_now (); if (now - bs->last_rx_clocks > bs->detection_time_clocks * 2) { BFD_DBG ("BFD peer unresponsive for %lu clocks, which is > 2 * " "detection_time=%u clocks, resetting remote_seq_number_known " "flag", now - bs->last_rx_clocks, bs->detection_time_clocks * 2); bs->auth.remote_seq_number_known = 0; } if (bs->auth.remote_seq_number_known) { /* remote sequence number is known, verify its validity */ const u32 max_u32 = 0xffffffff; /* the calculation might wrap, account for the special case... */ if (bs->auth.remote_seq_number > max_u32 - 3 * bs->local_detect_mult) { /* * special case * * x y z * |----------+----------------------------+-----------| * 0 ^ ^ 0xffffffff * | remote_seq_num------+ * | * +-----(remote_seq_num + 3*detect_mult) % * 0xffffffff * * x + y + z = 0xffffffff * x + z = 3 * detect_mult */ const u32 z = max_u32 - bs->auth.remote_seq_number; const u32 x = 3 * bs->local_detect_mult - z; if (received_seq_num > x && received_seq_num < bs->auth.remote_seq_number + is_meticulous) { BFD_ERR ("Recvd sequence number=%u out of ranges <0, %u>, <%u, %u>", received_seq_num, x, bs->auth.remote_seq_number + is_meticulous, max_u32); return 0; } } else { /* regular case */ const u32 min = bs->auth.remote_seq_number + is_meticulous; const u32 max = bs->auth.remote_seq_number + 3 * bs->local_detect_mult; if (received_seq_num < min || received_seq_num > max) { BFD_ERR ("Recvd sequence number=%u out of range <%u, %u>", received_seq_num, min, max); return 0; } } } return 1; } static int bfd_verify_pkt_auth_key_sha1 (const bfd_pkt_t * 
pkt, u32 pkt_size, bfd_session_t * bs, u8 bfd_key_id, bfd_auth_key_t * auth_key) { ASSERT (auth_key->auth_type == BFD_AUTH_TYPE_keyed_sha1 || auth_key->auth_type == BFD_AUTH_TYPE_meticulous_keyed_sha1); u8 result[SHA_DIGEST_LENGTH]; bfd_pkt_with_common_auth_t *with_common = (void *) pkt; if (pkt_size < sizeof (*with_common)) { BFD_ERR ("Packet size too small to hold authentication common header"); return 0; } if (with_common->common_auth.type != auth_key->auth_type) { BFD_ERR ("BFD auth type mismatch, packet auth=%d:%s doesn't match " "in-use auth=%d:%s", with_common->common_auth.type, bfd_auth_type_str (with_common->common_auth.type), auth_key->auth_type, bfd_auth_type_str (auth_key->auth_type)); return 0; } bfd_pkt_with_sha1_auth_t *with_sha1 = (void *) pkt; if (pkt_size < sizeof (*with_sha1) || with_sha1->sha1_auth.type_len.len < sizeof (with_sha1->sha1_auth)) { BFD_ERR ("BFD size mismatch, payload size=%u, expected=%u, auth_len=%u, " "expected=%u", pkt_size, sizeof (*with_sha1), with_sha1->sha1_auth.type_len.len, sizeof (with_sha1->sha1_auth)); return 0; } if (with_sha1->sha1_auth.key_id != bfd_key_id) { BFD_ERR ("BFD key ID mismatch, packet key ID=%u doesn't match key ID=%u%s", with_sha1->sha1_auth.key_id, bfd_key_id, bs-> auth.is_delayed ? 
" (but a delayed auth change is scheduled)" : ""); return 0; } SHA_CTX ctx; if (!SHA1_Init (&ctx)) { BFD_ERR ("SHA1_Init failed"); return 0; } /* ignore last 20 bytes - use the actual key data instead pkt data */ if (!SHA1_Update (&ctx, with_sha1, sizeof (*with_sha1) - sizeof (with_sha1->sha1_auth.hash))) { BFD_ERR ("SHA1_Update failed"); return 0; } if (!SHA1_Update (&ctx, auth_key->key, sizeof (auth_key->key))) { BFD_ERR ("SHA1_Update failed"); return 0; } if (!SHA1_Final (result, &ctx)) { BFD_ERR ("SHA1_Final failed"); return 0; } if (0 == memcmp (result, with_sha1->sha1_auth.hash, SHA_DIGEST_LENGTH)) { return 1; } BFD_ERR ("SHA1 hash: %U doesn't match the expected value: %U", format_hex_bytes, with_sha1->sha1_auth.hash, SHA_DIGEST_LENGTH, format_hex_bytes, result, SHA_DIGEST_LENGTH); return 0; } static int bfd_verify_pkt_auth_key (const bfd_pkt_t * pkt, u32 pkt_size, bfd_session_t * bs, u8 bfd_key_id, bfd_auth_key_t * auth_key) { bfd_main_t *bm = &bfd_main; switch (auth_key->auth_type) { case BFD_AUTH_TYPE_reserved: vlib_log_err (bm->log_class, "internal error, unexpected auth_type=%d:%s", auth_key->auth_type, bfd_auth_type_str (auth_key->auth_type)); return 0; case BFD_AUTH_TYPE_simple_password: vlib_log_err (bm->log_class, "internal error, not implemented, unexpected auth_type=%d:%s", auth_key->auth_type, bfd_auth_type_str (auth_key->auth_type)); return 0; case BFD_AUTH_TYPE_keyed_md5: /* fallthrough */ case BFD_AUTH_TYPE_meticulous_keyed_md5: vlib_log_err (bm->log_class, "internal error, not implemented, unexpected auth_type=%d:%s", auth_key->auth_type, bfd_auth_type_str (auth_key->auth_type)); return 0; case BFD_AUTH_TYPE_keyed_sha1: /* fallthrough */ case BFD_AUTH_TYPE_meticulous_keyed_sha1: #if WITH_LIBSSL > 0 do { const u32 seq_num = clib_net_to_host_u32 (((bfd_pkt_with_sha1_auth_t *) pkt)-> sha1_auth.seq_num); return bfd_verify_pkt_auth_seq_num (bs, seq_num, bfd_auth_type_is_meticulous (auth_key->auth_type)) && bfd_verify_pkt_auth_key_sha1 (pkt, 
pkt_size, bs, bfd_key_id, auth_key); } while (0); #else vlib_log_err (bm->log_class, "internal error, attempt to use SHA1 without SSL support"); return 0; #endif } return 0; } /** * @brief verify bfd packet - authentication * * @param pkt * * @return 1 if bfd packet is valid */ int bfd_verify_pkt_auth (const bfd_pkt_t * pkt, u16 pkt_size, bfd_session_t * bs) { if (bfd_pkt_get_auth_present (pkt)) { /* authentication present in packet */ if (!bs->auth.curr_key) { /* currently not using authentication - can we turn it on? */ if (bs->auth.is_delayed && bs->auth.next_key) { /* yes, switch is scheduled - make sure the auth is valid */ if (bfd_verify_pkt_auth_key (pkt, pkt_size, bs, bs->auth.next_bfd_key_id, bs->auth.next_key)) { /* auth matches next key, do the switch, packet is valid */ bfd_session_switch_auth_to_next (bs); return 1; } } } else { /* yes, using authentication, verify the key */ if (bfd_verify_pkt_auth_key (pkt, pkt_size, bs, bs->auth.curr_bfd_key_id, bs->auth.curr_key)) { /* verification passed, packet is valid */ return 1; } else { /* verification failed - but maybe we need to switch key */ if (bs->auth.is_delayed && bs->auth.next_key) { /* delayed switch present, verify if that key works */ if (bfd_verify_pkt_auth_key (pkt, pkt_size, bs, bs->auth.next_bfd_key_id, bs->auth.next_key)) { /* auth matches next key, switch key, packet is valid */ bfd_session_switch_auth_to_next (bs); return 1; } } } } } else { /* authentication in packet not present */ if (pkt_size > sizeof (*pkt)) { BFD_ERR ("BFD verification failed - unexpected packet size '%d' " "(auth not present)", pkt_size); return 0; } if (bs->auth.curr_key) { /* currently authenticating - could we turn it off? 
*/ if (bs->auth.is_delayed && !bs->auth.next_key) { /* yes, delayed switch to NULL key is scheduled */ bfd_session_switch_auth_to_next (bs); return 1; } } else { /* no auth in packet, no auth in use - packet is valid */ return 1; } } return 0; } void bfd_consume_pkt (bfd_main_t * bm, const bfd_pkt_t * pkt, u32 bs_idx) { bfd_session_t *bs = bfd_find_session_by_idx (bm, bs_idx); if (!bs || (pkt->your_disc && pkt->your_disc != bs->local_discr)) { return; } BFD_DBG ("Scanning bfd packet, bs_idx=%d", bs->bs_idx); bs->remote_discr = pkt->my_disc; bs->remote_state = bfd_pkt_get_state (pkt); bs->remote_demand = bfd_pkt_get_demand (pkt); bs->remote_diag = bfd_pkt_get_diag_code (pkt); u64 now = clib_cpu_time_now (); bs->last_rx_clocks = now; if (bfd_pkt_get_auth_present (pkt)) { bfd_auth_type_e auth_type = ((bfd_pkt_with_common_auth_t *) (pkt))->common_auth.type; switch (auth_type) { case BFD_AUTH_TYPE_reserved: /* fallthrough */ case BFD_AUTH_TYPE_simple_password: /* fallthrough */ case BFD_AUTH_TYPE_keyed_md5: /* fallthrough */ case BFD_AUTH_TYPE_meticulous_keyed_md5: vlib_log_crit (bm->log_class, "internal error, unexpected auth_type=%d:%s", auth_type, bfd_auth_type_str (auth_type)); break; case BFD_AUTH_TYPE_keyed_sha1: /* fallthrough */ case BFD_AUTH_TYPE_meticulous_keyed_sha1: do { bfd_pkt_with_sha1_auth_t *with_sha1 = (bfd_pkt_with_sha1_auth_t *) pkt; bs->auth.remote_seq_number = clib_net_to_host_u32 (with_sha1->sha1_auth.seq_num); bs->auth.remote_seq_number_known = 1; BFD_DBG ("Received sequence number %u", bs->auth.remote_seq_number); } while (0); } } bs->remote_desired_min_tx_clocks = bfd_usec_to_clocks (bm, clib_net_to_host_u32 (pkt->des_min_tx)); bs->remote_detect_mult = pkt->head.detect_mult; bfd_set_remote_required_min_rx (bm, bs, now, clib_net_to_host_u32 (pkt->req_min_rx)); bfd_set_remote_required_min_echo_rx (bm, bs, now, clib_net_to_host_u32 (pkt->req_min_echo_rx)); if (bfd_pkt_get_final (pkt)) { if (BFD_POLL_IN_PROGRESS == bs->poll_state) { BFD_DBG ("Poll 
sequence terminated, bs_idx=%u", bs->bs_idx); bfd_set_poll_state (bs, BFD_POLL_NOT_NEEDED); if (BFD_STATE_up == bs->local_state) { bfd_set_effective_required_min_rx (bm, bs, clib_max (bs->echo * bm->min_required_min_rx_while_echo_clocks, bs->config_required_min_rx_clocks)); } } else if (BFD_POLL_IN_PROGRESS_AND_QUEUED == bs->poll_state) { /* * next poll sequence must be delayed by at least the round trip * time, so calculate that here */ BFD_DBG ("Next poll sequence can commence in " BFD_CLK_FMT, BFD_CLK_PRN (now - bs->poll_state_start_or_timeout_clocks)); bs->poll_state_start_or_timeout_clocks = now + (now - bs->poll_state_start_or_timeout_clocks); BFD_DBG ("Poll sequence terminated, but another is needed, bs_idx=%u", bs->bs_idx); bfd_set_poll_state (bs, BFD_POLL_NEEDED); } } bfd_calc_next_tx (bm, bs, now); bfd_set_timer (bm, bs, now, 0); if (BFD_STATE_admin_down == bs->local_state) { BFD_DBG ("Session is admin-down, ignoring packet, bs_idx=%u", bs->bs_idx); return; } if (BFD_STATE_admin_down == bs->remote_state) { bfd_set_diag (bs, BFD_DIAG_CODE_neighbor_sig_down); bfd_set_state (bm, bs, BFD_STATE_down, 0); } else if (BFD_STATE_down == bs->local_state) { if (BFD_STATE_down == bs->remote_state) { bfd_set_diag (bs, BFD_DIAG_CODE_no_diag); bfd_set_state (bm, bs, BFD_STATE_init, 0); } else if (BFD_STATE_init == bs->remote_state) { bfd_set_diag (bs, BFD_DIAG_CODE_no_diag); bfd_set_state (bm, bs, BFD_STATE_up, 0); } } else if (BFD_STATE_init == bs->local_state) { if (BFD_STATE_up == bs->remote_state || BFD_STATE_init == bs->remote_state) { bfd_set_diag (bs, BFD_DIAG_CODE_no_diag); bfd_set_state (bm, bs, BFD_STATE_up, 0); } } else /* BFD_STATE_up == bs->local_state */ { if (BFD_STATE_down == bs->remote_state) { bfd_set_diag (bs, BFD_DIAG_CODE_neighbor_sig_down); bfd_set_state (bm, bs, BFD_STATE_down, 0); } } } int bfd_consume_echo_pkt (bfd_main_t * bm, vlib_buffer_t * b) { bfd_echo_pkt_t *pkt = NULL; if (b->current_length != sizeof (*pkt)) { return 0; } pkt = 
vlib_buffer_get_current (b); bfd_session_t *bs = bfd_find_session_by_disc (bm, pkt->discriminator); if (!bs) { return 0; } BFD_DBG ("Scanning bfd echo packet, bs_idx=%d", bs->bs_idx); u64 checksum = bfd_calc_echo_checksum (bs->local_discr, pkt->expire_time_clocks, bs->echo_secret); if (checksum != pkt->checksum) { BFD_DBG ("Invalid echo packet, checksum mismatch"); return 1; } u64 now = clib_cpu_time_now (); if (pkt->expire_time_clocks < now) { BFD_DBG ("Stale packet received, expire time %lu < now %lu", pkt->expire_time_clocks, now); } else { bs->echo_last_rx_clocks = now; } return 1; } u8 * format_bfd_session (u8 * s, va_list * args) { const bfd_session_t *bs = va_arg (*args, bfd_session_t *); u32 indent = format_get_indent (s) + vlib_log_get_indent (); s = format (s, "bs_idx=%u local-state=%s remote-state=%s\n" "%Ulocal-discriminator=%u remote-discriminator=%u\n" "%Ulocal-diag=%s echo-active=%s\n" "%Udesired-min-tx=%u required-min-rx=%u\n" "%Urequired-min-echo-rx=%u detect-mult=%u\n" "%Uremote-min-rx=%u remote-min-echo-rx=%u\n" "%Uremote-demand=%s poll-state=%s\n" "%Uauth: local-seq-num=%u remote-seq-num=%u\n" "%U is-delayed=%s\n" "%U curr-key=%U\n" "%U next-key=%U", bs->bs_idx, bfd_state_string (bs->local_state), bfd_state_string (bs->remote_state), format_white_space, indent, bs->local_discr, bs->remote_discr, format_white_space, indent, bfd_diag_code_string (bs->local_diag), (bs->echo ? "yes" : "no"), format_white_space, indent, bs->config_desired_min_tx_usec, bs->config_required_min_rx_usec, format_white_space, indent, 1, bs->local_detect_mult, format_white_space, indent, bs->remote_min_rx_usec, bs->remote_min_echo_rx_usec, format_white_space, indent, (bs->remote_demand ? "yes" : "no"), bfd_poll_state_string (bs->poll_state), format_white_space, indent, bs->auth.local_seq_number, bs->auth.remote_seq_number, format_white_space, indent, (bs->auth.is_delayed ? 
"yes" : "no"), format_white_space, indent, format_bfd_auth_key, bs->auth.curr_key, format_white_space, indent, format_bfd_auth_key, bs->auth.next_key); return s; } u8 * format_bfd_session_brief (u8 * s, va_list * args) { const bfd_session_t *bs = va_arg (*args, bfd_session_t *); s = format (s, "bs_idx=%u local-state=%s remote-state=%s", bs->bs_idx, bfd_state_string (bs->local_state), bfd_state_string (bs->remote_state)); return s; } unsigned bfd_auth_type_supported (bfd_auth_type_e auth_type) { if (auth_type == BFD_AUTH_TYPE_keyed_sha1 || auth_type == BFD_AUTH_TYPE_meticulous_keyed_sha1) { return 1; } return 0; } vnet_api_error_t bfd_auth_activate (bfd_session_t * bs, u32 conf_key_id, u8 bfd_key_id, u8 is_delayed) { bfd_main_t *bm = &bfd_main; const uword *key_idx_p = hash_get (bm->auth_key_by_conf_key_id, conf_key_id); if (!key_idx_p) { vlib_log_err (bm->log_class, "authentication key with config ID %u doesn't exist)", conf_key_id); return VNET_API_ERROR_BFD_ENOENT; } const uword key_idx = *key_idx_p; bfd_auth_key_t *key = pool_elt_at_index (bm->auth_keys, key_idx); if (is_delayed) { if (bs->auth.next_key == key) { /* already using this key, no changes required */ return 0; } bs->auth.next_key = key; bs->auth.next_bfd_key_id = bfd_key_id; bs->auth.is_delayed = 1; } else { if (bs->auth.curr_key == key) { /* already using this key, no changes required */ return 0; } if (bs->auth.curr_key) { --bs->auth.curr_key->use_count; } bs->auth.curr_key = key; bs->auth.curr_bfd_key_id = bfd_key_id; bs->auth.is_delayed = 0; } ++key->use_count; BFD_DBG ("\nSession auth modified: %U", format_bfd_session, bs); vlib_log_info (bm->log_class, "session auth modified: %U", format_bfd_session_brief, bs); return 0; } vnet_api_error_t bfd_auth_deactivate (bfd_session_t * bs, u8 is_delayed) { bfd_main_t *bm = &bfd_main; #if WITH_LIBSSL > 0 if (!is_delayed) { /* not delayed - deactivate the current key right now */ if (bs->auth.curr_key) { --bs->auth.curr_key->use_count; bs->auth.curr_key = 
NULL; } bs->auth.is_delayed = 0; } else { /* delayed - mark as so */ bs->auth.is_delayed = 1; } /* * clear the next key unconditionally - either the auth change is not delayed * in which case the caller expects the session to not use authentication * from this point forward, or it is delayed, in which case the next_key * needs to be set to NULL to make it so in the future */ if (bs->auth.next_key) { --bs->auth.next_key->use_count; bs->auth.next_key = NULL; } BFD_DBG ("\nSession auth modified: %U", format_bfd_session, bs); vlib_log_info (bm->log_class, "session auth modified: %U", format_bfd_session_brief, bs); return 0; #else vlib_log_err (bm->log_class, "SSL missing, cannot deactivate BFD authentication"); return VNET_API_ERROR_BFD_NOTSUPP; #endif } vnet_api_error_t bfd_session_set_params (bfd_main_t * bm, bfd_session_t * bs, u32 desired_min_tx_usec, u32 required_min_rx_usec, u8 detect_mult) { if (bs->local_detect_mult != detect_mult || bs->config_desired_min_tx_usec != desired_min_tx_usec || bs->config_required_min_rx_usec != required_min_rx_usec) { BFD_DBG ("\nChanging session params: %U", format_bfd_session, bs); switch (bs->poll_state) { case BFD_POLL_NOT_NEEDED: if (BFD_STATE_up == bs->local_state || BFD_STATE_init == bs->local_state) { /* poll sequence is not needed for detect multiplier change */ if (bs->config_desired_min_tx_usec != desired_min_tx_usec || bs->config_required_min_rx_usec != required_min_rx_usec) { bfd_set_poll_state (bs, BFD_POLL_NEEDED); } } break; case BFD_POLL_NEEDED: case BFD_POLL_IN_PROGRESS_AND_QUEUED: /* * nothing to do - will be handled in the future poll which is * already scheduled for execution */ break; case BFD_POLL_IN_PROGRESS: /* poll sequence is not needed for detect multiplier change */ if (bs->config_desired_min_tx_usec != desired_min_tx_usec || bs->config_required_min_rx_usec != required_min_rx_usec) { BFD_DBG ("Poll in progress, queueing extra poll, bs_idx=%u", bs->bs_idx); bfd_set_poll_state (bs, 
BFD_POLL_IN_PROGRESS_AND_QUEUED); } } bs->local_detect_mult = detect_mult; bs->config_desired_min_tx_usec = desired_min_tx_usec; bs->config_desired_min_tx_clocks = bfd_usec_to_clocks (bm, desired_min_tx_usec); bs->config_required_min_rx_usec = required_min_rx_usec; bs->config_required_min_rx_clocks = bfd_usec_to_clocks (bm, required_min_rx_usec); BFD_DBG ("\nChanged session params: %U", format_bfd_session, bs); vlib_log_info (bm->log_class, "changed session params: %U", format_bfd_session_brief, bs); vlib_process_signal_event (bm->vlib_main, bm->bfd_process_node_index, BFD_EVENT_CONFIG_CHANGED, bs->bs_idx); } else { BFD_DBG ("Ignore parameter change - no change, bs_idx=%u", bs->bs_idx); } return 0; } vnet_api_error_t bfd_auth_set_key (u32 conf_key_id, u8 auth_type, u8 key_len, const u8 * key_data) { bfd_main_t *bm = &bfd_main; #if WITH_LIBSSL > 0 bfd_auth_key_t *auth_key = NULL; if (!key_len || key_len > bfd_max_key_len_for_auth_type (auth_type)) { vlib_log_err (bm->log_class, "invalid authentication key length for auth_type=%d:%s " "(key_len=%u, must be non-zero, expected max=%u)", auth_type, bfd_auth_type_str (auth_type), key_len, (u32) bfd_max_key_len_for_auth_type (auth_type)); return VNET_API_ERROR_INVALID_VALUE; } if (!bfd_auth_type_supported (auth_type)) { vlib_log_err (bm->log_class, "unsupported auth type=%d:%s", auth_type, bfd_auth_type_str (auth_type)); return VNET_API_ERROR_BFD_NOTSUPP; } uword *key_idx_p = hash_get (bm->auth_key_by_conf_key_id, conf_key_id); if (key_idx_p) { /* modifying existing key - must not be used */ const uword key_idx = *key_idx_p; auth_key = pool_elt_at_index (bm->auth_keys, key_idx); if (auth_key->use_count > 0) { vlib_log_err (bm->log_class, "authentication key with conf ID %u in use by %u BFD " "session(s) - cannot modify", conf_key_id, auth_key->use_count); return VNET_API_ERROR_BFD_EINUSE; } } else { /* adding new key */ pool_get (bm->auth_keys, auth_key); auth_key->conf_key_id = conf_key_id; hash_set 
(bm->auth_key_by_conf_key_id, conf_key_id, auth_key - bm->auth_keys); } auth_key->auth_type = auth_type; memset (auth_key->key, 0, sizeof (auth_key->key)); clib_memcpy (auth_key->key, key_data, key_len); return 0; #else vlib_log_err (bm->log_class, "SSL missing, cannot manipulate authentication keys"); return VNET_API_ERROR_BFD_NOTSUPP; #endif } vnet_api_error_t bfd_auth_del_key (u32 conf_key_id) { #if WITH_LIBSSL > 0 bfd_auth_key_t *auth_key = NULL; bfd_main_t *bm = &bfd_main; uword *key_idx_p = hash_get (bm->auth_key_by_conf_key_id, conf_key_id); if (key_idx_p) { /* deleting existing key - must not be used */ const uword key_idx = *key_idx_p; auth_key = pool_elt_at_index (bm->auth_keys, key_idx); if (auth_key->use_count > 0) { vlib_log_err (bm->log_class, "authentication key with conf ID %u in use by %u BFD " "session(s) - cannot delete", conf_key_id, auth_key->use_count); return VNET_API_ERROR_BFD_EINUSE; } hash_unset (bm->auth_key_by_conf_key_id, conf_key_id); memset (auth_key, 0, sizeof (*auth_key)); pool_put (bm->auth_keys, auth_key); } else { /* no such key */ vlib_log_err (bm->log_class, "authentication key with conf ID %u does not exist", conf_key_id); return VNET_API_ERROR_BFD_ENOENT; } return 0; #else vlib_log_err (bm->log_class, "SSL missing, cannot manipulate authentication keys"); return VNET_API_ERROR_BFD_NOTSUPP; #endif } bfd_main_t bfd_main; /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */