/*
* Copyright (c) 2016 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stddef.h>
#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <acl/acl.h>
#include <vnet/l2/l2_classify.h>
#include <vnet/classify/in_out_acl.h>
#include <vpp/app/version.h>
#include <vlibapi/api.h>
#include <vlibmemory/api.h>
/* define message IDs */
#include <acl/acl_msg_enum.h>
/* define message structures */
#define vl_typedefs
#include <acl/acl_all_api_h.h>
#undef vl_typedefs
/* define generated endian-swappers */
#define vl_endianfun
#include <acl/acl_all_api_h.h>
#undef vl_endianfun
/* instantiate all the print functions we know about */
#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
#define vl_printfun
#include <acl/acl_all_api_h.h>
#undef vl_printfun
/* Get the API version number */
#define vl_api_version(n,v) static u32 api_version=(v);
#include <acl/acl_all_api_h.h>
#undef vl_api_version
#include "fa_node.h"
#include "hash_lookup.h"
acl_main_t acl_main;
#define REPLY_MSG_ID_BASE am->msg_id_base
#include <vlibapi/api_helper_macros.h>
/* List of message types that this plugin understands */
#define foreach_acl_plugin_api_msg \
_(ACL_PLUGIN_GET_VERSION, acl_plugin_get_version) \
_(ACL_PLUGIN_CONTROL_PING, acl_plugin_control_ping) \
_(ACL_ADD_REPLACE, acl_add_replace) \
_(ACL_DEL, acl_del) \
_(ACL_INTERFACE_ADD_DEL, acl_interface_add_del) \
_(ACL_INTERFACE_SET_ACL_LIST, acl_interface_set_acl_list) \
_(ACL_DUMP, acl_dump) \
_(ACL_INTERFACE_LIST_DUMP, acl_interface_list_dump) \
_(MACIP_ACL_ADD, macip_acl_add) \
_(MACIP_ACL_ADD_REPLACE, macip_acl_add_replace) \
_(MACIP_ACL_DEL, macip_acl_del) \
_(MACIP_ACL_INTERFACE_ADD_DEL, macip_acl_interface_add_del) \
_(MACIP_ACL_DUMP, macip_acl_dump) \
_(MACIP_ACL_INTERFACE_GET, macip_acl_interface_get) \
_(MACIP_ACL_INTERFACE_LIST_DUMP, macip_acl_interface_list_dump)
/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () = {
.version = VPP_BUILD_VER,
.description = "Access Control Lists",
};
/* *INDENT-ON* */
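/*
 * Switch the current allocation heap to the ACL plugin's private mheap,
 * lazily creating it (marked thread-safe) on first use. Returns the
 * previous heap so the caller can restore it with clib_mem_set_heap().
 */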
static void *
acl_set_heap (acl_main_t * am)
{
if (0 == am->acl_mheap)
{
am->acl_mheap = mheap_alloc (0 /* use VM */ , am->acl_mheap_size);
mheap_t *h = mheap_header (am->acl_mheap);
h->flags |= MHEAP_FLAG_THREAD_SAFE;
}
void *oldheap = clib_mem_set_heap (am->acl_mheap);
return oldheap;
}
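/*
 * Debug helpers: the two functions below make sure the plugin heap exists
 * and then toggle full mheap validation (with the small-object cache
 * disabled) or allocation tracing on it.
 */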
void
acl_plugin_acl_set_validate_heap (acl_main_t * am, int on)
{
clib_mem_set_heap (acl_set_heap (am));
mheap_t *h = mheap_header (am->acl_mheap);
if (on)
{
h->flags |= MHEAP_FLAG_VALIDATE;
h->flags &= ~MHEAP_FLAG_SMALL_OBJECT_CACHE;
mheap_validate (h);
}
else
{
h->flags &= ~MHEAP_FLAG_VALIDATE;
h->flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE;
}
}
void
acl_plugin_acl_set_trace_heap (acl_main_t * am, int on)
{
clib_mem_set_heap (acl_set_heap (am));
mheap_t *h = mheap_header (am->acl_mheap);
if (on)
{
h->flags |= MHEAP_FLAG_TRACE;
}
else
{
h->flags &= ~MHEAP_FLAG_TRACE;
}
}
static void
vl_api_acl_plugin_get_version_t_handler (vl_api_acl_plugin_get_version_t * mp)
{
acl_main_t *am = &acl_main;
vl_api_acl_plugin_get_version_reply_t *rmp;
int msg_size = sizeof (*rmp);
vl_api_registration_t *reg;
reg = vl_api_client_index_to_registration (mp->client_index);
if (!reg)
return;
rmp = vl_msg_api_alloc (msg_size);
memset (rmp, 0, msg_size);
rmp->_vl_msg_id =
ntohs (VL_API_ACL_PLUGIN_GET_VERSION_REPLY + am->msg_id_base);
rmp->context = mp->context;
rmp->major = htonl (ACL_PLUGIN_VERSION_MAJOR);
rmp->minor = htonl (ACL_PLUGIN_VERSION_MINOR);
vl_api_send_msg (reg, (u8 *) rmp);
}
static void
vl_api_acl_plugin_control_ping_t_handler (vl_api_acl_plugin_control_ping_t *
mp)
{
vl_api_acl_plugin_control_ping_reply_t *rmp;
acl_main_t *am = &acl_main;
int rv = 0;
/* *INDENT-OFF* */
REPLY_MACRO2 (VL_API_ACL_PLUGIN_CONTROL_PING_REPLY,
({
rmp->vpe_pid = ntohl (getpid ());
}));
/* *INDENT-ON* */
}
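/*
 * Create a new ACL or replace an existing one: convert the API rules into
 * the internal representation on the plugin heap, allocate a new pool entry
 * when *acl_list_index is ~0, otherwise free the old rules and rebuild the
 * hash-lookup representation for the existing index.
 */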
static int
acl_add_list (u32 count, vl_api_acl_rule_t rules[],
u32 * acl_list_index, u8 * tag)
{
acl_main_t *am = &acl_main;
acl_list_t *a;
acl_rule_t *r;
acl_rule_t *acl_new_rules = 0;
int i;
if (*acl_list_index != ~0)
{
/* They supplied some number, let's see if this ACL exists */
if (pool_is_free_index (am->acls, *acl_list_index))
{
/* tried to replace a non-existent ACL, no point doing anything */
clib_warning
("acl-plugin-error: Trying to replace nonexistent ACL %d (tag %s)",
*acl_list_index, tag);
return VNET_API_ERROR_NO_SUCH_ENTRY;
}
}
if (0 == count)
{
clib_warning
("acl-plugin-warning: supplied no rules for ACL %d (tag %s)",
*acl_list_index, tag);
}
void *oldheap = acl_set_heap (am);
/* Create and populate the rules */
if (count > 0)
vec_validate (acl_new_rules, count - 1);
for (i = 0; i < count; i++)
{
r = vec_elt_at_index (acl_new_rules, i);
memset (r, 0, sizeof (*r));
r->is_permit = rules[i].is_permit;
r->is_ipv6 = rules[i].is_ipv6;
if (r->is_ipv6)
{
memcpy (&r->src, rules[i].src_ip_addr, sizeof (r->src));
memcpy (&r->dst, rules[i].dst_ip_addr, sizeof (r->dst));
}
else
{
memcpy (&r->src.ip4, rules[i].src_ip_addr, sizeof (r->src.ip4));
memcpy (&r->dst.ip4, rules[i].dst_ip_addr, sizeof (r->dst.ip4));
}
r->src_prefixlen = rules[i].src_ip_prefix_len;
r->dst_prefixlen = rules[i].dst_ip_prefix_len;
r->proto = rules[i].proto;
r->src_port_or_type_first = ntohs (rules[i].srcport_or_icmptype_first);
r->src_port_or_type_last = ntohs (rules[i].srcport_or_icmptype_last);
r->dst_port_or_code_first = ntohs (rules[i].dstport_or_icmpcode_first);
r->dst_port_or_code_last = ntohs (rules[i].dstport_or_icmpcode_last);
r->tcp_flags_value = rules[i].tcp_flags_value;
r->tcp_flags_mask = rules[i].tcp_flags_mask;
}
if (~0 == *acl_list_index)
{
/* Get ACL index */
pool_get_aligned (am->acls, a, CLIB_CACHE_LINE_BYTES);
memset (a, 0, sizeof (*a));
/* Will return the newly allocated ACL index */
*acl_list_index = a - am->acls;
}
else
{
a = am->acls + *acl_list_index;
hash_acl_delete (am, *acl_list_index);
/* Get rid of the old rules */
if (a->rules)
vec_free (a->rules);
}
a->rules = acl_new_rules;
a->count = count;
memcpy (a->tag, tag, sizeof (a->tag));
hash_acl_add (am, *acl_list_index);
clib_mem_set_heap (oldheap);
return 0;
}
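/*
 * Delete an ACL. Refuses to delete an ACL that is still applied in either
 * direction; otherwise removes any stale per-interface references, the
 * hash-lookup data, and the pool entry itself.
 */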
static int
acl_del_list (u32 acl_list_index)
{
acl_main_t *am = &acl_main;
acl_list_t *a;
int i, ii;
if (pool_is_free_index (am->acls, acl_list_index))
{
return VNET_API_ERROR_NO_SUCH_ENTRY;
}
if (acl_list_index < vec_len (am->input_sw_if_index_vec_by_acl))
{
if (vec_len (vec_elt (am->input_sw_if_index_vec_by_acl, acl_list_index))
> 0)
{
/* ACL is applied somewhere inbound. Refuse to delete */
return VNET_API_ERROR_ACL_IN_USE_INBOUND;
}
}
if (acl_list_index < vec_len (am->output_sw_if_index_vec_by_acl))
{
if (vec_len
(vec_elt (am->output_sw_if_index_vec_by_acl, acl_list_index)) > 0)
{
/* ACL is applied somewhere outbound. Refuse to delete */
return VNET_API_ERROR_ACL_IN_USE_OUTBOUND;
}
}
void *oldheap = acl_set_heap (am);
/* delete any references to the ACL */
for (i = 0; i < vec_len (am->output_acl_vec_by_sw_if_index); i++)
{
for (ii = 0; ii < vec_len (am->output_acl_vec_by_sw_if_index[i]);
/* see body */ )
{
if (acl_list_index == am->output_acl_vec_by_sw_if_index[i][ii])
{
vec_del1 (am->output_acl_vec_by_sw_if_index[i], ii);
}
else
{
ii++;
}
}
}
for (i = 0; i < vec_len (am->input_acl_vec_by_sw_if_index); i++)
{
for (ii = 0; ii < vec_len (am->input_acl_vec_by_sw_if_index[i]);
/* see body */ )
{
if (acl_list_index == am->input_acl_vec_by_sw_if_index[i][ii])
{
vec_del1 (am->input_acl_vec_by_sw_if_index[i], ii);
}
else
{
ii++;
}
}
}
/* delete the hash table data */
hash_acl_delete (am, acl_list_index);
/* now we can delete the ACL itself */
a = pool_elt_at_index (am->acls, acl_list_index);
if (a->rules)
vec_free (a->rules);
pool_put (am->acls, a);
clib_mem_set_heap (oldheap);
return 0;
}
/* Some aids for ASCII-graphing the content */
#define XX "\377"
#define __ "\000"
#define _(x)
#define v
/* *INDENT-OFF* */
u8 ip4_5tuple_mask[] =
_(" dmac smac etype ")
_(ether) __ __ __ __ __ __ v __ __ __ __ __ __ v __ __ v
_(" v ihl totlen ")
_(0x0000)
__ __ __ __
_(" ident fl+fo ")
_(0x0004)
__ __ __ __
_(" ttl pr checksum ")
_(0x0008)
__ XX __ __
_(" src address ")
_(0x000C)
XX XX XX XX
_(" dst address ")
_(0x0010)
XX XX XX XX
_("L4 T/U sport dport ")
_(tcpudp)
XX XX XX XX
_(padpad)
__ __ __ __
_(padpad)
__ __ __ __
_(padeth)
__ __;
u8 ip6_5tuple_mask[] =
_(" dmac smac etype ")
_(ether) __ __ __ __ __ __ v __ __ __ __ __ __ v __ __ v
_(" v tc + flow ")
_(0x0000) __ __ __ __
_(" plen nh hl ")
_(0x0004) __ __ XX __
_(" src address ")
_(0x0008) XX XX XX XX
_(0x000C) XX XX XX XX
_(0x0010) XX XX XX XX
_(0x0014) XX XX XX XX
_(" dst address ")
_(0x0018) XX XX XX XX
_(0x001C) XX XX XX XX
_(0x0020) XX XX XX XX
_(0x0024) XX XX XX XX
_("L4T/U sport dport ")
_(tcpudp) XX XX XX XX _(padpad) __ __ __ __ _(padeth) __ __;
u8 dot1q_5tuple_mask[] =
_(" dmac smac dot1q etype ")
_(ether) __ __ __ __ __ __ v __ __ __ __ __ __ v XX XX __ __ v XX XX v
_(padpad) __ __ __ __
_(padpad) __ __ __ __
_(padpad) __ __ __ __
_(padeth) __ __;
u8 dot1ad_5tuple_mask[] =
_(" dmac smac dot1ad dot1q etype ")
_(ether) __ __ __ __ __ __ v __ __ __ __ __ __ v XX XX __ __ XX XX __ __ v XX XX v
_(padpad) __ __ __ __
_(padpad) __ __ __ __
_(padeth) __ __;
/* *INDENT-ON* */
#undef XX
#undef __
#undef _
#undef v
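/*
 * Return the number of leading 16-byte classifier vectors of the mask that
 * are entirely zero (the classifier "skip" count), scanning 64 bits at a
 * time; dividing the u64 count by two converts it to u32x4 vectors.
 */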
static int
count_skip (u8 * p, u32 size)
{
u64 *p64 = (u64 *) p;
/* Be tolerant of a NULL pointer */
if (0 == p)
return 0;
while ((0ULL == *p64) && ((u8 *) p64 - p) < size)
{
p64++;
}
return (p64 - (u64 *) p) / 2;
}
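/*
 * Thin wrappers around vnet_classify_add_del_table(): derive the skip/match
 * vector counts from the supplied mask and create or delete the table on the
 * main heap. The "tiny" variant (one bucket, small arena) is used for the
 * per-interface chained tables that hold only a handful of sessions; the
 * "small" variant (32 buckets, larger arena) is used for the MACIP tables.
 */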
static int
acl_classify_add_del_table_tiny (vnet_classify_main_t * cm, u8 * mask,
u32 mask_len, u32 next_table_index,
u32 miss_next_index, u32 * table_index,
int is_add)
{
u32 nbuckets = 1;
u32 memory_size = 2 << 13;
u32 skip = count_skip (mask, mask_len);
u32 match = (mask_len / 16) - skip;
u8 *skip_mask_ptr = mask + 16 * skip;
u32 current_data_flag = 0;
int current_data_offset = 0;
if (0 == match)
match = 1;
void *oldheap = clib_mem_set_heap (cm->vlib_main->heap_base);
int ret = vnet_classify_add_del_table (cm, skip_mask_ptr, nbuckets,
memory_size, skip, match,
next_table_index, miss_next_index,
table_index, current_data_flag,
current_data_offset, is_add,
1 /* delete_chain */ );
clib_mem_set_heap (oldheap);
return ret;
}
static int
acl_classify_add_del_table_small (vnet_classify_main_t * cm, u8 * mask,
u32 mask_len, u32 next_table_index,
u32 miss_next_index, u32 * table_index,
int is_add)
{
u32 nbuckets = 32;
u32 memory_size = 2 << 22;
u32 skip = count_skip (mask, mask_len);
u32 match = (mask_len / 16) - skip;
u8 *skip_mask_ptr = mask + 16 * skip;
u32 current_data_flag = 0;
int current_data_offset = 0;
if (0 == match)
match = 1;
void *oldheap = clib_mem_set_heap (cm->vlib_main->heap_base);
int ret = vnet_classify_add_del_table (cm, skip_mask_ptr, nbuckets,
memory_size, skip, match,
next_table_index, miss_next_index,
table_index, current_data_flag,
current_data_offset, is_add,
1 /* delete_chain */ );
clib_mem_set_heap (oldheap);
return ret;
}
static int
acl_unhook_l2_input_classify (acl_main_t * am, u32 sw_if_index)
{
vnet_classify_main_t *cm = &vnet_classify_main;
u32 ip4_table_index = ~0;
u32 ip6_table_index = ~0;
u32 dot1q_table_index = ~0;
u32 dot1ad_table_index = ~0;
void *oldheap = acl_set_heap (am);
vec_validate_init_empty (am->acl_ip4_input_classify_table_by_sw_if_index,
sw_if_index, ~0);
vec_validate_init_empty (am->acl_ip6_input_classify_table_by_sw_if_index,
sw_if_index, ~0);
vec_validate_init_empty (am->acl_dot1q_input_classify_table_by_sw_if_index,
sw_if_index, ~0);
vec_validate_init_empty (am->acl_dot1ad_input_classify_table_by_sw_if_index,
sw_if_index, ~0);
/* switch to global heap while calling vnet_* functions */
clib_mem_set_heap (cm->vlib_main->heap_base);
vnet_l2_input_classify_enable_disable (sw_if_index, 0);
if (am->acl_ip4_input_classify_table_by_sw_if_index[sw_if_index] != ~0)
{
ip4_table_index =
am->acl_ip4_input_classify_table_by_sw_if_index[sw_if_index];
am->acl_ip4_input_classify_table_by_sw_if_index[sw_if_index] = ~0;
acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
am->l2_input_classify_next_acl_ip4,
&ip4_table_index, 0);
}
if (am->acl_ip6_input_classify_table_by_sw_if_index[sw_if_index] != ~0)
{
ip6_table_index =
am->acl_ip6_input_classify_table_by_sw_if_index[sw_if_index];
am->acl_ip6_input_classify_table_by_sw_if_index[sw_if_index] = ~0;
acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
am->l2_input_classify_next_acl_ip6,
&ip6_table_index, 0);
}
if (am->acl_dot1q_input_classify_table_by_sw_if_index[sw_if_index] != ~0)
{
dot1q_table_index =
am->acl_dot1q_input_classify_table_by_sw_if_index[sw_if_index];
am->acl_dot1q_input_classify_table_by_sw_if_index[sw_if_index] = ~0;
acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
~0, &dot1q_table_index, 0);
}
if (am->acl_dot1ad_input_classify_table_by_sw_if_index[sw_if_index] != ~0)
{
dot1ad_table_index =
am->acl_dot1ad_input_classify_table_by_sw_if_index[sw_if_index];
am->acl_dot1ad_input_classify_table_by_sw_if_index[sw_if_index] = ~0;
acl_classify_add_del_table_tiny (cm, dot1ad_5tuple_mask,
sizeof (dot1ad_5tuple_mask) - 1, ~0,
~0, &dot1ad_table_index, 0);
}
clib_mem_set_heap (oldheap);
return 0;
}
static int
acl_unhook_l2_output_classify (acl_main_t * am, u32 sw_if_index)
{
vnet_classify_main_t *cm = &vnet_classify_main;
u32 ip4_table_index = ~0;
u32 ip6_table_index = ~0;
u32 dot1q_table_index = ~0;
u32 dot1ad_table_index = ~0;
void *oldheap = acl_set_heap (am);
vec_validate_init_empty (am->acl_ip4_output_classify_table_by_sw_if_index,
sw_if_index, ~0);
vec_validate_init_empty (am->acl_ip6_output_classify_table_by_sw_if_index,
sw_if_index, ~0);
vec_validate_init_empty (am->acl_dot1q_output_classify_table_by_sw_if_index,
sw_if_index, ~0);
vec_validate_init_empty
(am->acl_dot1ad_output_classify_table_by_sw_if_index, sw_if_index, ~0);
/* switch to global heap while calling vnet_* functions */
clib_mem_set_heap (cm->vlib_main->heap_base);
vnet_l2_output_classify_enable_disable (sw_if_index, 0);
if (am->acl_ip4_output_classify_table_by_sw_if_index[sw_if_index] != ~0)
{
ip4_table_index =
am->acl_ip4_output_classify_table_by_sw_if_index[sw_if_index];
am->acl_ip4_output_classify_table_by_sw_if_index[sw_if_index] = ~0;
acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
am->l2_output_classify_next_acl_ip4,
&ip4_table_index, 0);
}
if (am->acl_ip6_output_classify_table_by_sw_if_index[sw_if_index] != ~0)
{
ip6_table_index =
am->acl_ip6_output_classify_table_by_sw_if_index[sw_if_index];
am->acl_ip6_output_classify_table_by_sw_if_index[sw_if_index] = ~0;
acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
am->l2_output_classify_next_acl_ip6,
&ip6_table_index, 0);
}
if (am->acl_dot1q_output_classify_table_by_sw_if_index[sw_if_index] != ~0)
{
dot1q_table_index =
am->acl_dot1q_output_classify_table_by_sw_if_index[sw_if_index];
am->acl_dot1q_output_classify_table_by_sw_if_index[sw_if_index] = ~0;
acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
~0, &dot1q_table_index, 0);
}
if (am->acl_dot1ad_output_classify_table_by_sw_if_index[sw_if_index] != ~0)
{
dot1ad_table_index =
am->acl_dot1ad_output_classify_table_by_sw_if_index[sw_if_index];
am->acl_dot1ad_output_classify_table_by_sw_if_index[sw_if_index] = ~0;
acl_classify_add_del_table_tiny (cm, dot1ad_5tuple_mask,
sizeof (dot1ad_5tuple_mask) - 1, ~0,
~0, &dot1ad_table_index, 0);
}
clib_mem_set_heap (oldheap);
return 0;
}
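/*
 * Add a classifier session to the dot1q/dot1ad table matching the VLAN
 * ethertype(s) plus the inner IPv4/IPv6 ethertype, chaining the hit to the
 * corresponding ACL classify next node. The match values are written
 * temporarily into the shared *_5tuple_mask arrays and restored to all-ones
 * afterwards.
 */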
static void
acl_add_vlan_session (acl_main_t * am, u32 table_index, u8 is_output,
u8 is_dot1ad, u8 is_ip6)
{
vnet_classify_main_t *cm = &vnet_classify_main;
u8 *match;
u32 next_acl;
u8 idx;
u8 session_idx;
  if (is_ip6)
    {
      next_acl = (is_output) ? am->l2_output_classify_next_acl_ip6
			     : am->l2_input_classify_next_acl_ip6;
    }
  else
    {
      next_acl = (is_output) ? am->l2_output_classify_next_acl_ip4
			     : am->l2_input_classify_next_acl_ip4;
    }
match = (is_dot1ad) ? dot1ad_5tuple_mask : dot1q_5tuple_mask;
idx = (is_dot1ad) ? 20 : 16;
if (is_dot1ad)
{
/* 802.1ad ethertype */
match[12] = 0x88;
match[13] = 0xa8;
/* 802.1q ethertype */
match[16] = 0x81;
match[17] = 0x00;
}
else
{
/* 802.1q ethertype */
match[12] = 0x81;
match[13] = 0x00;
}
/* add sessions to vlan tables per ethernet_type */
if (is_ip6)
{
match[idx] = 0x86;
match[idx + 1] = 0xdd;
session_idx = 1;
}
else
{
match[idx] = 0x08;
match[idx + 1] = 0x00;
session_idx = 0;
}
vnet_classify_add_del_session (cm, table_index, match, next_acl,
session_idx, 0, 0, 0, 1);
/* reset the mask back to being a mask */
match[idx] = 0xff;
match[idx + 1] = 0xff;
match[12] = 0xff;
match[13] = 0xff;
if (is_dot1ad)
{
match[16] = 0xff;
match[17] = 0xff;
}
}
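/*
 * Hook the L2 input classifier on an interface: create the ip4, ip6, dot1ad
 * and dot1q tables (dot1q chained to dot1ad), rolling back any tables already
 * created if a step fails, point the interface at them, add the VLAN
 * sessions, and finally enable L2 input classification.
 */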
static int
acl_hook_l2_input_classify (acl_main_t * am, u32 sw_if_index)
{
vnet_classify_main_t *cm = &vnet_classify_main;
u32 ip4_table_index = ~0;
u32 ip6_table_index = ~0;
u32 dot1q_table_index = ~0;
u32 dot1ad_table_index = ~0;
int rv;
void *prevheap = clib_mem_set_heap (cm->vlib_main->heap_base);
/* in case there were previous tables attached */
acl_unhook_l2_input_classify (am, sw_if_index);
rv =
acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
am->l2_input_classify_next_acl_ip4,
&ip4_table_index, 1);
if (rv)
goto done;
rv =
acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
am->l2_input_classify_next_acl_ip6,
&ip6_table_index, 1);
if (rv)
{
acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
am->l2_input_classify_next_acl_ip4,
&ip4_table_index, 0);
goto done;
}
rv =
acl_classify_add_del_table_tiny (cm, dot1ad_5tuple_mask,
sizeof (dot1ad_5tuple_mask) - 1, ~0,
~0, &dot1ad_table_index, 1);
rv =
acl_classify_add_del_table_tiny (cm, dot1q_5tuple_mask,
sizeof (dot1q_5tuple_mask) - 1,
dot1ad_table_index, ~0,
&dot1q_table_index, 1);
if (rv)
{
acl_classify_add_del_table_tiny (cm, dot1ad_5tuple_mask,
sizeof (dot1ad_5tuple_mask) - 1, ~0,
~0, &dot1ad_table_index, 0);
acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
am->l2_input_classify_next_acl_ip6,
&ip6_table_index, 0);
acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
am->l2_input_classify_next_acl_ip4,
&ip4_table_index, 0);
goto done;
}
rv =
vnet_l2_input_classify_set_tables (sw_if_index, ip4_table_index,
ip6_table_index, dot1q_table_index);
if (rv)
{
acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
am->l2_input_classify_next_acl_ip4,
&ip4_table_index, 0);
acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
am->l2_input_classify_next_acl_ip6,
&ip6_table_index, 0);
acl_classify_add_del_table_tiny (cm, dot1q_5tuple_mask,
sizeof (dot1q_5tuple_mask) - 1, ~0,
~0, &dot1q_table_index, 0);
acl_classify_add_del_table_tiny (cm, dot1ad_5tuple_mask,
sizeof (dot1ad_5tuple_mask) - 1, ~0,
~0, &dot1ad_table_index, 0);
goto done;
}
/* add sessions to vlan tables per ethernet_type */
acl_add_vlan_session (am, dot1q_table_index, 0, 0, 0);
acl_add_vlan_session (am, dot1q_table_index, 0, 0, 1);
acl_add_vlan_session (am, dot1ad_table_index, 0, 1, 0);
acl_add_vlan_session (am, dot1ad_table_index, 0, 1, 1);
am->acl_ip4_input_classify_table_by_sw_if_index[sw_if_index] =
ip4_table_index;
am->acl_ip6_input_classify_table_by_sw_if_index[sw_if_index] =
ip6_table_index;
am->acl_dot1q_input_classify_table_by_sw_if_index[sw_if_index] =
dot1q_table_index;
am->acl_dot1ad_input_classify_table_by_sw_if_index[sw_if_index] =
dot1ad_table_index;
vnet_l2_input_classify_enable_disable (sw_if_index, 1);
done:
clib_mem_set_heap (prevheap);
return rv;
}
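/*
 * Mirror of acl_hook_l2_input_classify() for the L2 output classify path.
 */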
static int
acl_hook_l2_output_classify (acl_main_t * am, u32 sw_if_index)
{
vnet_classify_main_t *cm = &vnet_classify_main;
u32 ip4_table_index = ~0;
u32 ip6_table_index = ~0;
u32 dot1q_table_index = ~0;
u32 dot1ad_table_index = ~0;
int rv;
void *prevheap = clib_mem_set_heap (cm->vlib_main->heap_base);
/* in case there were previous tables attached */
acl_unhook_l2_output_classify (am, sw_if_index);
rv =
acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
am->l2_output_classify_next_acl_ip4,
&ip4_table_index, 1);
if (rv)
goto done;
rv =
acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
am->l2_output_classify_next_acl_ip6,
&ip6_table_index, 1);
if (rv)
{
acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
am->l2_output_classify_next_acl_ip4,
&ip4_table_index, 0);
goto done;
}
rv =
acl_classify_add_del_table_tiny (cm, dot1ad_5tuple_mask,
sizeof (dot1ad_5tuple_mask) - 1, ~0,
~0, &dot1ad_table_index, 1);
rv =
acl_classify_add_del_table_tiny (cm, dot1q_5tuple_mask,
sizeof (dot1q_5tuple_mask) - 1,
dot1ad_table_index, ~0,
&dot1q_table_index, 1);
if (rv)
{
acl_classify_add_del_table_tiny (cm, dot1ad_5tuple_mask,
sizeof (dot1ad_5tuple_mask) - 1, ~0,
~0, &dot1ad_table_index, 0);
acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
am->l2_output_classify_next_acl_ip6,
&ip6_table_index, 0);
acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
am->l2_output_classify_next_acl_ip4,
&ip4_table_index, 0);
goto done;
}
rv =
vnet_l2_output_classify_set_tables (sw_if_index, ip4_table_index,
ip6_table_index, dot1q_table_index);
clib_warning
("ACL enabling on interface sw_if_index %d, setting tables to the following: ip4: %d ip6: %d\n",
sw_if_index, ip4_table_index, ip6_table_index);
if (rv)
{
acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
am->l2_output_classify_next_acl_ip6,
&ip6_table_index, 0);
acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
am->l2_output_classify_next_acl_ip4,
&ip4_table_index, 0);
acl_classify_add_del_table_tiny (cm, dot1q_5tuple_mask,
sizeof (dot1q_5tuple_mask) - 1, ~0,
~0, &dot1q_table_index, 0);
acl_classify_add_del_table_tiny (cm, dot1ad_5tuple_mask,
sizeof (dot1ad_5tuple_mask) - 1, ~0,
~0, &dot1ad_table_index, 0);
goto done;
}
/* add sessions to vlan tables per ethernet_type */
acl_add_vlan_session (am, dot1q_table_index, 1, 0, 0);
acl_add_vlan_session (am, dot1q_table_index, 1, 0, 1);
acl_add_vlan_session (am, dot1ad_table_index, 1, 1, 0);
acl_add_vlan_session (am, dot1ad_table_index, 1, 1, 1);
am->acl_ip4_output_classify_table_by_sw_if_index[sw_if_index] =
ip4_table_index;
am->acl_ip6_output_classify_table_by_sw_if_index[sw_if_index] =
ip6_table_index;
am->acl_dot1q_output_classify_table_by_sw_if_index[sw_if_index] =
dot1q_table_index;
am->acl_dot1ad_output_classify_table_by_sw_if_index[sw_if_index] =
dot1ad_table_index;
vnet_l2_output_classify_enable_disable (sw_if_index, 1);
done:
clib_mem_set_heap (prevheap);
return rv;
}
int
acl_interface_in_enable_disable (acl_main_t * am, u32 sw_if_index,
int enable_disable)
{
int rv;
/* Utterly wrong? */
if (pool_is_free_index (am->vnet_main->interface_main.sw_interfaces,
sw_if_index))
return VNET_API_ERROR_INVALID_SW_IF_INDEX;
acl_fa_enable_disable (sw_if_index, 1, enable_disable);
if (enable_disable)
{
rv = acl_hook_l2_input_classify (am, sw_if_index);
}
else
{
rv = acl_unhook_l2_input_classify (am, sw_if_index);
}
return rv;
}
int
acl_interface_out_enable_disable (acl_main_t * am, u32 sw_if_index,
int enable_disable)
{
int rv;
/* Utterly wrong? */
if (pool_is_free_index (am->vnet_main->interface_main.sw_interfaces,
sw_if_index))
return VNET_API_ERROR_INVALID_SW_IF_INDEX;
acl_fa_enable_disable (sw_if_index, 0, enable_disable);
if (enable_disable)
{
rv = acl_hook_l2_output_classify (am, sw_if_index);
}
else
{
rv = acl_unhook_l2_output_classify (am, sw_if_index);
}
return rv;
}
static int
acl_is_not_defined (acl_main_t * am, u32 acl_list_index)
{
return (pool_is_free_index (am->acls, acl_list_index));
}
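/*
 * Apply an ACL on an interface in the given direction: refuse to apply an
 * undefined ACL or a duplicate, enable the ACL processing and classifier
 * hooks when this is the first ACL applied in that direction, and record
 * both the per-interface and the per-ACL reverse mappings.
 */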
static int
acl_interface_add_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index)
{
acl_main_t *am = &acl_main;
if (acl_is_not_defined (am, acl_list_index))
{
/* ACL is not defined. Can not apply */
return VNET_API_ERROR_NO_SUCH_ENTRY;
}
void *oldheap = acl_set_heap (am);
if (is_input)
{
vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index);
u32 index = vec_search (am->input_acl_vec_by_sw_if_index[sw_if_index],
acl_list_index);
if (index < vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]))
{
clib_warning
("ACL %d is already applied inbound on sw_if_index %d (index %d)",
acl_list_index, sw_if_index, index);
/* the entry is already there */
clib_mem_set_heap (oldheap);
return VNET_API_ERROR_ACL_IN_USE_INBOUND;
}
/* if there was no ACL applied before, enable the ACL processing */
if (vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]) == 0)
{
acl_interface_in_enable_disable (am, sw_if_index, 1);
}
vec_add (am->input_acl_vec_by_sw_if_index[sw_if_index], &acl_list_index,
1);
vec_validate (am->input_sw_if_index_vec_by_acl, acl_list_index);
vec_add (am->input_sw_if_index_vec_by_acl[acl_list_index], &sw_if_index,
1);
}
else
{
vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index);
u32 index = vec_search (am->output_acl_vec_by_sw_if_index[sw_if_index],
acl_list_index);
if (index < vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]))
{
clib_warning
("ACL %d is already applied outbound on sw_if_index %d (index %d)",
acl_list_index, sw_if_index, index);
/* the entry is already there */
clib_mem_set_heap (oldheap);
return VNET_API_ERROR_ACL_IN_USE_OUTBOUND;
}
/* if there was no ACL applied before, enable the ACL processing */
if (vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]) == 0)
{
acl_interface_out_enable_disable (am, sw_if_index, 1);
}
vec_add (am->output_acl_vec_by_sw_if_index[sw_if_index],
&acl_list_index, 1);
vec_validate (am->output_sw_if_index_vec_by_acl, acl_list_index);
vec_add (am->output_sw_if_index_vec_by_acl[acl_list_index],
&sw_if_index, 1);
}
clib_mem_set_heap (oldheap);
return 0;
}
static int
acl_interface_del_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index)
{
acl_main_t *am = &acl_main;
int i;
int rv = VNET_API_ERROR_NO_SUCH_ENTRY;
void *oldheap = acl_set_heap (am);
if (is_input)
{
vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index);
for (i = 0; i < vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]);
i++)
{
if (acl_list_index ==
am->input_acl_vec_by_sw_if_index[sw_if_index][i])
{
vec_del1 (am->input_acl_vec_by_sw_if_index[sw_if_index], i);
rv = 0;
break;
}
}
if (acl_list_index < vec_len (am->input_sw_if_index_vec_by_acl))
{
u32 index =
vec_search (am->input_sw_if_index_vec_by_acl[acl_list_index],
sw_if_index);
if (index <
vec_len (am->input_sw_if_index_vec_by_acl[acl_list_index]))
{
hash_acl_unapply (am, sw_if_index, is_input, acl_list_index);
vec_del1 (am->input_sw_if_index_vec_by_acl[acl_list_index],
index);
}
}
/* If there are no more ACLs applied on the interface, disable ACL processing */
if (0 == vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]))
{
acl_interface_in_enable_disable (am, sw_if_index, 0);
}
}
else
{
vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index);
for (i = 0;
i < vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]); i++)
{
if (acl_list_index ==
am->output_acl_vec_by_sw_if_index[sw_if_index][i])
{
vec_del1 (am->output_acl_vec_by_sw_if_index[sw_if_index], i);
rv = 0;
break;
}
}
if (acl_list_index < vec_len (am->output_sw_if_index_vec_by_acl))
{
u32 index =
vec_search (am->output_sw_if_index_vec_by_acl[acl_list_index],
sw_if_index);
if (index <
vec_len (am->output_sw_if_index_vec_by_acl[acl_list_index]))
{
hash_acl_unapply (am, sw_if_index, is_input, acl_list_index);
vec_del1 (am->output_sw_if_index_vec_by_acl[acl_list_index],
index);
}
}
/* If there are no more ACLs applied on the interface, disable ACL processing */
if (0 == vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]))
{
acl_interface_out_enable_disable (am, sw_if_index, 0);
}
}
clib_mem_set_heap (oldheap);
return rv;
}
static void
acl_interface_reset_inout_acls (u32 sw_if_index, u8 is_input)
{
acl_main_t *am = &acl_main;
int i;
void *oldheap = acl_set_heap (am);
if (is_input)
{
vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index);
if (vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]) > 0)
{
acl_interface_in_enable_disable (am, sw_if_index, 0);
}
for (i = vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]) - 1;
i >= 0; i--)
{
u32 acl_list_index =
am->input_acl_vec_by_sw_if_index[sw_if_index][i];
hash_acl_unapply (am, sw_if_index, is_input, acl_list_index);
if (acl_list_index < vec_len (am->input_sw_if_index_vec_by_acl))
{
u32 index =
vec_search (am->input_sw_if_index_vec_by_acl[acl_list_index],
sw_if_index);
if (index <
vec_len (am->input_sw_if_index_vec_by_acl[acl_list_index]))
{
vec_del1 (am->input_sw_if_index_vec_by_acl[acl_list_index],
index);
}
}
}
vec_reset_length (am->input_acl_vec_by_sw_if_index[sw_if_index]);
}
else
{
vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index);
if (vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]) > 0)
{
acl_interface_out_enable_disable (am, sw_if_index, 0);
}
for (i = vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]) - 1;
i >= 0; i--)
{
u32 acl_list_index =
am->output_acl_vec_by_sw_if_index[sw_if_index][i];
hash_acl_unapply (am, sw_if_index, is_input, acl_list_index);
if (acl_list_index < vec_len (am->output_sw_if_index_vec_by_acl))
{
u32 index =
vec_search (am->output_sw_if_index_vec_by_acl[acl_list_index],
sw_if_index);
if (index <
vec_len (am->output_sw_if_index_vec_by_acl[acl_list_index]))
{
vec_del1 (am->output_sw_if_index_vec_by_acl[acl_list_index],
index);
}
}
}
vec_reset_length (am->output_acl_vec_by_sw_if_index[sw_if_index]);
}
clib_mem_set_heap (oldheap);
}
static int
acl_interface_add_del_inout_acl (u32 sw_if_index, u8 is_add, u8 is_input,
u32 acl_list_index)
{
int rv = VNET_API_ERROR_NO_SUCH_ENTRY;
acl_main_t *am = &acl_main;
if (is_add)
{
rv =
acl_interface_add_inout_acl (sw_if_index, is_input, acl_list_index);
if (rv == 0)
{
hash_acl_apply (am, sw_if_index, is_input, acl_list_index);
}
}
else
{
hash_acl_unapply (am, sw_if_index, is_input, acl_list_index);
rv =
acl_interface_del_inout_acl (sw_if_index, is_input, acl_list_index);
}
return rv;
}
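/*
 * One entry per distinct (source MAC mask, prefix length, address family)
 * combination seen in a MACIP ACL; a classifier table (plus ARP, VLAN and
 * egress variants) is created per entry and the tables are chained together.
 */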
typedef struct
{
u8 is_ipv6;
u8 has_egress;
u8 mac_mask[6];
u8 prefix_len;
u32 count;
u32 table_index;
u32 arp_table_index;
u32 dot1q_table_index;
u32 dot1ad_table_index;
/* egress tables */
u32 out_table_index;
u32 out_arp_table_index;
u32 out_dot1q_table_index;
u32 out_dot1ad_table_index;
} macip_match_type_t;
static u32
macip_find_match_type (macip_match_type_t * mv, u8 * mac_mask, u8 prefix_len,
u8 is_ipv6)
{
u32 i;
if (mv)
{
for (i = 0; i < vec_len (mv); i++)
{
if ((mv[i].prefix_len == prefix_len) && (mv[i].is_ipv6 == is_ipv6)
&& (0 == memcmp (mv[i].mac_mask, mac_mask, 6)))
{
return i;
}
}
}
return ~0;
}
/* Get the metric used to sort match types.
   The more specific and the more often seen, the bigger the metric. */
static int
match_type_metric (macip_match_type_t * m)
{
unsigned int mac_bits_set = 0;
unsigned int mac_byte;
int i;
for (i = 0; i < 6; i++)
{
mac_byte = m->mac_mask[i];
for (; mac_byte; mac_byte >>= 1)
mac_bits_set += mac_byte & 1;
}
  /*
   * Attempt to place the more specific and the more frequently used rules
   * on top. There are obvious corner-case caveats to this, but they do not
   * seem to arise in the real world (e.g. a specific IPv4 with a wildcard
   * MAC paired with a wildcard IPv4 with a specific MAC).
   */
return m->prefix_len + mac_bits_set + m->is_ipv6 + 10 * m->count;
}
static int
match_type_compare (macip_match_type_t * m1, macip_match_type_t * m2)
{
/* Ascending sort based on the metric values */
return match_type_metric (m1) - match_type_metric (m2);
}
/* Get the offset of the L3 source address within the ethernet packet */
static int
get_l3_src_offset (int is6)
{
if (is6)
return (sizeof (ethernet_header_t) +
offsetof (ip6_header_t, src_address));
else
return (sizeof (ethernet_header_t) +
offsetof (ip4_header_t, src_address));
}
static int
get_l3_dst_offset (int is6)
{
if (is6)
return (sizeof (ethernet_header_t) +
offsetof (ip6_header_t, dst_address));
else
return (sizeof (ethernet_header_t) +
offsetof (ip4_header_t, dst_address));
}
/*
 * Return whether the is_permit value also requires creating the egress tables.
 * For backwards compatibility, is_permit = 1 creates only the ingress tables,
 * while the newer value of 3 also creates the egress tables, keyed on the
 * destination.
 */
static int
macip_permit_also_egress (u8 is_permit)
{
return (is_permit == 3);
}
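/*
 * Build the classifier tables for a MACIP ACL: collect the distinct match
 * types used by its rules, sort them so the most specific / most frequently
 * used ones are searched first, create the chained ARP and IP tables (plus
 * the egress variants where requested), then install one classifier session
 * per rule.
 */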
static int
macip_create_classify_tables (acl_main_t * am, u32 macip_acl_index)
{
macip_match_type_t *mvec = NULL;
macip_match_type_t *mt;
macip_acl_list_t *a = pool_elt_at_index (am->macip_acls, macip_acl_index);
int i;
u32 match_type_index;
u32 last_table;
u32 out_last_table;
u8 mask[5 * 16];
vnet_classify_main_t *cm = &vnet_classify_main;
/* Count the number of different types of rules */
for (i = 0; i < a->count; i++)
{
if (~0 ==
(match_type_index =
macip_find_match_type (mvec, a->rules[i].src_mac_mask,
a->rules[i].src_prefixlen,
a->rules[i].is_ipv6)))
{
match_type_index = vec_len (mvec);
vec_validate (mvec, match_type_index);
memcpy (mvec[match_type_index].mac_mask,
a->rules[i].src_mac_mask, 6);
mvec[match_type_index].prefix_len = a->rules[i].src_prefixlen;
mvec[match_type_index].is_ipv6 = a->rules[i].is_ipv6;
mvec[match_type_index].has_egress = 0;
mvec[match_type_index].table_index = ~0;
mvec[match_type_index].arp_table_index = ~0;
mvec[match_type_index].dot1q_table_index = ~0;
mvec[match_type_index].dot1ad_table_index = ~0;
mvec[match_type_index].out_table_index = ~0;
mvec[match_type_index].out_arp_table_index = ~0;
mvec[match_type_index].out_dot1q_table_index = ~0;
mvec[match_type_index].out_dot1ad_table_index = ~0;
}
mvec[match_type_index].count++;
mvec[match_type_index].has_egress |=
macip_permit_also_egress (a->rules[i].is_permit);
}
/* Put the most frequently used tables last in the list so we can create classifier tables in reverse order */
vec_sort_with_function (mvec, match_type_compare);
/* Create the classifier tables */
last_table = ~0;
out_last_table = ~0;
/* First add ARP tables */
vec_foreach (mt, mvec)
{
int mask_len;
int is6 = mt->is_ipv6;
if (!is6)
{
/*
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Address |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +
| Source Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| EtherType | Hardware Type |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Protocol Type | Hw addr len | Proto addr len|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Opcode | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +
| Sender Hardware Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Sender Protocol Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Target Hardware Address |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| | TargetProtocolAddress |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
memset (mask, 0, sizeof (mask));
/* source MAC address */
memcpy (&mask[6], mt->mac_mask, 6);
memset (&mask[12], 0xff, 2); /* ethernet protocol */
/* sender hardware address within ARP */
memcpy (&mask[14 + 8], mt->mac_mask, 6);
/* sender protocol address within ARP */
for (i = 0; i < (mt->prefix_len / 8); i++)
mask[14 + 14 + i] = 0xff;
if (mt->prefix_len % 8)
mask[14 + 14 + (mt->prefix_len / 8)] =
0xff - ((1 << (8 - mt->prefix_len % 8)) - 1);
mask_len = ((14 + 14 + ((mt->prefix_len + 7) / 8) +
(sizeof (u32x4) - 1)) / sizeof (u32x4)) * sizeof (u32x4);
acl_classify_add_del_table_small (cm, mask, mask_len, last_table,
(~0 == last_table) ? 0 : ~0,
&mt->arp_table_index, 1);
last_table = mt->arp_table_index;
if (mt->has_egress)
{
/* egress ARP table */
memset (mask, 0, sizeof (mask));
// memcpy (&mask[0], mt->mac_mask, 6);
memset (&mask[12], 0xff, 2); /* ethernet protocol */
/* AYXX: FIXME here - can we tighten the ARP-related table more ? */
/* mask captures just the destination and the ethertype */
mask_len = ((14 +
(sizeof (u32x4) -
1)) / sizeof (u32x4)) * sizeof (u32x4);
acl_classify_add_del_table_small (cm, mask, mask_len,
out_last_table,
(~0 == out_last_table) ? 0 : ~0,
&mt->out_arp_table_index, 1);
out_last_table = mt->out_arp_table_index;
}
}
}
/* Now add IP[46] tables */
vec_foreach (mt, mvec)
{
int mask_len;
int is6 = mt->is_ipv6;
int l3_src_offs;
int l3_dst_offs;
int tags;
u32 *last_tag_table;
u32 *out_last_tag_table;
/*
* create chained tables for VLAN (no-tags, dot1q and dot1ad) packets
*/
for (tags = 2; tags >= 0; tags--)
{
memset (mask, 0, sizeof (mask));
memcpy (&mask[6], mt->mac_mask, 6);
l3_src_offs = tags * 4 + get_l3_src_offset (is6);
switch (tags)
{
case 0:
default:
memset (&mask[12], 0xff, 2); /* ethernet protocol */
last_tag_table = &mt->table_index;
break;
case 1:
memset (&mask[12], 0xff, 2); /* VLAN tag1 */
memset (&mask[16], 0xff, 2); /* ethernet protocol */
last_tag_table = &mt->dot1q_table_index;
break;
case 2:
memset (&mask[12], 0xff, 2); /* VLAN tag1 */
memset (&mask[16], 0xff, 2); /* VLAN tag2 */
memset (&mask[20], 0xff, 2); /* ethernet protocol */
last_tag_table = &mt->dot1ad_table_index;
break;
}
for (i = 0; i < (mt->prefix_len / 8); i++)
{
mask[l3_src_offs + i] = 0xff;
}
if (mt->prefix_len % 8)
{
mask[l3_src_offs + (mt->prefix_len / 8)] =
0xff - ((1 << (8 - mt->prefix_len % 8)) - 1);
}
/*
* Round-up the number of bytes needed to store the prefix,
* and round up the number of vectors too
*/
mask_len = ((l3_src_offs + ((mt->prefix_len + 7) / 8) +
(sizeof (u32x4) - 1)) / sizeof (u32x4)) * sizeof (u32x4);
acl_classify_add_del_table_small (cm, mask, mask_len, last_table,
(~0 == last_table) ? 0 : ~0,
last_tag_table, 1);
last_table = *last_tag_table;
}
if (mt->has_egress)
{
for (tags = 2; tags >= 0; tags--)
{
memset (mask, 0, sizeof (mask));
/* MAC destination */
memcpy (&mask[0], mt->mac_mask, 6);
l3_dst_offs = tags * 4 + get_l3_dst_offset (is6);
switch (tags)
{
case 0:
default:
memset (&mask[12], 0xff, 2); /* ethernet protocol */
out_last_tag_table = &mt->out_table_index;
break;
case 1:
memset (&mask[12], 0xff, 2); /* VLAN tag1 */
memset (&mask[16], 0xff, 2); /* ethernet protocol */
out_last_tag_table = &mt->out_dot1q_table_index;
break;
case 2:
memset (&mask[12], 0xff, 2); /* VLAN tag1 */
memset (&mask[16], 0xff, 2); /* VLAN tag2 */
memset (&mask[20], 0xff, 2); /* ethernet protocol */
out_last_tag_table = &mt->out_dot1ad_table_index;
break;
}
for (i = 0; i < (mt->prefix_len / 8); i++)
{
mask[l3_dst_offs + i] = 0xff;
}
if (mt->prefix_len % 8)
{
mask[l3_dst_offs + (mt->prefix_len / 8)] =
0xff - ((1 << (8 - mt->prefix_len % 8)) - 1);
}
/*
* Round-up the number of bytes needed to store the prefix,
* and round up the number of vectors too
*/
mask_len = ((l3_dst_offs + ((mt->prefix_len + 7) / 8) +
(sizeof (u32x4) -
1)) / sizeof (u32x4)) * sizeof (u32x4);
acl_classify_add_del_table_small (cm, mask, mask_len,
out_last_table,
(~0 == out_last_table) ? 0 : ~0,
out_last_tag_table, 1);
out_last_table = *out_last_tag_table;
}
}
}
a->ip4_table_index = last_table;
a->ip6_table_index = last_table;
a->l2_table_index = last_table;
a->out_ip4_table_index = out_last_table;
a->out_ip6_table_index = out_last_table;
a->out_l2_table_index = out_last_table;
/* Populate the classifier tables with rules from the MACIP ACL */
for (i = 0; i < a->count; i++)
{
u32 action = 0;
u32 metadata = 0;
int is6 = a->rules[i].is_ipv6;
int l3_src_offs;
int l3_dst_offs;
u32 tag_table;
int tags, eth;
match_type_index =
macip_find_match_type (mvec, a->rules[i].src_mac_mask,
a->rules[i].src_prefixlen,
a->rules[i].is_ipv6);
ASSERT (match_type_index != ~0);
for (tags = 2; tags >= 0; tags--)
{
memset (mask, 0, sizeof (mask));
l3_src_offs = tags * 4 + get_l3_src_offset (is6);
memcpy (&mask[6], a->rules[i].src_mac, 6);
switch (tags)
{
case 0:
default:
tag_table = mvec[match_type_index].table_index;
eth = 12;
break;
case 1:
tag_table = mvec[match_type_index].dot1q_table_index;
mask[12] = 0x81;
mask[13] = 0x00;
eth = 16;
break;
case 2:
tag_table = mvec[match_type_index].dot1ad_table_index;
mask[12] = 0x88;
mask[13] = 0xa8;
mask[16] = 0x81;
mask[17] = 0x00;
eth = 20;
break;
}
if (is6)
{
memcpy (&mask[l3_src_offs], &a->rules[i].src_ip_addr.ip6, 16);
mask[eth] = 0x86;
mask[eth + 1] = 0xdd;
}
else
{
memcpy (&mask[l3_src_offs], &a->rules[i].src_ip_addr.ip4, 4);
mask[eth] = 0x08;
mask[eth + 1] = 0x00;
}
/* add session to table mvec[match_type_index].table_index; */
vnet_classify_add_del_session (cm, tag_table,
mask, a->rules[i].is_permit ? ~0 : 0,
i, 0, action, metadata, 1);
memset (&mask[12], 0, sizeof (mask) - 12);
}
/* add ARP table entry too */
if (!is6 && (mvec[match_type_index].arp_table_index != ~0))
{
memset (mask, 0, sizeof (mask));
memcpy (&mask[6], a->rules[i].src_mac, 6);
mask[12] = 0x08;
mask[13] = 0x06;
memcpy (&mask[14 + 8], a->rules[i].src_mac, 6);
memcpy (&mask[14 + 14], &a->rules[i].src_ip_addr.ip4, 4);
	  vnet_classify_add_del_session (cm,
					 mvec[match_type_index].arp_table_index,
					 mask, a->rules[i].is_permit ? ~0 : 0,
					 i, 0, action, metadata, 1);
}
if (macip_permit_also_egress (a->rules[i].is_permit))
{
/* Add the egress entry with destination set */
for (tags = 2; tags >= 0; tags--)
{
memset (mask, 0, sizeof (mask));
l3_dst_offs = tags * 4 + get_l3_dst_offset (is6);
/* src mac in the other direction becomes dst */
memcpy (&mask[0], a->rules[i].src_mac, 6);
switch (tags)
{
case 0:
default:
tag_table = mvec[match_type_index].out_table_index;
eth = 12;
break;
case 1:
tag_table = mvec[match_type_index].out_dot1q_table_index;
mask[12] = 0x81;
mask[13] = 0x00;
eth = 16;
break;
case 2:
tag_table = mvec[match_type_index].out_dot1ad_table_index;
mask[12] = 0x88;
mask[13] = 0xa8;
mask[16] = 0x81;
mask[17] = 0x00;
eth = 20;
break;
}
if (is6)
{
memcpy (&mask[l3_dst_offs], &a->rules[i].src_ip_addr.ip6,
16);
mask[eth] = 0x86;
mask[eth + 1] = 0xdd;
}
else
{
memcpy (&mask[l3_dst_offs], &a->rules[i].src_ip_addr.ip4,
4);
mask[eth] = 0x08;
mask[eth + 1] = 0x00;
}
/* add session to table mvec[match_type_index].table_index; */
vnet_classify_add_del_session (cm, tag_table,
mask,
a->rules[i].is_permit ? ~0 : 0,
i, 0, action, metadata, 1);
// memset (&mask[12], 0, sizeof (mask) - 12);
}
/* add ARP table entry too */
if (!is6 && (mvec[match_type_index].out_arp_table_index != ~0))
{
memset (mask, 0, sizeof (mask));
memcpy (&mask[0], a->rules[i].src_mac, 6);
mask[12] = 0x08;
mask[13] = 0x06;
	      vnet_classify_add_del_session (cm,
					     mvec[match_type_index].out_arp_table_index,
					     mask,
					     a->rules[i].is_permit ? ~0 : 0,
					     i, 0, action, metadata, 1);
}
}
}
return 0;
}
static void
macip_destroy_classify_tables (acl_main_t * am, u32 macip_acl_index)
{
vnet_classify_main_t *cm = &vnet_classify_main;
macip_acl_list_t *a = pool_elt_at_index (am->macip_acls, macip_acl_index);
if (a->ip4_table_index != ~0)
{
acl_classify_add_del_table_small (cm, 0, ~0, ~0, ~0,
&a->ip4_table_index, 0);
a->ip4_table_index = ~0;
}
if (a->ip6_table_index != ~0)
{
acl_classify_add_del_table_small (cm, 0, ~0, ~0, ~0,
&a->ip6_table_index, 0);
a->ip6_table_index = ~0;
}
if (a->l2_table_index != ~0)
{
acl_classify_add_del_table_small (cm, 0, ~0, ~0, ~0, &a->l2_table_index,
0);
a->l2_table_index = ~0;
}
}
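/*
 * Walk all interfaces on which this MACIP ACL is applied and apply or
 * unapply its input and output classifier tables, remembering the first
 * error while still processing the remaining interfaces.
 */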
static int
macip_maybe_apply_unapply_classifier_tables (acl_main_t * am, u32 acl_index,
int is_apply)
{
int rv = 0;
int rv0 = 0;
int i;
macip_acl_list_t *a = pool_elt_at_index (am->macip_acls, acl_index);
for (i = 0; i < vec_len (am->macip_acl_by_sw_if_index); i++)
if (vec_elt (am->macip_acl_by_sw_if_index, i) == acl_index)
{
rv0 = vnet_set_input_acl_intfc (am->vlib_main, i, a->ip4_table_index,
a->ip6_table_index, a->l2_table_index,
is_apply);
/* remember the first unhappy outcome, but try to plough through the rest. */
rv = rv || rv0;
rv0 =
vnet_set_output_acl_intfc (am->vlib_main, i, a->out_ip4_table_index,
a->out_ip6_table_index,
a->out_l2_table_index, is_apply);
/* remember the first unhappy outcome, but try to plough through the rest. */
rv = rv || rv0;
}
return rv;
}
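/*
 * Create a new MACIP ACL or replace an existing one: convert the API rules
 * on the plugin heap, rebuild the classifier tables and, if the ACL is
 * already applied to interfaces, unhook the old tables first and reapply the
 * new ones afterwards.
 */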
static int
macip_acl_add_list (u32 count, vl_api_macip_acl_rule_t rules[],
u32 * acl_list_index, u8 * tag)
{
acl_main_t *am = &acl_main;
macip_acl_list_t *a;
macip_acl_rule_t *r;
macip_acl_rule_t *acl_new_rules = 0;
int i;
int rv = 0;
if (*acl_list_index != ~0)
{
/* They supplied some number, let's see if this MACIP ACL exists */
if (pool_is_free_index (am->macip_acls, *acl_list_index))
{
/* tried to replace a non-existent ACL, no point doing anything */
clib_warning
("acl-plugin-error: Trying to replace nonexistent MACIP ACL %d (tag %s)",
*acl_list_index, tag);
return VNET_API_ERROR_NO_SUCH_ENTRY;
}
}
if (0 == count)
{
clib_warning
("acl-plugin-warning: Trying to create empty MACIP ACL (tag %s)",
tag);
}
/* if replacing the ACL, unapply the classifier tables first - they are about to be destroyed */
if (~0 != *acl_list_index)
rv = macip_maybe_apply_unapply_classifier_tables (am, *acl_list_index, 0);
void *oldheap = acl_set_heap (am);
/* Create and populate the rules */
if (count > 0)
vec_validate (acl_new_rules, count - 1);
for (i = 0; i < count; i++)
{
r = &acl_new_rules[i];
r->is_permit = rules[i].is_permit;
r->is_ipv6 = rules[i].is_ipv6;
memcpy (&r->src_mac, rules[i].src_mac, 6);
memcpy (&r->src_mac_mask, rules[i].src_mac_mask, 6);
if (rules[i].is_ipv6)
memcpy (&r->src_ip_addr.ip6, rules[i].src_ip_addr, 16);
else
memcpy (&r->src_ip_addr.ip4, rules[i].src_ip_addr, 4);
r->src_prefixlen = rules[i].src_ip_prefix_len;
}
if (~0 == *acl_list_index)
{
/* Get ACL index */
pool_get_aligned (am->macip_acls, a, CLIB_CACHE_LINE_BYTES);
memset (a, 0, sizeof (*a));
/* Will return the newly allocated ACL index */
*acl_list_index = a - am->macip_acls;
}
else
{
a = pool_elt_at_index (am->macip_acls, *acl_list_index);
if (a->rules)
{
vec_free (a->rules);
}
macip_destroy_classify_tables (am, *acl_list_index);
}
a->rules = acl_new_rules;
a->count = count;
memcpy (a->tag, tag, sizeof (a->tag));
/* Create and populate the classifier tables */
macip_create_classify_tables (am, *acl_list_index);
clib_mem_set_heap (oldheap);
/* If the ACL was already applied somewhere, reapply the newly created tables */
rv = rv
|| macip_maybe_apply_unapply_classifier_tables (am, *acl_list_index, 1);
return rv;
}
/* No check for validity of sw_if_index - the callers were supposed to validate */
static int
macip_acl_interface_del_acl (acl_main_t * am, u32 sw_if_index)
{
int rv;
u32 macip_acl_index;
macip_acl_list_t *a;
void *oldheap = acl_set_heap (am);
vec_validate_init_empty (am->macip_acl_by_sw_if_index, sw_if_index, ~0);
clib_mem_set_heap (oldheap);
macip_acl_index = am->macip_acl_by_sw_if_index[sw_if_index];
/* No point in deleting MACIP ACL which is not applied */
if (~0 == macip_acl_index)
return VNET_API_ERROR_NO_SUCH_ENTRY;
a = pool_elt_at_index (am->macip_acls, macip_acl_index);
/* remove the classifier tables off the interface L2 ACL */
rv =
vnet_set_input_acl_intfc (am->vlib_main, sw_if_index, a->ip4_table_index,
a->ip6_table_index, a->l2_table_index, 0);
rv |=
vnet_set_output_acl_intfc (am->vlib_main, sw_if_index,
a->out_ip4_table_index, a->out_ip6_table_index,
a->out_l2_table_index, 0);
/* Unset the MACIP ACL index */
am->macip_acl_by_sw_if_index[sw_if_index] = ~0;
return rv;
}
/* No check for validity of sw_if_index - the callers were supposed to validate */
static int
macip_acl_interface_add_acl (acl_main_t * am, u32 sw_if_index,
u32 macip_acl_index)
{
macip_acl_list_t *a;
int rv;
if (pool_is_free_index (am->macip_acls, macip_acl_index))
{
return VNET_API_ERROR_NO_SUCH_ENTRY;
}
void *oldheap = acl_set_heap (am);
a = pool_elt_at_index (am->macip_acls, macip_acl_index);
vec_validate_init_empty (am->macip_acl_by_sw_if_index, sw_if_index, ~0);
clib_mem_set_heap (oldheap);
/* If there is already a MACIP ACL applied, unapply it */
if (~0 != am->macip_acl_by_sw_if_index[sw_if_index])
macip_acl_interface_del_acl (am, sw_if_index);
am->macip_acl_by_sw_if_index[sw_if_index] = macip_acl_index;
/* Apply the classifier tables for L2 ACLs */
rv =
vnet_set_input_acl_intfc (am->vlib_main, sw_if_index, a->ip4_table_index,
a->ip6_table_index, a->l2_table_index, 1);
rv |=
vnet_set_output_acl_intfc (am->vlib_main, sw_if_index,
a->out_ip4_table_index, a->out_ip6_table_index,
a->out_l2_table_index, 1);
return rv;
}
static int
macip_acl_del_list (u32 acl_list_index)
{
acl_main_t *am = &acl_main;
macip_acl_list_t *a;
int i;
if (pool_is_free_index (am->macip_acls, acl_list_index))
{
return VNET_API_ERROR_NO_SUCH_ENTRY;
}
/* delete any references to the ACL */
for (i = 0; i < vec_len (am->macip_acl_by_sw_if_index); i++)
{
if (am->macip_acl_by_sw_if_index[i] == acl_list_index)
{
macip_acl_interface_del_acl (am, i);
}
}
void *oldheap = acl_set_heap (am);
/* Now that classifier tables are detached, clean them up */
macip_destroy_classify_tables (am, acl_list_index);
/* now we can delete the ACL itself */
a = pool_elt_at_index (am->macip_acls, acl_list_index);
if (a->rules)
{
vec_free (a->rules);
}
pool_put (am->macip_acls, a);
clib_mem_set_heap (oldheap);
return 0;
}
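/* Apply or un-apply a MACIP ACL on an interface, depending on is_add. */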
static int
macip_acl_interface_add_del_acl (u32 sw_if_index, u8 is_add,
u32 acl_list_index)
{
acl_main_t *am = &acl_main;
int rv = -1;
if (is_add)
{
rv = macip_acl_interface_add_acl (am, sw_if_index, acl_list_index);
}
else
{
rv = macip_acl_interface_del_acl (am, sw_if_index);
}
return rv;
}
/*
 * If the client does not allocate enough memory for a variable-length
 * message, and then proceeds to use it as if the full memory were
 * allocated, then absent this check we would happily consume whatever is
 * there on the VPP side and carry on as if nothing had happened. The
 * resulting effects range from mere garbage in the API decode
 * (because the decoder reads too far), to potential memory
 * corruption.
 *
 * This verifies that the actual length of the message is
 * at least expected_len, and complains loudly if it is not.
 *
 * A failing check here is 100% a software bug on the API user side,
 * so we might as well yell.
 *
 */
static int
verify_message_len (void *mp, u32 expected_len, char *where)
{
u32 supplied_len = vl_msg_api_get_msg_length (mp);
if (supplied_len < expected_len)
{
clib_warning ("%s: Supplied message length %d is less than expected %d",
where, supplied_len, expected_len);
return 0;
}
else
{
return 1;
}
}
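/*
 * Typical usage (see the handlers below): the expected length is the fixed
 * message header plus 'count' variable-size rule entries; if the client
 * supplied fewer bytes than that, the handler rejects the request with
 * VNET_API_ERROR_INVALID_VALUE instead of reading past the message.
 */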
/* API message handler */
static void
vl_api_acl_add_replace_t_handler (vl_api_acl_add_replace_t * mp)
{
vl_api_acl_add_replace_reply_t *rmp;
acl_main_t *am = &acl_main;
int rv;
u32 acl_list_index = ntohl (mp->acl_index);
u32 acl_count = ntohl (mp->count);
u32 expected_len = sizeof (*mp) + acl_count * sizeof (mp->r[0]);
if (verify_message_len (mp, expected_len, "acl_add_replace"))
{
rv = acl_add_list (acl_count, mp->r, &acl_list_index, mp->tag);
}
else
{
rv = VNET_API_ERROR_INVALID_VALUE;
}
/* *INDENT-OFF* */
REPLY_MACRO2(VL_API_ACL_ADD_REPLACE_REPLY,
({
rmp->acl_index = htonl(acl_list_index);
}));
/* *INDENT-ON* */
}
static void
vl_api_acl_del_t_handler (vl_api_acl_del_t * mp)
{
acl_main_t *am = &acl_main;
vl_api_acl_del_reply_t *rmp;
int rv;
rv = acl_del_list (ntohl (mp->acl_index));
REPLY_MACRO (VL_API_ACL_DEL_REPLY);
}
static void
vl_api_acl_interface_add_del_t_handler (vl_api_acl_interface_add_del_t * mp)
{
acl_main_t *am = &acl_main;
vnet_interface_main_t *im = &am->vnet_main->interface_main;
u32 sw_if_index = ntohl (mp->sw_if_index);
vl_api_acl_interface_add_del_reply_t *rmp;
int rv = -1;
if (pool_is_free_index (im->sw_interfaces, sw_if_index))
rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
else
rv =
acl_interface_add_del_inout_acl (sw_if_index, mp->is_add,
mp->is_input, ntohl (mp->acl_index));
REPLY_MACRO (VL_API_ACL_INTERFACE_ADD_DEL_REPLY);
}
static void
vl_api_acl_interface_set_acl_list_t_handler
(vl_api_acl_interface_set_acl_list_t * mp)
{
acl_main_t *am = &acl_main;
vl_api_acl_interface_set_acl_list_reply_t *rmp;
int rv = 0;
int i;
vnet_interface_main_t *im = &am->vnet_main->interface_main;
u32 sw_if_index = ntohl (mp->sw_if_index);
if (pool_is_free_index (im->sw_interfaces, sw_if_index))
rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
else
{
acl_interface_reset_inout_acls (sw_if_index, 0);
acl_interface_reset_inout_acls (sw_if_index, 1);
for (i = 0; i < mp->count; i++)
{
if (acl_is_not_defined (am, ntohl (mp->acls[i])))
{
/* ACL does not exist, so we cannot apply it */
rv = VNET_API_ERROR_NO_SUCH_ENTRY;
}
}
if (0 == rv)
{
for (i = 0; i < mp->count; i++)
{
acl_interface_add_del_inout_acl (sw_if_index, 1,
(i < mp->n_input),
ntohl (mp->acls[i]));
}
}
}
REPLY_MACRO (VL_API_ACL_INTERFACE_SET_ACL_LIST_REPLY);
}
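/*
 * Convert an internal acl_rule_t into the wire-format vl_api_acl_rule_t:
 * addresses are copied as-is, port/ICMP ranges are converted to network
 * byte order.
 */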
static void
copy_acl_rule_to_api_rule (vl_api_acl_rule_t * api_rule, acl_rule_t * r)
{
api_rule->is_permit = r->is_permit;
api_rule->is_ipv6 = r->is_ipv6;
if (r->is_ipv6)
{
memcpy (api_rule->src_ip_addr, &r->src, sizeof (r->src));
memcpy (api_rule->dst_ip_addr, &r->dst, sizeof (r->dst));
}
else
{
memcpy (api_rule->src_ip_addr, &r->src.ip4, sizeof (r->src.ip4));
memcpy (api_rule->dst_ip_addr, &r->dst.ip4, sizeof (r->dst.ip4));
}
api_rule->src_ip_prefix_len = r->src_prefixlen;
api_rule->dst_ip_prefix_len = r->dst_prefixlen;
api_rule->proto = r->proto;
api_rule->srcport_or_icmptype_first = htons (r->src_port_or_type_first);
api_rule->srcport_or_icmptype_last = htons (r->src_port_or_type_last);
api_rule->dstport_or_icmpcode_first = htons (r->dst_port_or_code_first);
api_rule->dstport_or_icmpcode_last = htons (r->dst_port_or_code_last);
api_rule->tcp_flags_mask = r->tcp_flags_mask;
api_rule->tcp_flags_value = r->tcp_flags_value;
}
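/*
 * Build and send a variable-length acl_details reply, sized for
 * acl->count rules, to the registered API client.
 */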
static void
send_acl_details (acl_main_t * am, vl_api_registration_t * reg,
acl_list_t * acl, u32 context)
{
vl_api_acl_details_t *mp;
vl_api_acl_rule_t *rules;
int i;
int msg_size = sizeof (*mp) + sizeof (mp->r[0]) * acl->count;
void *oldheap = acl_set_heap (am);
mp = vl_msg_api_alloc (msg_size);
memset (mp, 0, msg_size);
mp->_vl_msg_id = ntohs (VL_API_ACL_DETAILS + am->msg_id_base);
/* fill in the message */
mp->context = context;
mp->count = htonl (acl->count);
mp->acl_index = htonl (acl - am->acls);
memcpy (mp->tag, acl->tag, sizeof (mp->tag));
// clib_memcpy (mp->r, acl->rules, acl->count * sizeof(acl->rules[0]));
rules = mp->r;
for (i = 0; i < acl->count; i++)
{
copy_acl_rule_to_api_rule (&rules[i], &acl->rules[i]);
}
clib_mem_set_heap (oldheap);
vl_api_send_msg (reg, (u8 *) mp);
}
static void
vl_api_acl_dump_t_handler (vl_api_acl_dump_t * mp)
{
acl_main_t *am = &acl_main;
u32 acl_index;
acl_list_t *acl;
int rv = -1;
vl_api_registration_t *reg;
reg = vl_api_client_index_to_registration (mp->client_index);
if (!reg)
return;
if (mp->acl_index == ~0)
{
/* *INDENT-OFF* */
/* Just dump all ACLs */
pool_foreach (acl, am->acls,
({
send_acl_details(am, reg, acl, mp->context);
}));
/* *INDENT-ON* */
}
else
{
acl_index = ntohl (mp->acl_index);
if (!pool_is_free_index (am->acls, acl_index))
{
acl = pool_elt_at_index (am->acls, acl_index);
send_acl_details (am, reg, acl, mp->context);
}
}
if (rv == -1)
{
/* FIXME API: should we signal an error here at all ? */
return;
}
}
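/*
 * Reply with the ACLs applied on one interface: the first n_input entries
 * in the acls[] array are the input ACLs, the remaining entries are the
 * output ACLs.
 */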
static void
send_acl_interface_list_details (acl_main_t * am,
vl_api_registration_t * reg,
u32 sw_if_index, u32 context)
{
vl_api_acl_interface_list_details_t *mp;
int msg_size;
int n_input;
int n_output;
int count;
int i = 0;
void *oldheap = acl_set_heap (am);
vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index);
vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index);
n_input = vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]);
n_output = vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]);
count = n_input + n_output;
msg_size = sizeof (*mp);
msg_size += sizeof (mp->acls[0]) * count;
mp = vl_msg_api_alloc (msg_size);
memset (mp, 0, msg_size);
mp->_vl_msg_id =
ntohs (VL_API_ACL_INTERFACE_LIST_DETAILS + am->msg_id_base);
/* fill in the message */
mp->context = context;
mp->sw_if_index = htonl (sw_if_index);
mp->count = count;
mp->n_input = n_input;
for (i = 0; i < n_input; i++)
{
mp->acls[i] = htonl (am->input_acl_vec_by_sw_if_index[sw_if_index][i]);
}
for (i = 0; i < n_output; i++)
{
mp->acls[n_input + i] =
htonl (am->output_acl_vec_by_sw_if_index[sw_if_index][i]);
}
clib_mem_set_heap (oldheap);
vl_api_send_msg (reg, (u8 *) mp);
}
static void
vl_api_acl_interface_list_dump_t_handler (vl_api_acl_interface_list_dump_t *
mp)
{
acl_main_t *am = &acl_main;
vnet_sw_interface_t *swif;
vnet_interface_main_t *im = &am->vnet_main->interface_main;
u32 sw_if_index;
vl_api_registration_t *reg;
reg = vl_api_client_index_to_registration (mp->client_index);
if (!reg)
return;
if (mp->sw_if_index == ~0)
{
/* *INDENT-OFF* */
pool_foreach (swif, im->sw_interfaces,
({
send_acl_interface_list_details(am, reg, swif->sw_if_index, mp->context);
}));
/* *INDENT-ON* */
}
else
{
sw_if_index = ntohl (mp->sw_if_index);
if (!pool_is_free_index (im->sw_interfaces, sw_if_index))
send_acl_interface_list_details (am, reg, sw_if_index, mp->context);
}
}
/* MACIP ACL API handlers */
static void
vl_api_macip_acl_add_t_handler (vl_api_macip_acl_add_t * mp)
{
vl_api_macip_acl_add_reply_t *rmp;
acl_main_t *am = &acl_main;
int rv;
u32 acl_list_index = ~0;
u32 acl_count = ntohl (mp->count);
u32 expected_len = sizeof (*mp) + acl_count * sizeof (mp->r[0]);
if (verify_message_len (mp, expected_len, "macip_acl_add"))
{
rv = macip_acl_add_list (acl_count, mp->r, &acl_list_index, mp->tag);
}
else
{
rv = VNET_API_ERROR_INVALID_VALUE;
}
/* *INDENT-OFF* */
REPLY_MACRO2(VL_API_MACIP_ACL_ADD_REPLY,
({
rmp->acl_index = htonl(acl_list_index);
}));
/* *INDENT-ON* */
}
static void
vl_api_macip_acl_add_replace_t_handler (vl_api_macip_acl_add_replace_t * mp)
{
vl_api_macip_acl_add_replace_reply_t *rmp;
acl_main_t *am = &acl_main;
int rv;
u32 acl_list_index = ntohl (mp->acl_index);
u32 acl_count = ntohl (mp->count);
u32 expected_len = sizeof (*mp) + acl_count * sizeof (mp->r[0]);
if (verify_message_len (mp, expected_len, "macip_acl_add_replace"))
{
rv = macip_acl_add_list (acl_count, mp->r, &acl_list_index, mp->tag);
}
else
{
rv = VNET_API_ERROR_INVALID_VALUE;
}
/* *INDENT-OFF* */
REPLY_MACRO2(VL_API_MACIP_ACL_ADD_REPLACE_REPLY,
({
rmp->acl_index = htonl(acl_list_index);
}));
/* *INDENT-ON* */
}
static void
vl_api_macip_acl_del_t_handler (vl_api_macip_acl_del_t * mp)
{
acl_main_t *am = &acl_main;
vl_api_macip_acl_del_reply_t *rmp;
int rv;
rv = macip_acl_del_list (ntohl (mp->acl_index));
REPLY_MACRO (VL_API_MACIP_ACL_DEL_REPLY);
}
static void
vl_api_macip_acl_interface_add_del_t_handler
(vl_api_macip_acl_interface_add_del_t * mp)
{
acl_main_t *am = &acl_main;
vl_api_macip_acl_interface_add_del_reply_t *rmp;
int rv = -1;
vnet_interface_main_t *im = &am->vnet_main->interface_main;
u32 sw_if_index = ntohl (mp->sw_if_index);
if (pool_is_free_index (im->sw_interfaces, sw_if_index))
rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
else
rv =
macip_acl_interface_add_del_acl (sw_if_index, mp->is_add,
ntohl (mp->acl_index));
REPLY_MACRO (VL_API_MACIP_ACL_INTERFACE_ADD_DEL_REPLY);
}
static void
send_macip_acl_details (acl_main_t * am, vl_api_registration_t * reg,
macip_acl_list_t * acl, u32 context)
{
vl_api_macip_acl_details_t *mp;
vl_api_macip_acl_rule_t *rules;
macip_acl_rule_t *r;
int i;
int msg_size = sizeof (*mp) + (acl ? sizeof (mp->r[0]) * acl->count : 0);
mp = vl_msg_api_alloc (msg_size);
memset (mp, 0, msg_size);
mp->_vl_msg_id = ntohs (VL_API_MACIP_ACL_DETAILS + am->msg_id_base);
/* fill in the message */
mp->context = context;
if (acl)
{
memcpy (mp->tag, acl->tag, sizeof (mp->tag));
mp->count = htonl (acl->count);
mp->acl_index = htonl (acl - am->macip_acls);
rules = mp->r;
for (i = 0; i < acl->count; i++)
{
r = &acl->rules[i];
rules[i].is_permit = r->is_permit;
rules[i].is_ipv6 = r->is_ipv6;
memcpy (rules[i].src_mac, &r->src_mac, sizeof (r->src_mac));
memcpy (rules[i].src_mac_mask, &r->src_mac_mask,
sizeof (r->src_mac_mask));
if (r->is_ipv6)
memcpy (rules[i].src_ip_addr, &r->src_ip_addr.ip6,
sizeof (r->src_ip_addr.ip6));
else
memcpy (rules[i].src_ip_addr, &r->src_ip_addr.ip4,
sizeof (r->src_ip_addr.ip4));
rules[i].src_ip_prefix_len = r->src_prefixlen;
}
}
else
{
/* No martini, no party - no ACL applied to this interface. */
mp->acl_index = ~0;
mp->count = 0;
}
vl_api_send_msg (reg, (u8 *) mp);
}
static void
vl_api_macip_acl_dump_t_handler (vl_api_macip_acl_dump_t * mp)
{
acl_main_t *am = &acl_main;
macip_acl_list_t *acl;
vl_api_registration_t *reg;
reg = vl_api_client_index_to_registration (mp->client_index);
if (!reg)
return;
if (mp->acl_index == ~0)
{
/* Just dump all MACIP ACLs */
/* *INDENT-OFF* */
pool_foreach (acl, am->macip_acls,
({
send_macip_acl_details (am, reg, acl, mp->context);
}));
/* *INDENT-ON* */
}
else
{
u32 acl_index = ntohl (mp->acl_index);
if (!pool_is_free_index (am->macip_acls, acl_index))
{
acl = pool_elt_at_index (am->macip_acls, acl_index);
send_macip_acl_details (am, reg, acl, mp->context);
}
}
}
static void
vl_api_macip_acl_interface_get_t_handler (vl_api_macip_acl_interface_get_t *
mp)
{
acl_main_t *am = &acl_main;
vl_api_macip_acl_interface_get_reply_t *rmp;
u32 count = vec_len (am->macip_acl_by_sw_if_index);
int msg_size = sizeof (*rmp) + sizeof (rmp->acls[0]) * count;
vl_api_registration_t *reg;
int i;
reg = vl_api_client_index_to_registration (mp->client_index);
if (!reg)
return;
rmp = vl_msg_api_alloc (msg_size);
memset (rmp, 0, msg_size);
rmp->_vl_msg_id =
ntohs (VL_API_MACIP_ACL_INTERFACE_GET_REPLY + am->msg_id_base);
rmp->context = mp->context;
rmp->count = htonl (count);
for (i = 0; i < count; i++)
{
rmp->acls[i] = htonl (am->macip_acl_by_sw_if_index[i]);
}
vl_api_send_msg (reg, (u8 *) rmp);
}
static void
send_macip_acl_interface_list_details (acl_main_t * am,
vl_api_registration_t * reg,
u32 sw_if_index,
u32 acl_index, u32 context)
{
vl_api_macip_acl_interface_list_details_t *rmp;
/* at this time there is only ever one MACIP ACL per interface */
int msg_size = sizeof (*rmp) + sizeof (rmp->acls[0]);
rmp = vl_msg_api_alloc (msg_size);
memset (rmp, 0, msg_size);
rmp->_vl_msg_id =
ntohs (VL_API_MACIP_ACL_INTERFACE_LIST_DETAILS + am->msg_id_base);
/* fill in the message */
rmp->context = context;
rmp->count = 1;
rmp->sw_if_index = htonl (sw_if_index);
rmp->acls[0] = htonl (acl_index);
vl_api_send_msg (reg, (u8 *) rmp);
}
static void
vl_api_macip_acl_interface_list_dump_t_handler
(vl_api_macip_acl_interface_list_dump_t * mp)
{
vl_api_registration_t *reg;
acl_main_t *am = &acl_main;
u32 sw_if_index = ntohl (mp->sw_if_index);
reg = vl_api_client_index_to_registration (mp->client_index);
if (!reg)
return;
if (sw_if_index == ~0)
{
vec_foreach_index (sw_if_index, am->macip_acl_by_sw_if_index)
{
if (~0 != am->macip_acl_by_sw_if_index[sw_if_index])
{
send_macip_acl_interface_list_details (am, reg, sw_if_index,
am->macip_acl_by_sw_if_index
[sw_if_index],
mp->context);
}
}
}
else
{
if (vec_len (am->macip_acl_by_sw_if_index) > sw_if_index)
{
send_macip_acl_interface_list_details (am, reg, sw_if_index,
am->macip_acl_by_sw_if_index
[sw_if_index], mp->context);
}
}
}
/* Set up the API message handling tables */
static clib_error_t *
acl_plugin_api_hookup (vlib_main_t * vm)
{
acl_main_t *am = &acl_main;
#define _(N,n) \
vl_msg_api_set_handlers((VL_API_##N + am->msg_id_base), \
#n, \
vl_api_##n##_t_handler, \
vl_noop_handler, \
vl_api_##n##_t_endian, \
vl_api_##n##_t_print, \
sizeof(vl_api_##n##_t), 1);
foreach_acl_plugin_api_msg;
#undef _
return 0;
}
#define vl_msg_name_crc_list
#include <acl/acl_all_api_h.h>
#undef vl_msg_name_crc_list
static void
setup_message_id_table (acl_main_t * am, api_main_t * apim)
{
#define _(id,n,crc) \
vl_msg_api_add_msg_name_crc (apim, #n "_" #crc, id + am->msg_id_base);
foreach_vl_msg_name_crc_acl;
#undef _
}
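/*
 * Wire the plugin's L2 ACL nodes into the l2-input-classify and
 * l2-output-classify graph nodes, and initialize the feature bitmap
 * next-node indices for the IPv4/IPv6 in/out paths.
 */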
static void
acl_setup_fa_nodes (void)
{
vlib_main_t *vm = vlib_get_main ();
acl_main_t *am = &acl_main;
vlib_node_t *n, *n4, *n6;
n = vlib_get_node_by_name (vm, (u8 *) "l2-input-classify");
n4 = vlib_get_node_by_name (vm, (u8 *) "acl-plugin-in-ip4-l2");
n6 = vlib_get_node_by_name (vm, (u8 *) "acl-plugin-in-ip6-l2");
am->l2_input_classify_next_acl_ip4 =
vlib_node_add_next_with_slot (vm, n->index, n4->index, ~0);
am->l2_input_classify_next_acl_ip6 =
vlib_node_add_next_with_slot (vm, n->index, n6->index, ~0);
feat_bitmap_init_next_nodes (vm, n4->index, L2INPUT_N_FEAT,
l2input_get_feat_names (),
am->fa_acl_in_ip4_l2_node_feat_next_node_index);
feat_bitmap_init_next_nodes (vm, n6->index, L2INPUT_N_FEAT,
l2input_get_feat_names (),
am->fa_acl_in_ip6_l2_node_feat_next_node_index);
n = vlib_get_node_by_name (vm, (u8 *) "l2-output-classify");
n4 = vlib_get_node_by_name (vm, (u8 *) "acl-plugin-out-ip4-l2");
n6 = vlib_get_node_by_name (vm, (u8 *) "acl-plugin-out-ip6-l2");
am->l2_output_classify_next_acl_ip4 =
vlib_node_add_next_with_slot (vm, n->index, n4->index, ~0);
am->l2_output_classify_next_acl_ip6 =
vlib_node_add_next_with_slot (vm, n->index, n6->index, ~0);
feat_bitmap_init_next_nodes (vm, n4->index, L2OUTPUT_N_FEAT,
l2output_get_feat_names (),
am->fa_acl_out_ip4_l2_node_feat_next_node_index);
feat_bitmap_init_next_nodes (vm, n6->index, L2OUTPUT_N_FEAT,
l2output_get_feat_names (),
am->fa_acl_out_ip6_l2_node_feat_next_node_index);
}
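/*
 * Store the session timeout in seconds for the given timeout type, and
 * pre-compute its equivalent in CPU clock ticks.
 */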
static void
acl_set_timeout_sec (int timeout_type, u32 value)
{
acl_main_t *am = &acl_main;
clib_time_t *ct = &am->vlib_main->clib_time;
if (timeout_type < ACL_N_TIMEOUTS)
{
am->session_timeout_sec[timeout_type] = value;
}
else
{
clib_warning ("Unknown timeout type %d", timeout_type);
return;
}
am->session_timeout[timeout_type] =
(u64) (((f64) value) / ct->seconds_per_clock);
}
static void
acl_set_session_max_entries (u32 value)
{
acl_main_t *am = &acl_main;
am->fa_conn_table_max_entries = value;
}
static int
acl_set_skip_ipv6_eh (u32 eh, u32 value)
{
acl_main_t *am = &acl_main;
if ((eh < 256) && (value < 2))
{
am->fa_ipv6_known_eh_bitmap =
clib_bitmap_set (am->fa_ipv6_known_eh_bitmap, eh, value);
return 1;
}
else
return 0;
}
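/*
 * Interface add/del callback: on interface deletion, purge its sessions
 * via the cleaner process and un-apply any MACIP and in/out ACLs that the
 * user might have left applied.
 */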
static clib_error_t *
acl_sw_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add)
{
acl_main_t *am = &acl_main;
if (0 == am->acl_mheap)
{
/* ACL heap is not initialized, so definitely nothing to do. */
return 0;
}
if (0 == is_add)
{
vlib_process_signal_event (am->vlib_main, am->fa_cleaner_node_index,
ACL_FA_CLEANER_DELETE_BY_SW_IF_INDEX,
sw_if_index);
/* also unapply any ACLs in case the users did not do so. */
macip_acl_interface_del_acl (am, sw_if_index);
acl_interface_reset_inout_acls (sw_if_index, 0);
acl_interface_reset_inout_acls (sw_if_index, 1);
}
return 0;
}
VNET_SW_INTERFACE_ADD_DEL_FUNCTION (acl_sw_interface_add_del);
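/*
 * CLI handler for "set acl-plugin ...". Illustrative invocations (the
 * values are examples only; see the unformat patterns below):
 *   set acl-plugin session timeout tcp idle 3600
 *   set acl-plugin session table max-entries 500000
 *   set acl-plugin skip-ipv6-extension-header 60 1
 */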
static clib_error_t *
acl_set_aclplugin_fn (vlib_main_t * vm,
unformat_input_t * input, vlib_cli_command_t * cmd)
{
clib_error_t *error = 0;
u32 timeout = 0;
u32 val = 0;
u32 eh_val = 0;
uword memory_size = 0;
acl_main_t *am = &acl_main;
if (unformat (input, "skip-ipv6-extension-header %u %u", &eh_val, &val))
{
if (!acl_set_skip_ipv6_eh (eh_val, val))
{
error = clib_error_return (0, "expecting eh=0..255, value=0..1");
}
goto done;
}
if (unformat (input, "use-hash-acl-matching %u", &val))
{
am->use_hash_acl_matching = (val != 0);
goto done;
}
if (unformat (input, "l4-match-nonfirst-fragment %u", &val))
{
am->l4_match_nonfirst_fragment = (val != 0);
goto done;
}
if (unformat (input, "heap"))
{
if (unformat (input, "main"))
{
if (unformat (input, "validate %u", &val))
acl_plugin_acl_set_validate_heap (am, val);
else if (unformat (input, "trace %u", &val))
acl_plugin_acl_set_trace_heap (am, val);
goto done;
}
else if (unformat (input, "hash"))
{
if (unformat (input, "validate %u", &val))
acl_plugin_hash_acl_set_validate_heap (am, val);
else if (unformat (input, "trace %u", &val))
acl_plugin_hash_acl_set_trace_heap (am, val);
goto done;
}
goto done;
}
if (unformat (input, "session"))
{
if (unformat (input, "table"))
{
/* The commands here are for tuning/testing. No user-serviceable parts inside */
if (unformat (input, "max-entries"))
{
if (!unformat (input, "%u", &val))
{
error = clib_error_return (0,
"expecting maximum number of entries, got `%U`",
format_unformat_error, input);
goto done;
}
else
{
acl_set_session_max_entries (val);
goto done;
}
}
if (unformat (input, "hash-table-buckets"))
{
if (!unformat (input, "%u", &val))
{
error = clib_error_return (0,
"expecting maximum number of hash table buckets, got `%U`",
format_unformat_error, input);
goto done;
}
else
{
am->fa_conn_table_hash_num_buckets = val;
goto done;
}
}
if (unformat (input, "hash-table-memory"))
{
if (!unformat (input, "%U", unformat_memory_size, &memory_size))
{
error = clib_error_return (0,
"expecting maximum amount of hash table memory, got `%U`",
format_unformat_error, input);
goto done;
}
else
{
am->fa_conn_table_hash_memory_size = memory_size;
goto done;
}
}
if (unformat (input, "event-trace"))
{
if (!unformat (input, "%u", &val))
{
error = clib_error_return (0,
"expecting trace level, got `%U`",
format_unformat_error, input);
goto done;
}
else
{
am->trace_sessions = val;
goto done;
}
}
goto done;
}
if (unformat (input, "timeout"))
{
if (unformat (input, "udp"))
{
if (unformat (input, "idle"))
{
if (!unformat (input, "%u", &timeout))
{
error = clib_error_return (0,
"expecting timeout value in seconds, got `%U`",
format_unformat_error,
input);
goto done;
}
else
{
acl_set_timeout_sec (ACL_TIMEOUT_UDP_IDLE, timeout);
goto done;
}
}
}
if (unformat (input, "tcp"))
{
if (unformat (input, "idle"))
{
if (!unformat (input, "%u", &timeout))
{
error = clib_error_return (0,
"expecting timeout value in seconds, got `%U`",
format_unformat_error,
input);
goto done;
}
else
{
acl_set_timeout_sec (ACL_TIMEOUT_TCP_IDLE, timeout);
goto done;
}
}
if (unformat (input, "transient"))
{
if (!unformat (input, "%u", &timeout))
{
error = clib_error_return (0,
"expecting timeout value in seconds, got `%U`",
format_unformat_error,
input);
goto done;
}
else
{
acl_set_timeout_sec (ACL_TIMEOUT_TCP_TRANSIENT,
timeout);
goto done;
}
}
}
goto done;
}
}
done:
return error;
}
static u8 *
my_format_mac_address (u8 * s, va_list * args)
{
u8 *a = va_arg (*args, u8 *);
return format (s, "%02x:%02x:%02x:%02x:%02x:%02x",
a[0], a[1], a[2], a[3], a[4], a[5]);
}
static inline u8 *
my_macip_acl_rule_t_pretty_format (u8 * out, va_list * args)
{
macip_acl_rule_t *a = va_arg (*args, macip_acl_rule_t *);
out = format (out, "%s action %d ip %U/%d mac %U mask %U",
a->is_ipv6 ? "ipv6" : "ipv4", a->is_permit,
format_ip46_address, &a->src_ip_addr,
a->is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4,
a->src_prefixlen,
my_format_mac_address, a->src_mac,
my_format_mac_address, a->src_mac_mask);
return (out);
}
static void
macip_acl_print (acl_main_t * am, u32 macip_acl_index)
{
vlib_main_t *vm = am->vlib_main;
int i;
/* Don't try to print someone else's memory */
if (macip_acl_index >= vec_len (am->macip_acls))
return;
macip_acl_list_t *a = vec_elt_at_index (am->macip_acls, macip_acl_index);
int free_pool_slot = pool_is_free_index (am->macip_acls, macip_acl_index);
vlib_cli_output (vm,
"MACIP acl_index: %d, count: %d (true len %d) tag {%s} is free pool slot: %d\n",
macip_acl_index, a->count, vec_len (a->rules), a->tag,
free_pool_slot);
vlib_cli_output (vm,
" ip4_table_index %d, ip6_table_index %d, l2_table_index %d\n",
a->ip4_table_index, a->ip6_table_index, a->l2_table_index);
vlib_cli_output (vm,
" out_ip4_table_index %d, out_ip6_table_index %d, out_l2_table_index %d\n",
a->out_ip4_table_index, a->out_ip6_table_index,
a->out_l2_table_index);
for (i = 0; i < vec_len (a->rules); i++)
vlib_cli_output (vm, " rule %d: %U\n", i,
my_macip_acl_rule_t_pretty_format,
vec_elt_at_index (a->rules, i));
}
static clib_error_t *
acl_show_aclplugin_macip_acl_fn (vlib_main_t * vm,
unformat_input_t *
input, vlib_cli_command_t * cmd)
{
clib_error_t *error = 0;
acl_main_t *am = &acl_main;
int i;
for (i = 0; i < vec_len (am->macip_acls); i++)
macip_acl_print (am, i);
return error;
}
static clib_error_t *
acl_show_aclplugin_macip_interface_fn (vlib_main_t * vm,
unformat_input_t *
input, vlib_cli_command_t * cmd)
{
clib_error_t *error = 0;
acl_main_t *am = &acl_main;
int i;
for (i = 0; i < vec_len (am->macip_acl_by_sw_if_index); i++)
{
vlib_cli_output (vm, " sw_if_index %d: %d\n", i,
vec_elt (am->macip_acl_by_sw_if_index, i));
}
return error;
}
#define PRINT_AND_RESET(vm, out0) do { vlib_cli_output(vm, "%v", out0); vec_reset_length(out0); } while(0)
static void
acl_print_acl (vlib_main_t * vm, acl_main_t * am, int acl_index)
{
acl_rule_t *r;
u8 *out0 = format (0, "acl-index %u count %u tag {%s}\n", acl_index,
am->acls[acl_index].count, am->acls[acl_index].tag);
int j;
PRINT_AND_RESET (vm, out0);
for (j = 0; j < am->acls[acl_index].count; j++)
{
r = &am->acls[acl_index].rules[j];
out0 = format (out0, " %4d: %s ", j, r->is_ipv6 ? "ipv6" : "ipv4");
out0 = format_acl_action (out0, r->is_permit);
out0 = format (out0, " src %U/%d", format_ip46_address, &r->src,
r->is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4,
r->src_prefixlen);
out0 =
format (out0, " dst %U/%d", format_ip46_address, &r->dst,
r->is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4, r->dst_prefixlen);
out0 = format (out0, " proto %d", r->proto);
out0 = format (out0, " sport %d", r->src_port_or_type_first);
if (r->src_port_or_type_first != r->src_port_or_type_last)
{
out0 = format (out0, "-%d", r->src_port_or_type_last);
}
out0 = format (out0, " dport %d", r->dst_port_or_code_first);
if (r->dst_port_or_code_first != r->dst_port_or_code_last)
{
out0 = format (out0, "-%d", r->dst_port_or_code_last);
}
if (r->tcp_flags_mask || r->tcp_flags_value)
{
out0 =
format (out0, " tcpflags %d mask %d", r->tcp_flags_value,
r->tcp_flags_mask);
}
out0 = format (out0, "\n");
PRINT_AND_RESET (vm, out0);
}
}
#undef PRINT_AND_RESET
static void
acl_plugin_show_acl (acl_main_t * am, u32 acl_index)
{
u32 i;
vlib_main_t *vm = am->vlib_main;
for (i = 0; i < vec_len (am->acls); i++)
{
if (acl_is_not_defined (am, i))
{
/* don't attempt to show the ACLs that do not exist */
continue;
}
if ((acl_index != ~0) && (acl_index != i))
{
continue;
}
acl_print_acl (vm, am, i);
if (i < vec_len (am->input_sw_if_index_vec_by_acl))
{
vlib_cli_output (vm, " applied inbound on sw_if_index: %U\n",
format_vec32, am->input_sw_if_index_vec_by_acl[i],
"%d");
}
if (i < vec_len (am->output_sw_if_index_vec_by_acl))
{
vlib_cli_output (vm, " applied outbound on sw_if_index: %U\n",
format_vec32, am->output_sw_if_index_vec_by_acl[i],
"%d");
}
}
}
static clib_error_t *
acl_show_aclplugin_acl_fn (vlib_main_t * vm,
unformat_input_t * input, vlib_cli_command_t * cmd)
{
clib_error_t *error = 0;
acl_main_t *am = &acl_main;
u32 acl_index = ~0;
(void) unformat (input, "index %u", &acl_index);
acl_plugin_show_acl (am, acl_index);
return error;
}
static void
acl_plugin_show_interface (acl_main_t * am, u32 sw_if_index, int show_acl)
{
vlib_main_t *vm = am->vlib_main;
u32 swi;
u32 *pj;
for (swi = 0; (swi < vec_len (am->input_acl_vec_by_sw_if_index)) ||
(swi < vec_len (am->output_acl_vec_by_sw_if_index)); swi++)
{
/* if we need a particular interface, skip all the others */
if ((sw_if_index != ~0) && (sw_if_index != swi))
continue;
vlib_cli_output (vm, "sw_if_index %d:\n", swi);
if ((swi < vec_len (am->input_acl_vec_by_sw_if_index)) &&
(vec_len (am->input_acl_vec_by_sw_if_index[swi]) > 0))
{
vlib_cli_output (vm, " input acl(s): %U", format_vec32,
am->input_acl_vec_by_sw_if_index[swi], "%d");
if (show_acl)
{
vlib_cli_output (vm, "\n");
vec_foreach (pj, am->input_acl_vec_by_sw_if_index[swi])
{
acl_print_acl (vm, am, *pj);
}
vlib_cli_output (vm, "\n");
}
}
if ((swi < vec_len (am->output_acl_vec_by_sw_if_index)) &&
(vec_len (am->output_acl_vec_by_sw_if_index[swi]) > 0))
{
vlib_cli_output (vm, " output acl(s): %U", format_vec32,
am->output_acl_vec_by_sw_if_index[swi], "%d");
if (show_acl)
{
vlib_cli_output (vm, "\n");
vec_foreach (pj, am->output_acl_vec_by_sw_if_index[swi])
{
acl_print_acl (vm, am, *pj);
}
vlib_cli_output (vm, "\n");
}
}
}
}
static clib_error_t *
acl_show_aclplugin_decode_5tuple_fn (vlib_main_t * vm,
unformat_input_t * input,
vlib_cli_command_t * cmd)
{
clib_error_t *error = 0;
u64 five_tuple[6] = { 0, 0, 0, 0, 0, 0 };
if (unformat
(input, "%llx %llx %llx %llx %llx %llx", &five_tuple[0], &five_tuple[1],
&five_tuple[2], &five_tuple[3], &five_tuple[4], &five_tuple[5]))
vlib_cli_output (vm, "5-tuple structure decode: %U\n\n",
format_acl_plugin_5tuple, five_tuple);
else
error = clib_error_return (0, "expecting 6 hex integers");
return error;
}
static clib_error_t *
acl_show_aclplugin_interface_fn (vlib_main_t * vm,
unformat_input_t *
input, vlib_cli_command_t * cmd)
{
clib_error_t *error = 0;
acl_main_t *am = &acl_main;
u32 sw_if_index = ~0;
(void) unformat (input, "sw_if_index %u", &sw_if_index);
int show_acl = unformat (input, "acl");
acl_plugin_show_interface (am, sw_if_index, show_acl);
return error;
}
static clib_error_t *
acl_show_aclplugin_memory_fn (vlib_main_t * vm,
unformat_input_t * input,
vlib_cli_command_t * cmd)
{
clib_error_t *error = 0;
acl_main_t *am = &acl_main;
vlib_cli_output (vm, "ACL plugin main heap statistics:\n");
if (am->acl_mheap)
{
vlib_cli_output (vm, " %U\n", format_mheap, am->acl_mheap, 1);
}
else
{
vlib_cli_output (vm, " Not initialized\n");
}
vlib_cli_output (vm, "ACL hash lookup support heap statistics:\n");
if (am->hash_lookup_mheap)
{
vlib_cli_output (vm, " %U\n", format_mheap, am->hash_lookup_mheap, 1);
}
else
{
vlib_cli_output (vm, " Not initialized\n");
}
return error;
}
static void
acl_plugin_show_sessions (acl_main_t * am,
u32 show_session_thread_id,
u32 show_session_session_index)
{
vlib_main_t *vm = am->vlib_main;
u16 wk;
vnet_interface_main_t *im = &am->vnet_main->interface_main;
vnet_sw_interface_t *swif;
{
u64 n_adds = am->fa_session_total_adds;
u64 n_dels = am->fa_session_total_dels;
vlib_cli_output (vm, "Sessions total: add %lu - del %lu = %lu", n_adds,
n_dels, n_adds - n_dels);
}
vlib_cli_output (vm, "\n\nPer-thread data:");
for (wk = 0; wk < vec_len (am->per_worker_data); wk++)
{
acl_fa_per_worker_data_t *pw = &am->per_worker_data[wk];
vlib_cli_output (vm, "Thread #%d:", wk);
if (show_session_thread_id == wk
&& show_session_session_index < pool_len (pw->fa_sessions_pool))
{
vlib_cli_output (vm, " session index %u:",
show_session_session_index);
fa_session_t *sess =
pw->fa_sessions_pool + show_session_session_index;
u64 *m = (u64 *) & sess->info;
vlib_cli_output (vm,
" info: %016llx %016llx %016llx %016llx %016llx %016llx",
m[0], m[1], m[2], m[3], m[4], m[5]);
vlib_cli_output (vm, " sw_if_index: %u", sess->sw_if_index);
vlib_cli_output (vm, " tcp_flags_seen: %x",
sess->tcp_flags_seen.as_u16);
vlib_cli_output (vm, " last active time: %lu",
sess->last_active_time);
vlib_cli_output (vm, " thread index: %u", sess->thread_index);
vlib_cli_output (vm, " link enqueue time: %lu",
sess->link_enqueue_time);
vlib_cli_output (vm, " link next index: %u",
sess->link_next_idx);
vlib_cli_output (vm, " link prev index: %u",
sess->link_prev_idx);
vlib_cli_output (vm, " link list id: %u", sess->link_list_id);
}
vlib_cli_output (vm, " connection add/del stats:");
/* *INDENT-OFF* */
pool_foreach (swif, im->sw_interfaces,
({
u32 sw_if_index = swif->sw_if_index;
u64 n_adds =
sw_if_index < vec_len (pw->fa_session_adds_by_sw_if_index) ?
pw->fa_session_adds_by_sw_if_index[sw_if_index] : 0;
u64 n_dels =
sw_if_index < vec_len (pw->fa_session_dels_by_sw_if_index) ?
pw->fa_session_dels_by_sw_if_index[sw_if_index] : 0;
vlib_cli_output (vm,
" sw_if_index %d: add %lu - del %lu = %lu",
sw_if_index, n_adds, n_dels, n_adds - n_dels);
}));
/* *INDENT-ON* */
vlib_cli_output (vm, " connection timeout type lists:");
u8 tt = 0;
for (tt = 0; tt < ACL_N_TIMEOUTS; tt++)
{
u32 head_session_index = pw->fa_conn_list_head[tt];
vlib_cli_output (vm, " fa_conn_list_head[%d]: %d", tt,
head_session_index);
if (~0 != head_session_index)
{
fa_session_t *sess = pw->fa_sessions_pool + head_session_index;
vlib_cli_output (vm, " last active time: %lu",
sess->last_active_time);
vlib_cli_output (vm, " link enqueue time: %lu",
sess->link_enqueue_time);
}
}
vlib_cli_output (vm, " Next expiry time: %lu", pw->next_expiry_time);
vlib_cli_output (vm, " Requeue until time: %lu",
pw->requeue_until_time);
vlib_cli_output (vm, " Current time wait interval: %lu",
pw->current_time_wait_interval);
vlib_cli_output (vm, " Count of deleted sessions: %lu",
pw->cnt_deleted_sessions);
vlib_cli_output (vm, " Delete already deleted: %lu",
pw->cnt_already_deleted_sessions);
vlib_cli_output (vm, " Session timers restarted: %lu",
pw->cnt_session_timer_restarted);
vlib_cli_output (vm, " Swipe until this time: %lu",
pw->swipe_end_time);
vlib_cli_output (vm, " sw_if_index serviced bitmap: %U",
format_bitmap_hex, pw->serviced_sw_if_index_bitmap);
vlib_cli_output (vm, " pending clear intfc bitmap : %U",
format_bitmap_hex,
pw->pending_clear_sw_if_index_bitmap);
vlib_cli_output (vm, " clear in progress: %u", pw->clear_in_process);
vlib_cli_output (vm, " interrupt is pending: %d",
pw->interrupt_is_pending);
vlib_cli_output (vm, " interrupt is needed: %d",
pw->interrupt_is_needed);
vlib_cli_output (vm, " interrupt is unwanted: %d",
pw->interrupt_is_unwanted);
vlib_cli_output (vm, " interrupt generation: %d",
pw->interrupt_generation);
}
vlib_cli_output (vm, "\n\nConn cleaner thread counters:");
#define _(cnt, desc) vlib_cli_output(vm, " %20lu: %s", am->cnt, desc);
foreach_fa_cleaner_counter;
#undef _
vlib_cli_output (vm, "Interrupt generation: %d",
am->fa_interrupt_generation);
vlib_cli_output (vm,
"Sessions per interval: min %lu max %lu increment: %f ms current: %f ms",
am->fa_min_deleted_sessions_per_interval,
am->fa_max_deleted_sessions_per_interval,
am->fa_cleaner_wait_time_increment * 1000.0,
((f64) am->fa_current_cleaner_timer_wait_interval) *
1000.0 / (f64) vm->clib_time.clocks_per_second);
}
static clib_error_t *
acl_show_aclplugin_sessions_fn (vlib_main_t * vm,
unformat_input_t * input,
vlib_cli_command_t * cmd)
{
clib_error_t *error = 0;
acl_main_t *am = &acl_main;
u32 show_bihash_verbose = 0;
u32 show_session_thread_id = ~0;
u32 show_session_session_index = ~0;
(void) unformat (input, "thread %u index %u", &show_session_thread_id,
&show_session_session_index);
(void) unformat (input, "verbose %u", &show_bihash_verbose);
acl_plugin_show_sessions (am, show_session_thread_id,
show_session_session_index);
show_fa_sessions_hash (vm, show_bihash_verbose);
return error;
}
static void
acl_plugin_show_tables_mask_type (acl_main_t * am)
{
vlib_main_t *vm = am->vlib_main;
ace_mask_type_entry_t *mte;
vlib_cli_output (vm, "Mask-type entries:");
/* *INDENT-OFF* */
pool_foreach(mte, am->ace_mask_type_pool,
({
vlib_cli_output(vm, " %3d: %016llx %016llx %016llx %016llx %016llx %016llx refcount %d",
mte - am->ace_mask_type_pool,
mte->mask.kv.key[0], mte->mask.kv.key[1], mte->mask.kv.key[2],
mte->mask.kv.key[3], mte->mask.kv.key[4], mte->mask.kv.value, mte->refcount);
}));
/* *INDENT-ON* */
}
static void
acl_plugin_show_tables_acl_hash_info (acl_main_t * am, u32 acl_index)
{
vlib_main_t *vm = am->vlib_main;
u32 i, j;
u64 *m;
vlib_cli_output (vm, "Mask-ready ACL representations\n");
for (i = 0; i < vec_len (am->hash_acl_infos); i++)
{
if ((acl_index != ~0) && (acl_index != i))
{
continue;
}
hash_acl_info_t *ha = &am->hash_acl_infos[i];
vlib_cli_output (vm, "acl-index %u bitmask-ready layout\n", i);
vlib_cli_output (vm, " applied inbound on sw_if_index list: %U\n",
format_vec32, ha->inbound_sw_if_index_list, "%d");
vlib_cli_output (vm, " applied outbound on sw_if_index list: %U\n",
format_vec32, ha->outbound_sw_if_index_list, "%d");
vlib_cli_output (vm, " mask type index bitmap: %U\n",
format_bitmap_hex, ha->mask_type_index_bitmap);
for (j = 0; j < vec_len (ha->rules); j++)
{
hash_ace_info_t *pa = &ha->rules[j];
m = (u64 *) & pa->match;
vlib_cli_output (vm,
" %4d: %016llx %016llx %016llx %016llx %016llx %016llx mask index %d acl %d rule %d action %d src/dst portrange not ^2: %d,%d\n",
j, m[0], m[1], m[2], m[3], m[4], m[5],
pa->mask_type_index, pa->acl_index, pa->ace_index,
pa->action, pa->src_portrange_not_powerof2,
pa->dst_portrange_not_powerof2);
}
}
}
static void
acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae)
{
vlib_cli_output (vm,
" %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d hitcount %lld",
j, pae->acl_index, pae->ace_index, pae->action,
pae->hash_ace_info_index, pae->next_applied_entry_index,
pae->prev_applied_entry_index,
pae->tail_applied_entry_index, pae->hitcount);
}
static void
acl_plugin_show_tables_applied_info (acl_main_t * am, u32 sw_if_index)
{
vlib_main_t *vm = am->vlib_main;
u32 swi, j;
vlib_cli_output (vm, "Applied lookup entries for interfaces");
for (swi = 0;
(swi < vec_len (am->input_applied_hash_acl_info_by_sw_if_index))
|| (swi < vec_len (am->output_applied_hash_acl_info_by_sw_if_index))
|| (swi < vec_len (am->input_hash_entry_vec_by_sw_if_index))
|| (swi < vec_len (am->output_hash_entry_vec_by_sw_if_index)); swi++)
{
if ((sw_if_index != ~0) && (sw_if_index != swi))
{
continue;
}
vlib_cli_output (vm, "sw_if_index %d:", swi);
if (swi < vec_len (am->input_applied_hash_acl_info_by_sw_if_index))
{
applied_hash_acl_info_t *pal =
&am->input_applied_hash_acl_info_by_sw_if_index[swi];
vlib_cli_output (vm, " input lookup mask_type_index_bitmap: %U",
format_bitmap_hex, pal->mask_type_index_bitmap);
vlib_cli_output (vm, " input applied acls: %U", format_vec32,
pal->applied_acls, "%d");
}
if (swi < vec_len (am->input_hash_entry_vec_by_sw_if_index))
{
vlib_cli_output (vm, " input lookup applied entries:");
for (j = 0;
j < vec_len (am->input_hash_entry_vec_by_sw_if_index[swi]);
j++)
{
acl_plugin_print_pae (vm, j,
&am->input_hash_entry_vec_by_sw_if_index
[swi][j]);
}
}
if (swi < vec_len (am->output_applied_hash_acl_info_by_sw_if_index))
{
applied_hash_acl_info_t *pal =
&am->output_applied_hash_acl_info_by_sw_if_index[swi];
vlib_cli_output (vm, " output lookup mask_type_index_bitmap: %U",
format_bitmap_hex, pal->mask_type_index_bitmap);
vlib_cli_output (vm, " output applied acls: %U", format_vec32,
pal->applied_acls, "%d");
}
if (swi < vec_len (am->output_hash_entry_vec_by_sw_if_index))
{
vlib_cli_output (vm, " output lookup applied entries:");
for (j = 0;
j < vec_len (am->output_hash_entry_vec_by_sw_if_index[swi]);
j++)
{
acl_plugin_print_pae (vm, j,
&am->output_hash_entry_vec_by_sw_if_index
[swi][j]);
}
}
}
}
static void
acl_plugin_show_tables_bihash (acl_main_t * am, u32 show_bihash_verbose)
{
vlib_main_t *vm = am->vlib_main;
show_hash_acl_hash (vm, am, show_bihash_verbose);
}
static clib_error_t *
acl_show_aclplugin_tables_fn (vlib_main_t * vm,
unformat_input_t * input,
vlib_cli_command_t * cmd)
{
clib_error_t *error = 0;
acl_main_t *am = &acl_main;
u32 acl_index = ~0;
u32 sw_if_index = ~0;
int show_acl_hash_info = 0;
int show_applied_info = 0;
int show_mask_type = 0;
int show_bihash = 0;
u32 show_bihash_verbose = 0;
if (unformat (input, "acl"))
{
show_acl_hash_info = 1;
/* mask-type is handy to see as well right there */
show_mask_type = 1;
unformat (input, "index %u", &acl_index);
}
else if (unformat (input, "applied"))
{
show_applied_info = 1;
unformat (input, "sw_if_index %u", &sw_if_index);
}
else if (unformat (input, "mask"))
{
show_mask_type = 1;
}
else if (unformat (input, "hash"))
{
show_bihash = 1;
unformat (input, "verbose %u", &show_bihash_verbose);
}
if (!
(show_mask_type || show_acl_hash_info || show_applied_info
|| show_bihash))
{
/* if no qualifiers specified, show all */
show_mask_type = 1;
show_acl_hash_info = 1;
show_applied_info = 1;
show_bihash = 1;
}
if (show_mask_type)
acl_plugin_show_tables_mask_type (am);
if (show_acl_hash_info)
acl_plugin_show_tables_acl_hash_info (am, acl_index);
if (show_applied_info)
acl_plugin_show_tables_applied_info (am, sw_if_index);
if (show_bihash)
acl_plugin_show_tables_bihash (am, show_bihash_verbose);
return error;
}
static clib_error_t *
acl_clear_aclplugin_fn (vlib_main_t * vm,
unformat_input_t * input, vlib_cli_command_t * cmd)
{
clib_error_t *error = 0;
acl_main_t *am = &acl_main;
vlib_process_signal_event (am->vlib_main, am->fa_cleaner_node_index,
ACL_FA_CLEANER_DELETE_BY_SW_IF_INDEX, ~0);
return error;
}
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (aclplugin_set_command, static) = {
.path = "set acl-plugin",
.short_help = "set acl-plugin session timeout {{udp idle}|tcp {idle|transient}} <seconds>",
.function = acl_set_aclplugin_fn,
};
VLIB_CLI_COMMAND (aclplugin_show_acl_command, static) = {
.path = "show acl-plugin acl",
.short_help = "show acl-plugin acl [index N]",
.function = acl_show_aclplugin_acl_fn,
};
VLIB_CLI_COMMAND (aclplugin_show_decode_5tuple_command, static) = {
.path = "show acl-plugin decode 5tuple",
.short_help = "show acl-plugin decode 5tuple XXXX XXXX XXXX XXXX XXXX XXXX",
.function = acl_show_aclplugin_decode_5tuple_fn,
};
VLIB_CLI_COMMAND (aclplugin_show_interface_command, static) = {
.path = "show acl-plugin interface",
.short_help = "show acl-plugin interface [sw_if_index N] [acl]",
.function = acl_show_aclplugin_interface_fn,
};
VLIB_CLI_COMMAND (aclplugin_show_memory_command, static) = {
.path = "show acl-plugin memory",
.short_help = "show acl-plugin memory",
.function = acl_show_aclplugin_memory_fn,
};
VLIB_CLI_COMMAND (aclplugin_show_sessions_command, static) = {
.path = "show acl-plugin sessions",
.short_help = "show acl-plugin sessions",
.function = acl_show_aclplugin_sessions_fn,
};
VLIB_CLI_COMMAND (aclplugin_show_tables_command, static) = {
.path = "show acl-plugin tables",
.short_help = "show acl-plugin tables [ acl [index N] | applied [ sw_if_index N ] | mask | hash [verbose N] ]",
.function = acl_show_aclplugin_tables_fn,
};
VLIB_CLI_COMMAND (aclplugin_show_macip_acl_command, static) = {
.path = "show acl-plugin macip acl",
.short_help = "show acl-plugin macip acl",
.function = acl_show_aclplugin_macip_acl_fn,
};
VLIB_CLI_COMMAND (aclplugin_show_macip_interface_command, static) = {
.path = "show acl-plugin macip interface",
.short_help = "show acl-plugin macip interface",
.function = acl_show_aclplugin_macip_interface_fn,
};
VLIB_CLI_COMMAND (aclplugin_clear_command, static) = {
.path = "clear acl-plugin sessions",
.short_help = "clear acl-plugin sessions",
.function = acl_clear_aclplugin_fn,
};
/* *INDENT-ON* */
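/*
 * Handler for the "acl-plugin" startup.conf section. An illustrative
 * stanza (values are examples only; parameter names come from the
 * unformat strings below):
 *
 *   acl-plugin {
 *     connection hash buckets 65536
 *     connection count max 500000
 *     main heap size 268435456
 *   }
 */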
static clib_error_t *
acl_plugin_config (vlib_main_t * vm, unformat_input_t * input)
{
acl_main_t *am = &acl_main;
u32 conn_table_hash_buckets;
u32 conn_table_hash_memory_size;
u32 conn_table_max_entries;
u32 main_heap_size;
u32 hash_heap_size;
u32 hash_lookup_hash_buckets;
u32 hash_lookup_hash_memory;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat
(input, "connection hash buckets %d", &conn_table_hash_buckets))
am->fa_conn_table_hash_num_buckets = conn_table_hash_buckets;
else if (unformat (input, "connection hash memory %d",
&conn_table_hash_memory_size))
am->fa_conn_table_hash_memory_size = conn_table_hash_memory_size;
else if (unformat (input, "connection count max %d",
&conn_table_max_entries))
am->fa_conn_table_max_entries = conn_table_max_entries;
else if (unformat (input, "main heap size %d", &main_heap_size))
am->acl_mheap_size = main_heap_size;
else if (unformat (input, "hash lookup heap size %d", &hash_heap_size))
am->hash_lookup_mheap_size = hash_heap_size;
else if (unformat (input, "hash lookup hash buckets %d",
&hash_lookup_hash_buckets))
am->hash_lookup_hash_buckets = hash_lookup_hash_buckets;
else if (unformat (input, "hash lookup hash memory %d",
&hash_lookup_hash_memory))
am->hash_lookup_hash_memory = hash_lookup_hash_memory;
else
return clib_error_return (0, "unknown input '%U'",
format_unformat_error, input);
}
return 0;
}
VLIB_CONFIG_FUNCTION (acl_plugin_config, "acl-plugin");
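/*
 * Plugin init: register the API messages, hook up the L2 classify nodes,
 * set the default heap/hash/timeout parameters and initialize the
 * per-worker connection list heads and tails to ~0 (empty).
 */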
static clib_error_t *
acl_init (vlib_main_t * vm)
{
acl_main_t *am = &acl_main;
clib_error_t *error = 0;
memset (am, 0, sizeof (*am));
am->vlib_main = vm;
am->vnet_main = vnet_get_main ();
u8 *name = format (0, "acl_%08x%c", api_version, 0);
/* Ask for a correctly-sized block of API message decode slots */
am->msg_id_base = vl_msg_api_get_msg_ids ((char *) name,
VL_MSG_FIRST_AVAILABLE);
error = acl_plugin_api_hookup (vm);
/* Add our API messages to the global name_crc hash table */
setup_message_id_table (am, &api_main);
vec_free (name);
acl_setup_fa_nodes ();
am->acl_mheap_size = ACL_FA_DEFAULT_HEAP_SIZE;
am->hash_lookup_mheap_size = ACL_PLUGIN_HASH_LOOKUP_HEAP_SIZE;
am->hash_lookup_hash_buckets = ACL_PLUGIN_HASH_LOOKUP_HASH_BUCKETS;
am->hash_lookup_hash_memory = ACL_PLUGIN_HASH_LOOKUP_HASH_MEMORY;
am->session_timeout_sec[ACL_TIMEOUT_TCP_TRANSIENT] =
TCP_SESSION_TRANSIENT_TIMEOUT_SEC;
am->session_timeout_sec[ACL_TIMEOUT_TCP_IDLE] =
TCP_SESSION_IDLE_TIMEOUT_SEC;
am->session_timeout_sec[ACL_TIMEOUT_UDP_IDLE] =
UDP_SESSION_IDLE_TIMEOUT_SEC;
am->fa_conn_table_hash_num_buckets =
ACL_FA_CONN_TABLE_DEFAULT_HASH_NUM_BUCKETS;
am->fa_conn_table_hash_memory_size =
ACL_FA_CONN_TABLE_DEFAULT_HASH_MEMORY_SIZE;
am->fa_conn_table_max_entries = ACL_FA_CONN_TABLE_DEFAULT_MAX_ENTRIES;
vlib_thread_main_t *tm = vlib_get_thread_main ();
vec_validate (am->per_worker_data, tm->n_vlib_mains - 1);
{
u16 wk;
u8 tt;
for (wk = 0; wk < vec_len (am->per_worker_data); wk++)
{
acl_fa_per_worker_data_t *pw = &am->per_worker_data[wk];
vec_validate (pw->fa_conn_list_head, ACL_N_TIMEOUTS - 1);
vec_validate (pw->fa_conn_list_tail, ACL_N_TIMEOUTS - 1);
for (tt = 0; tt < ACL_N_TIMEOUTS; tt++)
{
pw->fa_conn_list_head[tt] = ~0;
pw->fa_conn_list_tail[tt] = ~0;
}
}
}
am->fa_min_deleted_sessions_per_interval =
ACL_FA_DEFAULT_MIN_DELETED_SESSIONS_PER_INTERVAL;
am->fa_max_deleted_sessions_per_interval =
ACL_FA_DEFAULT_MAX_DELETED_SESSIONS_PER_INTERVAL;
am->fa_cleaner_wait_time_increment =
ACL_FA_DEFAULT_CLEANER_WAIT_TIME_INCREMENT;
am->fa_cleaner_cnt_delete_by_sw_index = 0;
am->fa_cleaner_cnt_delete_by_sw_index_ok = 0;
am->fa_cleaner_cnt_unknown_event = 0;
am->fa_cleaner_cnt_timer_restarted = 0;
am->fa_cleaner_cnt_wait_with_timeout = 0;
#define _(N, v, s) am->fa_ipv6_known_eh_bitmap = clib_bitmap_set(am->fa_ipv6_known_eh_bitmap, v, 1);
foreach_acl_eh
#undef _
am->l4_match_nonfirst_fragment = 1;
/* use the new fancy hash-based matching */
am->use_hash_acl_matching = 1;
return error;
}
VLIB_INIT_FUNCTION (acl_init);
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/
|