#!/usr/bin/env python

import unittest
import socket

from framework import VppTestCase, VppTestRunner
from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
    VppMplsIpBind, VppIpMRoute, VppMRoutePath, \
    MRouteItfFlags, MRouteEntryFlags
from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface

from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ICMP
from scapy.layers.inet6 import IPv6
from scapy.contrib.mpls import MPLS


class TestMPLS(VppTestCase):
    """ MPLS Test Case """

    def setUp(self):
        super(TestMPLS, self).setUp()

        # create 4 pg interfaces
        self.create_pg_interfaces(range(4))

        # setup all the interfaces and
        # assign each to a different table
        table_id = 0

        for i in self.pg_interfaces:
            i.admin_up()
            i.set_table_ip4(table_id)
            i.set_table_ip6(table_id)
            i.config_ip4()
            i.resolve_arp()
            i.config_ip6()
            i.resolve_ndp()
            i.enable_mpls()
            table_id += 1

    def tearDown(self):
        super(TestMPLS, self).tearDown()
        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.unconfig_ip6()
            i.ip6_disable()
            i.admin_down()

    # a stream of 'n' MPLS-labelled IPv4 (or ICMP echo) packets;
    # the MPLS TTL defaults to 255
    def create_stream_labelled_ip4(
            self,
            src_if,
            mpls_labels,
            mpls_ttl=255,
            ping=0,
            ip_itf=None,
            dst_ip=None,
            n=257):
        self.reset_packet_infos()
        pkts = []
        for i in range(0, n):
            info = self.create_packet_info(src_if, src_if)
            payload = self.info_to_payload(info)
            p = Ether(dst=src_if.local_mac, src=src_if.remote_mac)

            for ii in range(len(mpls_labels)):
                if ii == len(mpls_labels) - 1:
                    p = p / MPLS(label=mpls_labels[ii], ttl=mpls_ttl, s=1)
                else:
                    p = p / MPLS(label=mpls_labels[ii], ttl=mpls_ttl, s=0)
            if not ping:
                if not dst_ip:
                    p = (p / IP(src=src_if.local_ip4, dst=src_if.remote_ip4) /
                         UDP(sport=1234, dport=1234) /
                         Raw(payload))
                else:
                    p = (p / IP(src=src_if.local_ip4, dst=dst_ip) /
                         UDP(sport=1234, dport=1234) /
                         Raw(payload))
            else:
                p = (p / IP(src=ip_itf.remote_ip4,
                            dst=ip_itf.local_ip4) /
                     ICMP())

            info.data = p.copy()
            pkts.append(p)
        return pkts

    def create_stream_ip4(self, src_if, dst_ip):
        self.reset_packet_infos()
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if, src_if)
            payload = self.info_to_payload(info)
            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                 IP(src=src_if.remote_ip4, dst=dst_ip) /
                 UDP(sport=1234, dport=1234) /
                 Raw(payload))
            info.data = p.copy()
            pkts.append(p)
        return pkts

    def create_stream_labelled_ip6(self, src_if, mpls_label, mpls_ttl):
        self.reset_packet_infos()
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if, src_if)
            payload = self.info_to_payload(info)
            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                 MPLS(label=mpls_label, ttl=mpls_ttl) /
                 IPv6(src=src_if.remote_ip6, dst=src_if.remote_ip6) /
                 UDP(sport=1234, dport=1234) /
                 Raw(payload))
            info.data = p.copy()
            pkts.append(p)
        return pkts

    @staticmethod
    def verify_filter(capture, sent):
        if not len(capture) == len(sent):
            # filter out any IPv6 RAs from the capture
            # iterate over a copy, since removing from the list we are
            # iterating over would skip elements
            for p in list(capture):
                if p.haslayer(IPv6):
                    capture.remove(p)
        return capture

    def verify_capture_ip4(self, src_if, capture, sent, ping_resp=0):
        capture = self.verify_filter(capture, sent)

        self.assertEqual(len(capture), len(sent))

        for i in range(len(capture)):
            tx = sent[i]
            rx = capture[i]

            # the rx'd packet has the MPLS label popped
            eth = rx[Ether]
            self.assertEqual(eth.type, 0x800)

            tx_ip = tx[IP]
            rx_ip = rx[IP]

            if not ping_resp:
                self.assertEqual(rx_ip.src, tx_ip.src)
                self.assertEqual(rx_ip.dst, tx_ip.dst)
                # IP processing post pop has decremented the TTL
                self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
            else:
                self.assertEqual(rx_ip.src, tx_ip.dst)
                self.assertEqual(rx_ip.dst, tx_ip.src)

    def verify_mpls_stack(self, rx, mpls_labels, ttl=255, num=0):
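        # check the label stack on the rx'd packet; the label at index
        # 'num' should carry the given 'ttl', all others 255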
        # the rx'd packet is still MPLS labelled
        eth = rx[Ether]
        self.assertEqual(eth.type, 0x8847)

        rx_mpls = rx[MPLS]

        for ii in range(len(mpls_labels)):
            self.assertEqual(rx_mpls.label, mpls_labels[ii])
            self.assertEqual(rx_mpls.cos, 0)
            if ii == num:
                self.assertEqual(rx_mpls.ttl, ttl)
            else:
                self.assertEqual(rx_mpls.ttl, 255)

            if ii == len(mpls_labels) - 1:
                self.assertEqual(rx_mpls.s, 1)
            else:
                # not end of stack
                self.assertEqual(rx_mpls.s, 0)
                # pop the label to expose the next
                rx_mpls = rx_mpls[MPLS].payload

    def verify_capture_labelled_ip4(self, src_if, capture, sent,
                                    mpls_labels):
        capture = self.verify_filter(capture, sent)

        self.assertEqual(len(capture), len(sent))

        for i in range(len(capture)):
            tx = sent[i]
            rx = capture[i]
            tx_ip = tx[IP]
            rx_ip = rx[IP]

            # the MPLS TTL is copied from the IP
            self.verify_mpls_stack(
                rx, mpls_labels, rx_ip.ttl, len(mpls_labels) - 1)

            self.assertEqual(rx_ip.src, tx_ip.src)
            self.assertEqual(rx_ip.dst, tx_ip.dst)
            # IP processing has decremented the TTL before imposition
            self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)

    def verify_capture_tunneled_ip4(self, src_if, capture, sent, mpls_labels):
        capture = self.verify_filter(capture, sent)

        self.assertEqual(len(capture), len(sent))

        for i in range(len(capture)):
            tx = sent[i]
            rx = capture[i]
            tx_ip = tx[IP]
            rx_ip = rx[IP]

            # the MPLS TTL is 255 since it enters a new tunnel
            self.verify_mpls_stack(
                rx, mpls_labels, 255, len(mpls_labels) - 1)

            self.assertEqual(rx_ip.src, tx_ip.src)
            self.assertEqual(rx_ip.dst, tx_ip.dst)
            # IP processing has decremented the TTL before the encap
            self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)

    def verify_capture_labelled(self, src_if, capture, sent,
                                mpls_labels, ttl=254, num=0):
        capture = self.verify_filter(capture, sent)

        self.assertEqual(len(capture), len(sent))

        for i in range(len(capture)):
            rx = capture[i]
            self.verify_mpls_stack(rx, mpls_labels, ttl, num)

    def verify_capture_ip6(self, src_if, capture, sent):
        self.assertEqual(len(capture), len(sent))

        for i in range(len(capture)):
            tx = sent[i]
            rx = capture[i]

            # the rx'd packet has the MPLS label popped
            eth = rx[Ether]
            self.assertEqual(eth.type, 0x86DD)

            tx_ip = tx[IPv6]
            rx_ip = rx[IPv6]

            self.assertEqual(rx_ip.src, tx_ip.src)
            self.assertEqual(rx_ip.dst, tx_ip.dst)
            # IP processing post pop has decremented the hop-limit
            self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)

    def send_and_assert_no_replies(self, intf, pkts, remark):
        intf.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        for i in self.pg_interfaces:
            i.assert_nothing_captured(remark=remark)

    def test_swap(self):
        """ MPLS label swap tests """

        #
        # A simple MPLS xconnect - eos label in label out
        #
        route_32_eos = VppMplsRoute(self, 32, 1,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[33])])
        route_32_eos.add_vpp_config()

        #
        # a stream that matches the MPLS route for label 32
        # PG0 is in the default table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [32])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled(self.pg0, rx, tx, [33])

        #
        # A simple MPLS xconnect - non-eos label in label out
        #
        route_32_neos = VppMplsRoute(self, 32, 0,
                                     [VppRoutePath(self.pg0.remote_ip4,
                                                   self.pg0.sw_if_index,
                                                   labels=[33])])
        route_32_neos.add_vpp_config()

        #
        # a 2-label stream whose top label matches the non-eos route for 32
        # PG0 is in the default table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [32, 99])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled(self.pg0, rx, tx, [33, 99])

        #
        # An MPLS xconnect - EOS label in IP out
        #
        route_33_eos = VppMplsRoute(self, 33, 1,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[])])
        route_33_eos.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [33])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_ip4(self.pg0, rx, tx)

        #
        # An MPLS xconnect - non-EOS label in IP out - an invalid configuration
        # so this traffic should be dropped.
        #
        route_33_neos = VppMplsRoute(self, 33, 0,
                                     [VppRoutePath(self.pg0.remote_ip4,
                                                   self.pg0.sw_if_index,
                                                   labels=[])])
        route_33_neos.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [33, 99])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        self.pg0.assert_nothing_captured(
            remark="MPLS non-EOS packets popped and forwarded")

        #
        # A recursive EOS x-connect, which resolves through another x-connect
        #
        route_34_eos = VppMplsRoute(self, 34, 1,
                                    [VppRoutePath("0.0.0.0",
                                                  0xffffffff,
                                                  nh_via_label=32,
                                                  labels=[44, 45])])
        route_34_eos.add_vpp_config()

        tx = self.create_stream_labelled_ip4(self.pg0, [34])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
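        # the eos label 34 is swapped for the stack [44, 45]; the
        # recursion via the label-32 x-connect then imposes 33 on top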
        self.verify_capture_labelled(self.pg0, rx, tx, [33, 44, 45], num=2)

        #
        # A recursive non-EOS x-connect, which resolves through another
        # x-connect
        #
        route_34_neos = VppMplsRoute(self, 34, 0,
                                     [VppRoutePath("0.0.0.0",
                                                   0xffffffff,
                                                   nh_via_label=32,
                                                   labels=[44, 46])])
        route_34_neos.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [34, 99])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        # it's the 2nd (counting from 0) label in the stack that is swapped
        self.verify_capture_labelled(self.pg0, rx, tx, [33, 44, 46, 99], num=2)

        #
        # a recursive IP route that resolves through the recursive non-eos
        # x-connect
        #
        ip_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                 [VppRoutePath("0.0.0.0",
                                               0xffffffff,
                                               nh_via_label=34,
                                               labels=[55])])
        ip_10_0_0_1.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled_ip4(self.pg0, rx, tx, [33, 44, 46, 55])

        ip_10_0_0_1.remove_vpp_config()
        route_34_neos.remove_vpp_config()
        route_34_eos.remove_vpp_config()
        route_33_neos.remove_vpp_config()
        route_33_eos.remove_vpp_config()
        route_32_neos.remove_vpp_config()
        route_32_eos.remove_vpp_config()

    def test_bind(self):
        """ MPLS Local Label Binding test """

        #
        # Add a non-recursive route with a single out label
        #
        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[45])])
        route_10_0_0_1.add_vpp_config()

        # bind a local label to the route
        binding = VppMplsIpBind(self, 44, "10.0.0.1", 32)
        binding.add_vpp_config()

        # non-EOS stream
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [44, 99])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled(self.pg0, rx, tx, [45, 99])

        # EOS stream
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [44])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled(self.pg0, rx, tx, [45])

        # IP stream
        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled_ip4(self.pg0, rx, tx, [45])

        #
        # cleanup
        #
        binding.remove_vpp_config()
        route_10_0_0_1.remove_vpp_config()

    def test_imposition(self):
        """ MPLS label imposition test """

        #
        # Add a non-recursive route with a single out label
        #
        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[32])])
        route_10_0_0_1.add_vpp_config()

        #
        # a stream that matches the route for 10.0.0.1
        # PG0 is in the default table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32])

        #
        # Add a non-recursive route with 3 out labels
        #
        route_10_0_0_2 = VppIpRoute(self, "10.0.0.2", 32,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[32, 33, 34])])
        route_10_0_0_2.add_vpp_config()

        #
        # a stream that matches the route for 10.0.0.2
        # PG0 is in the default table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.2")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32, 33, 34])

        #
        # add a recursive path, with an output label, via the 1-label route
        #
        route_11_0_0_1 = VppIpRoute(self, "11.0.0.1", 32,
                                    [VppRoutePath("10.0.0.1",
                                                  0xffffffff,
                                                  labels=[44])])
        route_11_0_0_1.add_vpp_config()

        #
        # a stream that matches the route for 11.0.0.1, should pick up
        # the label stack for 11.0.0.1 and 10.0.0.1
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "11.0.0.1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32, 44])

        #
        # add a recursive path, with 2 labels, via the 3-label route
        #
        route_11_0_0_2 = VppIpRoute(self, "11.0.0.2", 32,
                                    [VppRoutePath("10.0.0.2",
                                                  0xffffffff,
                                                  labels=[44, 45])])
        route_11_0_0_2.add_vpp_config()

        #
        # a stream that matches the route for 11.0.0.2, should pick up
        # the label stack for 11.0.0.2 and 10.0.0.2
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "11.0.0.2")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled_ip4(
            self.pg0, rx, tx, [32, 33, 34, 44, 45])

        #
        # cleanup
        #
        route_11_0_0_2.remove_vpp_config()
        route_11_0_0_1.remove_vpp_config()
        route_10_0_0_2.remove_vpp_config()
        route_10_0_0_1.remove_vpp_config()

    def test_tunnel(self):
        """ MPLS Tunnel Tests """

        #
        # Create a tunnel with a 2-label stack
        #
        mpls_tun = VppMPLSTunnelInterface(self,
                                          [VppRoutePath(self.pg0.remote_ip4,
                                                        self.pg0.sw_if_index,
                                                        labels=[44, 46])])
        mpls_tun.add_vpp_config()
        mpls_tun.admin_up()

        #
        # add an unlabelled route through the new tunnel
        #
        route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
                                    [VppRoutePath("0.0.0.0",
                                                  mpls_tun._sw_if_index)])
        route_10_0_0_3.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.3")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [44, 46])

    def test_v4_exp_null(self):
        """ MPLS V4 Explicit NULL test """

        #
        # The first test case has an MPLS TTL of 0
        # all packets should be dropped
        #
        tx = self.create_stream_labelled_ip4(self.pg0, [0], 0)
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        self.pg0.assert_nothing_captured(remark="MPLS TTL=0 packets forwarded")

        #
        # a stream with a non-zero MPLS TTL
        # PG0 is in the default table
        #
        tx = self.create_stream_labelled_ip4(self.pg0, [0])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_ip4(self.pg0, rx, tx)

        #
        # a stream with a non-zero MPLS TTL
        # PG1 is in table 1
        # we are ensuring the post-pop lookup occurs in the VRF table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg1, [0])
        self.pg1.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg1.get_capture()
        self.verify_capture_ip4(self.pg1, rx, tx)

    def test_v6_exp_null(self):
        """ MPLS V6 Explicit NULL test """

        #
        # a stream with a non-zero MPLS TTL
        # PG0 is in the default table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip6(self.pg0, 2, 2)
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_ip6(self.pg0, rx, tx)

        #
        # a stream with a non-zero MPLS TTL
        # PG1 is in table 1
        # we are ensuring the post-pop lookup occurs in the VRF table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip6(self.pg1, 2, 2)
        self.pg1.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg1.get_capture()
        self.verify_capture_ip6(self.pg1, rx, tx)

    def test_deag(self):
        """ MPLS Deagg """

        #
        # A de-agg route - next-hop lookup in default table
        #
        route_34_eos = VppMplsRoute(self, 34, 1,
                                    [VppRoutePath("0.0.0.0",
                                                  0xffffffff,
                                                  nh_table_id=0)])
        route_34_eos.add_vpp_config()

        #
        # ping an interface in the default table
        # PG0 is in the default table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [34], ping=1,
                                             ip_itf=self.pg0)
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_ip4(self.pg0, rx, tx, ping_resp=1)

        #
        # A de-agg route - next-hop lookup in non-default table
        #
        route_35_eos = VppMplsRoute(self, 35, 1,
                                    [VppRoutePath("0.0.0.0",
                                                  0xffffffff,
                                                  nh_table_id=1)])
        route_35_eos.add_vpp_config()

        #
        # ping an interface in the non-default table
        # PG0 is in the default table. packets arrive labelled in the
        # default table and egress unlabelled in the non-default table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(
            self.pg0, [35], ping=1, ip_itf=self.pg1)
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        packet_count = self.get_packet_count_for_if_idx(self.pg0.sw_if_index)
        rx = self.pg1.get_capture(packet_count)
        self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1)

        route_35_eos.remove_vpp_config()
        route_34_eos.remove_vpp_config()

    def test_interface_rx(self):
        """ MPLS Interface Receive """

        #
        # Add a non-recursive route that will forward the traffic
        # post-interface-rx
        #
        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                    table_id=1,
                                    paths=[VppRoutePath(self.pg1.remote_ip4,
                                                        self.pg1.sw_if_index)])
        route_10_0_0_1.add_vpp_config()

        #
        # An interface-receive label that maps traffic to RX on interface
        # pg1. The packet is injected on pg0, which is in table 0; the
        # interface-rx makes it appear received on pg1 and so match the
        # route in table 1. If the packet egresses, we must have switched
        # to pg1 and matched the route in table 1.
        #
        route_34_eos = VppMplsRoute(self, 34, 1,
                                    [VppRoutePath("0.0.0.0",
                                                  self.pg1.sw_if_index,
                                                  is_interface_rx=1)])
        route_34_eos.add_vpp_config()

        #
        # inject labelled packets on pg0; post interface-rx they should
        # match the route in table 1 and egress on pg1
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [34], n=257,
                                             dst_ip="10.0.0.1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg1.get_capture(257)
        self.verify_capture_ip4(self.pg1, rx, tx)

    def test_mcast_mid_point(self):
        """ MPLS Multicast Mid Point """

        #
        # Add a non-recursive route that will forward the traffic
        # post-interface-rx
        #
        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                    table_id=1,
                                    paths=[VppRoutePath(self.pg1.remote_ip4,
                                                        self.pg1.sw_if_index)])
        route_10_0_0_1.add_vpp_config()

        #
        # Add a mcast entry that replicates to pg2 and pg3
        # and to an interface-rx (like a bud node would)
        #
        route_3400_eos = VppMplsRoute(self, 3400, 1,
                                      [VppRoutePath(self.pg2.remote_ip4,
                                                    self.pg2.sw_if_index,
                                                    labels=[3401]),
                                       VppRoutePath(self.pg3.remote_ip4,
                                                    self.pg3.sw_if_index,
                                                    labels=[3402]),
                                       VppRoutePath("0.0.0.0",
                                                    self.pg1.sw_if_index,
                                                    is_interface_rx=1)],
                                      is_multicast=1)
        route_3400_eos.add_vpp_config()

        #
        # inject labelled packets on pg0; expect replication to pg2 and
        # pg3, and the disposed (bud) copy to egress on pg1
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [3400], n=257,
                                             dst_ip="10.0.0.1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg1.get_capture(257)
        self.verify_capture_ip4(self.pg1, rx, tx)

        rx = self.pg2.get_capture(257)
        self.verify_capture_labelled(self.pg2, rx, tx, [3401])
        rx = self.pg3.get_capture(257)
        self.verify_capture_labelled(self.pg3, rx, tx, [3402])

    def test_mcast_head(self):
        """ MPLS Multicast Head-end """

        #
        # Create a multicast tunnel with two replications
        #
        mpls_tun = VppMPLSTunnelInterface(self,
                                          [VppRoutePath(self.pg2.remote_ip4,
                                                        self.pg2.sw_if_index,
                                                        labels=[42]),
                                           VppRoutePath(self.pg3.remote_ip4,
                                                        self.pg3.sw_if_index,
                                                        labels=[43])],
                                          is_multicast=1)
        mpls_tun.add_vpp_config()
        mpls_tun.admin_up()

        #
        # add an unlabelled route through the new tunnel
        #
        route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
                                    [VppRoutePath("0.0.0.0",
                                                  mpls_tun._sw_if_index)])
        route_10_0_0_3.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.3")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg2.get_capture(257)
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [42])
        rx = self.pg3.get_capture(257)
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [43])

        #
        # Add an IP multicast route, a (*,G), via the tunnel;
        # one accepting interface, pg0, and one forwarding interface,
        # the tunnel
        #
        route_232_1_1_1 = VppIpMRoute(
            self,
            "0.0.0.0",
            "232.1.1.1", 32,
            MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
            [VppMRoutePath(self.pg0.sw_if_index,
                           MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
             VppMRoutePath(mpls_tun._sw_if_index,
                           MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
        route_232_1_1_1.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "232.1.1.1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg2.get_capture(257)
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [42])
        rx = self.pg3.get_capture(257)
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [43])

    def test_mcast_tail(self):
        """ MPLS Multicast Tail """

        #
        # Add a multicast route that will forward the traffic
        # post-disposition
        #
        route_232_1_1_1 = VppIpMRoute(
            self,
            "0.0.0.0",
            "232.1.1.1", 32,
            MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
            table_id=1,
            paths=[VppMRoutePath(self.pg1.sw_if_index,
                                 MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
        route_232_1_1_1.add_vpp_config()

        #
        # An MPLS route that disposes the packet into table 1 carrying
        # RPF-ID 55. The packet is injected on pg0, which is in table 0.
        # The disposition checks the RPF-ID against that of the mroute
        # matched in table 1; if the packet egresses then the mroute was
        # matched and the RPF-ID check passed.
        #
        route_34_eos = VppMplsRoute(self, 34, 1,
                                    [VppRoutePath("0.0.0.0",
                                                  self.pg1.sw_if_index,
                                                  nh_table_id=1,
                                                  rpf_id=55)],
                                    is_multicast=1)

        route_34_eos.add_vpp_config()

        #
        # Drop due to interface lookup miss
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [34],
                                             dst_ip="232.1.1.1", n=1)
        self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop none")

        #
        # set the RPF-ID of the entry to match the input packet's
        #
        route_232_1_1_1.update_rpf_id(55)

        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [34],
                                             dst_ip="232.1.1.1", n=257)
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg1.get_capture(257)
        self.verify_capture_ip4(self.pg1, rx, tx)

        #
        # set the RPF-ID of the entry to not match the input packet's
        #
        route_232_1_1_1.update_rpf_id(56)
        tx = self.create_stream_labelled_ip4(self.pg0, [34],
                                             dst_ip="232.1.1.1")
        self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")


class TestMPLSDisabled(VppTestCase):
    """ MPLS disabled """

    def setUp(self):
        super(TestMPLSDisabled, self).setUp()

        # create 2 pg interfaces
        self.create_pg_interfaces(range(2))

        # PG0 is MPLS enabled
        self.pg0.admin_up()
        self.pg0.config_ip4()
        self.pg0.resolve_arp()
        self.pg0.enable_mpls()

        # PG 1 is not MPLS enabled
        self.pg1.admin_up()

    def tearDown(self):
        super(TestMPLSDisabled, self).tearDown()
        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.admin_down()

    def send_and_assert_no_replies(self, intf, pkts, remark):
        intf.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        for i in self.pg_interfaces:
            i.get_capture(0)
            i.assert_nothing_captured(remark=remark)

    def test_mpls_disabled(self):
        """ MPLS Disabled """

        tx = (Ether(src=self.pg1.remote_mac,
                    dst=self.pg1.local_mac) /
              MPLS(label=32, ttl=64) /
              IPv6(src="2001::1", dst=self.pg0.remote_ip6) /
              UDP(sport=1234, dport=1234) /
              Raw('\xa5' * 100))

        #
        # A simple MPLS xconnect - eos label in label out
        #
        route_32_eos = VppMplsRoute(self, 32, 1,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[33])])
        route_32_eos.add_vpp_config()

        #
        # PG1 does not forward MPLS traffic
        #
        self.send_and_assert_no_replies(self.pg1, tx, "MPLS disabled")

        #
        # MPLS enable PG1
        #
        self.pg1.enable_mpls()

        #
        # Now we get packets through
        #
        self.pg1.add_stream(tx)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture(1)

        #
        # Disable PG1
        #
        self.pg1.disable_mpls()

        #
        # PG1 no longer forwards MPLS traffic
        #
        self.send_and_assert_no_replies(self.pg1, tx, "IPv6 disabled")
        self.send_and_assert_no_replies(self.pg1, tx, "IPv6 disabled")


class TestMPLSPIC(VppTestCase):
    """ MPLS PIC edge convergence """

    def setUp(self):
        super(TestMPLSPIC, self).setUp()

        # create 4 pg interfaces
        self.create_pg_interfaces(range(4))

        # core links
        self.pg0.admin_up()
        self.pg0.config_ip4()
        self.pg0.resolve_arp()
        self.pg0.enable_mpls()
        self.pg1.admin_up()
        self.pg1.config_ip4()
        self.pg1.resolve_arp()
        self.pg1.enable_mpls()

        # VRF (customer facing) links
        self.pg2.admin_up()
        self.pg2.set_table_ip4(1)
        self.pg2.config_ip4()
        self.pg2.resolve_arp()
        self.pg2.set_table_ip6(1)
        self.pg2.config_ip6()
        self.pg2.resolve_ndp()
        self.pg3.admin_up()
        self.pg3.set_table_ip4(1)
        self.pg3.config_ip4()
        self.pg3.resolve_arp()
        self.pg3.set_table_ip6(1)
        self.pg3.config_ip6()
        self.pg3.resolve_ndp()

    def tearDown(self):
        super(TestMPLSPIC, self).tearDown()
        self.pg0.disable_mpls()
        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.unconfig_ip6()
            i.set_table_ip4(0)
            i.set_table_ip6(0)
            i.admin_down()

    def test_mpls_ibgp_pic(self):
        """ MPLS iBGP PIC edge convergence

        1) setup many iBGP VPN routes via a pair of iBGP peers.
        2) Check ECMP forwarding to these peers
        3) withdraw the IGP route to one of these peers.
        4) check forwarding continues to the remaining peer
        """

        #
        # IGP+LDP core routes
        #
        core_10_0_0_45 = VppIpRoute(self, "10.0.0.45", 32,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[45])])
        core_10_0_0_45.add_vpp_config()

        core_10_0_0_46 = VppIpRoute(self, "10.0.0.46", 32,
                                    [VppRoutePath(self.pg1.remote_ip4,
                                                  self.pg1.sw_if_index,
                                                  labels=[46])])
        core_10_0_0_46.add_vpp_config()

        #
        # Lots of VPN routes. We need more than 64 so VPP will build
        # the fast convergence indirection
        #
        vpn_routes = []
        pkts = []
        for ii in range(64):
            dst = "192.168.1.%d" % ii
            vpn_routes.append(VppIpRoute(self, dst, 32,
                                         [VppRoutePath("10.0.0.45",
                                                       0xffffffff,
                                                       labels=[145],
                                                       is_resolve_host=1),
                                          VppRoutePath("10.0.0.46",
                                                       0xffffffff,
                                                       labels=[146],
                                                       is_resolve_host=1)],
                                         table_id=1))
            vpn_routes[ii].add_vpp_config()

            pkts.append(Ether(dst=self.pg2.local_mac,
                              src=self.pg2.remote_mac) /
                        IP(src=self.pg2.remote_ip4, dst=dst) /
                        UDP(sport=1234, dport=1234) /
                        Raw('\xa5' * 100))

        #
        # Send the packet stream (one pkt to each VPN route)
        #  - expect a 50-50 split of the traffic
        #
        self.pg2.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0._get_capture(1)
        rx1 = self.pg1._get_capture(1)

        # not testing the LB hashing algorithm so we're not concerned
        # with the split ratio, just as long as neither is 0
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))

        #
        # use a test CLI command to stop the FIB walk process, this
        # will prevent the FIB converging the VPN routes and thus allow
        # us to probe the interim (post-fail, pre-converge) state
        #
        self.vapi.ppcli("test fib-walk-process disable")

        #
        # Withdraw one of the IGP routes
        #
        core_10_0_0_46.remove_vpp_config()

        #
        # now all packets should be forwarded through the remaining peer
        #
        self.vapi.ppcli("clear trace")
        self.pg2.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0.get_capture(len(pkts))

        #
        # enable the FIB walk process to converge the FIB
        #
        self.vapi.ppcli("test fib-walk-process enable")

        #
        # packets should still be forwarded through the remaining peer
        #
        self.pg2.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0.get_capture(64)

        #
        # Add the IGP route back and we return to load-balancing
        #
        core_10_0_0_46.add_vpp_config()

        self.pg2.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0._get_capture(1)
        rx1 = self.pg1._get_capture(1)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))

    def test_mpls_ebgp_pic(self):
        """ MPLS eBGP PIC edge convergence

        1) setup many eBGP VPN routes via a pair of eBGP peers
        2) Check ECMP forwarding to these peers
        3) withdraw one eBGP path - expect LB across remaining eBGP
        """

        #
        # Lots of VPN routes. We need more than 64 so VPP will build
        # the fast convergence indirection
        #
        vpn_routes = []
        vpn_bindings = []
        pkts = []
        for ii in range(64):
            dst = "192.168.1.%d" % ii
            local_label = 1600 + ii
            vpn_routes.append(VppIpRoute(self, dst, 32,
                                         [VppRoutePath(self.pg2.remote_ip4,
                                                       0xffffffff,
                                                       nh_table_id=1,
                                                       is_resolve_attached=1),
                                          VppRoutePath(self.pg3.remote_ip4,
                                                       0xffffffff,
                                                       nh_table_id=1,
                                                       is_resolve_attached=1)],
                                         table_id=1))
            vpn_routes[ii].add_vpp_config()

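            # bind a local MPLS label to the VPN prefix, so labelled
            # packets received with that label are forwarded according
            # to the VPN route in table 1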
            vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32,
                                              ip_table_id=1))
            vpn_bindings[ii].add_vpp_config()

            pkts.append(Ether(dst=self.pg0.local_mac,
                              src=self.pg0.remote_mac) /
                        MPLS(label=local_label, ttl=64) /
                        IP(src=self.pg0.remote_ip4, dst=dst) /
                        UDP(sport=1234, dport=1234) /
                        Raw('\xa5' * 100))

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg2._get_capture(1)
        rx1 = self.pg3._get_capture(1)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))

        #
        # use a test CLI command to stop the FIB walk process; this
        # prevents the FIB from converging the VPN routes and thus allows
        # us to probe the interim (post-fail, pre-convergence) state
        #
        self.vapi.ppcli("test fib-walk-process disable")

        #
        # withdraw the connected prefix on the interface. The eBGP peer
        # is directly connected, so this simulates the loss of that peer.
        #
        self.pg2.unconfig_ip4()

        #
        # now all packets should be forwarded through the remaining peer
        #
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg3.get_capture(len(pkts))

        #
        # enable the FIB walk process to converge the FIB
        #
        self.vapi.ppcli("test fib-walk-process enable")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg3.get_capture(len(pkts))

        #
        # put the connecteds back
        #
        self.pg2.config_ip4()

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg2._get_capture(1)
        rx1 = self.pg3._get_capture(1)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))

    def test_mpls_v6_ebgp_pic(self):
        """ MPLSv6 eBGP PIC edge convergence

        1) setup many eBGP VPNv6 routes via a pair of eBGP peers
        2) Check ECMP forwarding to these peers
        3) withdraw one eBGP path - expect traffic to switch to the
           remaining eBGP peer
        """

        #
        # Lots of VPN routes. We need more than 64 so that VPP will build
        # the fast convergence indirection
        #
        vpn_routes = []
        vpn_bindings = []
        pkts = []
        for ii in range(64):
            dst = "3000::%d" % ii
            local_label = 1600 + ii
            vpn_routes.append(VppIpRoute(self, dst, 128,
                                         [VppRoutePath(self.pg2.remote_ip6,
                                                       0xffffffff,
                                                       nh_table_id=1,
                                                       is_resolve_attached=1,
                                                       is_ip6=1),
                                          VppRoutePath(self.pg3.remote_ip6,
                                                       0xffffffff,
                                                       nh_table_id=1,
                                                       is_ip6=1,
                                                       is_resolve_attached=1)],
                                         table_id=1,
                                         is_ip6=1))
            vpn_routes[ii].add_vpp_config()

            vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128,
                                              ip_table_id=1,
                                              is_ip6=1))
            vpn_bindings[ii].add_vpp_config()

            pkts.append(Ether(dst=self.pg0.local_mac,
                              src=self.pg0.remote_mac) /
                        MPLS(label=local_label, ttl=64) /
                        IPv6(src=self.pg0.remote_ip6, dst=dst) /
                        UDP(sport=1234, dport=1234) /
                        Raw('\xa5' * 100))

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg2._get_capture(1)
        rx1 = self.pg3._get_capture(1)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))

        #
        # use a test CLI command to stop the FIB walk process; this
        # prevents the FIB from converging the VPN routes and thus allows
        # us to probe the interim (post-fail, pre-convergence) state
        #
        self.vapi.ppcli("test fib-walk-process disable")

        #
        # withdraw the connected prefix on the interface and shut the
        # interface down, so that the ND cache is flushed.
        #
        self.pg2.unconfig_ip6()
        self.pg2.admin_down()

        #
        # now all packets should be forwarded through the remaining peer
        #
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg3.get_capture(len(pkts))

        #
        # enable the FIB walk process to converge the FIB
        #
        self.vapi.ppcli("test fib-walk-process enable")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg3.get_capture(len(pkts))

        #
        # put the connecteds back
        #
        self.pg2.admin_up()
        self.pg2.config_ip6()

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg2._get_capture(1)
        rx1 = self.pg3._get_capture(1)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))


if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/fib/ip6_fib.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/fib/mpls_fib.h>
#include <vnet/adj/adj.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/mpls_label_dpo.h>
#include <vnet/dpo/lookup_dpo.h>
#include <vnet/dpo/drop_dpo.h>
#include <vnet/dpo/receive_dpo.h>
#include <vnet/dpo/ip_null_dpo.h>

#include <vnet/mpls/mpls.h>

#include <vnet/fib/fib_path_list.h>
#include <vnet/fib/fib_entry_src.h>
#include <vnet/fib/fib_walk.h>
#include <vnet/fib/fib_node_list.h>
#include <vnet/fib/fib_urpf_list.h>

#define FIB_TEST_I(_cond, _comment, _args...)			\
({								\
    int _evald = (_cond);					\
    if (!(_evald)) {						\
	fformat(stderr, "FAIL:%d: " _comment "\n",		\
		__LINE__, ##_args);				\
    } else {							\
    }								\
    _evald;							\
})
#define FIB_TEST(_cond, _comment, _args...)			\
{								\
    if (!FIB_TEST_I(_cond, _comment, ##_args)) {		\
	return 1;                                               \
	ASSERT(!("FAIL: " _comment));				\
    }								\
}
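
/*
 * NB: on failure FIB_TEST returns 1 from the enclosing function, so it is
 * only usable in functions that return int; the ASSERT after the return
 * statement is unreachable as written.
 */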

/**
 * An "I'm not fussed if this is not efficient" store of test data
 */
typedef struct test_main_t_ {
    /**
     * HW interface indices
     */
    u32 hw_if_indicies[4];
    /**
     * HW interfaces
     */
    vnet_hw_interface_t * hw[4];

} test_main_t;
static test_main_t test_main;

/* fake ethernet device class, distinct from "fake-ethX" */
static u8 * format_test_interface_name (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  return format (s, "test-eth%d", dev_instance);
}

static uword dummy_interface_tx (vlib_main_t * vm,
				 vlib_node_runtime_t * node,
				 vlib_frame_t * frame)
{
  clib_warning ("you shouldn't be here, leaking buffers...");
  return frame->n_vectors;
}

static clib_error_t *
test_interface_admin_up_down (vnet_main_t * vnm,
                              u32 hw_if_index,
                              u32 flags)
{
  u32 hw_flags = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ?
    VNET_HW_INTERFACE_FLAG_LINK_UP : 0;
  vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags);
  return 0;
}

VNET_DEVICE_CLASS (test_interface_device_class,static) = {
  .name = "Test interface",
  .format_device_name = format_test_interface_name,
  .tx_function = dummy_interface_tx,
  .admin_up_down_function = test_interface_admin_up_down,
};

static u8 *hw_address;

static int
fib_test_mk_intf (u32 ninterfaces)
{
    clib_error_t * error = NULL;
    test_main_t *tm = &test_main;
    u8 byte;
    u32 i;

    ASSERT(ninterfaces <= ARRAY_LEN(tm->hw_if_indicies));

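    /* build a 6-byte base MAC address, d0:d1:d2:d3:d4:d5; the final byte
     * is overwritten per-interface below */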
    for (i=0; i<6; i++)
    {
	byte = 0xd0+i;
	vec_add1(hw_address, byte);
    }

    for (i = 0; i < ninterfaces; i++)
    {
	hw_address[5] = i;

	error = ethernet_register_interface(vnet_get_main(),
                                            test_interface_device_class.index,
					    i /* instance */,
					    hw_address,
					    &tm->hw_if_indicies[i], 
					    /* flag change */ 0);

	FIB_TEST((NULL == error), "ADD interface %d", i);
      
        error = vnet_hw_interface_set_flags(vnet_get_main(),
                                            tm->hw_if_indicies[i],
                                            VNET_HW_INTERFACE_FLAG_LINK_UP);
        tm->hw[i] = vnet_get_hw_interface(vnet_get_main(),
					  tm->hw_if_indicies[i]);
	vec_validate (ip4_main.fib_index_by_sw_if_index,
                      tm->hw[i]->sw_if_index);
	vec_validate (ip6_main.fib_index_by_sw_if_index,
                      tm->hw[i]->sw_if_index);
	ip4_main.fib_index_by_sw_if_index[tm->hw[i]->sw_if_index] = 0;
	ip6_main.fib_index_by_sw_if_index[tm->hw[i]->sw_if_index] = 0;

	error = vnet_sw_interface_set_flags(vnet_get_main(),
					    tm->hw[i]->sw_if_index,
					    VNET_SW_INTERFACE_FLAG_ADMIN_UP);
	FIB_TEST((NULL == error), "UP interface %d", i);
    }
    /*
     * re-eval after the inevitable realloc
     */
    for (i = 0; i < ninterfaces; i++)
    {
	tm->hw[i] = vnet_get_hw_interface(vnet_get_main(),
					  tm->hw_if_indicies[i]);
    }

    return (0);
}

#define FIB_TEST_REC_FORW(_rec_prefix, _via_prefix, _bucket)		\
{                                                                       \
    const dpo_id_t *_rec_dpo = fib_entry_contribute_ip_forwarding(      \
        fib_table_lookup_exact_match(fib_index, (_rec_prefix)));        \
    const dpo_id_t *_via_dpo = fib_entry_contribute_ip_forwarding(      \
        fib_table_lookup(fib_index, (_via_prefix)));                    \
    FIB_TEST(!dpo_cmp(_via_dpo,                                         \
                      load_balance_get_bucket(_rec_dpo->dpoi_index,	\
					      _bucket)),		\
             "%U is recursive via %U",                                  \
             format_fib_prefix, (_rec_prefix),                          \
             format_fib_prefix, _via_prefix);                           \
}
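
/*
 * Example usage (as in fib_test_v4() below):
 *
 *   FIB_TEST_REC_FORW(&bgp_100_pfx, &pfx_1_1_1_1_s_32, 0);
 *
 * i.e. check that bucket 0 of the recursive prefix's load-balance matches
 * the DPO contributed by the via-prefix.
 */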

#define FIB_TEST_LB_BUCKET_VIA_ADJ(_prefix, _bucket, _ai)               \
{                                                                       \
    const dpo_id_t *_dpo = fib_entry_contribute_ip_forwarding(          \
        fib_table_lookup_exact_match(fib_index, (_prefix)));            \
    const dpo_id_t *_dpo1 =                                             \
        load_balance_get_bucket(_dpo->dpoi_index, _bucket);             \
    FIB_TEST(DPO_ADJACENCY == _dpo1->dpoi_type, "type is %U",           \
             format_dpo_type, _dpo1->dpoi_type);                        \
    FIB_TEST((_ai == _dpo1->dpoi_index),                                \
	     "%U bucket %d resolves via %U",                            \
             format_fib_prefix, (_prefix),                              \
             _bucket,                                                   \
             format_dpo_id, _dpo1, 0);                                  \
}
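
/*
 * Example usage (as in fib_test_v4() below):
 *
 *   FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 0, ai_12_12_12_12);
 *
 * i.e. check that bucket 0 of the prefix's load-balance is the given
 * adjacency.
 */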

#define FIB_TEST_RPF(_cond, _comment, _args...)			\
{								\
    if (!FIB_TEST_I(_cond, _comment, ##_args)) {		\
	return (0);						\
    }								\
}

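/**
 * Check the uRPF list contributed by an entry's forwarding chain against
 * the expected set of interfaces. The variadic arguments are the expected
 * sw_if_index values (declared adj_index_t below; both are u32).
 */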
static int
fib_test_urpf_is_equal (fib_node_index_t fei,
		       fib_forward_chain_type_t fct,
		       u32 num, ...)
{
    dpo_id_t dpo = DPO_INVALID;
    fib_urpf_list_t *urpf;
    index_t ui;
    va_list ap;
    int ii;

    va_start(ap, num);

    fib_entry_contribute_forwarding(fei, fct, &dpo);
    ui = load_balance_get_urpf(dpo.dpoi_index);

    urpf = fib_urpf_list_get(ui);

    FIB_TEST_RPF(num == vec_len(urpf->furpf_itfs),
		 "RPF:%U len %d == %d",
		 format_fib_urpf_list, ui,
		 num, vec_len(urpf->furpf_itfs));
    FIB_TEST_RPF(num == fib_urpf_check_size(ui),
		 "RPF:%U check-size %d == %d",
		 format_fib_urpf_list, ui,
		 num, vec_len(urpf->furpf_itfs));

    for (ii = 0; ii < num; ii++)
    {
	adj_index_t ai = va_arg(ap, adj_index_t);

	FIB_TEST_RPF(ai == urpf->furpf_itfs[ii],
		     "RPF:%d item:%d - %d == %d",
		     ui, ii, ai, urpf->furpf_itfs[ii]);
	FIB_TEST_RPF(fib_urpf_check(ui, ai),
		     "RPF:%d %d found",
		     ui, ai);
    }

    dpo_reset(&dpo);

    va_end(ap);

    return (1);
}

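/*
 * Build a 14-byte ethernet rewrite string: the destination and source MAC
 * are both set to eth_addr, and the final two bytes (the ethertype) are
 * left as zero, which suffices for these tests.
 */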
static u8*
fib_test_build_rewrite (u8 *eth_addr)
{
    u8* rewrite = NULL;

    vec_validate(rewrite, 13);

    memcpy(rewrite, eth_addr, 6);
    memcpy(rewrite+6, eth_addr, 6);

    return (rewrite);
}

typedef enum fib_test_lb_bucket_type_t_ {
    FT_LB_LABEL_O_ADJ,
    FT_LB_LABEL_STACK_O_ADJ,
    FT_LB_LABEL_O_LB,
    FT_LB_O_LB,
    FT_LB_SPECIAL,
    FT_LB_ADJ,
} fib_test_lb_bucket_type_t;

typedef struct fib_test_lb_bucket_t_ {
    fib_test_lb_bucket_type_t type;

    union
    {
	struct
	{
	    mpls_eos_bit_t eos;
	    mpls_label_t label;
	    u8 ttl;
	    adj_index_t adj;
	} label_o_adj;
	struct
	{
	    mpls_eos_bit_t eos;
	    mpls_label_t label_stack[8];
	    u8 label_stack_size;
	    u8 ttl;
	    adj_index_t adj;
	} label_stack_o_adj;
	struct
	{
	    mpls_eos_bit_t eos;
	    mpls_label_t label;
	    u8 ttl;
	    index_t lb;
	} label_o_lb;
	struct
	{
	    index_t adj;
	} adj;
	struct
	{
	    index_t lb;
	} lb;
	struct
	{
	    index_t adj;
	} special;
    };
} fib_test_lb_bucket_t;

#define FIB_TEST_LB(_cond, _comment, _args...)			\
{								\
    if (!FIB_TEST_I(_cond, _comment, ##_args)) {		\
	return (0);						\
    }								\
}

static int
fib_test_validate_lb_v (const load_balance_t *lb,
			u16 n_buckets,
			va_list ap)
{
    const dpo_id_t *dpo;
    int bucket;

    FIB_TEST_LB((n_buckets == lb->lb_n_buckets), "n_buckets = %d", lb->lb_n_buckets);

    for (bucket = 0; bucket < n_buckets; bucket++)
    {
	const fib_test_lb_bucket_t *exp;

	exp = va_arg(ap, fib_test_lb_bucket_t*);
	dpo = load_balance_get_bucket_i(lb, bucket);

	switch (exp->type)
	{
	case FT_LB_LABEL_STACK_O_ADJ:
	    {
		const mpls_label_dpo_t *mld;
                mpls_label_t hdr;
		u32 ii;

		FIB_TEST_LB((DPO_MPLS_LABEL == dpo->dpoi_type),
			   "bucket %d stacks on %U",
			   bucket,
			   format_dpo_type, dpo->dpoi_type);
	    
		mld = mpls_label_dpo_get(dpo->dpoi_index);

		FIB_TEST_LB(exp->label_stack_o_adj.label_stack_size == mld->mld_n_labels,
			    "label stack size",
			    mld->mld_n_labels);

		for (ii = 0; ii < mld->mld_n_labels; ii++)
		{
		    hdr = clib_net_to_host_u32(mld->mld_hdr[ii].label_exp_s_ttl);
		    FIB_TEST_LB((vnet_mpls_uc_get_label(hdr) ==
				 exp->label_stack_o_adj.label_stack[ii]),
				"bucket %d stacks on label %d",
				bucket,
				exp->label_stack_o_adj.label_stack[ii]);

		    if (ii == mld->mld_n_labels-1)
		    {
			FIB_TEST_LB((vnet_mpls_uc_get_s(hdr) ==
				     exp->label_o_adj.eos),
				    "bucket %d stacks on label %d %U!=%U",
				    bucket,
				    exp->label_stack_o_adj.label_stack[ii],
				    format_mpls_eos_bit, exp->label_o_adj.eos,
				    format_mpls_eos_bit, vnet_mpls_uc_get_s(hdr));
		    }
		    else
		    {
			FIB_TEST_LB((vnet_mpls_uc_get_s(hdr) == MPLS_NON_EOS),
				    "bucket %d stacks on label %d %U",
				    bucket,
				    exp->label_stack_o_adj.label_stack[ii],
				    format_mpls_eos_bit, vnet_mpls_uc_get_s(hdr));
		    }
		}

		FIB_TEST_LB((DPO_ADJACENCY_INCOMPLETE == mld->mld_dpo.dpoi_type),
			    "bucket %d label stacks on %U",
			    bucket,
			    format_dpo_type, mld->mld_dpo.dpoi_type);

		FIB_TEST_LB((exp->label_stack_o_adj.adj == mld->mld_dpo.dpoi_index),
			    "bucket %d label stacks on adj %d",
			    bucket,
			    exp->label_stack_o_adj.adj);
	    }
	    break;
	case FT_LB_LABEL_O_ADJ:
	    {
		const mpls_label_dpo_t *mld;
                mpls_label_t hdr;
		FIB_TEST_LB((DPO_MPLS_LABEL == dpo->dpoi_type),
			   "bucket %d stacks on %U",
			   bucket,
			   format_dpo_type, dpo->dpoi_type);
	    
		mld = mpls_label_dpo_get(dpo->dpoi_index);
                hdr = clib_net_to_host_u32(mld->mld_hdr[0].label_exp_s_ttl);

		FIB_TEST_LB((vnet_mpls_uc_get_label(hdr) ==
			     exp->label_o_adj.label),
			    "bucket %d stacks on label %d",
			    bucket,
			    exp->label_o_adj.label);

		FIB_TEST_LB((vnet_mpls_uc_get_s(hdr) ==
			     exp->label_o_adj.eos),
			    "bucket %d stacks on label %d %U",
			    bucket,
			    exp->label_o_adj.label,
			    format_mpls_eos_bit, exp->label_o_adj.eos);

		FIB_TEST_LB((DPO_ADJACENCY_INCOMPLETE == mld->mld_dpo.dpoi_type),
			    "bucket %d label stacks on %U",
			    bucket,
			    format_dpo_type, mld->mld_dpo.dpoi_type);

		FIB_TEST_LB((exp->label_o_adj.adj == mld->mld_dpo.dpoi_index),
			    "bucket %d label stacks on adj %d",
			    bucket,
			    exp->label_o_adj.adj);
	    }
	    break;
	case FT_LB_LABEL_O_LB:
	    {
		const mpls_label_dpo_t *mld;
                mpls_label_t hdr;

		FIB_TEST_LB((DPO_MPLS_LABEL == dpo->dpoi_type),
			   "bucket %d stacks on %U",
			   bucket,
			   format_dpo_type, dpo->dpoi_type);
	    
		mld = mpls_label_dpo_get(dpo->dpoi_index);
                hdr = clib_net_to_host_u32(mld->mld_hdr[0].label_exp_s_ttl);

		FIB_TEST_LB(1 == mld->mld_n_labels, "label stack size",
			    mld->mld_n_labels);
		FIB_TEST_LB((vnet_mpls_uc_get_label(hdr) ==
			     exp->label_o_lb.label),
			    "bucket %d stacks on label %d",
			    bucket,
			    exp->label_o_lb.label);

		FIB_TEST_LB((vnet_mpls_uc_get_s(hdr) ==
			     exp->label_o_lb.eos),
			    "bucket %d stacks on label %d %U",
			    bucket,
			    exp->label_o_lb.label,
			    format_mpls_eos_bit, exp->label_o_lb.eos);

		FIB_TEST_LB((DPO_LOAD_BALANCE == mld->mld_dpo.dpoi_type),
			    "bucket %d label stacks on %U",
			    bucket,
			    format_dpo_type, mld->mld_dpo.dpoi_type);

		FIB_TEST_LB((exp->label_o_lb.lb == mld->mld_dpo.dpoi_index),
			    "bucket %d label stacks on LB %d",
			    bucket,
			    exp->label_o_lb.lb);
	    }
	    break;
	case FT_LB_ADJ:
	    FIB_TEST_I(((DPO_ADJACENCY == dpo->dpoi_type) ||
			(DPO_ADJACENCY_INCOMPLETE == dpo->dpoi_type)),
		       "bucket %d stacks on %U",
		       bucket,
		       format_dpo_type, dpo->dpoi_type);
	    FIB_TEST_LB((exp->adj.adj == dpo->dpoi_index),
			"bucket %d stacks on adj %d",
			bucket,
			exp->adj.adj);
	    break;
	case FT_LB_O_LB:
	    FIB_TEST_I((DPO_LOAD_BALANCE == dpo->dpoi_type),
                       "bucket %d stacks on %U",
                       bucket,
                       format_dpo_type, dpo->dpoi_type);
	    FIB_TEST_LB((exp->lb.lb == dpo->dpoi_index),
			"bucket %d stacks on lb %d",
			bucket,
			exp->lb.lb);
	    break;
	case FT_LB_SPECIAL:
	    FIB_TEST_I((DPO_DROP == dpo->dpoi_type),
		       "bucket %d stacks on %U",
		       bucket,
		       format_dpo_type, dpo->dpoi_type);
	    FIB_TEST_LB((exp->special.adj == dpo->dpoi_index),
			"bucket %d stacks on drop %d",
			bucket,
			exp->special.adj);
	    break;
	}
    }
    return (!0);
}

static int
fib_test_validate_entry (fib_node_index_t fei,
			 fib_forward_chain_type_t fct,
			 u16 n_buckets,
			 ...)
{
    dpo_id_t dpo = DPO_INVALID;
    const load_balance_t *lb;
    fib_prefix_t pfx;
    index_t fw_lbi;
    u32 fib_index;
    va_list ap;
    int res;

    va_start(ap, n_buckets);

    fib_entry_get_prefix(fei, &pfx);
    fib_index = fib_entry_get_fib_index(fei);
    fib_entry_contribute_forwarding(fei, fct, &dpo);

    FIB_TEST_LB((DPO_LOAD_BALANCE == dpo.dpoi_type),
		"Entry links to %U",
		format_dpo_type, dpo.dpoi_type);
    lb = load_balance_get(dpo.dpoi_index);

    res = fib_test_validate_lb_v(lb, n_buckets, ap);

    /*
     * ensure that the LB contributed by the entry is the
     * same as the LB in the forwarding tables
     */
    if (fct == fib_entry_get_default_chain_type(fib_entry_get(fei)))
    {
        switch (pfx.fp_proto)
        {
        case FIB_PROTOCOL_IP4:
            fw_lbi = ip4_fib_forwarding_lookup(fib_index, &pfx.fp_addr.ip4);
            break;
        case FIB_PROTOCOL_IP6:
            fw_lbi = ip6_fib_table_fwding_lookup(&ip6_main, fib_index, &pfx.fp_addr.ip6);
            break;
        case FIB_PROTOCOL_MPLS:
            {
                mpls_unicast_header_t hdr = {
                    .label_exp_s_ttl = 0,
                };

                vnet_mpls_uc_set_label(&hdr.label_exp_s_ttl, pfx.fp_label);
                vnet_mpls_uc_set_s(&hdr.label_exp_s_ttl, pfx.fp_eos);
                hdr.label_exp_s_ttl = clib_host_to_net_u32(hdr.label_exp_s_ttl);

                fw_lbi = mpls_fib_table_forwarding_lookup(fib_index, &hdr);
                break;
            }
        default:
            fw_lbi = 0;
        }
        FIB_TEST_LB((fw_lbi == dpo.dpoi_index),
                    "Contributed LB = FW LB: %U\n %U",
                    format_load_balance, fw_lbi, 0,
                    format_load_balance, dpo.dpoi_index, 0);
    }

    dpo_reset(&dpo);

    va_end(ap);

    return (res);
}
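
/*
 * Example usage (as in fib_test_v4() below):
 *
 *   fei = fib_table_lookup(fib_index, &pfx_6_6_6_6_s_32);
 *   FIB_TEST(fib_test_validate_entry(fei,
 *                                    FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
 *                                    1,
 *                                    &ip_6_6_6_6_o_10_10_10_1),
 *            "6.6.6.6/32 via 10.10.10.1");
 *
 * i.e. check that the entry's load-balance has exactly the expected buckets
 * and matches the LB installed in the forwarding table.
 */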

static int
fib_test_v4 (void)
{
    /*
     * In the default table check for the presence and correct forwarding
     * of the special entries
     */
    fib_node_index_t dfrt, fei, ai, ai2, locked_ai, ai_01, ai_02, ai_03;
    const dpo_id_t *dpo, *dpo1, *dpo2, *dpo_drop;
    const ip_adjacency_t *adj;
    const load_balance_t *lb;
    test_main_t *tm;
    u32 fib_index;
    int ii;

    /* via 10.10.10.1 */
    ip46_address_t nh_10_10_10_1 = {
	.ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a01),
    };
    /* via 10.10.10.2 */
    ip46_address_t nh_10_10_10_2 = {
	.ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a02),
    };

    tm = &test_main;

    /* Find or create FIB table 11 */
    fib_index = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP4, 11);

    for (ii = 0; ii < 4; ii++)
    {
	ip4_main.fib_index_by_sw_if_index[tm->hw[ii]->sw_if_index] = fib_index;
    }

    fib_prefix_t pfx_0_0_0_0_s_0 = {
	.fp_len = 0,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4 = {
		{0}
	    },
	},
    };

    fib_prefix_t pfx = {
	.fp_len = 0,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4 = {
		{0}
	    },
	},
    };

    dpo_drop = drop_dpo_get(DPO_PROTO_IP4);

    dfrt = fib_table_lookup(fib_index, &pfx_0_0_0_0_s_0);
    FIB_TEST((FIB_NODE_INDEX_INVALID != dfrt), "default route present");
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(dfrt)),
	     "Default route is DROP");

    pfx.fp_len = 32;
    fei = fib_table_lookup(fib_index, &pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "all zeros route present");
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "all 0s route is DROP");

    pfx.fp_addr.ip4.as_u32 = clib_host_to_net_u32(0xffffffff);
    pfx.fp_len = 32;
    fei = fib_table_lookup(fib_index, &pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "all ones route present");
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "all 1s route is DROP");

    pfx.fp_addr.ip4.as_u32 = clib_host_to_net_u32(0xe0000000);
    pfx.fp_len = 8;
    fei = fib_table_lookup(fib_index, &pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "all-mcast route present");
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "all-mcast route is DROP");

    pfx.fp_addr.ip4.as_u32 = clib_host_to_net_u32(0xf0000000);
    pfx.fp_len = 8;
    fei = fib_table_lookup(fib_index, &pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "class-e route present");
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "class-e route is DROP");

    /*
     * at this stage there are 5 entries in the test FIB (plus 5 in the default),
     * all of which are special sourced and so none of which share path-lists.
     * There are also 2 entries, and 2 non-shared path-lists, in the v6 default
     * table, and 4 path-lists in the v6 MFIB table
     */
#define ENBR (5+5+2)
#define PNBR (5+5+6)
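    /*
     * i.e. ENBR = 5 (test FIB) + 5 (default FIB) + 2 (v6 default) = 12
     * entries, and PNBR = 5 + 5 + (2 v6 default + 4 v6 MFIB) = 16
     * path-lists, per the comment above.
     */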
    FIB_TEST((0 == fib_path_list_db_size()),   "path list DB is empty");
    FIB_TEST((PNBR == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * add interface routes.
     *  validate presence of the /24 attached and /32 receive.
     *  test for the presence of the receive address in the glean and local adj
     */
    fib_prefix_t local_pfx = {
	.fp_len = 24,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4 = {
		.as_u32 = clib_host_to_net_u32(0x0a0a0a0a),
	    },
	},
    };

    fib_table_entry_update_one_path(fib_index, &local_pfx,
				    FIB_SOURCE_INTERFACE,
				    (FIB_ENTRY_FLAG_CONNECTED |
				     FIB_ENTRY_FLAG_ATTACHED),
				    FIB_PROTOCOL_IP4,
				    NULL,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1, // weight
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup(fib_index, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached interface route present");
    FIB_TEST(((FIB_ENTRY_FLAG_ATTACHED | FIB_ENTRY_FLAG_CONNECTED) ==
	      fib_entry_get_flags(fei)),
	     "Flags set on attached interface");

    ai = fib_entry_get_adj(fei);
    FIB_TEST((FIB_NODE_INDEX_INVALID != ai), "attached interface route adj present");
    adj = adj_get(ai);
    FIB_TEST((IP_LOOKUP_NEXT_GLEAN == adj->lookup_next_index),
	     "attached interface adj is glean");
    FIB_TEST((0 == ip46_address_cmp(&local_pfx.fp_addr,
				    &adj->sub_type.glean.receive_addr)),
	      "attached interface adj is receive ok");

    local_pfx.fp_len = 32;
    fib_table_entry_update_one_path(fib_index, &local_pfx,
				    FIB_SOURCE_INTERFACE,
				    (FIB_ENTRY_FLAG_CONNECTED |
				     FIB_ENTRY_FLAG_LOCAL),
				    FIB_PROTOCOL_IP4,
				    NULL,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1, // weight
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup(fib_index, &local_pfx);
    FIB_TEST(((FIB_ENTRY_FLAG_LOCAL | FIB_ENTRY_FLAG_CONNECTED) ==
	      fib_entry_get_flags(fei)),
	     "Flags set on local interface");

    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local interface route present");

    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 0),
	     "RPF list for local length 0");
    dpo = load_balance_get_bucket(dpo->dpoi_index, 0);
    FIB_TEST((DPO_RECEIVE == dpo->dpoi_type),
	     "local interface adj is local");
    receive_dpo_t *rd = receive_dpo_get(dpo->dpoi_index);

    FIB_TEST((0 == ip46_address_cmp(&local_pfx.fp_addr,
				    &rd->rd_addr)),
	      "local interface adj is receive ok");

    FIB_TEST((2 == fib_table_get_num_entries(fib_index,
                                             FIB_PROTOCOL_IP4,
                                             FIB_SOURCE_INTERFACE)),
             "2 Interface Source'd prefixes");

    /*
     * +2 interface routes +2 non-shared path-lists
     */
    FIB_TEST((0 == fib_path_list_db_size()),   "path list DB is empty");
    FIB_TEST((PNBR+2 == fib_path_list_pool_size()), "path list pool size is%d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+2 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * Modify the default route to be via an adj not yet known.
     * this sources the default route with the API source, which is
     * a higher preference than the DEFAULT_ROUTE source
     */
    pfx.fp_addr.ip4.as_u32 = 0;
    pfx.fp_len = 0;
    fib_table_entry_path_add(fib_index, &pfx,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_1,
			     tm->hw[0]->sw_if_index,
			     ~0, // invalid fib index
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup(fib_index, &pfx);
    FIB_TEST((FIB_ENTRY_FLAG_NONE == fib_entry_get_flags(fei)),
	     "Flags set on API route");

    FIB_TEST((fei == dfrt), "default route same index");
    ai = fib_entry_get_adj(fei);
    FIB_TEST((FIB_NODE_INDEX_INVALID != ai), "default route adj present");
    adj = adj_get(ai);
    FIB_TEST((IP_LOOKUP_NEXT_ARP == adj->lookup_next_index),
	     "adj is incomplete");
    FIB_TEST((0 == ip46_address_cmp(&nh_10_10_10_1, &adj->sub_type.nbr.next_hop)),
	      "adj nbr next-hop ok");
    FIB_TEST((1 == fib_table_get_num_entries(fib_index,
                                             FIB_PROTOCOL_IP4,
                                             FIB_SOURCE_API)),
             "1 API Source'd prefixes");

    /*
     * find the adj in the shared db
     */
    locked_ai = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
				    VNET_LINK_IP4,
				    &nh_10_10_10_1,
				    tm->hw[0]->sw_if_index);
    FIB_TEST((locked_ai == ai), "ADJ NBR DB find");
    adj_unlock(locked_ai);

    /*
     * +1 shared path-list
     */
    FIB_TEST((1 == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNBR+3 == fib_path_list_pool_size()), "path list pool size is%d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+2 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * remove the API source from the default route. We expect
     * the route to remain, sourced by DEFAULT_ROUTE, and hence a DROP
     */
    pfx.fp_addr.ip4.as_u32 = 0;
    pfx.fp_len = 0;
    fib_table_entry_path_remove(fib_index, &pfx,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&nh_10_10_10_1,
				tm->hw[0]->sw_if_index,
				~0, // non-recursive path, so no FIB index
				1,
				FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx);

    FIB_TEST((fei == dfrt), "default route same index");
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "Default route is DROP");

    /*
     * -1 shared-path-list
     */
    FIB_TEST((0 == fib_path_list_db_size()),   "path list DB is empty");
    FIB_TEST((PNBR+2 == fib_path_list_pool_size()), "path list pool size is%d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+2 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * Add 2 ARP entries => complete ADJs plus adj-fibs.
     */
    fib_prefix_t pfx_10_10_10_1_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 10.10.10.1 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a01),
	},
    };
    fib_prefix_t pfx_10_10_10_2_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 10.10.10.2 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a02),
	},
    };
    fib_prefix_t pfx_11_11_11_11_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 11.11.11.11 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x0b0b0b0b),
	},
    };
    u8 eth_addr[] = {
	0xde, 0xde, 0xde, 0xba, 0xba, 0xba,
    };

    ip46_address_t nh_12_12_12_12 = {
	.ip4.as_u32 = clib_host_to_net_u32(0x0c0c0c0c),
    };
    adj_index_t ai_12_12_12_12;

    /*
     * Add a route via an incomplete ADJ, then complete the ADJ.
     * Expect the route's LB to be updated to use the complete adj type.
     */
    fei = fib_table_entry_update_one_path(fib_index,
                                          &pfx_11_11_11_11_s_32,
                                          FIB_SOURCE_API,
                                          FIB_ENTRY_FLAG_ATTACHED,
					  FIB_PROTOCOL_IP4,
                                          &pfx_10_10_10_1_s_32.fp_addr,
                                          tm->hw[0]->sw_if_index,
                                          ~0, // invalid fib index
                                          1,
                                          NULL,
                                          FIB_ROUTE_PATH_FLAG_NONE);

    dpo = fib_entry_contribute_ip_forwarding(fei);
    dpo1 = load_balance_get_bucket(dpo->dpoi_index, 0);
    FIB_TEST(DPO_ADJACENCY_INCOMPLETE == dpo1->dpoi_type,
             "11.11.11.11/32 via incomplete adj");

    ai_01 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
				VNET_LINK_IP4,
				&pfx_10_10_10_1_s_32.fp_addr,
				tm->hw[0]->sw_if_index);
    FIB_TEST((FIB_NODE_INDEX_INVALID != ai_01), "adj created");
    adj = adj_get(ai_01);
    FIB_TEST((IP_LOOKUP_NEXT_ARP == adj->lookup_next_index),
	     "adj is incomplete");
    FIB_TEST((0 == ip46_address_cmp(&pfx_10_10_10_1_s_32.fp_addr,
				    &adj->sub_type.nbr.next_hop)),
	      "adj nbr next-hop ok");

    adj_nbr_update_rewrite(ai_01, ADJ_NBR_REWRITE_FLAG_COMPLETE,
			   fib_test_build_rewrite(eth_addr));
    FIB_TEST((IP_LOOKUP_NEXT_REWRITE == adj->lookup_next_index),
	     "adj is complete");
    FIB_TEST((0 == ip46_address_cmp(&pfx_10_10_10_1_s_32.fp_addr,
				    &adj->sub_type.nbr.next_hop)),
	      "adj nbr next-hop ok");
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "ADJ-FIB resolves via adj");

    dpo = fib_entry_contribute_ip_forwarding(fei);
    dpo1 = load_balance_get_bucket(dpo->dpoi_index, 0);
    FIB_TEST(DPO_ADJACENCY == dpo1->dpoi_type,
             "11.11.11.11/32 via complete adj");
    FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 1,
				    tm->hw[0]->sw_if_index),
	     "RPF list for adj-fib contains adj");

    ai_12_12_12_12 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
					 VNET_LINK_IP4,
					 &nh_12_12_12_12,
					 tm->hw[1]->sw_if_index);
    FIB_TEST((FIB_NODE_INDEX_INVALID != ai_12_12_12_12), "adj created");
    adj = adj_get(ai_12_12_12_12);
    FIB_TEST((IP_LOOKUP_NEXT_ARP == adj->lookup_next_index),
	     "adj is incomplete");
    FIB_TEST((0 == ip46_address_cmp(&nh_12_12_12_12,
				    &adj->sub_type.nbr.next_hop)),
	      "adj nbr next-hop ok");
    adj_nbr_update_rewrite(ai_12_12_12_12, ADJ_NBR_REWRITE_FLAG_COMPLETE,
			   fib_test_build_rewrite(eth_addr));
    FIB_TEST((IP_LOOKUP_NEXT_REWRITE == adj->lookup_next_index),
	     "adj is complete");

    /*
     * add the adj fib
     */
    fei = fib_table_entry_update_one_path(fib_index,
                                          &pfx_10_10_10_1_s_32,
                                          FIB_SOURCE_ADJ,
                                          FIB_ENTRY_FLAG_ATTACHED,
					  FIB_PROTOCOL_IP4,
                                          &pfx_10_10_10_1_s_32.fp_addr,
                                          tm->hw[0]->sw_if_index,
                                          ~0, // invalid fib index
                                          1,
                                          NULL,
                                          FIB_ROUTE_PATH_FLAG_NONE);
    FIB_TEST((FIB_ENTRY_FLAG_ATTACHED  == fib_entry_get_flags(fei)),
	     "Flags set on adj-fib");
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "ADJ-FIB resolves via adj");

    fib_table_entry_path_remove(fib_index,
                                &pfx_11_11_11_11_s_32,
                                FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
                                &pfx_10_10_10_1_s_32.fp_addr,
                                tm->hw[0]->sw_if_index,
                                ~0, // invalid fib index
                                1,
                                FIB_ROUTE_PATH_FLAG_NONE);

    eth_addr[5] = 0xb2;

    ai_02 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
				VNET_LINK_IP4,
				&pfx_10_10_10_2_s_32.fp_addr,
				tm->hw[0]->sw_if_index);
    FIB_TEST((FIB_NODE_INDEX_INVALID != ai_02), "adj created");
    adj = adj_get(ai_02);
    FIB_TEST((IP_LOOKUP_NEXT_ARP == adj->lookup_next_index),
	     "adj is incomplete");
    FIB_TEST((0 == ip46_address_cmp(&pfx_10_10_10_2_s_32.fp_addr,
				    &adj->sub_type.nbr.next_hop)),
	      "adj nbr next-hop ok");

    adj_nbr_update_rewrite(ai_02, ADJ_NBR_REWRITE_FLAG_COMPLETE,
			   fib_test_build_rewrite(eth_addr));
    FIB_TEST((IP_LOOKUP_NEXT_REWRITE == adj->lookup_next_index),
	     "adj is complete");
    FIB_TEST((0 == ip46_address_cmp(&pfx_10_10_10_2_s_32.fp_addr,
				    &adj->sub_type.nbr.next_hop)),
	      "adj nbr next-hop ok");
    FIB_TEST((ai_01 != ai_02), "ADJs are different");

    fib_table_entry_update_one_path(fib_index,
				    &pfx_10_10_10_2_s_32,
				    FIB_SOURCE_ADJ,
				    FIB_ENTRY_FLAG_ATTACHED,
				    FIB_PROTOCOL_IP4,
				    &pfx_10_10_10_2_s_32.fp_addr,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_10_10_10_2_s_32);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_02 == ai), "ADJ-FIB resolves via adj");

    /*
     * +2 adj-fibs, and their non-shared path-lists
     */
    FIB_TEST((0 == fib_path_list_db_size()),   "path list DB is empty");
    FIB_TEST((PNBR+4 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+4 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * Add 2 routes via the first ADJ. Ensure path-list sharing.
     */
    fib_prefix_t pfx_1_1_1_1_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 1.1.1.1/32 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x01010101),
	},
    };

    fib_table_entry_path_add(fib_index,
			     &pfx_1_1_1_1_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_1,
			     tm->hw[0]->sw_if_index,
			     ~0, // invalid fib index
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "1.1.1.1 resolves via 10.10.10.1");

    /*
     * +1 entry and a shared path-list
     */
    FIB_TEST((1 == fib_path_list_db_size()),   "path list DB is empty");
    FIB_TEST((PNBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+5 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /* 1.1.2.0/24 */
    fib_prefix_t pfx_1_1_2_0_s_24 = {
	.fp_len = 24,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x01010200),
	}
    };

    fib_table_entry_path_add(fib_index,
			     &pfx_1_1_2_0_s_24,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_1,
			     tm->hw[0]->sw_if_index,
			     ~0, // invalid fib index
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup(fib_index, &pfx_1_1_2_0_s_24);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "1.1.2.0/24 resolves via 10.10.10.1");

    /*
     * +1 entry only
     */
    FIB_TEST((1 == fib_path_list_db_size()),   "path list DB is empty");
    FIB_TEST((PNBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+6 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * modify 1.1.2.0/24 to use multipath.
     */
    fib_table_entry_path_add(fib_index,
			     &pfx_1_1_2_0_s_24,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_2,
			     tm->hw[0]->sw_if_index,
			     ~0, // invalid fib index
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup(fib_index, &pfx_1_1_2_0_s_24);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				    1, tm->hw[0]->sw_if_index),
	     "RPF list for 1.1.2.0/24 contains both adjs");

    dpo1 = load_balance_get_bucket(dpo->dpoi_index, 0);
    FIB_TEST(DPO_ADJACENCY == dpo1->dpoi_type, "type is %d", dpo1->dpoi_type);
    FIB_TEST((ai_01 == dpo1->dpoi_index),
	     "1.1.2.0/24 bucket 0 resolves via 10.10.10.1 (%d=%d)",
             ai_01, dpo1->dpoi_index);

    dpo1 = load_balance_get_bucket(dpo->dpoi_index, 1);
    FIB_TEST(DPO_ADJACENCY == dpo1->dpoi_type, "type is %d", dpo1->dpoi_type);
    FIB_TEST((ai_02 == dpo1->dpoi_index),
	     "1.1.2.0/24 bucket 1 resolves via 10.10.10.2");

    /*
     * +1 shared-pathlist
     */
    FIB_TEST((2 == fib_path_list_db_size()),   "path list DB is empty");
    FIB_TEST((PNBR+6 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+6 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * revert the modify
     */
    fib_table_entry_path_remove(fib_index,
				&pfx_1_1_2_0_s_24,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&nh_10_10_10_2,
				tm->hw[0]->sw_if_index,
				~0,
				1,
				FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup(fib_index, &pfx_1_1_2_0_s_24);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				   1, tm->hw[0]->sw_if_index),
	     "RPF list for 1.1.2.0/24 contains one adj");

    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "1.1.2.0/24 resolves via 10.10.10.1");

    /*
     * -1 shared path-list (the modify is reverted)
     */
    FIB_TEST((1 == fib_path_list_db_size()),   "path list DB is %d",
	     fib_path_list_db_size());
    FIB_TEST((PNBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+6 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * Add 2 recursive routes:
     *   100.100.100.100/32 via 1.1.1.1/32  => the via entry is installed.
     *   100.100.100.101/32 via 1.1.1.1/32  => the via entry is installed.
     */
    fib_prefix_t bgp_100_pfx = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 100.100.100.100/32 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x64646464),
	},
    };
    /* via 1.1.1.1 */
    ip46_address_t nh_1_1_1_1 = {
	.ip4.as_u32 = clib_host_to_net_u32(0x01010101),
    };

    fei = fib_table_entry_path_add(fib_index,
				   &bgp_100_pfx,
				   FIB_SOURCE_API,
				   FIB_ENTRY_FLAG_NONE,
				   FIB_PROTOCOL_IP4,
				   &nh_1_1_1_1,
				   ~0, // no index provided.
				   fib_index, // nexthop in same fib as route
				   1,
				   NULL,
				   FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST_REC_FORW(&bgp_100_pfx, &pfx_1_1_1_1_s_32, 0);
    FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 1,
				    tm->hw[0]->sw_if_index),
	     "RPF list for adj-fib contains adj");

    /*
     * +1 entry and +1 shared-path-list
     */
    FIB_TEST((2  == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNBR+6 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+7 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    fib_prefix_t bgp_101_pfx = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 100.100.100.101/32 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x64646465),
	},
    };

    fib_table_entry_path_add(fib_index,
			     &bgp_101_pfx,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_1_1_1_1,
			     ~0, // no index provided.
			     fib_index, // nexthop in same fib as route
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST_REC_FORW(&bgp_101_pfx, &pfx_1_1_1_1_s_32, 0);
    FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 1,
				    tm->hw[0]->sw_if_index),
	     "RPF list for adj-fib contains adj");

    /*
     * +1 entry, but the recursive path-list is shared.
     */
    FIB_TEST((2  == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNBR+6 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+8 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * An EXCLUSIVE route; one where the user (me) provides the exclusive
     * adjacency through which the route will resolve
     */
    fib_prefix_t ex_pfx = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 4.4.4.4/32 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x04040404),
	},
    };

    fib_table_entry_special_add(fib_index,
				&ex_pfx,
				FIB_SOURCE_SPECIAL,
				FIB_ENTRY_FLAG_EXCLUSIVE,
				locked_ai);
    fei = fib_table_lookup_exact_match(fib_index, &ex_pfx);
    FIB_TEST((ai == fib_entry_get_adj(fei)),
	     "Exclusive route links to user adj");

    fib_table_entry_special_remove(fib_index,
				   &ex_pfx,
				   FIB_SOURCE_SPECIAL);
    FIB_TEST(FIB_NODE_INDEX_INVALID ==
	     fib_table_lookup_exact_match(fib_index, &ex_pfx),
	     "Exclusive reoute removed");

    /*
     * An EXCLUSIVE route; one where the user (me) provides the exclusive
     * DPO through which the route will resolve
     */
    dpo_id_t ex_dpo = DPO_INVALID;

    lookup_dpo_add_or_lock_w_fib_index(fib_index,
                                       DPO_PROTO_IP4,
                                       LOOKUP_INPUT_DST_ADDR,
                                       LOOKUP_TABLE_FROM_CONFIG,
                                       &ex_dpo);

    fib_table_entry_special_dpo_add(fib_index,
                                    &ex_pfx,
                                    FIB_SOURCE_SPECIAL,
                                    FIB_ENTRY_FLAG_EXCLUSIVE,
                                    &ex_dpo);
    fei = fib_table_lookup_exact_match(fib_index, &ex_pfx);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(!dpo_cmp(&ex_dpo, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "exclusive remote uses lookup DPO");

    /*
     * update the exclusive to use a different DPO
     */
    ip_null_dpo_add_and_lock(DPO_PROTO_IP4,
			     IP_NULL_ACTION_SEND_ICMP_UNREACH,
			     &ex_dpo);
    fib_table_entry_special_dpo_update(fib_index,
				       &ex_pfx,
				       FIB_SOURCE_SPECIAL,
				       FIB_ENTRY_FLAG_EXCLUSIVE,
				       &ex_dpo);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(!dpo_cmp(&ex_dpo, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "exclusive remote uses now uses NULL DPO");

    fib_table_entry_special_remove(fib_index,
				   &ex_pfx,
				   FIB_SOURCE_SPECIAL);
    FIB_TEST(FIB_NODE_INDEX_INVALID ==
	     fib_table_lookup_exact_match(fib_index, &ex_pfx),
	     "Exclusive reoute removed");
    dpo_reset(&ex_dpo);

    /*
     * Add a recursive route:
     *   200.200.200.200/32 via 1.1.1.2/32  => the via entry is NOT installed.
     */
    fib_prefix_t bgp_200_pfx = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 200.200.200.200/32 */
	    .ip4.as_u32 = clib_host_to_net_u32(0xc8c8c8c8),
	},
    };
    /* via 1.1.1.2 */
    fib_prefix_t pfx_1_1_1_2_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x01010102),
	},
    };

    fib_table_entry_path_add(fib_index,
			     &bgp_200_pfx,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &pfx_1_1_1_2_s_32.fp_addr,
			     ~0, // no index provided.
			     fib_index, // nexthop in same fib as route
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);

    /*
     * the adj should be recursive via drop, since the route resolves via
     * the default route, which is itself a DROP 
     */
    fei = fib_table_lookup(fib_index, &pfx_1_1_1_2_s_32);
    dpo1 = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(load_balance_is_drop(dpo1), "1.1.1.2/32 is drop");
    FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 0),
	     "RPF list for 1.1.1.2/32 contains 0 adjs");

    /*
     * +2 entries and +1 shared path-list
     */
    FIB_TEST((3  == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNBR+7 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+10 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * Unequal Cost load-balance. 3:1 ratio. Fits in a 4 bucket LB.
     * The paths are sorted by NH first; in this case the path with the
     * greater weight is first in the set. This ordering tests the RPF
     * sort|uniq logic.
     */
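    /*
     * (Bucket math: the weights 3:1 sum to 4, so a 4-bucket LB represents
     * the ratio exactly - 3 buckets via the heavier path, 1 via the other.)
     */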
    fib_prefix_t pfx_1_2_3_4_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x01020304),
	},
    };
    fib_table_entry_path_add(fib_index,
			     &pfx_1_2_3_4_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
                             &nh_10_10_10_1,
                             tm->hw[0]->sw_if_index,
                             ~0,
                             1,
                             NULL,
                             FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_entry_path_add(fib_index,
                                   &pfx_1_2_3_4_s_32,
                                   FIB_SOURCE_API,
                                   FIB_ENTRY_FLAG_NONE,
				   FIB_PROTOCOL_IP4,
                                   &nh_12_12_12_12,
                                   tm->hw[1]->sw_if_index,
                                   ~0,
                                   3,
                                   NULL,
                                   FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "1.2.3.4/32 presnet");
    dpo = fib_entry_contribute_ip_forwarding(fei);
    lb = load_balance_get(dpo->dpoi_index);
    FIB_TEST((lb->lb_n_buckets == 4),
             "1.2.3.4/32 LB has %d bucket",
             lb->lb_n_buckets);

    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 0, ai_12_12_12_12);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 1, ai_12_12_12_12);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 2, ai_12_12_12_12);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 3, ai_01);

    FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 2,
				    tm->hw[0]->sw_if_index,
				    tm->hw[1]->sw_if_index),
	     "RPF list for 1.2.3.4/32 contains both adjs");


    /*
     * Unequal Cost load-balance. 4:1 ratio.
     *  Fits in a 16 bucket LB with ratio 13:3.
     */
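    /*
     * (Bucket math: 4:1 sums to 5, which does not divide a power-of-two
     * bucket count evenly. With 16 buckets, 16*4/5 = 12.8 and 16*1/5 = 3.2,
     * which VPP realises as 13:3.)
     */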
    fib_prefix_t pfx_1_2_3_5_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x01020305),
	},
    };
    fib_table_entry_path_add(fib_index,
			     &pfx_1_2_3_5_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
                             &nh_12_12_12_12,
                             tm->hw[1]->sw_if_index,
                             ~0,
                             1,
                             NULL,
                             FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_entry_path_add(fib_index,
                                   &pfx_1_2_3_5_s_32,
                                   FIB_SOURCE_API,
                                   FIB_ENTRY_FLAG_NONE,
				   FIB_PROTOCOL_IP4,
                                   &nh_10_10_10_1,
                                   tm->hw[0]->sw_if_index,
                                   ~0,
                                   4,
                                   NULL,
                                   FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "1.2.3.5/32 presnet");
    dpo = fib_entry_contribute_ip_forwarding(fei);
    lb = load_balance_get(dpo->dpoi_index);
    FIB_TEST((lb->lb_n_buckets == 16),
             "1.2.3.5/32 LB has %d bucket",
             lb->lb_n_buckets);

    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 0, ai_01);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 1, ai_01);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 2, ai_01);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 3, ai_01);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 4, ai_01);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 5, ai_01);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 6, ai_01);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 7, ai_01);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 8, ai_01);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 9, ai_01);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 10, ai_01);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 11, ai_01);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 12, ai_01);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 13, ai_12_12_12_12);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 14, ai_12_12_12_12);
    FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 15, ai_12_12_12_12);

    FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 2,
				    tm->hw[0]->sw_if_index,
				    tm->hw[1]->sw_if_index),
	     "RPF list for 1.2.3.4/32 contains both adjs");

    /*
     * Test UCMP with a large weight skew - this produces load-balance
     * objects with large numbers of buckets to accommodate the skew.
     * By updating said load-balances we are also testing the LB in-place
     * modify code when the number of buckets is large.
     */
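    /*
     * (Bucket math, as I read the expectations below: a weight of 0 is
     * treated as 1, and each path always gets at least one bucket. So
     * 100:1 across a 64-bucket LB (64*100/101 ~= 63.4) is realised as
     * 63:1.)
     */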
    fib_prefix_t pfx_6_6_6_6_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 6.6.6.6/32 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x06060606),
	},
    };
    fib_test_lb_bucket_t ip_6_6_6_6_o_10_10_10_1 = {
	.type = FT_LB_ADJ,
	.adj = {
	    .adj = ai_01,
	},
    };
    fib_test_lb_bucket_t ip_6_6_6_6_o_10_10_10_2 = {
        .type = FT_LB_ADJ,
        .adj = {
            .adj = ai_02,
        },
    };
    fib_test_lb_bucket_t ip_6_6_6_6_o_12_12_12_12 = {
        .type = FT_LB_ADJ,
        .adj = {
            .adj = ai_12_12_12_12,
        },
    };
    fib_table_entry_update_one_path(fib_index,
				    &pfx_6_6_6_6_s_32,
				    FIB_SOURCE_API,
				    FIB_ENTRY_FLAG_NONE,
				    FIB_PROTOCOL_IP4,
				    &nh_10_10_10_1,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    0,  // zero weight
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_6_6_6_6_s_32);
    FIB_TEST(fib_test_validate_entry(fei,
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     1,
				     &ip_6_6_6_6_o_10_10_10_1),
	     "6.6.6.6/32 via 10.10.10.1");

    fib_table_entry_path_add(fib_index,
                             &pfx_6_6_6_6_s_32,
                             FIB_SOURCE_API,
                             FIB_ENTRY_FLAG_NONE,
                             FIB_PROTOCOL_IP4,
                             &nh_10_10_10_2,
                             tm->hw[0]->sw_if_index,
                             ~0, // invalid fib index
                             100,
                             NULL,
                             FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_6_6_6_6_s_32);
    FIB_TEST(fib_test_validate_entry(fei,
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     64,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_1),
	     "6.6.6.6/32 via 10.10.10.1 and 10.10.10.2 in 63:1 ratio");

    fib_table_entry_path_add(fib_index,
                             &pfx_6_6_6_6_s_32,
                             FIB_SOURCE_API,
                             FIB_ENTRY_FLAG_NONE,
                             FIB_PROTOCOL_IP4,
                             &nh_12_12_12_12,
                             tm->hw[1]->sw_if_index,
                             ~0, // invalid fib index
                             100,
                             NULL,
                             FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_6_6_6_6_s_32);
    FIB_TEST(fib_test_validate_entry(fei,
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     128,
				     &ip_6_6_6_6_o_10_10_10_1,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12,
				     &ip_6_6_6_6_o_12_12_12_12),
	     "6.6.6.6/32 via 10.10.10.1 and 10.10.10.2 in 63:1 ratio");

    fib_table_entry_path_remove(fib_index,
                                &pfx_6_6_6_6_s_32,
                                FIB_SOURCE_API,
                                FIB_PROTOCOL_IP4,
                                &nh_12_12_12_12,
                                tm->hw[1]->sw_if_index,
                                ~0, // invalid fib index
                                100,
                                FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_6_6_6_6_s_32);
    FIB_TEST(fib_test_validate_entry(fei,
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     64,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_2,
				     &ip_6_6_6_6_o_10_10_10_1),
	     "6.6.6.6/32 via 10.10.10.1 and 10.10.10.2 in 63:1 ratio");

    fib_table_entry_path_remove(fib_index,
                                &pfx_6_6_6_6_s_32,
                                FIB_SOURCE_API,
                                FIB_PROTOCOL_IP4,
                                &nh_10_10_10_2,
                                tm->hw[0]->sw_if_index,
                                ~0, // invalid fib index
                                100,
                                FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_6_6_6_6_s_32);
    FIB_TEST(fib_test_validate_entry(fei,
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     1,
				     &ip_6_6_6_6_o_10_10_10_1),
	     "6.6.6.6/32 via 10.10.10.1");

    fib_table_entry_delete(fib_index, &pfx_6_6_6_6_s_32, FIB_SOURCE_API);

    /*
     * A recursive via the two unequal cost entries
     */
    fib_prefix_t bgp_44_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 68.68.68.68/32 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x44444444),
	},
    };
    fei = fib_table_entry_path_add(fib_index,
                                   &bgp_44_s_32,
                                   FIB_SOURCE_API,
                                   FIB_ENTRY_FLAG_NONE,
				   FIB_PROTOCOL_IP4,
				   &pfx_1_2_3_4_s_32.fp_addr,
                                   ~0,
                                   fib_index,
                                   1,
                                   NULL,
                                   FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_entry_path_add(fib_index,
                                   &bgp_44_s_32,
                                   FIB_SOURCE_API,
                                   FIB_ENTRY_FLAG_NONE,
				   FIB_PROTOCOL_IP4,
				   &pfx_1_2_3_5_s_32.fp_addr,
                                   ~0,
                                   fib_index,
                                   1,
                                   NULL,
                                   FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST_REC_FORW(&bgp_44_s_32, &pfx_1_2_3_4_s_32, 0);
    FIB_TEST_REC_FORW(&bgp_44_s_32, &pfx_1_2_3_5_s_32, 1);
    FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 2,
				    tm->hw[0]->sw_if_index,
				    tm->hw[1]->sw_if_index),
	     "RPF list for 1.2.3.4/32 contains both adjs");

    /*
     * test the uRPF check functions
     */
    dpo_id_t dpo_44 = DPO_INVALID;
    index_t urpfi;

    fib_entry_contribute_forwarding(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, &dpo_44);
    urpfi = load_balance_get_urpf(dpo_44.dpoi_index);

    FIB_TEST(fib_urpf_check(urpfi, tm->hw[0]->sw_if_index),
	     "uRPF check for 68.68.68.68/32 on %d OK",
	     tm->hw[0]->sw_if_index);
    FIB_TEST(fib_urpf_check(urpfi, tm->hw[1]->sw_if_index),
	     "uRPF check for 68.68.68.68/32 on %d OK",
	     tm->hw[1]->sw_if_index);
    FIB_TEST(!fib_urpf_check(urpfi, 99),
	     "uRPF check for 68.68.68.68/32 on 99 not-OK",
	     99);
    dpo_reset(&dpo_44);

    fib_table_entry_delete(fib_index,
                           &bgp_44_s_32,
                           FIB_SOURCE_API);
    fib_table_entry_delete(fib_index,
                           &pfx_1_2_3_5_s_32,
                           FIB_SOURCE_API);
    fib_table_entry_delete(fib_index,
                           &pfx_1_2_3_4_s_32,
                           FIB_SOURCE_API);

    /*
     * Add a recursive route:
     *   200.200.200.201/32 via 1.1.1.200/32  => the via entry is NOT installed.
     */
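    /*
     * since 1.1.1.200/32 does not yet exist, resolving the recursive route
     * creates it as an RR (recursive-resolution) sourced entry that
     * inherits its forwarding from whatever covers it.
     */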
    fib_prefix_t bgp_201_pfx = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 200.200.200.201/32 */
	    .ip4.as_u32 = clib_host_to_net_u32(0xc8c8c8c9),
	},
    };
    /* via 1.1.1.200 */
    fib_prefix_t pfx_1_1_1_200_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x010101c8),
	},
    };

    fib_table_entry_path_add(fib_index,
			     &bgp_201_pfx,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &pfx_1_1_1_200_s_32.fp_addr,
			     ~0, // no index provided.
			     fib_index, // nexthop in same fib as route
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);

    fei = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_200_s_32);
    FIB_TEST((FIB_ENTRY_FLAG_NONE == fib_entry_get_flags(fei)),
	     "Flags set on RR via non-attached");
    FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 0),
	     "RPF list for BGP route empty");

    /*
     * +2 entry (BGP & RR) and +1 shared-path-list
     */
    FIB_TEST((4  == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNBR+8 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+12 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * insert a route that covers the missing 1.1.1.2/32. we expect
     * 200.200.200.200/32 and 200.200.200.201/32 to resolve through it.
     */
    fib_prefix_t pfx_1_1_1_0_s_24 = {
	.fp_len = 24,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 1.1.1.0/24 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x01010100),
	},
    };

    fib_table_entry_path_add(fib_index,
			     &pfx_1_1_1_0_s_24,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_1,
			     tm->hw[0]->sw_if_index,
			     ~0, // invalid fib index
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup(fib_index, &pfx_1_1_1_0_s_24);
    dpo1 = fib_entry_contribute_ip_forwarding(fei);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "1.1.1.0/24 resolves via 10.10.10.1");
    fei = fib_table_lookup(fib_index, &pfx_1_1_1_2_s_32);
    dpo1 = fib_entry_contribute_ip_forwarding(fei);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "1.1.1.2/32 resolves via 10.10.10.1");
    fei = fib_table_lookup(fib_index, &pfx_1_1_1_200_s_32);
    dpo1 = fib_entry_contribute_ip_forwarding(fei);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "1.1.1.200/24 resolves via 10.10.10.1");

    /*
     * +1 entry. 1.1.1.1/32 already uses 10.10.10.1 so no new path-list
     */
    FIB_TEST((4  == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNBR+8 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+13 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * the recursive adj for 200.200.200.200 should be updated.
     */
    FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
    FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);
    fei = fib_table_lookup(fib_index, &bgp_200_pfx);
    FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 1,
				    tm->hw[0]->sw_if_index),
	     "RPF list for BGP route has itf index 0");

    /*
     * insert a more specific route than 1.1.1.0/24 that also covers the
     * missing 1.1.1.2/32, but not 1.1.1.200/32. we expect
     * 200.200.200.200 to resolve through it.
     */
    fib_prefix_t pfx_1_1_1_0_s_28 = {
	.fp_len = 28,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 1.1.1.0/28 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x01010100),
	},
    };

    fib_table_entry_path_add(fib_index,
			     &pfx_1_1_1_0_s_28,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_2,
			     tm->hw[0]->sw_if_index,
			     ~0, // invalid fib index
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup(fib_index, &pfx_1_1_1_0_s_28);
    dpo2 = fib_entry_contribute_ip_forwarding(fei);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_02 == ai), "1.1.1.0/24 resolves via 10.10.10.2");

    /*
     * +1 entry. +1 shared path-list
     */
    FIB_TEST((5  == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNBR+9 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+14 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * the recursive adj for 200.200.200.200 should be updated.
     * 200.200.200.201 remains unchanged.
     */
    FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
    FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);

    /*
     * remove this /28. 200.200.200.200/32 should revert back to via 1.1.1.0/24
     */
    fib_table_entry_path_remove(fib_index,
				&pfx_1_1_1_0_s_28,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&nh_10_10_10_2,
				tm->hw[0]->sw_if_index,
				~0,
				1,
				FIB_ROUTE_PATH_FLAG_NONE);
    FIB_TEST((fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_0_s_28) == 
	      FIB_NODE_INDEX_INVALID),
	     "1.1.1.0/28 removed");
    FIB_TEST((fib_table_lookup(fib_index, &pfx_1_1_1_0_s_28) == 
	      fib_table_lookup(fib_index, &pfx_1_1_1_0_s_24)),
	     "1.1.1.0/28 lookup via /24");
    FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
    FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);

    /*
     * -1 entry. -1 shared path-list
     */
    FIB_TEST((4  == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNBR+8 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+13 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * remove 1.1.1.0/24. 200.200.200.200/32 should revert back to via 0.0.0.0/0
     */
    fib_table_entry_path_remove(fib_index,
				&pfx_1_1_1_0_s_24,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&nh_10_10_10_1,
				tm->hw[0]->sw_if_index,
				~0,
				1,
				FIB_ROUTE_PATH_FLAG_NONE);
    FIB_TEST((fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_0_s_24) == 
	      FIB_NODE_INDEX_INVALID),
	     "1.1.1.0/24 removed");

    fei = fib_table_lookup(fib_index, &pfx_1_1_1_2_s_32);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "1.1.1.2/32 route is DROP");
    fei = fib_table_lookup(fib_index, &pfx_1_1_1_200_s_32);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "1.1.1.200/32 route is DROP");

    FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
    FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);

    /*
     * -1 entry
     */
    FIB_TEST((4  == fib_path_list_db_size()),   "path list DB population:%d",
	fib_path_list_db_size());
    FIB_TEST((PNBR+8 == fib_path_list_pool_size()), "path list pool size is %d",
	fib_path_list_pool_size());
    FIB_TEST((ENBR+12 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * insert the missing 1.1.1.2/32
     */
    fei = fib_table_entry_path_add(fib_index,
	                           &pfx_1_1_1_2_s_32,
				   FIB_SOURCE_API,
				   FIB_ENTRY_FLAG_NONE,
				   FIB_PROTOCOL_IP4,
				   &nh_10_10_10_1,
				   tm->hw[0]->sw_if_index,
				   ~0, // invalid fib index
				   1,
				   NULL,
				   FIB_ROUTE_PATH_FLAG_NONE);
    dpo1 = fib_entry_contribute_ip_forwarding(fei);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai = ai_01), "1.1.1.2/32 resolves via 10.10.10.1");

    FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
    FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);

    /*
     * no change. 1.1.1.2/32 was already present, RR sourced.
     */
    FIB_TEST((4  == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNBR+8 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+12 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * remove 200.200.200.201/32 which does not have a valid via FIB
     */
    fib_table_entry_path_remove(fib_index,
				&bgp_201_pfx,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&pfx_1_1_1_200_s_32.fp_addr,
				~0, // no index provided.
				fib_index,
				1,
				FIB_ROUTE_PATH_FLAG_NONE);

    /*
     * -2 entries (BGP and RR). -1 shared path-list;
     */
    FIB_TEST((fib_table_lookup_exact_match(fib_index, &bgp_201_pfx) == 
	      FIB_NODE_INDEX_INVALID),
	     "200.200.200.201/32 removed");
    FIB_TEST((fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_200_s_32) == 
	      FIB_NODE_INDEX_INVALID),
	     "1.1.1.200/32 removed");

    FIB_TEST((3  == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNBR+7 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+10 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * remove 200.200.200.200/32 which does have a valid via FIB
     */
    fib_table_entry_path_remove(fib_index,
				&bgp_200_pfx,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&pfx_1_1_1_2_s_32.fp_addr,
				~0, // no index provided.
				fib_index,
				1,
				FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST((fib_table_lookup_exact_match(fib_index, &bgp_200_pfx) == 
	      FIB_NODE_INDEX_INVALID),
	     "200.200.200.200/32 removed");
    FIB_TEST((fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_2_s_32) != 
	      FIB_NODE_INDEX_INVALID),
	     "1.1.1.2/32 still present");

    /*
     * -1 entry (BGP, the RR source is also API sourced). -1 shared path-list;
     */
    FIB_TEST((2  == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNBR+6 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+9 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * A recursive prefix that has a 2-path load-balance.
     * It also shares a next-hop with other BGP prefixes and hence
     * tests the ref counting of RR-sourced prefixes and the 2-level LB.
     */
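    /*
     * in a 2-level LB the recursive entry's load-balance selects a
     * via-entry (level 1) and each bucket then forwards through that
     * via-entry's own load-balance (level 2); the dpo_cmp checks below
     * verify this bucket-to-via-entry wiring.
     */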
    const fib_prefix_t bgp_102 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 100.100.100.102/32 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x64646466),
	},
    };
    fib_table_entry_path_add(fib_index,
			     &bgp_102,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &pfx_1_1_1_1_s_32.fp_addr,
			     ~0, // no index provided.
			     fib_index, // same as route
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fib_table_entry_path_add(fib_index,
			     &bgp_102,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &pfx_1_1_1_2_s_32.fp_addr,
			     ~0, // no index provided.
			     fib_index, // same as route's FIB
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(fib_index, &bgp_102);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "100.100.100.102/32 presnet");
    dpo = fib_entry_contribute_ip_forwarding(fei);

    fei  = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_1_s_32);
    dpo1 = fib_entry_contribute_ip_forwarding(fei);
    fei  = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_2_s_32);
    dpo2 = fib_entry_contribute_ip_forwarding(fei);

    lb = load_balance_get(dpo->dpoi_index);
    FIB_TEST((lb->lb_n_buckets == 2), "Recursive LB has %d bucket", lb->lb_n_buckets);
    FIB_TEST(!dpo_cmp(dpo1, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "First via 10.10.10.1");
    FIB_TEST(!dpo_cmp(dpo2, load_balance_get_bucket(dpo->dpoi_index, 1)),
	     "Second via 10.10.10.1");

    fib_table_entry_path_remove(fib_index,
				&bgp_102,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&pfx_1_1_1_1_s_32.fp_addr,
				~0, // no index provided.
				fib_index, // same as route's FIB
				1,
				FIB_ROUTE_PATH_FLAG_NONE);
    fib_table_entry_path_remove(fib_index,
				&bgp_102,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&pfx_1_1_1_2_s_32.fp_addr,
				~0, // no index provided.
				fib_index, // same as route's FIB
				1,
				FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(fib_index, &bgp_102);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "100.100.100.102/32 removed");

    /*
     * remove the remaining recursives
     */
    fib_table_entry_path_remove(fib_index,
				&bgp_100_pfx,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&pfx_1_1_1_1_s_32.fp_addr,
				~0, // no index provided.
				fib_index, // same as route's FIB
				1,
				FIB_ROUTE_PATH_FLAG_NONE);
    fib_table_entry_path_remove(fib_index,
				&bgp_101_pfx,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&pfx_1_1_1_1_s_32.fp_addr,
				~0, // no index provided.
				fib_index, // same as route's FIB
				1,
				FIB_ROUTE_PATH_FLAG_NONE);
    FIB_TEST((fib_table_lookup_exact_match(fib_index, &bgp_100_pfx) == 
	      FIB_NODE_INDEX_INVALID),
	     "100.100.100.100/32 removed");
    FIB_TEST((fib_table_lookup_exact_match(fib_index, &bgp_101_pfx) == 
	      FIB_NODE_INDEX_INVALID),
	     "100.100.100.101/32 removed");

    /*
     * -2 entries (2*BGP, the RR source is also API sourced). -1 shared path-list;
     */
    FIB_TEST((1  == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+7 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * Add a recursive route via a connected cover, using an adj-fib that does exist
     */
    fib_table_entry_path_add(fib_index,
			     &bgp_200_pfx,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_1,
			     ~0, // no index provided.
			     fib_index, // Same as route's FIB
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);

    /*
     * +1 entry. +1 shared path-list (recursive via 10.10.10.1)
     */
    FIB_TEST((2  == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNBR+6 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+8 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx);
    dpo = fib_entry_contribute_ip_forwarding(fei);

    fei  = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32);
    dpo1 = fib_entry_contribute_ip_forwarding(fei);

    FIB_TEST(!dpo_cmp(dpo1, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "200.200.200.200/32 is recursive via adj for 10.10.10.1");

    FIB_TEST((FIB_ENTRY_FLAG_ATTACHED  == fib_entry_get_flags(fei)),
	     "Flags set on RR via existing attached");

    /*
     * Add a recursive route via a connected cover, using an adj-fib that does
     * not exist
     */
    ip46_address_t nh_10_10_10_3 = {
	.ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a03),
    };
    fib_prefix_t pfx_10_10_10_3 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = nh_10_10_10_3,
    };

    fib_table_entry_path_add(fib_index,
			     &bgp_201_pfx,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_3,
			     ~0, // no index provided.
			     fib_index,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);

    /*
     * +2 entries (BGP and RR). +1 shared path-list (recursive via 10.10.10.3) and
     * one unshared non-recursive via 10.10.10.3
     */
    FIB_TEST((3  == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNBR+8 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENBR+10 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    ai_03 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
				VNET_LINK_IP4,
				&nh_10_10_10_3,
				tm->hw[0]->sw_if_index);

    fei  = fib_table_lookup_exact_match(fib_index, &bgp_201_pfx);
    dpo  = fib_entry_contribute_ip_forwarding(fei);
    fei  = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_3);
    dpo1 = fib_entry_contribute_ip_forwarding(fei);

    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai == ai_03), "adj for 10.10.10.3/32 is via adj for 10.10.10.3");
    FIB_TEST(((FIB_ENTRY_FLAG_ATTACHED | FIB_ENTRY_FLAG_CONNECTED) ==
	      fib_entry_get_flags(fei)),
	     "Flags set on RR via non-existing attached");

    FIB_TEST(!dpo_cmp(dpo1, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "adj for 200.200.200.200/32 is recursive via adj for 10.10.10.3");

    adj_unlock(ai_03);

    /*
     * remove the recursives
     */
    fib_table_entry_path_remove(fib_index,
    				&bgp_200_pfx,
    				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
    				&nh_10_10_10_1,
    				~0, // no index provided.
				fib_index, // same as route's FIB
				1,
				FIB_ROUTE_PATH_FLAG_NONE);
    fib_table_entry_path_remove(fib_index,
    				&bgp_201_pfx,
    				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
    				&nh_10_10_10_3,
    				~0, // no index provided.
				fib_index, // same as route's FIB
				1,
				FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST((fib_table_lookup_exact_match(fib_index, &bgp_201_pfx) ==
    	      FIB_NODE_INDEX_INVALID),
    	     "200.200.200.201/32 removed");
    FIB_TEST((fib_table_lookup_exact_match(fib_index, &bgp_200_pfx) ==
    	      FIB_NODE_INDEX_INVALID),
    	     "200.200.200.200/32 removed");
    FIB_TEST((fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_3) ==
    	      FIB_NODE_INDEX_INVALID),
    	     "10.10.10.3/32 removed");

    /*
     * -3 entries (2*BGP and RR). -2 shared path-list (recursive via 10.10.10.3 &
     *  10.10.10.1) and one unshared non-recursive via 10.10.10.3
     */
    FIB_TEST((1  == fib_path_list_db_size()),   "path list DB population:%d",
    	     fib_path_list_db_size());
    FIB_TEST((PNBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
    	     fib_path_list_pool_size());
    FIB_TEST((ENBR+7 == fib_entry_pool_size()), "entry pool size is %d",
    	     fib_entry_pool_size());


    /*
     * RECURSION LOOPS
     *  Add 5.5.5.5/32 -> 5.5.5.6/32 -> 5.5.5.7/32 -> 5.5.5.5/32
     */
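    /*
     * while the loop is unbroken none of the three entries has a resolved
     * path, so each should contribute the drop DPO; the checks below
     * verify this for all of them.
     */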
    fib_prefix_t pfx_5_5_5_5_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x05050505),
	},
    };
    fib_prefix_t pfx_5_5_5_6_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x05050506),
	},
    };
    fib_prefix_t pfx_5_5_5_7_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x05050507),
	},
    };

    fib_table_entry_path_add(fib_index,
			     &pfx_5_5_5_5_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &pfx_5_5_5_6_s_32.fp_addr,
			     ~0, // no index provided.
			     fib_index,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fib_table_entry_path_add(fib_index,
			     &pfx_5_5_5_6_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &pfx_5_5_5_7_s_32.fp_addr,
			     ~0, // no index provided.
			     fib_index,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fib_table_entry_path_add(fib_index,
			     &pfx_5_5_5_7_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &pfx_5_5_5_5_s_32.fp_addr,
			     ~0, // no index provided.
			     fib_index,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    /*
     * +3 entries, +3 shared path-lists
     */
    FIB_TEST((4  == fib_path_list_db_size()),   "path list DB population:%d",
    	     fib_path_list_db_size());
    FIB_TEST((PNBR+8 == fib_path_list_pool_size()), "path list pool size is %d",
    	     fib_path_list_pool_size());
    FIB_TEST((ENBR+10 == fib_entry_pool_size()), "entry pool size is %d",
    	     fib_entry_pool_size());

    /*
     * All the entries have only looped paths, so they are all drop
     */
    fei = fib_table_lookup(fib_index, &pfx_5_5_5_7_s_32);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "LB for 5.5.5.7/32 is via adj for DROP");
    fei = fib_table_lookup(fib_index, &pfx_5_5_5_5_s_32);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "LB for 5.5.5.5/32 is via adj for DROP");
    fei = fib_table_lookup(fib_index, &pfx_5_5_5_6_s_32);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "LB for 5.5.5.6/32 is via adj for DROP");

    /*
     * provide 5.5.5.6/32 with an alternate path.
     * only 5.5.5.6/32 can then forward via this path; the others
     * still drop since the loop is still present.
     */
    fib_table_entry_path_add(fib_index,
			     &pfx_5_5_5_6_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_1,
			     tm->hw[0]->sw_if_index,
			     ~0,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);


    fei = fib_table_lookup(fib_index, &pfx_5_5_5_6_s_32);
    dpo1 = fib_entry_contribute_ip_forwarding(fei);

    lb = load_balance_get(dpo1->dpoi_index);
    FIB_TEST((lb->lb_n_buckets == 1), "5.5.5.6 LB has %d bucket", lb->lb_n_buckets);

    dpo2 = load_balance_get_bucket(dpo1->dpoi_index, 0);
    FIB_TEST(DPO_ADJACENCY == dpo2->dpoi_type, "type is %d", dpo2->dpoi_type);
    FIB_TEST((ai_01 == dpo2->dpoi_index),
	     "5.5.5.6 bucket 0 resolves via 10.10.10.2");

    fei = fib_table_lookup(fib_index, &pfx_5_5_5_7_s_32);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "LB for 5.5.5.7/32 is via adj for DROP");
    fei = fib_table_lookup(fib_index, &pfx_5_5_5_5_s_32);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "LB for 5.5.5.5/32 is via adj for DROP");

    /*
     * remove the alternate path for 5.5.5.6/32
     * back to all drop
     */
    fib_table_entry_path_remove(fib_index,
				&pfx_5_5_5_6_s_32,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&nh_10_10_10_1,
				tm->hw[0]->sw_if_index,
				~0,
				1,
				FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_5_5_5_7_s_32);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "LB for 5.5.5.7/32 is via adj for DROP");
    fei = fib_table_lookup(fib_index, &pfx_5_5_5_5_s_32);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "LB for 5.5.5.5/32 is via adj for DROP");
    fei = fib_table_lookup(fib_index, &pfx_5_5_5_6_s_32);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "LB for 5.5.5.6/32 is via adj for DROP");

    /*
     * break the loop by giving 5.5.5.5/32 a new path.
     * expect all three prefixes to forward via it.
     */
    fib_table_entry_update_one_path(fib_index,
				    &pfx_5_5_5_5_s_32,
				    FIB_SOURCE_API,
				    FIB_ENTRY_FLAG_NONE,
				    FIB_PROTOCOL_IP4,
				    &nh_10_10_10_1,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_5_5_5_5_s_32);
    dpo1 = fib_entry_contribute_ip_forwarding(fei);
    lb = load_balance_get(dpo1->dpoi_index);
    FIB_TEST((lb->lb_n_buckets == 1), "5.5.5.5 LB has %d bucket", lb->lb_n_buckets);

    dpo2 = load_balance_get_bucket(dpo1->dpoi_index, 0);
    FIB_TEST(DPO_ADJACENCY == dpo2->dpoi_type, "type is %d", dpo2->dpoi_type);
    FIB_TEST((ai_01 == dpo2->dpoi_index),
	     "5.5.5.5 bucket 0 resolves via 10.10.10.2");

    fei = fib_table_lookup_exact_match(fib_index, &pfx_5_5_5_7_s_32);
    dpo2 = fib_entry_contribute_ip_forwarding(fei);

    lb = load_balance_get(dpo2->dpoi_index);
    FIB_TEST((lb->lb_n_buckets == 1), "Recursive LB has %d bucket", lb->lb_n_buckets);
    FIB_TEST(!dpo_cmp(dpo1, load_balance_get_bucket(dpo2->dpoi_index, 0)),
	     "5.5.5.5.7 via 5.5.5.5");

    fei = fib_table_lookup_exact_match(fib_index, &pfx_5_5_5_6_s_32);
    dpo1 = fib_entry_contribute_ip_forwarding(fei);

    lb = load_balance_get(dpo1->dpoi_index);
    FIB_TEST((lb->lb_n_buckets == 1), "Recursive LB has %d bucket", lb->lb_n_buckets);
    FIB_TEST(!dpo_cmp(dpo2, load_balance_get_bucket(dpo1->dpoi_index, 0)),
	     "5.5.5.5.6 via 5.5.5.7");

    /*
     * revert to the loop, so we can remove the prefixes with
     * the loop intact
     */
    fib_table_entry_update_one_path(fib_index,
				    &pfx_5_5_5_5_s_32,
				    FIB_SOURCE_API,
				    FIB_ENTRY_FLAG_NONE,
				    FIB_PROTOCOL_IP4,
				    &pfx_5_5_5_6_s_32.fp_addr,
				    ~0, // no index provided.
				    fib_index,
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_5_5_5_7_s_32);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "LB for 5.5.5.7/32 is via adj for DROP");
    fei = fib_table_lookup(fib_index, &pfx_5_5_5_5_s_32);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "LB for 5.5.5.5/32 is via adj for DROP");
    fei = fib_table_lookup(fib_index, &pfx_5_5_5_6_s_32);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "LB for 5.5.5.6/32 is via adj for DROP");

    /*
     * remove all the 5.5.5.x/32 prefixes
     */
    fib_table_entry_path_remove(fib_index,
				&pfx_5_5_5_5_s_32,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&pfx_5_5_5_6_s_32.fp_addr,
				~0, // no index provided.
				fib_index, // same as route's FIB
				1,
				FIB_ROUTE_PATH_FLAG_NONE);
    fib_table_entry_path_remove(fib_index,
				&pfx_5_5_5_6_s_32,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&pfx_5_5_5_7_s_32.fp_addr,
				~0, // no index provided.
				fib_index, // same as route's FIB
				1,
				FIB_ROUTE_PATH_FLAG_NONE);
    fib_table_entry_path_remove(fib_index,
				&pfx_5_5_5_7_s_32,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&pfx_5_5_5_5_s_32.fp_addr,
				~0, // no index provided.
				fib_index, // same as route's FIB
				1,
				FIB_ROUTE_PATH_FLAG_NONE);
    fib_table_entry_path_remove(fib_index,
				&pfx_5_5_5_6_s_32,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&nh_10_10_10_2,
				~0, // no index provided.
				fib_index, // same as route's FIB
				1,
				FIB_ROUTE_PATH_FLAG_NONE);

    /*
     * -3 entries, -3 shared path-lists
     */
    FIB_TEST((1  == fib_path_list_db_size()),   "path list DB population:%d",
    	     fib_path_list_db_size());
    FIB_TEST((PNBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
    	     fib_path_list_pool_size());
    FIB_TEST((ENBR+7 == fib_entry_pool_size()), "entry pool size is %d",
    	     fib_entry_pool_size());

    /*
     * Single level loop 5.5.5.6/32 via 5.5.5.6/32
     */
    fib_table_entry_path_add(fib_index,
			     &pfx_5_5_5_6_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &pfx_5_5_5_6_s_32.fp_addr,
			     ~0, // no index provided.
			     fib_index,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup(fib_index, &pfx_5_5_5_6_s_32);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
	     "1-level 5.5.5.6/32 loop is via adj for DROP");
 
    fib_table_entry_path_remove(fib_index,
				&pfx_5_5_5_6_s_32,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&pfx_5_5_5_6_s_32.fp_addr,
				~0, // no index provided.
				fib_index, // same as route's FIB
				1,
				FIB_ROUTE_PATH_FLAG_NONE);
    FIB_TEST(FIB_NODE_INDEX_INVALID ==
	     fib_table_lookup_exact_match(fib_index, &pfx_5_5_5_6_s_32),
	     "1-level 5.5.5.6/32 loop is removed");

    /*
     * A recursive route whose next-hop is covered by the prefix.
     * This would mean the via-fib, which inherits forwarding from its
     * cover, picks up forwarding from the prefix, which is via the
     * via-fib, and we have a loop.
     */
    fib_prefix_t pfx_23_23_23_0_s_24 = {
	.fp_len = 24,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x17171700),
	},
    };
    fib_prefix_t pfx_23_23_23_23_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
            .ip4.as_u32 = clib_host_to_net_u32(0x17171717),
        },
    };
    fei = fib_table_entry_path_add(fib_index,
				   &pfx_23_23_23_0_s_24,
				   FIB_SOURCE_API,
				   FIB_ENTRY_FLAG_NONE,
				   FIB_PROTOCOL_IP4,
				   &pfx_23_23_23_23_s_32.fp_addr,
				   ~0, // recursive
				   fib_index,
				   1,
				   NULL,
				   FIB_ROUTE_PATH_FLAG_NONE);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(load_balance_is_drop(dpo),
	     "23.23.23.0/24 via covered is DROP");
    fib_table_entry_delete_index(fei, FIB_SOURCE_API);

    /*
     * add-remove test. no change.
     */
    FIB_TEST((1  == fib_path_list_db_size()),   "path list DB population:%d",
    	     fib_path_list_db_size());
    FIB_TEST((PNBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
    	     fib_path_list_pool_size());
    FIB_TEST((ENBR+7 == fib_entry_pool_size()), "entry pool size is %d",
    	     fib_entry_pool_size());

    /*
     * A recursive route with recursion constraints.
     *  200.200.200.200/32 via 1.1.1.1 is recursive, constrained to resolve via a host prefix
     */
    fib_table_entry_path_add(fib_index,
			     &bgp_200_pfx,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_1_1_1_1,
			     ~0,
			     fib_index,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_RESOLVE_VIA_HOST);

    fei = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_1_s_32);
    dpo2 = fib_entry_contribute_ip_forwarding(fei);

    fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx);
    dpo1 = fib_entry_contribute_ip_forwarding(fei);

    FIB_TEST(!dpo_cmp(dpo2, load_balance_get_bucket(dpo1->dpoi_index, 0)),
	     "adj for 200.200.200.200/32 is recursive via adj for 1.1.1.1");

    /*
     * save the load-balance. we expect it to be in-place modified
     */
    lb = load_balance_get(dpo1->dpoi_index);

    /*
     * add a covering prefix for the via fib that would otherwise serve
     * as the resolving route when the host is removed
     */
    fib_table_entry_path_add(fib_index,
			     &pfx_1_1_1_0_s_28,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_1,
			     tm->hw[0]->sw_if_index,
			     ~0, // invalid fib index
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_0_s_28);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai == ai_01),
	     "adj for 1.1.1.0/28 is via adj for 1.1.1.1");

    /*
     * remove the host via FIB - expect the BGP prefix to be drop
     */
    fib_table_entry_path_remove(fib_index,
				&pfx_1_1_1_1_s_32,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&nh_10_10_10_1,
				tm->hw[0]->sw_if_index,
				~0, // invalid fib index
				1,
				FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo1->dpoi_index, 0)),
	     "adj for 200.200.200.200/32 is recursive via adj for DROP");

    /*
     * add the via-entry host route back. expect it to resolve again
     */
    fib_table_entry_path_add(fib_index,
			     &pfx_1_1_1_1_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_1,
			     tm->hw[0]->sw_if_index,
			     ~0, // invalid fib index
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    FIB_TEST(!dpo_cmp(dpo2, load_balance_get_bucket(dpo1->dpoi_index, 0)),
	     "adj for 200.200.200.200/32 is recursive via adj for 1.1.1.1");

    /*
     * add another path for the recursive. it will then have 2.
     */
    fib_prefix_t pfx_1_1_1_3_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x01010103),
	},
    };
    fib_table_entry_path_add(fib_index,
			     &pfx_1_1_1_3_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_2,
			     tm->hw[0]->sw_if_index,
			     ~0, // invalid fib index
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);

    fib_table_entry_path_add(fib_index,
			     &bgp_200_pfx,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &pfx_1_1_1_3_s_32.fp_addr,
			     ~0,
			     fib_index,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_RESOLVE_VIA_HOST);

    fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx);
    dpo = fib_entry_contribute_ip_forwarding(fei);

    fei = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_1_s_32);
    dpo2 = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(!dpo_cmp(dpo2, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "adj for 200.200.200.200/32 is recursive via adj for 1.1.1.1");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_3_s_32);
    dpo1 = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(!dpo_cmp(dpo1, load_balance_get_bucket(dpo->dpoi_index, 1)),
	     "adj for 200.200.200.200/32 is recursive via adj for 1.1.1.3");

    /*
     * expect the lb-map used by the recursive's load-balance to be using both buckets
     */
    load_balance_map_t *lbm;
    index_t lbmi;

    lb = load_balance_get(dpo->dpoi_index);
    lbmi = lb->lb_map;
    load_balance_map_lock(lbmi);
    lbm = load_balance_map_get(lbmi);

    FIB_TEST(lbm->lbm_buckets[0] == 0,
             "LB maps's bucket 0 is %d",
             lbm->lbm_buckets[0]);
    FIB_TEST(lbm->lbm_buckets[1] == 1,
             "LB maps's bucket 1 is %d",
             lbm->lbm_buckets[1]);

    /*
     * withdraw one of the /32 via-entries.
     * that ECMP path will be unresolved and forwarding should continue on the
     * other available path. this is an iBGP PIC edge failover.
     * Test the forwarding changes without re-fetching the adj from the
     * recursive entry. this ensures it's the same one that is updated; i.e. an
     * in-place modify.
     */
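    /*
     * (PIC = prefix-independent convergence: the shared load-balance is
     * repaired once, in place, so every recursive prefix using it recovers
     * without per-prefix reprogramming.)
     */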
    fib_table_entry_path_remove(fib_index,
				&pfx_1_1_1_1_s_32,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&nh_10_10_10_1,
				tm->hw[0]->sw_if_index,
				~0, // invalid fib index
				1,
				FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx);
    FIB_TEST(!dpo_cmp(dpo, fib_entry_contribute_ip_forwarding(fei)),
	     "post PIC 200.200.200.200/32 was inplace modified");

    FIB_TEST(!dpo_cmp(dpo1, load_balance_get_bucket_i(lb, 0)),
	     "post PIC adj for 200.200.200.200/32 is recursive"
	     " via adj for 1.1.1.3");

    /*
     * the LB map that was locked above should have been modified to remove
     * the path that was down, and thus its bucket points to a path that is
     * still up.
     */
    FIB_TEST(lbm->lbm_buckets[0] == 1,
             "LB maps's bucket 0 is %d",
             lbm->lbm_buckets[0]);
    FIB_TEST(lbm->lbm_buckets[1] == 1,
             "LB maps's bucket 1 is %d",
             lbm->lbm_buckets[1]);

    load_balance_map_unlock(lb->lb_map);

    /*
     * add it back again.
     */
    fib_table_entry_path_add(fib_index,
			     &pfx_1_1_1_1_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_1,
			     tm->hw[0]->sw_if_index,
			     ~0, // invalid fib index
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST(!dpo_cmp(dpo2, load_balance_get_bucket_i(lb, 0)),
	     "post PIC recovery adj for 200.200.200.200/32 is recursive "
	     "via adj for 1.1.1.1");
    FIB_TEST(!dpo_cmp(dpo1, load_balance_get_bucket_i(lb, 1)),
	     "post PIC recovery adj for 200.200.200.200/32 is recursive "
	     "via adj for 1.1.1.3");

    fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(lb == load_balance_get(dpo->dpoi_index),
	     "post PIC 200.200.200.200/32 was inplace modified");

    /*
     * add a 3rd path. this makes the LB 16 buckets. 
     */
    fib_table_entry_path_add(fib_index,
			     &bgp_200_pfx,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &pfx_1_1_1_2_s_32.fp_addr,
			     ~0,
			     fib_index,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_RESOLVE_VIA_HOST);

    fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(lb == load_balance_get(dpo->dpoi_index),
	     "200.200.200.200/32 was inplace modified for 3rd path");
    FIB_TEST(16 == lb->lb_n_buckets,
	     "200.200.200.200/32 was inplace modified for 3rd path to 16 buckets");

    lbmi = lb->lb_map;
    load_balance_map_lock(lbmi);
    lbm = load_balance_map_get(lbmi);

    for (ii = 0; ii < 16; ii++)
    {
        FIB_TEST(lbm->lbm_buckets[ii] == ii,
                 "LB Map for 200.200.200.200/32 at %d is %d",
                 ii, lbm->lbm_buckets[ii]);
    }

    /*
     * trigger PIC by removing the first via-entry
     * the first 6 buckets of the map should map to the next 6
     */
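    /*
     * a sketch of the expected remap: 16 buckets over 3 equal-weight paths
     * split roughly 6:5:5, so the failed first path owns buckets 0..5 and
     * the map redirects each of those to a bucket of a surviving path
     * (here ii -> ii+6), leaving buckets 6..15 untouched.
     */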
    fib_table_entry_path_remove(fib_index,
				&pfx_1_1_1_1_s_32,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&nh_10_10_10_1,
				tm->hw[0]->sw_if_index,
				~0,
				1,
				FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(lb == load_balance_get(dpo->dpoi_index),
	     "200.200.200.200/32 was inplace modified for 3rd path");
    FIB_TEST(2 == lb->lb_n_buckets,
	     "200.200.200.200/32 was inplace modified for 3rd path remove to 2 buckets");

    for (ii = 0; ii < 6; ii++)
    {
        FIB_TEST(lbm->lbm_buckets[ii] == ii+6,
                 "LB Map for 200.200.200.200/32 at %d is %d",
                 ii, lbm->lbm_buckets[ii]);
    }
    for (ii = 6; ii < 16; ii++)
    {
        FIB_TEST(lbm->lbm_buckets[ii] == ii,
                 "LB Map for 200.200.200.200/32 at %d is %d",
                 ii, lbm->lbm_buckets[ii]);
    }


    /*
     * tidy up
     */
    fib_table_entry_path_add(fib_index,
                             &pfx_1_1_1_1_s_32,
                             FIB_SOURCE_API,
                             FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
                             &nh_10_10_10_1,
                             tm->hw[0]->sw_if_index,
                             ~0,
                             1,
                             NULL,
                             FIB_ROUTE_PATH_FLAG_NONE);

    fib_table_entry_path_remove(fib_index,
                                &bgp_200_pfx,
                                FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
                                &pfx_1_1_1_2_s_32.fp_addr,
                                ~0,
                                fib_index,
                                1,
                                FIB_ROUTE_PATH_RESOLVE_VIA_HOST);
    fib_table_entry_path_remove(fib_index,
				&bgp_200_pfx,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&nh_1_1_1_1,
				~0,
				fib_index,
				1,
				FIB_ROUTE_PATH_RESOLVE_VIA_HOST);
    fib_table_entry_path_remove(fib_index,
				&bgp_200_pfx,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&pfx_1_1_1_3_s_32.fp_addr,
				~0,
				fib_index,
				1,
				FIB_ROUTE_PATH_RESOLVE_VIA_HOST);
    fib_table_entry_delete(fib_index,
			   &pfx_1_1_1_3_s_32,
			   FIB_SOURCE_API);
    fib_table_entry_delete(fib_index,
			   &pfx_1_1_1_0_s_28,
			   FIB_SOURCE_API);
    FIB_TEST((FIB_NODE_INDEX_INVALID ==
	      fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_0_s_28)),
	     "1.1.1.1/28 removed");
    FIB_TEST((FIB_NODE_INDEX_INVALID ==
	      fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_3_s_32)),
	     "1.1.1.3/32 removed");
    FIB_TEST((FIB_NODE_INDEX_INVALID ==
	      fib_table_lookup_exact_match(fib_index, &bgp_200_pfx)),
	     "200.200.200.200/32 removed");

    /*
     * add-remove test. no change.
     */
    FIB_TEST((1  == fib_path_list_db_size()),   "path list DB population:%d",
    	     fib_path_list_db_size());
    FIB_TEST((PNBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
    	     fib_path_list_pool_size());
    FIB_TEST((ENBR+7 == fib_entry_pool_size()), "entry pool size is %d",
    	     fib_entry_pool_size());

    /*
     * A route whose paths are built up iteratively and then removed
     * all at once
     */
    fib_prefix_t pfx_4_4_4_4_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 4.4.4.4/32 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x04040404),
	},
    };

    fib_table_entry_path_add(fib_index,
			     &pfx_4_4_4_4_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_1,
			     tm->hw[0]->sw_if_index,
			     ~0,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fib_table_entry_path_add(fib_index,
			     &pfx_4_4_4_4_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_2,
			     tm->hw[0]->sw_if_index,
			     ~0,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fib_table_entry_path_add(fib_index,
			     &pfx_4_4_4_4_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_3,
			     tm->hw[0]->sw_if_index,
			     ~0,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    FIB_TEST(FIB_NODE_INDEX_INVALID !=
	     fib_table_lookup_exact_match(fib_index, &pfx_4_4_4_4_s_32),
	     "4.4.4.4/32 present");

    fib_table_entry_delete(fib_index,
			   &pfx_4_4_4_4_s_32,
			   FIB_SOURCE_API);
    FIB_TEST(FIB_NODE_INDEX_INVALID ==
	     fib_table_lookup_exact_match(fib_index, &pfx_4_4_4_4_s_32),
	     "4.4.4.4/32 removed");

    /*
     * add-remove test. no change.
     */
    FIB_TEST((1  == fib_path_list_db_size()),   "path list DB population:%d",
    	     fib_path_list_db_size());
    FIB_TEST((PNBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
    	     fib_path_list_pool_size());
    FIB_TEST((ENBR+7 == fib_entry_pool_size()), "entry pool size is %d",
    	     fib_entry_pool_size());

    /*
     * A route with multiple paths at once
     */
    fib_route_path_t *r_paths = NULL;

    for (ii = 0; ii < 4; ii++)
    {
	fib_route_path_t r_path = {
	    .frp_proto = FIB_PROTOCOL_IP4,
	    .frp_addr = {
		.ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a02 + ii),
	    },
	    .frp_sw_if_index = tm->hw[0]->sw_if_index,
	    .frp_weight = 1,
	    .frp_fib_index = ~0,
	};
	vec_add1(r_paths, r_path);
    }
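
    /*
     * note: fib_table_entry_update() replaces the entry's entire
     * path-set with this vector in a single operation, unlike the
     * incremental fib_table_entry_path_add() calls used above.
     */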

    fib_table_entry_update(fib_index,
			   &pfx_4_4_4_4_s_32,
			   FIB_SOURCE_API,
			   FIB_ENTRY_FLAG_NONE,
			   r_paths);

    fei = fib_table_lookup_exact_match(fib_index, &pfx_4_4_4_4_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "4.4.4.4/32 present");
    dpo = fib_entry_contribute_ip_forwarding(fei);

    lb = load_balance_get(dpo->dpoi_index);
    FIB_TEST((lb->lb_n_buckets == 4), "4.4.4.4/32 lb over %d paths", lb->lb_n_buckets);

    fib_table_entry_delete(fib_index,
			   &pfx_4_4_4_4_s_32,
			   FIB_SOURCE_API);
    FIB_TEST(FIB_NODE_INDEX_INVALID ==
	     fib_table_lookup_exact_match(fib_index, &pfx_4_4_4_4_s_32),
	     "4.4.4.4/32 removed");
    vec_free(r_paths);

    /*
     * add-remove test. no change.
     */
    FIB_TEST((1  == fib_path_list_db_size()),   "path list DB population:%d",
    	     fib_path_list_db_size());
    FIB_TEST((PNBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
    	     fib_path_list_pool_size());
    FIB_TEST((ENBR+7 == fib_entry_pool_size()), "entry pool size is %d",
    	     fib_entry_pool_size());

    /*
     * A deag route
     */
    fib_table_entry_path_add(fib_index,
			     &pfx_4_4_4_4_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &zero_addr,
			     ~0,
			     fib_index,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
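
    /*
     * a path via the all-zeros address with a next-hop FIB index
     * (rather than an interface) contributes a lookup DPO, i.e. the
     * packet is looked up again (deaggregated) in that table; the
     * checks below confirm the lookup DPO references fib_index.
     */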

    fei = fib_table_lookup_exact_match(fib_index, &pfx_4_4_4_4_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "4.4.4.4/32 present");

    dpo = fib_entry_contribute_ip_forwarding(fei);
    dpo = load_balance_get_bucket(dpo->dpoi_index, 0);
    lookup_dpo_t *lkd = lookup_dpo_get(dpo->dpoi_index);

    FIB_TEST((fib_index == lkd->lkd_fib_index),
	     "4.4.4.4/32 is deag in %d %U",
             lkd->lkd_fib_index,
             format_dpo_id, dpo, 0);

    fib_table_entry_delete(fib_index,
			   &pfx_4_4_4_4_s_32,
			   FIB_SOURCE_API);
    FIB_TEST(FIB_NODE_INDEX_INVALID ==
	     fib_table_lookup_exact_match(fib_index, &pfx_4_4_4_4_s_32),
	     "4.4.4.4/32 removed");
    vec_free(r_paths);

    /*
     * add-remove test. no change.
     */
    FIB_TEST((1  == fib_path_list_db_size()),   "path list DB population:%d",
    	     fib_path_list_db_size());
    FIB_TEST((PNBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
    	     fib_path_list_pool_size());
    FIB_TEST((ENBR+7 == fib_entry_pool_size()), "entry pool size is %d",
    	     fib_entry_pool_size());

    /*
     * Duplicate paths:
     *  add a recursive route with duplicate paths. Expect the duplicate to be ignored.
     */
    fib_prefix_t pfx_34_1_1_1_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x22010101),
	},
    };
    fib_prefix_t pfx_34_34_1_1_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x22220101),
	},
    };
    fei = fib_table_entry_path_add(fib_index,
                                   &pfx_34_1_1_1_s_32,
                                   FIB_SOURCE_API,
                                   FIB_ENTRY_FLAG_NONE,
                                   FIB_PROTOCOL_IP4,
                                   &pfx_34_34_1_1_s_32.fp_addr,
                                   ~0,
                                   fib_index,
                                   1,
                                   NULL,
                                   FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_entry_path_add(fib_index,
                                   &pfx_34_1_1_1_s_32,
                                   FIB_SOURCE_API,
                                   FIB_ENTRY_FLAG_NONE,
                                   FIB_PROTOCOL_IP4,
                                   &pfx_34_34_1_1_s_32.fp_addr,
                                   ~0,
                                   fib_index,
                                   1,
                                   NULL,
                                   FIB_ROUTE_PATH_FLAG_NONE);
    FIB_TEST_REC_FORW(&pfx_34_1_1_1_s_32, &pfx_34_34_1_1_s_32, 0);
    fib_table_entry_delete_index(fei, FIB_SOURCE_API);

    /*
     * CLEANUP
     *   remove: 1.1.1.2/32, 1.1.2.0/24 and 1.1.1.1/32
     *           all of which are via 10.10.10.1, Itf1
     */
    fib_table_entry_path_remove(fib_index,
				&pfx_1_1_1_2_s_32,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&nh_10_10_10_1,
				tm->hw[0]->sw_if_index,
				~0,
				1,
				FIB_ROUTE_PATH_FLAG_NONE);
    fib_table_entry_path_remove(fib_index,
				&pfx_1_1_1_1_s_32,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&nh_10_10_10_1,
				tm->hw[0]->sw_if_index,
				~0,
				1,
				FIB_ROUTE_PATH_FLAG_NONE);
    fib_table_entry_path_remove(fib_index,
				&pfx_1_1_2_0_s_24,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&nh_10_10_10_1,
				tm->hw[0]->sw_if_index,
				~0,
				1,
				FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST(FIB_NODE_INDEX_INVALID ==
	     fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_1_s_32),
	     "1.1.1.1/32 removed");
    FIB_TEST(FIB_NODE_INDEX_INVALID ==
	     fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_2_s_32),
	     "1.1.1.2/32 removed");
    FIB_TEST(FIB_NODE_INDEX_INVALID ==
	     fib_table_lookup_exact_match(fib_index, &pfx_1_1_2_0_s_24),
	     "1.1.2.0/24 removed");

    /*
     * -3 entries and -1 shared path-list
     */
    FIB_TEST((0  == fib_path_list_db_size()),   "path list DB population:%d",
    	     fib_path_list_db_size());
    FIB_TEST((PNBR+4 == fib_path_list_pool_size()), "path list pool size is %d",
    	     fib_path_list_pool_size());
    FIB_TEST((ENBR+4 == fib_entry_pool_size()), "entry pool size is %d",
    	     fib_entry_pool_size());

    /*
     * An attached-host route. Expect to link to the incomplete adj
     */
    fib_prefix_t pfx_4_1_1_1_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 4.1.1.1/32 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x04010101),
	},
    };
    fib_table_entry_path_add(fib_index,
			     &pfx_4_1_1_1_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &zero_addr,
			     tm->hw[0]->sw_if_index,
			     fib_index,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
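
    /*
     * a zero next-hop with a valid sw_if_index is an attached-host
     * path; it should resolve via a neighbour adj for the prefix's
     * own address on that interface, as the adj_nbr_add_or_lock
     * cross-check below demonstrates.
     */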

    fei = fib_table_lookup_exact_match(fib_index, &pfx_4_1_1_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "4.1.1.1/32 present");
    ai = fib_entry_get_adj(fei);

    ai2 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
			      VNET_LINK_IP4,
			      &pfx_4_1_1_1_s_32.fp_addr,
			      tm->hw[0]->sw_if_index);
    FIB_TEST((ai == ai2), "Attached-host link to incomplete ADJ");
    adj_unlock(ai2);

    /*
     * +1 entry and +1 shared path-list
     */
    FIB_TEST((1  == fib_path_list_db_size()),   "path list DB population:%d",
    	     fib_path_list_db_size());
    FIB_TEST((PNBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
    	     fib_path_list_pool_size());
    FIB_TEST((ENBR+5 == fib_entry_pool_size()), "entry pool size is %d",
    	     fib_entry_pool_size());

    fib_table_entry_delete(fib_index,
			   &pfx_4_1_1_1_s_32,
			   FIB_SOURCE_API);

    FIB_TEST((0  == fib_path_list_db_size()),   "path list DB population:%d",
    	     fib_path_list_db_size());
    FIB_TEST((PNBR+4 == fib_path_list_pool_size()), "path list pool size is %d",
    	     fib_path_list_pool_size());
    FIB_TEST((ENBR+4 == fib_entry_pool_size()), "entry pool size is %d",
    	     fib_entry_pool_size());

    /*
     * add a v6 prefix via v4 next-hops
     */
    fib_prefix_t pfx_2001_s_64 = {
	.fp_len = 64,
	.fp_proto = FIB_PROTOCOL_IP6,
	.fp_addr = {
	    .ip6.as_u64[0] = clib_host_to_net_u64(0x2001000000000000),
	},
    };
    fei = fib_table_entry_path_add(0, //default v6 table
				   &pfx_2001_s_64,
				   FIB_SOURCE_API,
				   FIB_ENTRY_FLAG_NONE,
				   FIB_PROTOCOL_IP4,
				   &nh_10_10_10_1,
				   tm->hw[0]->sw_if_index,
				   fib_index,
				   1,
				   NULL,
				   FIB_ROUTE_PATH_FLAG_NONE);
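
    /*
     * the resulting adj carries v6 payloads (link type v6) but its
     * next-hop is resolved in v4 (NH proto v4), per the asserts below.
     */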

    fei = fib_table_lookup_exact_match(0, &pfx_2001_s_64);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "2001::/64 present");
    ai = fib_entry_get_adj(fei);
    adj = adj_get(ai);
    FIB_TEST((adj->lookup_next_index == IP_LOOKUP_NEXT_ARP),
	     "2001::/64 via ARP-adj");
    FIB_TEST((adj->ia_link == VNET_LINK_IP6),
	     "2001::/64 is link type v6");
    FIB_TEST((adj->ia_nh_proto == FIB_PROTOCOL_IP4),
	     "2001::/64 ADJ-adj is NH proto v4");
    fib_table_entry_delete(0, &pfx_2001_s_64, FIB_SOURCE_API);

    /*
     * add a uRPF exempt prefix:
     *  test:
     *   - its forwarding is drop
     *   - its uRPF list is not empty
     *   - the uRPF list for the default route (its cover) is empty
     */
    fei = fib_table_entry_special_add(fib_index,
				      &pfx_4_1_1_1_s_32,
				      FIB_SOURCE_URPF_EXEMPT,
				      FIB_ENTRY_FLAG_DROP,
				      ADJ_INDEX_INVALID);
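    /* fib_test_urpf_is_equal() takes the expected itf count followed
     * by that many itf indices */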
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(load_balance_is_drop(dpo),
	     "uRPF exempt 4.1.1.1/32 DROP");
    FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 1, 0),
	     "uRPF list for exempt prefix has itf index 0");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_0_0_0_0_s_0);
    FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 0),
	     "uRPF list for 0.0.0.0/0 empty");

    fib_table_entry_delete(fib_index, &pfx_4_1_1_1_s_32, FIB_SOURCE_URPF_EXEMPT);

    /*
     * An adj-fib that fails the refinement criteria - no connected cover
     */
    fib_prefix_t pfx_12_10_10_2_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 12.10.10.2 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x0c0a0a02),
	},
    };

    fib_table_entry_update_one_path(fib_index,
				    &pfx_12_10_10_2_s_32,
				    FIB_SOURCE_ADJ,
				    FIB_ENTRY_FLAG_ATTACHED,
				    FIB_PROTOCOL_IP4,
				    &pfx_12_10_10_2_s_32.fp_addr,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
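
    /*
     * an adj-fib is installed in forwarding only if it refines a
     * connected cover on the same interface; with no connected cover
     * the entry exists but contributes no valid forwarding.
     */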

    fei = fib_table_lookup_exact_match(fib_index, &pfx_12_10_10_2_s_32);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(!dpo_id_is_valid(dpo),
	     "no connected cover adj-fib fails refinement");

    fib_table_entry_delete(fib_index,
			   &pfx_12_10_10_2_s_32,
			   FIB_SOURCE_ADJ);

    /*
     * An adj-fib that fails the refinement criteria - cover is connected
     * but on a different interface
     */
    fib_prefix_t pfx_10_10_10_127_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 10.10.10.127 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a7f),
	},
    };

    fib_table_entry_update_one_path(fib_index,
				    &pfx_10_10_10_127_s_32,
				    FIB_SOURCE_ADJ,
				    FIB_ENTRY_FLAG_ATTACHED,
				    FIB_PROTOCOL_IP4,
				    &pfx_10_10_10_127_s_32.fp_addr,
				    tm->hw[1]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_127_s_32);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(!dpo_id_is_valid(dpo),
	     "wrong interface adj-fib fails refinement");

    fib_table_entry_delete(fib_index,
			   &pfx_10_10_10_127_s_32,
			   FIB_SOURCE_ADJ);

    /*
     * CLEANUP
     *    remove the adj-fibs
     */
    fib_table_entry_delete(fib_index,
			   &pfx_10_10_10_1_s_32,
			   FIB_SOURCE_ADJ);
    fib_table_entry_delete(fib_index,
			   &pfx_10_10_10_2_s_32,
			   FIB_SOURCE_ADJ);
    FIB_TEST(FIB_NODE_INDEX_INVALID ==
	     fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32),
	     "10.10.10.1/32 adj-fib removed");
    FIB_TEST(FIB_NODE_INDEX_INVALID ==
	     fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_2_s_32),
	     "10.10.10.2/32 adj-fib removed");

    /*
     * -2 entries and -2 non-shared path-list
     */
    FIB_TEST((0  == fib_path_list_db_size()),   "path list DB population:%d",
    	     fib_path_list_db_size());
    FIB_TEST((PNBR+2 == fib_path_list_pool_size()), "path list pool size is %d",
    	     fib_path_list_pool_size());
    FIB_TEST((ENBR+2 == fib_entry_pool_size()), "entry pool size is %d",
    	     fib_entry_pool_size());

    /*
     * unlock the adjacencies for which this test provided a rewrite.
     * These are the last locks on these adjs. they should thus go away.
     */
    adj_unlock(ai_02);
    adj_unlock(ai_01);
    adj_unlock(ai_12_12_12_12);

    FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
	     adj_nbr_db_size());

    /*
     * CLEANUP
     *   remove the interface prefixes
     */
    local_pfx.fp_len = 32;
    fib_table_entry_special_remove(fib_index, &local_pfx,
				   FIB_SOURCE_INTERFACE);
    fei = fib_table_lookup(fib_index, &local_pfx);

    FIB_TEST(FIB_NODE_INDEX_INVALID ==
	     fib_table_lookup_exact_match(fib_index, &local_pfx),
	     "10.10.10.10/32 adj-fib removed");

    local_pfx.fp_len = 24;
    fib_table_entry_delete(fib_index, &local_pfx,
			   FIB_SOURCE_INTERFACE);

    FIB_TEST(FIB_NODE_INDEX_INVALID ==
	     fib_table_lookup_exact_match(fib_index, &local_pfx),
	     "10.10.10.10/24 adj-fib removed");

    /*
     * -2 entries and -2 non-shared path-list
     */
    FIB_TEST((0  == fib_path_list_db_size()),   "path list DB population:%d",
    	     fib_path_list_db_size());
    FIB_TEST((PNBR == fib_path_list_pool_size()), "path list pool size is %d",
    	     fib_path_list_pool_size());
    FIB_TEST((ENBR == fib_entry_pool_size()), "entry pool size is %d",
    	     fib_entry_pool_size());

    /*
     * Last but not least, remove the VRF
     */
    FIB_TEST((0 == fib_table_get_num_entries(fib_index,
                                             FIB_PROTOCOL_IP4,
                                             FIB_SOURCE_API)),
             "NO API Source'd prefixes");
    FIB_TEST((0 == fib_table_get_num_entries(fib_index,
                                             FIB_PROTOCOL_IP4,
                                             FIB_SOURCE_RR)),
             "NO RR Source'd prefixes");
    FIB_TEST((0 == fib_table_get_num_entries(fib_index,
                                             FIB_PROTOCOL_IP4,
                                             FIB_SOURCE_INTERFACE)),
             "NO INterface Source'd prefixes");

    fib_table_unlock(fib_index, FIB_PROTOCOL_IP4);

    FIB_TEST((0  == fib_path_list_db_size()), "path list DB population:%d",
    	     fib_path_list_db_size());
    FIB_TEST((PNBR-5 == fib_path_list_pool_size()), "path list pool size is %d",
    	     fib_path_list_pool_size());
    FIB_TEST((ENBR-5 == fib_entry_pool_size()), "entry pool size is %d",
    	     fib_entry_pool_size());
    FIB_TEST((ENBR-5 == pool_elts(fib_urpf_list_pool)), "uRPF pool size is %d",
    	     pool_elts(fib_urpf_list_pool));

    return 0;
}

static int
fib_test_v6 (void)
{
    /*
     * In the default table check for the presence and correct forwarding
     * of the special entries
     */
    fib_node_index_t dfrt, fei, ai, locked_ai, ai_01, ai_02;
    const dpo_id_t *dpo, *dpo_drop;
    const ip_adjacency_t *adj;
    const receive_dpo_t *rd;
    test_main_t *tm;
    u32 fib_index;
    int ii;

    FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
	     adj_nbr_db_size());

    /* via 2001:0:0:1::2 */
    ip46_address_t nh_2001_2 = {
	.ip6 = {
	    .as_u64 = {
		[0] = clib_host_to_net_u64(0x2001000000000001),
		[1] = clib_host_to_net_u64(0x0000000000000002),
	    },
	},
    };

    tm = &test_main;

    dpo_drop = drop_dpo_get(DPO_PROTO_IP6);

    /* Find or create FIB table 11 */
    fib_index = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP6, 11);

    for (ii = 0; ii < 4; ii++)
    {
	ip6_main.fib_index_by_sw_if_index[tm->hw[ii]->sw_if_index] = fib_index;
    }

    fib_prefix_t pfx_0_0 = {
	.fp_len = 0,
	.fp_proto = FIB_PROTOCOL_IP6,
	.fp_addr = {
	    .ip6 = {
		{0, 0},
	    },
	},
    };

    dfrt = fib_table_lookup(fib_index, &pfx_0_0);
    FIB_TEST((FIB_NODE_INDEX_INVALID != dfrt), "default route present");
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(dfrt)),
	     "Default route is DROP");

    dpo = fib_entry_contribute_ip_forwarding(dfrt);
    FIB_TEST((dpo->dpoi_index == ip6_fib_table_fwding_lookup(
		                     &ip6_main,
				     1,
				     &pfx_0_0.fp_addr.ip6)),
	     "default-route; fwd and non-fwd tables match");

    // FIXME - check specials.

    /*
     * At this stage there is one v4 FIB with 5 routes and two v6 FIBs
     * each with 2 entries and a v6 mfib with 4 path-lists.
     * All entries are special so no path-list sharing.
     */
#define ENPS (5+4)
#define PNPS (5+4+4)
    FIB_TEST((0 == fib_path_list_db_size()),   "path list DB is empty");
    FIB_TEST((PNPS == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENPS == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * add interface routes.
     *  validate presence of /64 attached and /128 receive.
     *  test for the presence of the receive address in the glean and local adj
     *
     * receive on 2001:0:0:1::1/128
     */
    fib_prefix_t local_pfx = {
	.fp_len = 64,
	.fp_proto = FIB_PROTOCOL_IP6,
	.fp_addr = {
	    .ip6 = {
		.as_u64 = {
		    [0] = clib_host_to_net_u64(0x2001000000000001),
		    [1] = clib_host_to_net_u64(0x0000000000000001),
		},
	    },
	}
    };

    fib_table_entry_update_one_path(fib_index, &local_pfx,
				    FIB_SOURCE_INTERFACE,
				    (FIB_ENTRY_FLAG_CONNECTED |
				     FIB_ENTRY_FLAG_ATTACHED),
				    FIB_PROTOCOL_IP6,
				    NULL,
				    tm->hw[0]->sw_if_index,
				    ~0,
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(fib_index, &local_pfx);

    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached interface route present");

    ai = fib_entry_get_adj(fei);
    FIB_TEST((FIB_NODE_INDEX_INVALID != ai), "attached interface route adj present");
    adj = adj_get(ai);
    FIB_TEST((IP_LOOKUP_NEXT_GLEAN == adj->lookup_next_index),
	     "attached interface adj is glean");
    FIB_TEST((0 == ip46_address_cmp(&local_pfx.fp_addr,
				    &adj->sub_type.glean.receive_addr)),
	      "attached interface adj is receive ok");
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST((dpo->dpoi_index == ip6_fib_table_fwding_lookup(
		                     &ip6_main,
				     1,
				     &local_pfx.fp_addr.ip6)),
	     "attached-route; fwd and non-fwd tables match");

    local_pfx.fp_len = 128;
    fib_table_entry_update_one_path(fib_index, &local_pfx,
				    FIB_SOURCE_INTERFACE,
				    (FIB_ENTRY_FLAG_CONNECTED |
				     FIB_ENTRY_FLAG_LOCAL),
				    FIB_PROTOCOL_IP6,
				    NULL,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup(fib_index, &local_pfx);

    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local interface route present");

    dpo = fib_entry_contribute_ip_forwarding(fei);
    dpo = load_balance_get_bucket(dpo->dpoi_index, 0);
    FIB_TEST((DPO_RECEIVE == dpo->dpoi_type),
	     "local interface adj is local");
    rd = receive_dpo_get(dpo->dpoi_index);

    FIB_TEST((0 == ip46_address_cmp(&local_pfx.fp_addr,
				    &rd->rd_addr)),
	      "local interface adj is receive ok");

    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST((dpo->dpoi_index == ip6_fib_table_fwding_lookup(
		                     &ip6_main,
				     1,
				     &local_pfx.fp_addr.ip6)),
	     "local-route; fwd and non-fwd tables match");

    /*
     * +2 entries. +2 unshared path-lists
     */
    FIB_TEST((0 == fib_path_list_db_size()),   "path list DB is empty");
    FIB_TEST((PNPS+2 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENPS+2 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * Modify the default route to be via an adj not yet known.
     * this sources the default route with the API source, which is
     * of higher preference than the DEFAULT_ROUTE source
     */
    fib_table_entry_path_add(fib_index, &pfx_0_0,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP6,
			     &nh_2001_2,
			     tm->hw[0]->sw_if_index,
			     ~0,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup(fib_index, &pfx_0_0);

    FIB_TEST((fei == dfrt), "default route same index");
    ai = fib_entry_get_adj(fei);
    FIB_TEST((FIB_NODE_INDEX_INVALID != ai), "default route adj present");
    adj = adj_get(ai);
    FIB_TEST((IP_LOOKUP_NEXT_ARP == adj->lookup_next_index),
	     "adj is incomplete");
    FIB_TEST((0 == ip46_address_cmp(&nh_2001_2, &adj->sub_type.nbr.next_hop)),
	      "adj nbr next-hop ok");

    /*
     * find the adj in the shared db
     */
    locked_ai = adj_nbr_add_or_lock(FIB_PROTOCOL_IP6,
				    VNET_LINK_IP6,
				    &nh_2001_2,
				    tm->hw[0]->sw_if_index);
    FIB_TEST((locked_ai == ai), "ADJ NBR DB find");
    adj_unlock(locked_ai);

    /*
     * no more entries. +1 shared path-list
     */
    FIB_TEST((1 == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNPS+3 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENPS+2 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * remove the API source from the default route. We expect
     * the route to remain, sourced by DEFAULT_ROUTE, and hence a DROP
     */
    fib_table_entry_path_remove(fib_index, &pfx_0_0,
				FIB_SOURCE_API,	
				FIB_PROTOCOL_IP6,
				&nh_2001_2,
				tm->hw[0]->sw_if_index,
				~0,
				1,
				FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup(fib_index, &pfx_0_0);

    FIB_TEST((fei == dfrt), "default route same index");
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(dfrt)),
	     "Default route is DROP");

    /*
     * no more entries. -1 shared path-list
     */
    FIB_TEST((0 == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNPS+2 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENPS+2 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * Add 2 ARP entries => complete ADJs plus adj-fibs.
     */
    fib_prefix_t pfx_2001_1_2_s_128 = {
	.fp_len   = 128,
	.fp_proto = FIB_PROTOCOL_IP6,
	.fp_addr  = {
	    .ip6 = {
		.as_u64 = {
		    [0] = clib_host_to_net_u64(0x2001000000000001),
		    [1] = clib_host_to_net_u64(0x0000000000000002),
		},
	    },
	}
    };
    fib_prefix_t pfx_2001_1_3_s_128 = {
	.fp_len   = 128,
	.fp_proto = FIB_PROTOCOL_IP6,
	.fp_addr  = {
	    .ip6 = {
		.as_u64 = {
		    [0] = clib_host_to_net_u64(0x2001000000000001),
		    [1] = clib_host_to_net_u64(0x0000000000000003),
		},
	    },
	}
    };
    u8 eth_addr[] = {
	0xde, 0xde, 0xde, 0xba, 0xba, 0xba,
    };

    ai_01 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP6,
				VNET_LINK_IP6,
				&pfx_2001_1_2_s_128.fp_addr,
				tm->hw[0]->sw_if_index);
    FIB_TEST((FIB_NODE_INDEX_INVALID != ai_01), "adj created");
    adj = adj_get(ai_01);
    FIB_TEST((IP_LOOKUP_NEXT_ARP == adj->lookup_next_index),
	     "adj is incomplete");
    FIB_TEST((0 == ip46_address_cmp(&pfx_2001_1_2_s_128.fp_addr,
				    &adj->sub_type.nbr.next_hop)),
	      "adj nbr next-hop ok");

    adj_nbr_update_rewrite(ai_01, ADJ_NBR_REWRITE_FLAG_COMPLETE,
			   fib_test_build_rewrite(eth_addr));
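    /* attaching a rewrite string completes the adj: ARP (incomplete)
     * becomes REWRITE (complete) */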
    FIB_TEST((IP_LOOKUP_NEXT_REWRITE == adj->lookup_next_index),
	     "adj is complete");
    FIB_TEST((0 == ip46_address_cmp(&pfx_2001_1_2_s_128.fp_addr,
				    &adj->sub_type.nbr.next_hop)),
	      "adj nbr next-hop ok");

    fib_table_entry_update_one_path(fib_index,
				    &pfx_2001_1_2_s_128,
				    FIB_SOURCE_ADJ,
				    FIB_ENTRY_FLAG_ATTACHED,
				    FIB_PROTOCOL_IP6,
				    &pfx_2001_1_2_s_128.fp_addr,
				    tm->hw[0]->sw_if_index,
				    ~0,
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_2001_1_2_s_128);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "ADJ-FIB resolves via adj");

    eth_addr[5] = 0xb2;

    ai_02 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP6,
				VNET_LINK_IP6,
				&pfx_2001_1_3_s_128.fp_addr,
				tm->hw[0]->sw_if_index);
    FIB_TEST((FIB_NODE_INDEX_INVALID != ai_02), "adj created");
    adj = adj_get(ai_02);
    FIB_TEST((IP_LOOKUP_NEXT_ARP == adj->lookup_next_index),
	     "adj is incomplete");
    FIB_TEST((0 == ip46_address_cmp(&pfx_2001_1_3_s_128.fp_addr,
				    &adj->sub_type.nbr.next_hop)),
	      "adj nbr next-hop ok");

    adj_nbr_update_rewrite(ai_02, ADJ_NBR_REWRITE_FLAG_COMPLETE,
			   fib_test_build_rewrite(eth_addr));
    FIB_TEST((IP_LOOKUP_NEXT_REWRITE == adj->lookup_next_index),
	     "adj is complete");
    FIB_TEST((0 == ip46_address_cmp(&pfx_2001_1_3_s_128.fp_addr,
				    &adj->sub_type.nbr.next_hop)),
	      "adj nbr next-hop ok");
    FIB_TEST((ai_01 != ai_02), "ADJs are different");

    fib_table_entry_update_one_path(fib_index,
				    &pfx_2001_1_3_s_128,
				    FIB_SOURCE_ADJ,
				    FIB_ENTRY_FLAG_ATTACHED,
				    FIB_PROTOCOL_IP6,
				    &pfx_2001_1_3_s_128.fp_addr,
				    tm->hw[0]->sw_if_index,
				    ~0,
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_2001_1_3_s_128);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_02 == ai), "ADJ-FIB resolves via adj");

    /*
     * +2 entries, +2 unshared path-lists.
     */
    FIB_TEST((0 == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNPS+4 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENPS+4 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * Add 2 routes via the first ADJ. Ensure path-list sharing
     */
    fib_prefix_t pfx_2001_a_s_64 = {
	.fp_len   = 64,
	.fp_proto = FIB_PROTOCOL_IP6,
	.fp_addr  = {
	    .ip6 = {
		.as_u64 = {
		    [0] = clib_host_to_net_u64(0x200100000000000a),
		    [1] = clib_host_to_net_u64(0x0000000000000000),
		},
	    },
	}
    };
    fib_prefix_t pfx_2001_b_s_64 = {
	.fp_len   = 64,
	.fp_proto = FIB_PROTOCOL_IP6,
	.fp_addr  = {
	    .ip6 = {
		.as_u64 = {
		    [0] = clib_host_to_net_u64(0x200100000000000b),
		    [1] = clib_host_to_net_u64(0x0000000000000000),
		},
	    },
	}
    };

    fib_table_entry_path_add(fib_index,
			     &pfx_2001_a_s_64,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP6,
			     &nh_2001_2,
			     tm->hw[0]->sw_if_index,
			     ~0,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup(fib_index, &pfx_2001_a_s_64);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "2001::a/64 resolves via 2001:0:0:1::1");
    fib_table_entry_path_add(fib_index,
			     &pfx_2001_b_s_64,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP6,
			     &nh_2001_2,
			     tm->hw[0]->sw_if_index,
			     ~0,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup(fib_index, &pfx_2001_b_s_64);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "2001::b/64 resolves via 2001:0:0:1::1");

    /*
     * +2 entries, +1 shared path-list.
     */
    FIB_TEST((1 == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNPS+5 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENPS+6 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * add a v4 prefix via a v6 next-hop
     */
    fib_prefix_t pfx_1_1_1_1_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 1.1.1.1 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x01010101),
	},
    };
    fei = fib_table_entry_path_add(0, // default table
				   &pfx_1_1_1_1_s_32,
				   FIB_SOURCE_API,
				   FIB_ENTRY_FLAG_NONE,
				   FIB_PROTOCOL_IP6,
				   &nh_2001_2,
				   tm->hw[0]->sw_if_index,
				   ~0,
				   1,
				   NULL,
				   FIB_ROUTE_PATH_FLAG_NONE);
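    /*
     * the mirror image of the v6-via-v4 case: the adj carries v4
     * traffic (link type v4) but its next-hop is resolved in v6
     * (NH proto v6), per the checks below.
     */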
    FIB_TEST(fei == fib_table_lookup_exact_match(0, &pfx_1_1_1_1_s_32),
	     "1.1.1.1/32 o v6 route present");
    ai = fib_entry_get_adj(fei);
    adj = adj_get(ai);
    FIB_TEST((adj->lookup_next_index == IP_LOOKUP_NEXT_ARP),
	     "1.1.1.1/32 via ARP-adj");
    FIB_TEST((adj->ia_link == VNET_LINK_IP4),
	     "1.1.1.1/32 ADJ-adj is link type v4");
    FIB_TEST((adj->ia_nh_proto == FIB_PROTOCOL_IP6),
	     "1.1.1.1/32 ADJ-adj is NH proto v6");
    fib_table_entry_delete(0, &pfx_1_1_1_1_s_32, FIB_SOURCE_API);

    /*
     * An attached route
     */
    fib_prefix_t pfx_2001_c_s_64 = {
	.fp_len   = 64,
	.fp_proto = FIB_PROTOCOL_IP6,
	.fp_addr  = {
	    .ip6 = {
		.as_u64 = {
		    [0] = clib_host_to_net_u64(0x200100000000000c),
		    [1] = clib_host_to_net_u64(0x0000000000000000),
		},
	    },
	}
    };
    fib_table_entry_path_add(fib_index,
			     &pfx_2001_c_s_64,
			     FIB_SOURCE_CLI,
			     FIB_ENTRY_FLAG_ATTACHED,
			     FIB_PROTOCOL_IP6,
			     NULL,
			     tm->hw[0]->sw_if_index,
			     ~0,
			     1,
			     NULL,
			     FIB_ROUTE_PATH_FLAG_NONE);
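    /*
     * an attached prefix (NULL next-hop, valid interface) should
     * resolve via the interface's glean adj, as checked below.
     */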
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_c_s_64);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached route present");
    ai = fib_entry_get_adj(fei);
    adj = adj_get(ai);
    FIB_TEST((adj->lookup_next_index == IP_LOOKUP_NEXT_GLEAN),
	     "2001:0:0:c/64 attached resolves via glean");

    fib_table_entry_path_remove(fib_index,
				&pfx_2001_c_s_64,
				FIB_SOURCE_CLI,
				FIB_PROTOCOL_IP6,
				NULL,
				tm->hw[0]->sw_if_index,
				~0,
				1,
				FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_c_s_64);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "attached route removed");

    /*
     * Shutdown the interface on which we have a connected and through
     * which the routes are reachable.
     * This will result in the connected, adj-fibs, and routes linking to drop
     * The local/for-us prefix continues to receive.
     */
    clib_error_t * error;

    error = vnet_sw_interface_set_flags(vnet_get_main(),
					tm->hw[0]->sw_if_index,
					~VNET_SW_INTERFACE_FLAG_ADMIN_UP);
    FIB_TEST((NULL == error), "Interface shutdown OK");

    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_b_s_64);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "2001::b/64 resolves via drop");

    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_a_s_64);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "2001::a/64 resolves via drop");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_3_s_128);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "2001:0:0:1::3/64 resolves via drop");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_2_s_128);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "2001:0:0:1::2/64 resolves via drop");
    fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "2001:0:0:1::1/128 not drop");
    local_pfx.fp_len = 64;
    fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "2001:0:0:1/64 resolves via drop");

    /*
     * no change
     */
    FIB_TEST((1 == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNPS+5 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENPS+6 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * shutdown one of the other interfaces, then add a connected.
     * and swap one of the routes to it.
     */
    error = vnet_sw_interface_set_flags(vnet_get_main(),
					tm->hw[1]->sw_if_index,
					~VNET_SW_INTERFACE_FLAG_ADMIN_UP);
    FIB_TEST((NULL == error), "Interface 1 shutdown OK");

    fib_prefix_t connected_pfx = {
	.fp_len = 64,
	.fp_proto = FIB_PROTOCOL_IP6,
	.fp_addr = {
	    .ip6 = {
		/* 2001:0:0:2::1/64 */
		.as_u64 = {
		    [0] = clib_host_to_net_u64(0x2001000000000002),
		    [1] = clib_host_to_net_u64(0x0000000000000001),
		},
	    },
	}
    };
    fib_table_entry_update_one_path(fib_index, &connected_pfx,
				    FIB_SOURCE_INTERFACE,
				    (FIB_ENTRY_FLAG_CONNECTED |
				     FIB_ENTRY_FLAG_ATTACHED),
				    FIB_PROTOCOL_IP6,
				    NULL,
				    tm->hw[1]->sw_if_index,
				    ~0,
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(fib_index, &connected_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached interface route present");
    dpo = fib_entry_contribute_ip_forwarding(fei);
    dpo = load_balance_get_bucket(dpo->dpoi_index, 0);
    FIB_TEST(!dpo_cmp(dpo, dpo_drop),
             "2001:0:0:2/64 not resolves via drop");

    connected_pfx.fp_len = 128;
    fib_table_entry_update_one_path(fib_index, &connected_pfx,
				    FIB_SOURCE_INTERFACE,
				    (FIB_ENTRY_FLAG_CONNECTED |
				     FIB_ENTRY_FLAG_LOCAL),
				    FIB_PROTOCOL_IP6,
				    NULL,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup(fib_index, &connected_pfx);

    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local interface route present");
    dpo = fib_entry_contribute_ip_forwarding(fei);
    dpo = load_balance_get_bucket(dpo->dpoi_index, 0);
    FIB_TEST((DPO_RECEIVE == dpo->dpoi_type),
	     "local interface adj is local");
    rd = receive_dpo_get(dpo->dpoi_index);
    FIB_TEST((0 == ip46_address_cmp(&connected_pfx.fp_addr,
				    &rd->rd_addr)),
	      "local interface adj is receive ok");

    /*
     * +2 entries, +2 unshared path-lists
     */
    FIB_TEST((1 == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNPS+7 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENPS+8 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());


    /*
     * bring the interface back up. we expect the routes to return
     * to normal forwarding.
     */
    error = vnet_sw_interface_set_flags(vnet_get_main(),
					tm->hw[0]->sw_if_index,
					VNET_SW_INTERFACE_FLAG_ADMIN_UP);
    FIB_TEST((NULL == error), "Interface bring-up OK");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_a_s_64);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "2001::a/64 resolves via 2001:0:0:1::1");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_b_s_64);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "2001::b/64 resolves via 2001:0:0:1::1");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_3_s_128);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_02 == ai), "ADJ-FIB resolves via adj");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_2_s_128);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "ADJ-FIB resolves via adj");
    local_pfx.fp_len = 64;
    fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
    ai = fib_entry_get_adj(fei);
    adj = adj_get(ai);
    FIB_TEST((IP_LOOKUP_NEXT_GLEAN == adj->lookup_next_index),
	     "attached interface adj is glean");

    /*
     * Same test as above, but this time the HW interface goes down
     */
    error = vnet_hw_interface_set_flags(vnet_get_main(),
					tm->hw_if_indicies[0],
					~VNET_HW_INTERFACE_FLAG_LINK_UP);
    FIB_TEST((NULL == error), "Interface shutdown OK");

    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_b_s_64);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "2001::b/64 resolves via drop");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_a_s_64);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "2001::a/64 resolves via drop");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_3_s_128);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "2001:0:0:1::3/128 resolves via drop");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_2_s_128);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "2001:0:0:1::2/128 resolves via drop");
    local_pfx.fp_len = 128;
    fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "2001:0:0:1::1/128 not drop");
    local_pfx.fp_len = 64;
    fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "2001:0:0:1/64 resolves via drop");

    error = vnet_hw_interface_set_flags(vnet_get_main(),
					tm->hw_if_indicies[0],
					VNET_HW_INTERFACE_FLAG_LINK_UP);
    FIB_TEST((NULL == error), "Interface bring-up OK");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_a_s_64);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "2001::a/64 resolves via 2001:0:0:1::1");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_b_s_64);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "2001::b/64 resolves via 2001:0:0:1::1");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_3_s_128);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_02 == ai), "ADJ-FIB resolves via adj");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_2_s_128);
    ai = fib_entry_get_adj(fei);
    FIB_TEST((ai_01 == ai), "ADJ-FIB resolves via adj");
    local_pfx.fp_len = 64;
    fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
    ai = fib_entry_get_adj(fei);
    adj = adj_get(ai);
    FIB_TEST((IP_LOOKUP_NEXT_GLEAN == adj->lookup_next_index),
	     "attached interface adj is glean");

    /*
     * Delete the interface that the routes resolve through.
     * Again no routes are removed. They all point to drop.
     *
     * This is considered an error case. The control plane should
     * not remove interfaces through which routes resolve, but
     * such things can happen. ALL affected routes will drop.
     */
    vnet_delete_hw_interface(vnet_get_main(), tm->hw_if_indicies[0]);

    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_b_s_64);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
             "2001::b/64 resolves via drop");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_a_s_64);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
             "2001::b/64 resolves via drop");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_3_s_128);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
             "2001:0:0:1::3/64 resolves via drop");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_2_s_128);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
             "2001:0:0:1::2/64 resolves via drop");
    fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
             "2001:0:0:1::1/128 is drop");
    local_pfx.fp_len = 64;
    fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
             "2001:0:0:1/64 resolves via drop");

    /*
     * no change
     */
    FIB_TEST((1 == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNPS+7 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENPS+8 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * Add the interface back. routes stay unresolved.
     */
    error = ethernet_register_interface(vnet_get_main(),
					test_interface_device_class.index,
					0 /* instance */,
					hw_address,
					&tm->hw_if_indicies[0],
					/* flag change */ 0);

    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_b_s_64);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
             "2001::b/64 resolves via drop");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_a_s_64);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
             "2001::b/64 resolves via drop");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_3_s_128);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
             "2001:0:0:1::3/64 resolves via drop");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_2_s_128);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
             "2001:0:0:1::2/64 resolves via drop");
    fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
             "2001:0:0:1::1/128 is drop");
    local_pfx.fp_len = 64;
    fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
    FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
             "2001:0:0:1/64 resolves via drop");

    /*
     * CLEANUP ALL the routes
     */
    fib_table_entry_delete(fib_index,
			   &pfx_2001_c_s_64,
			   FIB_SOURCE_API);
    fib_table_entry_delete(fib_index,
			   &pfx_2001_a_s_64,
			   FIB_SOURCE_API);
    fib_table_entry_delete(fib_index,
			   &pfx_2001_b_s_64,
			   FIB_SOURCE_API);
    fib_table_entry_delete(fib_index,
			   &pfx_2001_1_3_s_128,
			   FIB_SOURCE_ADJ);
    fib_table_entry_delete(fib_index,
			   &pfx_2001_1_2_s_128,
			   FIB_SOURCE_ADJ);
    local_pfx.fp_len = 64;
    fib_table_entry_delete(fib_index, &local_pfx,
			   FIB_SOURCE_INTERFACE);
    local_pfx.fp_len = 128;
    fib_table_entry_special_remove(fib_index, &local_pfx,
				   FIB_SOURCE_INTERFACE);
    connected_pfx.fp_len = 64;
    fib_table_entry_delete(fib_index, &connected_pfx,
			   FIB_SOURCE_INTERFACE);
    connected_pfx.fp_len = 128;
    fib_table_entry_special_remove(fib_index, &connected_pfx,
				   FIB_SOURCE_INTERFACE);

    FIB_TEST((FIB_NODE_INDEX_INVALID ==
	      fib_table_lookup_exact_match(fib_index, &pfx_2001_a_s_64)),
	     "2001::a/64 removed");
    FIB_TEST((FIB_NODE_INDEX_INVALID ==
	      fib_table_lookup_exact_match(fib_index, &pfx_2001_b_s_64)),
	     "2001::b/64 removed");
    FIB_TEST((FIB_NODE_INDEX_INVALID ==
	      fib_table_lookup_exact_match(fib_index, &pfx_2001_1_3_s_128)),
	     "2001:0:0:1::3/128 removed");
    FIB_TEST((FIB_NODE_INDEX_INVALID ==
	      fib_table_lookup_exact_match(fib_index, &pfx_2001_1_2_s_128)),
	     "2001:0:0:1::3/128 removed");
    local_pfx.fp_len = 64;
    FIB_TEST((FIB_NODE_INDEX_INVALID ==
	      fib_table_lookup_exact_match(fib_index, &local_pfx)),
	     "2001:0:0:1/64 removed");
    local_pfx.fp_len = 128;
    FIB_TEST((FIB_NODE_INDEX_INVALID ==
	      fib_table_lookup_exact_match(fib_index, &local_pfx)),
	     "2001:0:0:1::1/128 removed");
    connected_pfx.fp_len = 64;
    FIB_TEST((FIB_NODE_INDEX_INVALID ==
	      fib_table_lookup_exact_match(fib_index, &connected_pfx)),
	     "2001:0:0:2/64 removed");
    connected_pfx.fp_len = 128;
    FIB_TEST((FIB_NODE_INDEX_INVALID ==
	      fib_table_lookup_exact_match(fib_index, &connected_pfx)),
	     "2001:0:0:2::1/128 removed");

    /*
     * -8 entries. -7 path-lists (1 was shared).
     */
    FIB_TEST((0 == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNPS == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENPS == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    /*
     * now remove the VRF
     */
    fib_table_unlock(fib_index, FIB_PROTOCOL_IP6);

    FIB_TEST((0 == fib_path_list_db_size()),   "path list DB population:%d",
	     fib_path_list_db_size());
    FIB_TEST((PNPS-2 == fib_path_list_pool_size()), "path list pool size is %d",
	     fib_path_list_pool_size());
    FIB_TEST((ENPS-2 == fib_entry_pool_size()), "entry pool size is %d",
	     fib_entry_pool_size());

    adj_unlock(ai_02);
    adj_unlock(ai_01);

    /*
     * return the interfaces to up state
     */
    error = vnet_sw_interface_set_flags(vnet_get_main(),
					tm->hw[0]->sw_if_index,
					VNET_SW_INTERFACE_FLAG_ADMIN_UP);
    error = vnet_sw_interface_set_flags(vnet_get_main(),
					tm->hw[1]->sw_if_index,
					VNET_SW_INTERFACE_FLAG_ADMIN_UP);

    FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
	     adj_nbr_db_size());

    return (0);
}

/*
 * Test Attached Exports
 */
static int
fib_test_ae (void)
{
    const dpo_id_t *dpo, *dpo_drop;
    const u32 fib_index = 0;
    fib_node_index_t fei;
    test_main_t *tm;
    ip4_main_t *im;

    tm = &test_main;
    im = &ip4_main;

    FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
	     adj_nbr_db_size());

    /*
     * add interface routes. We'll assume this works. It's more rigorously
     * tested elsewhere.
     */
    fib_prefix_t local_pfx = {
	.fp_len = 24,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4 = {
		/* 10.10.10.10 */
		.as_u32 = clib_host_to_net_u32(0x0a0a0a0a),
	    },
	},
    };

    vec_validate(im->fib_index_by_sw_if_index, tm->hw[0]->sw_if_index);
    im->fib_index_by_sw_if_index[tm->hw[0]->sw_if_index] = fib_index;

    dpo_drop = drop_dpo_get(DPO_PROTO_IP4);

    fib_table_entry_update_one_path(fib_index, &local_pfx,
				    FIB_SOURCE_INTERFACE,
				    (FIB_ENTRY_FLAG_CONNECTED |
				     FIB_ENTRY_FLAG_ATTACHED),
				    FIB_PROTOCOL_IP4,
				    NULL,
				    tm->hw[0]->sw_if_index,
				    ~0,
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei),
	     "attached interface route present");

    local_pfx.fp_len = 32;
    fib_table_entry_update_one_path(fib_index, &local_pfx,
				    FIB_SOURCE_INTERFACE,
				    (FIB_ENTRY_FLAG_CONNECTED |
				     FIB_ENTRY_FLAG_LOCAL),
				    FIB_PROTOCOL_IP4,
				    NULL,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(fib_index, &local_pfx);

    FIB_TEST((FIB_NODE_INDEX_INVALID != fei),
	     "local interface route present");

    /*
     * Add an ARP entry => a complete ADJ plus adj-fib.
     */
    fib_prefix_t pfx_10_10_10_1_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    /* 10.10.10.1 */
	    .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a01),
	},
    };
    fib_node_index_t ai;

    fib_table_entry_update_one_path(fib_index,
				    &pfx_10_10_10_1_s_32,
				    FIB_SOURCE_ADJ,
				    FIB_ENTRY_FLAG_ATTACHED,
				    FIB_PROTOCOL_IP4,
				    &pfx_10_10_10_1_s_32.fp_addr,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 created");
    ai = fib_entry_get_adj(fei);

    /*
     * create another FIB table into which routes will be imported
     */
    u32 import_fib_index1;

    import_fib_index1 = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP4, 11);
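
    /*
     * attached-export: a table with an attached route covering a
     * prefix that is connected in another table imports the
     * adj-fibs and local entries covered by that prefix. The
     * sequence below exercises this import/export machinery.
     */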

    /*
     * Add an attached route in the import FIB
     */
    local_pfx.fp_len = 24;
    fib_table_entry_update_one_path(import_fib_index1,
				    &local_pfx,
				    FIB_SOURCE_API,
				    FIB_ENTRY_FLAG_NONE,
				    FIB_PROTOCOL_IP4,
				    NULL,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached export created");

    /*
     * check for the presence of the adj-fibs in the import table
     */
    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 imported");
    FIB_TEST((ai == fib_entry_get_adj(fei)),
	     "adj-fib1 Import uses same adj as export");

    /*
     * check for the presence of the local in the import table
     */
    local_pfx.fp_len = 32;
    fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local imported");

    /*
     * Add another adj-fib in the export table. Expect this
     * to get automatically exported.
     */
    fib_prefix_t pfx_10_10_10_2_s_32 = {
    	.fp_len = 32,
    	.fp_proto = FIB_PROTOCOL_IP4,
    	.fp_addr = {
    	    /* 10.10.10.2 */
    	    .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a02),
    	},
    };

    fib_table_entry_update_one_path(fib_index,
				    &pfx_10_10_10_2_s_32,
				    FIB_SOURCE_ADJ,
				    FIB_ENTRY_FLAG_ATTACHED,
				    FIB_PROTOCOL_IP4,
				    &pfx_10_10_10_2_s_32.fp_addr,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_2_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 present");
    ai = fib_entry_get_adj(fei);

    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_2_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 imported");
    FIB_TEST((ai == fib_entry_get_adj(fei)),
	     "Import uses same adj as export");
    FIB_TEST((FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(fei)),
             "ADJ-fib2 imported flags %d",
             fib_entry_get_flags(fei));

    /*
     * create a 2nd FIB table into which routes will be imported
     */
    u32 import_fib_index2;

    import_fib_index2 = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP4, 12);

    /*
     * Add an attached route in the import FIB
     */
    local_pfx.fp_len = 24;
    fib_table_entry_update_one_path(import_fib_index2,
				    &local_pfx,
				    FIB_SOURCE_API,
				    FIB_ENTRY_FLAG_NONE,
				    FIB_PROTOCOL_IP4,
				    NULL,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached export created");

    /*
     * check for the presence of all the adj-fibs and local in the import table
     */
    fei = fib_table_lookup_exact_match(import_fib_index2, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 imported");
    fei = fib_table_lookup_exact_match(import_fib_index2, &pfx_10_10_10_2_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 imported");
    local_pfx.fp_len = 32;
    fei = fib_table_lookup_exact_match(import_fib_index2, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local imported");

    /*
     * add a 3rd adj-fib. expect it to be exported to both tables.
     */
    fib_prefix_t pfx_10_10_10_3_s_32 = {
    	.fp_len = 32,
    	.fp_proto = FIB_PROTOCOL_IP4,
    	.fp_addr = {
    	    /* 10.10.10.3 */
    	    .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a03),
    	},
    };

    fib_table_entry_update_one_path(fib_index,
				    &pfx_10_10_10_3_s_32,
				    FIB_SOURCE_ADJ,
				    FIB_ENTRY_FLAG_ATTACHED,
				    FIB_PROTOCOL_IP4,
				    &pfx_10_10_10_3_s_32.fp_addr,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_3_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib3 present");
    ai = fib_entry_get_adj(fei);

    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_3_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib3 imported to FIB1");
    FIB_TEST((ai == fib_entry_get_adj(fei)),
	     "Import uses same adj as export");
    fei = fib_table_lookup_exact_match(import_fib_index2, &pfx_10_10_10_3_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib3 imported to FIB2");
    FIB_TEST((ai == fib_entry_get_adj(fei)),
	     "Import uses same adj as export");

    /*
     * remove the 3rd adj fib. we expect it to be removed from both FIBs
     */
    fib_table_entry_delete(fib_index,
			   &pfx_10_10_10_3_s_32,
			   FIB_SOURCE_ADJ);

    fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_3_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib3 remved");

    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_3_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib3 removed from FIB1");

    fei = fib_table_lookup_exact_match(import_fib_index2, &pfx_10_10_10_3_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib3 removed from FIB2");

    /*
     * remove the attached route from the 2nd FIB. expect the imported
     * entries to be removed
     */
    local_pfx.fp_len = 24;
    fib_table_entry_delete(import_fib_index2,
			   &local_pfx,
			   FIB_SOURCE_API);
    fei = fib_table_lookup_exact_match(import_fib_index2, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "attached export removed");

    fei = fib_table_lookup_exact_match(import_fib_index2, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib1 removed from FIB2");
    fei = fib_table_lookup_exact_match(import_fib_index2, &pfx_10_10_10_2_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib2 removed from FIB2");
    local_pfx.fp_len = 32;
    fei = fib_table_lookup_exact_match(import_fib_index2, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "local removed from FIB2");

    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 still in FIB1");
    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_2_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 still in FIB1");
    local_pfx.fp_len = 32;
    fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local still in FIB1");

    /*
     * modify the route in FIB1 so it is no longer attached. expect the imported
     * entries to be removed
     */
    local_pfx.fp_len = 24;
    fib_table_entry_update_one_path(import_fib_index1,
				    &local_pfx,
				    FIB_SOURCE_API,
				    FIB_ENTRY_FLAG_NONE,
				    FIB_PROTOCOL_IP4,
				    &pfx_10_10_10_2_s_32.fp_addr,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib1 removed from FIB1");
    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_2_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib2 removed from FIB1");
    local_pfx.fp_len = 32;
    fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "local removed from FIB1");

    /*
     * modify it back to attached. expect the adj-fibs back
     */
    local_pfx.fp_len = 24;
    fib_table_entry_update_one_path(import_fib_index1,
				    &local_pfx,
				    FIB_SOURCE_API,
				    FIB_ENTRY_FLAG_NONE,
				    FIB_PROTOCOL_IP4,
				    NULL,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 imported in FIB1");
    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_2_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 imported in FIB1");
    local_pfx.fp_len = 32;
    fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local imported in FIB1");

    /*
     * add a covering attached next-hop for the interface address, so we have
     * a valid adj to find when we check the forwarding tables
     */
    fib_prefix_t pfx_10_0_0_0_s_8 = {
    	.fp_len = 8,
    	.fp_proto = FIB_PROTOCOL_IP4,
    	.fp_addr = {
    	    /* 10.0.0.0 */
    	    .ip4.as_u32 = clib_host_to_net_u32(0x0a000000),
    	},
    };

    fei = fib_table_entry_update_one_path(fib_index,
                                          &pfx_10_0_0_0_s_8,
                                          FIB_SOURCE_API,
                                          FIB_ENTRY_FLAG_NONE,
					  FIB_PROTOCOL_IP4,
                                          &pfx_10_10_10_3_s_32.fp_addr,
                                          tm->hw[0]->sw_if_index,
                                          ~0, // invalid fib index
                                          1,
                                          NULL,
                                          FIB_ROUTE_PATH_FLAG_NONE);
    dpo = fib_entry_contribute_ip_forwarding(fei);

    /*
     * remove the route in the export fib. expect the adj-fibs to be removed
     */
    local_pfx.fp_len = 24;
    fib_table_entry_delete(fib_index,
			   &local_pfx,
			   FIB_SOURCE_INTERFACE);

    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "Delete export: ADJ-fib1 removed from FIB1");
    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_2_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib2 removed from FIB1");
    local_pfx.fp_len = 32;
    fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "local removed from FIB1");

    /*
     * the adj-fibs in the export VRF are present in the FIB table,
     * but not installed in forwarding, since they have no attached cover.
     * Consequently a lookup in the MTRIE gives the adj for the covering
     * route 10.0.0.0/8.
     */
    fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 in export");

    index_t lbi;
    lbi = ip4_fib_forwarding_lookup(fib_index, &pfx_10_10_10_1_s_32.fp_addr.ip4);
    FIB_TEST(lbi == dpo->dpoi_index,
             "10.10.10.1 forwards on \n%U not \n%U",
             format_load_balance, lbi, 0,
             format_dpo_id, dpo, 0);
    lbi = ip4_fib_forwarding_lookup(fib_index, &pfx_10_10_10_2_s_32.fp_addr.ip4);
    FIB_TEST(lbi == dpo->dpoi_index,
             "10.10.10.2 forwards on %U", format_dpo_id, dpo, 0);
    lbi = ip4_fib_forwarding_lookup(fib_index, &pfx_10_10_10_3_s_32.fp_addr.ip4);
    FIB_TEST(lbi == dpo->dpoi_index,
             "10.10.10.3 forwards on %U", format_dpo_id, dpo, 0);

    /*
     * add the export prefix back, but not as attached.
     * No adj-fibs in export nor import tables
     */
    local_pfx.fp_len = 24;
    fei = fib_table_entry_update_one_path(fib_index,
                                          &local_pfx,
                                          FIB_SOURCE_API,
                                          FIB_ENTRY_FLAG_NONE,
					  FIB_PROTOCOL_IP4,
                                          &pfx_10_10_10_1_s_32.fp_addr,
                                          tm->hw[0]->sw_if_index,
                                          ~0, // invalid fib index
                                          1,
                                          NULL,
                                          FIB_ROUTE_PATH_FLAG_NONE);
    dpo = fib_entry_contribute_ip_forwarding(fei);

    fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "non-attached in export: ADJ-fib1 in export");
    lbi = ip4_fib_forwarding_lookup(fib_index, &pfx_10_10_10_1_s_32.fp_addr.ip4);
    FIB_TEST(lbi == dpo->dpoi_index,
             "10.10.10.1 forwards on %U", format_dpo_id, dpo, 0);
    fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 in export");
    lbi = ip4_fib_forwarding_lookup(fib_index, &pfx_10_10_10_2_s_32.fp_addr.ip4);
    FIB_TEST(lbi == dpo->dpoi_index,
             "10.10.10.2 forwards on %U", format_dpo_id, dpo, 0);

    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib1 removed from FIB1");
    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_2_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib2 removed from FIB1");
    local_pfx.fp_len = 32;
    fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "local removed from FIB1");

    /*
     * modify the export prefix so it is attached. expect all covereds to return
     */
    local_pfx.fp_len = 24;
    fib_table_entry_update_one_path(fib_index,
				    &local_pfx,
				    FIB_SOURCE_API,
				    FIB_ENTRY_FLAG_NONE,
				    FIB_PROTOCOL_IP4,
				    NULL,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 reinstalled in export");
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "Adj-fib1 is not drop in export");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_2_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 reinstalled in export");
    local_pfx.fp_len = 32;
    fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local reinstalled in export");
    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached in export: ADJ-fib1 imported");
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "Adj-fib1 is not drop in export");
    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 imported");
    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_2_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 imported");
    local_pfx.fp_len = 32;
    fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local imported");

    /*
     * modify the export prefix so it is connected. expect no change.
     */
    local_pfx.fp_len = 24;
    fib_table_entry_update_one_path(fib_index, &local_pfx,
				    FIB_SOURCE_INTERFACE,
				    (FIB_ENTRY_FLAG_CONNECTED |
				     FIB_ENTRY_FLAG_ATTACHED),
				    FIB_PROTOCOL_IP4,
				    NULL,
				    tm->hw[0]->sw_if_index,
				    ~0,
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 reinstalled in export");
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "Adj-fib1 is not drop in export");
    fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_2_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 reinstalled in export");
    local_pfx.fp_len = 32;
    fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local reinstalled in export");
    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached in export: ADJ-fib1 imported");
    dpo = fib_entry_contribute_ip_forwarding(fei);
    FIB_TEST(dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
	     "Adj-fib1 is not drop in export");
    fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_2_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 imported");
    local_pfx.fp_len = 32;
    fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local imported");

    /*
     * CLEANUP
     */
    fib_table_entry_delete(fib_index,
                           &pfx_10_0_0_0_s_8,
                           FIB_SOURCE_API);
    fib_table_entry_delete(fib_index,
			   &pfx_10_10_10_1_s_32,
			   FIB_SOURCE_ADJ);
    fib_table_entry_delete(fib_index,
			   &pfx_10_10_10_2_s_32,
			   FIB_SOURCE_ADJ);
    local_pfx.fp_len = 32;
    fib_table_entry_delete(fib_index,
			   &local_pfx,
			   FIB_SOURCE_INTERFACE);
    local_pfx.fp_len = 24;
    fib_table_entry_delete(fib_index,
			   &local_pfx,
			   FIB_SOURCE_API);
    fib_table_entry_delete(fib_index,
			   &local_pfx,
			   FIB_SOURCE_INTERFACE);
    local_pfx.fp_len = 24;
    fib_table_entry_delete(import_fib_index1,
			   &local_pfx,
			   FIB_SOURCE_API);

    fib_table_unlock(import_fib_index1, FIB_PROTOCOL_IP4);
    fib_table_unlock(import_fib_index2, FIB_PROTOCOL_IP4);

    FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
	     adj_nbr_db_size());

    return (0);
}


/*
 * Test recursive route handling with MPLS labels
 */
static int
fib_test_label (void)
{
    fib_node_index_t fei, ai_mpls_10_10_10_1, ai_v4_10_10_11_1, ai_v4_10_10_11_2, ai_mpls_10_10_11_2, ai_mpls_10_10_11_1;
    const u32 fib_index = 0;
    test_main_t *tm;
    ip4_main_t *im;
    int lb_count, ii;

    lb_count = pool_elts(load_balance_pool);
    tm = &test_main;
    im = &ip4_main;

    /*
     * add interface routes. We'll assume this works. It's more rigorously
     * tested elsewhere.
     */
    fib_prefix_t local0_pfx = {
	.fp_len = 24,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4 = {
		/* 10.10.10.10 */
		.as_u32 = clib_host_to_net_u32(0x0a0a0a0a),
	    },
	},
    };

    FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
	     adj_nbr_db_size());

    vec_validate(im->fib_index_by_sw_if_index, tm->hw[0]->sw_if_index);
    im->fib_index_by_sw_if_index[tm->hw[0]->sw_if_index] = fib_index;

    fib_table_entry_update_one_path(fib_index, &local0_pfx,
				    FIB_SOURCE_INTERFACE,
				    (FIB_ENTRY_FLAG_CONNECTED |
				     FIB_ENTRY_FLAG_ATTACHED),
				    FIB_PROTOCOL_IP4,
				    NULL,
				    tm->hw[0]->sw_if_index,
				    ~0,
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(fib_index, &local0_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei),
	     "attached interface route present");

    local0_pfx.fp_len = 32;
    fib_table_entry_update_one_path(fib_index, &local0_pfx,
				    FIB_SOURCE_INTERFACE,
				    (FIB_ENTRY_FLAG_CONNECTED |
				     FIB_ENTRY_FLAG_LOCAL),
				    FIB_PROTOCOL_IP4,
				    NULL,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(fib_index, &local0_pfx);

    FIB_TEST((FIB_NODE_INDEX_INVALID != fei),
	     "local interface route present");

    fib_prefix_t local1_pfx = {
	.fp_len = 24,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4 = {
		/* 10.10.11.10 */
		.as_u32 = clib_host_to_net_u32(0x0a0a0b0a),
	    },
	},
    };

    vec_validate(im->fib_index_by_sw_if_index, tm->hw[1]->sw_if_index);
    im->fib_index_by_sw_if_index[tm->hw[1]->sw_if_index] = fib_index;

    fib_table_entry_update_one_path(fib_index, &local1_pfx,
				    FIB_SOURCE_INTERFACE,
				    (FIB_ENTRY_FLAG_CONNECTED |
				     FIB_ENTRY_FLAG_ATTACHED),
				    FIB_PROTOCOL_IP4,
				    NULL,
				    tm->hw[1]->sw_if_index,
				    ~0,
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(fib_index, &local1_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei),
	     "attached interface route present");

    local1_pfx.fp_len = 32;
    fib_table_entry_update_one_path(fib_index, &local1_pfx,
				    FIB_SOURCE_INTERFACE,
				    (FIB_ENTRY_FLAG_CONNECTED |
				     FIB_ENTRY_FLAG_LOCAL),
				    FIB_PROTOCOL_IP4,
				    NULL,
				    tm->hw[1]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);
    fei = fib_table_lookup_exact_match(fib_index, &local1_pfx);

    FIB_TEST((FIB_NODE_INDEX_INVALID != fei),
	     "local interface route present");

    ip46_address_t nh_10_10_10_1 = {
	.ip4 = {
	    .as_u32 = clib_host_to_net_u32(0x0a0a0a01),
	},
    };
    ip46_address_t nh_10_10_11_1 = {
	.ip4 = {
	    .as_u32 = clib_host_to_net_u32(0x0a0a0b01),
	},
    };
    ip46_address_t nh_10_10_11_2 = {
	.ip4 = {
	    .as_u32 = clib_host_to_net_u32(0x0a0a0b02),
	},
    };

    ai_v4_10_10_11_1 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
                                           VNET_LINK_IP4,
                                           &nh_10_10_11_1,
                                           tm->hw[1]->sw_if_index);
    ai_v4_10_10_11_2 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
                                           VNET_LINK_IP4,
                                           &nh_10_10_11_2,
                                           tm->hw[1]->sw_if_index);
    ai_mpls_10_10_10_1 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
                                             VNET_LINK_MPLS,
                                             &nh_10_10_10_1,
                                             tm->hw[0]->sw_if_index);
    ai_mpls_10_10_11_2 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
                                             VNET_LINK_MPLS,
                                             &nh_10_10_11_2,
                                             tm->hw[1]->sw_if_index);
    ai_mpls_10_10_11_1 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
                                             VNET_LINK_MPLS,
                                             &nh_10_10_11_1,
                                             tm->hw[1]->sw_if_index);
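
    /*
     * two adjacencies per peer: the VNET_LINK_IP4 adj is used by paths
     * that push no label, the VNET_LINK_MPLS adj by those that do.
     */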

    /*
     * Add an entry with one path with a real out-going label
     */
    fib_prefix_t pfx_1_1_1_1_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x01010101),
	},
    };
    fib_test_lb_bucket_t l99_eos_o_10_10_10_1 = {
	.type = FT_LB_LABEL_O_ADJ,
	.label_o_adj = {
	    .adj = ai_mpls_10_10_10_1,
	    .label = 99,
	    .eos = MPLS_EOS,
	},
    };
    fib_test_lb_bucket_t l99_neos_o_10_10_10_1 = {
	.type = FT_LB_LABEL_O_ADJ,
	.label_o_adj = {
	    .adj = ai_mpls_10_10_10_1,
	    .label = 99,
	    .eos = MPLS_NON_EOS,
	},
    };
    mpls_label_t *l99 = NULL;
    vec_add1(l99, 99);

    fib_table_entry_update_one_path(fib_index,
				    &pfx_1_1_1_1_s_32,
				    FIB_SOURCE_API,
				    FIB_ENTRY_FLAG_NONE,
				    FIB_PROTOCOL_IP4,
				    &nh_10_10_10_1,
				    tm->hw[0]->sw_if_index,
				    ~0, // invalid fib index
				    1,
				    l99,
				    FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
    FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "1.1.1.1/32 created");

    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     1,
				     &l99_eos_o_10_10_10_1),
	     "1.1.1.1/32 LB 1 bucket via label 99 over 10.10.10.1");

    /*
     * add a path with an implicit NULL label
     */
    fib_test_lb_bucket_t a_o_10_10_11_1 = {
	.type = FT_LB_ADJ,
	.adj = {
	    .adj = ai_v4_10_10_11_1,
	},
    };
    fib_test_lb_bucket_t a_mpls_o_10_10_11_1 = {
	.type = FT_LB_ADJ,
	.adj = {
	    .adj = ai_mpls_10_10_11_1,
	},
    };
    mpls_label_t *l_imp_null = NULL;
    vec_add1(l_imp_null, MPLS_IETF_IMPLICIT_NULL_LABEL);
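
    /*
     * imp-null (label 3) means the downstream peer pops, so no label is
     * pushed and the bucket resolves directly through the adjacency.
     */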

    fei = fib_table_entry_path_add(fib_index,
				   &pfx_1_1_1_1_s_32,
				   FIB_SOURCE_API,
				   FIB_ENTRY_FLAG_NONE,
				   FIB_PROTOCOL_IP4,
				   &nh_10_10_11_1,
				   tm->hw[1]->sw_if_index,
				   ~0, // invalid fib index
				   1,
				   l_imp_null,
				   FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     2,
				     &l99_eos_o_10_10_10_1,
				     &a_o_10_10_11_1),
	     "1.1.1.1/32 LB 2 buckets via: "
	     "label 99 over 10.10.10.1, "
	     "adj over 10.10.11.1");

    /*
     * assign the route a local label
     */
    fib_table_entry_local_label_add(fib_index,
				    &pfx_1_1_1_1_s_32,
				    24001);

    fib_prefix_t pfx_24001_eos = {
	.fp_proto = FIB_PROTOCOL_MPLS,
	.fp_label = 24001,
	.fp_eos = MPLS_EOS,
    };
    fib_prefix_t pfx_24001_neos = {
	.fp_proto = FIB_PROTOCOL_MPLS,
	.fp_label = 24001,
	.fp_eos = MPLS_NON_EOS,
    };
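
    /*
     * an MPLS lookup keys on (label, EOS-bit), so one local label gives
     * rise to both an EOS and a non-EOS entry.
     */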

    /*
     * The EOS entry should link to both the paths,
     *  and use an ip adj for the imp-null
     * The NON-EOS entry should link to both the paths,
     *  and use an mpls adj for the imp-null
     */
    fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
			   &pfx_24001_eos);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_MPLS_EOS,
				     2,
				     &l99_eos_o_10_10_10_1,
				     &a_o_10_10_11_1),
	     "24001/eos LB 2 buckets via: "
	     "label 99 over 10.10.10.1, "
	     "adj over 10.10.11.1");


    fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
			   &pfx_24001_neos);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
				     2,
				     &l99_neos_o_10_10_10_1,
				     &a_mpls_o_10_10_11_1),
	     "24001/neos LB 1 bucket via: "
	     "label 99 over 10.10.10.1 ",
	     "mpls-adj via 10.10.11.1");

    /*
     * add an unlabelled path; this is excluded from the non-eos chains
     */
    fib_test_lb_bucket_t adj_o_10_10_11_2 = {
	.type = FT_LB_ADJ,
	.adj = {
	    .adj = ai_v4_10_10_11_2,
	},
    };

    fei = fib_table_entry_path_add(fib_index,
				   &pfx_1_1_1_1_s_32,
				   FIB_SOURCE_API,
				   FIB_ENTRY_FLAG_NONE,
				   FIB_PROTOCOL_IP4,
				   &nh_10_10_11_2,
				   tm->hw[1]->sw_if_index,
				   ~0, // invalid fib index
				   1,
				   NULL,
				   FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     16, // 3 choices spread over 16 buckets
				     &l99_eos_o_10_10_10_1,
				     &l99_eos_o_10_10_10_1,
				     &l99_eos_o_10_10_10_1,
				     &l99_eos_o_10_10_10_1,
				     &l99_eos_o_10_10_10_1,
				     &l99_eos_o_10_10_10_1,
				     &a_o_10_10_11_1,
				     &a_o_10_10_11_1,
				     &a_o_10_10_11_1,
				     &a_o_10_10_11_1,
				     &a_o_10_10_11_1,
				     &adj_o_10_10_11_2,
				     &adj_o_10_10_11_2,
				     &adj_o_10_10_11_2,
				     &adj_o_10_10_11_2,
				     &adj_o_10_10_11_2),
	     "1.1.1.1/32 LB 16 buckets via: "
	     "label 99 over 10.10.10.1, "
	     "adj over 10.10.11.1",
	     "adj over 10.10.11.2");

    /*
     * get and lock a reference to the non-eos of the via entry 1.1.1.1/32
     */
    dpo_id_t non_eos_1_1_1_1 = DPO_INVALID;
    fib_entry_contribute_forwarding(fei,
				    FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
				    &non_eos_1_1_1_1);

    /*
     * n-eos has only the 2 labelled paths
     */
    fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
			   &pfx_24001_neos);

    FIB_TEST(fib_test_validate_entry(fei,
				     FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
				     2,
				     &l99_neos_o_10_10_10_1,
				     &a_mpls_o_10_10_11_1),
	     "24001/neos LB 2 buckets via: "
	     "label 99 over 10.10.10.1, "
	     "adj-mpls over 10.10.11.2");

    /*
     * A labelled recursive
     */
    fib_prefix_t pfx_2_2_2_2_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x02020202),
	},
    };
    fib_test_lb_bucket_t l1600_eos_o_1_1_1_1 = {
	.type = FT_LB_LABEL_O_LB,
	.label_o_lb = {
	    .lb = non_eos_1_1_1_1.dpoi_index,
	    .label = 1600,
	    .eos = MPLS_EOS,
	},
    };
    mpls_label_t *l1600 = NULL;
    vec_add1(l1600, 1600);

    fib_table_entry_update_one_path(fib_index,
				    &pfx_2_2_2_2_s_32,
				    FIB_SOURCE_API,
				    FIB_ENTRY_FLAG_NONE,
				    FIB_PROTOCOL_IP4,
				    &pfx_1_1_1_1_s_32.fp_addr,
				    ~0,
				    fib_index,
				    1,
				    l1600,
				    FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_2_2_2_2_s_32);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     1,
				     &l1600_eos_o_1_1_1_1),
	     "2.2.2.2.2/32 LB 1 buckets via: "
	     "label 1600 over 1.1.1.1");

    dpo_id_t dpo_44 = DPO_INVALID;
    index_t urpfi;

    fib_entry_contribute_forwarding(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, &dpo_44);
    urpfi = load_balance_get_urpf(dpo_44.dpoi_index);
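
    /*
     * the recursive entry's uRPF list is built from the via-entry's
     * paths, so both egress interfaces should pass and an unknown
     * sw_if_index (99) should fail.
     */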

    FIB_TEST(fib_urpf_check(urpfi, tm->hw[0]->sw_if_index),
	     "uRPF check for 2.2.2.2/32 on %d OK",
	     tm->hw[0]->sw_if_index);
    FIB_TEST(fib_urpf_check(urpfi, tm->hw[1]->sw_if_index),
	     "uRPF check for 2.2.2.2/32 on %d OK",
	     tm->hw[1]->sw_if_index);
    FIB_TEST(!fib_urpf_check(urpfi, 99),
	     "uRPF check for 2.2.2.2/32 on 99 not-OK",
	     99);

    fib_entry_contribute_forwarding(fei, FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS, &dpo_44);
    FIB_TEST(urpfi == load_balance_get_urpf(dpo_44.dpoi_index),
	     "Shared uRPF on IP and non-EOS chain");

    dpo_reset(&dpo_44);

    /*
     * we are holding a lock on the non-eos LB of the via-entry.
     * do a PIC-core failover by shutting the link of the via-entry.
     *
     * shut down the link with the valid label
     */
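    /*
     * (PIC: the dependents share the via-entry's LB, so an in-place
     *  repair of that one LB converges all of them at once)
     */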
    vnet_sw_interface_set_flags(vnet_get_main(),
				tm->hw[0]->sw_if_index,
				0);

    fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     2,
				     &a_o_10_10_11_1,
				     &adj_o_10_10_11_2),
	     "1.1.1.1/32 LB 2 buckets via: "
	     "adj over 10.10.11.1, ",
	     "adj-v4 over 10.10.11.2");

    fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
			   &pfx_24001_eos);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_MPLS_EOS,
				     2,
				     &a_o_10_10_11_1,
				     &adj_o_10_10_11_2),
	     "24001/eos LB 2 buckets via: "
	     "adj over 10.10.11.1, ",
	     "adj-v4 over 10.10.11.2");

    fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
			   &pfx_24001_neos);
    FIB_TEST(fib_test_validate_entry(fei,
				     FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
				     1,
				     &a_mpls_o_10_10_11_1),
	     "24001/neos LB 1 buckets via: "
	     "adj-mpls over 10.10.11.2");

    /*
     * test that the pre-failover load-balance has been in-place
     * modified
     */
    dpo_id_t current = DPO_INVALID;
    fib_entry_contribute_forwarding(fei,
				    FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
				    &current);

    FIB_TEST(!dpo_cmp(&non_eos_1_1_1_1,
                      &current),
	     "PIC-core LB inplace modified %U %U",
             format_dpo_id, &non_eos_1_1_1_1, 0,
             format_dpo_id, &current, 0);

    dpo_reset(&non_eos_1_1_1_1);
    dpo_reset(&current);

    /*
     * no-shut the link with the valid label
     */
    vnet_sw_interface_set_flags(vnet_get_main(),
				tm->hw[0]->sw_if_index,
				VNET_SW_INTERFACE_FLAG_ADMIN_UP);

    fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     16, // 3 choices spread over 16 buckets
				     &l99_eos_o_10_10_10_1,
				     &l99_eos_o_10_10_10_1,
				     &l99_eos_o_10_10_10_1,
				     &l99_eos_o_10_10_10_1,
				     &l99_eos_o_10_10_10_1,
				     &l99_eos_o_10_10_10_1,
				     &a_o_10_10_11_1,
				     &a_o_10_10_11_1,
				     &a_o_10_10_11_1,
				     &a_o_10_10_11_1,
				     &a_o_10_10_11_1,
				     &adj_o_10_10_11_2,
				     &adj_o_10_10_11_2,
				     &adj_o_10_10_11_2,
				     &adj_o_10_10_11_2,
				     &adj_o_10_10_11_2),
	     "1.1.1.1/32 LB 16 buckets via: "
	     "label 99 over 10.10.10.1, "
	     "adj over 10.10.11.1",
	     "adj-v4 over 10.10.11.2");


    fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
			   &pfx_24001_eos);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_MPLS_EOS,
				     16, // 3 choices spread over 16 buckets
				     &l99_eos_o_10_10_10_1,
				     &l99_eos_o_10_10_10_1,
				     &l99_eos_o_10_10_10_1,
				     &l99_eos_o_10_10_10_1,
				     &l99_eos_o_10_10_10_1,
				     &l99_eos_o_10_10_10_1,
				     &a_o_10_10_11_1,
				     &a_o_10_10_11_1,
				     &a_o_10_10_11_1,
				     &a_o_10_10_11_1,
				     &a_o_10_10_11_1,
				     &adj_o_10_10_11_2,
				     &adj_o_10_10_11_2,
				     &adj_o_10_10_11_2,
				     &adj_o_10_10_11_2,
				     &adj_o_10_10_11_2),
	     "24001/eos LB 16 buckets via: "
	     "label 99 over 10.10.10.1, "
	     "adj over 10.10.11.1",
	     "adj-v4 over 10.10.11.2");

    fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
			   &pfx_24001_neos);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
				     2,
				     &l99_neos_o_10_10_10_1,
				     &a_mpls_o_10_10_11_1),
	     "24001/neos LB 2 buckets via: "
	     "label 99 over 10.10.10.1, "
	     "adj-mpls over 10.10.11.2");

    /*
     * remove the first path with the valid label
     */
    fib_table_entry_path_remove(fib_index,
				&pfx_1_1_1_1_s_32,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&nh_10_10_10_1,
				tm->hw[0]->sw_if_index,
				~0, // invalid fib index
				1,
				FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     2,
				     &a_o_10_10_11_1,
				     &adj_o_10_10_11_2),
	     "1.1.1.1/32 LB 2 buckets via: "
	     "adj over 10.10.11.1, "
	     "adj-v4 over 10.10.11.2");

    fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
			   &pfx_24001_eos);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_MPLS_EOS,
				     2,
				     &a_o_10_10_11_1,
				     &adj_o_10_10_11_2),
	     "24001/eos LB 2 buckets via: "
	     "adj over 10.10.11.1, "
	     "adj-v4 over 10.10.11.2");

    fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
			   &pfx_24001_neos);

    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
				     1,
				     &a_mpls_o_10_10_11_1),
	     "24001/neos LB 1 buckets via: "
	     "adj-mpls over 10.10.11.2");

    /*
     * remove the other path with a valid label
     */
    fib_test_lb_bucket_t bucket_drop = {
	.type = FT_LB_SPECIAL,
	.special = {
	    .adj = DPO_PROTO_IP4,
	},
    };
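
    /*
     * with no labelled path remaining, the non-eos chain will have
     * nothing to link to; it collapses to this single drop bucket.
     */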

    fib_table_entry_path_remove(fib_index,
				&pfx_1_1_1_1_s_32,
				FIB_SOURCE_API,
				FIB_PROTOCOL_IP4,
				&nh_10_10_11_1,
				tm->hw[1]->sw_if_index,
				~0, // invalid fib index
				1,
				FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     1,
				     &adj_o_10_10_11_2),
	     "1.1.1.1/32 LB 1 buckets via: "
	     "adj over 10.10.11.2");

    fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
			   &pfx_24001_eos);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_MPLS_EOS,
				     1,
				     &adj_o_10_10_11_2),
	     "24001/eos LB 1 buckets via: "
	     "adj over 10.10.11.2");

    fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
			   &pfx_24001_neos);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
				      1,
				      &bucket_drop),
	     "24001/eos LB 1 buckets via: DROP");

    /*
     * add back the path with the valid label
     */
    l99 = NULL;
    vec_add1(l99, 99);

    fib_table_entry_path_add(fib_index,
			     &pfx_1_1_1_1_s_32,
			     FIB_SOURCE_API,
			     FIB_ENTRY_FLAG_NONE,
			     FIB_PROTOCOL_IP4,
			     &nh_10_10_10_1,
			     tm->hw[0]->sw_if_index,
			     ~0, // invalid fib index
			     1,
			     l99,
			     FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
    FIB_TEST(fib_test_validate_entry(fei,
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     2,
				     &l99_eos_o_10_10_10_1,
				     &adj_o_10_10_11_2),
	     "1.1.1.1/32 LB 2 buckets via: "
	     "label 99 over 10.10.10.1, "
	     "adj over 10.10.11.2");

    fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
			   &pfx_24001_eos);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_MPLS_EOS,
				     2,
				     &l99_eos_o_10_10_10_1,
				     &adj_o_10_10_11_2),
	     "24001/eos LB 2 buckets via: "
	     "label 99 over 10.10.10.1, "
	     "adj over 10.10.11.2");

    fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
			   &pfx_24001_neos);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
				     1,
				     &l99_neos_o_10_10_10_1),
	     "24001/neos LB 1 buckets via: "
	     "label 99 over 10.10.10.1");

    /*
     * change the local label
     */
    fib_table_entry_local_label_add(fib_index,
				    &pfx_1_1_1_1_s_32,
				    25005);

    fib_prefix_t pfx_25005_eos = {
	.fp_proto = FIB_PROTOCOL_MPLS,
	.fp_label = 25005,
	.fp_eos = MPLS_EOS,
    };
    fib_prefix_t pfx_25005_neos = {
	.fp_proto = FIB_PROTOCOL_MPLS,
	.fp_label = 25005,
	.fp_eos = MPLS_NON_EOS,
    };

    FIB_TEST((FIB_NODE_INDEX_INVALID ==
	      fib_table_lookup(fib_index, &pfx_24001_eos)),
	     "24001/eos removed after label change");
    FIB_TEST((FIB_NODE_INDEX_INVALID ==
	      fib_table_lookup(fib_index, &pfx_24001_neos)),
	     "24001/eos removed after label change");

    fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
			   &pfx_25005_eos);
    FIB_TEST(fib_test_validate_entry(fei,
				     FIB_FORW_CHAIN_TYPE_MPLS_EOS,
				     2,
				     &l99_eos_o_10_10_10_1,
				     &adj_o_10_10_11_2),
	     "25005/eos LB 2 buckets via: "
	     "label 99 over 10.10.10.1, "
	     "adj over 10.10.11.2");

    fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
			   &pfx_25005_neos);
    FIB_TEST(fib_test_validate_entry(fei,
				     FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
				     1,
				     &l99_neos_o_10_10_10_1),
	     "25005/neos LB 1 buckets via: "
	     "label 99 over 10.10.10.1");

    /*
     * remove the local label.
     * the check that the MPLS entries are gone is implicit in the fact
     * that the MPLS table is no longer present.
     */
    fib_table_entry_local_label_remove(fib_index,
				       &pfx_1_1_1_1_s_32,
				       25005);

    fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     2,
				     &l99_eos_o_10_10_10_1,
				     &adj_o_10_10_11_2),
	     "24001/eos LB 2 buckets via: "
	     "label 99 over 10.10.10.1, "
	     "adj over 10.10.11.2");

    FIB_TEST((FIB_NODE_INDEX_INVALID ==
	      mpls_fib_index_from_table_id(MPLS_FIB_DEFAULT_TABLE_ID)),
	     "No more MPLS FIB entries => table removed");

    /*
     * add another via-entry for the recursive
     */
    fib_prefix_t pfx_1_1_1_2_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x01010102),
	},
    };
    fib_test_lb_bucket_t l101_eos_o_10_10_10_1 = {
	.type = FT_LB_LABEL_O_ADJ,
	.label_o_adj = {
	    .adj = ai_mpls_10_10_10_1,
	    .label = 101,
	    .eos = MPLS_EOS,
	},
    };
    mpls_label_t *l101 = NULL;
    vec_add1(l101, 101);

    fei = fib_table_entry_update_one_path(fib_index,
					  &pfx_1_1_1_2_s_32,
					  FIB_SOURCE_API,
					  FIB_ENTRY_FLAG_NONE,
					  FIB_PROTOCOL_IP4,
					  &nh_10_10_10_1,
					  tm->hw[0]->sw_if_index,
					  ~0, // invalid fib index
					  1,
					  l101,
					  FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST(fib_test_validate_entry(fei,
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     1,
				     &l101_eos_o_10_10_10_1),
	     "1.1.1.2/32 LB 1 buckets via: "
	     "label 101 over 10.10.10.1");

    dpo_id_t non_eos_1_1_1_2 = DPO_INVALID;
    fib_entry_contribute_forwarding(fib_table_lookup(fib_index,
						     &pfx_1_1_1_1_s_32),
				    FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
				    &non_eos_1_1_1_1);
    fib_entry_contribute_forwarding(fib_table_lookup(fib_index,
						     &pfx_1_1_1_2_s_32),
				    FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
				    &non_eos_1_1_1_2);

    fib_test_lb_bucket_t l1601_eos_o_1_1_1_2 = {
    	.type = FT_LB_LABEL_O_LB,
    	.label_o_lb = {
    	    .lb = non_eos_1_1_1_2.dpoi_index,
    	    .label = 1601,
    	    .eos = MPLS_EOS,
    	},
    };
    mpls_label_t *l1601 = NULL;
    vec_add1(l1601, 1601);

    l1600_eos_o_1_1_1_1.label_o_lb.lb = non_eos_1_1_1_1.dpoi_index;
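    /*
     * (the earlier lock on the via's non-eos LB was dropped, so its
     *  dpoi_index may have changed - hence the re-fetch and refresh above)
     */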

    fei = fib_table_entry_path_add(fib_index,
				   &pfx_2_2_2_2_s_32,
				   FIB_SOURCE_API,
				   FIB_ENTRY_FLAG_NONE,
				   FIB_PROTOCOL_IP4,
				   &pfx_1_1_1_2_s_32.fp_addr,
				   ~0,
				   fib_index,
				   1,
				   l1601,
				   FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     2,
				     &l1600_eos_o_1_1_1_1,
				     &l1601_eos_o_1_1_1_2),
	     "2.2.2.2/32 LB 2 buckets via: "
	     "label 1600 via 1.1,1.1, "
	     "label 16001 via 1.1.1.2");

    /*
     * update the via-entry so it no longer has a real out-label, but an
     * imp-null. the LB for the recursive can still use such a via
     */
    l_imp_null = NULL;
    vec_add1(l_imp_null, MPLS_IETF_IMPLICIT_NULL_LABEL);

    fei = fib_table_entry_update_one_path(fib_index,
					  &pfx_1_1_1_2_s_32,
					  FIB_SOURCE_API,
					  FIB_ENTRY_FLAG_NONE,
					  FIB_PROTOCOL_IP4,
					  &nh_10_10_11_1,
					  tm->hw[1]->sw_if_index,
					  ~0, // invalid fib index
					  1,
					  l_imp_null,
					  FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST(fib_test_validate_entry(fei,
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     1,
				     &a_o_10_10_11_1),
	     "1.1.1.2/32 LB 1 buckets via: "
	     "adj 10.10.11.1");
 
    fei = fib_table_lookup(fib_index, &pfx_2_2_2_2_s_32);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     2,
				     &l1600_eos_o_1_1_1_1,
				     &l1601_eos_o_1_1_1_2),
	     "2.2.2.2/32 LB 2 buckets via: "
	     "label 1600 via 1.1,1.1, "
	     "label 16001 via 1.1.1.2");

    /*
     * update the via-entry so it no longer has labelled paths.
     * the LB for the recursive should exclude this via from its LB
     */
    fei = fib_table_entry_update_one_path(fib_index,
					  &pfx_1_1_1_2_s_32,
					  FIB_SOURCE_API,
					  FIB_ENTRY_FLAG_NONE,
					  FIB_PROTOCOL_IP4,
					  &nh_10_10_11_1,
					  tm->hw[1]->sw_if_index,
					  ~0, // invalid fib index
					  1,
					  NULL,
					  FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST(fib_test_validate_entry(fei,
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     1,
				     &a_o_10_10_11_1),
	     "1.1.1.2/32 LB 1 buckets via: "
	     "adj 10.10.11.1");
 
    fei = fib_table_lookup(fib_index, &pfx_2_2_2_2_s_32);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     1,
				     &l1600_eos_o_1_1_1_1),
	     "2.2.2.2/32 LB 1 buckets via: "
	     "label 1600 via 1.1,1.1");

    dpo_reset(&non_eos_1_1_1_1);
    dpo_reset(&non_eos_1_1_1_2);

    /*
     * Add a recursive with no out-labels. We expect to use the IP of the via
     */
    fib_prefix_t pfx_2_2_2_3_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x02020203),
	},
    };
    dpo_id_t ip_1_1_1_1 = DPO_INVALID;

    fib_table_entry_update_one_path(fib_index,
				    &pfx_2_2_2_3_s_32,
				    FIB_SOURCE_API,
				    FIB_ENTRY_FLAG_NONE,
				    FIB_PROTOCOL_IP4,
				    &pfx_1_1_1_1_s_32.fp_addr,
				    ~0,
				    fib_index,
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);

    fib_entry_contribute_forwarding(fib_table_lookup(fib_index,
						     &pfx_1_1_1_1_s_32),
				    FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				    &ip_1_1_1_1);

    fib_test_lb_bucket_t ip_o_1_1_1_1 = {
	.type = FT_LB_O_LB,
	.lb = {
	    .lb = ip_1_1_1_1.dpoi_index,
	},
    };

    fei = fib_table_lookup(fib_index, &pfx_2_2_2_3_s_32);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     1,
				     &ip_o_1_1_1_1),
	     "2.2.2.2.3/32 LB 1 buckets via: "
	     "ip 1.1.1.1");

    /*
     * Add another recursive with no out-label (no label is passed below).
     * We expect to use the IP forwarding of the via
     */
    fib_prefix_t pfx_2_2_2_4_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x02020204),
	},
    };

    fib_table_entry_update_one_path(fib_index,
				    &pfx_2_2_2_4_s_32,
				    FIB_SOURCE_API,
				    FIB_ENTRY_FLAG_NONE,
				    FIB_PROTOCOL_IP4,
				    &pfx_1_1_1_1_s_32.fp_addr,
				    ~0,
				    fib_index,
				    1,
				    NULL,
				    FIB_ROUTE_PATH_FLAG_NONE);

    fei = fib_table_lookup(fib_index, &pfx_2_2_2_4_s_32);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     1,
				     &ip_o_1_1_1_1),
	     "2.2.2.2.4/32 LB 1 buckets via: "
	     "ip 1.1.1.1");

    dpo_reset(&ip_1_1_1_1);

    /*
     * Create an entry with a deep label stack
     */
    fib_prefix_t pfx_2_2_5_5_s_32 = {
	.fp_len = 32,
	.fp_proto = FIB_PROTOCOL_IP4,
	.fp_addr = {
	    .ip4.as_u32 = clib_host_to_net_u32(0x02020505),
	},
    };
    fib_test_lb_bucket_t ls_eos_o_10_10_11_1 = {
	.type = FT_LB_LABEL_STACK_O_ADJ,
	.label_stack_o_adj = {
	    .adj = ai_mpls_10_10_11_1,
	    .label_stack_size = 8,
	    .label_stack = {
		200, 201, 202, 203, 204, 205, 206, 207
	    },
	    .eos = MPLS_EOS,
	},
    };
    mpls_label_t *label_stack = NULL;
    vec_validate(label_stack, 7);
    for (ii = 0; ii < 8; ii++)
    {
	label_stack[ii] = ii + 200;
    }

    fei = fib_table_entry_update_one_path(fib_index,
					  &pfx_2_2_5_5_s_32,
					  FIB_SOURCE_API,
					  FIB_ENTRY_FLAG_NONE,
					  FIB_PROTOCOL_IP4,
					  &nh_10_10_11_1,
					  tm->hw[1]->sw_if_index,
					  ~0, // invalid fib index
					  1,
					  label_stack,
					  FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST(fib_test_validate_entry(fei,
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     1,
				     &ls_eos_o_10_10_10_1),
	     "2.2.5.5/32 LB 1 buckets via: "
	     "adj 10.10.11.1");
    fib_table_entry_delete_index(fei, FIB_SOURCE_API);

    /*
     * cleanup
     */
    fib_table_entry_delete(fib_index,
			   &pfx_1_1_1_2_s_32,
			   FIB_SOURCE_API);

    fei = fib_table_lookup(fib_index, &pfx_2_2_2_2_s_32);
    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     1,
				     &l1600_eos_o_1_1_1_1),
	     "2.2.2.2/32 LB 1 buckets via: "
	     "label 1600 via 1.1,1.1");

    fib_table_entry_delete(fib_index,
			   &pfx_1_1_1_1_s_32,
			   FIB_SOURCE_API);

    FIB_TEST(fib_test_validate_entry(fei, 
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     1,
				     &bucket_drop),
	     "2.2.2.2/32 LB 1 buckets via: DROP");

    fib_table_entry_delete(fib_index,
			   &pfx_2_2_2_2_s_32,
			   FIB_SOURCE_API);
    fib_table_entry_delete(fib_index,
			   &pfx_2_2_2_3_s_32,
			   FIB_SOURCE_API);
    fib_table_entry_delete(fib_index,
			   &pfx_2_2_2_4_s_32,
			   FIB_SOURCE_API);

    adj_unlock(ai_mpls_10_10_10_1);
    adj_unlock(ai_mpls_10_10_11_2);
    adj_unlock(ai_v4_10_10_11_1);
    adj_unlock(ai_v4_10_10_11_2);
    adj_unlock(ai_mpls_10_10_11_1);

    FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
	     adj_nbr_db_size());

    local0_pfx.fp_len = 32;
    fib_table_entry_delete(fib_index,
			   &local0_pfx,
			   FIB_SOURCE_INTERFACE);
    local0_pfx.fp_len = 24;
    fib_table_entry_delete(fib_index,
			   &local0_pfx,
			   FIB_SOURCE_INTERFACE);
    local1_pfx.fp_len = 32;
    fib_table_entry_delete(fib_index,
			   &local1_pfx,
			   FIB_SOURCE_INTERFACE);
    local1_pfx.fp_len = 24;
    fib_table_entry_delete(fib_index,
			   &local1_pfx,
			   FIB_SOURCE_INTERFACE);

    /*
     * +1 for the drop LB in the MPLS tables.
     */
    FIB_TEST(lb_count+1 == pool_elts(load_balance_pool),
	     "Load-balance resources freed %d of %d",
             lb_count+1, pool_elts(load_balance_pool));

    return (0);
}

#define N_TEST_CHILDREN 4
#define PARENT_INDEX 0
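
/*
 * a test node: embeds a fib_node_t so it can be linked into the FIB
 * graph, and records every back-walk context it receives so the tests
 * can assert how many times each child was visited.
 */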

typedef struct fib_node_test_t_
{
    fib_node_t node;
    u32 sibling;
    u32 index;
    fib_node_back_walk_ctx_t *ctxs;
    u32 destroyed;
} fib_node_test_t;

static fib_node_test_t fib_test_nodes[N_TEST_CHILDREN+1];

#define PARENT() (&fib_test_nodes[PARENT_INDEX].node)

#define FOR_EACH_TEST_CHILD(_tc)                     \
    for (ii = 1, (_tc) = &fib_test_nodes[1];         \
         ii < N_TEST_CHILDREN+1;                     \
         ii++, (_tc) = &fib_test_nodes[ii])

static fib_node_t *
fib_test_child_get_node (fib_node_index_t index)
{
    return (&fib_test_nodes[index].node);
}

static int fib_test_walk_spawns_walks;

static fib_node_back_walk_rc_t
fib_test_child_back_walk_notify (fib_node_t *node,
                                 fib_node_back_walk_ctx_t *ctx)
{
    fib_node_test_t *tc = (fib_node_test_t*) node;

    vec_add1(tc->ctxs, *ctx);

    if (1 == fib_test_walk_spawns_walks)
        fib_walk_sync(FIB_NODE_TYPE_TEST, tc->index, ctx);
    if (2 == fib_test_walk_spawns_walks)
        fib_walk_async(FIB_NODE_TYPE_TEST, tc->index,
                       FIB_WALK_PRIORITY_HIGH, ctx);

    return (FIB_NODE_BACK_WALK_CONTINUE);
}

static void
fib_test_child_last_lock_gone (fib_node_t *node)
{
    fib_node_test_t *tc = (fib_node_test_t *)node;

    tc->destroyed = 1;
}

/**
 * The FIB walk's graph node virtual function table
 */
static const fib_node_vft_t fib_test_child_vft = {
    .fnv_get = fib_test_child_get_node,
    .fnv_last_lock = fib_test_child_last_lock_gone,
    .fnv_back_walk = fib_test_child_back_walk_notify,
};

/*
 * the function (which would be static were it not needed by this test)
 * that processes walks from the async queue
 */
f64 fib_walk_process_queues(vlib_main_t * vm,
                            const f64 quota);
u32 fib_walk_queue_get_size(fib_walk_priority_t prio);
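
/*
 * a quota of 1 second is effectively 'run to completion'; a quota of 0
 * still does one element of work per call (it's a do..while loop).
 */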

static int
fib_test_walk (void)
{
    fib_node_back_walk_ctx_t high_ctx = {}, low_ctx = {};
    fib_node_test_t *tc;
    vlib_main_t *vm;
    u32 ii;

    vm = vlib_get_main();
    fib_node_register_type(FIB_NODE_TYPE_TEST, &fib_test_child_vft);

    /*
     * init a fake node on which we will add children
     */
    fib_node_init(&fib_test_nodes[PARENT_INDEX].node,
                  FIB_NODE_TYPE_TEST);

    FOR_EACH_TEST_CHILD(tc)
    {
        fib_node_init(&tc->node, FIB_NODE_TYPE_TEST);
        fib_node_lock(&tc->node);
        tc->ctxs = NULL;
        tc->index = ii;
        tc->sibling = fib_node_child_add(FIB_NODE_TYPE_TEST,
                                         PARENT_INDEX,
                                         FIB_NODE_TYPE_TEST, ii);
    }

    /*
     * enqueue a walk across the parent's children.
     */
    high_ctx.fnbw_reason = FIB_NODE_BW_REASON_FLAG_RESOLVE;

    fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
                   FIB_WALK_PRIORITY_HIGH, &high_ctx);
    FIB_TEST(N_TEST_CHILDREN+1 == fib_node_list_get_size(PARENT()->fn_children),
             "Parent has %d children pre-walk",
             fib_node_list_get_size(PARENT()->fn_children));

    /*
     * give the walk a large amount of time so it gets to the end
     */
    fib_walk_process_queues(vm, 1);

    FOR_EACH_TEST_CHILD(tc)
    {
        FIB_TEST(1 == vec_len(tc->ctxs),
                 "%d child visitsed %d times",
                 ii, vec_len(tc->ctxs));
        vec_free(tc->ctxs);
    }
    FIB_TEST(0 == fib_walk_queue_get_size(FIB_WALK_PRIORITY_HIGH),
             "Queue is empty post walk");
    FIB_TEST(N_TEST_CHILDREN == fib_node_list_get_size(PARENT()->fn_children),
             "Parent has %d children post walk",
             fib_node_list_get_size(PARENT()->fn_children));

    /*
     * walk again. should be no increase in the number of visits, since
     * the walk will have terminated.
     */
    fib_walk_process_queues(vm, 1);

    FOR_EACH_TEST_CHILD(tc)
    {
        FIB_TEST(0 == vec_len(tc->ctxs),
                 "%d child visitsed %d times",
                 ii, vec_len(tc->ctxs));
    }

    /*
     * schedule a low and high priority walk. expect the high to be performed
     * before the low.
     * schedule the high prio walk first so that it is further from the head
     * of the dependency list. that way it won't merge with the low one.
     */
    high_ctx.fnbw_reason = FIB_NODE_BW_REASON_FLAG_RESOLVE;
    low_ctx.fnbw_reason = FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE;

    fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
                   FIB_WALK_PRIORITY_HIGH, &high_ctx);
    fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
                   FIB_WALK_PRIORITY_LOW, &low_ctx);

    fib_walk_process_queues(vm, 1);

    FOR_EACH_TEST_CHILD(tc)
    {
        FIB_TEST(high_ctx.fnbw_reason == tc->ctxs[0].fnbw_reason,
                 "child %d visited by high prio walk", ii);
        FIB_TEST(low_ctx.fnbw_reason  == tc->ctxs[1].fnbw_reason,
                 "child %d visited by low prio walk", ii);
        vec_free(tc->ctxs);
    }
    FIB_TEST(0 == fib_walk_queue_get_size(FIB_WALK_PRIORITY_HIGH),
             "Queue is empty post prio walk");
    FIB_TEST(N_TEST_CHILDREN == fib_node_list_get_size(PARENT()->fn_children),
             "Parent has %d children post prio walk",
             fib_node_list_get_size(PARENT()->fn_children));

    /*
     * schedule 2 walks of the same priority that can be merged.
     * expect that each child is thus visited only once.
     */
    high_ctx.fnbw_reason = FIB_NODE_BW_REASON_FLAG_RESOLVE;
    low_ctx.fnbw_reason  = FIB_NODE_BW_REASON_FLAG_RESOLVE;

    fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
                   FIB_WALK_PRIORITY_HIGH, &high_ctx);
    fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
                   FIB_WALK_PRIORITY_HIGH, &low_ctx);

    fib_walk_process_queues(vm, 1);

    FOR_EACH_TEST_CHILD(tc)
    {
        FIB_TEST(1 == vec_len(tc->ctxs),
                 "%d child visitsed %d times during merge walk",
                 ii, vec_len(tc->ctxs));
        vec_free(tc->ctxs);
    }
    FIB_TEST(0 == fib_walk_queue_get_size(FIB_WALK_PRIORITY_HIGH),
             "Queue is empty post merge walk");
    FIB_TEST(N_TEST_CHILDREN == fib_node_list_get_size(PARENT()->fn_children),
             "Parent has %d children post merge walk",
             fib_node_list_get_size(PARENT()->fn_children));

    /*
     * schedule 2 walks of the same priority that cannot be merged.
     * expect that each child is thus visited twice and in the order
     * in which the walks were scheduled.
     */
    high_ctx.fnbw_reason = FIB_NODE_BW_REASON_FLAG_RESOLVE;
    low_ctx.fnbw_reason  = FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE;

    fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
                   FIB_WALK_PRIORITY_HIGH, &high_ctx);
    fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
                   FIB_WALK_PRIORITY_HIGH, &low_ctx);

    fib_walk_process_queues(vm, 1);

    FOR_EACH_TEST_CHILD(tc)
    {
        FIB_TEST(high_ctx.fnbw_reason == tc->ctxs[0].fnbw_reason,
                 "child %d visited by high prio walk", ii);
        FIB_TEST(low_ctx.fnbw_reason  == tc->ctxs[1].fnbw_reason,
                 "child %d visited by low prio walk", ii);
        vec_free(tc->ctxs);
    }
    FIB_TEST(0 == fib_walk_queue_get_size(FIB_WALK_PRIORITY_HIGH),
             "Queue is empty post no-merge walk");
    FIB_TEST(N_TEST_CHILDREN == fib_node_list_get_size(PARENT()->fn_children),
             "Parent has %d children post no-merge walk",
             fib_node_list_get_size(PARENT()->fn_children));

    /*
     * schedule a walk that makes only one child progress.
     * we do this by giving the queue draining process zero
     * time quanta. it's a do..while loop, so it does something.
     */
    high_ctx.fnbw_reason = FIB_NODE_BW_REASON_FLAG_RESOLVE;

    fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
                   FIB_WALK_PRIORITY_HIGH, &high_ctx);
    fib_walk_process_queues(vm, 0);

    FOR_EACH_TEST_CHILD(tc)
    {
        if (ii == N_TEST_CHILDREN)
        {
            FIB_TEST(1 == vec_len(tc->ctxs),
                     "%d child visitsed %d times in zero quanta walk",
                     ii, vec_len(tc->ctxs));
        }
        else
        {
            FIB_TEST(0 == vec_len(tc->ctxs),
                     "%d child visitsed %d times in 0 quanta walk",
                     ii, vec_len(tc->ctxs));
        }
    }
    FIB_TEST(1 == fib_walk_queue_get_size(FIB_WALK_PRIORITY_HIGH),
             "Queue is not empty post zero quanta walk");
    FIB_TEST(N_TEST_CHILDREN+1 == fib_node_list_get_size(PARENT()->fn_children),
             "Parent has %d children post zero qunta walk",
             fib_node_list_get_size(PARENT()->fn_children));

    /*
     * another one step
     */
    fib_walk_process_queues(vm, 0);

    FOR_EACH_TEST_CHILD(tc)
    {
        if (ii >= N_TEST_CHILDREN-1)
        {
            FIB_TEST(1 == vec_len(tc->ctxs),
                     "%d child visitsed %d times in 2nd zero quanta walk",
                     ii, vec_len(tc->ctxs));
        }
        else
        {
            FIB_TEST(0 == vec_len(tc->ctxs),
                     "%d child visitsed %d times in 2nd 0 quanta walk",
                     ii, vec_len(tc->ctxs));
        }
    }
    FIB_TEST(1 == fib_walk_queue_get_size(FIB_WALK_PRIORITY_HIGH),
             "Queue is not empty post zero quanta walk");
    FIB_TEST(N_TEST_CHILDREN+1 == fib_node_list_get_size(PARENT()->fn_children),
             "Parent has %d children post zero qunta walk",
             fib_node_list_get_size(PARENT()->fn_children));

    /*
     * schedule another walk that will catch-up and merge.
     */
    fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
                   FIB_WALK_PRIORITY_HIGH, &high_ctx);
    fib_walk_process_queues(vm, 1);

    FOR_EACH_TEST_CHILD(tc)
    {
        if (ii >= N_TEST_CHILDREN-1)
        {
            FIB_TEST(2 == vec_len(tc->ctxs),
                     "%d child visitsed %d times in 2nd zero quanta merge walk",
                     ii, vec_len(tc->ctxs));
            vec_free(tc->ctxs);
        }
        else
        {
            FIB_TEST(1 == vec_len(tc->ctxs),
                     "%d child visitsed %d times in 2nd 0 quanta merge walk",
                     ii, vec_len(tc->ctxs));
            vec_free(tc->ctxs);
        }
    }
    FIB_TEST(0 == fib_walk_queue_get_size(FIB_WALK_PRIORITY_HIGH),
             "Queue is empty post 2nd zero quanta merge walk");
    FIB_TEST(N_TEST_CHILDREN == fib_node_list_get_size(PARENT()->fn_children),
             "Parent has %d children post 2nd zero quanta merge walk",
             fib_node_list_get_size(PARENT()->fn_children));

    /*
     * park an async walk in the middle of the list, then have a sync walk
     * catch it. same expectations as async catches async.
     */
    high_ctx.fnbw_reason = FIB_NODE_BW_REASON_FLAG_RESOLVE;

    fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
                   FIB_WALK_PRIORITY_HIGH, &high_ctx);

    fib_walk_process_queues(vm, 0);
    fib_walk_process_queues(vm, 0);

    fib_walk_sync(FIB_NODE_TYPE_TEST, PARENT_INDEX, &high_ctx);

    FOR_EACH_TEST_CHILD(tc)
    {
        if (ii >= N_TEST_CHILDREN-1)
        {
            FIB_TEST(2 == vec_len(tc->ctxs),
                     "%d child visitsed %d times in sync catches async walk",
                     ii, vec_len(tc->ctxs));
            vec_free(tc->ctxs);
        }
        else
        {
            FIB_TEST(1 == vec_len(tc->ctxs),
                     "%d child visitsed %d times in sync catches async walk",
                     ii, vec_len(tc->ctxs));
            vec_free(tc->ctxs);
        }
    }
    FIB_TEST(0 == fib_walk_queue_get_size(FIB_WALK_PRIORITY_HIGH),
             "Queue is empty post sync-catches-async walk");
    FIB_TEST(N_TEST_CHILDREN == fib_node_list_get_size(PARENT()->fn_children),
             "Parent has %d children post sync-catches-async walk",
             fib_node_list_get_size(PARENT()->fn_children));

    /*
     * make the parent a child of one of its children, thus inducing a loop
     * in the graph.
     */
    fib_test_nodes[PARENT_INDEX].sibling =
        fib_node_child_add(FIB_NODE_TYPE_TEST,
                           1, // the first child
                           FIB_NODE_TYPE_TEST,
                           PARENT_INDEX);

    /*
     * execute a sync walk from the parent. each child visited spawns more sync
     * walks. we expect the walk to terminate.
     */
    fib_test_walk_spawns_walks = 1;

    fib_walk_sync(FIB_NODE_TYPE_TEST, PARENT_INDEX, &high_ctx);

    FOR_EACH_TEST_CHILD(tc)
    {
        /*
         * child 1 - which is last in the list - has the loop.
         * the other children are thus visited first. then we meet
         * child 1. we go round the loop again, visiting the other
         * children. then we meet the walk in the dep list and bail.
         * child 1 is not visited again.
         */
        if (1 == ii)
        {
            FIB_TEST(1 == vec_len(tc->ctxs),
                     "child %d visitsed %d times during looped sync walk",
                     ii, vec_len(tc->ctxs));
        }
        else
        {
            FIB_TEST(2 == vec_len(tc->ctxs),
                     "child %d visitsed %d times during looped sync walk",
                     ii, vec_len(tc->ctxs));
        }
        vec_free(tc->ctxs);
    }
    FIB_TEST(N_TEST_CHILDREN == fib_node_list_get_size(PARENT()->fn_children),
             "Parent has %d children post sync loop walk",
             fib_node_list_get_size(PARENT()->fn_children));

    /*
     * the walk doesn't reach the max depth because the infra knows that sync
     * meets sync implies a loop and bails early.
     */
    FIB_TEST(high_ctx.fnbw_depth == 9,
             "Walk context depth %d post sync loop walk",
             high_ctx.fnbw_depth);

    /*
     * execute an async walk of the graph loop, with each child spawning
     * sync walks
     */
    high_ctx.fnbw_depth = 0;
    fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
                   FIB_WALK_PRIORITY_HIGH, &high_ctx);

    fib_walk_process_queues(vm, 1);

    FOR_EACH_TEST_CHILD(tc)
    {
        /*
         * we don't really care how many times the children are visited,
         * as long as it is at least once.
         */
        FIB_TEST(1 <= vec_len(tc->ctxs),
                 "child %d visited %d times during looped async-spawns-sync walk",
                 ii, vec_len(tc->ctxs));
        vec_free(tc->ctxs);
    }

    /*
     * execute an async walk of the graph loop, with each child spawning
     * async walks
     */
    fib_test_walk_spawns_walks = 2;
    high_ctx.fnbw_depth = 0;
    fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
                   FIB_WALK_PRIORITY_HIGH, &high_ctx);

    fib_walk_process_queues(vm, 1);

    FOR_EACH_TEST_CHILD(tc)
    {
        /*
         * we don't really care how many times the children are visited,
         * as long as it is at least once.
         */
        FIB_TEST(1 <= vec_len(tc->ctxs),
                 "child %d visited %d times during looped async-spawns-async walk",
                 ii, vec_len(tc->ctxs));
        vec_free(tc->ctxs);
    }


    fib_node_child_remove(FIB_NODE_TYPE_TEST,
                          1, // the first child
                          fib_test_nodes[PARENT_INDEX].sibling);

    /*
     * cleanup
     */
    FOR_EACH_TEST_CHILD(tc)
    {
        fib_node_child_remove(FIB_NODE_TYPE_TEST, PARENT_INDEX,
                              tc->sibling);
        fib_node_deinit(&tc->node);
        fib_node_unlock(&tc->node);
    }
    fib_node_deinit(PARENT());

    /*
     * The parent will be destroyed when the last lock on it goes.
     * this test ensures all the walk objects are unlocking it.
     */
    FIB_TEST((1 == fib_test_nodes[PARENT_INDEX].destroyed),
             "Parent was destroyed");

    return (0);
}

static int
lfib_test (void)
{
    const mpls_label_t deag_label = 50;
    const u32 lfib_index = 0;
    const u32 fib_index = 0;
    dpo_id_t dpo = DPO_INVALID;
    const dpo_id_t *dpo1;
    fib_node_index_t lfe;
    lookup_dpo_t *lkd;
    test_main_t *tm;
    int lb_count;
    adj_index_t ai_mpls_10_10_10_1;

    tm = &test_main;
    lb_count = pool_elts(load_balance_pool);

    FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
	     adj_nbr_db_size());

    /*
     * MPLS enable an interface so we get the MPLS table created
     */
    mpls_sw_interface_enable_disable(&mpls_main,
                                     tm->hw[0]->sw_if_index,
                                     1);

    ip46_address_t nh_10_10_10_1 = {
	.ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a01),
    };
    ai_mpls_10_10_10_1 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
                                             VNET_LINK_MPLS,
                                             &nh_10_10_10_1,
                                             tm->hw[0]->sw_if_index);
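    /*
     * an MPLS-linked adjacency to the peer 10.10.10.1; this is the
     * output used by the label x-connect later in this test.
     */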

    /*
     * Test that the specials stack properly.
     */
    fib_prefix_t exp_null_v6_pfx = {
	.fp_proto = FIB_PROTOCOL_MPLS,
	.fp_eos = MPLS_EOS,
	.fp_label = MPLS_IETF_IPV6_EXPLICIT_NULL_LABEL,
	.fp_payload_proto = DPO_PROTO_IP6,
    };
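    /*
     * IPv6 Explicit NULL (label 2) is one of the IETF reserved labels
     * the MPLS table is expected to install when it is created.
     */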
    lfe = fib_table_lookup(lfib_index, &exp_null_v6_pfx);
    FIB_TEST((FIB_NODE_INDEX_INVALID != lfe),
	     "%U/%U present",
	     format_mpls_unicast_label, MPLS_IETF_IPV6_EXPLICIT_NULL_LABEL,
	     format_mpls_eos_bit, MPLS_EOS);
    fib_entry_contribute_forwarding(lfe,
				    FIB_FORW_CHAIN_TYPE_MPLS_EOS,
				    &dpo);
    dpo1 = load_balance_get_bucket(dpo.dpoi_index, 0);
    lkd = lookup_dpo_get(dpo1->dpoi_index);

    FIB_TEST((fib_index == lkd->lkd_fib_index),
              "%U/%U is deag in %d %U",
             format_mpls_unicast_label, deag_label,
             format_mpls_eos_bit, MPLS_EOS,
             lkd->lkd_fib_index,
             format_dpo_id, &dpo, 0);
    FIB_TEST((LOOKUP_INPUT_DST_ADDR == lkd->lkd_input),
             "%U/%U is dst deag",
             format_mpls_unicast_label, deag_label,
             format_mpls_eos_bit, MPLS_EOS);
    FIB_TEST((LOOKUP_TABLE_FROM_INPUT_INTERFACE == lkd->lkd_table),
             "%U/%U is lookup in interface's table",
             format_mpls_unicast_label, deag_label,
             format_mpls_eos_bit, MPLS_EOS);
    FIB_TEST((DPO_PROTO_IP6 == lkd->lkd_proto),
             "%U/%U is %U dst deag",
             format_mpls_unicast_label, deag_label,
             format_mpls_eos_bit, MPLS_EOS,
             format_dpo_proto, lkd->lkd_proto);


    /*
     * A deag route for EOS
     */
    fib_prefix_t pfx = {
	.fp_proto = FIB_PROTOCOL_MPLS,
	.fp_eos = MPLS_EOS,
	.fp_label = deag_label,
	.fp_payload_proto = DPO_PROTO_IP4,
    };
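    /*
     * a path via the all-zeros next-hop, no interface, and an explicit
     * next-hop table is the convention for a deag: pop the label and
     * lookup the payload in that table.
     */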
    lfe = fib_table_entry_path_add(lfib_index,
				   &pfx,
				   FIB_SOURCE_CLI,
				   FIB_ENTRY_FLAG_NONE,
				   FIB_PROTOCOL_IP4,
				   &zero_addr,
				   ~0,
				   fib_index,
				   1,
				   NULL,
				   FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST((lfe == fib_table_lookup(lfib_index, &pfx)),
              "%U/%U present",
              format_mpls_unicast_label, deag_label,
              format_mpls_eos_bit, MPLS_EOS);

    fib_entry_contribute_forwarding(lfe,
				    FIB_FORW_CHAIN_TYPE_MPLS_EOS,
				    &dpo);
    dpo1 = load_balance_get_bucket(dpo.dpoi_index, 0);
    lkd = lookup_dpo_get(dpo1->dpoi_index);

    FIB_TEST((fib_index == lkd->lkd_fib_index),
              "%U/%U is deag in %d %U",
             format_mpls_unicast_label, deag_label,
             format_mpls_eos_bit, MPLS_EOS,
             lkd->lkd_fib_index,
             format_dpo_id, &dpo, 0);
    FIB_TEST((LOOKUP_INPUT_DST_ADDR == lkd->lkd_input),
             "%U/%U is dst deag",
             format_mpls_unicast_label, deag_label,
             format_mpls_eos_bit, MPLS_EOS);
    FIB_TEST((DPO_PROTO_IP4 == lkd->lkd_proto),
             "%U/%U is %U dst deag",
             format_mpls_unicast_label, deag_label,
             format_mpls_eos_bit, MPLS_EOS,
             format_dpo_proto, lkd->lkd_proto);

    fib_table_entry_delete_index(lfe, FIB_SOURCE_CLI);

    FIB_TEST((FIB_NODE_INDEX_INVALID == fib_table_lookup(lfib_index,
							 &pfx)),
              "%U/%U not present",
              format_mpls_unicast_label, deag_label,
              format_mpls_eos_bit, MPLS_EOS);

    /*
     * A deag route for non-EOS
     */
    pfx.fp_eos = MPLS_NON_EOS;
    lfe = fib_table_entry_path_add(lfib_index,
				   &pfx,
				   FIB_SOURCE_CLI,
				   FIB_ENTRY_FLAG_NONE,
				   FIB_PROTOCOL_IP4,
				   &zero_addr,
				   ~0,
				   lfib_index,
				   1,
				   NULL,
				   FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST((lfe == fib_table_lookup(lfib_index, &pfx)),
              "%U/%U present",
              format_mpls_unicast_label, deag_label,
              format_mpls_eos_bit, MPLS_NON_EOS);

    fib_entry_contribute_forwarding(lfe,
				    FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
				    &dpo);
    dpo1 = load_balance_get_bucket(dpo.dpoi_index, 0);
    lkd = lookup_dpo_get(dpo1->dpoi_index);

    FIB_TEST((fib_index == lkd->lkd_fib_index),
              "%U/%U is deag in %d %U",
             format_mpls_unicast_label, deag_label,
             format_mpls_eos_bit, MPLS_NON_EOS,
             lkd->lkd_fib_index,
             format_dpo_id, &dpo, 0);
    FIB_TEST((LOOKUP_INPUT_DST_ADDR == lkd->lkd_input),
             "%U/%U is dst deag",
             format_mpls_unicast_label, deag_label,
             format_mpls_eos_bit, MPLS_NON_EOS);

    FIB_TEST((DPO_PROTO_MPLS == lkd->lkd_proto),
             "%U/%U is %U dst deag",
             format_mpls_unicast_label, deag_label,
             format_mpls_eos_bit, MPLS_NON_EOS,
             format_dpo_proto, lkd->lkd_proto);

    fib_table_entry_delete_index(lfe, FIB_SOURCE_CLI);

    FIB_TEST((FIB_NODE_INDEX_INVALID == fib_table_lookup(lfib_index,
							 &pfx)),
              "%U/%U not present",
              format_mpls_unicast_label, deag_label,
              format_mpls_eos_bit, MPLS_NON_EOS);

    dpo_reset(&dpo);

    /*
     * An MPLS x-connect
     */
    fib_prefix_t pfx_1200 = {
	.fp_len = 21,
	.fp_proto = FIB_PROTOCOL_MPLS,
	.fp_label = 1200,
	.fp_eos = MPLS_NON_EOS,
    };
    fib_test_lb_bucket_t neos_o_10_10_10_1 = {
	.type = FT_LB_LABEL_STACK_O_ADJ,
	.label_stack_o_adj = {
	    .adj = ai_mpls_10_10_10_1,
	    .label_stack_size = 4,
	    .label_stack = {
		200, 300, 400, 500,
	    },
	    .eos = MPLS_NON_EOS,
	},
    };
    dpo_id_t neos_1200 = DPO_INVALID;
    dpo_id_t ip_1200 = DPO_INVALID;
    mpls_label_t *l200 = NULL;
    vec_add1(l200, 200);
    vec_add1(l200, 300);
    vec_add1(l200, 400);
    vec_add1(l200, 500);
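    /*
     * l200 is the out-label stack {200,300,400,500} that the 1200
     * x-connect below imposes on its way out of the MPLS adjacency.
     */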

    lfe = fib_table_entry_update_one_path(fib_index,
					  &pfx_1200,
					  FIB_SOURCE_API,
					  FIB_ENTRY_FLAG_NONE,
					  FIB_PROTOCOL_IP4,
					  &nh_10_10_10_1,
					  tm->hw[0]->sw_if_index,
					  ~0, // invalid fib index
					  1,
					  l200,
					  FIB_ROUTE_PATH_FLAG_NONE);

    FIB_TEST(fib_test_validate_entry(lfe,
				     FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
				     1,
				     &neos_o_10_10_10_1),
	     "1200/0 LB 1 buckets via: "
	     "adj 10.10.11.1");

    /*
     * A recursive route via the MPLS x-connect
     */
    fib_prefix_t pfx_2_2_2_3_s_32 = {
    	.fp_len = 32,
    	.fp_proto = FIB_PROTOCOL_IP4,
    	.fp_addr = {
    	    .ip4.as_u32 = clib_host_to_net_u32(0x02020203),
    	},
    };
    fib_route_path_t *rpaths = NULL, rpath = {
    	.frp_proto = FIB_PROTOCOL_MPLS,
    	.frp_local_label = 1200,
        .frp_sw_if_index = ~0, // recursive
    	.frp_fib_index = 0, // Default MPLS fib
    	.frp_weight = 1,
    	.frp_flags = FIB_ROUTE_PATH_FLAG_NONE,
    	.frp_label_stack = NULL,
    };
    vec_add1(rpaths, rpath);
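    /*
     * a single path that recurses via label 1200 (non-EOS) in the
     * default MPLS table; 2.2.2.3/32 resolves via the x-connect.
     */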

    fib_table_entry_path_add2(fib_index,
    			      &pfx_2_2_2_3_s_32,
    			      FIB_SOURCE_API,
    			      FIB_ENTRY_FLAG_NONE,
    			      rpaths);

    /*
     * A labelled recursive route via the MPLS x-connect
     */
    fib_prefix_t pfx_2_2_2_4_s_32 = {
    	.fp_len = 32,
    	.fp_proto = FIB_PROTOCOL_IP4,
    	.fp_addr = {
    	    .ip4.as_u32 = clib_host_to_net_u32(0x02020204),
    	},
    };
    mpls_label_t *l999 = NULL;
    vec_add1(l999, 999);
    rpaths[0].frp_label_stack = l999;
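    /*
     * l999 is the out-label imposed on the recursive path: 2.2.2.4/32
     * should stack label 999 (EOS) over 1200's non-EOS chain.
     */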

    fib_table_entry_path_add2(fib_index,
    			      &pfx_2_2_2_4_s_32,
    			      FIB_SOURCE_API,
    			      FIB_ENTRY_FLAG_NONE,
    			      rpaths);

    fib_entry_contribute_forwarding(fib_table_lookup(fib_index, &pfx_1200),
    				    FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
    				    &ip_1200);
    fib_entry_contribute_forwarding(fib_table_lookup(fib_index, &pfx_1200),
    				    FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
    				    &neos_1200);

    fib_test_lb_bucket_t ip_o_1200 = {
    	.type = FT_LB_O_LB,
    	.lb = {
    	    .lb = ip_1200.dpoi_index,
    	},
    };
    fib_test_lb_bucket_t mpls_o_1200 = {
	.type = FT_LB_LABEL_O_LB,
	.label_o_lb = {
    	    .lb = neos_1200.dpoi_index,
	    .label = 999,
	    .eos = MPLS_EOS,
	},
    };
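    /*
     * expected buckets: ip_o_1200 is an LB stacked directly on 1200's
     * IP4 LB (no label); mpls_o_1200 is label 999 (EOS) imposed over
     * 1200's non-EOS LB.
     */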

    lfe = fib_table_lookup(fib_index, &pfx_2_2_2_3_s_32);
    FIB_TEST(fib_test_validate_entry(lfe,
    				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
    				     1,
    				     &ip_o_1200),
    	     "2.2.2.2.3/32 LB 1 buckets via: label 1200 EOS");
    lfe = fib_table_lookup(fib_index, &pfx_2_2_2_4_s_32);
    FIB_TEST(fib_test_validate_entry(lfe,
    				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
    				     1,
    				     &mpls_o_1200),
    	     "2.2.2.2.4/32 LB 1 buckets via: label 1200 non-EOS");

    fib_table_entry_delete(fib_index, &pfx_1200, FIB_SOURCE_API);
    fib_table_entry_delete(fib_index, &pfx_2_2_2_3_s_32, FIB_SOURCE_API);
    fib_table_entry_delete(fib_index, &pfx_2_2_2_4_s_32, FIB_SOURCE_API);

    dpo_reset(&neos_1200);
    dpo_reset(&ip_1200);

    /*
     * A recursive via a label that does not exist
     */
    fib_test_lb_bucket_t bucket_drop = {
	.type = FT_LB_SPECIAL,
	.special = {
	    .adj = DPO_PROTO_MPLS,
	},
    };
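    /*
     * 1200 was deleted above, so an unlabelled recursion via it cannot
     * resolve and should fall back to a drop.
     */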

    rpaths[0].frp_label_stack = NULL;
    lfe = fib_table_entry_path_add2(fib_index,
				    &pfx_2_2_2_4_s_32,
				    FIB_SOURCE_API,
				    FIB_ENTRY_FLAG_NONE,
				    rpaths);

    fib_entry_contribute_forwarding(fib_table_lookup(fib_index, &pfx_1200),
    				    FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
    				    &ip_1200);
    ip_o_1200.lb.lb = ip_1200.dpoi_index;

    FIB_TEST(fib_test_validate_entry(lfe,
    				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
    				     1,
    				     &ip_o_1200),
    	     "2.2.2.2.4/32 LB 1 buckets via: label 1200 EOS");
    lfe = fib_table_lookup(fib_index, &pfx_1200);
    FIB_TEST(fib_test_validate_entry(lfe,
				     FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
				     1,
				     &bucket_drop),
	     "2.2.2.4/32 LB 1 buckets via: ip4-DROP");

    fib_table_entry_delete(fib_index, &pfx_2_2_2_4_s_32, FIB_SOURCE_API);

    dpo_reset(&ip_1200);

    /*
     * cleanup
     */
    mpls_sw_interface_enable_disable(&mpls_main,
                                     tm->hw[0]->sw_if_index,
                                     0);
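    /* the pool-size check below is this test's DPO leak detector */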

    FIB_TEST(lb_count == pool_elts(load_balance_pool),
	     "Load-balance resources freed %d of %d",
             lb_count, pool_elts(load_balance_pool));

    return (0);
}

static clib_error_t *
fib_test (vlib_main_t * vm, 
	  unformat_input_t * input,
	  vlib_cli_command_t * cmd_arg)
{
    int res;

    res = 0;
    fib_test_mk_intf(4);
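    /* all the sub-tests assume these four test interfaces exist */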

    if (unformat (input, "ip"))
    {
	res += fib_test_v4();
	res += fib_test_v6();
    }
    else if (unformat (input, "label"))
    {
	res += fib_test_label();
    }
    else if (unformat (input, "ae"))
    {
	res += fib_test_ae();
    }
    else if (unformat (input, "lfib"))
    {
	res += lfib_test();
    }
    else if (unformat (input, "walk"))
    {
	res += fib_test_walk();
    }
    else
    {
        /*
         * These walk UTs aren't run as part of the full suite, since the
         * fib-walk process must be disabled in order for the tests to work
         *
         * fib_test_walk();
         */
	res += fib_test_v4();
	res += fib_test_v6();
	res += fib_test_ae();
	res += fib_test_label();
	res += lfib_test();
    }

    if (res)
    {
        return clib_error_return(0, "FIB Unit Test Failed");
    }
    else
    {
        return (NULL);
    }
}

VLIB_CLI_COMMAND (test_fib_command, static) = {
    .path = "test fib",
    .short_help = "fib unit tests - DO NOT RUN ON A LIVE SYSTEM",
    .function = fib_test,
};

clib_error_t *
fib_test_init (vlib_main_t *vm)
{
    return 0;
}

VLIB_INIT_FUNCTION (fib_test_init);