author    Damjan Marion <damarion@cisco.com>    2016-12-19 23:05:39 +0100
committer Damjan Marion <damarion@cisco.com>    2016-12-28 12:25:14 +0100
commit    7cd468a3d7dee7d6c92f69a0bb7061ae208ec727 (patch)
tree      5de62f8dbd3a752f5a676ca600e43d2652d1ff1a /src/vnet
parent    696f1adec0df3b8f161862566dd9c86174302658 (diff)
Reorganize source tree to use single autotools instance
Change-Id: I7b51f88292e057c6443b12224486f2d0c9f8ae23
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/vnet')
-rw-r--r-- src/vnet/adj/adj.c | 454
-rw-r--r-- src/vnet/adj/adj.h | 122
-rw-r--r-- src/vnet/adj/adj_glean.c | 285
-rw-r--r-- src/vnet/adj/adj_glean.h | 61
-rw-r--r-- src/vnet/adj/adj_internal.h | 104
-rw-r--r-- src/vnet/adj/adj_l2.c | 194
-rw-r--r-- src/vnet/adj/adj_l2.h | 24
-rw-r--r-- src/vnet/adj/adj_midchain.c | 559
-rw-r--r-- src/vnet/adj/adj_midchain.h | 102
-rw-r--r-- src/vnet/adj/adj_nbr.c | 1087
-rw-r--r-- src/vnet/adj/adj_nbr.h | 176
-rw-r--r-- src/vnet/adj/adj_rewrite.c | 53
-rw-r--r-- src/vnet/adj/adj_rewrite.h | 49
-rw-r--r-- src/vnet/adj/adj_types.h | 53
-rw-r--r-- src/vnet/api_errno.h | 113
-rw-r--r-- src/vnet/bfd/bfd.api | 205
-rw-r--r-- src/vnet/bfd/bfd_api.c | 262
-rw-r--r-- src/vnet/bfd/bfd_api.h | 46
-rw-r--r-- src/vnet/bfd/bfd_debug.h | 79
-rw-r--r-- src/vnet/bfd/bfd_doc.md | 1
-rw-r--r-- src/vnet/bfd/bfd_main.c | 969
-rw-r--r-- src/vnet/bfd/bfd_main.h | 220
-rw-r--r-- src/vnet/bfd/bfd_protocol.c | 74
-rw-r--r-- src/vnet/bfd/bfd_protocol.h | 154
-rw-r--r-- src/vnet/bfd/bfd_udp.c | 639
-rw-r--r-- src/vnet/bfd/bfd_udp.h | 56
-rw-r--r-- src/vnet/bfd/dir.dox | 18
-rw-r--r-- src/vnet/buffer.h | 381
-rw-r--r-- src/vnet/cdp/cdp.pg | 7
-rw-r--r-- src/vnet/cdp/cdp_input.c | 506
-rw-r--r-- src/vnet/cdp/cdp_node.c | 208
-rw-r--r-- src/vnet/cdp/cdp_node.h | 147
-rw-r--r-- src/vnet/cdp/cdp_periodic.c | 512
-rw-r--r-- src/vnet/cdp/cdp_protocol.h | 186
-rw-r--r-- src/vnet/classify/README | 180
-rw-r--r-- src/vnet/classify/flow_classify.c | 212
-rw-r--r-- src/vnet/classify/flow_classify.h | 51
-rw-r--r-- src/vnet/classify/flow_classify_node.c | 338
-rw-r--r-- src/vnet/classify/input_acl.c | 283
-rw-r--r-- src/vnet/classify/input_acl.h | 54
-rw-r--r-- src/vnet/classify/ip_classify.c | 365
-rw-r--r-- src/vnet/classify/policer_classify.c | 227
-rw-r--r-- src/vnet/classify/policer_classify.h | 55
-rw-r--r-- src/vnet/classify/vnet_classify.c | 2436
-rw-r--r-- src/vnet/classify/vnet_classify.h | 523
-rw-r--r-- src/vnet/config.c | 361
-rw-r--r-- src/vnet/config.h | 176
-rw-r--r-- src/vnet/cop/cop.c | 387
-rw-r--r-- src/vnet/cop/cop.h | 89
-rw-r--r-- src/vnet/cop/ip4_whitelist.c | 356
-rw-r--r-- src/vnet/cop/ip6_whitelist.c | 298
-rw-r--r-- src/vnet/cop/node1.c | 319
-rw-r--r-- src/vnet/devices/af_packet/af_packet.api | 71
-rw-r--r-- src/vnet/devices/af_packet/af_packet.c | 366
-rw-r--r-- src/vnet/devices/af_packet/af_packet.h | 69
-rw-r--r-- src/vnet/devices/af_packet/af_packet_api.c | 143
-rw-r--r-- src/vnet/devices/af_packet/cli.c | 144
-rw-r--r-- src/vnet/devices/af_packet/device.c | 250
-rw-r--r-- src/vnet/devices/af_packet/node.c | 288
-rw-r--r-- src/vnet/devices/devices.c | 91
-rw-r--r-- src/vnet/devices/devices.h | 53
-rw-r--r-- src/vnet/devices/dpdk/cli.c | 1296
-rw-r--r-- src/vnet/devices/dpdk/device.c | 840
-rw-r--r-- src/vnet/devices/dpdk/dpdk.h | 534
-rw-r--r-- src/vnet/devices/dpdk/dpdk_priv.h | 132
-rw-r--r-- src/vnet/devices/dpdk/format.c | 763
-rw-r--r-- src/vnet/devices/dpdk/hqos.c | 775
-rwxr-xr-x src/vnet/devices/dpdk/init.c | 1803
-rw-r--r-- src/vnet/devices/dpdk/ipsec/cli.c | 141
-rw-r--r-- src/vnet/devices/dpdk/ipsec/crypto_node.c | 210
-rw-r--r-- src/vnet/devices/dpdk/ipsec/dir.dox | 18
-rw-r--r-- src/vnet/devices/dpdk/ipsec/dpdk_crypto_ipsec_doc.md | 73
-rw-r--r-- src/vnet/devices/dpdk/ipsec/esp.h | 295
-rw-r--r-- src/vnet/devices/dpdk/ipsec/esp_decrypt.c | 583
-rw-r--r-- src/vnet/devices/dpdk/ipsec/esp_encrypt.c | 598
-rw-r--r-- src/vnet/devices/dpdk/ipsec/ipsec.c | 313
-rw-r--r-- src/vnet/devices/dpdk/ipsec/ipsec.h | 227
-rw-r--r-- src/vnet/devices/dpdk/node.c | 687
-rw-r--r-- src/vnet/devices/dpdk/qos_doc.md | 404
-rw-r--r-- src/vnet/devices/netmap/cli.c | 146
-rw-r--r-- src/vnet/devices/netmap/device.c | 261
-rw-r--r-- src/vnet/devices/netmap/net_netmap.h | 650
-rw-r--r-- src/vnet/devices/netmap/netmap.api | 74
-rw-r--r-- src/vnet/devices/netmap/netmap.c | 316
-rw-r--r-- src/vnet/devices/netmap/netmap.h | 164
-rw-r--r-- src/vnet/devices/netmap/netmap_api.c | 137
-rw-r--r-- src/vnet/devices/netmap/node.c | 300
-rw-r--r-- src/vnet/devices/nic/ixge.c | 2938
-rw-r--r-- src/vnet/devices/nic/ixge.h | 1293
-rw-r--r-- src/vnet/devices/nic/sfp.c | 117
-rw-r--r-- src/vnet/devices/nic/sfp.h | 117
-rw-r--r-- src/vnet/devices/ssvm/node.c | 343
-rw-r--r-- src/vnet/devices/ssvm/ssvm_eth.c | 491
-rw-r--r-- src/vnet/devices/ssvm/ssvm_eth.h | 141
-rw-r--r-- src/vnet/devices/virtio/dir.dox | 27
-rw-r--r-- src/vnet/devices/virtio/vhost-user.c | 3314
-rw-r--r-- src/vnet/devices/virtio/vhost-user.h | 350
-rw-r--r-- src/vnet/devices/virtio/vhost_user.api | 125
-rw-r--r-- src/vnet/devices/virtio/vhost_user_api.c | 262
-rw-r--r-- src/vnet/dhcp/client.c | 1031
-rw-r--r-- src/vnet/dhcp/client.h | 118
-rw-r--r-- src/vnet/dhcp/packet.h | 61
-rw-r--r-- src/vnet/dhcp/proxy.h | 92
-rw-r--r-- src/vnet/dhcp/proxy_error.def | 30
-rw-r--r-- src/vnet/dhcp/proxy_node.c | 1114
-rw-r--r-- src/vnet/dhcpv6/packet.h | 183
-rw-r--r-- src/vnet/dhcpv6/proxy.h | 95
-rw-r--r-- src/vnet/dhcpv6/proxy_error.def | 29
-rw-r--r-- src/vnet/dhcpv6/proxy_node.c | 1191
-rw-r--r-- src/vnet/dpo/classify_dpo.c | 131
-rw-r--r-- src/vnet/dpo/classify_dpo.h | 56
-rw-r--r-- src/vnet/dpo/dpo.c | 500
-rw-r--r-- src/vnet/dpo/dpo.h | 381
-rw-r--r-- src/vnet/dpo/drop_dpo.c | 106
-rw-r--r-- src/vnet/dpo/drop_dpo.h | 31
-rw-r--r-- src/vnet/dpo/ip_null_dpo.c | 408
-rw-r--r-- src/vnet/dpo/ip_null_dpo.h | 56
-rw-r--r-- src/vnet/dpo/load_balance.c | 993
-rw-r--r-- src/vnet/dpo/load_balance.h | 211
-rw-r--r-- src/vnet/dpo/load_balance_map.c | 575
-rw-r--r-- src/vnet/dpo/load_balance_map.h | 79
-rw-r--r-- src/vnet/dpo/lookup_dpo.c | 1185
-rw-r--r-- src/vnet/dpo/lookup_dpo.h | 108
-rw-r--r-- src/vnet/dpo/mpls_label_dpo.c | 570
-rw-r--r-- src/vnet/dpo/mpls_label_dpo.h | 101
-rw-r--r-- src/vnet/dpo/punt_dpo.c | 100
-rw-r--r-- src/vnet/dpo/punt_dpo.h | 30
-rw-r--r-- src/vnet/dpo/receive_dpo.c | 165
-rw-r--r-- src/vnet/dpo/receive_dpo.h | 62
-rw-r--r-- src/vnet/ethernet/arp.c | 2355
-rw-r--r-- src/vnet/ethernet/arp_packet.h | 173
-rw-r--r-- src/vnet/ethernet/dir.dox | 24
-rw-r--r-- src/vnet/ethernet/error.def | 46
-rw-r--r-- src/vnet/ethernet/ethernet.h | 561
-rw-r--r-- src/vnet/ethernet/format.c | 366
-rw-r--r-- src/vnet/ethernet/init.c | 128
-rw-r--r-- src/vnet/ethernet/interface.c | 730
-rw-r--r-- src/vnet/ethernet/mac_swap.c | 397
-rwxr-xr-x src/vnet/ethernet/node.c | 1368
-rw-r--r-- src/vnet/ethernet/packet.h | 152
-rw-r--r-- src/vnet/ethernet/pg.c | 183
-rw-r--r-- src/vnet/ethernet/types.def | 113
-rw-r--r-- src/vnet/feature/feature.c | 463
-rw-r--r-- src/vnet/feature/feature.h | 382
-rw-r--r-- src/vnet/feature/registration.c | 301
-rw-r--r-- src/vnet/fib/fib.c | 41
-rw-r--r-- src/vnet/fib/fib.h | 652
-rw-r--r-- src/vnet/fib/fib_api.h | 54
-rw-r--r-- src/vnet/fib/fib_attached_export.c | 572
-rw-r--r-- src/vnet/fib/fib_attached_export.h | 57
-rw-r--r-- src/vnet/fib/fib_entry.c | 1503
-rw-r--r-- src/vnet/fib/fib_entry.h | 530
-rw-r--r-- src/vnet/fib/fib_entry_cover.c | 225
-rw-r--r-- src/vnet/fib/fib_entry_cover.h | 47
-rw-r--r-- src/vnet/fib/fib_entry_delegate.c | 149
-rw-r--r-- src/vnet/fib/fib_entry_delegate.h | 124
-rw-r--r-- src/vnet/fib/fib_entry_src.c | 1456
-rw-r--r-- src/vnet/fib/fib_entry_src.h | 296
-rw-r--r-- src/vnet/fib/fib_entry_src_adj.c | 207
-rw-r--r-- src/vnet/fib/fib_entry_src_api.c | 119
-rw-r--r-- src/vnet/fib/fib_entry_src_default.c | 121
-rw-r--r-- src/vnet/fib/fib_entry_src_default_route.c | 58
-rw-r--r-- src/vnet/fib/fib_entry_src_interface.c | 195
-rw-r--r-- src/vnet/fib/fib_entry_src_lisp.c | 133
-rw-r--r-- src/vnet/fib/fib_entry_src_mpls.c | 196
-rw-r--r-- src/vnet/fib/fib_entry_src_rr.c | 293
-rw-r--r-- src/vnet/fib/fib_entry_src_special.c | 71
-rw-r--r-- src/vnet/fib/fib_internal.h | 69
-rw-r--r-- src/vnet/fib/fib_node.c | 277
-rw-r--r-- src/vnet/fib/fib_node.h | 371
-rw-r--r-- src/vnet/fib/fib_node_list.c | 390
-rw-r--r-- src/vnet/fib/fib_node_list.h | 64
-rw-r--r-- src/vnet/fib/fib_path.c | 2001
-rw-r--r-- src/vnet/fib/fib_path.h | 158
-rw-r--r-- src/vnet/fib/fib_path_ext.c | 231
-rw-r--r-- src/vnet/fib/fib_path_ext.h | 69
-rw-r--r-- src/vnet/fib/fib_path_list.c | 1223
-rw-r--r-- src/vnet/fib/fib_path_list.h | 158
-rw-r--r-- src/vnet/fib/fib_table.c | 1104
-rw-r--r-- src/vnet/fib/fib_table.h | 732
-rw-r--r-- src/vnet/fib/fib_test.c | 7112
-rw-r--r-- src/vnet/fib/fib_types.c | 326
-rw-r--r-- src/vnet/fib/fib_types.h | 340
-rw-r--r-- src/vnet/fib/fib_urpf_list.c | 260
-rw-r--r-- src/vnet/fib/fib_urpf_list.h | 146
-rw-r--r-- src/vnet/fib/fib_walk.c | 1108
-rw-r--r-- src/vnet/fib/fib_walk.h | 58
-rw-r--r-- src/vnet/fib/ip4_fib.c | 664
-rw-r--r-- src/vnet/fib/ip4_fib.h | 141
-rw-r--r-- src/vnet/fib/ip6_fib.c | 784
-rw-r--r-- src/vnet/fib/ip6_fib.h | 130
-rw-r--r-- src/vnet/fib/mpls_fib.c | 439
-rw-r--r-- src/vnet/fib/mpls_fib.h | 106
-rw-r--r-- src/vnet/flow/flow_report.c | 502
-rw-r--r-- src/vnet/flow/flow_report.h | 145
-rw-r--r-- src/vnet/flow/flow_report_classify.c | 529
-rw-r--r-- src/vnet/flow/flow_report_classify.h | 122
-rw-r--r-- src/vnet/flow/ipfix_info_elements.h | 429
-rw-r--r-- src/vnet/flow/ipfix_packet.h | 188
-rw-r--r-- src/vnet/global_funcs.h | 32
-rw-r--r-- src/vnet/gre/error.def | 23
-rw-r--r-- src/vnet/gre/gre.api | 57
-rw-r--r-- src/vnet/gre/gre.c | 455
-rw-r--r-- src/vnet/gre/gre.h | 235
-rw-r--r-- src/vnet/gre/gre_api.c | 204
-rw-r--r-- src/vnet/gre/interface.c | 606
-rw-r--r-- src/vnet/gre/node.c | 531
-rw-r--r-- src/vnet/gre/packet.h | 55
-rw-r--r-- src/vnet/gre/pg.c | 77
-rw-r--r-- src/vnet/handoff.c | 594
-rw-r--r-- src/vnet/handoff.h | 259
-rw-r--r-- src/vnet/hdlc/error.def | 42
-rw-r--r-- src/vnet/hdlc/hdlc.c | 249
-rw-r--r-- src/vnet/hdlc/hdlc.h | 127
-rw-r--r-- src/vnet/hdlc/node.c | 351
-rw-r--r-- src/vnet/hdlc/packet.h | 72
-rw-r--r-- src/vnet/hdlc/pg.c | 105
-rw-r--r-- src/vnet/interface.api | 339
-rw-r--r-- src/vnet/interface.c | 1398
-rw-r--r-- src/vnet/interface.h | 658
-rw-r--r-- src/vnet/interface_api.c | 725
-rw-r--r-- src/vnet/interface_cli.c | 1165
-rw-r--r-- src/vnet/interface_format.c | 401
-rw-r--r-- src/vnet/interface_funcs.h | 318
-rw-r--r-- src/vnet/interface_output.c | 1404
-rw-r--r-- src/vnet/ip/dir.dox | 26
-rw-r--r-- src/vnet/ip/format.c | 121
-rw-r--r-- src/vnet/ip/format.h | 114
-rw-r--r-- src/vnet/ip/icmp4.c | 784
-rw-r--r-- src/vnet/ip/icmp4.h | 60
-rw-r--r-- src/vnet/ip/icmp46_packet.h | 398
-rw-r--r-- src/vnet/ip/icmp6.c | 882
-rw-r--r-- src/vnet/ip/icmp6.h | 86
-rw-r--r-- src/vnet/ip/igmp_packet.h | 155
-rw-r--r-- src/vnet/ip/ip.api | 434
-rw-r--r-- src/vnet/ip/ip.h | 195
-rw-r--r-- src/vnet/ip/ip4.h | 322
-rw-r--r-- src/vnet/ip/ip46_cli.c | 236
-rw-r--r-- src/vnet/ip/ip4_error.h | 95
-rw-r--r-- src/vnet/ip/ip4_format.c | 256
-rw-r--r-- src/vnet/ip/ip4_forward.c | 3345
-rw-r--r-- src/vnet/ip/ip4_input.c | 507
-rw-r--r-- src/vnet/ip/ip4_mtrie.c | 568
-rw-r--r-- src/vnet/ip/ip4_mtrie.h | 188
-rw-r--r-- src/vnet/ip/ip4_packet.h | 384
-rw-r--r-- src/vnet/ip/ip4_pg.c | 387
-rw-r--r-- src/vnet/ip/ip4_source_and_port_range_check.c | 1415
-rw-r--r-- src/vnet/ip/ip4_source_check.c | 573
-rw-r--r-- src/vnet/ip/ip4_test.c | 340
-rw-r--r-- src/vnet/ip/ip6.h | 476
-rw-r--r-- src/vnet/ip/ip6_error.h | 92
-rw-r--r-- src/vnet/ip/ip6_format.c | 383
-rw-r--r-- src/vnet/ip/ip6_forward.c | 3402
-rw-r--r-- src/vnet/ip/ip6_hop_by_hop.c | 1194
-rw-r--r-- src/vnet/ip/ip6_hop_by_hop.h | 217
-rw-r--r-- src/vnet/ip/ip6_hop_by_hop_packet.h | 66
-rw-r--r-- src/vnet/ip/ip6_input.c | 353
-rw-r--r-- src/vnet/ip/ip6_neighbor.c | 4088
-rw-r--r-- src/vnet/ip/ip6_neighbor.h | 52
-rw-r--r-- src/vnet/ip/ip6_packet.h | 499
-rw-r--r-- src/vnet/ip/ip6_pg.c | 231
-rw-r--r-- src/vnet/ip/ip_api.c | 1196
-rw-r--r-- src/vnet/ip/ip_checksum.c | 228
-rw-r--r-- src/vnet/ip/ip_frag.c | 581
-rw-r--r-- src/vnet/ip/ip_frag.h | 96
-rw-r--r-- src/vnet/ip/ip_init.c | 152
-rw-r--r-- src/vnet/ip/ip_input_acl.c | 450
-rw-r--r-- src/vnet/ip/ip_packet.h | 180
-rw-r--r-- src/vnet/ip/ip_source_and_port_range_check.h | 148
-rw-r--r-- src/vnet/ip/lookup.c | 967
-rw-r--r-- src/vnet/ip/lookup.h | 498
-rw-r--r-- src/vnet/ip/ping.c | 888
-rw-r--r-- src/vnet/ip/ping.h | 108
-rw-r--r-- src/vnet/ip/ports.def | 757
-rw-r--r-- src/vnet/ip/protocols.def | 162
-rw-r--r-- src/vnet/ip/punt.c | 323
-rw-r--r-- src/vnet/ip/punt.h | 43
-rw-r--r-- src/vnet/ip/punt_error.def | 19
-rw-r--r-- src/vnet/ip/tcp_packet.h | 138
-rw-r--r-- src/vnet/ip/udp.h | 313
-rw-r--r-- src/vnet/ip/udp_error.def | 21
-rw-r--r-- src/vnet/ip/udp_format.c | 91
-rw-r--r-- src/vnet/ip/udp_init.c | 71
-rw-r--r-- src/vnet/ip/udp_local.c | 645
-rw-r--r-- src/vnet/ip/udp_packet.h | 65
-rw-r--r-- src/vnet/ip/udp_pg.c | 237
-rw-r--r-- src/vnet/ipsec-gre/dir.dox | 18
-rw-r--r-- src/vnet/ipsec-gre/error.def | 26
-rw-r--r-- src/vnet/ipsec-gre/interface.c | 311
-rw-r--r-- src/vnet/ipsec-gre/ipsec_gre.api | 79
-rw-r--r-- src/vnet/ipsec-gre/ipsec_gre.c | 407
-rw-r--r-- src/vnet/ipsec-gre/ipsec_gre.h | 114
-rw-r--r-- src/vnet/ipsec-gre/ipsec_gre_api.c | 190
-rw-r--r-- src/vnet/ipsec-gre/ipsec_gre_doc.md | 74
-rw-r--r-- src/vnet/ipsec-gre/node.c | 433
-rw-r--r-- src/vnet/ipsec/esp.h | 320
-rw-r--r-- src/vnet/ipsec/esp_decrypt.c | 430
-rw-r--r-- src/vnet/ipsec/esp_encrypt.c | 425
-rw-r--r-- src/vnet/ipsec/ikev2.c | 2186
-rw-r--r-- src/vnet/ipsec/ikev2.h | 410
-rw-r--r-- src/vnet/ipsec/ikev2_cli.c | 479
-rw-r--r-- src/vnet/ipsec/ikev2_crypto.c | 765
-rw-r--r-- src/vnet/ipsec/ikev2_format.c | 155
-rw-r--r-- src/vnet/ipsec/ikev2_payload.c | 535
-rw-r--r-- src/vnet/ipsec/ikev2_priv.h | 321
-rw-r--r-- src/vnet/ipsec/ipsec.api | 457
-rw-r--r-- src/vnet/ipsec/ipsec.c | 581
-rw-r--r-- src/vnet/ipsec/ipsec.h | 344
-rw-r--r-- src/vnet/ipsec/ipsec_api.c | 537
-rw-r--r-- src/vnet/ipsec/ipsec_cli.c | 807
-rw-r--r-- src/vnet/ipsec/ipsec_format.c | 141
-rw-r--r-- src/vnet/ipsec/ipsec_if.c | 372
-rw-r--r-- src/vnet/ipsec/ipsec_if_in.c | 175
-rw-r--r-- src/vnet/ipsec/ipsec_if_out.c | 161
-rw-r--r-- src/vnet/ipsec/ipsec_input.c | 455
-rw-r--r-- src/vnet/ipsec/ipsec_output.c | 478
-rw-r--r-- src/vnet/l2/dir.dox | 24
-rw-r--r-- src/vnet/l2/feat_bitmap.c | 185
-rw-r--r-- src/vnet/l2/feat_bitmap.h | 96
-rw-r--r-- src/vnet/l2/l2.api | 38
-rw-r--r-- src/vnet/l2/l2_api.c | 140
-rw-r--r-- src/vnet/l2/l2_bd.c | 1079
-rw-r--r-- src/vnet/l2/l2_bd.h | 150
-rw-r--r-- src/vnet/l2/l2_bvi.c | 40
-rw-r--r-- src/vnet/l2/l2_bvi.h | 117
-rw-r--r-- src/vnet/l2/l2_classify.h | 116
-rw-r--r-- src/vnet/l2/l2_efp_filter.c | 614
-rw-r--r-- src/vnet/l2/l2_efp_filter.h | 33
-rw-r--r-- src/vnet/l2/l2_fib.c | 857
-rw-r--r-- src/vnet/l2/l2_fib.h | 341
-rw-r--r-- src/vnet/l2/l2_flood.c | 568
-rw-r--r-- src/vnet/l2/l2_flood.h | 35
-rw-r--r-- src/vnet/l2/l2_fwd.c | 544
-rw-r--r-- src/vnet/l2/l2_fwd.h | 36
-rw-r--r-- src/vnet/l2/l2_input.c | 1116
-rw-r--r-- src/vnet/l2/l2_input.h | 266
-rw-r--r-- src/vnet/l2/l2_input_acl.c | 434
-rw-r--r-- src/vnet/l2/l2_input_classify.c | 655
-rw-r--r-- src/vnet/l2/l2_input_vtr.c | 401
-rw-r--r-- src/vnet/l2/l2_input_vtr.h | 54
-rw-r--r-- src/vnet/l2/l2_learn.c | 597
-rw-r--r-- src/vnet/l2/l2_learn.h | 64
-rw-r--r-- src/vnet/l2/l2_output.c | 708
-rw-r--r-- src/vnet/l2/l2_output.h | 285
-rw-r--r-- src/vnet/l2/l2_output_acl.c | 358
-rw-r--r-- src/vnet/l2/l2_output_classify.c | 657
-rw-r--r-- src/vnet/l2/l2_patch.c | 452
-rw-r--r-- src/vnet/l2/l2_rw.c | 719
-rw-r--r-- src/vnet/l2/l2_rw.h | 95
-rw-r--r-- src/vnet/l2/l2_vtr.c | 770
-rw-r--r-- src/vnet/l2/l2_vtr.h | 270
-rw-r--r-- src/vnet/l2/l2_xcrw.c | 591
-rw-r--r-- src/vnet/l2/l2_xcrw.h | 91
-rw-r--r-- src/vnet/l2tp/decap.c | 309
-rw-r--r-- src/vnet/l2tp/encap.c | 238
-rw-r--r-- src/vnet/l2tp/l2tp.api | 126
-rw-r--r-- src/vnet/l2tp/l2tp.c | 739
-rw-r--r-- src/vnet/l2tp/l2tp.h | 147
-rw-r--r-- src/vnet/l2tp/l2tp_api.c | 267
-rw-r--r-- src/vnet/l2tp/packet.h | 44
-rw-r--r-- src/vnet/l2tp/pg.c | 106
-rw-r--r-- src/vnet/l3_types.h | 59
-rw-r--r-- src/vnet/lawful-intercept/lawful_intercept.c | 112
-rw-r--r-- src/vnet/lawful-intercept/lawful_intercept.h | 45
-rw-r--r-- src/vnet/lawful-intercept/node.c | 275
-rw-r--r-- src/vnet/lisp-cp/control.c | 4950
-rw-r--r-- src/vnet/lisp-cp/control.h | 314
-rw-r--r-- src/vnet/lisp-cp/gid_dictionary.c | 865
-rw-r--r-- src/vnet/lisp-cp/gid_dictionary.h | 120
-rw-r--r-- src/vnet/lisp-cp/lisp.api | 835
-rw-r--r-- src/vnet/lisp-cp/lisp_api.c | 1257
-rw-r--r-- src/vnet/lisp-cp/lisp_cp_dpo.c | 117
-rw-r--r-- src/vnet/lisp-cp/lisp_cp_dpo.h | 45
-rw-r--r-- src/vnet/lisp-cp/lisp_cp_messages.h | 613
-rw-r--r-- src/vnet/lisp-cp/lisp_msg_serdes.c | 372
-rw-r--r-- src/vnet/lisp-cp/lisp_msg_serdes.h | 58
-rw-r--r-- src/vnet/lisp-cp/lisp_types.c | 1574
-rw-r--r-- src/vnet/lisp-cp/lisp_types.h | 354
-rw-r--r-- src/vnet/lisp-cp/packets.c | 269
-rw-r--r-- src/vnet/lisp-cp/packets.h | 82
-rw-r--r-- src/vnet/lisp-gpe/decap.c | 501
-rw-r--r-- src/vnet/lisp-gpe/dir.dox | 26
-rw-r--r-- src/vnet/lisp-gpe/interface.c | 709
-rw-r--r-- src/vnet/lisp-gpe/lisp_gpe.api | 143
-rw-r--r-- src/vnet/lisp-gpe/lisp_gpe.c | 327
-rw-r--r-- src/vnet/lisp-gpe/lisp_gpe.h | 257
-rw-r--r-- src/vnet/lisp-gpe/lisp_gpe_adjacency.c | 542
-rw-r--r-- src/vnet/lisp-gpe/lisp_gpe_adjacency.h | 136
-rw-r--r-- src/vnet/lisp-gpe/lisp_gpe_api.c | 304
-rw-r--r-- src/vnet/lisp-gpe/lisp_gpe_error.def | 18
-rw-r--r-- src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c | 1053
-rw-r--r-- src/vnet/lisp-gpe/lisp_gpe_fwd_entry.h | 188
-rw-r--r-- src/vnet/lisp-gpe/lisp_gpe_packet.h | 149
-rw-r--r-- src/vnet/lisp-gpe/lisp_gpe_sub_interface.c | 278
-rw-r--r-- src/vnet/lisp-gpe/lisp_gpe_sub_interface.h | 157
-rw-r--r-- src/vnet/lisp-gpe/lisp_gpe_tenant.c | 330
-rw-r--r-- src/vnet/lisp-gpe/lisp_gpe_tenant.h | 88
-rw-r--r-- src/vnet/lisp-gpe/lisp_gpe_tunnel.c | 289
-rw-r--r-- src/vnet/lisp-gpe/lisp_gpe_tunnel.h | 89
-rw-r--r-- src/vnet/lisp-gpe/rfc.txt | 826
-rw-r--r-- src/vnet/llc/llc.c | 241
-rw-r--r-- src/vnet/llc/llc.h | 194
-rw-r--r-- src/vnet/llc/node.c | 331
-rw-r--r-- src/vnet/llc/pg.c | 113
-rw-r--r-- src/vnet/lldp/dir.dox | 18
-rw-r--r-- src/vnet/lldp/lldp_cli.c | 646
-rw-r--r-- src/vnet/lldp/lldp_doc.md | 84
-rw-r--r-- src/vnet/lldp/lldp_input.c | 302
-rw-r--r-- src/vnet/lldp/lldp_node.c | 341
-rw-r--r-- src/vnet/lldp/lldp_node.h | 145
-rw-r--r-- src/vnet/lldp/lldp_output.c | 216
-rw-r--r-- src/vnet/lldp/lldp_protocol.h | 142
-rwxr-xr-x src/vnet/map/examples/gen-rules.py | 186
-rw-r--r-- src/vnet/map/examples/health_check.c | 109
-rwxr-xr-x src/vnet/map/examples/test_map.py | 141
-rwxr-xr-x src/vnet/map/gen-rules.py | 107
-rw-r--r-- src/vnet/map/ip4_map.c | 813
-rw-r--r-- src/vnet/map/ip4_map_t.c | 1363
-rw-r--r-- src/vnet/map/ip6_map.c | 1269
-rw-r--r-- src/vnet/map/ip6_map_t.c | 1517
-rw-r--r-- src/vnet/map/map.api | 178
-rw-r--r-- src/vnet/map/map.c | 2166
-rw-r--r-- src/vnet/map/map.h | 591
-rw-r--r-- src/vnet/map/map_api.c | 295
-rw-r--r-- src/vnet/map/map_doc.md | 69
-rw-r--r-- src/vnet/map/map_dpo.c | 191
-rw-r--r-- src/vnet/map/map_dpo.h | 67
-rw-r--r-- src/vnet/map/test.c | 205
-rw-r--r-- src/vnet/mcast/mcast.c | 565
-rw-r--r-- src/vnet/mcast/mcast.h | 50
-rw-r--r-- src/vnet/mcast/mcast_test.c | 149
-rw-r--r-- src/vnet/misc.c | 124
-rw-r--r-- src/vnet/mpls/error.def | 31
-rw-r--r-- src/vnet/mpls/interface.c | 121
-rw-r--r-- src/vnet/mpls/mpls.c | 511
-rw-r--r-- src/vnet/mpls/mpls.h | 172
-rw-r--r-- src/vnet/mpls/mpls_features.c | 156
-rw-r--r-- src/vnet/mpls/mpls_lookup.c | 531
-rw-r--r-- src/vnet/mpls/mpls_output.c | 479
-rw-r--r-- src/vnet/mpls/mpls_tunnel.c | 787
-rw-r--r-- src/vnet/mpls/mpls_tunnel.h | 98
-rw-r--r-- src/vnet/mpls/mpls_types.h | 39
-rw-r--r-- src/vnet/mpls/node.c | 303
-rw-r--r-- src/vnet/mpls/packet.h | 125
-rw-r--r-- src/vnet/mpls/pg.c | 71
-rw-r--r-- src/vnet/osi/node.c | 326
-rw-r--r-- src/vnet/osi/osi.c | 201
-rw-r--r-- src/vnet/osi/osi.h | 171
-rw-r--r-- src/vnet/osi/pg.c | 106
-rw-r--r-- src/vnet/pg/cli.c | 636
-rw-r--r-- src/vnet/pg/edit.c | 186
-rw-r--r-- src/vnet/pg/edit.h | 210
-rw-r--r-- src/vnet/pg/example.script | 6
-rw-r--r-- src/vnet/pg/init.c | 72
-rw-r--r-- src/vnet/pg/input.c | 1667
-rw-r--r-- src/vnet/pg/output.c | 85
-rw-r--r-- src/vnet/pg/pg.h | 383
-rw-r--r-- src/vnet/pg/stream.c | 497
-rw-r--r-- src/vnet/pipeline.h | 456
-rw-r--r-- src/vnet/plugin/p1.c | 52
-rw-r--r-- src/vnet/plugin/plugin.h | 32
-rw-r--r-- src/vnet/policer/node_funcs.c | 938
-rw-r--r-- src/vnet/policer/police.h | 214
-rw-r--r-- src/vnet/policer/policer.c | 528
-rw-r--r-- src/vnet/policer/policer.h | 107
-rw-r--r-- src/vnet/policer/xlate.c | 1505
-rw-r--r-- src/vnet/policer/xlate.h | 186
-rw-r--r-- src/vnet/ppp/error.def | 42
-rw-r--r-- src/vnet/ppp/node.c | 368
-rw-r--r-- src/vnet/ppp/packet.h | 199
-rw-r--r-- src/vnet/ppp/pg.c | 114
-rw-r--r-- src/vnet/ppp/ppp.c | 261
-rw-r--r-- src/vnet/ppp/ppp.h | 135
-rw-r--r-- src/vnet/replication.c | 293
-rw-r--r-- src/vnet/replication.h | 136
-rw-r--r-- src/vnet/rewrite.c | 329
-rw-r--r-- src/vnet/rewrite.h | 305
-rw-r--r-- src/vnet/snap/node.c | 353
-rw-r--r-- src/vnet/snap/pg.c | 116
-rw-r--r-- src/vnet/snap/snap.c | 204
-rw-r--r-- src/vnet/snap/snap.h | 209
-rw-r--r-- src/vnet/span/node.c | 286
-rw-r--r-- src/vnet/span/span.api | 60
-rw-r--r-- src/vnet/span/span.c | 197
-rw-r--r-- src/vnet/span/span.h | 62
-rw-r--r-- src/vnet/span/span.md | 65
-rw-r--r-- src/vnet/span/span_api.c | 153
-rw-r--r-- src/vnet/sr/dir.dox | 25
-rw-r--r-- src/vnet/sr/examples/sr_multicastmap.script | 4
-rw-r--r-- src/vnet/sr/rfc_draft_05.txt | 1265
-rw-r--r-- src/vnet/sr/sr.c | 3333
-rw-r--r-- src/vnet/sr/sr.h | 262
-rw-r--r-- src/vnet/sr/sr_error.def | 20
-rw-r--r-- src/vnet/sr/sr_fix_dst_error.def | 17
-rw-r--r-- src/vnet/sr/sr_packet.h | 251
-rw-r--r-- src/vnet/sr/sr_replicate.c | 490
-rw-r--r-- src/vnet/srp/format.c | 147
-rw-r--r-- src/vnet/srp/interface.c | 458
-rw-r--r-- src/vnet/srp/node.c | 932
-rw-r--r-- src/vnet/srp/packet.h | 204
-rw-r--r-- src/vnet/srp/pg.c | 157
-rw-r--r-- src/vnet/srp/srp.h | 222
-rw-r--r-- src/vnet/unix/gdb_funcs.c | 171
-rw-r--r-- src/vnet/unix/pcap.c | 241
-rw-r--r-- src/vnet/unix/pcap.h | 230
-rw-r--r-- src/vnet/unix/pcap2pg.c | 182
-rw-r--r-- src/vnet/unix/tap.api | 123
-rw-r--r-- src/vnet/unix/tap_api.c | 257
-rw-r--r-- src/vnet/unix/tapcli.c | 1328
-rw-r--r-- src/vnet/unix/tapcli.h | 52
-rw-r--r-- src/vnet/unix/tuntap.c | 1000
-rw-r--r-- src/vnet/unix/tuntap.h | 36
-rw-r--r-- src/vnet/vnet.h | 96
-rw-r--r-- src/vnet/vnet_all_api_h.h | 57
-rw-r--r-- src/vnet/vnet_msg_enum.h | 37
-rw-r--r-- src/vnet/vxlan-gpe/decap.c | 733
-rw-r--r-- src/vnet/vxlan-gpe/dir.dox | 32
-rw-r--r-- src/vnet/vxlan-gpe/encap.c | 388
-rw-r--r-- src/vnet/vxlan-gpe/vxlan-gpe-rfc.txt | 868
-rw-r--r-- src/vnet/vxlan-gpe/vxlan_gpe.api | 61
-rw-r--r-- src/vnet/vxlan-gpe/vxlan_gpe.c | 659
-rw-r--r-- src/vnet/vxlan-gpe/vxlan_gpe.h | 221
-rw-r--r-- src/vnet/vxlan-gpe/vxlan_gpe_api.c | 249
-rw-r--r-- src/vnet/vxlan-gpe/vxlan_gpe_error.def | 16
-rw-r--r-- src/vnet/vxlan-gpe/vxlan_gpe_packet.h | 110
-rw-r--r-- src/vnet/vxlan/decap.c | 1130
-rw-r--r-- src/vnet/vxlan/dir.dox | 24
-rw-r--r-- src/vnet/vxlan/encap.c | 553
-rw-r--r-- src/vnet/vxlan/vxlan.api | 81
-rw-r--r-- src/vnet/vxlan/vxlan.c | 899
-rw-r--r-- src/vnet/vxlan/vxlan.h | 199
-rw-r--r-- src/vnet/vxlan/vxlan_api.c | 253
-rw-r--r-- src/vnet/vxlan/vxlan_error.def | 17
-rw-r--r-- src/vnet/vxlan/vxlan_packet.h | 69
534 files changed, 217787 insertions, 0 deletions
diff --git a/src/vnet/adj/adj.c b/src/vnet/adj/adj.c
new file mode 100644
index 00000000000..e740c4cb79b
--- /dev/null
+++ b/src/vnet/adj/adj.c
@@ -0,0 +1,454 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/adj/adj.h>
+#include <vnet/adj/adj_internal.h>
+#include <vnet/adj/adj_glean.h>
+#include <vnet/adj/adj_midchain.h>
+#include <vnet/fib/fib_node_list.h>
+
+/*
+ * Special adj with index zero. We need to define this since the v4 mtrie
+ * assumes an index of 0 implies the ply is empty; therefore all 'real'
+ * adjs need a non-zero index.
+ */
+static ip_adjacency_t *special_v4_miss_adj_with_index_zero;
+
+/* Adjacency packet/byte counters indexed by adjacency index. */
+vlib_combined_counter_main_t adjacency_counters;
+
+/*
+ * the single adj pool
+ */
+ip_adjacency_t *adj_pool;
+
+always_inline void
+adj_poison (ip_adjacency_t * adj)
+{
+ if (CLIB_DEBUG > 0)
+ {
+ memset (adj, 0xfe, sizeof (adj[0]));
+ }
+}
+
+ip_adjacency_t *
+adj_alloc (fib_protocol_t proto)
+{
+ ip_adjacency_t *adj;
+
+ pool_get(adj_pool, adj);
+
+ adj_poison(adj);
+
+ /* Make sure certain fields are always initialized. */
+ /* Validate adjacency counters. */
+ vlib_validate_combined_counter(&adjacency_counters,
+ adj_get_index(adj));
+
+ adj->rewrite_header.sw_if_index = ~0;
+ adj->mcast_group_index = ~0;
+ adj->saved_lookup_next_index = 0;
+ adj->n_adj = 1;
+ adj->lookup_next_index = 0;
+
+ fib_node_init(&adj->ia_node,
+ FIB_NODE_TYPE_ADJ);
+ adj->ia_nh_proto = proto;
+ adj->ia_flags = 0;
+
+ ip4_main.lookup_main.adjacency_heap = adj_pool;
+ ip6_main.lookup_main.adjacency_heap = adj_pool;
+
+ return (adj);
+}
+
+static int
+adj_index_is_special (adj_index_t adj_index)
+{
+ if (ADJ_INDEX_INVALID == adj_index)
+ return (!0);
+
+ return (0);
+}
+
+/**
+ * @brief Pretty print helper function for formatting specific adjacencies.
+ * @param s - input string to format
+ * @param args - va_list expected to contain:
+ *  - adj_index (u32)
+ *  - format_ip_adjacency_flags_t
+ */
+u8 *
+format_ip_adjacency (u8 * s, va_list * args)
+{
+ format_ip_adjacency_flags_t fiaf;
+ ip_adjacency_t * adj;
+ u32 adj_index;
+
+ adj_index = va_arg (*args, u32);
+ fiaf = va_arg (*args, format_ip_adjacency_flags_t);
+ adj = adj_get(adj_index);
+
+ switch (adj->lookup_next_index)
+ {
+ case IP_LOOKUP_NEXT_REWRITE:
+ s = format (s, "%U", format_adj_nbr, adj_index, 0);
+ break;
+ case IP_LOOKUP_NEXT_ARP:
+ s = format (s, "%U", format_adj_nbr_incomplete, adj_index, 0);
+ break;
+ case IP_LOOKUP_NEXT_GLEAN:
+ s = format (s, "%U", format_adj_glean, adj_index, 0);
+ break;
+ case IP_LOOKUP_NEXT_MIDCHAIN:
+ s = format (s, "%U", format_adj_midchain, adj_index, 2);
+ break;
+ default:
+ break;
+ }
+
+ if (fiaf & FORMAT_IP_ADJACENCY_DETAIL)
+ {
+ s = format (s, "\n locks:%d", adj->ia_node.fn_locks);
+ s = format (s, " node:[%d]:%U",
+ adj->rewrite_header.node_index,
+ format_vlib_node_name, vlib_get_main(),
+ adj->rewrite_header.node_index);
+ s = format (s, " next:[%d]:%U",
+ adj->rewrite_header.next_index,
+ format_vlib_next_node_name,
+ vlib_get_main(),
+ adj->rewrite_header.node_index,
+ adj->rewrite_header.next_index);
+ s = format(s, "\n children:\n ");
+ s = fib_node_children_format(adj->ia_node.fn_children, s);
+ }
+
+ return s;
+}
+
+/*
+ * adj_last_lock_gone
+ *
+ * last lock/reference to the adj has gone, we no longer need it.
+ */
+static void
+adj_last_lock_gone (ip_adjacency_t *adj)
+{
+ vlib_main_t * vm = vlib_get_main();
+
+ ASSERT(0 == fib_node_list_get_size(adj->ia_node.fn_children));
+ ADJ_DBG(adj, "last-lock-gone");
+
+ vlib_worker_thread_barrier_sync (vm);
+
+ switch (adj->lookup_next_index)
+ {
+ case IP_LOOKUP_NEXT_MIDCHAIN:
+ dpo_reset(&adj->sub_type.midchain.next_dpo);
+ /* FALL THROUGH */
+ case IP_LOOKUP_NEXT_ARP:
+ case IP_LOOKUP_NEXT_REWRITE:
+ /*
+ * complete and incomplete nbr adjs
+ */
+ adj_nbr_remove(adj_get_index(adj),
+ adj->ia_nh_proto,
+ adj->ia_link,
+ &adj->sub_type.nbr.next_hop,
+ adj->rewrite_header.sw_if_index);
+ break;
+ case IP_LOOKUP_NEXT_GLEAN:
+ adj_glean_remove(adj->ia_nh_proto,
+ adj->rewrite_header.sw_if_index);
+ break;
+ default:
+ /*
+ * type not stored in any DB from which we need to remove it
+ */
+ break;
+ }
+
+ vlib_worker_thread_barrier_release(vm);
+
+ fib_node_deinit(&adj->ia_node);
+ pool_put(adj_pool, adj);
+}
+
+void
+adj_lock (adj_index_t adj_index)
+{
+ ip_adjacency_t *adj;
+
+ if (adj_index_is_special(adj_index))
+ {
+ return;
+ }
+
+ adj = adj_get(adj_index);
+ ASSERT(adj);
+
+ ADJ_DBG(adj, "lock");
+ fib_node_lock(&adj->ia_node);
+}
+
+void
+adj_unlock (adj_index_t adj_index)
+{
+ ip_adjacency_t *adj;
+
+ if (adj_index_is_special(adj_index))
+ {
+ return;
+ }
+
+ adj = adj_get(adj_index);
+ ASSERT(adj);
+
+ ADJ_DBG(adj, "unlock");
+ ASSERT(adj);
+
+ fib_node_unlock(&adj->ia_node);
+}
+
+u32
+adj_child_add (adj_index_t adj_index,
+ fib_node_type_t child_type,
+ fib_node_index_t child_index)
+{
+ ASSERT(ADJ_INDEX_INVALID != adj_index);
+ if (adj_index_is_special(adj_index))
+ {
+ return (~0);
+ }
+
+ return (fib_node_child_add(FIB_NODE_TYPE_ADJ,
+ adj_index,
+ child_type,
+ child_index));
+}
+
+void
+adj_child_remove (adj_index_t adj_index,
+ u32 sibling_index)
+{
+ if (adj_index_is_special(adj_index))
+ {
+ return;
+ }
+
+ fib_node_child_remove(FIB_NODE_TYPE_ADJ,
+ adj_index,
+ sibling_index);
+}
+
+/**
+ * @brief Return the link type of the adjacency
+ */
+vnet_link_t
+adj_get_link_type (adj_index_t ai)
+{
+ const ip_adjacency_t *adj;
+
+ adj = adj_get(ai);
+
+ return (adj->ia_link);
+}
+
+/**
+ * @brief Return the sw interface index of the adjacency.
+ */
+u32
+adj_get_sw_if_index (adj_index_t ai)
+{
+ const ip_adjacency_t *adj;
+
+ adj = adj_get(ai);
+
+ return (adj->rewrite_header.sw_if_index);
+}
+
+/**
+ * @brief Return the rewrite string of the adjacency
+ */
+const u8*
+adj_get_rewrite (adj_index_t ai)
+{
+ vnet_rewrite_header_t *rw;
+ ip_adjacency_t *adj;
+
+ adj = adj_get(ai);
+ rw = &adj->rewrite_header;
+
+ ASSERT (rw->data_bytes != 0xfefe);
+
+ return (rw->data - rw->data_bytes);
+}
+
+static fib_node_t *
+adj_get_node (fib_node_index_t index)
+{
+ ip_adjacency_t *adj;
+
+ adj = adj_get(index);
+
+ return (&adj->ia_node);
+}
+
+#define ADJ_FROM_NODE(_node) \
+ ((ip_adjacency_t*)((char*)_node - STRUCT_OFFSET_OF(ip_adjacency_t, ia_node)))
+
+static void
+adj_node_last_lock_gone (fib_node_t *node)
+{
+ adj_last_lock_gone(ADJ_FROM_NODE(node));
+}
+
+static fib_node_back_walk_rc_t
+adj_back_walk_notify (fib_node_t *node,
+ fib_node_back_walk_ctx_t *ctx)
+{
+ /*
+ * What's happening? I'm at the end of the line!
+ * (an adjacency has no parents, so this back-walk should never be invoked)
+ */
+ ASSERT(0);
+
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/*
+ * Adjacency's graph node virtual function table
+ */
+static const fib_node_vft_t adj_vft = {
+ .fnv_get = adj_get_node,
+ .fnv_last_lock = adj_node_last_lock_gone,
+ .fnv_back_walk = adj_back_walk_notify,
+};
+
+static clib_error_t *
+adj_module_init (vlib_main_t * vm)
+{
+ fib_node_register_type(FIB_NODE_TYPE_ADJ, &adj_vft);
+
+ adj_nbr_module_init();
+ adj_glean_module_init();
+ adj_midchain_module_init();
+
+ /*
+ * one special adj to reserve index 0
+ */
+ special_v4_miss_adj_with_index_zero = adj_alloc(FIB_PROTOCOL_IP4);
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (adj_module_init);
+
+static clib_error_t *
+adj_show (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ adj_index_t ai = ADJ_INDEX_INVALID;
+ u32 sw_if_index = ~0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%d", &ai))
+ ;
+ else if (unformat (input, "%U",
+ unformat_vnet_sw_interface, vnet_get_main(),
+ &sw_if_index))
+ ;
+ else
+ break;
+ }
+
+ if (ADJ_INDEX_INVALID != ai)
+ {
+ if (pool_is_free_index(adj_pool, ai))
+ {
+ vlib_cli_output (vm, "adjacency %d invalid", ai);
+ return 0;
+ }
+
+ vlib_cli_output (vm, "[@%d] %U",
+ ai,
+ format_ip_adjacency, ai,
+ FORMAT_IP_ADJACENCY_DETAIL);
+ }
+ else
+ {
+ /* *INDENT-OFF* */
+ pool_foreach_index(ai, adj_pool,
+ ({
+ if (~0 != sw_if_index &&
+ sw_if_index != adj_get_sw_if_index(ai))
+ {
+ }
+ else
+ {
+ vlib_cli_output (vm, "[@%d] %U",
+ ai,
+ format_ip_adjacency, ai,
+ FORMAT_IP_ADJACENCY_NONE);
+ }
+ }));
+ /* *INDENT-ON* */
+ }
+
+ return 0;
+}
+
+/*?
+ * Show all adjacencies.
+ * @cliexpar
+ * @cliexstart{sh adj}
+ * [@0]
+ * [@1] glean: loop0
+ * [@2] ipv4 via 1.0.0.2 loop0: IP4: 00:00:22:aa:bb:cc -> 00:00:11:aa:bb:cc
+ * [@3] mpls via 1.0.0.2 loop0: MPLS_UNICAST: 00:00:22:aa:bb:cc -> 00:00:11:aa:bb:cc
+ * [@4] ipv4 via 1.0.0.3 loop0: IP4: 00:00:22:aa:bb:cc -> 00:00:11:aa:bb:cc
+ * [@5] mpls via 1.0.0.3 loop0: MPLS_UNICAST: 00:00:22:aa:bb:cc -> 00:00:11:aa:bb:cc
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND (adj_show_command, static) = {
+ .path = "show adj",
+ .short_help = "show adj [<adj_index>] [interface]",
+ .function = adj_show,
+};
+
+/*
+ * DEPRECATED: DO NOT USE
+ */
+ip_adjacency_t *
+ip_add_adjacency (ip_lookup_main_t * lm,
+ ip_adjacency_t * copy_adj,
+ u32 n_adj,
+ u32 * adj_index_return)
+{
+ ip_adjacency_t * adj;
+
+ ASSERT(1==n_adj);
+
+ adj = adj_alloc(FIB_PROTOCOL_IP4);
+
+ if (copy_adj)
+ *adj = *copy_adj;
+
+ *adj_index_return = adj_get_index(adj);
+ return adj;
+}
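
A minimal usage sketch of the locking API added above (illustrative only, not part of this commit; my_node_type and my_index stand in for a caller's own registered FIB node type and object index):

    static u32
    my_track_adj (adj_index_t ai,
                  fib_node_type_t my_node_type,
                  fib_node_index_t my_index)
    {
        u32 sibling;

        /* take a reference so the adj cannot be freed underneath us */
        adj_lock (ai);

        /* register to be back-walked when the adj's state changes */
        sibling = adj_child_add (ai, my_node_type, my_index);

        return (sibling);
    }

    static void
    my_untrack_adj (adj_index_t ai, u32 sibling)
    {
        /* release in the reverse order of acquisition */
        adj_child_remove (ai, sibling);
        adj_unlock (ai);
    }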
diff --git a/src/vnet/adj/adj.h b/src/vnet/adj/adj.h
new file mode 100644
index 00000000000..e85625db7ee
--- /dev/null
+++ b/src/vnet/adj/adj.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * An adjacency is a representation of an attached L3 peer.
+ *
+ * Adjacency Sub-types:
+ * - neighbour: a representation of an attached L3 peer.
+ * Key:{addr,interface,link/ether-type}
+ * SHARED
+ * - glean: used to drive ARP/ND for packets destined to a local sub-net.
+ * 'glean' means use the packet's destination address as the target
+ * address in the ARP packet.
+ * UNSHARED. Only one per-interface.
+ * - midchain: a neighbour adj on a virtual/tunnel interface.
+ * - rewrite: an adj with no key, but with a rewrite string.
+ *
+ * The API to create and update the adjacency is very sub-type specific. This
+ * is intentional, as it encourages the user to carefully consider which adjacency
+ * sub-type they are really using, and hence assign it data in the appropriate
+ * sub-type space in the union of sub-types. This prevents the adj becoming a
+ * disorganised dumping ground for 'my feature needs a u16 somewhere' data. It
+ * is important to enforce this approach as space in the adjacency is at a
+ * premium, since we need it to fit in 1 cache line.
+ *
+ * The API is also based around an index to an adjacency, not a raw pointer. This
+ * is so the user doesn't suffer the same limp-inducing firearm injuries that
+ * the author suffered, as the adjacencies can realloc.
+ */
+
+#ifndef __ADJ_H__
+#define __ADJ_H__
+
+#include <vnet/ip/lookup.h>
+#include <vnet/adj/adj_types.h>
+#include <vnet/adj/adj_nbr.h>
+#include <vnet/adj/adj_rewrite.h>
+#include <vnet/adj/adj_glean.h>
+
+/**
+ * @brief
+ * Take a reference counting lock on the adjacency
+ */
+extern void adj_lock(adj_index_t adj_index);
+/**
+ * @brief
+ * Release a reference counting lock on the adjacency
+ */
+extern void adj_unlock(adj_index_t adj_index);
+
+/**
+ * @brief
+ * Add a child dependent to an adjacency. The child will
+ * thus be informed via its registered back-walk function
+ * when the adjacency state changes.
+ */
+extern u32 adj_child_add(adj_index_t adj_index,
+ fib_node_type_t type,
+ fib_node_index_t child_index);
+/**
+ * @brief
+ * Remove a child dependent
+ */
+extern void adj_child_remove(adj_index_t adj_index,
+ u32 sibling_index);
+
+/**
+ * @brief Walk the Adjacencies on a given interface
+ */
+extern void adj_walk (u32 sw_if_index,
+ adj_walk_cb_t cb,
+ void *ctx);
+
+/**
+ * @brief Return the link type of the adjacency
+ */
+extern vnet_link_t adj_get_link_type (adj_index_t ai);
+
+/**
+ * @brief Return the sw interface index of the adjacency.
+ */
+extern u32 adj_get_sw_if_index (adj_index_t ai);
+
+/**
+ * @brief Return the rewrite string of the adjacency
+ */
+extern const u8* adj_get_rewrite (adj_index_t ai);
+
+/**
+ * @brief
+ * The global adjacency pool. Exposed for fast/inline data-plane access
+ */
+extern ip_adjacency_t *adj_pool;
+
+/**
+ * @brief
+ * Adjacency packet counters
+ */
+extern vlib_combined_counter_main_t adjacency_counters;
+
+/**
+ * @brief
+ * Get a pointer to an adjacency object from its index
+ */
+static inline ip_adjacency_t *
+adj_get (adj_index_t adj_index)
+{
+ return (vec_elt_at_index(adj_pool, adj_index));
+}
+
+#endif
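
As the commentary above stresses, clients hold an adj_index_t rather than a raw pointer, because adj_pool can realloc. A sketch of the intended inline data-plane access pattern (illustrative; my_get_tx_sw_if_index is a hypothetical helper):

    static inline u32
    my_get_tx_sw_if_index (adj_index_t ai)
    {
        /* the pointer is valid only until the next pool allocation,
         * so convert index -> pointer transiently and never store it */
        const ip_adjacency_t *adj = adj_get (ai);

        return (adj->rewrite_header.sw_if_index);
    }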
diff --git a/src/vnet/adj/adj_glean.c b/src/vnet/adj/adj_glean.c
new file mode 100644
index 00000000000..8d86e2a9f00
--- /dev/null
+++ b/src/vnet/adj/adj_glean.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/adj/adj.h>
+#include <vnet/adj/adj_internal.h>
+#include <vnet/fib/fib_walk.h>
+
+/*
+ * The 'DB' of all glean adjs.
+ * There is only one glean per-interface, per-protocol, so this is a
+ * per-protocol vector indexed by sw_if_index.
+ */
+static adj_index_t *adj_gleans[FIB_PROTOCOL_MAX];
+
+static inline vlib_node_registration_t*
+adj_get_glean_node (fib_protocol_t proto)
+{
+ switch (proto) {
+ case FIB_PROTOCOL_IP4:
+ return (&ip4_glean_node);
+ case FIB_PROTOCOL_IP6:
+ return (&ip6_glean_node);
+ case FIB_PROTOCOL_MPLS:
+ break;
+ }
+ ASSERT(0);
+ return (NULL);
+}
+
+/*
+ * adj_glean_add_or_lock
+ *
+ * The next_hop address here is used for source address selection in the DP.
+ * The glean adj is added to an interface's connected prefix, the next-hop
+ * passed here is the local prefix on the same interface.
+ */
+adj_index_t
+adj_glean_add_or_lock (fib_protocol_t proto,
+ u32 sw_if_index,
+ const ip46_address_t *nh_addr)
+{
+ ip_adjacency_t * adj;
+
+ vec_validate_init_empty(adj_gleans[proto], sw_if_index, ADJ_INDEX_INVALID);
+
+ if (ADJ_INDEX_INVALID == adj_gleans[proto][sw_if_index])
+ {
+ adj = adj_alloc(proto);
+
+ adj->lookup_next_index = IP_LOOKUP_NEXT_GLEAN;
+ adj->ia_nh_proto = proto;
+ adj_gleans[proto][sw_if_index] = adj_get_index(adj);
+
+ if (NULL != nh_addr)
+ {
+ adj->sub_type.glean.receive_addr = *nh_addr;
+ }
+
+ adj->rewrite_header.data_bytes = 0;
+
+ vnet_rewrite_for_sw_interface(vnet_get_main(),
+ adj_fib_proto_2_nd(proto),
+ sw_if_index,
+ adj_get_glean_node(proto)->index,
+ VNET_REWRITE_FOR_SW_INTERFACE_ADDRESS_BROADCAST,
+ &adj->rewrite_header,
+ sizeof (adj->rewrite_data));
+ }
+ else
+ {
+ adj = adj_get(adj_gleans[proto][sw_if_index]);
+ }
+
+ adj_lock(adj_get_index(adj));
+
+ return (adj_get_index(adj));
+}
+
+void
+adj_glean_remove (fib_protocol_t proto,
+ u32 sw_if_index)
+{
+ ASSERT(sw_if_index < vec_len(adj_gleans[proto]));
+
+ adj_gleans[proto][sw_if_index] = ADJ_INDEX_INVALID;
+}
+
+static clib_error_t *
+adj_glean_interface_state_change (vnet_main_t * vnm,
+ u32 sw_if_index,
+ u32 flags)
+{
+ /*
+ * for each glean on the interface trigger a walk back to the children
+ */
+ fib_protocol_t proto;
+ ip_adjacency_t *adj;
+
+
+ for (proto = FIB_PROTOCOL_IP4; proto <= FIB_PROTOCOL_IP6; proto++)
+ {
+ if (sw_if_index >= vec_len(adj_gleans[proto]) ||
+ ADJ_INDEX_INVALID == adj_gleans[proto][sw_if_index])
+ continue;
+
+ adj = adj_get(adj_gleans[proto][sw_if_index]);
+
+ fib_node_back_walk_ctx_t bw_ctx = {
+ .fnbw_reason = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP ?
+ FIB_NODE_BW_REASON_FLAG_INTERFACE_UP :
+ FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN),
+ };
+
+ fib_walk_sync(FIB_NODE_TYPE_ADJ, adj_get_index(adj), &bw_ctx);
+ }
+
+ return (NULL);
+}
+
+VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION(adj_glean_interface_state_change);
+
+/**
+ * @brief Invoked on each SW interface of a HW interface when the
+ * HW interface state changes
+ */
+static void
+adj_nbr_hw_sw_interface_state_change (vnet_main_t * vnm,
+ u32 sw_if_index,
+ void *arg)
+{
+ adj_glean_interface_state_change(vnm, sw_if_index, (uword) arg);
+}
+
+/**
+ * @brief Registered callback for HW interface state changes
+ */
+static clib_error_t *
+adj_glean_hw_interface_state_change (vnet_main_t * vnm,
+ u32 hw_if_index,
+ u32 flags)
+{
+ /*
+ * walk SW interfaces on the HW
+ */
+ uword sw_flags;
+
+ sw_flags = ((flags & VNET_HW_INTERFACE_FLAG_LINK_UP) ?
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP :
+ 0);
+
+ vnet_hw_interface_walk_sw(vnm, hw_if_index,
+ adj_nbr_hw_sw_interface_state_change,
+ (void*) sw_flags);
+
+ return (NULL);
+}
+
+VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION(
+ adj_glean_hw_interface_state_change);
+
+static clib_error_t *
+adj_glean_interface_delete (vnet_main_t * vnm,
+ u32 sw_if_index,
+ u32 is_add)
+{
+ /*
+ * for each glean on the interface trigger a walk back to the children
+ */
+ fib_protocol_t proto;
+ ip_adjacency_t *adj;
+
+ if (is_add)
+ {
+ /*
+ * We are not interested in interface additions; we will not back-walk
+ * to resolve paths through newly added interfaces. Why? The control
+ * plane should have the brains to add interfaces first, then routes.
+ * So if there is a path with an interface that matches one just
+ * created, that path resolved through an interface that was deleted
+ * and has still not been removed. The fact that a new interface has
+ * been added is NO GUARANTEE that it is the same interface the path
+ * needs, even though it may have the same sw_if_index. So tough!
+ * If the control plane wants these routes to resolve it needs to
+ * remove and add them again.
+ */
+ return (NULL);
+ }
+
+ for (proto = FIB_PROTOCOL_IP4; proto <= FIB_PROTOCOL_IP6; proto++)
+ {
+ if (sw_if_index >= vec_len(adj_gleans[proto]) ||
+ ADJ_INDEX_INVALID == adj_gleans[proto][sw_if_index])
+ continue;
+
+ adj = adj_get(adj_gleans[proto][sw_if_index]);
+
+ fib_node_back_walk_ctx_t bw_ctx = {
+ .fnbw_reason = FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE,
+ };
+
+ fib_walk_sync(FIB_NODE_TYPE_ADJ, adj_get_index(adj), &bw_ctx);
+ }
+
+ return (NULL);
+}
+
+VNET_SW_INTERFACE_ADD_DEL_FUNCTION(adj_glean_interface_delete);
+
+u8*
+format_adj_glean (u8* s, va_list *ap)
+{
+ index_t index = va_arg(*ap, index_t);
+ CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
+ vnet_main_t * vnm = vnet_get_main();
+ ip_adjacency_t * adj = adj_get(index);
+
+ return (format(s, "%U-glean: %U",
+ format_fib_protocol, adj->ia_nh_proto,
+ format_vnet_sw_interface_name,
+ vnm,
+ vnet_get_sw_interface(vnm,
+ adj->rewrite_header.sw_if_index)));
+}
+
+
+static void
+adj_dpo_lock (dpo_id_t *dpo)
+{
+ adj_lock(dpo->dpoi_index);
+}
+static void
+adj_dpo_unlock (dpo_id_t *dpo)
+{
+ adj_unlock(dpo->dpoi_index);
+}
+
+const static dpo_vft_t adj_glean_dpo_vft = {
+ .dv_lock = adj_dpo_lock,
+ .dv_unlock = adj_dpo_unlock,
+ .dv_format = format_adj_glean,
+};
+
+/**
+ * @brief The per-protocol VLIB graph nodes that are assigned to a glean
+ * object.
+ *
+ * this means that these graph nodes are ones from which a glean is the
+ * parent object in the DPO-graph.
+ */
+const static char* const glean_ip4_nodes[] =
+{
+ "ip4-glean",
+ NULL,
+};
+const static char* const glean_ip6_nodes[] =
+{
+ "ip6-glean",
+ NULL,
+};
+
+const static char* const * const glean_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = glean_ip4_nodes,
+ [DPO_PROTO_IP6] = glean_ip6_nodes,
+ [DPO_PROTO_MPLS] = NULL,
+};
+
+void
+adj_glean_module_init (void)
+{
+ dpo_register(DPO_ADJACENCY_GLEAN, &adj_glean_dpo_vft, glean_nodes);
+}
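
The sync walks triggered above deliver a reason flag to each child registered on the glean. A sketch of how a child's back-walk callback might consume those reasons (illustrative; my_child_back_walk is hypothetical):

    static fib_node_back_walk_rc_t
    my_child_back_walk (fib_node_t *node,
                        fib_node_back_walk_ctx_t *ctx)
    {
        if (ctx->fnbw_reason & FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN)
        {
            /* stop forwarding via this glean */
        }
        else if (ctx->fnbw_reason & FIB_NODE_BW_REASON_FLAG_INTERFACE_UP)
        {
            /* the glean is usable again */
        }

        return (FIB_NODE_BACK_WALK_CONTINUE);
    }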
diff --git a/src/vnet/adj/adj_glean.h b/src/vnet/adj/adj_glean.h
new file mode 100644
index 00000000000..640bd2f91eb
--- /dev/null
+++ b/src/vnet/adj/adj_glean.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @brief Glean Adjacency
+ *
+ * A glean adjacency represents the need to discover new peers on an
+ * attached link. Packets that hit a glean adjacency will generate an
+ * ARP/ND packet addressed to the packet's destination address.
+ * Note this is different from an incomplete neighbour adjacency, which
+ * does not send ARP/ND requests to the packet's destination address,
+ * but instead to the next-hop address of the adjacency itself.
+ */
+
+#ifndef __ADJ_GLEAN_H__
+#define __ADJ_GLEAN_H__
+
+#include <vnet/adj/adj_types.h>
+
+/**
+ * @brief
+ * Add (and lock) a new or lock an existing glean adjacency
+ *
+ * @param proto
+ * The protocol for the neighbours that we wish to glean
+ *
+ * @param sw_if_index
+ * The interface on which to glean
+ *
+ * @param nh_addr
+ * the address applied to the interface on which to glean. This is
+ * used as the source address in the ARP/ND packets sent.
+ */
+extern adj_index_t adj_glean_add_or_lock(fib_protocol_t proto,
+ u32 sw_if_index,
+ const ip46_address_t *nh_addr);
+
+/**
+ * @brief Format/display a glean adjacency.
+ */
+extern u8* format_adj_glean(u8* s, va_list *ap);
+
+/**
+ * @brief
+ * Module initialisation
+ */
+extern void adj_glean_module_init(void);
+
+#endif
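
A sketch of the contract documented above (illustrative): each call to adj_glean_add_or_lock() takes one reference that the caller must eventually drop with adj_unlock():

    ip46_address_t local = {
        .ip4.as_u32 = clib_host_to_net_u32 (0x0a000001), /* 10.0.0.1 */
    };
    adj_index_t ai;

    /* find-or-create and lock the IPv4 glean on sw_if_index */
    ai = adj_glean_add_or_lock (FIB_PROTOCOL_IP4, sw_if_index, &local);

    /* ... use the glean ... */

    adj_unlock (ai); /* drop our reference when done */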
diff --git a/src/vnet/adj/adj_internal.h b/src/vnet/adj/adj_internal.h
new file mode 100644
index 00000000000..833bc7c9e01
--- /dev/null
+++ b/src/vnet/adj/adj_internal.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ADJ_INTERNAL_H__
+#define __ADJ_INTERNAL_H__
+
+#include <vnet/adj/adj.h>
+#include <vnet/ip/ip.h>
+#include <vnet/mpls/mpls.h>
+#include <vnet/adj/adj_l2.h>
+
+
+/**
+ * big switch to turn on Adjacency debugging
+ */
+#undef ADJ_DEBUG
+
+/*
+ * Debug macro
+ */
+#ifdef ADJ_DEBUG
+#define ADJ_DBG(_adj, _fmt, _args...) \
+{ \
+ clib_warning("adj:[%d:%p]:" _fmt, \
+ _adj - adj_pool, _adj, \
+ ##_args); \
+}
+#else
+#define ADJ_DBG(_e, _fmt, _args...)
+#endif
+
+static inline u32
+adj_get_rewrite_node (vnet_link_t linkt)
+{
+ switch (linkt) {
+ case VNET_LINK_IP4:
+ return (ip4_rewrite_node.index);
+ case VNET_LINK_IP6:
+ return (ip6_rewrite_node.index);
+ case VNET_LINK_MPLS:
+ return (mpls_output_node.index);
+ case VNET_LINK_ETHERNET:
+ return (adj_l2_rewrite_node.index);
+ case VNET_LINK_ARP:
+ break;
+ }
+ ASSERT(0);
+ return (0);
+}
+
+static inline vnet_link_t
+adj_fib_proto_2_nd (fib_protocol_t fp)
+{
+ switch (fp)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (VNET_LINK_ARP);
+ case FIB_PROTOCOL_IP6:
+ return (VNET_LINK_IP6);
+ case FIB_PROTOCOL_MPLS:
+ return (VNET_LINK_MPLS);
+ }
+ return (0);
+}
+
+/**
+ * @brief
+ * Get the index of an adjacency object from a pointer to it
+ */
+static inline adj_index_t
+adj_get_index (ip_adjacency_t *adj)
+{
+ return (adj - adj_pool);
+}
+
+extern void adj_nbr_update_rewrite_internal (ip_adjacency_t *adj,
+ ip_lookup_next_t adj_next_index,
+ u32 complete_next_index,
+ u32 next_index,
+ u8 *rewrite);
+
+extern ip_adjacency_t * adj_alloc(fib_protocol_t proto);
+
+extern void adj_nbr_remove(adj_index_t ai,
+ fib_protocol_t nh_proto,
+ vnet_link_t link_type,
+ const ip46_address_t *nh_addr,
+ u32 sw_if_index);
+extern void adj_glean_remove(fib_protocol_t proto,
+ u32 sw_if_index);
+
+#endif
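
A quick illustration of the pool-relative arithmetic the helpers above rely on: an index and a pointer round-trip exactly (sketch):

    ip_adjacency_t *adj = adj_get (ai);   /* index -> pointer */
    ASSERT (adj_get_index (adj) == ai);   /* pointer -> index */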
diff --git a/src/vnet/adj/adj_l2.c b/src/vnet/adj/adj_l2.c
new file mode 100644
index 00000000000..4d2dd7082f1
--- /dev/null
+++ b/src/vnet/adj/adj_l2.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/adj/adj_l2.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/ip.h>
+
+/**
+ * @brief Trace data for an L2 rewrite/midchain adjacency
+ */
+typedef struct adj_l2_trace_t_ {
+ /** Adjacency index taken. */
+ u32 adj_index;
+} adj_l2_trace_t;
+
+static u8 *
+format_adj_l2_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ adj_l2_trace_t * t = va_arg (*args, adj_l2_trace_t *);
+
+ s = format (s, "adj-idx %d : %U",
+ t->adj_index,
+ format_ip_adjacency, t->adj_index, FORMAT_IP_ADJACENCY_NONE);
+ return s;
+}
+
+typedef enum adj_l2_rewrite_next_t_
+{
+ ADJ_L2_REWRITE_NEXT_DROP,
+} adj_l2_rewrite_next_t;
+
+always_inline uword
+adj_l2_rewrite_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ int is_midchain)
+{
+ u32 * from = vlib_frame_vector_args (frame);
+ u32 n_left_from, n_left_to_next, * to_next, next_index;
+ u32 cpu_index = os_get_cpu_number();
+ ethernet_main_t * em = &ethernet_main;
+
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ ip_adjacency_t * adj0;
+ vlib_buffer_t * p0;
+ char *h0;
+ u32 pi0, rw_len0, adj_index0, next0 = 0;
+ u32 tx_sw_if_index0;
+
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ h0 = vlib_buffer_get_current (p0);
+
+ adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+
+ /* We should never rewrite a pkt using the MISS adjacency */
+ ASSERT(adj_index0);
+
+ adj0 = adj_get (adj_index0);
+
+ /* Assume we are only writing a simple Ethernet header. */
+ vnet_rewrite_one_header (adj0[0], h0,
+ sizeof (ethernet_header_t));
+
+ /* Update packet buffer attributes/set output interface. */
+ rw_len0 = adj0[0].rewrite_header.data_bytes;
+ vnet_buffer(p0)->ip.save_rewrite_length = rw_len0;
+
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ cpu_index, adj_index0,
+ /* packet increment */ 0,
+ /* byte increment */ rw_len0-sizeof(ethernet_header_t));
+
+ /* Check MTU of outgoing interface. */
+ if (PREDICT_TRUE((vlib_buffer_length_in_chain (vm, p0) <=
+ adj0[0].rewrite_header.max_l3_packet_bytes)))
+ {
+ /* Don't adjust the buffer for ttl issue; icmp-error node wants
+ * to see the IP header */
+ p0->current_data -= rw_len0;
+ p0->current_length += rw_len0;
+ tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index;
+
+ if (is_midchain)
+ {
+ adj0->sub_type.midchain.fixup_func(vm, adj0, p0);
+ }
+
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = tx_sw_if_index0;
+
+ /*
+ * Follow the feature ARC. this will result eventually in
+ * the midchain-tx node
+ */
+ vnet_feature_arc_start(em->output_feature_arc_index, tx_sw_if_index0, &next0, p0);
+ }
+ else
+ {
+ /* can't fragment L2 */
+ next0 = ADJ_L2_REWRITE_NEXT_DROP;
+ }
+
+ if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ adj_l2_trace_t *tr = vlib_add_trace (vm, node,
+ p0, sizeof (*tr));
+ tr->adj_index = vnet_buffer(p0)->ip.adj_index[VLIB_TX];
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+static uword
+adj_l2_rewrite (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return adj_l2_rewrite_inline (vm, node, frame, 0);
+}
+
+static uword
+adj_l2_midchain (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return adj_l2_rewrite_inline (vm, node, frame, 1);
+}
+
+VLIB_REGISTER_NODE (adj_l2_rewrite_node) = {
+ .function = adj_l2_rewrite,
+ .name = "adj-l2-rewrite",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_adj_l2_trace,
+
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [ADJ_L2_REWRITE_NEXT_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (adj_l2_rewrite_node, adj_l2_rewrite)
+
+VLIB_REGISTER_NODE (adj_l2_midchain_node) = {
+ .function = adj_l2_midchain,
+ .name = "adj-l2-midchain",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_adj_l2_trace,
+
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [ADJ_L2_REWRITE_NEXT_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (adj_l2_midchain_node, adj_l2_midchain)
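
The per-adjacency counters bumped in the rewrite loop above can be read back with vlib's standard combined-counter accessor (a sketch, assuming vlib_get_combined_counter() from vlib/counter.h):

    vlib_counter_t count;

    vlib_get_combined_counter (&adjacency_counters, ai, &count);
    /* count.packets / count.bytes now hold the totals for this adj */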
diff --git a/src/vnet/adj/adj_l2.h b/src/vnet/adj/adj_l2.h
new file mode 100644
index 00000000000..3aa1c74b224
--- /dev/null
+++ b/src/vnet/adj/adj_l2.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ADJ_L2_H__
+#define __ADJ_L2_H__
+
+#include <vnet/adj/adj.h>
+
+extern vlib_node_registration_t adj_l2_midchain_node;
+extern vlib_node_registration_t adj_l2_rewrite_node;
+
+#endif
diff --git a/src/vnet/adj/adj_midchain.c b/src/vnet/adj/adj_midchain.c
new file mode 100644
index 00000000000..8c6ab5aa17b
--- /dev/null
+++ b/src/vnet/adj/adj_midchain.c
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/adj/adj_nbr.h>
+#include <vnet/adj/adj_internal.h>
+#include <vnet/adj/adj_l2.h>
+#include <vnet/adj/adj_midchain.h>
+#include <vnet/ethernet/arp_packet.h>
+#include <vnet/dpo/drop_dpo.h>
+#include <vnet/fib/fib_walk.h>
+
+/**
+ * The two midchain tx feature node indices
+ */
+static u32 adj_midchain_tx_feature_node[VNET_LINK_NUM];
+static u32 adj_midchain_tx_no_count_feature_node[VNET_LINK_NUM];
+
+/**
+ * @brief Trace data for packets traversing the midchain tx node
+ */
+typedef struct adj_midchain_tx_trace_t_
+{
+ /**
+ * @brief the midchain adj we are traversing
+ */
+ adj_index_t ai;
+} adj_midchain_tx_trace_t;
+
+always_inline uword
+adj_midchain_tx_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ int interface_count)
+{
+ u32 * from, * to_next, n_left_from, n_left_to_next;
+ u32 next_index;
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ u32 cpu_index = vm->cpu_index;
+
+ /* Vector of buffer / pkt indices we're supposed to process */
+ from = vlib_frame_vector_args (frame);
+
+ /* Number of buffers / pkts */
+ n_left_from = frame->n_vectors;
+
+ /* Speculatively send the first buffer to the last disposition we used */
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ /* set up to enqueue to our disposition with index = next_index */
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+
+ while (n_left_from >= 4 && n_left_to_next > 2)
+ {
+ u32 bi0, adj_index0, next0;
+ const ip_adjacency_t * adj0;
+ const dpo_id_t *dpo0;
+ vlib_buffer_t * b0;
+ u32 bi1, adj_index1, next1;
+ const ip_adjacency_t * adj1;
+ const dpo_id_t *dpo1;
+ vlib_buffer_t * b1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ bi1 = from[1];
+ to_next[1] = bi1;
+
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer(vm, bi0);
+ b1 = vlib_get_buffer(vm, bi1);
+
+ /* Follow the DPO on which the midchain is stacked */
+ adj_index0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
+ adj_index1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
+
+ adj0 = adj_get(adj_index0);
+ adj1 = adj_get(adj_index1);
+
+ dpo0 = &adj0->sub_type.midchain.next_dpo;
+ dpo1 = &adj1->sub_type.midchain.next_dpo;
+
+ next0 = dpo0->dpoi_next_node;
+ next1 = dpo1->dpoi_next_node;
+
+ vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
+ vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+ if (interface_count)
+ {
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_TX,
+ cpu_index,
+ adj0->rewrite_header.sw_if_index,
+ 1,
+ vlib_buffer_length_in_chain (vm, b0));
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_TX,
+ cpu_index,
+ adj1->rewrite_header.sw_if_index,
+ 1,
+ vlib_buffer_length_in_chain (vm, b1));
+ }
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->ai = adj_index0;
+ }
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
+ b1, sizeof (*tr));
+ tr->ai = adj_index1;
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1,
+ next0, next1);
+ }
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, adj_index0, next0;
+ const ip_adjacency_t * adj0;
+ const dpo_id_t *dpo0;
+ vlib_buffer_t * b0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer(vm, bi0);
+
+ /* Follow the DPO on which the midchain is stacked */
+ adj_index0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
+ adj0 = adj_get(adj_index0);
+ dpo0 = &adj0->sub_type.midchain.next_dpo;
+ next0 = dpo0->dpoi_next_node;
+ vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+ if (interface_count)
+ {
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_TX,
+ cpu_index,
+ adj0->rewrite_header.sw_if_index,
+ 1,
+ vlib_buffer_length_in_chain (vm, b0));
+ }
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->ai = adj_index0;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+static u8 *
+format_adj_midchain_tx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ adj_midchain_tx_trace_t *tr = va_arg (*args, adj_midchain_tx_trace_t*);
+
+ s = format(s, "adj-midchain:[%d]:%U", tr->ai,
+ format_ip_adjacency, tr->ai,
+ FORMAT_IP_ADJACENCY_NONE);
+
+ return (s);
+}
+
+static uword
+adj_midchain_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (adj_midchain_tx_inline(vm, node, frame, 1));
+}
+
+VLIB_REGISTER_NODE (adj_midchain_tx_node, static) = {
+ .function = adj_midchain_tx,
+ .name = "adj-midchain-tx",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_adj_midchain_tx_trace,
+
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ },
+};
+
+static uword
+adj_midchain_tx_no_count (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (adj_midchain_tx_inline(vm, node, frame, 0));
+}
+
+VLIB_REGISTER_NODE (adj_midchain_tx_no_count_node, static) = {
+ .function = adj_midchain_tx_no_count,
+ .name = "adj-midchain-tx-no-count",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_adj_midchain_tx_trace,
+
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ },
+};
+
+VNET_FEATURE_INIT (adj_midchain_tx_ip4, static) = {
+ .arc_name = "ip4-output",
+ .node_name = "adj-midchain-tx",
+ .runs_before = VNET_FEATURES ("interface-output"),
+ .feature_index_ptr = &adj_midchain_tx_feature_node[VNET_LINK_IP4],
+};
+VNET_FEATURE_INIT (adj_midchain_tx_no_count_ip4, static) = {
+ .arc_name = "ip4-output",
+ .node_name = "adj-midchain-tx-no-count",
+ .runs_before = VNET_FEATURES ("interface-output"),
+ .feature_index_ptr = &adj_midchain_tx_no_count_feature_node[VNET_LINK_IP4],
+};
+VNET_FEATURE_INIT (adj_midchain_tx_ip6, static) = {
+ .arc_name = "ip6-output",
+ .node_name = "adj-midchain-tx",
+ .runs_before = VNET_FEATURES ("interface-output"),
+ .feature_index_ptr = &adj_midchain_tx_feature_node[VNET_LINK_IP6],
+};
+VNET_FEATURE_INIT (adj_midchain_tx_no_count_ip6, static) = {
+ .arc_name = "ip6-output",
+ .node_name = "adj-midchain-tx-no-count",
+ .runs_before = VNET_FEATURES ("interface-output"),
+ .feature_index_ptr = &adj_midchain_tx_no_count_feature_node[VNET_LINK_IP6],
+};
+VNET_FEATURE_INIT (adj_midchain_tx_mpls, static) = {
+ .arc_name = "mpls-output",
+ .node_name = "adj-midchain-tx",
+ .runs_before = VNET_FEATURES ("interface-output"),
+ .feature_index_ptr = &adj_midchain_tx_feature_node[VNET_LINK_MPLS],
+};
+VNET_FEATURE_INIT (adj_midchain_tx_no_count_mpls, static) = {
+ .arc_name = "mpls-output",
+ .node_name = "adj-midchain-tx-no-count",
+ .runs_before = VNET_FEATURES ("interface-output"),
+ .feature_index_ptr = &adj_midchain_tx_no_count_feature_node[VNET_LINK_MPLS],
+};
+VNET_FEATURE_INIT (adj_midchain_tx_ethernet, static) = {
+ .arc_name = "ethernet-output",
+ .node_name = "adj-midchain-tx",
+ .runs_before = VNET_FEATURES ("error-drop"),
+ .feature_index_ptr = &adj_midchain_tx_feature_node[VNET_LINK_ETHERNET],
+};
+VNET_FEATURE_INIT (adj_midchain_tx_no_count_ethernet, static) = {
+ .arc_name = "ethernet-output",
+ .node_name = "adj-midchain-tx-no-count",
+ .runs_before = VNET_FEATURES ("error-drop"),
+ .feature_index_ptr = &adj_midchain_tx_no_count_feature_node[VNET_LINK_ETHERNET],
+};
+
+static inline u32
+adj_get_midchain_node (vnet_link_t link)
+{
+ switch (link) {
+ case VNET_LINK_IP4:
+ return (ip4_midchain_node.index);
+ case VNET_LINK_IP6:
+ return (ip6_midchain_node.index);
+ case VNET_LINK_MPLS:
+ return (mpls_midchain_node.index);
+ case VNET_LINK_ETHERNET:
+ return (adj_l2_midchain_node.index);
+ case VNET_LINK_ARP:
+ break;
+ }
+ ASSERT(0);
+ return (0);
+}
+
+static u8
+adj_midchain_get_feature_arc_index_for_link_type (const ip_adjacency_t *adj)
+{
+ u8 arc = (u8) ~0;
+ switch (adj->ia_link)
+ {
+ case VNET_LINK_IP4:
+ {
+ arc = ip4_main.lookup_main.output_feature_arc_index;
+ break;
+ }
+ case VNET_LINK_IP6:
+ {
+ arc = ip6_main.lookup_main.output_feature_arc_index;
+ break;
+ }
+ case VNET_LINK_MPLS:
+ {
+ arc = mpls_main.output_feature_arc_index;
+ break;
+ }
+ case VNET_LINK_ETHERNET:
+ {
+ arc = ethernet_main.output_feature_arc_index;
+ break;
+ }
+ case VNET_LINK_ARP:
+ ASSERT(0);
+ break;
+ }
+
+ ASSERT (arc != (u8) ~0);
+
+ return (arc);
+}
+
+/**
+ * adj_nbr_midchain_update_rewrite
+ *
+ * Update the adjacency's rewrite string. A NULL string implies the
+ * rewrite is reset (i.e. when the ARP/ND entry is gone).
+ * NB: the adj being updated may be handling traffic in the DP.
+ */
+void
+adj_nbr_midchain_update_rewrite (adj_index_t adj_index,
+ adj_midchain_fixup_t fixup,
+ adj_midchain_flag_t flags,
+ u8 *rewrite)
+{
+ ip_adjacency_t *adj;
+ u8 arc_index;
+ u32 feature_index;
+
+ ASSERT(ADJ_INDEX_INVALID != adj_index);
+
+ adj = adj_get(adj_index);
+
+ /*
+     * one time only update. since we don't support changing the tunnel
+ * src,dst, this is all we need.
+ */
+ ASSERT(adj->lookup_next_index == IP_LOOKUP_NEXT_ARP);
+ /*
+ * tunnels can always provide a rewrite.
+ */
+ ASSERT(NULL != rewrite);
+
+ adj->sub_type.midchain.fixup_func = fixup;
+
+ arc_index = adj_midchain_get_feature_arc_index_for_link_type (adj);
+ feature_index = (flags & ADJ_MIDCHAIN_FLAG_NO_COUNT) ?
+ adj_midchain_tx_no_count_feature_node[adj->ia_link] :
+ adj_midchain_tx_feature_node[adj->ia_link];
+
+ adj->sub_type.midchain.tx_function_node = (flags & ADJ_MIDCHAIN_FLAG_NO_COUNT) ?
+ adj_midchain_tx_no_count_node.index :
+ adj_midchain_tx_node.index;
+
+ vnet_feature_enable_disable_with_index (arc_index, feature_index,
+ adj->rewrite_header.sw_if_index,
+ 1 /* enable */, 0, 0);
+
+ /*
+ * stack the midchain on the drop so it's ready to forward in the adj-midchain-tx.
+ * The graph arc used/created here is from the midchain-tx node to the
+ * child's registered node. This is because post adj processing the next
+     * nodes are any output features, then the midchain-tx; from there we
+     * need to get to the stacked child's node.
+ */
+ dpo_stack_from_node(adj->sub_type.midchain.tx_function_node,
+ &adj->sub_type.midchain.next_dpo,
+ drop_dpo_get(vnet_link_to_dpo_proto(adj->ia_link)));
+
+ /*
+     * update the rewrite with the workers paused.
+ */
+ adj_nbr_update_rewrite_internal(adj,
+ IP_LOOKUP_NEXT_MIDCHAIN,
+ adj_get_midchain_node(adj->ia_link),
+ adj->sub_type.midchain.tx_function_node,
+ rewrite);
+}
+
+/**
+ * adj_nbr_midchain_unstack
+ *
+ * Unstack the adj. stack it on drop
+ */
+void
+adj_nbr_midchain_unstack (adj_index_t adj_index)
+{
+ ip_adjacency_t *adj;
+
+ ASSERT(ADJ_INDEX_INVALID != adj_index);
+
+ adj = adj_get(adj_index);
+
+ /*
+ * stack on the drop
+ */
+ dpo_stack(DPO_ADJACENCY_MIDCHAIN,
+ vnet_link_to_dpo_proto(adj->ia_link),
+ &adj->sub_type.midchain.next_dpo,
+ drop_dpo_get(vnet_link_to_dpo_proto(adj->ia_link)));
+
+ CLIB_MEMORY_BARRIER();
+}
+
+/**
+ * adj_nbr_midchain_stack
+ */
+void
+adj_nbr_midchain_stack (adj_index_t adj_index,
+ const dpo_id_t *next)
+{
+ ip_adjacency_t *adj;
+
+ ASSERT(ADJ_INDEX_INVALID != adj_index);
+
+ adj = adj_get(adj_index);
+
+ ASSERT(IP_LOOKUP_NEXT_MIDCHAIN == adj->lookup_next_index);
+
+ dpo_stack_from_node(adj->sub_type.midchain.tx_function_node,
+ &adj->sub_type.midchain.next_dpo,
+ next);
+}
+
+u8*
+format_adj_midchain (u8* s, va_list *ap)
+{
+ index_t index = va_arg(*ap, index_t);
+ u32 indent = va_arg(*ap, u32);
+ vnet_main_t * vnm = vnet_get_main();
+ ip_adjacency_t * adj = adj_get(index);
+
+ s = format (s, "%U", format_vnet_link, adj->ia_link);
+ s = format (s, " via %U ",
+ format_ip46_address, &adj->sub_type.nbr.next_hop);
+ s = format (s, " %U",
+ format_vnet_rewrite,
+ vnm->vlib_main, &adj->rewrite_header,
+ sizeof (adj->rewrite_data), indent);
+ s = format (s, "\n%Ustacked-on:\n%U%U",
+ format_white_space, indent,
+ format_white_space, indent+2,
+ format_dpo_id, &adj->sub_type.midchain.next_dpo, indent+2);
+
+ return (s);
+}
+
+static void
+adj_dpo_lock (dpo_id_t *dpo)
+{
+ adj_lock(dpo->dpoi_index);
+}
+static void
+adj_dpo_unlock (dpo_id_t *dpo)
+{
+ adj_unlock(dpo->dpoi_index);
+}
+
+const static dpo_vft_t adj_midchain_dpo_vft = {
+ .dv_lock = adj_dpo_lock,
+ .dv_unlock = adj_dpo_unlock,
+ .dv_format = format_adj_midchain,
+};
+
+/**
+ * @brief The per-protocol VLIB graph nodes that are assigned to a midchain
+ * object.
+ *
+ * this means that these graph nodes are ones from which a midchain is the
+ * parent object in the DPO-graph.
+ */
+const static char* const midchain_ip4_nodes[] =
+{
+ "ip4-midchain",
+ NULL,
+};
+const static char* const midchain_ip6_nodes[] =
+{
+ "ip6-midchain",
+ NULL,
+};
+const static char* const midchain_mpls_nodes[] =
+{
+ "mpls-midchain",
+ NULL,
+};
+const static char* const midchain_ethernet_nodes[] =
+{
+ "adj-l2-midchain",
+ NULL,
+};
+
+const static char* const * const midchain_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = midchain_ip4_nodes,
+ [DPO_PROTO_IP6] = midchain_ip6_nodes,
+ [DPO_PROTO_MPLS] = midchain_mpls_nodes,
+ [DPO_PROTO_ETHERNET] = midchain_ethernet_nodes,
+};
+
+void
+adj_midchain_module_init (void)
+{
+ dpo_register(DPO_ADJACENCY_MIDCHAIN, &adj_midchain_dpo_vft, midchain_nodes);
+}
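
The intended call sequence for the midchain API above, as used by a tunnel, is: create/lock the nbr adj, convert it to a midchain with the encap rewrite and a fixup, then stack it on the DPO that reaches the tunnel destination. A minimal sketch, with hypothetical names throughout; it assumes the fixup signature matches the adj_midchain_fixup_t typedef in adj.h (vm, adj, buffer), and keys the tunnel's single adj on the zero next-hop (the pattern GRE uses):

    #include <vnet/adj/adj_nbr.h>
    #include <vnet/adj/adj_midchain.h>

    /* post-encap fixup: patch tunnel-header fields (e.g. length, checksum) */
    static void
    my_tunnel_fixup (vlib_main_t * vm, ip_adjacency_t * adj, vlib_buffer_t * b0)
    {
        /* tunnel-specific header fix-ups go here */
    }

    static adj_index_t
    my_tunnel_mk_midchain (u32 sw_if_index,  /* the tunnel interface */
                           u8 * rewrite,     /* encap, built by the tunnel */
                           const dpo_id_t * tunnel_dst_dpo)
    {
        ip46_address_t nh = {
            .as_u64 = { 0, 0 }, /* zero address used purely as the adj key */
        };
        adj_index_t ai;

        /* first call creates an incomplete adj; the adj must still be in
         * that initial state when converted to a midchain */
        ai = adj_nbr_add_or_lock (FIB_PROTOCOL_IP4, VNET_LINK_IP4,
                                  &nh, sw_if_index);

        /* the rewrite vector is consumed (vec_free'd) by this call */
        adj_nbr_midchain_update_rewrite (ai, my_tunnel_fixup,
                                         ADJ_MIDCHAIN_FLAG_NONE, rewrite);

        /* link to the DPO that reaches the tunnel destination */
        adj_nbr_midchain_stack (ai, tunnel_dst_dpo);

        return (ai); /* caller holds the lock for the tunnel's lifetime */
    }
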
diff --git a/src/vnet/adj/adj_midchain.h b/src/vnet/adj/adj_midchain.h
new file mode 100644
index 00000000000..ae414aea6dc
--- /dev/null
+++ b/src/vnet/adj/adj_midchain.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Midchain Adjacency sub-type. These adjs represent an L3 peer on a
+ * tunnel interface. The tunnel's adjacency is thus not the end of the chain,
+ * and needs to stack on/link to another chain (or portion of the graph) to
+ * reach the tunnel's destination.
+ */
+
+#ifndef __ADJ_MIDCHAIN_H__
+#define __ADJ_MIDCHAIN_H__
+
+#include <vnet/adj/adj.h>
+
+/**
+ * @brief Flags controlling the midchain adjacency
+ */
+typedef enum adj_midchain_flag_t_
+{
+ /**
+ * No flags
+ */
+ ADJ_MIDCHAIN_FLAG_NONE = 0,
+
+ /**
+ * Packets TX through the midchain do not increment the interface
+ * counters. This should be used when the adj is associated with an L2
+     * interface and that L2 interface is in a bridge domain. In that case
+     * the packet will have traversed the interface's TX node, and hence have
+     * been counted, before it traverses the midchain.
+ */
+ ADJ_MIDCHAIN_FLAG_NO_COUNT = (1 << 0),
+} adj_midchain_flag_t;
+
+/**
+ * @brief
+ * Convert an existing neighbour adjacency into a midchain
+ *
+ * @param adj_index
+ * The index of the neighbour adjacency.
+ *
+ * @param fixup
+ *  The fixup function, called post-encap to make tunnel-specific
+ *  corrections, e.g., checksum, length, etc.
+ *
+ * @param flags
+ *  Flags controlling the midchain adjacency (see adj_midchain_flag_t)
+ *
+ * @param rewrite
+ *  The rewrite.
+ */
+extern void adj_nbr_midchain_update_rewrite(adj_index_t adj_index,
+ adj_midchain_fixup_t fixup,
+ adj_midchain_flag_t flags,
+ u8 *rewrite);
+
+/**
+ * @brief
+ * [re]stack a midchain. 'Stacking' is the act of forming parent-child
+ * relationships in the data-plane graph.
+ *
+ * @param adj_index
+ * The index of the midchain to stack
+ *
+ * @param dpo
+ * The parent DPO to stack onto (i.e. become a child of).
+ */
+extern void adj_nbr_midchain_stack(adj_index_t adj_index,
+ const dpo_id_t *dpo);
+
+/**
+ * @brief
+ * unstack a midchain. This will break the chain between the midchain and
+ * the next graph section. This is implemented as stack-on-drop.
+ *
+ * @param adj_index
+ *  The index of the midchain to unstack
+ */
+extern void adj_nbr_midchain_unstack(adj_index_t adj_index);
+
+/**
+ * @brief
+ * Module initialisation
+ */
+extern void adj_midchain_module_init(void);
+
+/**
+ * @brief
+ * Format a midchain adjacency
+ */
+extern u8* format_adj_midchain(u8* s, va_list *ap);
+
+#endif
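
For the companion teardown path: when the tunnel destination becomes unreachable the owner breaks the chain with adj_nbr_midchain_unstack() (traffic then drops safely at the midchain), and re-stacks once the destination resolves again. A sketch, with `ai` held by the owner as above and `new_dst_dpo` a hypothetical re-resolved parent:

    static void
    my_tunnel_dst_change (adj_index_t ai, const dpo_id_t * new_dst_dpo)
    {
        /* destination withdrawn: park the midchain on the drop DPO */
        adj_nbr_midchain_unstack (ai);

        /* ... control plane re-resolves the destination ... */

        /* destination reachable again: connect to the new parent */
        adj_nbr_midchain_stack (ai, new_dst_dpo);
    }
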
diff --git a/src/vnet/adj/adj_nbr.c b/src/vnet/adj/adj_nbr.c
new file mode 100644
index 00000000000..1344bb67fcc
--- /dev/null
+++ b/src/vnet/adj/adj_nbr.c
@@ -0,0 +1,1087 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/adj/adj_nbr.h>
+#include <vnet/adj/adj_internal.h>
+#include <vnet/ethernet/arp_packet.h>
+#include <vnet/fib/fib_walk.h>
+
+/*
+ * Vectors of hash tables of neighbour (traditional) adjacencies.
+ *  Key: interface (the vector index), address (and its proto),
+ * link-type/ether-type.
+ */
+static BVT(clib_bihash) **adj_nbr_tables[FIB_PROTOCOL_MAX];
+
+// FIXME SIZE APPROPRIATELY. ASK DAVEB.
+#define ADJ_NBR_DEFAULT_HASH_NUM_BUCKETS (64 * 64)
+#define ADJ_NBR_DEFAULT_HASH_MEMORY_SIZE (32<<20)
+
+
+#define ADJ_NBR_SET_KEY(_key, _lt, _nh) \
+{ \
+ _key.key[0] = (_nh)->as_u64[0]; \
+ _key.key[1] = (_nh)->as_u64[1]; \
+ _key.key[2] = (_lt); \
+}
+
+#define ADJ_NBR_ITF_OK(_proto, _itf) \
+ (((_itf) < vec_len(adj_nbr_tables[_proto])) && \
+     (NULL != adj_nbr_tables[_proto][_itf]))
+
+static void
+adj_nbr_insert (fib_protocol_t nh_proto,
+ vnet_link_t link_type,
+ const ip46_address_t *nh_addr,
+ u32 sw_if_index,
+ adj_index_t adj_index)
+{
+ BVT(clib_bihash_kv) kv;
+
+ if (sw_if_index >= vec_len(adj_nbr_tables[nh_proto]))
+ {
+ vec_validate(adj_nbr_tables[nh_proto], sw_if_index);
+ }
+ if (NULL == adj_nbr_tables[nh_proto][sw_if_index])
+ {
+ adj_nbr_tables[nh_proto][sw_if_index] =
+ clib_mem_alloc_aligned(sizeof(BVT(clib_bihash)),
+ CLIB_CACHE_LINE_BYTES);
+ memset(adj_nbr_tables[nh_proto][sw_if_index],
+ 0,
+ sizeof(BVT(clib_bihash)));
+
+ BV(clib_bihash_init) (adj_nbr_tables[nh_proto][sw_if_index],
+ "Adjacency Neighbour table",
+ ADJ_NBR_DEFAULT_HASH_NUM_BUCKETS,
+ ADJ_NBR_DEFAULT_HASH_MEMORY_SIZE);
+ }
+
+ ADJ_NBR_SET_KEY(kv, link_type, nh_addr);
+ kv.value = adj_index;
+
+ BV(clib_bihash_add_del) (adj_nbr_tables[nh_proto][sw_if_index], &kv, 1);
+}
+
+void
+adj_nbr_remove (adj_index_t ai,
+ fib_protocol_t nh_proto,
+ vnet_link_t link_type,
+ const ip46_address_t *nh_addr,
+ u32 sw_if_index)
+{
+ BVT(clib_bihash_kv) kv;
+
+ if (!ADJ_NBR_ITF_OK(nh_proto, sw_if_index))
+ return;
+
+ ADJ_NBR_SET_KEY(kv, link_type, nh_addr);
+ kv.value = ai;
+
+ BV(clib_bihash_add_del) (adj_nbr_tables[nh_proto][sw_if_index], &kv, 0);
+}
+
+static adj_index_t
+adj_nbr_find (fib_protocol_t nh_proto,
+ vnet_link_t link_type,
+ const ip46_address_t *nh_addr,
+ u32 sw_if_index)
+{
+ BVT(clib_bihash_kv) kv;
+
+ ADJ_NBR_SET_KEY(kv, link_type, nh_addr);
+
+ if (!ADJ_NBR_ITF_OK(nh_proto, sw_if_index))
+ return (ADJ_INDEX_INVALID);
+
+ if (BV(clib_bihash_search)(adj_nbr_tables[nh_proto][sw_if_index],
+ &kv, &kv) < 0)
+ {
+ return (ADJ_INDEX_INVALID);
+ }
+ else
+ {
+ return (kv.value);
+ }
+}
+
+static inline u32
+adj_get_nd_node (fib_protocol_t proto)
+{
+ switch (proto) {
+ case FIB_PROTOCOL_IP4:
+ return (ip4_arp_node.index);
+ case FIB_PROTOCOL_IP6:
+ return (ip6_discover_neighbor_node.index);
+ case FIB_PROTOCOL_MPLS:
+ break;
+ }
+ ASSERT(0);
+ return (ip4_arp_node.index);
+}
+
+static ip_adjacency_t*
+adj_nbr_alloc (fib_protocol_t nh_proto,
+ vnet_link_t link_type,
+ const ip46_address_t *nh_addr,
+ u32 sw_if_index)
+{
+ ip_adjacency_t *adj;
+
+ adj = adj_alloc(nh_proto);
+
+ adj_nbr_insert(nh_proto, link_type, nh_addr,
+ sw_if_index,
+ adj_get_index(adj));
+
+ /*
+ * since we just added the ADJ we have no rewrite string for it,
+     * so it's for ARP.
+ */
+ adj->lookup_next_index = IP_LOOKUP_NEXT_ARP;
+ adj->sub_type.nbr.next_hop = *nh_addr;
+ adj->ia_link = link_type;
+ adj->ia_nh_proto = nh_proto;
+ adj->rewrite_header.sw_if_index = sw_if_index;
+ memset(&adj->sub_type.midchain.next_dpo, 0,
+ sizeof(adj->sub_type.midchain.next_dpo));
+
+ return (adj);
+}
+
+/*
+ * adj_nbr_add_or_lock
+ *
+ * Add an adjacency for the neighbour requested.
+ *
+ * The key for an adj is:
+ * - the Next-hops protocol (i.e. v4 or v6)
+ * - the address of the next-hop
+ * - the interface the next-hop is reachable through
+ */
+adj_index_t
+adj_nbr_add_or_lock (fib_protocol_t nh_proto,
+ vnet_link_t link_type,
+ const ip46_address_t *nh_addr,
+ u32 sw_if_index)
+{
+ adj_index_t adj_index;
+ ip_adjacency_t *adj;
+
+ adj_index = adj_nbr_find(nh_proto, link_type, nh_addr, sw_if_index);
+
+ if (ADJ_INDEX_INVALID == adj_index)
+ {
+ vnet_main_t *vnm;
+
+ vnm = vnet_get_main();
+ adj = adj_nbr_alloc(nh_proto, link_type, nh_addr, sw_if_index);
+ adj_index = adj_get_index(adj);
+ adj_lock(adj_index);
+
+ vnet_rewrite_init(vnm, sw_if_index,
+ adj_get_nd_node(nh_proto),
+ vnet_tx_node_index_for_sw_interface(vnm, sw_if_index),
+ &adj->rewrite_header);
+
+ /*
+ * we need a rewrite where the destination IP address is converted
+ * to the appropriate link-layer address. This is interface specific.
+ * So ask the interface to do it.
+ */
+ vnet_update_adjacency_for_sw_interface(vnm, sw_if_index, adj_index);
+ }
+ else
+ {
+ adj_lock(adj_index);
+ }
+
+ return (adj_index);
+}
+
+adj_index_t
+adj_nbr_add_or_lock_w_rewrite (fib_protocol_t nh_proto,
+ vnet_link_t link_type,
+ const ip46_address_t *nh_addr,
+ u32 sw_if_index,
+ u8 *rewrite)
+{
+ adj_index_t adj_index;
+ ip_adjacency_t *adj;
+
+ adj_index = adj_nbr_find(nh_proto, link_type, nh_addr, sw_if_index);
+
+ if (ADJ_INDEX_INVALID == adj_index)
+ {
+ adj = adj_nbr_alloc(nh_proto, link_type, nh_addr, sw_if_index);
+ adj->rewrite_header.sw_if_index = sw_if_index;
+ }
+ else
+ {
+ adj = adj_get(adj_index);
+ }
+
+ adj_lock(adj_get_index(adj));
+ adj_nbr_update_rewrite(adj_get_index(adj),
+ ADJ_NBR_REWRITE_FLAG_COMPLETE,
+ rewrite);
+
+ return (adj_get_index(adj));
+}
+
+/**
+ * adj_nbr_update_rewrite
+ *
+ * Update the adjacency's rewrite string. A NULL string implies the
+ * rewrite is reset (i.e. when the ARP/ND entry is gone).
+ * NB: the adj being updated may be handling traffic in the DP.
+ */
+void
+adj_nbr_update_rewrite (adj_index_t adj_index,
+ adj_nbr_rewrite_flag_t flags,
+ u8 *rewrite)
+{
+ ip_adjacency_t *adj;
+
+ ASSERT(ADJ_INDEX_INVALID != adj_index);
+
+ adj = adj_get(adj_index);
+
+ if (flags & ADJ_NBR_REWRITE_FLAG_COMPLETE)
+ {
+ /*
+ * update the adj's rewrite string and build the arc
+ * from the rewrite node to the interface's TX node
+ */
+ adj_nbr_update_rewrite_internal(adj, IP_LOOKUP_NEXT_REWRITE,
+ adj_get_rewrite_node(adj->ia_link),
+ vnet_tx_node_index_for_sw_interface(
+ vnet_get_main(),
+ adj->rewrite_header.sw_if_index),
+ rewrite);
+ }
+ else
+ {
+ adj_nbr_update_rewrite_internal(adj, IP_LOOKUP_NEXT_ARP,
+ adj_get_nd_node(adj->ia_nh_proto),
+ vnet_tx_node_index_for_sw_interface(
+ vnet_get_main(),
+ adj->rewrite_header.sw_if_index),
+ rewrite);
+ }
+}
+
+/**
+ * adj_nbr_update_rewrite_internal
+ *
+ * Update the adjacency's rewrite string. A NULL string implies the
+ * rewrite is reset (i.e. when the ARP/ND entry is gone).
+ * NB: the adj being updated may be handling traffic in the DP.
+ */
+void
+adj_nbr_update_rewrite_internal (ip_adjacency_t *adj,
+ u32 adj_next_index,
+ u32 this_node,
+ u32 next_node,
+ u8 *rewrite)
+{
+ ip_adjacency_t *walk_adj;
+ adj_index_t walk_ai;
+ vlib_main_t * vm;
+ u32 old_next;
+ int do_walk;
+
+ vm = vlib_get_main();
+ old_next = adj->lookup_next_index;
+
+ walk_ai = adj_get_index(adj);
+ if (VNET_LINK_MPLS == adj->ia_link)
+ {
+ /*
+ * The link type MPLS has no children in the control plane graph, it only
+	 * has children in the data-plane graph. The backwalk is up the former.
+ * So we need to walk from its IP cousin.
+ */
+ walk_ai = adj_nbr_find(adj->ia_nh_proto,
+ fib_proto_to_link(adj->ia_nh_proto),
+ &adj->sub_type.nbr.next_hop,
+ adj->rewrite_header.sw_if_index);
+ }
+
+ /*
+ * Don't call the walk re-entrantly
+ */
+ if (ADJ_INDEX_INVALID != walk_ai)
+ {
+ walk_adj = adj_get(walk_ai);
+ if (IP_ADJ_SYNC_WALK_ACTIVE & walk_adj->ia_flags)
+ {
+ do_walk = 0;
+ }
+ else
+ {
+ /*
+ * Prevent re-entrant walk of the same adj
+ */
+ walk_adj->ia_flags |= IP_ADJ_SYNC_WALK_ACTIVE;
+ do_walk = 1;
+ }
+ }
+ else
+ {
+ do_walk = 0;
+ }
+
+ /*
+ * lock the adjacencies that are affected by updates this walk will provoke.
+ * Since the aim of the walk is to update children to link to a different
+ * DPO, this adj will no longer be in use and its lock count will drop to 0.
+     * We don't want it to be deleted as part of this endeavour.
+ */
+ adj_lock(adj_get_index(adj));
+ adj_lock(walk_ai);
+
+ /*
+ * Updating a rewrite string is not atomic;
+ * - the rewrite string is too long to write in one instruction
+ * - when swapping from incomplete to complete, we also need to update
+ * the VLIB graph next-index of the adj.
+ * ideally we would only want to suspend forwarding via this adj whilst we
+ * do this, but we do not have that level of granularity - it's suspend all
+ * worker threads or nothing.
+     * The other choices are:
+ * - to mark the adj down and back walk so child load-balances drop this adj
+ * from the set.
+ * - update the next_node index of this adj to point to error-drop
+ * both of which will mean for MAC change we will drop for this adj
+ * which is not acceptable. However, when the adj changes type (from
+ * complete to incomplete and vice-versa) the child DPOs, which have the
+ * VLIB graph next node index, will be sending packets to the wrong graph
+ * node. So from the options above, updating the next_node of the adj to
+ * be drop will work, but it relies on each graph node v4/v6/mpls, rewrite/
+     * be drop will work, but it relies on each graph node (v4/v6/mpls, rewrite/
+     * arp/midchain) always being valid w.r.t. a mis-match of adj type and node
+     * type (i.e. a rewrite adj in the arp node). This is not enforceable. Getting
+     * it wrong will lead to hard-to-find bugs since it's a race condition. So we
+ * then switching adj's type, then updating the children again. Did I mention
+ * that this doesn't happen often...
+ * So we need to distinguish between the two cases:
+ * 1 - mac change
+ * 2 - adj type change
+ */
+ if (do_walk &&
+ old_next != adj_next_index &&
+ ADJ_INDEX_INVALID != walk_ai)
+ {
+ /*
+ * the adj is changing type. we need to fix all children so that they
+ * stack momentarily on a drop, while the adj changes. If we don't do
+ * this the children will send packets to a VLIB graph node that does
+ * not correspond to the adj's type - and it goes downhill from there.
+ */
+ fib_node_back_walk_ctx_t bw_ctx = {
+ .fnbw_reason = FIB_NODE_BW_REASON_FLAG_ADJ_DOWN,
+ /*
+	     * force this walk to be synchronous. if we don't and a node in the graph
+	     * (a heavily shared path-list) chooses to background the walk (make it
+ * async) then it will pause and we will do the adj update below, before
+ * all the children are updated. not good.
+ */
+ .fnbw_flags = FIB_NODE_BW_FLAG_FORCE_SYNC,
+ };
+
+ fib_walk_sync(FIB_NODE_TYPE_ADJ, walk_ai, &bw_ctx);
+ }
+
+ /*
+ * If we are just updating the MAC string of the adj (which we also can't
+ * do atomically), then we need to stop packets switching through the adj.
+ * We can't do that on a per-adj basis, so it's all the packets.
+ * If we are updating the type, and we walked back to the children above,
+ * then this barrier serves to flush the queues/frames.
+ */
+ vlib_worker_thread_barrier_sync(vm);
+
+ adj->lookup_next_index = adj_next_index;
+
+ if (NULL != rewrite)
+ {
+ /*
+ * new rewrite provided.
+ * fill in the adj's rewrite string, and build the VLIB graph arc.
+ */
+ vnet_rewrite_set_data_internal(&adj->rewrite_header,
+ sizeof(adj->rewrite_data),
+ rewrite,
+ vec_len(rewrite));
+ vec_free(rewrite);
+ }
+ else
+ {
+ vnet_rewrite_clear_data_internal(&adj->rewrite_header,
+ sizeof(adj->rewrite_data));
+ }
+ adj->rewrite_header.node_index = this_node;
+ adj->rewrite_header.next_index = vlib_node_add_next(vlib_get_main(),
+ this_node,
+ next_node);
+
+ /*
+     * done with the rewrite update - let the workers loose.
+ */
+ vlib_worker_thread_barrier_release(vm);
+
+ if (do_walk &&
+ (old_next != adj->lookup_next_index) &&
+ (ADJ_INDEX_INVALID != walk_ai))
+ {
+ /*
+ * backwalk to the children so they can stack on the now updated
+ * adjacency
+ */
+ fib_node_back_walk_ctx_t bw_ctx = {
+ .fnbw_reason = FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE,
+ };
+
+ fib_walk_sync(FIB_NODE_TYPE_ADJ, walk_ai, &bw_ctx);
+ }
+ /*
+ * Prevent re-entrant walk of the same adj
+ */
+ if (do_walk)
+ {
+ walk_adj->ia_flags &= ~IP_ADJ_SYNC_WALK_ACTIVE;
+ }
+
+ adj_unlock(adj_get_index(adj));
+ adj_unlock(walk_ai);
+}
+
+typedef struct adj_db_count_ctx_t_ {
+ u64 count;
+} adj_db_count_ctx_t;
+
+static void
+adj_db_count (BVT(clib_bihash_kv) * kvp,
+ void *arg)
+{
+ adj_db_count_ctx_t * ctx = arg;
+ ctx->count++;
+}
+
+u32
+adj_nbr_db_size (void)
+{
+ adj_db_count_ctx_t ctx = {
+ .count = 0,
+ };
+ fib_protocol_t proto;
+ u32 sw_if_index = 0;
+
+ for (proto = FIB_PROTOCOL_IP4; proto <= FIB_PROTOCOL_IP6; proto++)
+ {
+ vec_foreach_index(sw_if_index, adj_nbr_tables[proto])
+ {
+ if (NULL != adj_nbr_tables[proto][sw_if_index])
+ {
+ BV(clib_bihash_foreach_key_value_pair) (
+ adj_nbr_tables[proto][sw_if_index],
+ adj_db_count,
+ &ctx);
+ }
+ }
+ }
+ return (ctx.count);
+}
+
+/**
+ * @brief Context for a walk of the adjacency neighbour DB
+ */
+typedef struct adj_walk_ctx_t_
+{
+ adj_walk_cb_t awc_cb;
+ void *awc_ctx;
+} adj_walk_ctx_t;
+
+static void
+adj_nbr_walk_cb (BVT(clib_bihash_kv) * kvp,
+ void *arg)
+{
+ adj_walk_ctx_t *ctx = arg;
+
+ // FIXME: can't stop early...
+ ctx->awc_cb(kvp->value, ctx->awc_ctx);
+}
+
+void
+adj_nbr_walk (u32 sw_if_index,
+ fib_protocol_t adj_nh_proto,
+ adj_walk_cb_t cb,
+ void *ctx)
+{
+ if (!ADJ_NBR_ITF_OK(adj_nh_proto, sw_if_index))
+ return;
+
+ adj_walk_ctx_t awc = {
+ .awc_ctx = ctx,
+ .awc_cb = cb,
+ };
+
+ BV(clib_bihash_foreach_key_value_pair) (
+ adj_nbr_tables[adj_nh_proto][sw_if_index],
+ adj_nbr_walk_cb,
+ &awc);
+}
+
+/**
+ * @brief Context for a walk of the adjacency neighbour DB filtered by next-hop
+ */
+typedef struct adj_walk_nh_ctx_t_
+{
+ adj_walk_cb_t awc_cb;
+ void *awc_ctx;
+ const ip46_address_t *awc_nh;
+} adj_walk_nh_ctx_t;
+
+static void
+adj_nbr_walk_nh_cb (BVT(clib_bihash_kv) * kvp,
+ void *arg)
+{
+ ip_adjacency_t *adj;
+ adj_walk_nh_ctx_t *ctx = arg;
+
+ adj = adj_get(kvp->value);
+
+ if (!ip46_address_cmp(&adj->sub_type.nbr.next_hop, ctx->awc_nh))
+ ctx->awc_cb(kvp->value, ctx->awc_ctx);
+}
+
+/**
+ * @brief Walk adjacencies on a link with a given v4 next-hop.
+ * That is, visit the adjacencies with different link types.
+ */
+void
+adj_nbr_walk_nh4 (u32 sw_if_index,
+ const ip4_address_t *addr,
+ adj_walk_cb_t cb,
+ void *ctx)
+{
+ if (!ADJ_NBR_ITF_OK(FIB_PROTOCOL_IP4, sw_if_index))
+ return;
+
+ ip46_address_t nh = {
+ .ip4 = *addr,
+ };
+
+ adj_walk_nh_ctx_t awc = {
+ .awc_ctx = ctx,
+ .awc_cb = cb,
+ .awc_nh = &nh,
+ };
+
+ BV(clib_bihash_foreach_key_value_pair) (
+ adj_nbr_tables[FIB_PROTOCOL_IP4][sw_if_index],
+ adj_nbr_walk_nh_cb,
+ &awc);
+}
+
+/**
+ * @brief Walk adjacencies on a link with a given v6 next-hop.
+ * That is, visit the adjacencies with different link types.
+ */
+void
+adj_nbr_walk_nh6 (u32 sw_if_index,
+ const ip6_address_t *addr,
+ adj_walk_cb_t cb,
+ void *ctx)
+{
+ if (!ADJ_NBR_ITF_OK(FIB_PROTOCOL_IP6, sw_if_index))
+ return;
+
+ ip46_address_t nh = {
+ .ip6 = *addr,
+ };
+
+ adj_walk_nh_ctx_t awc = {
+ .awc_ctx = ctx,
+ .awc_cb = cb,
+ .awc_nh = &nh,
+ };
+
+ BV(clib_bihash_foreach_key_value_pair) (
+ adj_nbr_tables[FIB_PROTOCOL_IP6][sw_if_index],
+ adj_nbr_walk_nh_cb,
+ &awc);
+}
+
+/**
+ * @brief Walk adjacencies on a link with a given next-hop.
+ * That is, visit the adjacencies with different link types.
+ */
+void
+adj_nbr_walk_nh (u32 sw_if_index,
+ fib_protocol_t adj_nh_proto,
+ const ip46_address_t *nh,
+ adj_walk_cb_t cb,
+ void *ctx)
+{
+ if (!ADJ_NBR_ITF_OK(adj_nh_proto, sw_if_index))
+ return;
+
+ adj_walk_nh_ctx_t awc = {
+ .awc_ctx = ctx,
+ .awc_cb = cb,
+ .awc_nh = nh,
+ };
+
+ BV(clib_bihash_foreach_key_value_pair) (
+ adj_nbr_tables[adj_nh_proto][sw_if_index],
+ adj_nbr_walk_nh_cb,
+ &awc);
+}
+
+/**
+ * Flags associated with the interface state walks
+ */
+typedef enum adj_nbr_interface_flags_t_
+{
+ ADJ_NBR_INTERFACE_UP = (1 << 0),
+} adj_nbr_interface_flags_t;
+
+/**
+ * Context for the state change walk of the DB
+ */
+typedef struct adj_nbr_interface_state_change_ctx_t_
+{
+ /**
+ * Flags on the interface
+ */
+ adj_nbr_interface_flags_t flags;
+} adj_nbr_interface_state_change_ctx_t;
+
+static adj_walk_rc_t
+adj_nbr_interface_state_change_one (adj_index_t ai,
+ void *arg)
+{
+ /*
+ * Back walk the graph to inform the forwarding entries
+ * that this interface state has changed. Do this synchronously
+ * since this is the walk that provides convergence
+ */
+ adj_nbr_interface_state_change_ctx_t *ctx = arg;
+
+ fib_node_back_walk_ctx_t bw_ctx = {
+ .fnbw_reason = ((ctx->flags & ADJ_NBR_INTERFACE_UP) ?
+ FIB_NODE_BW_REASON_FLAG_INTERFACE_UP :
+ FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN),
+ /*
+ * the force sync applies only as far as the first fib_entry.
+	 * And it's the fib_entries we need to converge away from
+	 * the adjacencies on the now-down link
+ */
+ .fnbw_flags = (!(ctx->flags & ADJ_NBR_INTERFACE_UP) ?
+ FIB_NODE_BW_FLAG_FORCE_SYNC :
+ 0),
+ };
+
+ fib_walk_sync(FIB_NODE_TYPE_ADJ, ai, &bw_ctx);
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+/**
+ * @brief Registered function for SW interface state changes
+ */
+static clib_error_t *
+adj_nbr_sw_interface_state_change (vnet_main_t * vnm,
+ u32 sw_if_index,
+ u32 flags)
+{
+ fib_protocol_t proto;
+
+ /*
+ * walk each adj on the interface and trigger a walk from that adj
+ */
+ for (proto = FIB_PROTOCOL_IP4; proto <= FIB_PROTOCOL_IP6; proto++)
+ {
+ adj_nbr_interface_state_change_ctx_t ctx = {
+ .flags = ((flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ?
+ ADJ_NBR_INTERFACE_UP :
+ 0),
+ };
+
+ adj_nbr_walk(sw_if_index, proto,
+ adj_nbr_interface_state_change_one,
+ &ctx);
+ }
+
+ return (NULL);
+}
+
+VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION_PRIO(
+ adj_nbr_sw_interface_state_change,
+ VNET_ITF_FUNC_PRIORITY_HIGH);
+
+/**
+ * @brief Invoked on each SW interface of a HW interface when the
+ * HW interface state changes
+ */
+static void
+adj_nbr_hw_sw_interface_state_change (vnet_main_t * vnm,
+ u32 sw_if_index,
+ void *arg)
+{
+ adj_nbr_interface_state_change_ctx_t *ctx = arg;
+ fib_protocol_t proto;
+
+ /*
+ * walk each adj on the interface and trigger a walk from that adj
+ */
+ for (proto = FIB_PROTOCOL_IP4; proto <= FIB_PROTOCOL_IP6; proto++)
+ {
+ adj_nbr_walk(sw_if_index, proto,
+ adj_nbr_interface_state_change_one,
+ ctx);
+ }
+}
+
+/**
+ * @brief Registered callback for HW interface state changes
+ */
+static clib_error_t *
+adj_nbr_hw_interface_state_change (vnet_main_t * vnm,
+ u32 hw_if_index,
+ u32 flags)
+{
+ /*
+ * walk SW interface on the HW
+ */
+ adj_nbr_interface_state_change_ctx_t ctx = {
+ .flags = ((flags & VNET_HW_INTERFACE_FLAG_LINK_UP) ?
+ ADJ_NBR_INTERFACE_UP :
+ 0),
+ };
+
+ vnet_hw_interface_walk_sw(vnm, hw_if_index,
+ adj_nbr_hw_sw_interface_state_change,
+ &ctx);
+
+ return (NULL);
+}
+
+VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION_PRIO(
+ adj_nbr_hw_interface_state_change,
+ VNET_ITF_FUNC_PRIORITY_HIGH);
+
+static adj_walk_rc_t
+adj_nbr_interface_delete_one (adj_index_t ai,
+ void *arg)
+{
+ /*
+ * Back walk the graph to inform the forwarding entries
+ * that this interface has been deleted.
+ */
+ fib_node_back_walk_ctx_t bw_ctx = {
+ .fnbw_reason = FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE,
+ };
+
+ fib_walk_sync(FIB_NODE_TYPE_ADJ, ai, &bw_ctx);
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+/**
+ * adj_nbr_interface_add_del
+ *
+ * Registered to receive interface Add and delete notifications
+ */
+static clib_error_t *
+adj_nbr_interface_add_del (vnet_main_t * vnm,
+ u32 sw_if_index,
+ u32 is_add)
+{
+ fib_protocol_t proto;
+
+ if (is_add)
+ {
+ /*
+ * not interested in interface additions. we will not back walk
+ * to resolve paths through newly added interfaces. Why? The control
+ * plane should have the brains to add interfaces first, then routes.
+	 * So the case where there are paths with an interface that matches
+	 * one just created is the case where the path resolved through an
+	 * interface that was deleted, and still has not been removed. The
+	 * newly added interface is NO GUARANTEE that the interface being
+ * added now, even though it may have the same sw_if_index, is the
+ * same interface that the path needs. So tough!
+ * If the control plane wants these routes to resolve it needs to
+ * remove and add them again.
+ */
+ return (NULL);
+ }
+
+ for (proto = FIB_PROTOCOL_IP4; proto <= FIB_PROTOCOL_IP6; proto++)
+ {
+ adj_nbr_walk(sw_if_index, proto,
+ adj_nbr_interface_delete_one,
+ NULL);
+ }
+
+ return (NULL);
+
+}
+
+VNET_SW_INTERFACE_ADD_DEL_FUNCTION(adj_nbr_interface_add_del);
+
+
+static adj_walk_rc_t
+adj_nbr_show_one (adj_index_t ai,
+ void *arg)
+{
+ vlib_cli_output (arg, "[@%d] %U",
+ ai,
+ format_ip_adjacency, ai,
+ FORMAT_IP_ADJACENCY_NONE);
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+static clib_error_t *
+adj_nbr_show (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ adj_index_t ai = ADJ_INDEX_INVALID;
+ u32 sw_if_index = ~0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%d", &ai))
+ ;
+ else if (unformat (input, "%U",
+ unformat_vnet_sw_interface, vnet_get_main(),
+ &sw_if_index))
+ ;
+ else
+ break;
+ }
+
+ if (ADJ_INDEX_INVALID != ai)
+ {
+ vlib_cli_output (vm, "[@%d] %U",
+ ai,
+ format_ip_adjacency, ai,
+ FORMAT_IP_ADJACENCY_DETAIL);
+ }
+ else if (~0 != sw_if_index)
+ {
+ fib_protocol_t proto;
+
+ for (proto = FIB_PROTOCOL_IP4; proto <= FIB_PROTOCOL_IP6; proto++)
+ {
+ adj_nbr_walk(sw_if_index, proto,
+ adj_nbr_show_one,
+ vm);
+ }
+ }
+ else
+ {
+ fib_protocol_t proto;
+
+ for (proto = FIB_PROTOCOL_IP4; proto <= FIB_PROTOCOL_IP6; proto++)
+ {
+ vec_foreach_index(sw_if_index, adj_nbr_tables[proto])
+ {
+ adj_nbr_walk(sw_if_index, proto,
+ adj_nbr_show_one,
+ vm);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*?
+ * Show all neighbour adjacencies.
+ * @cliexpar
+ * @cliexstart{sh adj nbr}
+ * [@2] ipv4 via 1.0.0.2 loop0: IP4: 00:00:22:aa:bb:cc -> 00:00:11:aa:bb:cc
+ * [@3] mpls via 1.0.0.2 loop0: MPLS_UNICAST: 00:00:22:aa:bb:cc -> 00:00:11:aa:bb:cc
+ * [@4] ipv4 via 1.0.0.3 loop0: IP4: 00:00:22:aa:bb:cc -> 00:00:11:aa:bb:cc
+ * [@5] mpls via 1.0.0.3 loop0: MPLS_UNICAST: 00:00:22:aa:bb:cc -> 00:00:11:aa:bb:cc
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND (adj_nbr_show_command, static) = {
+ .path = "show adj nbr",
+ .short_help = "show adj nbr [<adj_index>] [interface]",
+ .function = adj_nbr_show,
+};
+
+static ip46_type_t
+adj_proto_to_46 (fib_protocol_t proto)
+{
+ switch (proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (IP46_TYPE_IP4);
+ case FIB_PROTOCOL_IP6:
+ return (IP46_TYPE_IP6);
+ default:
+ return (IP46_TYPE_IP4);
+ }
+ return (IP46_TYPE_IP4);
+}
+
+u8*
+format_adj_nbr_incomplete (u8* s, va_list *ap)
+{
+ index_t index = va_arg(*ap, index_t);
+ CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
+ vnet_main_t * vnm = vnet_get_main();
+ ip_adjacency_t * adj = adj_get(index);
+
+ s = format (s, "arp-%U", format_vnet_link, adj->ia_link);
+ s = format (s, ": via %U",
+ format_ip46_address, &adj->sub_type.nbr.next_hop,
+ adj_proto_to_46(adj->ia_nh_proto));
+ s = format (s, " %U",
+ format_vnet_sw_interface_name,
+ vnm,
+ vnet_get_sw_interface(vnm,
+ adj->rewrite_header.sw_if_index));
+
+ return (s);
+}
+
+u8*
+format_adj_nbr (u8* s, va_list *ap)
+{
+ index_t index = va_arg(*ap, index_t);
+ CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
+ vnet_main_t * vnm = vnet_get_main();
+ ip_adjacency_t * adj = adj_get(index);
+
+ s = format (s, "%U", format_vnet_link, adj->ia_link);
+ s = format (s, " via %U ",
+ format_ip46_address, &adj->sub_type.nbr.next_hop,
+ adj_proto_to_46(adj->ia_nh_proto));
+ s = format (s, "%U",
+ format_vnet_rewrite,
+ vnm->vlib_main, &adj->rewrite_header, sizeof (adj->rewrite_data), 0);
+
+ return (s);
+}
+
+static void
+adj_dpo_lock (dpo_id_t *dpo)
+{
+ adj_lock(dpo->dpoi_index);
+}
+static void
+adj_dpo_unlock (dpo_id_t *dpo)
+{
+ adj_unlock(dpo->dpoi_index);
+}
+
+static void
+adj_mem_show (void)
+{
+ fib_show_memory_usage("Adjacency",
+ pool_elts(adj_pool),
+ pool_len(adj_pool),
+ sizeof(ip_adjacency_t));
+}
+
+const static dpo_vft_t adj_nbr_dpo_vft = {
+ .dv_lock = adj_dpo_lock,
+ .dv_unlock = adj_dpo_unlock,
+ .dv_format = format_adj_nbr,
+ .dv_mem_show = adj_mem_show,
+};
+const static dpo_vft_t adj_nbr_incompl_dpo_vft = {
+ .dv_lock = adj_dpo_lock,
+ .dv_unlock = adj_dpo_unlock,
+ .dv_format = format_adj_nbr_incomplete,
+};
+
+/**
+ * @brief The per-protocol VLIB graph nodes that are assigned to an adjacency
+ * object.
+ *
+ * this means that these graph nodes are ones from which a nbr is the
+ * parent object in the DPO-graph.
+ */
+const static char* const nbr_ip4_nodes[] =
+{
+ "ip4-rewrite",
+ NULL,
+};
+const static char* const nbr_ip6_nodes[] =
+{
+ "ip6-rewrite",
+ NULL,
+};
+const static char* const nbr_mpls_nodes[] =
+{
+ "mpls-output",
+ NULL,
+};
+const static char* const nbr_ethernet_nodes[] =
+{
+ "adj-l2-rewrite",
+ NULL,
+};
+const static char* const * const nbr_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = nbr_ip4_nodes,
+ [DPO_PROTO_IP6] = nbr_ip6_nodes,
+ [DPO_PROTO_MPLS] = nbr_mpls_nodes,
+ [DPO_PROTO_ETHERNET] = nbr_ethernet_nodes,
+};
+
+const static char* const nbr_incomplete_ip4_nodes[] =
+{
+ "ip4-arp",
+ NULL,
+};
+const static char* const nbr_incomplete_ip6_nodes[] =
+{
+ "ip6-discover-neighbor",
+ NULL,
+};
+const static char* const nbr_incomplete_mpls_nodes[] =
+{
+ "mpls-adj-incomplete",
+ NULL,
+};
+
+const static char* const * const nbr_incomplete_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = nbr_incomplete_ip4_nodes,
+ [DPO_PROTO_IP6] = nbr_incomplete_ip6_nodes,
+ [DPO_PROTO_MPLS] = nbr_incomplete_mpls_nodes,
+};
+
+void
+adj_nbr_module_init (void)
+{
+ dpo_register(DPO_ADJACENCY,
+ &adj_nbr_dpo_vft,
+ nbr_nodes);
+ dpo_register(DPO_ADJACENCY_INCOMPLETE,
+ &adj_nbr_incompl_dpo_vft,
+ nbr_incomplete_nodes);
+}
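
To illustrate the walk API implemented above: when a neighbour entry (e.g. ARP) is learned, the owner can visit every adjacency that shares that next-hop on the interface - one per link-type - and install the completed rewrite on each. A minimal sketch; the context type and function names are hypothetical:

    #include <vnet/adj/adj_nbr.h>

    typedef struct { u8 *rewrite; } my_nbr_ctx_t; /* hypothetical context */

    static adj_walk_rc_t
    my_mk_complete_cb (adj_index_t ai, void *arg)
    {
        my_nbr_ctx_t *ctx = arg;

        /* dup: adj_nbr_update_rewrite() vec_free's the rewrite it is given */
        adj_nbr_update_rewrite (ai, ADJ_NBR_REWRITE_FLAG_COMPLETE,
                                vec_dup (ctx->rewrite));
        return (ADJ_WALK_RC_CONTINUE);
    }

    static void
    my_nbr_learned (u32 sw_if_index, const ip4_address_t * peer, u8 * rewrite)
    {
        my_nbr_ctx_t ctx = { .rewrite = rewrite };

        /* visits the ipv4, mpls, ... adjs that resolve via this peer */
        adj_nbr_walk_nh4 (sw_if_index, peer, my_mk_complete_cb, &ctx);
    }
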
diff --git a/src/vnet/adj/adj_nbr.h b/src/vnet/adj/adj_nbr.h
new file mode 100644
index 00000000000..293766b8519
--- /dev/null
+++ b/src/vnet/adj/adj_nbr.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief
+ * Neighbour Adjacency sub-type. These adjs represent an L3 peer on a
+ * connected link.
+ */
+
+#ifndef __ADJ_NBR_H__
+#define __ADJ_NBR_H__
+
+#include <vnet/vnet.h>
+#include <vnet/adj/adj_types.h>
+#include <vnet/fib/fib_node.h>
+#include <vnet/dpo/dpo.h>
+
+/**
+ * @brief
+ * Add (and lock) a new or lock an existing neighbour adjacency
+ *
+ * @param nh_proto
+ * The protocol for the next-hop address (v4 or v6)
+ *
+ * @param link_type
+ * A description of the protocol of the packets that will forward
+ * through this adj. On an ethernet interface this is the MAC header's
+ * ether-type
+ *
+ * @param nh_addr
+ * The address of the next-hop/peer to send the packet to
+ *
+ * @param sw_if_index
+ * The interface on which the peer resides
+ */
+extern adj_index_t adj_nbr_add_or_lock(fib_protocol_t nh_proto,
+ vnet_link_t link_type,
+ const ip46_address_t *nh_addr,
+ u32 sw_if_index);
+
+/**
+ * @brief
+ * Add (and lock) a new or lock an existing neighbour adjacency
+ *
+ * @param nh_proto
+ * The protocol for the next-hop address (v4 or v6)
+ *
+ * @param link_type
+ * A description of the protocol of the packets that will forward
+ * through this adj. On an ethernet interface this is the MAC header's
+ * ether-type
+ *
+ * @param nh_addr
+ * The address of the next-hop/peer to send the packet to
+ *
+ * @param sw_if_index
+ * The interface on which the peer resides
+ *
+ * @param rewrite
+ * The rewrite to prepend to packets
+ */
+extern adj_index_t adj_nbr_add_or_lock_w_rewrite(fib_protocol_t nh_proto,
+ vnet_link_t link_type,
+ const ip46_address_t *nh_addr,
+ u32 sw_if_index,
+ u8 *rewrite);
+/**
+ * @brief When adding a rewrite to an adjacency these are flags that
+ * apply to that rewrite
+ */
+typedef enum adj_nbr_rewrite_flag_t_
+{
+ ADJ_NBR_REWRITE_FLAG_NONE,
+
+ /**
+ * An indication that the rewrite is incomplete, i.e. that it describes the
+ * ARP/ND rewrite when probing.
+ */
+ ADJ_NBR_REWRITE_FLAG_INCOMPLETE = ADJ_NBR_REWRITE_FLAG_NONE,
+
+ /**
+ * An indication that the rewrite is complete, i.e. that it fully describes
+ * the link-layer addressing for the desintation.
+ * The opposite of this is an incomplete rewrite that describes the ARP/ND
+ * rewrite when probing.
+ */
+ ADJ_NBR_REWRITE_FLAG_COMPLETE = (1 << 0),
+} adj_nbr_rewrite_flag_t;
+
+/**
+ * @brief
+ * Update the rewrite string for an existing adjacency.
+ *
+ * @param adj_index
+ *  The index of the adj to update
+ *
+ * @param flags
+ *  Flags that apply to the rewrite (complete/incomplete)
+ *
+ * @param rewrite
+ *  The new rewrite
+ */
+extern void adj_nbr_update_rewrite(adj_index_t adj_index,
+ adj_nbr_rewrite_flag_t flags,
+ u8 *rewrite);
+
+/**
+ * @brief
+ * Format an incomplete neighbour (ARP) adjacency
+ */
+extern u8* format_adj_nbr_incomplete(u8* s, va_list *ap);
+
+/**
+ * @brief
+ * Format a neighbour (REWRITE) adjacency
+ */
+extern u8* format_adj_nbr(u8* s, va_list *ap);
+
+/**
+ * @brief Walk the neighbour Adjacencies on a given interface
+ */
+extern void adj_nbr_walk (u32 sw_if_index,
+ fib_protocol_t adj_nh_proto,
+ adj_walk_cb_t cb,
+ void *ctx);
+/**
+ * @brief Walk the neighbour Adjacencies on a given interface with a given next-hop
+ */
+void
+adj_nbr_walk_nh (u32 sw_if_index,
+ fib_protocol_t adj_nh_proto,
+ const ip46_address_t *nh,
+ adj_walk_cb_t cb,
+ void *ctx);
+
+/**
+ * @brief Walk adjacencies on a link with a given v4 next-hop.
+ * That is, visit the adjacencies with different link types.
+ */
+void
+adj_nbr_walk_nh4 (u32 sw_if_index,
+ const ip4_address_t *addr,
+ adj_walk_cb_t cb,
+ void *ctx);
+
+/**
+ * @brief Walk adjacencies on a link with a given v6 next-hop.
+ * That is, visit the adjacencies with different link types.
+ */
+void
+adj_nbr_walk_nh6 (u32 sw_if_index,
+ const ip6_address_t *addr,
+ adj_walk_cb_t cb,
+ void *ctx);
+
+/**
+ * @brief
+ * Module initialisation
+ */
+extern void adj_nbr_module_init(void);
+
+/**
+ * @brief
+ *  Return the size of the adjacency database, for testing purposes
+ */
+extern u32 adj_nbr_db_size(void);
+
+#endif
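
By way of usage: a client resolving a v4 peer over an interface calls adj_nbr_add_or_lock(); the first call creates an incomplete (ARP) adjacency and triggers the interface-specific rewrite update, while later calls for the same (proto, link-type, next-hop, interface) key just take another lock. A sketch, with illustrative names:

    #include <vnet/adj/adj_nbr.h>

    static adj_index_t
    my_resolve_peer (u32 sw_if_index, const ip4_address_t * peer)
    {
        ip46_address_t nh = { .ip4 = *peer };

        /* create-or-lock; balance with adj_unlock() when done */
        return (adj_nbr_add_or_lock (FIB_PROTOCOL_IP4, VNET_LINK_IP4,
                                     &nh, sw_if_index));
    }
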
diff --git a/src/vnet/adj/adj_rewrite.c b/src/vnet/adj/adj_rewrite.c
new file mode 100644
index 00000000000..7d792557724
--- /dev/null
+++ b/src/vnet/adj/adj_rewrite.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/adj/adj.h>
+#include <vnet/adj/adj_internal.h>
+
+/**
+ * adj_rewrite_add_and_lock
+ *
+ * A rewrite sub-type has the rewrite string provided, but no key
+ */
+adj_index_t
+adj_rewrite_add_and_lock (fib_protocol_t nh_proto,
+ vnet_link_t link_type,
+ u32 sw_if_index,
+ u8 *rewrite)
+{
+ ip_adjacency_t *adj;
+
+ adj = adj_alloc(nh_proto);
+
+ adj->lookup_next_index = IP_LOOKUP_NEXT_REWRITE;
+ memset(&adj->sub_type.nbr.next_hop, 0, sizeof(adj->sub_type.nbr.next_hop));
+ adj->ia_link = link_type;
+ adj->ia_nh_proto = nh_proto;
+ adj->rewrite_header.sw_if_index = sw_if_index;
+
+ ASSERT(NULL != rewrite);
+
+ vnet_rewrite_for_sw_interface(vnet_get_main(),
+ link_type,
+ adj->rewrite_header.sw_if_index,
+ adj_get_rewrite_node(link_type),
+ rewrite,
+ &adj->rewrite_header,
+ sizeof (adj->rewrite_data));
+
+ adj_lock(adj_get_index(adj));
+
+ return (adj_get_index(adj));
+}
diff --git a/src/vnet/adj/adj_rewrite.h b/src/vnet/adj/adj_rewrite.h
new file mode 100644
index 00000000000..25e6bba8868
--- /dev/null
+++ b/src/vnet/adj/adj_rewrite.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief
+ * A rewrite adjacency has no key, and thus cannot be 'found' from the
+ * FIB resolution code. The client therefore needs to manage these adjacencies.
+ */
+
+#ifndef __ADJ_REWRITE_H__
+#define __ADJ_REWRITE_H__
+
+#include <vnet/adj/adj_types.h>
+
+/**
+ * @brief
+ * Add (and lock) a new rewrite adjacency
+ *
+ * @param nh_proto
+ * The protocol for the next-hop address (v4 or v6)
+ *
+ * @param link_type
+ * A description of the protocol of the packets that will forward
+ * through this adj. On an ethernet interface this is the MAC header's
+ * ether-type
+ *
+ * @param sw_if_index
+ * The interface on which the peer resides
+ *
+ * @param rewrite
+ * The rewrite to prepend to packets
+ */
+extern adj_index_t adj_rewrite_add_and_lock(fib_protocol_t nh_proto,
+ vnet_link_t link_type,
+ u32 sw_if_index,
+ u8 *rewrite);
+
+#endif
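
Because a rewrite adjacency is keyless, the caller must retain the returned index itself; nothing else can find it again. A sketch of the expected lifecycle, with hypothetical names:

    #include <vnet/adj/adj.h>
    #include <vnet/adj/adj_rewrite.h>

    static adj_index_t my_ai = ADJ_INDEX_INVALID; /* owner-held handle */

    static void
    my_feature_enable (u32 sw_if_index, u8 * l2_rewrite)
    {
        /* keyless: stash the index, it cannot be looked up later */
        my_ai = adj_rewrite_add_and_lock (FIB_PROTOCOL_IP4, VNET_LINK_IP4,
                                          sw_if_index, l2_rewrite);
    }

    static void
    my_feature_disable (void)
    {
        if (ADJ_INDEX_INVALID != my_ai)
        {
            adj_unlock (my_ai); /* last unlock frees the adj */
            my_ai = ADJ_INDEX_INVALID;
        }
    }
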
diff --git a/src/vnet/adj/adj_types.h b/src/vnet/adj/adj_types.h
new file mode 100644
index 00000000000..cf90c08418d
--- /dev/null
+++ b/src/vnet/adj/adj_types.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ADJ_TYPES_H__
+#define __ADJ_TYPES_H__
+
+#include <vnet/vnet.h>
+
+/**
+ * @brief An index for adjacencies.
+ * Alas 'C' is not typesafe enough to b0rk when a u32 is used instead of
+ * an adj_index_t. However, for us humans, we can glean much more intent
+ * from the declaration
+ * foo bar(adj_index_t t);
+ * than we can from
+ * foo bar(u32 t);
+ */
+typedef u32 adj_index_t;
+
+/**
+ * @brief Invalid ADJ index - used when no adj is known
+ * likewise blazoned capitals INVALID speak volumes where ~0 does not.
+ */
+#define ADJ_INDEX_INVALID ((u32)~0)
+
+/**
+ * @brief return codes from a adjacency walker callback function
+ */
+typedef enum adj_walk_rc_t_
+{
+ ADJ_WALK_RC_STOP,
+ ADJ_WALK_RC_CONTINUE,
+} adj_walk_rc_t;
+
+/**
+ * @brief Call back function when walking adjacencies
+ */
+typedef adj_walk_rc_t (*adj_walk_cb_t)(adj_index_t ai,
+ void *ctx);
+
+#endif
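
A trivial adj_walk_cb_t instance, counting the adjacencies visited (note that the bihash-backed walks in adj_nbr.c do not yet honour ADJ_WALK_RC_STOP - see the FIXME there):

    static adj_walk_rc_t
    my_count_cb (adj_index_t ai, void *ctx)
    {
        u32 *count = ctx; /* caller passes a counter as the context */

        (*count)++;
        return (ADJ_WALK_RC_CONTINUE);
    }
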
diff --git a/src/vnet/api_errno.h b/src/vnet/api_errno.h
new file mode 100644
index 00000000000..65e3e59121e
--- /dev/null
+++ b/src/vnet/api_errno.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vnet_api_errno_h
+#define included_vnet_api_errno_h
+
+#define foreach_vnet_api_error \
+_(UNSPECIFIED, -1, "Unspecified Error") \
+_(INVALID_SW_IF_INDEX, -2, "Invalid sw_if_index") \
+_(NO_SUCH_FIB, -3, "No such FIB / VRF") \
+_(NO_SUCH_INNER_FIB, -4, "No such inner FIB / VRF") \
+_(NO_SUCH_LABEL, -5, "No such label") \
+_(NO_SUCH_ENTRY, -6, "No such entry") \
+_(INVALID_VALUE, -7, "Invalid value") \
+_(INVALID_VALUE_2, -8, "Invalid value #2") \
+_(UNIMPLEMENTED, -9, "Unimplemented") \
+_(INVALID_SW_IF_INDEX_2, -10, "Invalid sw_if_index #2") \
+_(SYSCALL_ERROR_1, -11, "System call error #1") \
+_(SYSCALL_ERROR_2, -12, "System call error #2") \
+_(SYSCALL_ERROR_3, -13, "System call error #3") \
+_(SYSCALL_ERROR_4, -14, "System call error #4") \
+_(SYSCALL_ERROR_5, -15, "System call error #5") \
+_(SYSCALL_ERROR_6, -16, "System call error #6") \
+_(SYSCALL_ERROR_7, -17, "System call error #7") \
+_(SYSCALL_ERROR_8, -18, "System call error #8") \
+_(SYSCALL_ERROR_9, -19, "System call error #9") \
+_(SYSCALL_ERROR_10, -20, "System call error #10")                      \
+_(FEATURE_DISABLED, -30, "Feature disabled by configuration") \
+_(INVALID_REGISTRATION, -31, "Invalid registration") \
+_(NEXT_HOP_NOT_IN_FIB, -50, "Next hop not in FIB") \
+_(UNKNOWN_DESTINATION, -51, "Unknown destination") \
+_(PREFIX_MATCHES_NEXT_HOP, -52, "Prefix matches next hop") \
+_(NEXT_HOP_NOT_FOUND_MP, -53, "Next hop not found (multipath)") \
+_(NO_MATCHING_INTERFACE, -54, "No matching interface for probe") \
+_(INVALID_VLAN, -55, "Invalid VLAN") \
+_(VLAN_ALREADY_EXISTS, -56, "VLAN subif already exists") \
+_(INVALID_SRC_ADDRESS, -57, "Invalid src address") \
+_(INVALID_DST_ADDRESS, -58, "Invalid dst address") \
+_(ADDRESS_LENGTH_MISMATCH, -59, "Address length mismatch") \
+_(ADDRESS_NOT_FOUND_FOR_INTERFACE, -60, "Address not found for interface") \
+_(ADDRESS_NOT_LINK_LOCAL, -61, "Address not link-local") \
+_(IP6_NOT_ENABLED, -62, "ip6 not enabled") \
+_(ADDRESS_MATCHES_INTERFACE_ADDRESS, -63, "Address matches interface address") \
+_(IN_PROGRESS, 10, "Operation in progress") \
+_(NO_SUCH_NODE, -63, "No such graph node") \
+_(NO_SUCH_NODE2, -64, "No such graph node #2") \
+_(NO_SUCH_TABLE, -65, "No such table") \
+_(NO_SUCH_TABLE2, -66, "No such table #2") \
+_(NO_SUCH_TABLE3, -67, "No such table #3") \
+_(SUBIF_ALREADY_EXISTS, -68, "Subinterface already exists") \
+_(SUBIF_CREATE_FAILED, -69, "Subinterface creation failed") \
+_(INVALID_MEMORY_SIZE, -70, "Invalid memory size requested") \
+_(INVALID_INTERFACE, -71, "Invalid interface") \
+_(INVALID_VLAN_TAG_COUNT, -72, "Invalid number of tags for requested operation") \
+_(INVALID_ARGUMENT, -73, "Invalid argument") \
+_(UNEXPECTED_INTF_STATE, -74, "Unexpected interface state") \
+_(TUNNEL_EXIST, -75, "Tunnel already exists") \
+_(INVALID_DECAP_NEXT, -76, "Invalid decap-next") \
+_(RESPONSE_NOT_READY, -77, "Response not ready") \
+_(NOT_CONNECTED, -78, "Not connected to the data plane") \
+_(IF_ALREADY_EXISTS, -79, "Interface already exists") \
+_(BOND_SLAVE_NOT_ALLOWED, -80, "Operation not allowed on slave of BondEthernet") \
+_(VALUE_EXIST, -81, "Value already exists") \
+_(SAME_SRC_DST, -82, "Source and destination are the same") \
+_(IP6_MULTICAST_ADDRESS_NOT_PRESENT, -83, "IP6 multicast address required") \
+_(SR_POLICY_NAME_NOT_PRESENT, -84, "Segment routing policy name required") \
+_(NOT_RUNNING_AS_ROOT, -85, "Not running as root") \
+_(ALREADY_CONNECTED, -86, "Connection to the data plane already exists") \
+_(UNSUPPORTED_JNI_VERSION, -87, "Unsupported JNI version") \
+_(FAILED_TO_ATTACH_TO_JAVA_THREAD, -88, "Failed to attach to Java thread") \
+_(INVALID_WORKER, -89, "Invalid worker thread") \
+_(LISP_DISABLED, -90, "LISP is disabled") \
+_(CLASSIFY_TABLE_NOT_FOUND, -91, "Classify table not found") \
+_(INVALID_EID_TYPE, -92, "Unsupported LISP EID type") \
+_(CANNOT_CREATE_PCAP_FILE, -93, "Cannot create pcap file") \
+_(INCORRECT_ADJACENCY_TYPE, -94, "Invalid adjacency type for this operation") \
+_(EXCEEDED_NUMBER_OF_RANGES_CAPACITY, -95, "Operation would exceed configured capacity of ranges") \
+_(EXCEEDED_NUMBER_OF_PORTS_CAPACITY, -96, "Operation would exceed capacity of number of ports") \
+_(INVALID_ADDRESS_FAMILY, -97, "Invalid address family") \
+_(INVALID_SUB_SW_IF_INDEX, -98, "Invalid sub-interface sw_if_index") \
+_(TABLE_TOO_BIG, -99, "Table too big") \
+_(CANNOT_ENABLE_DISABLE_FEATURE, -100, "Cannot enable/disable feature") \
+_(BFD_EEXIST, -101, "Duplicate BFD session") \
+_(BFD_NOENT, -102, "No such BFD session")
+
+typedef enum
+{
+#define _(a,b,c) VNET_API_ERROR_##a = (b),
+ foreach_vnet_api_error
+#undef _
+ VNET_API_N_ERROR,
+} vnet_api_error_t;
+
+#endif /* included_vnet_api_errno_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
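
The same foreach list that builds the enum above can be re-expanded wherever the error strings are needed. A sketch of a string helper built that way (an if-chain rather than a switch, so the expansion stays valid even if two entries share a numeric value):

    static inline const char *
    vnet_api_error_to_string (vnet_api_error_t e)
    {
    /* expand the list into a comparison per error code */
    #define _(a,b,c) if (e == VNET_API_ERROR_##a) return (c);
        foreach_vnet_api_error
    #undef _
        return ("unknown vnet_api_error_t");
    }
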
diff --git a/src/vnet/bfd/bfd.api b/src/vnet/bfd/bfd.api
new file mode 100644
index 00000000000..5798ee698ce
--- /dev/null
+++ b/src/vnet/bfd/bfd.api
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \brief Configure BFD feature
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param slow_timer - slow timer (seconds)
+ @param min_tx - desired min tx interval
+ @param min_rx - desired min rx interval
+ @param detect_mult - desired detection multiplier
+*/
+define bfd_set_config {
+ u32 client_index;
+ u32 context;
+ u32 slow_timer;
+ u32 min_tx;
+ u32 min_rx;
+ u8 detect_mult;
+};
+
+/** \brief Configure BFD feature response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define bfd_set_config_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Get BFD configuration
+*/
+define bfd_get_config {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Get BFD configuration response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param slow_timer - slow timer (seconds)
+ @param min_tx - desired min tx interval
+ @param min_rx - desired min rx interval
+ @param detect_mult - desired detection multiplier
+*/
+define bfd_get_config_reply {
+ u32 client_index;
+ u32 context;
+ u32 slow_timer;
+ u32 min_tx;
+ u32 min_rx;
+ u8 detect_mult;
+};
+
+/** \brief Add UDP BFD session on interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - sw index of the interface
+ @param desired_min_tx - desired min transmit interval (microseconds)
+ @param required_min_rx - required min receive interval (microseconds)
+    @param detect_mult - detect multiplier (# of packets missed before the connection goes down)
+ @param local_addr - local address
+ @param peer_addr - peer address
+ @param is_ipv6 - local_addr, peer_addr are IPv6 if non-zero, otherwise IPv4
+*/
+define bfd_udp_add {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 desired_min_tx;
+ u32 required_min_rx;
+ u8 local_addr[16];
+ u8 peer_addr[16];
+ u8 is_ipv6;
+ u8 detect_mult;
+};
+
+/** \brief Add UDP BFD session response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param bs_index - index of the session created
+*/
+define bfd_udp_add_reply {
+ u32 context;
+ i32 retval;
+ u32 bs_index;
+};
+
+/** \brief Delete UDP BFD session on interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - sw index of the interface
+ @param local_addr - local address
+ @param peer_addr - peer address
+ @param is_ipv6 - local_addr, peer_addr are IPv6 if non-zero, otherwise IPv4
+*/
+define bfd_udp_del {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 local_addr[16];
+ u8 peer_addr[16];
+ u8 is_ipv6;
+};
+
+/** \brief Delete UDP BFD session response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define bfd_udp_del_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Get all BFD sessions
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define bfd_udp_session_dump {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief BFD session details structure
+ @param context - sender context, to match reply w/ request
+ @param bs_index - index of the session
+ @param sw_if_index - sw index of the interface
+ @param local_addr - local address
+ @param peer_addr - peer address
+ @param is_ipv6 - local_addr, peer_addr are IPv6 if non-zero, otherwise IPv4
+ @param state - session state
+*/
+define bfd_udp_session_details {
+ u32 context;
+ u32 bs_index;
+ u32 sw_if_index;
+ u8 local_addr[16];
+ u8 peer_addr[16];
+ u8 is_ipv6;
+ u8 state;
+};
+
+/** \brief Set flags of BFD session
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param bs_index - index of the bfd session to set flags on
+ @param admin_up_down - set the admin state, 1 = up, 0 = down
+*/
+define bfd_session_set_flags {
+ u32 client_index;
+ u32 context;
+ u32 bs_index;
+ u8 admin_up_down;
+};
+
+/** \brief Reply to bfd_session_set_flags
+ @param context - sender context which was passed in the request
+ @param retval - return code of the set flags request
+*/
+define bfd_session_set_flags_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Register for BFD events
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param enable_disable - 1 => register for events, 0 => cancel registration
+ @param pid - sender's pid
+*/
+define want_bfd_events
+{
+ u32 client_index;
+ u32 context;
+ u32 enable_disable;
+ u32 pid;
+};
+
+/** \brief Reply for BFD events registration
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define want_bfd_events_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
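
A minimal client-side sketch (illustrative, not part of this patch) of populating the bfd_udp_add message defined above. `q`, `my_client_index`, `sw_if_index` and the two ip4 addresses are placeholders that a client obtains from its API library at connect time; multi-byte fields go on the wire in network byte order:

vl_api_bfd_udp_add_t *mp = vl_msg_api_alloc (sizeof (*mp));
memset (mp, 0, sizeof (*mp));
mp->_vl_msg_id = ntohs (VL_API_BFD_UDP_ADD);
mp->client_index = my_client_index;
mp->context = 42;		/* echoed back in bfd_udp_add_reply */
mp->sw_if_index = clib_host_to_net_u32 (sw_if_index);
mp->desired_min_tx = clib_host_to_net_u32 (100000);	/* 100 ms in us */
mp->required_min_rx = clib_host_to_net_u32 (100000);
mp->detect_mult = 3;		/* u8 - no byte swap */
mp->is_ipv6 = 0;
clib_memcpy (mp->local_addr, &local_ip4, sizeof (local_ip4));
clib_memcpy (mp->peer_addr, &peer_ip4, sizeof (peer_ip4));
vl_msg_api_send_shmem (q, (u8 *) & mp);
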
diff --git a/src/vnet/bfd/bfd_api.c b/src/vnet/bfd/bfd_api.c
new file mode 100644
index 00000000000..126cf29a801
--- /dev/null
+++ b/src/vnet/bfd/bfd_api.c
@@ -0,0 +1,262 @@
+/*
+ *------------------------------------------------------------------
+ * bfd_api.c - bfd api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/bfd/bfd_main.h>
+#include <vnet/bfd/bfd_api.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(BFD_UDP_ADD, bfd_udp_add) \
+_(BFD_UDP_DEL, bfd_udp_del) \
+_(BFD_UDP_SESSION_DUMP, bfd_udp_session_dump) \
+_(BFD_SESSION_SET_FLAGS, bfd_session_set_flags) \
+_(WANT_BFD_EVENTS, want_bfd_events)
+
+pub_sub_handler (bfd_events, BFD_EVENTS);
+
+static void
+vl_api_bfd_udp_add_t_handler (vl_api_bfd_udp_add_t * mp)
+{
+ vl_api_bfd_udp_add_reply_t *rmp;
+ int rv;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ ip46_address_t local_addr;
+ memset (&local_addr, 0, sizeof (local_addr));
+ ip46_address_t peer_addr;
+ memset (&peer_addr, 0, sizeof (peer_addr));
+ if (mp->is_ipv6)
+ {
+ clib_memcpy (&local_addr.ip6, mp->local_addr, sizeof (local_addr.ip6));
+ clib_memcpy (&peer_addr.ip6, mp->peer_addr, sizeof (peer_addr.ip6));
+ }
+ else
+ {
+ clib_memcpy (&local_addr.ip4, mp->local_addr, sizeof (local_addr.ip4));
+ clib_memcpy (&peer_addr.ip4, mp->peer_addr, sizeof (peer_addr.ip4));
+ }
+
+ rv = bfd_udp_add_session (clib_net_to_host_u32 (mp->sw_if_index),
+ clib_net_to_host_u32 (mp->desired_min_tx),
+ clib_net_to_host_u32 (mp->required_min_rx),
+ mp->detect_mult, &local_addr, &peer_addr);
+
+ BAD_SW_IF_INDEX_LABEL;
+ REPLY_MACRO (VL_API_BFD_UDP_ADD_REPLY);
+}
+
+static void
+vl_api_bfd_udp_del_t_handler (vl_api_bfd_udp_del_t * mp)
+{
+ vl_api_bfd_udp_del_reply_t *rmp;
+ int rv;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ ip46_address_t local_addr;
+ memset (&local_addr, 0, sizeof (local_addr));
+ ip46_address_t peer_addr;
+ memset (&peer_addr, 0, sizeof (peer_addr));
+ if (mp->is_ipv6)
+ {
+ clib_memcpy (&local_addr.ip6, mp->local_addr, sizeof (local_addr.ip6));
+ clib_memcpy (&peer_addr.ip6, mp->peer_addr, sizeof (peer_addr.ip6));
+ }
+ else
+ {
+ clib_memcpy (&local_addr.ip4, mp->local_addr, sizeof (local_addr.ip4));
+ clib_memcpy (&peer_addr.ip4, mp->peer_addr, sizeof (peer_addr.ip4));
+ }
+
+ rv =
+ bfd_udp_del_session (clib_net_to_host_u32 (mp->sw_if_index), &local_addr,
+ &peer_addr);
+
+ BAD_SW_IF_INDEX_LABEL;
+ REPLY_MACRO (VL_API_BFD_UDP_DEL_REPLY);
+}
+
+void
+send_bfd_udp_session_details (unix_shared_memory_queue_t * q, u32 context,
+ bfd_session_t * bs)
+{
+ if (bs->transport != BFD_TRANSPORT_UDP4 &&
+ bs->transport != BFD_TRANSPORT_UDP6)
+ {
+ return;
+ }
+
+ vl_api_bfd_udp_session_details_t *mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_BFD_UDP_SESSION_DETAILS);
+ mp->context = context;
+ mp->bs_index = clib_host_to_net_u32 (bs->bs_idx);
+ mp->state = bs->local_state;
+ bfd_udp_session_t *bus = &bs->udp;
+ bfd_udp_key_t *key = &bus->key;
+ mp->sw_if_index = clib_host_to_net_u32 (key->sw_if_index);
+ mp->is_ipv6 = !(ip46_address_is_ip4 (&key->local_addr));
+ if (mp->is_ipv6)
+ {
+ clib_memcpy (mp->local_addr, &key->local_addr,
+ sizeof (key->local_addr));
+ clib_memcpy (mp->peer_addr, &key->peer_addr, sizeof (key->peer_addr));
+ }
+ else
+ {
+ clib_memcpy (mp->local_addr, key->local_addr.ip4.data,
+ sizeof (key->local_addr.ip4.data));
+ clib_memcpy (mp->peer_addr, key->peer_addr.ip4.data,
+ sizeof (key->peer_addr.ip4.data));
+ }
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+void
+bfd_event (bfd_main_t * bm, bfd_session_t * bs)
+{
+ vpe_api_main_t *vam = &vpe_api_main;
+ vpe_client_registration_t *reg;
+ unix_shared_memory_queue_t *q;
+ /* *INDENT-OFF* */
+ pool_foreach (reg, vam->bfd_events_registrations, ({
+ q = vl_api_client_index_to_input_queue (reg->client_index);
+ if (q)
+ {
+ switch (bs->transport)
+ {
+ case BFD_TRANSPORT_UDP4:
+ /* fallthrough */
+ case BFD_TRANSPORT_UDP6:
+ send_bfd_udp_session_details (q, 0, bs);
+ }
+ }
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_bfd_udp_session_dump_t_handler (vl_api_bfd_udp_session_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+
+ if (q == 0)
+ return;
+
+ bfd_session_t *bs = NULL;
+ /* *INDENT-OFF* */
+ pool_foreach (bs, bfd_main.sessions, ({
+ if (bs->transport == BFD_TRANSPORT_UDP4 ||
+ bs->transport == BFD_TRANSPORT_UDP6)
+ send_bfd_udp_session_details (q, mp->context, bs);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_bfd_session_set_flags_t_handler (vl_api_bfd_session_set_flags_t * mp)
+{
+ vl_api_bfd_session_set_flags_reply_t *rmp;
+ int rv;
+
+ rv =
+ bfd_session_set_flags (clib_net_to_host_u32 (mp->bs_index),
+ mp->admin_up_down);
+
+ REPLY_MACRO (VL_API_BFD_SESSION_SET_FLAGS_REPLY);
+}
+
+
+/*
+ * bfd_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_bfd;
+#undef _
+}
+
+static clib_error_t *
+bfd_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (bfd_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
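
For reference, the `_(N,n)` registration macro in bfd_api_hookup expands the BFD_UDP_ADD entry of foreach_vpe_api_msg to:

vl_msg_api_set_handlers (VL_API_BFD_UDP_ADD, "bfd_udp_add",
			 vl_api_bfd_udp_add_t_handler,
			 vl_noop_handler,
			 vl_api_bfd_udp_add_t_endian,
			 vl_api_bfd_udp_add_t_print,
			 sizeof (vl_api_bfd_udp_add_t), 1);
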
diff --git a/src/vnet/bfd/bfd_api.h b/src/vnet/bfd/bfd_api.h
new file mode 100644
index 00000000000..cfcd04f3f50
--- /dev/null
+++ b/src/vnet/bfd/bfd_api.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief BFD API declarations
+ */
+#ifndef __included_bfd_api_h__
+#define __included_bfd_api_h__
+
+#include <vnet/api_errno.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/bfd/bfd_udp.h>
+
+vnet_api_error_t bfd_udp_add_session (u32 sw_if_index, u32 desired_min_tx_us,
+ u32 required_min_rx_us, u8 detect_mult,
+ const ip46_address_t * local_addr,
+ const ip46_address_t * peer_addr);
+
+vnet_api_error_t bfd_udp_del_session (u32 sw_if_index,
+ const ip46_address_t * local_addr,
+ const ip46_address_t * peer_addr);
+
+vnet_api_error_t bfd_session_set_flags (u32 bs_index, u8 admin_up_down);
+
+#endif /* __included_bfd_api_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
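
A hypothetical in-process caller (not part of this patch) creating an IPv4 session with 100 ms intervals and a detection multiplier of 3; unlike the binary API, the intervals here are passed in host byte order:

ip46_address_t local, peer;
memset (&local, 0, sizeof (local));
memset (&peer, 0, sizeof (peer));
local.ip4.as_u32 = clib_host_to_net_u32 (0x0a000001);	/* 10.0.0.1 */
peer.ip4.as_u32 = clib_host_to_net_u32 (0x0a000002);	/* 10.0.0.2 */
vnet_api_error_t rv = bfd_udp_add_session (sw_if_index, 100000, 100000,
					   3, &local, &peer);
if (rv)
  clib_warning ("bfd_udp_add_session failed: %d", rv);
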
diff --git a/src/vnet/bfd/bfd_debug.h b/src/vnet/bfd/bfd_debug.h
new file mode 100644
index 00000000000..707ebab2ddd
--- /dev/null
+++ b/src/vnet/bfd/bfd_debug.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief BFD debug macros
+ */
+#ifndef __included_bfd_debug_h__
+#define __included_bfd_debug_h__
+
+/* controls debug prints */
+#define BFD_DEBUG (0)
+
+#if BFD_DEBUG
+#define BFD_DEBUG_FILE_DEF \
+ static const char *__file = NULL; \
+ { \
+ __file = strrchr (__FILE__, '/'); \
+ if (__file) \
+ { \
+ ++__file; \
+ } \
+ else \
+ { \
+ __file = __FILE__; \
+ } \
+ }
+
+#define BFD_DBG(fmt, ...) \
+ do \
+ { \
+ BFD_DEBUG_FILE_DEF \
+ static u8 *_s = NULL; \
+ vlib_main_t *vm = vlib_get_main (); \
+ _s = format (_s, "%6.02f:DBG:%s:%d:%s():" fmt, vlib_time_now (vm), \
+ __file, __LINE__, __func__, ##__VA_ARGS__); \
+ printf ("%.*s\n", vec_len (_s), _s); \
+ vec_reset_length (_s); \
+ } \
+ while (0);
+
+#define BFD_ERR(fmt, ...) \
+ do \
+ { \
+ BFD_DEBUG_FILE_DEF \
+ static u8 *_s = NULL; \
+ vlib_main_t *vm = vlib_get_main (); \
+ _s = format (_s, "%6.02f:ERR:%s:%d:%s():" fmt, vlib_time_now (vm), \
+ __file, __LINE__, __func__, ##__VA_ARGS__); \
+ printf ("%.*s\n", vec_len (_s), _s); \
+ vec_reset_length (_s); \
+ } \
+ while (0);
+
+#else
+#define BFD_DBG(...)
+#define BFD_ERR(...)
+#endif
+
+#endif /* __included_bfd_debug_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
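
With BFD_DEBUG set to 1, a call such as the one below prints a single line per invocation to stdout; the output shown is illustrative (the timestamp comes from vlib_time_now):

BFD_DBG ("Timeout for bs_idx=%u", bs->bs_idx);
/* =>  12.34:DBG:bfd_main.c:1234:bfd_on_timeout():Timeout for bs_idx=1 */
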
diff --git a/src/vnet/bfd/bfd_doc.md b/src/vnet/bfd/bfd_doc.md
new file mode 100644
index 00000000000..1333ed77b7e
--- /dev/null
+++ b/src/vnet/bfd/bfd_doc.md
@@ -0,0 +1 @@
+TODO
diff --git a/src/vnet/bfd/bfd_main.c b/src/vnet/bfd/bfd_main.c
new file mode 100644
index 00000000000..e25eadfc510
--- /dev/null
+++ b/src/vnet/bfd/bfd_main.c
@@ -0,0 +1,969 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief BFD nodes implementation
+ */
+
+#include <vppinfra/random.h>
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ethernet/packet.h>
+#include <vnet/bfd/bfd_debug.h>
+#include <vnet/bfd/bfd_protocol.h>
+#include <vnet/bfd/bfd_main.h>
+
+static u64
+bfd_us_to_clocks (bfd_main_t * bm, u64 us)
+{
+ return bm->cpu_cps * ((f64) us / USEC_PER_SECOND);
+}
+
+static vlib_node_registration_t bfd_process_node;
+
+typedef enum
+{
+#define F(t, n) BFD_OUTPUT_##t,
+ foreach_bfd_transport (F)
+#undef F
+ BFD_OUTPUT_N_NEXT,
+} bfd_output_next_t;
+
+static u32 bfd_next_index_by_transport[] = {
+#define F(t, n) [BFD_TRANSPORT_##t] = BFD_OUTPUT_##t,
+ foreach_bfd_transport (F)
+#undef F
+};
+
+/*
+ * We actually send all bfd pkts to the "error" node after scanning
+ * them, so the graph node has only one next-index. The "error-drop"
+ * node automatically bumps our per-node packet counters for us.
+ */
+typedef enum
+{
+ BFD_INPUT_NEXT_NORMAL,
+ BFD_INPUT_N_NEXT,
+} bfd_input_next_t;
+
+static void bfd_on_state_change (bfd_main_t * bm, bfd_session_t * bs, u64 now,
+ int handling_wakeup);
+
+static void
+bfd_set_defaults (bfd_main_t * bm, bfd_session_t * bs)
+{
+ bs->local_state = BFD_STATE_down;
+ bs->local_diag = BFD_DIAG_CODE_no_diag;
+ bs->remote_state = BFD_STATE_down;
+ bs->local_demand = 0;
+ bs->remote_discr = 0;
+ bs->desired_min_tx_us = BFD_DEFAULT_DESIRED_MIN_TX_US;
+ bs->desired_min_tx_clocks = bfd_us_to_clocks (bm, bs->desired_min_tx_us);
+ bs->remote_min_rx_us = 1;
+ bs->remote_demand = 0;
+}
+
+static void
+bfd_set_diag (bfd_session_t * bs, bfd_diag_code_e code)
+{
+ if (bs->local_diag != code)
+ {
+ BFD_DBG ("set local_diag, bs_idx=%d: '%d:%s'", bs->bs_idx, code,
+ bfd_diag_code_string (code));
+ bs->local_diag = code;
+ }
+}
+
+static void
+bfd_set_state (bfd_main_t * bm, bfd_session_t * bs,
+ bfd_state_e new_state, int handling_wakeup)
+{
+ if (bs->local_state != new_state)
+ {
+ BFD_DBG ("Change state, bs_idx=%d: %s->%s", bs->bs_idx,
+ bfd_state_string (bs->local_state),
+ bfd_state_string (new_state));
+ bs->local_state = new_state;
+ bfd_on_state_change (bm, bs, clib_cpu_time_now (), handling_wakeup);
+ }
+}
+
+static void
+bfd_recalc_tx_interval (bfd_main_t * bm, bfd_session_t * bs)
+{
+ if (!bs->local_demand)
+ {
+ bs->transmit_interval_clocks =
+ clib_max (bs->desired_min_tx_clocks, bs->remote_min_rx_clocks);
+ }
+ else
+ {
+ /* TODO */
+ }
+ BFD_DBG ("Recalculated transmit interval %lu clocks/%.2fs",
+ bs->transmit_interval_clocks,
+ bs->transmit_interval_clocks / bm->cpu_cps);
+}
+
+static void
+bfd_calc_next_tx (bfd_main_t * bm, bfd_session_t * bs, u64 now)
+{
+ if (!bs->local_demand)
+ {
+ if (bs->local_detect_mult > 1)
+ {
+ /* common case - 75-100% of transmit interval */
+ bs->tx_timeout_clocks = now +
+ (1 - .25 * (random_f64 (&bm->random_seed))) *
+ bs->transmit_interval_clocks;
+ if (bs->tx_timeout_clocks < now)
+ {
+ /* huh, we've missed it already, skip the missed events */
+ const u64 missed =
+ (now - bs->tx_timeout_clocks) / bs->transmit_interval_clocks;
+ BFD_ERR ("Missed %lu transmit events (now is %lu, calc "
+ "tx_timeout is %lu)!",
+ missed, now, bs->tx_timeout_clocks);
+ bs->tx_timeout_clocks +=
+ (missed + 1) * bs->transmit_interval_clocks;
+ }
+ }
+ else
+ {
+ /* special case - 75-90% of transmit interval */
+ bs->tx_timeout_clocks =
+ now +
+ (.9 - .15 * (random_f64 (&bm->random_seed))) *
+ bs->transmit_interval_clocks;
+ if (bs->tx_timeout_clocks < now)
+ {
+ /* huh, we've missed it already, skip the missed events */
+ const u64 missed =
+ (now - bs->tx_timeout_clocks) / bs->transmit_interval_clocks;
+ BFD_ERR ("Missed %lu transmit events (now is %lu, calc "
+ "tx_timeout is %lu)!",
+ missed, now, bs->tx_timeout_clocks);
+ bs->tx_timeout_clocks +=
+ (missed + 1) * bs->transmit_interval_clocks;
+ }
+ }
+ }
+ else
+ {
+ /* TODO */
+ }
+ if (bs->tx_timeout_clocks)
+ {
+ BFD_DBG ("Next transmit in %lu clocks/%.02fs@%lu",
+ bs->tx_timeout_clocks - now,
+ (bs->tx_timeout_clocks - now) / bm->cpu_cps,
+ bs->tx_timeout_clocks);
+ }
+}
+
+static void
+bfd_recalc_detection_time (bfd_main_t * bm, bfd_session_t * bs)
+{
+ if (!bs->local_demand)
+ {
+ bs->detection_time_clocks =
+ bs->remote_detect_mult *
+ bfd_us_to_clocks (bm, clib_max (bs->required_min_rx_us,
+ bs->remote_desired_min_tx_us));
+ }
+ else
+ {
+ bs->detection_time_clocks =
+ bs->local_detect_mult *
+ bfd_us_to_clocks (bm,
+ clib_max (bs->desired_min_tx_us,
+ bs->remote_min_rx_us));
+ }
+ BFD_DBG ("Recalculated detection time %lu clocks/%.2fs",
+ bs->detection_time_clocks,
+ bs->detection_time_clocks / bm->cpu_cps);
+}
+
+static void
+bfd_set_timer (bfd_main_t * bm, bfd_session_t * bs, u64 now,
+ int handling_wakeup)
+{
+ u64 next = 0;
+ u64 rx_timeout = 0;
+ if (BFD_STATE_up == bs->local_state)
+ {
+ rx_timeout = bs->last_rx_clocks + bs->detection_time_clocks;
+ }
+ if (bs->tx_timeout_clocks && rx_timeout)
+ {
+ next = clib_min (bs->tx_timeout_clocks, rx_timeout);
+ }
+ else if (bs->tx_timeout_clocks)
+ {
+ next = bs->tx_timeout_clocks;
+ }
+ else if (rx_timeout)
+ {
+ next = rx_timeout;
+ }
+ BFD_DBG ("bs_idx=%u, tx_timeout=%lu, rx_timeout=%lu, next=%s", bs->bs_idx,
+ bs->tx_timeout_clocks, rx_timeout,
+ next == bs->tx_timeout_clocks ? "tx" : "rx");
+ /* sometimes the wheel expires an event a bit sooner than requested, account
+ for that here */
+ if (next && (now + bm->wheel_inaccuracy > bs->wheel_time_clocks ||
+ next < bs->wheel_time_clocks || !bs->wheel_time_clocks))
+ {
+ bs->wheel_time_clocks = next;
+ BFD_DBG ("timing_wheel_insert(%p, %lu (%ld clocks/%.2fs in the "
+ "future), %u);",
+ &bm->wheel, bs->wheel_time_clocks,
+ (i64) bs->wheel_time_clocks - clib_cpu_time_now (),
+ (i64) (bs->wheel_time_clocks - clib_cpu_time_now ()) /
+ bm->cpu_cps, bs->bs_idx);
+ timing_wheel_insert (&bm->wheel, bs->wheel_time_clocks, bs->bs_idx);
+ if (!handling_wakeup)
+ {
+ vlib_process_signal_event (bm->vlib_main,
+ bm->bfd_process_node_index,
+ BFD_EVENT_RESCHEDULE, bs->bs_idx);
+ }
+ }
+}
+
+static void
+bfd_set_desired_min_tx (bfd_main_t * bm, bfd_session_t * bs, u64 now,
+ u32 desired_min_tx_us, int handling_wakeup)
+{
+ bs->desired_min_tx_us = desired_min_tx_us;
+ bs->desired_min_tx_clocks = bfd_us_to_clocks (bm, bs->desired_min_tx_us);
+ BFD_DBG ("Set desired min tx to %uus/%lu clocks/%.2fs",
+ bs->desired_min_tx_us, bs->desired_min_tx_clocks,
+ bs->desired_min_tx_clocks / bm->cpu_cps);
+ bfd_recalc_detection_time (bm, bs);
+ bfd_recalc_tx_interval (bm, bs);
+ bfd_calc_next_tx (bm, bs, now);
+ bfd_set_timer (bm, bs, now, handling_wakeup);
+}
+
+static void
+bfd_set_remote_required_min_rx (bfd_main_t * bm, bfd_session_t * bs,
+ u64 now,
+ u32 remote_required_min_rx_us,
+ int handling_wakeup)
+{
+ bs->remote_min_rx_us = remote_required_min_rx_us;
+ bs->remote_min_rx_clocks = bfd_us_to_clocks (bm, bs->remote_min_rx_us);
+ BFD_DBG ("Set remote min rx to %uus/%lu clocks/%.2fs", bs->remote_min_rx_us,
+ bs->remote_min_rx_clocks, bs->remote_min_rx_clocks / bm->cpu_cps);
+ bfd_recalc_detection_time (bm, bs);
+ bfd_recalc_tx_interval (bm, bs);
+ bfd_calc_next_tx (bm, bs, now);
+ bfd_set_timer (bm, bs, now, handling_wakeup);
+}
+
+void
+bfd_session_start (bfd_main_t * bm, bfd_session_t * bs)
+{
+ BFD_DBG ("%U", format_bfd_session, bs);
+ bfd_recalc_tx_interval (bm, bs);
+ vlib_process_signal_event (bm->vlib_main, bm->bfd_process_node_index,
+ BFD_EVENT_NEW_SESSION, bs->bs_idx);
+}
+
+vnet_api_error_t
+bfd_del_session (uword bs_idx)
+{
+ const bfd_main_t *bm = &bfd_main;
+ if (!pool_is_free_index (bm->sessions, bs_idx))
+ {
+ bfd_session_t *bs = pool_elt_at_index (bm->sessions, bs_idx);
+ pool_put (bm->sessions, bs);
+ return 0;
+ }
+ else
+ {
+ BFD_ERR ("no such session");
+ return VNET_API_ERROR_BFD_NOENT;
+ }
+ return 0;
+}
+
+const char *
+bfd_diag_code_string (bfd_diag_code_e diag)
+{
+#define F(n, t, s) \
+ case BFD_DIAG_CODE_NAME (t): \
+ return s;
+ switch (diag)
+ {
+ foreach_bfd_diag_code (F)}
+ return "UNKNOWN";
+#undef F
+}
+
+const char *
+bfd_state_string (bfd_state_e state)
+{
+#define F(n, t, s) \
+ case BFD_STATE_NAME (t): \
+ return s;
+ switch (state)
+ {
+ foreach_bfd_state (F)}
+ return "UNKNOWN";
+#undef F
+}
+
+vnet_api_error_t
+bfd_session_set_flags (u32 bs_idx, u8 admin_up_down)
+{
+ bfd_main_t *bm = &bfd_main;
+ if (pool_is_free_index (bm->sessions, bs_idx))
+ {
+ BFD_ERR ("invalid bs_idx=%u", bs_idx);
+ return VNET_API_ERROR_BFD_NOENT;
+ }
+ bfd_session_t *bs = pool_elt_at_index (bm->sessions, bs_idx);
+ if (admin_up_down)
+ {
+ bfd_set_state (bm, bs, BFD_STATE_down, 0);
+ }
+ else
+ {
+ bfd_set_diag (bs, BFD_DIAG_CODE_neighbor_sig_down);
+ bfd_set_state (bm, bs, BFD_STATE_admin_down, 0);
+ }
+ return 0;
+}
+
+u8 *
+bfd_input_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ const bfd_input_trace_t *t = va_arg (*args, bfd_input_trace_t *);
+ const bfd_pkt_t *pkt = (bfd_pkt_t *) t->data;
+ if (t->len > STRUCT_SIZE_OF (bfd_pkt_t, head))
+ {
+ s = format (s, "BFD v%u, diag=%u(%s), state=%u(%s),\n"
+ " flags=(P:%u, F:%u, C:%u, A:%u, D:%u, M:%u), detect_mult=%u, "
+ "length=%u\n",
+ bfd_pkt_get_version (pkt), bfd_pkt_get_diag_code (pkt),
+ bfd_diag_code_string (bfd_pkt_get_diag_code (pkt)),
+ bfd_pkt_get_state (pkt),
+ bfd_state_string (bfd_pkt_get_state (pkt)),
+ bfd_pkt_get_poll (pkt), bfd_pkt_get_final (pkt),
+ bfd_pkt_get_control_plane_independent (pkt),
+ bfd_pkt_get_auth_present (pkt), bfd_pkt_get_demand (pkt),
+ bfd_pkt_get_multipoint (pkt), pkt->head.detect_mult,
+ pkt->head.length);
+ if (t->len >= sizeof (bfd_pkt_t)
+ && pkt->head.length >= sizeof (bfd_pkt_t))
+ {
+ s = format (s, " my discriminator: %u\n", pkt->my_disc);
+ s = format (s, " your discriminator: %u\n", pkt->your_disc);
+ s = format (s, " desired min tx interval: %u\n",
+ clib_net_to_host_u32 (pkt->des_min_tx));
+ s = format (s, " required min rx interval: %u\n",
+ clib_net_to_host_u32 (pkt->req_min_rx));
+ s = format (s, " required min echo rx interval: %u\n",
+ clib_net_to_host_u32 (pkt->req_min_echo_rx));
+ }
+ }
+
+ return s;
+}
+
+static void
+bfd_on_state_change (bfd_main_t * bm, bfd_session_t * bs, u64 now,
+ int handling_wakeup)
+{
+ BFD_DBG ("State changed: %U", format_bfd_session, bs);
+ bfd_event (bm, bs);
+ switch (bs->local_state)
+ {
+ case BFD_STATE_admin_down:
+ bfd_set_desired_min_tx (bm, bs, now,
+ clib_max (bs->config_desired_min_tx_us,
+ BFD_DEFAULT_DESIRED_MIN_TX_US),
+ handling_wakeup);
+ break;
+ case BFD_STATE_down:
+ bfd_set_desired_min_tx (bm, bs, now,
+ clib_max (bs->config_desired_min_tx_us,
+ BFD_DEFAULT_DESIRED_MIN_TX_US),
+ handling_wakeup);
+ break;
+ case BFD_STATE_init:
+ bfd_set_desired_min_tx (bm, bs, now,
+ clib_max (bs->config_desired_min_tx_us,
+ BFD_DEFAULT_DESIRED_MIN_TX_US),
+ handling_wakeup);
+ break;
+ case BFD_STATE_up:
+ bfd_set_desired_min_tx (bm, bs, now, bs->config_desired_min_tx_us,
+ handling_wakeup);
+ break;
+ }
+}
+
+static void
+bfd_add_transport_layer (vlib_main_t * vm, vlib_buffer_t * b,
+ bfd_session_t * bs)
+{
+ switch (bs->transport)
+ {
+ case BFD_TRANSPORT_UDP4:
+ /* fallthrough */
+ case BFD_TRANSPORT_UDP6:
+ BFD_DBG ("Transport bfd via udp, bs_idx=%u", bs->bs_idx);
+ bfd_add_udp_transport (vm, b, &bs->udp);
+ break;
+ }
+}
+
+static vlib_buffer_t *
+bfd_create_frame (vlib_main_t * vm, vlib_node_runtime_t * rt,
+ bfd_session_t * bs)
+{
+ u32 bi;
+ if (vlib_buffer_alloc (vm, &bi, 1) != 1)
+ {
+ clib_warning ("buffer allocation failure");
+ return NULL;
+ }
+
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ ASSERT (b->current_data == 0);
+
+ u32 *to_next;
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, rt, bfd_next_index_by_transport[bs->transport],
+ to_next, n_left_to_next);
+
+ to_next[0] = bi;
+ n_left_to_next -= 1;
+
+ vlib_put_next_frame (vm, rt, bfd_next_index_by_transport[bs->transport],
+ n_left_to_next);
+ return b;
+}
+
+static void
+bfd_init_control_frame (vlib_buffer_t * b, bfd_session_t * bs)
+{
+ bfd_pkt_t *pkt = vlib_buffer_get_current (b);
+ const u32 bfd_length = 24;
+ memset (pkt, 0, sizeof (*pkt));
+
+ bfd_pkt_set_version (pkt, 1);
+ bfd_pkt_set_diag_code (pkt, bs->local_diag);
+ bfd_pkt_set_state (pkt, bs->local_state);
+ if (bs->local_demand && BFD_STATE_up == bs->local_state &&
+ BFD_STATE_up == bs->remote_state)
+ {
+ bfd_pkt_set_demand (pkt);
+ }
+ pkt->head.detect_mult = bs->local_detect_mult;
+  pkt->head.length = bfd_length;	/* single-byte field - no byte swap */
+ pkt->my_disc = bs->local_discr;
+ pkt->your_disc = bs->remote_discr;
+ pkt->des_min_tx = clib_host_to_net_u32 (bs->desired_min_tx_us);
+ pkt->req_min_rx = clib_host_to_net_u32 (bs->required_min_rx_us);
+ pkt->req_min_echo_rx = clib_host_to_net_u32 (0); /* FIXME */
+ b->current_length = bfd_length;
+}
+
+static void
+bfd_send_periodic (vlib_main_t * vm, vlib_node_runtime_t * rt,
+ bfd_main_t * bm, bfd_session_t * bs, u64 now,
+ int handling_wakeup)
+{
+ if (!bs->remote_min_rx_us)
+ {
+ BFD_DBG
+ ("bfd.RemoteMinRxInterval is zero, not sending periodic control "
+ "frame");
+ return;
+ }
+ /* FIXME
+ A system MUST NOT periodically transmit BFD Control packets if Demand
+ mode is active on the remote system (bfd.RemoteDemandMode is 1,
+ bfd.SessionState is Up, and bfd.RemoteSessionState is Up) and a Poll
+ Sequence is not being transmitted.
+ */
+ /* sometimes the wheel expires an event a bit sooner than requested, account
+ for that here */
+ if (now + bm->wheel_inaccuracy >= bs->tx_timeout_clocks)
+ {
+ BFD_DBG ("Send periodic control frame for bs_idx=%lu", bs->bs_idx);
+ vlib_buffer_t *b = bfd_create_frame (vm, rt, bs);
+ if (!b)
+ {
+ return;
+ }
+ bfd_init_control_frame (b, bs);
+ bfd_add_transport_layer (vm, b, bs);
+ bfd_calc_next_tx (bm, bs, now);
+ }
+ else
+ {
+ BFD_DBG
+ ("No need to send control frame now, now is %lu, tx_timeout is %lu",
+ now, bs->tx_timeout_clocks);
+ }
+ bfd_set_timer (bm, bs, now, handling_wakeup);
+}
+
+void
+bfd_send_final (vlib_main_t * vm, vlib_buffer_t * b, bfd_session_t * bs)
+{
+ BFD_DBG ("Send final control frame for bs_idx=%lu", bs->bs_idx);
+ bfd_init_control_frame (b, bs);
+ bfd_pkt_set_final (vlib_buffer_get_current (b));
+ bfd_add_transport_layer (vm, b, bs);
+}
+
+static void
+bfd_check_rx_timeout (bfd_main_t * bm, bfd_session_t * bs, u64 now,
+ int handling_wakeup)
+{
+ /* sometimes the wheel expires an event a bit sooner than requested, account
+ for that here */
+ if (bs->last_rx_clocks + bs->detection_time_clocks <=
+ now + bm->wheel_inaccuracy)
+ {
+ BFD_DBG ("Rx timeout, session goes down");
+ bfd_set_diag (bs, BFD_DIAG_CODE_det_time_exp);
+ bfd_set_state (bm, bs, BFD_STATE_down, handling_wakeup);
+ }
+}
+
+void
+bfd_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * rt, bfd_main_t * bm,
+ bfd_session_t * bs, u64 now)
+{
+ BFD_DBG ("Timeout for bs_idx=%lu", bs->bs_idx);
+ switch (bs->local_state)
+ {
+ case BFD_STATE_admin_down:
+ BFD_ERR ("Unexpected timeout when in %s state",
+ bfd_state_string (bs->local_state));
+ abort ();
+ break;
+ case BFD_STATE_down:
+ bfd_send_periodic (vm, rt, bm, bs, now, 1);
+ break;
+ case BFD_STATE_init:
+ BFD_ERR ("Unexpected timeout when in %s state",
+ bfd_state_string (bs->local_state));
+ abort ();
+ break;
+ case BFD_STATE_up:
+ bfd_check_rx_timeout (bm, bs, now, 1);
+ bfd_send_periodic (vm, rt, bm, bs, now, 1);
+ break;
+ }
+}
+
+/*
+ * bfd process node function
+ */
+static uword
+bfd_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ bfd_main_t *bm = &bfd_main;
+ u32 *expired = 0;
+ uword event_type, *event_data = 0;
+
+ /* So we can send events to the bfd process */
+ bm->bfd_process_node_index = bfd_process_node.index;
+
+ while (1)
+ {
+ u64 now = clib_cpu_time_now ();
+ u64 next_expire = timing_wheel_next_expiring_elt_time (&bm->wheel);
+ BFD_DBG ("timing_wheel_next_expiring_elt_time(%p) returns %lu",
+ &bm->wheel, next_expire);
+ if ((i64) next_expire < 0)
+ {
+ BFD_DBG ("wait for event without timeout");
+ (void) vlib_process_wait_for_event (vm);
+ event_type = vlib_process_get_events (vm, &event_data);
+ }
+ else
+ {
+ f64 timeout = ((i64) next_expire - (i64) now) / bm->cpu_cps;
+ BFD_DBG ("wait for event with timeout %.02f", timeout);
+ if (timeout < 0)
+ {
+ BFD_DBG ("negative timeout, already expired, skipping wait");
+ event_type = ~0;
+ }
+ else
+ {
+ (void) vlib_process_wait_for_event_or_clock (vm, timeout);
+ event_type = vlib_process_get_events (vm, &event_data);
+ }
+ }
+ now = clib_cpu_time_now ();
+ switch (event_type)
+ {
+ case ~0: /* no events => timeout */
+ /* nothing to do here */
+ break;
+ case BFD_EVENT_RESCHEDULE:
+ /* nothing to do here - reschedule is done automatically after
+ * each event or timeout */
+ break;
+ case BFD_EVENT_NEW_SESSION:
+ do
+ {
+ bfd_session_t *bs =
+ pool_elt_at_index (bm->sessions, *event_data);
+ bfd_send_periodic (vm, rt, bm, bs, now, 1);
+ }
+ while (0);
+ break;
+ default:
+ clib_warning ("BUG: event type 0x%wx", event_type);
+ break;
+ }
+ BFD_DBG ("advancing wheel, now is %lu", now);
+ BFD_DBG ("timing_wheel_advance (%p, %lu, %p, 0);", &bm->wheel, now,
+ expired);
+ expired = timing_wheel_advance (&bm->wheel, now, expired, 0);
+ BFD_DBG ("Expired %d elements", vec_len (expired));
+ u32 *p = NULL;
+ vec_foreach (p, expired)
+ {
+ const u32 bs_idx = *p;
+ if (!pool_is_free_index (bm->sessions, bs_idx))
+ {
+ bfd_session_t *bs = pool_elt_at_index (bm->sessions, bs_idx);
+ bfd_on_timeout (vm, rt, bm, bs, now);
+ }
+ }
+ if (expired)
+ {
+ _vec_len (expired) = 0;
+ }
+ if (event_data)
+ {
+ _vec_len (event_data) = 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * bfd process node declaration
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (bfd_process_node, static) = {
+ .function = bfd_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "bfd-process",
+ .n_next_nodes = BFD_OUTPUT_N_NEXT,
+ .next_nodes =
+ {
+#define F(t, n) [BFD_OUTPUT_##t] = n,
+ foreach_bfd_transport (F)
+#undef F
+ },
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+bfd_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
+{
+ // bfd_main_t *bm = &bfd_main;
+ // vnet_hw_interface_t *hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ if (!(flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP))
+ {
+ /* TODO */
+ }
+ return 0;
+}
+
+VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (bfd_sw_interface_up_down);
+
+static clib_error_t *
+bfd_hw_interface_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+{
+ // bfd_main_t *bm = &bfd_main;
+ if (flags & VNET_HW_INTERFACE_FLAG_LINK_UP)
+ {
+ /* TODO */
+ }
+ return 0;
+}
+
+VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION (bfd_hw_interface_up_down);
+
+/*
+ * setup function
+ */
+static clib_error_t *
+bfd_main_init (vlib_main_t * vm)
+{
+ bfd_main_t *bm = &bfd_main;
+ bm->random_seed = random_default_seed ();
+ bm->vlib_main = vm;
+ bm->vnet_main = vnet_get_main ();
+ memset (&bm->wheel, 0, sizeof (bm->wheel));
+ bm->cpu_cps = 2590000000; // vm->clib_time.clocks_per_second;
+ BFD_DBG ("cps is %.2f", bm->cpu_cps);
+ const u64 now = clib_cpu_time_now ();
+ timing_wheel_init (&bm->wheel, now, bm->cpu_cps);
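+  /* 2 << log2_clocks_per_bin equals 2^(log2_clocks_per_bin + 1), i.e. allow
+   * for up to two timing-wheel bins of scheduling slop (see bfd_set_timer) */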
+ bm->wheel_inaccuracy = 2 << bm->wheel.log2_clocks_per_bin;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (bfd_main_init);
+
+bfd_session_t *
+bfd_get_session (bfd_main_t * bm, bfd_transport_t t)
+{
+ bfd_session_t *result;
+ pool_get (bm->sessions, result);
+ memset (result, 0, sizeof (*result));
+ result->bs_idx = result - bm->sessions;
+ result->transport = t;
+ result->local_discr = random_u32 (&bm->random_seed);
+ bfd_set_defaults (bm, result);
+ hash_set (bm->session_by_disc, result->local_discr, result->bs_idx);
+ return result;
+}
+
+void
+bfd_put_session (bfd_main_t * bm, bfd_session_t * bs)
+{
+ hash_unset (bm->session_by_disc, bs->local_discr);
+ pool_put (bm->sessions, bs);
+}
+
+bfd_session_t *
+bfd_find_session_by_idx (bfd_main_t * bm, uword bs_idx)
+{
+ if (!pool_is_free_index (bm->sessions, bs_idx))
+ {
+ return pool_elt_at_index (bm->sessions, bs_idx);
+ }
+ return NULL;
+}
+
+bfd_session_t *
+bfd_find_session_by_disc (bfd_main_t * bm, u32 disc)
+{
+ uword *p = hash_get (bfd_main.session_by_disc, disc);
+ if (p)
+ {
+ return pool_elt_at_index (bfd_main.sessions, *p);
+ }
+ return NULL;
+}
+
+/**
+ * @brief verify bfd packet - common checks
+ *
+ * @param pkt
+ *
+ * @return 1 if bfd packet is valid
+ */
+int
+bfd_verify_pkt_common (const bfd_pkt_t * pkt)
+{
+ if (1 != bfd_pkt_get_version (pkt))
+ {
+ BFD_ERR ("BFD verification failed - unexpected version: '%d'",
+ bfd_pkt_get_version (pkt));
+ return 0;
+ }
+ if (pkt->head.length < sizeof (bfd_pkt_t) ||
+ (bfd_pkt_get_auth_present (pkt) &&
+ pkt->head.length < sizeof (bfd_pkt_with_auth_t)))
+ {
+ BFD_ERR ("BFD verification failed - unexpected length: '%d' (auth "
+ "present: %d)",
+ pkt->head.length, bfd_pkt_get_auth_present (pkt));
+ return 0;
+ }
+ if (!pkt->head.detect_mult)
+ {
+ BFD_ERR ("BFD verification failed - unexpected detect-mult: '%d'",
+ pkt->head.detect_mult);
+ return 0;
+ }
+ if (bfd_pkt_get_multipoint (pkt))
+ {
+ BFD_ERR ("BFD verification failed - unexpected multipoint: '%d'",
+ bfd_pkt_get_multipoint (pkt));
+ return 0;
+ }
+ if (!pkt->my_disc)
+ {
+ BFD_ERR ("BFD verification failed - unexpected my-disc: '%d'",
+ pkt->my_disc);
+ return 0;
+ }
+ if (!pkt->your_disc)
+ {
+ const u8 pkt_state = bfd_pkt_get_state (pkt);
+ if (pkt_state != BFD_STATE_down && pkt_state != BFD_STATE_admin_down)
+ {
+ BFD_ERR ("BFD verification failed - unexpected state: '%s' "
+ "(your-disc is zero)", bfd_state_string (pkt_state));
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/**
+ * @brief verify bfd packet - authentication
+ *
+ * @param pkt
+ *
+ * @return 1 if bfd packet is valid
+ */
+int
+bfd_verify_pkt_session (const bfd_pkt_t * pkt, u16 pkt_size,
+ const bfd_session_t * bs)
+{
+ const bfd_pkt_with_auth_t *with_auth = (bfd_pkt_with_auth_t *) pkt;
+ if (!bfd_pkt_get_auth_present (pkt))
+ {
+ if (pkt_size > sizeof (*pkt))
+ {
+ BFD_ERR ("BFD verification failed - unexpected packet size '%d' "
+ "(auth not present)", pkt_size);
+ return 0;
+ }
+ }
+ else
+ {
+ if (!with_auth->auth.type)
+ {
+ BFD_ERR ("BFD verification failed - unexpected auth type: '%d'",
+ with_auth->auth.type);
+ return 0;
+ }
+ /* TODO FIXME - implement the actual verification */
+ }
+ return 1;
+}
+
+void
+bfd_consume_pkt (bfd_main_t * bm, const bfd_pkt_t * pkt, u32 bs_idx)
+{
+ bfd_session_t *bs = bfd_find_session_by_idx (bm, bs_idx);
+ if (!bs)
+ {
+ return;
+ }
+ BFD_DBG ("Scanning bfd packet, bs_idx=%d", bs->bs_idx);
+ bs->remote_discr = pkt->my_disc;
+ bs->remote_state = bfd_pkt_get_state (pkt);
+ bs->remote_demand = bfd_pkt_get_demand (pkt);
+ u64 now = clib_cpu_time_now ();
+ bs->last_rx_clocks = now;
+ bs->remote_desired_min_tx_us = clib_net_to_host_u32 (pkt->des_min_tx);
+ bs->remote_detect_mult = pkt->head.detect_mult;
+ bfd_set_remote_required_min_rx (bm, bs, now,
+ clib_net_to_host_u32 (pkt->req_min_rx), 0);
+ /* FIXME
+ If the Required Min Echo RX Interval field is zero, the
+ transmission of Echo packets, if any, MUST cease.
+
+ If a Poll Sequence is being transmitted by the local system and
+ the Final (F) bit in the received packet is set, the Poll Sequence
+ MUST be terminated.
+ */
+ /* FIXME 6.8.2 */
+ /* FIXME 6.8.4 */
+ if (BFD_STATE_admin_down == bs->local_state)
+ return;
+ if (BFD_STATE_admin_down == bs->remote_state)
+ {
+ bfd_set_diag (bs, BFD_DIAG_CODE_neighbor_sig_down);
+ bfd_set_state (bm, bs, BFD_STATE_down, 0);
+ }
+ else if (BFD_STATE_down == bs->local_state)
+ {
+ if (BFD_STATE_down == bs->remote_state)
+ {
+ bfd_set_state (bm, bs, BFD_STATE_init, 0);
+ }
+ else if (BFD_STATE_init == bs->remote_state)
+ {
+ bfd_set_state (bm, bs, BFD_STATE_up, 0);
+ }
+ }
+ else if (BFD_STATE_init == bs->local_state)
+ {
+ if (BFD_STATE_up == bs->remote_state ||
+ BFD_STATE_init == bs->remote_state)
+ {
+ bfd_set_state (bm, bs, BFD_STATE_up, 0);
+ }
+ }
+ else /* BFD_STATE_up == bs->local_state */
+ {
+ if (BFD_STATE_down == bs->remote_state)
+ {
+ bfd_set_diag (bs, BFD_DIAG_CODE_neighbor_sig_down);
+ bfd_set_state (bm, bs, BFD_STATE_down, 0);
+ }
+ }
+}
+
+u8 *
+format_bfd_session (u8 * s, va_list * args)
+{
+ const bfd_session_t *bs = va_arg (*args, bfd_session_t *);
+ return format (s, "BFD(%u): bfd.SessionState=%s, "
+ "bfd.RemoteSessionState=%s, "
+ "bfd.LocalDiscr=%u, "
+ "bfd.RemoteDiscr=%u, "
+ "bfd.LocalDiag=%s, "
+ "bfd.DesiredMinTxInterval=%u, "
+ "bfd.RequiredMinRxInterval=%u, "
+ "bfd.RemoteMinRxInterval=%u, "
+ "bfd.DemandMode=%s, "
+ "bfd.RemoteDemandMode=%s, "
+ "bfd.DetectMult=%u, ",
+ bs->bs_idx, bfd_state_string (bs->local_state),
+ bfd_state_string (bs->remote_state), bs->local_discr,
+ bs->remote_discr, bfd_diag_code_string (bs->local_diag),
+ bs->desired_min_tx_us, bs->required_min_rx_us,
+ bs->remote_min_rx_us, (bs->local_demand ? "yes" : "no"),
+ (bs->remote_demand ? "yes" : "no"), bs->local_detect_mult);
+}
+
+bfd_main_t bfd_main;
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
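
A compact sketch (illustrative, assumes bfd_main.h) of the receive-path state transitions that bfd_consume_pkt implements per RFC 5880 section 6.8.6; diagnostics, the local AdminDown early-out and timer bookkeeping are omitted:

static bfd_state_e
bfd_next_local_state (bfd_state_e local, bfd_state_e remote)
{
  if (BFD_STATE_admin_down == remote)
    return BFD_STATE_down;	/* neighbor signaled session down */
  if (BFD_STATE_down == local)
    return (BFD_STATE_down == remote) ? BFD_STATE_init :
      (BFD_STATE_init == remote) ? BFD_STATE_up : local;
  if (BFD_STATE_init == local)
    return (BFD_STATE_init == remote || BFD_STATE_up == remote) ?
      BFD_STATE_up : local;
  /* BFD_STATE_up */
  return (BFD_STATE_down == remote) ? BFD_STATE_down : local;
}
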
diff --git a/src/vnet/bfd/bfd_main.h b/src/vnet/bfd/bfd_main.h
new file mode 100644
index 00000000000..c72ea92a70f
--- /dev/null
+++ b/src/vnet/bfd/bfd_main.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief BFD global declarations
+ */
+#ifndef __included_bfd_main_h__
+#define __included_bfd_main_h__
+
+#include <vppinfra/timing_wheel.h>
+#include <vnet/vnet.h>
+#include <vnet/bfd/bfd_protocol.h>
+#include <vnet/bfd/bfd_udp.h>
+
+#define foreach_bfd_transport(F) \
+ F (UDP4, "ip4-rewrite") \
+ F (UDP6, "ip6-rewrite")
+
+typedef enum
+{
+#define F(t, n) BFD_TRANSPORT_##t,
+ foreach_bfd_transport (F)
+#undef F
+} bfd_transport_t;
+
+#define foreach_bfd_mode(F) \
+ F (asynchronous) \
+ F (demand)
+
+typedef enum
+{
+#define F(x) BFD_MODE_##x,
+ foreach_bfd_mode (F)
+#undef F
+} bfd_mode_e;
+
+typedef struct
+{
+ /* index in bfd_main.sessions pool */
+ u32 bs_idx;
+
+ /* session state */
+ bfd_state_e local_state;
+
+ /* local diagnostics */
+ bfd_diag_code_e local_diag;
+
+ /* remote session state */
+ bfd_state_e remote_state;
+
+ /* local discriminator */
+ u32 local_discr;
+
+ /* remote discriminator */
+ u32 remote_discr;
+
+ /* configured desired min tx interval (microseconds) */
+ u32 config_desired_min_tx_us;
+
+ /* desired min tx interval (microseconds) */
+ u32 desired_min_tx_us;
+
+ /* desired min tx interval (clocks) */
+ u64 desired_min_tx_clocks;
+
+  /* required min rx interval (microseconds) */
+ u32 required_min_rx_us;
+
+ /* remote min rx interval (microseconds) */
+ u32 remote_min_rx_us;
+
+ /* remote min rx interval (clocks) */
+ u64 remote_min_rx_clocks;
+
+  /* remote desired min tx interval (microseconds) */
+ u32 remote_desired_min_tx_us;
+
+ /* 1 if in demand mode, 0 otherwise */
+ u8 local_demand;
+
+ /* 1 if remote system sets demand mode, 0 otherwise */
+ u8 remote_demand;
+
+ /* local detect multiplier */
+ u8 local_detect_mult;
+
+ /* remote detect multiplier */
+ u8 remote_detect_mult;
+
+ /* set to value of timer in timing wheel, 0 if never set */
+ u64 wheel_time_clocks;
+
+ /* transmit interval */
+ u64 transmit_interval_clocks;
+
+ /* next time at which to transmit a packet */
+ u64 tx_timeout_clocks;
+
+ /* timestamp of last packet received */
+ u64 last_rx_clocks;
+
+ /* detection time */
+ u64 detection_time_clocks;
+
+ /* transport type for this session */
+ bfd_transport_t transport;
+
+ union
+ {
+ bfd_udp_session_t udp;
+ };
+} bfd_session_t;
+
+typedef struct
+{
+ u32 client_index;
+ u32 client_pid;
+} event_subscriber_t;
+
+typedef struct
+{
+ /* pool of bfd sessions context data */
+ bfd_session_t *sessions;
+
+ /* timing wheel for scheduling timeouts */
+ timing_wheel_t wheel;
+
+ /* timing wheel inaccuracy, in clocks */
+ u64 wheel_inaccuracy;
+
+ /* hashmap - bfd session by discriminator */
+ u32 *session_by_disc;
+
+ /* background process node index */
+ u32 bfd_process_node_index;
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+
+ /* cpu clocks per second */
+ f64 cpu_cps;
+
+ /* for generating random numbers */
+ u32 random_seed;
+
+} bfd_main_t;
+
+extern bfd_main_t bfd_main;
+
+/* Packet counters */
+#define foreach_bfd_error(F) \
+ F (NONE, "good bfd packets (processed)") \
+ F (BAD, "invalid bfd packets") \
+ F (DISABLED, "bfd packets received on disabled interfaces")
+
+typedef enum
+{
+#define F(sym, str) BFD_ERROR_##sym,
+ foreach_bfd_error (F)
+#undef F
+ BFD_N_ERROR,
+} bfd_error_t;
+
+/* bfd packet trace capture */
+typedef struct
+{
+ u32 len;
+ u8 data[400];
+} bfd_input_trace_t;
+
+typedef enum
+{
+ BFD_EVENT_RESCHEDULE = 1,
+ BFD_EVENT_NEW_SESSION,
+} bfd_process_event_e;
+
+u8 *bfd_input_format_trace (u8 * s, va_list * args);
+
+bfd_session_t *bfd_get_session (bfd_main_t * bm, bfd_transport_t t);
+void bfd_put_session (bfd_main_t * bm, bfd_session_t * bs);
+bfd_session_t *bfd_find_session_by_idx (bfd_main_t * bm, uword bs_idx);
+bfd_session_t *bfd_find_session_by_disc (bfd_main_t * bm, u32 disc);
+void bfd_session_start (bfd_main_t * bm, bfd_session_t * bs);
+void bfd_consume_pkt (bfd_main_t * bm, const bfd_pkt_t * bfd, u32 bs_idx);
+int bfd_verify_pkt_common (const bfd_pkt_t * pkt);
+int bfd_verify_pkt_session (const bfd_pkt_t * pkt, u16 pkt_size,
+ const bfd_session_t * bs);
+void bfd_event (bfd_main_t * bm, bfd_session_t * bs);
+void bfd_send_final (vlib_main_t * vm, vlib_buffer_t * b, bfd_session_t * bs);
+u8 *format_bfd_session (u8 * s, va_list * args);
+
+
+#define USEC_PER_MS 1000LL
+#define USEC_PER_SECOND (1000 * USEC_PER_MS)
+
+/* default, slow transmission interval for BFD packets, per spec at least 1s */
+#define BFD_DEFAULT_DESIRED_MIN_TX_US USEC_PER_SECOND
+
+#endif /* __included_bfd_main_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
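
Worked example of the interval arithmetic in bfd_main.c for asynchronous (non-demand) mode: the transmit interval is max (desired_min_tx, remote_min_rx) and the detection time is remote_detect_mult * max (required_min_rx, remote_desired_min_tx). The numbers below are illustrative:

/* desired_min_tx = 100 ms, remote_min_rx = 300 ms
 *   => transmit interval = max (100, 300) = 300 ms
 * required_min_rx = 100 ms, remote_desired_min_tx = 100 ms,
 * remote_detect_mult = 3
 *   => detection time = 3 * max (100, 100) = 300 ms */
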
diff --git a/src/vnet/bfd/bfd_protocol.c b/src/vnet/bfd/bfd_protocol.c
new file mode 100644
index 00000000000..ede9536f3cf
--- /dev/null
+++ b/src/vnet/bfd/bfd_protocol.c
@@ -0,0 +1,74 @@
+#include <vnet/bfd/bfd_protocol.h>
+
+u8 bfd_pkt_get_version (const bfd_pkt_t *pkt)
+{
+ return pkt->head.vers_diag >> 5;
+}
+
+void bfd_pkt_set_version (bfd_pkt_t *pkt, int version)
+{
+ pkt->head.vers_diag =
+ (version << 5) | (pkt->head.vers_diag & ((1 << 5) - 1));
+}
+
+u8 bfd_pkt_get_diag_code (const bfd_pkt_t *pkt)
+{
+ return pkt->head.vers_diag & ((1 << 5) - 1);
+}
+
+void bfd_pkt_set_diag_code (bfd_pkt_t *pkt, int value)
+{
+ pkt->head.vers_diag =
+ (pkt->head.vers_diag & ~((1 << 5) - 1)) | (value & ((1 << 5) - 1));
+}
+
+u8 bfd_pkt_get_state (const bfd_pkt_t *pkt)
+{
+ return pkt->head.sta_flags >> 6;
+}
+
+void bfd_pkt_set_state (bfd_pkt_t *pkt, int value)
+{
+ pkt->head.sta_flags = (value << 6) | (pkt->head.sta_flags & ((1 << 6) - 1));
+}
+
+u8 bfd_pkt_get_poll (const bfd_pkt_t *pkt)
+{
+ return (pkt->head.sta_flags >> 5) & 1;
+}
+
+void bfd_pkt_set_final (bfd_pkt_t *pkt) { pkt->head.sta_flags |= 1 << 4; }
+
+u8 bfd_pkt_get_final (const bfd_pkt_t *pkt)
+{
+ return (pkt->head.sta_flags >> 4) & 1;
+}
+
+void bfd_pkt_set_poll (bfd_pkt_t *pkt);
+
+u8 bfd_pkt_get_control_plane_independent (const bfd_pkt_t *pkt)
+{
+ return (pkt->head.sta_flags >> 3) & 1;
+}
+
+void bfd_pkt_set_control_plane_independent (bfd_pkt_t *pkt);
+
+u8 bfd_pkt_get_auth_present (const bfd_pkt_t *pkt)
+{
+ return (pkt->head.sta_flags >> 2) & 1;
+}
+
+void bfd_pkt_set_auth_present (bfd_pkt_t *pkt);
+
+u8 bfd_pkt_get_demand (const bfd_pkt_t *pkt)
+{
+ return (pkt->head.sta_flags >> 1) & 1;
+}
+
+void bfd_pkt_set_demand (bfd_pkt_t *pkt) { pkt->head.sta_flags |= 1 << 1; }
+
+u8 bfd_pkt_get_multipoint (const bfd_pkt_t *pkt)
+{
+ return pkt->head.sta_flags & 1;
+}
+
+void bfd_pkt_set_multipoint (bfd_pkt_t *pkt);
diff --git a/src/vnet/bfd/bfd_protocol.h b/src/vnet/bfd/bfd_protocol.h
new file mode 100644
index 00000000000..cf751b3b89a
--- /dev/null
+++ b/src/vnet/bfd/bfd_protocol.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_bfd_protocol_h__
+#define __included_bfd_protocol_h__
+/**
+ * @file
+ * @brief BFD protocol declarations
+ */
+
+#include <vppinfra/types.h>
+#include <vppinfra/clib.h>
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ /*
+ An optional Authentication Section MAY be present:
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Auth Type | Auth Len | Authentication Data... |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ u8 type;
+ u8 len;
+ u8 data[0];
+}) bfd_auth_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ /*
+ The Mandatory Section of a BFD Control packet has the following
+ format:
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |Vers | Diag |Sta|P|F|C|A|D|M| Detect Mult | Length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | My Discriminator |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Your Discriminator |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Desired Min TX Interval |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Required Min RX Interval |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Required Min Echo RX Interval |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ struct
+ {
+ u8 vers_diag;
+ u8 sta_flags;
+ u8 detect_mult;
+ u8 length;
+ } head;
+ u32 my_disc;
+ u32 your_disc;
+ u32 des_min_tx;
+ u32 req_min_rx;
+ u32 req_min_echo_rx;
+}) bfd_pkt_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ bfd_pkt_t pkt;
+ bfd_auth_t auth;
+}) bfd_pkt_with_auth_t;
+/* *INDENT-ON* */
+
+u8 bfd_pkt_get_version (const bfd_pkt_t * pkt);
+void bfd_pkt_set_version (bfd_pkt_t * pkt, int version);
+u8 bfd_pkt_get_diag_code (const bfd_pkt_t * pkt);
+void bfd_pkt_set_diag_code (bfd_pkt_t * pkt, int value);
+u8 bfd_pkt_get_state (const bfd_pkt_t * pkt);
+void bfd_pkt_set_state (bfd_pkt_t * pkt, int value);
+u8 bfd_pkt_get_poll (const bfd_pkt_t * pkt);
+void bfd_pkt_set_final (bfd_pkt_t * pkt);
+u8 bfd_pkt_get_final (const bfd_pkt_t * pkt);
+void bfd_pkt_set_poll (bfd_pkt_t * pkt);
+u8 bfd_pkt_get_control_plane_independent (const bfd_pkt_t * pkt);
+void bfd_pkt_set_control_plane_independent (bfd_pkt_t * pkt);
+u8 bfd_pkt_get_auth_present (const bfd_pkt_t * pkt);
+void bfd_pkt_set_auth_present (bfd_pkt_t * pkt);
+u8 bfd_pkt_get_demand (const bfd_pkt_t * pkt);
+void bfd_pkt_set_demand (bfd_pkt_t * pkt);
+u8 bfd_pkt_get_multipoint (const bfd_pkt_t * pkt);
+void bfd_pkt_set_multipoint (bfd_pkt_t * pkt);
+
+/* BFD diagnostic codes */
+#define foreach_bfd_diag_code(F) \
+ F (0, no_diag, "No Diagnostic") \
+ F (1, det_time_exp, "Control Detection Time Expired") \
+ F (2, echo_failed, "Echo Function Failed") \
+ F (3, neighbor_sig_down, "Neighbor Signaled Session Down") \
+ F (4, fwd_plain_reset, "Forwarding Plane Reset") \
+ F (5, path_down, "Path Down") \
+ F (6, concat_path_down, "Concatenated Path Down") \
+ F (7, admin_down, "Administratively Down") \
+ F (8, reverse_concat_path_down, "Reverse Concatenated Path Down")
+
+#define BFD_DIAG_CODE_NAME(t) BFD_DIAG_CODE_##t
+
+typedef enum
+{
+#define F(n, t, s) BFD_DIAG_CODE_NAME (t) = n,
+ foreach_bfd_diag_code (F)
+#undef F
+} bfd_diag_code_e;
+
+const char *bfd_diag_code_string (bfd_diag_code_e diag);
+
+/* BFD state values */
+#define foreach_bfd_state(F) \
+ F (0, admin_down, "AdminDown") \
+ F (1, down, "Down") \
+ F (2, init, "Init") \
+ F (3, up, "Up")
+
+#define BFD_STATE_NAME(t) BFD_STATE_##t
+
+typedef enum
+{
+#define F(n, t, s) BFD_STATE_NAME (t) = n,
+ foreach_bfd_state (F)
+#undef F
+} bfd_state_e;
+
+const char *bfd_state_string (bfd_state_e state);
+
+#endif /* __included_bfd_protocol_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
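
Layout of the two packed header bytes manipulated by the accessors above (RFC 5880); the sample values are illustrative:

/* vers_diag: | Vers (3 bits) |  Diag (5 bits)  |
 * sta_flags: | Sta (2 bits)  | P|F|C|A|D|M     |
 * e.g. version 1, "No Diagnostic"        => vers_diag == 0x20
 *      state Up (3) with Final bit set   => sta_flags == 0xd0 */
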
diff --git a/src/vnet/bfd/bfd_udp.c b/src/vnet/bfd/bfd_udp.c
new file mode 100644
index 00000000000..3c747d86a10
--- /dev/null
+++ b/src/vnet/bfd/bfd_udp.c
@@ -0,0 +1,639 @@
+#include <vppinfra/types.h>
+#include <vlibmemory/api.h>
+#include <vlib/vlib.h>
+#include <vlib/buffer.h>
+#include <vnet/ip/format.h>
+#include <vnet/ethernet/packet.h>
+#include <vnet/ip/udp_packet.h>
+#include <vnet/ip/lookup.h>
+#include <vnet/ip/icmp46_packet.h>
+#include <vnet/ip/ip4.h>
+#include <vnet/ip/ip6.h>
+#include <vnet/ip/udp.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/adj/adj.h>
+#include <vnet/adj/adj_nbr.h>
+#include <vnet/bfd/bfd_debug.h>
+#include <vnet/bfd/bfd_udp.h>
+#include <vnet/bfd/bfd_main.h>
+#include <vnet/bfd/bfd_api.h>
+
+typedef struct
+{
+ bfd_main_t *bfd_main;
+ /* hashmap - bfd session index by bfd key - used for CLI/API lookup, where
+ * discriminator is unknown */
+ mhash_t bfd_session_idx_by_bfd_key;
+} bfd_udp_main_t;
+
+static vlib_node_registration_t bfd_udp4_input_node;
+static vlib_node_registration_t bfd_udp6_input_node;
+
+bfd_udp_main_t bfd_udp_main;
+
+void bfd_udp_transport_to_buffer (vlib_main_t *vm, vlib_buffer_t *b,
+ bfd_udp_session_t *bus)
+{
+ udp_header_t *udp;
+ u16 udp_length, ip_length;
+ bfd_udp_key_t *key = &bus->key;
+
+ b->flags |= VNET_BUFFER_LOCALLY_ORIGINATED;
+ if (ip46_address_is_ip4 (&key->local_addr))
+ {
+ ip4_header_t *ip4;
+ const size_t data_size = sizeof (*ip4) + sizeof (*udp);
+ vlib_buffer_advance (b, -data_size);
+ ip4 = vlib_buffer_get_current (b);
+ udp = (udp_header_t *)(ip4 + 1);
+ memset (ip4, 0, data_size);
+ ip4->ip_version_and_header_length = 0x45;
+ ip4->ttl = 255;
+ ip4->protocol = IP_PROTOCOL_UDP;
+ ip4->src_address.as_u32 = key->local_addr.ip4.as_u32;
+ ip4->dst_address.as_u32 = key->peer_addr.ip4.as_u32;
+
+ udp->src_port = clib_host_to_net_u16 (50000); /* FIXME */
+ udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_bfd4);
+
+ /* fix ip length, checksum and udp length */
+ ip_length = vlib_buffer_length_in_chain (vm, b);
+
+ ip4->length = clib_host_to_net_u16 (ip_length);
+ ip4->checksum = ip4_header_checksum (ip4);
+
+ udp_length = ip_length - (sizeof (*ip4));
+ udp->length = clib_host_to_net_u16 (udp_length);
+ }
+ else
+ {
+ BFD_ERR ("not implemented");
+ abort ();
+ }
+}
+
+void bfd_add_udp_transport (vlib_main_t *vm, vlib_buffer_t *b,
+ bfd_udp_session_t *bus)
+{
+ vnet_buffer (b)->ip.adj_index[VLIB_RX] = bus->adj_index;
+ vnet_buffer (b)->ip.adj_index[VLIB_TX] = bus->adj_index;
+ bfd_udp_transport_to_buffer (vm, b, bus);
+}
+
+static bfd_session_t *bfd_lookup_session (bfd_udp_main_t *bum,
+ const bfd_udp_key_t *key)
+{
+ uword *p = mhash_get (&bum->bfd_session_idx_by_bfd_key, key);
+ if (p)
+ {
+ return bfd_find_session_by_idx (bum->bfd_main, *p);
+ }
+ return 0;
+}
+
+static vnet_api_error_t
+bfd_udp_add_session_internal (bfd_udp_main_t *bum, u32 sw_if_index,
+ u32 desired_min_tx_us, u32 required_min_rx_us,
+ u8 detect_mult, const ip46_address_t *local_addr,
+ const ip46_address_t *peer_addr)
+{
+ vnet_sw_interface_t *sw_if =
+ vnet_get_sw_interface (vnet_get_main (), sw_if_index);
+ /* get a pool entry and if we end up not needing it, give it back */
+ bfd_transport_t t = BFD_TRANSPORT_UDP4;
+ if (!ip46_address_is_ip4 (local_addr))
+ {
+ t = BFD_TRANSPORT_UDP6;
+ }
+ bfd_session_t *bs = bfd_get_session (bum->bfd_main, t);
+ bfd_udp_session_t *bus = &bs->udp;
+ memset (bus, 0, sizeof (*bus));
+ bfd_udp_key_t *key = &bus->key;
+ key->sw_if_index = sw_if->sw_if_index;
+ key->local_addr.as_u64[0] = local_addr->as_u64[0];
+ key->local_addr.as_u64[1] = local_addr->as_u64[1];
+ key->peer_addr.as_u64[0] = peer_addr->as_u64[0];
+ key->peer_addr.as_u64[1] = peer_addr->as_u64[1];
+ const bfd_session_t *tmp = bfd_lookup_session (bum, key);
+ if (tmp)
+ {
+ BFD_ERR ("duplicate bfd-udp session, existing bs_idx=%d", tmp->bs_idx);
+ bfd_put_session (bum->bfd_main, bs);
+ return VNET_API_ERROR_BFD_EEXIST;
+ }
+ mhash_set (&bum->bfd_session_idx_by_bfd_key, key, bs->bs_idx, NULL);
+ BFD_DBG ("session created, bs_idx=%u, sw_if_index=%d, local=%U, peer=%U",
+ bs->bs_idx, key->sw_if_index, format_ip46_address, &key->local_addr,
+ IP46_TYPE_ANY, format_ip46_address, &key->peer_addr, IP46_TYPE_ANY);
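+  /* lock an adjacency towards the peer; it is released via adj_unlock ()
+   * when the session is deleted */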
+ if (BFD_TRANSPORT_UDP4 == t)
+ {
+ bus->adj_index = adj_nbr_add_or_lock (FIB_PROTOCOL_IP4, VNET_LINK_IP4,
+ &key->peer_addr, key->sw_if_index);
+ BFD_DBG ("adj_nbr_add_or_lock(FIB_PROTOCOL_IP4, VNET_LINK_IP4, %U, %d) "
+ "returns %d",
+ format_ip46_address, &key->peer_addr, IP46_TYPE_ANY,
+ key->sw_if_index, bus->adj_index);
+ }
+ else
+ {
+ bus->adj_index = adj_nbr_add_or_lock (FIB_PROTOCOL_IP6, VNET_LINK_IP6,
+ &key->peer_addr, key->sw_if_index);
+ BFD_DBG ("adj_nbr_add_or_lock(FIB_PROTOCOL_IP6, VNET_LINK_IP6, %U, %d) "
+ "returns %d",
+ format_ip46_address, &key->peer_addr, IP46_TYPE_ANY,
+ key->sw_if_index, bus->adj_index);
+ }
+ bs->config_desired_min_tx_us = desired_min_tx_us;
+ bs->required_min_rx_us = required_min_rx_us;
+ bs->local_detect_mult = detect_mult;
+ bfd_session_start (bum->bfd_main, bs);
+ return 0;
+}
+
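+/* Validate CLI/API input: the interface must exist, both addresses must
+ * belong to the same IP family and the local address must be configured
+ * on the interface. */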
+static vnet_api_error_t
+bfd_udp_validate_api_input (u32 sw_if_index, const ip46_address_t *local_addr,
+ const ip46_address_t *peer_addr)
+{
+ vnet_sw_interface_t *sw_if =
+ vnet_get_sw_interface (vnet_get_main (), sw_if_index);
+ u8 local_ip_valid = 0;
+ ip_interface_address_t *ia = NULL;
+ if (!sw_if)
+ {
+ BFD_ERR ("got NULL sw_if");
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ }
+ if (ip46_address_is_ip4 (local_addr))
+ {
+ if (!ip46_address_is_ip4 (peer_addr))
+ {
+ BFD_ERR ("IP family mismatch");
+ return VNET_API_ERROR_INVALID_ARGUMENT;
+ }
+ ip4_main_t *im = &ip4_main;
+
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (
+ &im->lookup_main, ia, sw_if_index, 0 /* honor unnumbered */, ({
+ ip4_address_t *x =
+ ip_interface_address_get_address (&im->lookup_main, ia);
+ if (x->as_u32 == local_addr->ip4.as_u32)
+ {
+ /* valid address for this interface */
+ local_ip_valid = 1;
+ break;
+ }
+ }));
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ if (ip46_address_is_ip4 (peer_addr))
+ {
+ BFD_ERR ("IP family mismatch");
+ return VNET_API_ERROR_INVALID_ARGUMENT;
+ }
+ ip6_main_t *im = &ip6_main;
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (
+ &im->lookup_main, ia, sw_if_index, 0 /* honor unnumbered */, ({
+ ip6_address_t *x =
+ ip_interface_address_get_address (&im->lookup_main, ia);
+ if (local_addr->ip6.as_u64[0] == x->as_u64[0] &&
+ local_addr->ip6.as_u64[1] == x->as_u64[1])
+ {
+ /* valid address for this interface */
+ local_ip_valid = 1;
+ break;
+ }
+ }));
+ /* *INDENT-ON* */
+ }
+
+ if (!local_ip_valid)
+ {
+ BFD_ERR ("address not found on interface");
+ return VNET_API_ERROR_ADDRESS_NOT_FOUND_FOR_INTERFACE;
+ }
+
+ return 0;
+}
+
+vnet_api_error_t bfd_udp_add_session (u32 sw_if_index, u32 desired_min_tx_us,
+ u32 required_min_rx_us, u8 detect_mult,
+ const ip46_address_t *local_addr,
+ const ip46_address_t *peer_addr)
+{
+ vnet_api_error_t rv =
+ bfd_udp_validate_api_input (sw_if_index, local_addr, peer_addr);
+ if (rv)
+ {
+ return rv;
+ }
+ if (detect_mult < 1)
+ {
+ BFD_ERR ("detect_mult < 1");
+ return VNET_API_ERROR_INVALID_ARGUMENT;
+ }
+ if (desired_min_tx_us < 1)
+ {
+ BFD_ERR ("desired_min_tx_us < 1");
+ return VNET_API_ERROR_INVALID_ARGUMENT;
+ }
+ return bfd_udp_add_session_internal (&bfd_udp_main, sw_if_index,
+ desired_min_tx_us, required_min_rx_us,
+ detect_mult, local_addr, peer_addr);
+}
+
+vnet_api_error_t bfd_udp_del_session (u32 sw_if_index,
+ const ip46_address_t *local_addr,
+ const ip46_address_t *peer_addr)
+{
+ vnet_api_error_t rv =
+ bfd_udp_validate_api_input (sw_if_index, local_addr, peer_addr);
+ if (rv)
+ {
+ return rv;
+ }
+ bfd_udp_main_t *bum = &bfd_udp_main;
+ vnet_sw_interface_t *sw_if =
+ vnet_get_sw_interface (vnet_get_main (), sw_if_index);
+ bfd_udp_key_t key;
+ memset (&key, 0, sizeof (key));
+ key.sw_if_index = sw_if->sw_if_index;
+ key.local_addr.as_u64[0] = local_addr->as_u64[0];
+ key.local_addr.as_u64[1] = local_addr->as_u64[1];
+ key.peer_addr.as_u64[0] = peer_addr->as_u64[0];
+ key.peer_addr.as_u64[1] = peer_addr->as_u64[1];
+ bfd_session_t *tmp = bfd_lookup_session (bum, &key);
+ if (tmp)
+ {
+ BFD_DBG ("free bfd-udp session, bs_idx=%d", tmp->bs_idx);
+ mhash_unset (&bum->bfd_session_idx_by_bfd_key, &key, NULL);
+ adj_unlock (tmp->udp.adj_index);
+ bfd_put_session (bum->bfd_main, tmp);
+ }
+ else
+ {
+ BFD_ERR ("no such session");
+ return VNET_API_ERROR_BFD_NOENT;
+ }
+ return 0;
+}
+
+typedef enum
+{
+ BFD_UDP_INPUT_NEXT_NORMAL,
+ BFD_UDP_INPUT_NEXT_REPLY,
+ BFD_UDP_INPUT_N_NEXT,
+} bfd_udp_input_next_t;
+
+/* Packet counters */
+#define foreach_bfd_udp_error(F) \
+ F (NONE, "good bfd packets (processed)") \
+ F (BAD, "invalid bfd packets") \
+ F (DISABLED, "bfd packets received on disabled interfaces")
+
+#define F(sym, string) static char BFD_UDP_ERR_##sym##_STR[] = string;
+foreach_bfd_udp_error (F);
+#undef F
+
+static char *bfd_udp_error_strings[] = {
+#define F(sym, string) BFD_UDP_ERR_##sym##_STR,
+ foreach_bfd_udp_error (F)
+#undef F
+};
+
+typedef enum
+{
+#define F(sym, str) BFD_UDP_ERROR_##sym,
+ foreach_bfd_udp_error (F)
+#undef F
+ BFD_UDP_N_ERROR,
+} bfd_udp_error_t;
+
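+/* Recover the ip4 and udp headers from the offset saved by the ip4-local
+ * node (vnet_buffer (b)->ip.start_of_ip_header); both outputs are set to
+ * NULL if the saved offset looks bogus. */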
+static void bfd_udp4_find_headers (vlib_buffer_t *b, const ip4_header_t **ip4,
+ const udp_header_t **udp)
+{
+ /* sanity check first */
+ const i32 start = vnet_buffer (b)->ip.start_of_ip_header;
+  if (start < 0 && (size_t) -start > sizeof (b->pre_data))
+ {
+ BFD_ERR ("Start of ip header is before pre_data, ignoring");
+ *ip4 = NULL;
+ *udp = NULL;
+ return;
+ }
+ *ip4 = (ip4_header_t *)(b->data + start);
+ if ((u8 *)*ip4 > (u8 *)vlib_buffer_get_current (b))
+ {
+ BFD_ERR ("Start of ip header is beyond current data, ignoring");
+ *ip4 = NULL;
+ *udp = NULL;
+ return;
+ }
+ *udp = (udp_header_t *)((*ip4) + 1);
+}
+
+static bfd_udp_error_t bfd_udp4_verify_transport (const ip4_header_t *ip4,
+ const udp_header_t *udp,
+ const bfd_session_t *bs)
+{
+ const bfd_udp_session_t *bus = &bs->udp;
+ const bfd_udp_key_t *key = &bus->key;
+ if (ip4->src_address.as_u32 != key->peer_addr.ip4.as_u32)
+ {
+      BFD_ERR ("IP src addr mismatch, got %U, expected %U",
+               format_ip4_address, ip4->src_address.as_u8,
+               format_ip4_address, key->peer_addr.ip4.as_u8);
+ return BFD_UDP_ERROR_BAD;
+ }
+ if (ip4->dst_address.as_u32 != key->local_addr.ip4.as_u32)
+ {
+      BFD_ERR ("IP dst addr mismatch, got %U, expected %U",
+               format_ip4_address, ip4->dst_address.as_u8,
+               format_ip4_address, key->local_addr.ip4.as_u8);
+ return BFD_UDP_ERROR_BAD;
+ }
+ const u8 expected_ttl = 255;
+ if (ip4->ttl != expected_ttl)
+ {
+ BFD_ERR ("IP unexpected TTL value %d, expected %d", ip4->ttl,
+ expected_ttl);
+ return BFD_UDP_ERROR_BAD;
+ }
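+  /* RFC 5881 section 4 requires the sender to pick its UDP source port
+   * from the range 49152..65535 */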
+  const u16 udp_src_port = clib_net_to_host_u16 (udp->src_port);
+  if (udp_src_port < 49152)
+    {
+      BFD_ERR ("Invalid UDP src port %d, out of range <49152,65535>",
+               udp_src_port);
+      return BFD_UDP_ERROR_BAD;
+    }
+ return BFD_UDP_ERROR_NONE;
+}
+
+typedef struct
+{
+ u32 bs_idx;
+ bfd_pkt_t pkt;
+} bfd_rpc_update_t;
+
+static void bfd_rpc_update_session_cb (const bfd_rpc_update_t *a)
+{
+ bfd_consume_pkt (bfd_udp_main.bfd_main, &a->pkt, a->bs_idx);
+}
+
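+/* Hand the received packet over to the main thread via an RPC, so that
+ * session state is only ever updated from one thread; the packet data is
+ * copied into the RPC message because the buffer itself is not handed
+ * over. */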
+static void bfd_rpc_update_session (u32 bs_idx, const bfd_pkt_t *pkt)
+{
+ /* packet length was already verified to be correct by the caller */
+ const u32 data_size = sizeof (bfd_rpc_update_t) -
+ STRUCT_SIZE_OF (bfd_rpc_update_t, pkt) +
+ pkt->head.length;
+ u8 data[data_size];
+ bfd_rpc_update_t *update = (bfd_rpc_update_t *)data;
+ update->bs_idx = bs_idx;
+ clib_memcpy (&update->pkt, pkt, pkt->head.length);
+ vl_api_rpc_call_main_thread (bfd_rpc_update_session_cb, data, data_size);
+}
+
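+/* Validate a received BFD packet and feed it to the session state
+ * machine. The session is found either by your_disc or, when that is
+ * zero, by the (sw_if_index, local, peer) key. */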
+static bfd_udp_error_t bfd_udp4_scan (vlib_main_t *vm, vlib_node_runtime_t *rt,
+ vlib_buffer_t *b, bfd_session_t **bs_out)
+{
+ const bfd_pkt_t *pkt = vlib_buffer_get_current (b);
+ if (sizeof (*pkt) > b->current_length)
+ {
+      BFD_ERR (
+          "Payload size %d too small to hold bfd packet of minimum size %d",
+          b->current_length, (int) sizeof (*pkt));
+ return BFD_UDP_ERROR_BAD;
+ }
+ const ip4_header_t *ip4;
+ const udp_header_t *udp;
+ bfd_udp4_find_headers (b, &ip4, &udp);
+ if (!ip4 || !udp)
+ {
+ BFD_ERR ("Couldn't find ip4 or udp header");
+ return BFD_UDP_ERROR_BAD;
+ }
+ if (!bfd_verify_pkt_common (pkt))
+ {
+ return BFD_UDP_ERROR_BAD;
+ }
+ bfd_session_t *bs = NULL;
+ if (pkt->your_disc)
+ {
+ BFD_DBG ("Looking up BFD session using discriminator %u",
+ pkt->your_disc);
+ bs = bfd_find_session_by_disc (bfd_udp_main.bfd_main, pkt->your_disc);
+ }
+ else
+ {
+ bfd_udp_key_t key;
+ memset (&key, 0, sizeof (key));
+ key.sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
+ key.local_addr.ip4.as_u32 = ip4->dst_address.as_u32;
+ key.peer_addr.ip4.as_u32 = ip4->src_address.as_u32;
+ BFD_DBG ("Looking up BFD session using key (sw_if_index=%u, local=%U, "
+ "peer=%U)",
+ key.sw_if_index, format_ip4_address, key.local_addr.ip4.as_u8,
+ format_ip4_address, key.peer_addr.ip4.as_u8);
+ bs = bfd_lookup_session (&bfd_udp_main, &key);
+ }
+ if (!bs)
+ {
+ BFD_ERR ("BFD session lookup failed - no session matches BFD pkt");
+ return BFD_UDP_ERROR_BAD;
+ }
+ BFD_DBG ("BFD session found, bs_idx=%u", bs->bs_idx);
+ if (!bfd_verify_pkt_session (pkt, b->current_length, bs))
+ {
+ return BFD_UDP_ERROR_BAD;
+ }
+ bfd_udp_error_t err;
+ if (BFD_UDP_ERROR_NONE != (err = bfd_udp4_verify_transport (ip4, udp, bs)))
+ {
+ return err;
+ }
+ bfd_rpc_update_session (bs->bs_idx, pkt);
+ *bs_out = bs;
+ return BFD_UDP_ERROR_NONE;
+}
+
+static bfd_udp_error_t bfd_udp6_scan (vlib_main_t *vm, vlib_buffer_t *b)
+{
+ /* TODO */
+ return BFD_UDP_ERROR_BAD;
+}
+
+/*
+ * Process a frame of bfd packets
+ * Expect 1 packet / frame
+ */
+static uword bfd_udp_input (vlib_main_t *vm, vlib_node_runtime_t *rt,
+ vlib_frame_t *f, int is_ipv6)
+{
+ u32 n_left_from, *from;
+ bfd_input_trace_t *t0;
+
+ from = vlib_frame_vector_args (f); /* array of buffer indices */
+ n_left_from = f->n_vectors; /* number of buffer indices */
+
+ while (n_left_from > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0, error0;
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+
+ bfd_session_t *bs = NULL;
+
+ /* If this pkt is traced, snapshot the data */
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ int len;
+ t0 = vlib_add_trace (vm, rt, b0, sizeof (*t0));
+ len = (b0->current_length < sizeof (t0->data)) ? b0->current_length
+ : sizeof (t0->data);
+ t0->len = len;
+ clib_memcpy (t0->data, vlib_buffer_get_current (b0), len);
+ }
+
+      /* scan this bfd pkt. error0 is the counter index to bump */
+ if (is_ipv6)
+ {
+ error0 = bfd_udp6_scan (vm, b0);
+ }
+ else
+ {
+ error0 = bfd_udp4_scan (vm, rt, b0, &bs);
+ }
+ b0->error = rt->errors[error0];
+
+ next0 = BFD_UDP_INPUT_NEXT_NORMAL;
+ if (BFD_UDP_ERROR_NONE == error0)
+ {
+ /* if everything went fine, check for poll bit, if present, re-use
+ the buffer and based on (now updated) session parameters, send the
+ final packet back */
+ const bfd_pkt_t *pkt = vlib_buffer_get_current (b0);
+ if (bfd_pkt_get_poll (pkt))
+ {
+ bfd_send_final (vm, b0, bs);
+ if (is_ipv6)
+ {
+ vlib_node_increment_counter (vm, bfd_udp6_input_node.index,
+ b0->error, 1);
+ }
+ else
+ {
+ vlib_node_increment_counter (vm, bfd_udp4_input_node.index,
+ b0->error, 1);
+ }
+ next0 = BFD_UDP_INPUT_NEXT_REPLY;
+ }
+ }
+ vlib_set_next_frame_buffer (vm, rt, next0, bi0);
+
+ from += 1;
+ n_left_from -= 1;
+ }
+
+ return f->n_vectors;
+}
+
+static uword bfd_udp4_input (vlib_main_t *vm, vlib_node_runtime_t *rt,
+ vlib_frame_t *f)
+{
+ return bfd_udp_input (vm, rt, f, 0);
+}
+
+/*
+ * bfd input graph node declaration
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (bfd_udp4_input_node, static) = {
+ .function = bfd_udp4_input,
+ .name = "bfd-udp4-input",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = BFD_UDP_N_ERROR,
+ .error_strings = bfd_udp_error_strings,
+
+ .format_trace = bfd_input_format_trace,
+
+ .n_next_nodes = BFD_UDP_INPUT_N_NEXT,
+ .next_nodes =
+ {
+ [BFD_UDP_INPUT_NEXT_NORMAL] = "error-drop",
+ [BFD_UDP_INPUT_NEXT_REPLY] = "ip4-lookup",
+ },
+};
+/* *INDENT-ON* */
+
+static uword bfd_udp6_input (vlib_main_t *vm, vlib_node_runtime_t *rt,
+ vlib_frame_t *f)
+{
+ return bfd_udp_input (vm, rt, f, 1);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (bfd_udp6_input_node, static) = {
+ .function = bfd_udp6_input,
+ .name = "bfd-udp6-input",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = BFD_UDP_N_ERROR,
+ .error_strings = bfd_udp_error_strings,
+
+ .format_trace = bfd_input_format_trace,
+
+ .n_next_nodes = BFD_UDP_INPUT_N_NEXT,
+ .next_nodes =
+ {
+ [BFD_UDP_INPUT_NEXT_NORMAL] = "error-drop",
+ [BFD_UDP_INPUT_NEXT_REPLY] = "ip6-lookup",
+ },
+};
+/* *INDENT-ON* */
+
+static clib_error_t *bfd_sw_interface_up_down (vnet_main_t *vnm,
+ u32 sw_if_index, u32 flags)
+{
+ // vnet_hw_interface_t *hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ if (!(flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP))
+ {
+ /* TODO */
+ }
+ return 0;
+}
+
+VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (bfd_sw_interface_up_down);
+
+static clib_error_t *bfd_hw_interface_up_down (vnet_main_t *vnm,
+ u32 hw_if_index, u32 flags)
+{
+ if (flags & VNET_HW_INTERFACE_FLAG_LINK_UP)
+ {
+ /* TODO */
+ }
+ return 0;
+}
+
+VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION (bfd_hw_interface_up_down);
+
+/*
+ * setup function
+ */
+static clib_error_t *bfd_udp_init (vlib_main_t *vm)
+{
+ mhash_init (&bfd_udp_main.bfd_session_idx_by_bfd_key, sizeof (uword),
+ sizeof (bfd_udp_key_t));
+ bfd_udp_main.bfd_main = &bfd_main;
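+  /* steer packets arriving on the BFD ports (presumably 3784, the
+   * single-hop port from RFC 5881) to the bfd input nodes */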
+ udp_register_dst_port (vm, UDP_DST_PORT_bfd4, bfd_udp4_input_node.index, 1);
+ udp_register_dst_port (vm, UDP_DST_PORT_bfd6, bfd_udp6_input_node.index, 0);
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (bfd_udp_init);
diff --git a/src/vnet/bfd/bfd_udp.h b/src/vnet/bfd/bfd_udp.h
new file mode 100644
index 00000000000..51f5327be01
--- /dev/null
+++ b/src/vnet/bfd/bfd_udp.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief BFD global declarations
+ */
+
+#ifndef __included_bfd_udp_h__
+#define __included_bfd_udp_h__
+
+#include <vppinfra/clib.h>
+#include <vnet/adj/adj_types.h>
+#include <vnet/ip/ip6_packet.h>
+
+#define BFD_UDP_KEY_BODY
+
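+/* identifies a bfd-udp session; used as the mhash key when a session is
+ * looked up by interface and address pair (e.g. from CLI/API) rather than
+ * by discriminator */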
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+
+ u32 sw_if_index;
+ ip46_address_t local_addr;
+ ip46_address_t peer_addr;
+
+}) bfd_udp_key_t;
+/* *INDENT-ON* */
+
+typedef struct
+{
+ bfd_udp_key_t key;
+
+ adj_index_t adj_index;
+} bfd_udp_session_t;
+
+void bfd_add_udp_transport (vlib_main_t * vm, vlib_buffer_t * b,
+ bfd_udp_session_t * bs);
+
+#endif /* __included_bfd_udp_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/bfd/dir.dox b/src/vnet/bfd/dir.dox
new file mode 100644
index 00000000000..ed656b52074
--- /dev/null
+++ b/src/vnet/bfd/dir.dox
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ @dir src/vnet/bfd
+ @brief Bidirectional Forwarding Detection (BFD) implementation
+*/
diff --git a/src/vnet/buffer.h b/src/vnet/buffer.h
new file mode 100644
index 00000000000..7935027f9ec
--- /dev/null
+++ b/src/vnet/buffer.h
@@ -0,0 +1,381 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * vnet/buffer.h: vnet buffer flags
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_vnet_buffer_h
+#define included_vnet_buffer_h
+
+#include <vlib/vlib.h>
+
+/* VLIB buffer flags for ip4/ip6 packets. Set by input interfaces for ip4/ip6
+ tcp/udp packets with hardware computed checksums. */
+#define LOG2_IP_BUFFER_L4_CHECKSUM_COMPUTED LOG2_VLIB_BUFFER_FLAG_USER(1)
+#define LOG2_IP_BUFFER_L4_CHECKSUM_CORRECT LOG2_VLIB_BUFFER_FLAG_USER(2)
+#define IP_BUFFER_L4_CHECKSUM_COMPUTED (1 << LOG2_IP_BUFFER_L4_CHECKSUM_COMPUTED)
+#define IP_BUFFER_L4_CHECKSUM_CORRECT (1 << LOG2_IP_BUFFER_L4_CHECKSUM_CORRECT)
+
+/* VLAN header flags.
+ * These bits are zeroed in vlib_buffer_init_for_free_list()
+ * meaning wherever the buffer comes from they have a reasonable
+ * value (eg, if ip4/ip6 generates the packet.)
+ */
+#define LOG2_ETH_BUFFER_VLAN_2_DEEP LOG2_VLIB_BUFFER_FLAG_USER(3)
+#define LOG2_ETH_BUFFER_VLAN_1_DEEP LOG2_VLIB_BUFFER_FLAG_USER(4)
+#define ETH_BUFFER_VLAN_2_DEEP (1 << LOG2_ETH_BUFFER_VLAN_2_DEEP)
+#define ETH_BUFFER_VLAN_1_DEEP (1 << LOG2_ETH_BUFFER_VLAN_1_DEEP)
+#define ETH_BUFFER_VLAN_BITS (ETH_BUFFER_VLAN_1_DEEP | \
+ ETH_BUFFER_VLAN_2_DEEP)
+
+#define LOG2_VNET_BUFFER_RTE_MBUF_VALID LOG2_VLIB_BUFFER_FLAG_USER(5)
+#define VNET_BUFFER_RTE_MBUF_VALID (1 << LOG2_VNET_BUFFER_RTE_MBUF_VALID)
+
+#define LOG2_BUFFER_HANDOFF_NEXT_VALID LOG2_VLIB_BUFFER_FLAG_USER(6)
+#define BUFFER_HANDOFF_NEXT_VALID (1 << LOG2_BUFFER_HANDOFF_NEXT_VALID)
+
+#define LOG2_VNET_BUFFER_LOCALLY_ORIGINATED LOG2_VLIB_BUFFER_FLAG_USER(7)
+#define VNET_BUFFER_LOCALLY_ORIGINATED (1 << LOG2_VNET_BUFFER_LOCALLY_ORIGINATED)
+
+#define LOG2_VNET_BUFFER_SPAN_CLONE LOG2_VLIB_BUFFER_FLAG_USER(8)
+#define VNET_BUFFER_SPAN_CLONE (1 << LOG2_VNET_BUFFER_SPAN_CLONE)
+
+#define foreach_buffer_opaque_union_subtype \
+_(ethernet) \
+_(ip) \
+_(mcast) \
+_(swt) \
+_(l2) \
+_(l2t) \
+_(gre) \
+_(l2_classify) \
+_(handoff) \
+_(policer) \
+_(ipsec) \
+_(map) \
+_(map_t) \
+_(ip_frag)
+
+/*
+ * vnet stack buffer opaque array overlay structure.
+ * The vnet_buffer_opaque_t *must* be the same size as the
+ * vlib_buffer_t "opaque" structure member, 32 bytes.
+ *
+ * When adding a union type, please add a stanza to
+ * foreach_buffer_opaque_union_subtype (directly above).
+ * Code in vnet_interface_init(...) verifies the size
+ * of the union, and will announce any deviations in an
+ * impossible-to-miss manner.
+ */
+typedef struct
+{
+ u32 sw_if_index[VLIB_N_RX_TX];
+
+ union
+ {
+ /* Ethernet. */
+ struct
+ {
+ /* Saved value of current header by ethernet-input. */
+ i32 start_of_ethernet_header;
+ } ethernet;
+
+ /* IP4/6 buffer opaque. */
+ struct
+ {
+ /* Adjacency from destination IP address lookup [VLIB_TX].
+ Adjacency from source IP address lookup [VLIB_RX].
+ This gets set to ~0 until source lookup is performed. */
+ u32 adj_index[VLIB_N_RX_TX];
+
+ union
+ {
+ struct
+ {
+ /* Flow hash value for this packet computed from IP src/dst address
+ protocol and ports. */
+ u32 flow_hash;
+
+ /* next protocol */
+ u32 save_protocol;
+
+ /* Rewrite length */
+ u32 save_rewrite_length;
+ };
+
+ /* ICMP */
+ struct
+ {
+ u8 type;
+ u8 code;
+ u32 data;
+ } icmp;
+
+ /* IP header offset from vlib_buffer.data - saved by ip*_local nodes */
+ i32 start_of_ip_header;
+ };
+
+ } ip;
+
+ /*
+ * MPLS:
+ * data copied from the MPLS header that was popped from the packet
+ * during the look-up.
+ */
+ struct
+ {
+ u8 ttl;
+ u8 exp;
+ u8 first;
+ } mpls;
+
+ /* Multicast replication */
+ struct
+ {
+ u32 pad[3];
+ u32 mcast_group_index;
+ u32 mcast_current_index;
+ u32 original_free_list_index;
+ } mcast;
+
+ /* ip4-in-ip6 softwire termination, only valid there */
+ struct
+ {
+ u8 swt_disable;
+ u32 mapping_index;
+ } swt;
+
+ /* l2 bridging path, only valid there */
+ struct
+ {
+ u32 feature_bitmap;
+ u16 bd_index; // bridge-domain index
+ u8 l2_len; // ethernet header length
+ u8 shg; // split-horizon group
+ } l2;
+
+ /* l2tpv3 softwire encap, only valid there */
+ struct
+ {
+ u32 pad[4]; /* do not overlay w/ ip.adj_index[0,1] */
+ u8 next_index;
+ u32 session_index;
+ } l2t;
+
+ struct
+ {
+ u32 src, dst;
+ } gre;
+
+ /* L2 classify */
+ struct
+ {
+ u64 pad;
+ u32 table_index;
+ u32 opaque_index;
+ u64 hash;
+ } l2_classify;
+
+ /* IO - worker thread handoff */
+ struct
+ {
+ u32 next_index;
+ } handoff;
+
+ /* vnet policer */
+ struct
+ {
+ u32 pad[8 - VLIB_N_RX_TX - 1]; /* to end of opaque */
+ u32 index;
+ } policer;
+
+ /* interface output features */
+ struct
+ {
+ u32 flags;
+ u32 sad_index;
+ } ipsec;
+
+ /* vcgn udp inside input, only valid there */
+ struct
+ {
+ /* This part forms context of the packet. The structure should be
+ * exactly same as spp_ctx_t. Also this should be the first
+ * element of this vcgn_uii structure.
+ */
+ /****** BEGIN spp_ctx_t section ***********************/
+ union
+ { /* Roddick specific */
+ u32 roddick_info;
+ struct _tx_pkt_info
+ { /* Used by PI to PI communication for TX */
+ u32 uidb_index:16; /* uidb_index to transmit */
+          u32 packet_type:2; /* 1 - IPv4, 2 - IPv6, 0,3 - unused */
+ u32 ipv4_defrag:1; /* 0 - Normal, 1 - update first
+ * segment size
+ * (set by 6rd defrag node)
+ */
+
+ u32 dst_ip_port_idx:4; /* Index to dst_ip_port_table */
+ u32 from_node:4;
+ u32 calc_chksum:1;
+ u32 reserved:4;
+ } tx;
+ struct _rx_pkt_info
+ { /* Used by PD / PI communication */
+ u32 uidb_index:16; /* uidb_index received in packet */
+          u32 packet_type:2; /* 1 - IPv4, 2 - IPv6, 0,3 - unused */
+ u32 icmp_type:1; /* 0-ICMP query type, 1-ICMP error type */
+ u32 protocol_type:2; /* 1-TCP, 2-UDP, 3-ICMP, 0 - Unused */
+ u32 ipv4_defrag:1; /* 0 - Normal, 1 - update first
+ * segment size
+ * (set by 6rd defrag node)
+ */
+
+ u32 direction:1; /* 0-Outside, 1-Inside */
+ u32 frag:1; /*IP fragment-1, Otherwise-0 */
+ u32 option:1; /* 0-No IP option (v4) present, non-fragHdr
+ * option hdr present (v6)
+ */
+ u32 df_bit:1; /* IPv4 DF bit copied here */
+ u32 reserved1:6;
+ } rx;
+ } ru;
+ /****** END spp_ctx_t section ***********************/
+
+ union
+ {
+ struct
+ {
+ u32 ipv4;
+ u16 port;
+ u16 vrf; //bit0-13:i/f, bit14-15:protocol
+ } k;
+
+ u64 key64;
+ } key;
+
+ u32 bucket;
+
+ u16 ovrf; /* Exit interface */
+ u8 frag_pkt;
+ u8 vcgn_unused1;
+ } vcgn_uii;
+
+ /* MAP */
+ struct
+ {
+ u16 mtu;
+ } map;
+
+ /* MAP-T */
+ struct
+ {
+ u32 map_domain_index;
+ struct
+ {
+ u32 saddr, daddr;
+ u16 frag_offset; //Fragmentation header offset
+ u16 l4_offset; //L4 header overall offset
+ u8 l4_protocol; //The final protocol number
+ } v6; //Used by ip6_map_t only
+ u16 checksum_offset; //L4 checksum overall offset
+ u16 mtu; //Exit MTU
+ } map_t;
+
+ /* IP Fragmentation */
+ struct
+ {
+ u16 header_offset;
+ u16 mtu;
+ u8 next_index;
+ u8 flags; //See ip_frag.h
+ } ip_frag;
+
+ /* COP - configurable junk filter(s) */
+ struct
+ {
+ /* Current configuration index. */
+ u32 current_config_index;
+ } cop;
+
+ /* LISP */
+ struct
+ {
+ /* overlay address family */
+ u16 overlay_afi;
+ } lisp;
+
+ /* Driver rx feature */
+ struct
+ {
+ u32 saved_next_index; /**< saved by drivers for short-cut */
+ u16 buffer_advance;
+ } device_input_feat;
+
+ u32 unused[6];
+ };
+} vnet_buffer_opaque_t;
+
+/*
+ * The opaque field of the vlib_buffer_t is interpreted as a
+ * vnet_buffer_opaque_t. Hence it should be big enough to accommodate one.
+ */
+STATIC_ASSERT (sizeof (vnet_buffer_opaque_t) <= STRUCT_SIZE_OF (vlib_buffer_t,
+ opaque),
+ "VNET buffer meta-data too large for vlib_buffer");
+
+#define vnet_buffer(b) ((vnet_buffer_opaque_t *) (b)->opaque)
+
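+/* Typical usage from graph-node code (illustrative):
+ *
+ *   vnet_buffer (b)->sw_if_index[VLIB_RX] = sw_if_index0;
+ *   vnet_buffer (b)->ip.adj_index[VLIB_TX] = adj_index0;
+ */
+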
+/* Full cache line (64 bytes) of additional space */
+typedef struct
+{
+  union
+  {
+    u8 unused[64]; /* reserve the full cache line noted above */
+  };
+} vnet_buffer_opaque2_t;
+
+#endif /* included_vnet_buffer_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/cdp/cdp.pg b/src/vnet/cdp/cdp.pg
new file mode 100644
index 00000000000..b6ba18656c2
--- /dev/null
+++ b/src/vnet/cdp/cdp.pg
@@ -0,0 +1,7 @@
+packet-generator new {
+ name cdp
+ limit 1
+ node cdp-input
+ size 374-374
+ data { hex 0x02b46b96000100096978676265000500bf436973636f20494f5320536f6674776172652c2043333735304520536f66747761726520284333373530452d554e4956455253414c2d4d292c2056657273696f6e2031322e32283335295345352c2052454c4541534520534f4654574152452028666331290a436f707972696768742028632920313938362d3230303720627920436973636f2053797374656d732c20496e632e0a436f6d70696c6564205468752031392d4a756c2d30372031363a3137206279206e616368656e00060018636973636f2057532d4333373530452d3234544400020011000000010101cc0004000000000003001b54656e4769676162697445746865726e6574312f302f3100040008000000280008002400000c011200000000ffffffff010221ff000000000000001e7a50f000ff000000090004000a00060001000b0005010012000500001300050000160011000000010101cc000400000000001a00100000000100000000ffffffff }
+}
diff --git a/src/vnet/cdp/cdp_input.c b/src/vnet/cdp/cdp_input.c
new file mode 100644
index 00000000000..3574de68534
--- /dev/null
+++ b/src/vnet/cdp/cdp_input.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/cdp/cdp_node.h>
+
+cdp_main_t cdp_main;
+
+#define DEBUG_TLV_DUMP 0 /* 1 => dump TLVs to stdout while processing them */
+
+/* Reliable multicast messages we use to keep peers updated */
+mc_serialize_msg_t serialize_cdp_neighbor_msg;
+mc_serialize_msg_t serialize_cdp_keepalive_msg;
+
+/*
+ * ported from an unspecified Cisco cdp implementation.
+ * Compute / return in HOST byte order. 0 => good checksum.
+ */
+u16
+cdp_checksum (void *p, int count)
+{
+ u32 sum;
+ u16 i, *data;
+
+ data = p;
+ sum = 0;
+ while (count > 1)
+ {
+ sum += ntohs (*data);
+ data++;
+ count -= 2;
+ }
+
+ if (count > 0)
+ sum += *(char *) data;
+
+ while (sum >> 16)
+ {
+ sum = (sum & 0xFFFF) + (sum >> 16);
+ }
+
+ i = (i16) sum;
+ return (~i);
+}
+
+/* TLV handler table */
+typedef struct
+{
+ char *name;
+ u32 tlv_id;
+ void *format;
+ void *process;
+} tlv_handler_t;
+
+static tlv_handler_t tlv_handlers[];
+
+/* Display a generic TLV as a set of hex bytes */
+static u8 *
+format_generic_tlv (u8 * s, va_list * va)
+{
+ cdp_tlv_t *t = va_arg (*va, cdp_tlv_t *);
+ tlv_handler_t *h = &tlv_handlers[t->t];
+
+ s = format (s, "%s(%d): %U\n", h->name,
+ t->t, format_hex_bytes, t->v, t->l - sizeof (*t));
+ return s;
+}
+
+/* Ignore / skip a TLV we don't support */
+static cdp_error_t
+process_generic_tlv (cdp_main_t * cm, cdp_neighbor_t * n, cdp_tlv_t * t)
+{
+#if DEBUG_TLV_DUMP > 0
+ fformat (stdout, "%U", format_generic_tlv, t);
+#endif
+
+ return CDP_ERROR_NONE;
+}
+
+/* print a text tlv */
+static u8 *
+format_text_tlv (u8 * s, va_list * va)
+{
+ cdp_tlv_t *t = va_arg (*va, cdp_tlv_t *);
+ tlv_handler_t *h = &tlv_handlers[t->t];
+ int i;
+
+ s = format (s, "%s(%d): ", h->name, t->t);
+
+ for (i = 0; i < (t->l - sizeof (*t)); i++)
+ vec_add1 (s, t->v[i]);
+
+ vec_add1 (s, '\n');
+ return s;
+}
+
+#if DEBUG_TLV_DUMP == 0
+/* gcc warning be gone */
+CLIB_UNUSED (static cdp_error_t
+ process_text_tlv (cdp_main_t * cm, cdp_neighbor_t * n,
+ cdp_tlv_t * t));
+#endif
+
+/* process / skip a generic text TLV that we don't support */
+static cdp_error_t
+process_text_tlv (cdp_main_t * cm, cdp_neighbor_t * n, cdp_tlv_t * t)
+{
+#if DEBUG_TLV_DUMP > 0
+ fformat (stdout, "%U\n", format_text_tlv, t);
+#endif
+
+ return CDP_ERROR_NONE;
+}
+
+/* per-TLV format function definitions */
+#define format_unused_tlv format_generic_tlv
+#define format_device_name_tlv format_text_tlv
+#define format_address_tlv format_generic_tlv
+#define format_port_id_tlv format_text_tlv
+#define format_capabilities_tlv format_generic_tlv
+#define format_version_tlv format_text_tlv
+#define format_platform_tlv format_text_tlv
+#define format_ipprefix_tlv format_generic_tlv
+#define format_hello_tlv format_generic_tlv
+#define format_vtp_domain_tlv format_generic_tlv
+#define format_native_vlan_tlv format_generic_tlv
+#define format_duplex_tlv format_generic_tlv
+#define format_appl_vlan_tlv format_generic_tlv
+#define format_trigger_tlv format_generic_tlv
+#define format_power_tlv format_generic_tlv
+#define format_mtu_tlv format_generic_tlv
+#define format_trust_tlv format_generic_tlv
+#define format_cos_tlv format_generic_tlv
+#define format_sysname_tlv format_generic_tlv
+#define format_sysobject_tlv format_generic_tlv
+#define format_mgmt_addr_tlv format_generic_tlv
+#define format_physical_loc_tlv format_generic_tlv
+#define format_mgmt_addr2_tlv format_generic_tlv
+#define format_power_requested_tlv format_generic_tlv
+#define format_power_available_tlv format_generic_tlv
+#define format_port_unidirectional_tlv format_generic_tlv
+#define format_unknown_28_tlv format_generic_tlv
+#define format_energywise_tlv format_generic_tlv
+#define format_unknown_30_tlv format_generic_tlv
+#define format_spare_poe_tlv format_generic_tlv
+
+/* tlv ID=0 is a mistake */
+static cdp_error_t
+process_unused_tlv (cdp_main_t * cm, cdp_neighbor_t * n, cdp_tlv_t * t)
+{
+ return CDP_ERROR_BAD_TLV;
+}
+
+/* list of text TLVs that we snapshot */
+#define foreach_text_to_struct_tlv \
+_(device_name,DEBUG_TLV_DUMP) \
+_(version,DEBUG_TLV_DUMP) \
+_(platform,DEBUG_TLV_DUMP) \
+_(port_id,DEBUG_TLV_DUMP)
+
+#define _(z,dbg) \
+static \
+cdp_error_t process_##z##_tlv (cdp_main_t *cm, cdp_neighbor_t *n, \
+ cdp_tlv_t *t) \
+{ \
+ int i; \
+ if (dbg) \
+ fformat(stdout, "%U\n", format_text_tlv, t); \
+ \
+ if (n->z) \
+ _vec_len(n->z) = 0; \
+ \
+ for (i = 0; i < (t->l - sizeof (*t)); i++) \
+ vec_add1(n->z, t->v[i]); \
+ \
+ vec_add1(n->z, 0); \
+ \
+ return CDP_ERROR_NONE; \
+}
+
+foreach_text_to_struct_tlv
+#undef _
+#define process_address_tlv process_generic_tlv
+#define process_capabilities_tlv process_generic_tlv
+#define process_ipprefix_tlv process_generic_tlv
+#define process_hello_tlv process_generic_tlv
+#define process_vtp_domain_tlv process_generic_tlv
+#define process_native_vlan_tlv process_generic_tlv
+#define process_duplex_tlv process_generic_tlv
+#define process_appl_vlan_tlv process_generic_tlv
+#define process_trigger_tlv process_generic_tlv
+#define process_power_tlv process_generic_tlv
+#define process_mtu_tlv process_generic_tlv
+#define process_trust_tlv process_generic_tlv
+#define process_cos_tlv process_generic_tlv
+#define process_sysname_tlv process_generic_tlv
+#define process_sysobject_tlv process_generic_tlv
+#define process_mgmt_addr_tlv process_generic_tlv
+#define process_physical_loc_tlv process_generic_tlv
+#define process_mgmt_addr2_tlv process_generic_tlv
+#define process_power_requested_tlv process_generic_tlv
+#define process_power_available_tlv process_generic_tlv
+#define process_port_unidirectional_tlv process_generic_tlv
+#define process_unknown_28_tlv process_generic_tlv
+#define process_energywise_tlv process_generic_tlv
+#define process_unknown_30_tlv process_generic_tlv
+#define process_spare_poe_tlv process_generic_tlv
+static tlv_handler_t tlv_handlers[] = {
+#define _(a) {#a, CDP_TLV_##a, format_##a##_tlv, process_##a##_tlv},
+ foreach_cdp_tlv_type
+#undef _
+};
+
+#if DEBUG_TLV_DUMP == 0
+CLIB_UNUSED (static u8 * format_cdp_hdr (u8 * s, va_list * va));
+#endif
+
+static u8 *
+format_cdp_hdr (u8 * s, va_list * va)
+{
+ cdp_hdr_t *h = va_arg (*va, cdp_hdr_t *);
+
+ s = format (s, "version %d, ttl %d(secs), cksum 0x%04x\n",
+ h->version, h->ttl, h->checksum);
+ return s;
+}
+
+static cdp_error_t
+process_cdp_hdr (cdp_main_t * cm, cdp_neighbor_t * n, cdp_hdr_t * h)
+{
+#if DEBUG_TLV_DUMP > 0
+ fformat (stdout, "%U", format_cdp_hdr, h);
+#endif
+
+ if (h->version != 1 && h->version != 2)
+ return CDP_ERROR_PROTOCOL_VERSION;
+
+ n->ttl_in_seconds = h->ttl;
+
+ return CDP_ERROR_NONE;
+}
+
+/* scan a cdp packet; header, then tlv's */
+static int
+cdp_packet_scan (cdp_main_t * cm, cdp_neighbor_t * n)
+{
+ u8 *cur = n->last_rx_pkt;
+ cdp_hdr_t *h;
+ cdp_tlv_t *tlv;
+ cdp_error_t e = CDP_ERROR_NONE;
+ tlv_handler_t *handler;
+ cdp_error_t (*fp) (cdp_main_t *, cdp_neighbor_t *, cdp_tlv_t *);
+ u16 computed_checksum;
+
+ computed_checksum = cdp_checksum (cur, vec_len (cur));
+
+ if (computed_checksum)
+ return CDP_ERROR_CHECKSUM;
+
+ h = (cdp_hdr_t *) cur;
+
+ e = process_cdp_hdr (cm, n, h);
+ if (e)
+ return e;
+
+ cur = (u8 *) (h + 1);
+
+ while (cur < n->last_rx_pkt + vec_len (n->last_rx_pkt) - 1)
+ {
+ tlv = (cdp_tlv_t *) cur;
+ tlv->t = ntohs (tlv->t);
+ tlv->l = ntohs (tlv->l);
+ if (tlv->t >= ARRAY_LEN (tlv_handlers))
+ return CDP_ERROR_BAD_TLV;
+ handler = &tlv_handlers[tlv->t];
+ fp = handler->process;
+ e = (*fp) (cm, n, tlv);
+ if (e)
+ return e;
+ /* tlv length includes (t, l) */
+ cur += tlv->l;
+ }
+
+ return CDP_ERROR_NONE;
+}
+
+/*
+ * cdp input routine
+ */
+cdp_error_t
+cdp_input (vlib_main_t * vm, vlib_buffer_t * b0, u32 bi0)
+{
+ cdp_main_t *cm = &cdp_main;
+ cdp_neighbor_t *n;
+ uword *p, nbytes;
+ cdp_error_t e;
+ uword last_packet_signature;
+
+ /* find or create a neighbor pool entry for the (sw) interface
+ upon which we received this pkt */
+ p = hash_get (cm->neighbor_by_sw_if_index,
+ vnet_buffer (b0)->sw_if_index[VLIB_RX]);
+
+ if (p == 0)
+ {
+ pool_get (cm->neighbors, n);
+ memset (n, 0, sizeof (*n));
+ n->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ n->packet_template_index = (u8) ~ 0;
+ hash_set (cm->neighbor_by_sw_if_index, n->sw_if_index,
+ n - cm->neighbors);
+ }
+ else
+ {
+ n = pool_elt_at_index (cm->neighbors, p[0]);
+ }
+
+ /*
+ * typical clib idiom. Don't repeatedly allocate and free
+ * the per-neighbor rx buffer. Reset its apparent length to zero
+ * and reuse it.
+ */
+
+ if (n->last_rx_pkt)
+ _vec_len (n->last_rx_pkt) = 0;
+
+ /* cdp disabled on this interface, we're done */
+ if (n->disabled)
+ return CDP_ERROR_DISABLED;
+
+ /*
+ * Make sure the per-neighbor rx buffer is big enough to hold
+ * the data we're about to copy
+ */
+ vec_validate (n->last_rx_pkt, vlib_buffer_length_in_chain (vm, b0) - 1);
+
+ /*
+ * Coalesce / copy the buffer chain into the per-neighbor
+ * rx buffer
+ */
+ nbytes = vlib_buffer_contents (vm, bi0, n->last_rx_pkt);
+ ASSERT (nbytes <= vec_len (n->last_rx_pkt));
+
+ /*
+ * Compute Jenkins hash of the new packet, decide if we need to
+ * actually parse through the TLV's. CDP packets are all identical,
+ * so unless we time out the peer, we don't need to process the packet.
+ */
+ last_packet_signature =
+ hash_memory (n->last_rx_pkt, vec_len (n->last_rx_pkt), 0xd00b);
+
+ if (n->last_packet_signature_valid &&
+ n->last_packet_signature == last_packet_signature)
+ {
+ e = CDP_ERROR_CACHE_HIT;
+ }
+ else
+ {
+ /* Actually scan the packet */
+ e = cdp_packet_scan (cm, n);
+ n->last_packet_signature_valid = 1;
+ n->last_packet_signature = last_packet_signature;
+ }
+
+ if (e == CDP_ERROR_NONE)
+ {
+ n->last_heard = vlib_time_now (vm);
+ }
+
+ return e;
+}
+
+/*
+ * setup neighbor hash table
+ */
+static clib_error_t *
+cdp_init (vlib_main_t * vm)
+{
+ clib_error_t *error;
+ cdp_main_t *cm = &cdp_main;
+ void vnet_cdp_node_reference (void);
+
+ vnet_cdp_node_reference ();
+
+ if ((error = vlib_call_init_function (vm, cdp_periodic_init)))
+ return error;
+
+ cm->vlib_main = vm;
+ cm->vnet_main = vnet_get_main ();
+ cm->neighbor_by_sw_if_index = hash_create (0, sizeof (uword));
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (cdp_init);
+
+
+static u8 *
+format_cdp_neighbors (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ cdp_main_t *cm = va_arg (*va, cdp_main_t *);
+ vnet_main_t *vnm = &vnet_main;
+ cdp_neighbor_t *n;
+ vnet_hw_interface_t *hw;
+
+ s = format (s,
+ "%=25s %=15s %=25s %=10s\n",
+ "Our Port", "Peer System", "Peer Port", "Last Heard");
+
+ /* *INDENT-OFF* */
+ pool_foreach (n, cm->neighbors,
+ ({
+ hw = vnet_get_sup_hw_interface (vnm, n->sw_if_index);
+
+ if (n->disabled == 0)
+ s = format (s, "%=25s %=15s %=25s %=10.1f\n",
+ hw->name, n->device_name, n->port_id,
+ n->last_heard);
+ }));
+ /* *INDENT-ON* */
+ return s;
+}
+
+
+static clib_error_t *
+show_cdp (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ cdp_main_t *cm = &cdp_main;
+
+ vlib_cli_output (vm, "%U\n", format_cdp_neighbors, vm, cm);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_cdp_command, static) = {
+ .path = "show cdp",
+  .short_help = "Show cdp neighbors",
+ .function = show_cdp,
+};
+/* *INDENT-ON* */
+
+
+/*
+ * packet trace format function, very similar to
+ * cdp_packet_scan except that we call the per TLV format
+ * functions instead of the per TLV processing functions
+ */
+u8 *
+cdp_input_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ cdp_input_trace_t *t = va_arg (*args, cdp_input_trace_t *);
+ u8 *cur;
+ cdp_hdr_t *h;
+ cdp_tlv_t *tlv;
+ tlv_handler_t *handler;
+ u8 *(*fp) (cdp_tlv_t *);
+
+ cur = t->data;
+
+ h = (cdp_hdr_t *) cur;
+ s = format (s, "%U", format_cdp_hdr, h);
+
+ cur = (u8 *) (h + 1);
+
+ while (cur < t->data + t->len)
+ {
+ tlv = (cdp_tlv_t *) cur;
+ tlv->t = ntohs (tlv->t);
+ tlv->l = ntohs (tlv->l);
+ if (tlv->t >= ARRAY_LEN (tlv_handlers))
+ {
+ s = format (s, "BAD_TLV\n");
+ break;
+ }
+ handler = &tlv_handlers[tlv->t];
+ fp = handler->format;
+ s = format (s, " %U", fp, tlv);
+ /* tlv length includes (t, l) */
+ cur += tlv->l;
+ }
+
+ return s;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/cdp/cdp_node.c b/src/vnet/cdp/cdp_node.c
new file mode 100644
index 00000000000..39ac4a908fb
--- /dev/null
+++ b/src/vnet/cdp/cdp_node.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/cdp/cdp_node.h>
+#include <vnet/ethernet/packet.h>
+
+static vlib_node_registration_t cdp_process_node;
+
+/** \file
+
+ 2 x CDP graph nodes: an "interior" node to process
+ incoming announcements, and a "process" node to periodically
+ send announcements.
+
+ The interior node is neither pipelined nor dual-looped, because
+ it would be very unusual to see more than one CDP packet in
+   a given input frame. So, it's a very simple / straightforward
+ example.
+*/
+
+/*
+ * packet counter strings
+ * Dump these counters via the "show error" CLI command
+ */
+static char *cdp_error_strings[] = {
+#define _(sym,string) string,
+ foreach_cdp_error
+#undef _
+};
+
+/*
+ * We actually send all cdp pkts to the "error" node after scanning
+ * them, so the graph node has only one next-index. The "error-drop"
+ * node automatically bumps our per-node packet counters for us.
+ */
+typedef enum
+{
+ CDP_INPUT_NEXT_NORMAL,
+ CDP_INPUT_N_NEXT,
+} cdp_next_t;
+
+/*
+ * Process a frame of cdp packets
+ * Expect 1 packet / frame
+ */
+static uword
+cdp_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from;
+ cdp_input_trace_t *t0;
+
+ from = vlib_frame_vector_args (frame); /* array of buffer indices */
+ n_left_from = frame->n_vectors; /* number of buffer indices */
+
+ while (n_left_from > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0, error0;
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+
+ next0 = CDP_INPUT_NEXT_NORMAL;
+
+ /* scan this cdp pkt. error0 is the counter index to bump */
+ error0 = cdp_input (vm, b0, bi0);
+ b0->error = node->errors[error0];
+
+	  /* If this pkt is traced, snapshot the data */
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ int len;
+ t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
+ len = (b0->current_length < sizeof (t0->data))
+ ? b0->current_length : sizeof (t0->data);
+ t0->len = len;
+ clib_memcpy (t0->data, vlib_buffer_get_current (b0), len);
+ }
+ /* push this pkt to the next graph node, always error-drop */
+ vlib_set_next_frame_buffer (vm, node, next0, bi0);
+
+ from += 1;
+ n_left_from -= 1;
+ }
+
+ return frame->n_vectors;
+}
+
+/*
+ * cdp input graph node declaration
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (cdp_input_node, static) = {
+ .function = cdp_node_fn,
+ .name = "cdp-input",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = CDP_N_ERROR,
+ .error_strings = cdp_error_strings,
+
+ .format_trace = cdp_input_format_trace,
+
+ .n_next_nodes = CDP_INPUT_N_NEXT,
+ .next_nodes = {
+ [CDP_INPUT_NEXT_NORMAL] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * cdp periodic function
+ */
+static uword
+cdp_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ cdp_main_t *cm = &cdp_main;
+ f64 poll_time_remaining;
+ uword event_type, *event_data = 0;
+
+ /* So we can send events to the cdp process */
+ cm->cdp_process_node_index = cdp_process_node.index;
+
+ /* Dynamically register the cdp input node with the snap classifier */
+ snap_register_input_protocol (vm, "cdp-input", 0xC /* ieee_oui, Cisco */ ,
+ 0x2000 /* protocol CDP */ ,
+ cdp_input_node.index);
+
+ snap_register_input_protocol (vm, "cdp-input", 0xC /* ieee_oui, Cisco */ ,
+ 0x2004 /* protocol CDP */ ,
+ cdp_input_node.index);
+
+#if 0 /* retain for reference */
+ /* with the hdlc classifier */
+ hdlc_register_input_protocol (vm, HDLC_PROTOCOL_cdp, cdp_input_node.index);
+#endif
+
+ /* with ethernet input (for SRP) */
+ ethernet_register_input_type (vm, ETHERNET_TYPE_CDP /* CDP */ ,
+ cdp_input_node.index);
+
+ poll_time_remaining = 10.0 /* seconds */ ;
+ while (1)
+ {
+ /* sleep until next poll time, or msg serialize event occurs */
+ poll_time_remaining =
+ vlib_process_wait_for_event_or_clock (vm, poll_time_remaining);
+
+ event_type = vlib_process_get_events (vm, &event_data);
+ switch (event_type)
+ {
+ case ~0: /* no events => timeout */
+ break;
+
+ default:
+ clib_warning ("BUG: event type 0x%wx", event_type);
+ break;
+ }
+ if (event_data)
+ _vec_len (event_data) = 0;
+
+ /* peer timeout scan, send announcements */
+ if (vlib_process_suspend_time_is_zero (poll_time_remaining))
+ {
+ cdp_periodic (vm);
+ poll_time_remaining = 10.0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * cdp periodic node declaration
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (cdp_process_node, static) = {
+ .function = cdp_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "cdp-process",
+};
+/* *INDENT-ON* */
+
+void
+vnet_cdp_node_reference (void)
+{
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/cdp/cdp_node.h b/src/vnet/cdp/cdp_node.h
new file mode 100644
index 00000000000..7028ddcaf9c
--- /dev/null
+++ b/src/vnet/cdp/cdp_node.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_cdp_node_h__
+#define __included_cdp_node_h__
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+
+#include <vnet/snap/snap.h>
+#include <vnet/hdlc/hdlc.h>
+#include <vnet/hdlc/packet.h>
+
+#include <vppinfra/format.h>
+#include <vppinfra/hash.h>
+
+#include <vnet/cdp/cdp_protocol.h>
+
+typedef enum
+{
+ CDP_PACKET_TEMPLATE_ETHERNET,
+ CDP_PACKET_TEMPLATE_HDLC,
+ CDP_PACKET_TEMPLATE_SRP,
+ CDP_N_PACKET_TEMPLATES,
+} cdp_packet_template_id_t;
+
+typedef struct
+{
+ /* neighbor's vlib software interface index */
+ u32 sw_if_index;
+
+ /* Timers */
+ f64 last_heard;
+ f64 last_sent;
+
+ /* Neighbor time-to-live (usually 180s) */
+ u8 ttl_in_seconds;
+
+ /* "no cdp run" or similar */
+ u8 disabled;
+
+ /* tx packet template id for this neighbor */
+ u8 packet_template_index;
+
+ /* Jenkins hash optimization: avoid tlv scan, send short keepalive msg */
+ u8 last_packet_signature_valid;
+ uword last_packet_signature;
+
+ /* Info we actually keep about each neighbor */
+ u8 *device_name;
+ u8 *version;
+ u8 *port_id;
+ u8 *platform;
+
+ /* last received packet, for the J-hash optimization */
+ u8 *last_rx_pkt;
+} cdp_neighbor_t;
+
+#define foreach_neighbor_string_field \
+_(device_name) \
+_(version) \
+_(port_id) \
+_(platform)
+
+typedef struct
+{
+ /* pool of cdp neighbors */
+ cdp_neighbor_t *neighbors;
+
+ /* tx pcap debug enable */
+ u8 tx_pcap_debug;
+
+ /* rapidly find a neighbor by vlib software interface index */
+ uword *neighbor_by_sw_if_index;
+
+ /* Background process node index */
+ u32 cdp_process_node_index;
+
+ /* Packet templates for different encap types */
+ vlib_packet_template_t packet_templates[CDP_N_PACKET_TEMPLATES];
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} cdp_main_t;
+
+cdp_main_t cdp_main;
+
+/* Packet counters */
+#define foreach_cdp_error \
+_ (NONE, "good cdp packets (processed)") \
+_ (CACHE_HIT, "good cdp packets (cache hit)") \
+_ (BAD_TLV, "cdp packets with bad TLVs") \
+_ (PROTOCOL_VERSION, "cdp packets with bad protocol versions") \
+_ (CHECKSUM, "cdp packets with bad checksums") \
+_ (DISABLED, "cdp packets received on disabled interfaces")
+
+typedef enum
+{
+#define _(sym,str) CDP_ERROR_##sym,
+ foreach_cdp_error
+#undef _
+ CDP_N_ERROR,
+} cdp_error_t;
+
+/* cdp packet trace capture */
+typedef struct
+{
+ u32 len;
+ u8 data[400];
+} cdp_input_trace_t;
+
+typedef enum
+{
+ CDP_EVENT_SEND_NEIGHBOR,
+ CDP_EVENT_SEND_KEEPALIVE,
+} cdp_process_event_t;
+
+
+cdp_error_t cdp_input (vlib_main_t * vm, vlib_buffer_t * b0, u32 bi0);
+void cdp_periodic (vlib_main_t * vm);
+void cdp_keepalive (cdp_main_t * cm, cdp_neighbor_t * n);
+u16 cdp_checksum (void *p, int count);
+u8 *cdp_input_format_trace (u8 * s, va_list * args);
+
+serialize_function_t serialize_cdp_main, unserialize_cdp_main;
+
+#endif /* __included_cdp_node_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/cdp/cdp_periodic.c b/src/vnet/cdp/cdp_periodic.c
new file mode 100644
index 00000000000..de111079aa3
--- /dev/null
+++ b/src/vnet/cdp/cdp_periodic.c
@@ -0,0 +1,512 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/cdp/cdp_node.h>
+#include <vppinfra/hash.h>
+#include <vnet/unix/pcap.h>
+#include <vnet/srp/srp.h>
+#include <vnet/ppp/ppp.h>
+#include <vnet/hdlc/hdlc.h>
+#include <vnet/srp/packet.h>
+
+/*
+ * Generate a set of specific CDP TLVs.
+ *
+ * $$$ eventually these need to fish better data from
+ * other data structures; e.g. the hostname, software version info
+ * etc.
+ */
+
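+/* Each helper below appends one TLV at *t0p and advances the write
+ * pointer; note that the TLV length field covers the (t, l) header as
+ * well as the value. */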
+static void
+add_device_name_tlv (vnet_hw_interface_t * hw, u8 ** t0p)
+{
+ cdp_tlv_t *t = (cdp_tlv_t *) * t0p;
+
+ t->t = htons (CDP_TLV_device_name);
+ t->l = htons (3 + sizeof (*t));
+ clib_memcpy (&t->v, "VPP", 3);
+
+ *t0p += ntohs (t->l);
+}
+
+static void
+add_port_id_tlv (vnet_hw_interface_t * hw, u8 ** t0p)
+{
+ cdp_tlv_t *t = (cdp_tlv_t *) * t0p;
+
+ t->t = htons (CDP_TLV_port_id);
+ t->l = htons (vec_len (hw->name) + sizeof (*t));
+ clib_memcpy (&t->v, hw->name, vec_len (hw->name));
+ *t0p += ntohs (t->l);
+}
+
+static void
+add_version_tlv (vnet_hw_interface_t * hw, u8 ** t0p)
+{
+ cdp_tlv_t *t = (cdp_tlv_t *) * t0p;
+
+ t->t = htons (CDP_TLV_version);
+ t->l = htons (12 + sizeof (*t));
+ clib_memcpy (&t->v, "VPP Software", 12);
+ *t0p += ntohs (t->l);
+}
+
+static void
+add_platform_tlv (vnet_hw_interface_t * hw, u8 ** t0p)
+{
+ cdp_tlv_t *t = (cdp_tlv_t *) * t0p;
+
+ t->t = htons (CDP_TLV_platform);
+ t->l = htons (2 + sizeof (*t));
+ clib_memcpy (&t->v, "SW", 2);
+ *t0p += ntohs (t->l);
+}
+
+static void
+add_capability_tlv (vnet_hw_interface_t * hw, u8 ** t0p)
+{
+ cdp_tlv_t *t = (cdp_tlv_t *) * t0p;
+ u32 capabilities;
+
+ t->t = htons (CDP_TLV_capabilities);
+ t->l = htons (4 + sizeof (*t));
+ capabilities = CDP_ROUTER_DEVICE;
+ capabilities = htonl (capabilities);
+ clib_memcpy (&t->v, &capabilities, sizeof (capabilities));
+ *t0p += ntohs (t->l);
+}
+
+static void
+add_tlvs (cdp_main_t * cm, vnet_hw_interface_t * hw, u8 ** t0p)
+{
+ add_device_name_tlv (hw, t0p);
+ add_port_id_tlv (hw, t0p);
+ add_version_tlv (hw, t0p);
+ add_platform_tlv (hw, t0p);
+ add_capability_tlv (hw, t0p);
+}
+
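+/*
+ * The three send_*_hello () routines below differ mainly in the
+ * link-layer encapsulation painted into the packet template; the TLV
+ * payload and checksum handling are identical.
+ */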
+/*
+ * send a cdp pkt on an ethernet interface
+ */
+static void
+send_ethernet_hello (cdp_main_t * cm, cdp_neighbor_t * n, int count)
+{
+ u32 *to_next;
+ ethernet_llc_snap_and_cdp_header_t *h0;
+ vnet_hw_interface_t *hw;
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u8 *t0;
+ u16 checksum;
+ int nbytes_to_checksum;
+ int i;
+ vlib_frame_t *f;
+ vlib_main_t *vm = cm->vlib_main;
+ vnet_main_t *vnm = cm->vnet_main;
+
+ for (i = 0; i < count; i++)
+ {
+ /*
+ * see cdp_periodic_init() to understand what's already painted
+ * into the buffer by the packet template mechanism
+ */
+ h0 = vlib_packet_template_get_packet
+ (vm, &cm->packet_templates[n->packet_template_index], &bi0);
+
+ /* Add the interface's ethernet source address */
+ hw = vnet_get_sup_hw_interface (vnm, n->sw_if_index);
+
+ clib_memcpy (h0->ethernet.src_address, hw->hw_address,
+ vec_len (hw->hw_address));
+
+ t0 = (u8 *) & h0->cdp.data;
+
+ /* add TLVs */
+ add_tlvs (cm, hw, &t0);
+
+ /* add the cdp packet checksum */
+ nbytes_to_checksum = t0 - (u8 *) & h0->cdp;
+ checksum = cdp_checksum (&h0->cdp, nbytes_to_checksum);
+ h0->cdp.checksum = htons (checksum);
+
+ /* Set the outbound packet length */
+ b0 = vlib_get_buffer (vm, bi0);
+ b0->current_length = nbytes_to_checksum + sizeof (*h0)
+ - sizeof (cdp_hdr_t);
+
+ /* And the outbound interface */
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = hw->sw_if_index;
+
+ /* Set the 802.3 ethernet length */
+ h0->ethernet.len = htons (b0->current_length
+ - sizeof (ethernet_802_3_header_t));
+
+ /* And output the packet on the correct interface */
+ f = vlib_get_frame_to_node (vm, hw->output_node_index);
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi0;
+ f->n_vectors = 1;
+
+ vlib_put_frame_to_node (vm, hw->output_node_index, f);
+ n->last_sent = vlib_time_now (vm);
+ }
+}
+
+/*
+ * send a cdp pkt on an hdlc interface
+ */
+static void
+send_hdlc_hello (cdp_main_t * cm, cdp_neighbor_t * n, int count)
+{
+ u32 *to_next;
+ hdlc_and_cdp_header_t *h0;
+ vnet_hw_interface_t *hw;
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u8 *t0;
+ u16 checksum;
+ int nbytes_to_checksum;
+ int i;
+ vlib_frame_t *f;
+ vlib_main_t *vm = cm->vlib_main;
+ vnet_main_t *vnm = cm->vnet_main;
+
+ for (i = 0; i < count; i++)
+ {
+ /*
+ * see cdp_periodic_init() to understand what's already painted
+ * into the buffer by the packet template mechanism
+ */
+ h0 = vlib_packet_template_get_packet
+ (vm, &cm->packet_templates[n->packet_template_index], &bi0);
+
+ hw = vnet_get_sup_hw_interface (vnm, n->sw_if_index);
+
+ t0 = (u8 *) & h0->cdp.data;
+
+ /* add TLVs */
+ add_tlvs (cm, hw, &t0);
+
+ /* add the cdp packet checksum */
+ nbytes_to_checksum = t0 - (u8 *) & h0->cdp;
+ checksum = cdp_checksum (&h0->cdp, nbytes_to_checksum);
+ h0->cdp.checksum = htons (checksum);
+
+ /* Set the outbound packet length */
+ b0 = vlib_get_buffer (vm, bi0);
+ b0->current_length = nbytes_to_checksum + sizeof (*h0)
+ - sizeof (cdp_hdr_t);
+
+ /* And output the packet on the correct interface */
+ f = vlib_get_frame_to_node (vm, hw->output_node_index);
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi0;
+ f->n_vectors = 1;
+
+ vlib_put_frame_to_node (vm, hw->output_node_index, f);
+ n->last_sent = vlib_time_now (vm);
+ }
+}
+
+/*
+ * send a cdp pkt on an srp interface
+ */
+static void
+send_srp_hello (cdp_main_t * cm, cdp_neighbor_t * n, int count)
+{
+ u32 *to_next;
+ srp_and_cdp_header_t *h0;
+ vnet_hw_interface_t *hw;
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u8 *t0;
+ u16 checksum;
+ int nbytes_to_checksum;
+ int i;
+ vlib_frame_t *f;
+ vlib_main_t *vm = cm->vlib_main;
+ vnet_main_t *vnm = cm->vnet_main;
+
+ for (i = 0; i < count; i++)
+ {
+ /*
+ * see cdp_periodic_init() to understand what's already painted
+ * into the buffer by the packet template mechanism
+ */
+ h0 = vlib_packet_template_get_packet
+ (vm, &cm->packet_templates[n->packet_template_index], &bi0);
+
+ hw = vnet_get_sup_hw_interface (vnm, n->sw_if_index);
+
+ t0 = (u8 *) & h0->cdp.data;
+
+ /* add TLVs */
+ add_tlvs (cm, hw, &t0);
+
+ /* Add the interface's ethernet source address */
+ clib_memcpy (h0->ethernet.src_address, hw->hw_address,
+ vec_len (hw->hw_address));
+
+ /* add the cdp packet checksum */
+ nbytes_to_checksum = t0 - (u8 *) & h0->cdp;
+ checksum = cdp_checksum (&h0->cdp, nbytes_to_checksum);
+ h0->cdp.checksum = htons (checksum);
+
+ /* Set the outbound packet length */
+ b0 = vlib_get_buffer (vm, bi0);
+ b0->current_length = nbytes_to_checksum + sizeof (*h0)
+ - sizeof (cdp_hdr_t);
+
+ /* And output the packet on the correct interface */
+ f = vlib_get_frame_to_node (vm, hw->output_node_index);
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi0;
+ f->n_vectors = 1;
+
+ vlib_put_frame_to_node (vm, hw->output_node_index, f);
+ n->last_sent = vlib_time_now (vm);
+ }
+}
+
+/*
+ * Decide which cdp packet template to use
+ */
+static int
+pick_packet_template (cdp_main_t * cm, cdp_neighbor_t * n)
+{
+ n->packet_template_index = CDP_PACKET_TEMPLATE_ETHERNET;
+
+ return 0;
+}
+
+/* Send a cdp neighbor announcement */
+static void
+send_hello (cdp_main_t * cm, cdp_neighbor_t * n, int count)
+{
+ if (n->packet_template_index == (u8) ~ 0)
+ {
+ /* If we don't know how to talk to this peer, don't try again */
+ if (pick_packet_template (cm, n))
+ {
+ n->last_sent = 1e70;
+ return;
+ }
+ }
+
+ switch (n->packet_template_index)
+ {
+ case CDP_PACKET_TEMPLATE_ETHERNET:
+ send_ethernet_hello (cm, n, count);
+ break;
+
+ case CDP_PACKET_TEMPLATE_HDLC:
+ send_hdlc_hello (cm, n, count);
+ break;
+
+ case CDP_PACKET_TEMPLATE_SRP:
+ send_srp_hello (cm, n, count);
+ break;
+
+ default:
+ ASSERT (0);
+ }
+ n->last_sent = vlib_time_now (cm->vlib_main);
+}
+
+static void
+delete_neighbor (cdp_main_t * cm, cdp_neighbor_t * n, int want_broadcast)
+{
+ hash_unset (cm->neighbor_by_sw_if_index, n->sw_if_index);
+ vec_free (n->device_name);
+ vec_free (n->version);
+ vec_free (n->port_id);
+ vec_free (n->platform);
+ vec_free (n->last_rx_pkt);
+ pool_put (cm->neighbors, n);
+}
+
+void
+cdp_periodic (vlib_main_t * vm)
+{
+ cdp_main_t *cm = &cdp_main;
+ cdp_neighbor_t *n;
+ f64 now = vlib_time_now (vm);
+ vnet_sw_interface_t *sw;
+ static u32 *delete_list = 0;
+ int i;
+ static cdp_neighbor_t **n_list = 0;
+
+ /* *INDENT-OFF* */
+ pool_foreach (n, cm->neighbors,
+ ({
+ vec_add1 (n_list, n);
+ }));
+ /* *INDENT-ON* */
+
+ /* Across all cdp neighbors known to the system */
+ for (i = 0; i < vec_len (n_list); i++)
+ {
+ n = n_list[i];
+
+ /* "no cdp run" provisioned on the interface? */
+ if (n->disabled == 1)
+ continue;
+
+ sw = vnet_get_sw_interface (cm->vnet_main, n->sw_if_index);
+
+ /* Interface shutdown or rx timeout? */
+ if (!(sw->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ || (now > (n->last_heard + (f64) n->ttl_in_seconds)))
+ /* add to list of neighbors to delete */
+ vec_add1 (delete_list, n - cm->neighbors);
+ else if (n->last_sent == 0.0)
+ /* First time, send 3 hellos */
+ send_hello (cm, n, 3 /* three to begin with */ );
+ else if (now > (n->last_sent + (((f64) n->ttl_in_seconds) / 6.0)))
+ /* Normal keepalive, send one */
+ send_hello (cm, n, 1 /* one as a keepalive */ );
+ }
+
+ for (i = 0; i < vec_len (delete_list); i++)
+ {
+ n = vec_elt_at_index (cm->neighbors, delete_list[i]);
+ delete_neighbor (cm, n, 1);
+ }
+ if (delete_list)
+ _vec_len (delete_list) = 0;
+ if (n_list)
+ _vec_len (n_list) = 0;
+}
+
+static clib_error_t *
+cdp_periodic_init (vlib_main_t * vm)
+{
+ cdp_main_t *cm = &cdp_main;
+
+ /* Create the ethernet cdp hello packet template */
+ {
+ ethernet_llc_snap_and_cdp_header_t h;
+
+ memset (&h, 0, sizeof (h));
+
+ /* Send to 01:00:0c:cc:cc:cc */
+ h.ethernet.dst_address[0] = 0x01;
+ /* h.ethernet.dst_address[1] = 0x00; (memset) */
+ h.ethernet.dst_address[2] = 0x0C;
+ h.ethernet.dst_address[3] = 0xCC;
+ h.ethernet.dst_address[4] = 0xCC;
+ h.ethernet.dst_address[5] = 0xCC;
+
+ /* leave src address blank (fill in at send time) */
+
+ /* leave length blank (fill in at send time) */
+
+ /* LLC */
+ h.llc.dst_sap = h.llc.src_sap = 0xAA; /* SNAP */
+ h.llc.control = 0x03; /* UI (no extended control bytes) */
+
+ /* SNAP */
+ /* h.snap.oui[0] = 0x00; (memset) */
+ /* h.snap.oui[1] = 0x00; (memset) */
+ h.snap.oui[2] = 0x0C; /* Cisco = 0x00000C */
+ h.snap.protocol = htons (0x2000); /* CDP = 0x2000 */
+
+ /* CDP */
+ h.cdp.version = 2;
+ h.cdp.ttl = 180;
+
+ vlib_packet_template_init
+ (vm, &cm->packet_templates[CDP_PACKET_TEMPLATE_ETHERNET],
+ /* data */ &h,
+ sizeof (h),
+ /* alloc chunk size */ 8,
+ "cdp-ethernet");
+ }
+
+#if 0 /* retain for reference */
+
+ /* Create the hdlc cdp hello packet template */
+ {
+ hdlc_and_cdp_header_t h;
+
+ memset (&h, 0, sizeof (h));
+
+ h.hdlc.address = 0x0f;
+ /* h.hdlc.control = 0; (memset) */
+ h.hdlc.protocol = htons (0x2000); /* CDP = 0x2000 */
+
+ /* CDP */
+ h.cdp.version = 2;
+ h.cdp.ttl = 180;
+
+ vlib_packet_template_init
+ (vm, &cm->packet_templates[CDP_PACKET_TEMPLATE_HDLC],
+ /* data */ &h,
+ sizeof (h),
+ /* alloc chunk size */ 8,
+ "cdp-hdlc");
+ }
+
+ /* Create the srp cdp hello packet template */
+ {
+ srp_and_cdp_header_t h;
+
+ memset (&h, 0, sizeof (h));
+
+ /* Send to 01:00:0c:cc:cc:cc */
+ h.ethernet.dst_address[0] = 0x01;
+ /* h.ethernet.dst_address[1] = 0x00; (memset) */
+ h.ethernet.dst_address[2] = 0x0C;
+ h.ethernet.dst_address[3] = 0xCC;
+ h.ethernet.dst_address[4] = 0xCC;
+ h.ethernet.dst_address[5] = 0xCC;
+
+ /* leave src address blank (fill in at send time) */
+
+ /* The srp header is filled in at xmt */
+ h.srp.ttl = 1;
+ h.srp.priority = 7;
+ h.srp.mode = SRP_MODE_data;
+ srp_header_compute_parity (&h.srp);
+
+ /* Inner ring and parity will be set at send time */
+
+ h.ethernet.type = htons (0x2000); /* CDP = 0x2000 */
+
+ /* CDP */
+ h.cdp.version = 2;
+ h.cdp.ttl = 180;
+
+ vlib_packet_template_init
+ (vm, &cm->packet_templates[CDP_PACKET_TEMPLATE_SRP],
+ /* data */ &h,
+ sizeof (h),
+ /* alloc chunk size */ 8,
+ "cdp-srp");
+ }
+#endif
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (cdp_periodic_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/cdp/cdp_protocol.h b/src/vnet/cdp/cdp_protocol.h
new file mode 100644
index 00000000000..dc6c66d52c3
--- /dev/null
+++ b/src/vnet/cdp/cdp_protocol.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_cdp_protocol_h__
+#define __included_cdp_protocol_h__
+
+#include <vnet/ethernet/ethernet.h> /* for ethernet_header_t */
+#include <vnet/llc/llc.h>
+#include <vnet/snap/snap.h>
+#include <vnet/srp/packet.h>
+
+typedef CLIB_PACKED (struct
+ {
+ u8 version;
+ u8 ttl;
+ u16 checksum; /* 1's complement of the 1's complement sum */
+ u8 data[0];
+ }) cdp_hdr_t;
+
+typedef struct
+{
+ u8 dst_address[6];
+ u8 src_address[6];
+ u16 len;
+} ethernet_802_3_header_t;
+
+typedef CLIB_PACKED (struct
+ {
+ ethernet_802_3_header_t ethernet;
+ llc_header_t llc; snap_header_t snap; cdp_hdr_t cdp;
+ }) ethernet_llc_snap_and_cdp_header_t;
+
+typedef CLIB_PACKED (struct
+ {
+ hdlc_header_t hdlc; cdp_hdr_t cdp;
+ }) hdlc_and_cdp_header_t;
+
+typedef CLIB_PACKED (struct
+ {
+ srp_header_t srp;
+ ethernet_header_t ethernet; cdp_hdr_t cdp;
+ }) srp_and_cdp_header_t;
+
+typedef CLIB_PACKED (struct
+ {
+ u16 t;
+ u16 l;
+ u8 v[0];
+ }) cdp_tlv_t;
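+
+/*
+ * Illustrative sketch (an exposition aid, not used elsewhere): walk the
+ * TLVs that follow a cdp_hdr_t. The "payload" / "payload_len" names are
+ * hypothetical. Note that tlv->l covers the 4-octet TLV header (t, l)
+ * as well as the value.
+ */
+static inline void
+cdp_tlv_walk_example (u8 * payload, u32 payload_len)
+{
+ u8 *p = payload;
+
+ while (p + sizeof (cdp_tlv_t) <= payload + payload_len)
+ {
+ cdp_tlv_t *tlv = (cdp_tlv_t *) p;
+ u16 len = ntohs (tlv->l);
+
+ if (len < sizeof (cdp_tlv_t))
+ break; /* malformed TLV */
+ /* ntohs (tlv->t) indexes cdp_tlv_code_t; tlv->v holds len - 4 octets */
+ p += len;
+ }
+}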
+
+/*
+ * TLV codes.
+ */
+#define foreach_cdp_tlv_type \
+_(unused) \
+_(device_name) /* uniquely identifies the device */ \
+_(address) /* list of addresses this device has */ \
+_(port_id) /* port CDP packet was sent out on */ \
+_(capabilities) /* funct. capabilities of the device */ \
+_(version) /* version */ \
+_(platform) /* hardware platform of this device */ \
+_(ipprefix) /* An IP network prefix */ \
+_(hello) /* Protocol piggyback hello msg */ \
+_(vtp_domain) /* VTP management domain */ \
+_(native_vlan) /* Native VLAN number */ \
+_(duplex) /* The interface duplex mode */ \
+_(appl_vlan) /* Appliance VLAN-ID TLV */ \
+_(trigger) /* For sending trigger TLV msgs. */ \
+_(power) /* Power consumption of that device */ \
+_(mtu) /* MTU defined for sending intf. */ \
+_(trust) /* Extended trust TLV */ \
+_(cos) /* COS for Untrusted Port TLV */ \
+_(sysname) /* System name (FQDN of device) */ \
+_(sysobject) /* OID of sysObjectID MIB object */ \
+_(mgmt_addr) /* SNMP manageable addrs. of device */ \
+_(physical_loc) /* Physical Location of the device */ \
+_(mgmt_addr2) /* External Port-ID */ \
+_(power_requested) \
+_(power_available) \
+_(port_unidirectional) \
+_(unknown_28) \
+_(energywise) \
+_(unknown_30) \
+_(spare_poe)
+
+typedef enum
+{
+#define _(t) CDP_TLV_##t,
+ foreach_cdp_tlv_type
+#undef _
+} cdp_tlv_code_t;
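+
+/* The enum values double as the on-wire CDP type codes:
+ CDP_TLV_device_name = 1, CDP_TLV_address = 2, and so on */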
+
+/*
+ The address TLV looks as follows:
+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Number of addresses |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | IDRP encoded address |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ An address is encoded in IDRP format:
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | PT | PT Length | Protocol (variable) ...
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Address length | Address (variable) ...
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ PT: Protocol type
+ 1 = NLPID format
+ 2 = 802.2 format
+
+ PT Length:
+ Length of protocol field, 1 for PT = 1, and either 3 or 8 for
+ 802.2 format depending if SNAP is used for PT = 2.
+
+ The encodings for the other protocols have the following format:
+
+ field: <SSAP><DSAP><CTRL><-------OUI------><protocol_TYPE>
+ | | | | | | | | |
+ bytes: 0 1 2 3 4 5 6 7 8
+
+ where the first 3 bytes are 0xAAAA03 for SNAP encoded addresses.
+ The OUI is 000000 for Ethernet and <protocol_TYPE>
+ is the assigned Ethernet type code for the particular protocol.
+ e.g. for DECnet the encoding is AAAA03 000000 6003,
+ for IPv6 the encoding is AAAA03 000000 86DD.
+*/
+
+/*
+ * Capabilities.
+ */
+
+#define CDP_ROUTER_DEVICE 0x0001
+#define CDP_TB_DEVICE 0x0002
+#define CDP_SRB_DEVICE 0x0004
+#define CDP_SWITCH_DEVICE 0x0008
+#define CDP_HOST_DEVICE 0x0010
+#define CDP_IGMP_DEVICE 0x0020
+#define CDP_REPEATER_DEVICE 0x0040
+
+/*
+ The protocol-hello TLV looks as follows:
+
+ 0 1 2 3
+ 012345678901234567890123456789012345678
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Type | Length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | OUI |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Protocol ID |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | up to 27 bytes of message |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+
+/*
+ * These macros define the valid values for the Duplex TLV.
+ */
+#define CDP_DUPLEX_TLV_HALF 0x0
+#define CDP_DUPLEX_TLV_FULL 0x1
+
+#endif /* __included_cdp_protocol_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/classify/README b/src/vnet/classify/README
new file mode 100644
index 00000000000..1ef5ab5ac34
--- /dev/null
+++ b/src/vnet/classify/README
@@ -0,0 +1,180 @@
+=== vnet classifier theory of operation ===
+
+The vnet classifier trades off simplicity against perf / scale
+characteristics. At a certain level, it's a dumb robot. Given an
+incoming packet, search an ordered list of (mask, match) tables. If
+the classifier finds a matching entry, take the indicated action. If
+not, take a last-resort action.
+
+We use the MMX-unit to match or hash 16 octets at a time. For hardware
+backward compatibility, the code does not [currently] use 256-bit
+(32-octet) vector instructions.
+
+Effective use of the classifier centers around building table lists
+which "hit" as soon as practicable. In many cases, established
+sessions hit in the first table. In this mode of operation, the
+classifier easily processes multiple MPPS / core - even with millions
+of sessions in the database. Searching 357 tables on a regular basis
+will neatly solve the halting problem.
+
+==== Basic operation ====
+
+The classifier mask-and-match operation proceeds as follows. Given a
+starting classifier table index, lay hands on the indicated mask
+vector. When building tables, we arrange for the mask to obey
+mmx-unit (16-octet) alignment.
+
+We know that the first octet of packet data starts on a cache-line
+boundary. Further, it's reasonably likely that folks won't want to use
+the generalized classifier on the L2 header; preferring to decode the
+Ethertype manually. That scheme makes it easy to select among ip4 /
+ip6 / MPLS, etc. classifier table sets.
+
+A no-vlan-tag L2 header is 14 octets long. A typical ipv4 header
+begins with the octets 0x4500: version=4, header_length=5, DSCP=0,
+ECN=0. If one doesn't intend to classify on (DSCP, ECN) - the typical
+case - we program the classifier to skip the first 16-octet vector.
+
+To classify untagged ipv4 packets on source address, we program the
+classifier to skip one vector, and mask-and-match one vector.
+
+The basic mask-and-match operation looks like this:
+
+ switch (t->match_n_vectors)
+ {
+ case 1:
+ result = (data[0 + t->skip_n_vectors] & mask[0]) ^ key[0];
+ break;
+
+ case 2:
+ result = (data[0 + t->skip_n_vectors] & mask[0]) ^ key[0];
+ result |= (data[1 + t->skip_n_vectors] & mask[1]) ^ key[1];
+ break;
+
+ <etc>
+ }
+
+ result_mask = u32x4_zero_byte_mask (result);
+ if (result_mask == 0xffff)
+ return (v);
+
+Net of setup, it costs a couple of clock cycles to mask-and-match 16
+octets.
+
+At the risk of belaboring an obvious point, the control-plane
+'''must''' pay attention to detail. When skipping one (or more)
+vectors, masks and matches must reflect that decision. See
+.../vnet/vnet/classify/vnet_classify.c:unformat_classify_[mask|match]. Note
+that vec_validate (xxx, 13) creates a 14-element vector.
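+
+A concrete sketch of the untagged-ipv4-source case above. The offsets
+assume the no-vlan L2 framing described earlier; nothing here is
+prescriptive:
+
+ u8 *mask = 0;
+
+ /* match 1 vector: the mask must be 1*16 octets long */
+ vec_validate (mask, 15);
+
+ /* ip4 src addr occupies L2 octets 26..29; having skipped one
+ 16-octet vector, those are octets 10..13 of the matched vector */
+ memset (mask + 10, 0xff, 4);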
+
+==== Creating a classifier table ====
+
+To create a new classifier table via the control-plane API, send a
+"classify_add_del_table" message. The underlying action routine,
+vnet_classify_add_del_table(...), is located in
+.../vnet/vnet/classify/vnet_classify.c, and has the following
+prototype:
+
+ int vnet_classify_add_del_table (vnet_classify_main_t * cm,
+ u8 * mask,
+ u32 nbuckets,
+ u32 memory_size,
+ u32 skip,
+ u32 match,
+ u32 next_table_index,
+ u32 miss_next_index,
+ u32 * table_index,
+ int is_add)
+
+Pass cm = &vnet_classify_main if calling this routine directly. Mask,
+skip(_n_vectors) and match(_n_vectors) are as described above. Mask
+need not be aligned, but it must be match*16 octets in length. To
+avoid having your head explode, be absolutely certain that '''only'''
+the bits you intend to match on are set.
+
+The classifier uses thread-safe, no-reader-locking-required
+bounded-index extensible hashing. Nbuckets is the [fixed] size of the
+hash bucket vector. The algorithm works in constant time regardless of
+hash collisions, but wastes space when the bucket array is too
+small. A good rule of thumb: let nbuckets = approximate number of
+entries expected.
+
+At a significant cost in complexity, it would be possible to resize the
+bucket array dynamically. We have no plans to implement that function.
+
+Each classifier table has its own clib mheap memory allocation
+arena. To pick the memory_size parameter, note that each classifier
+table entry needs 16*(1 + match_n_vectors) bytes. Within reason, aim a
+bit high. Clib mheap memory uses o/s level virtual memory - not wired
+or hugetlb memory - so it's best not to scrimp on size.
+
+The "next_table_index" parameter is as described: the pool index in
+vnet_classify_main.tables of the next table to search. Code ~0 to
+indicate the end of the table list. 0 is a valid table index!
+
+We often create classification tables in reverse order -
+last-table-searched to first-table-searched - so we can easily set
+this parameter. Of course, one can manually adjust the data structure
+after-the-fact.
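+
+Putting the pieces together, a sketch - sizes and indices are
+illustrative, not prescriptive - which creates the skip-1 / match-1
+table built above:
+
+ u32 table_index = ~0;
+ int rv = vnet_classify_add_del_table
+ (&vnet_classify_main, mask,
+ /* nbuckets */ 32768,
+ /* memory_size */ 2 << 20,
+ /* skip */ 1,
+ /* match */ 1,
+ /* next_table_index */ ~0,
+ /* miss_next_index */ ~0,
+ &table_index, 1 /* is_add */);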
+
+Specific classifier client nodes - for example,
+.../vnet/vnet/classify/ip_classify.c - interpret the "miss_next_index"
+parameter as a vpp graph-node next index. When packet classification
+fails to produce a match, ip_classify_inline sends packets to the
+indicated disposition. A classifier application might program this
+parameter to send packets which don't match an existing session to a
+"first-sign-of-life, create-new-session" node.
+
+Finally, the is_add parameter indicates whether to add or delete the
+indicated table. The delete case implicitly terminates all sessions
+with extreme prejudice, by freeing the specified clib mheap.
+
+==== Creating a classifier session ====
+
+To create a new classifier session via the control-plane API, send a
+"classify_add_del_session" message. The underlying action routine,
+vnet_classify_add_del_session(...), is located in
+.../vnet/vnet/classify/vnet_classify.c, and has the following
+prototype:
+
+int vnet_classify_add_del_session (vnet_classify_main_t * cm,
+ u32 table_index,
+ u8 * match,
+ u32 hit_next_index,
+ u32 opaque_index,
+ i32 advance,
+ int is_add)
+
+Pass cm = &vnet_classify_main if calling this routine directly. Table
+index specifies the table which receives the new session / contains
+the session to delete depending on is_add.
+
+Match is the key for the indicated session. It need not be aligned,
+but it must be table->match_n_vectors*16 octets in length. As a
+courtesy, vnet_classify_add_del_session applies the table's mask to
+the stored key-value. In this way, one can create a session by passing
+unmasked (packet_data + offset) as the "match" parameter, and end up
+with unconfusing session keys.
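+
+Continuing the sketch, with table_index from the previous example and
+"match_data" a hypothetical pointer to unmasked packet octets at the
+offset the table matches:
+
+ int rv = vnet_classify_add_del_session
+ (&vnet_classify_main, table_index, match_data,
+ /* hit_next_index */ ~0,
+ /* opaque_index */ 0,
+ /* advance */ 0, 1 /* is_add */);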
+
+Specific classifier client nodes - for example,
+.../vnet/vnet/classify/ip_classify.c - interpret the per-session
+hit_next_index parameter as a vpp graph-node next index. When packet
+classification produces a match, ip_classify_inline sends packets to
+the indicated disposition.
+
+ip4/6_classify place the per-session opaque_index parameter into
+vnet_buffer(b)->l2_classify.opaque_index; a slight misnomer, but
+anyhow classifier applications can send session-hit packets to
+specific graph nodes, with useful values in buffer metadata. Depending
+on the required semantics, we send known-session traffic to a certain
+node, with e.g. a session pool index in buffer metadata. It's totally
+up to the control-plane and the specific use-case.
+
+Finally, nodes such as ip4/6-classify apply the advance parameter as a
+[signed!] argument to vlib_buffer_advance(...), to "consume" a
+networking layer. Example: if we classify incoming tunneled IP packets
+by (inner) source/dest address and source/dest port, we might choose
+to decapsulate and reencapsulate the inner packet. In such a case,
+program the advance parameter to perform the tunnel decapsulation, and
+program next_index to send traffic to a node which uses
+e.g. opaque_index to output traffic on a specific tunnel interface.
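+
+For instance - a sketch, assuming a no-options outer ip4 header - one
+would program the session with
+
+ advance = 20; /* sizeof (ip4_header_t); a hit consumes the outer header */
+
+so that the inner packet sits at the head of the buffer for the next
+node.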
diff --git a/src/vnet/classify/flow_classify.c b/src/vnet/classify/flow_classify.c
new file mode 100644
index 00000000000..0a624204e34
--- /dev/null
+++ b/src/vnet/classify/flow_classify.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/classify/flow_classify.h>
+
+static void
+vnet_flow_classify_feature_enable (vlib_main_t * vnm,
+ flow_classify_main_t * fcm,
+ u32 sw_if_index,
+ flow_classify_table_id_t tid,
+ int feature_enable)
+{
+ vnet_feature_config_main_t *vfcm;
+ u8 arc;
+
+ if (tid == FLOW_CLASSIFY_TABLE_IP4)
+ {
+ vnet_feature_enable_disable ("ip4-unicast", "ip4-flow-classify",
+ sw_if_index, feature_enable, 0, 0);
+ arc = vnet_get_feature_arc_index ("ip4-unicast");
+ }
+ else
+ {
+ vnet_feature_enable_disable ("ip6-unicast", "ip6-flow-classify",
+ sw_if_index, feature_enable, 0, 0);
+ arc = vnet_get_feature_arc_index ("ip6-unicast");
+ }
+
+ vfcm = vnet_get_feature_arc_config_main (arc);
+ fcm->vnet_config_main[tid] = &vfcm->config_main;
+}
+
+int vnet_set_flow_classify_intfc (vlib_main_t * vm, u32 sw_if_index,
+ u32 ip4_table_index, u32 ip6_table_index,
+ u32 is_add)
+{
+ flow_classify_main_t * fcm = &flow_classify_main;
+ vnet_classify_main_t * vcm = fcm->vnet_classify_main;
+ u32 pct[FLOW_CLASSIFY_N_TABLES] = {ip4_table_index, ip6_table_index};
+ u32 ti;
+
+ /* Assume that we've validated sw_if_index in the API layer */
+
+ for (ti = 0; ti < FLOW_CLASSIFY_N_TABLES; ti++)
+ {
+ if (pct[ti] == ~0)
+ continue;
+
+ if (pool_is_free_index (vcm->tables, pct[ti]))
+ return VNET_API_ERROR_NO_SUCH_TABLE;
+
+ vec_validate_init_empty
+ (fcm->classify_table_index_by_sw_if_index[ti], sw_if_index, ~0);
+
+ /* Reject any DEL operation with wrong sw_if_index */
+ if (!is_add &&
+ (pct[ti] != fcm->classify_table_index_by_sw_if_index[ti][sw_if_index]))
+ {
+ clib_warning ("Non-existent intf_idx=%d with table_index=%d for delete",
+ sw_if_index, pct[ti]);
+ return VNET_API_ERROR_NO_SUCH_TABLE;
+ }
+
+ /* Return ok on ADD operation if feature is already enabled */
+ if (is_add &&
+ fcm->classify_table_index_by_sw_if_index[ti][sw_if_index] != ~0)
+ return 0;
+
+ vnet_flow_classify_feature_enable (vm, fcm, sw_if_index, ti, is_add);
+
+ if (is_add)
+ fcm->classify_table_index_by_sw_if_index[ti][sw_if_index] = pct[ti];
+ else
+ fcm->classify_table_index_by_sw_if_index[ti][sw_if_index] = ~0;
+ }
+
+
+ return 0;
+}
+
+static clib_error_t *
+set_flow_classify_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ u32 sw_if_index = ~0;
+ u32 ip4_table_index = ~0;
+ u32 ip6_table_index = ~0;
+ u32 is_add = 1;
+ u32 idx_cnt = 0;
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "interface %U", unformat_vnet_sw_interface,
+ vnm, &sw_if_index))
+ ;
+ else if (unformat (input, "ip4-table %d", &ip4_table_index))
+ idx_cnt++;
+ else if (unformat (input, "ip6-table %d", &ip6_table_index))
+ idx_cnt++;
+ else if (unformat (input, "del"))
+ is_add = 0;
+ else
+ break;
+ }
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "Interface must be specified.");
+
+ if (!idx_cnt)
+ return clib_error_return (0, "Table index should be specified.");
+
+ if (idx_cnt > 1)
+ return clib_error_return (0, "Only one table index per API is allowed.");
+
+ rv = vnet_set_flow_classify_intfc(vm, sw_if_index, ip4_table_index,
+ ip6_table_index, is_add);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_NO_MATCHING_INTERFACE:
+ return clib_error_return (0, "No such interface");
+
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ case VNET_API_ERROR_NO_SUCH_TABLE:
+ return clib_error_return (0, "No such classifier table");
+
+ default:
+ return clib_error_return (0, "vnet_set_flow_classify_intfc returned %d",
+ rv);
+ }
+ return 0;
+}
+
+VLIB_CLI_COMMAND (set_flow_classify_command, static) = {
+ .path = "set flow classify",
+ .short_help =
+ "set flow classify interface <int> [ip4-table <index>]\n"
+ " [ip6-table <index>] [del]",
+ .function = set_flow_classify_command_fn,
+};
+
+static uword
+unformat_table_type (unformat_input_t * input, va_list * va)
+{
+ u32 * r = va_arg (*va, u32 *);
+ u32 tid;
+
+ if (unformat (input, "ip4"))
+ tid = FLOW_CLASSIFY_TABLE_IP4;
+ else if (unformat (input, "ip6"))
+ tid = FLOW_CLASSIFY_TABLE_IP6;
+ else
+ return 0;
+
+ *r = tid;
+ return 1;
+}
+
+static clib_error_t *
+show_flow_classify_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ flow_classify_main_t * fcm = &flow_classify_main;
+ u32 type = FLOW_CLASSIFY_N_TABLES;
+ u32 * vec_tbl;
+ int i;
+
+ if (unformat (input, "type %U", unformat_table_type, &type))
+ ;
+ else
+ return clib_error_return (0, "Type must be specified.");;
+
+ if (type == FLOW_CLASSIFY_N_TABLES)
+ return clib_error_return (0, "Invalid table type.");
+
+ vec_tbl = fcm->classify_table_index_by_sw_if_index[type];
+
+ if (vec_len(vec_tbl))
+ vlib_cli_output (vm, "%10s%20s\t\t%s", "Intfc idx", "Classify table",
+ "Interface name");
+ else
+ vlib_cli_output (vm, "No tables configured.");
+
+ for (i = 0; i < vec_len (vec_tbl); i++)
+ {
+ if (vec_elt(vec_tbl, i) == ~0)
+ continue;
+
+ vlib_cli_output (vm, "%10d%20d\t\t%U", i, vec_elt(vec_tbl, i),
+ format_vnet_sw_if_index_name, fcm->vnet_main, i);
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (show_flow_classify_command, static) = {
+ .path = "show classify flow",
+ .short_help = "show classify flow type [ip4|ip6]",
+ .function = show_flow_classify_command_fn,
+};
diff --git a/src/vnet/classify/flow_classify.h b/src/vnet/classify/flow_classify.h
new file mode 100644
index 00000000000..3ae04cd7b21
--- /dev/null
+++ b/src/vnet/classify/flow_classify.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __included_vnet_flow_classify_h__
+#define __included_vnet_flow_classify_h__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/classify/vnet_classify.h>
+
+typedef enum {
+ FLOW_CLASSIFY_TABLE_IP4,
+ FLOW_CLASSIFY_TABLE_IP6,
+ FLOW_CLASSIFY_N_TABLES,
+} flow_classify_table_id_t;
+
+typedef enum {
+ FLOW_CLASSIFY_NEXT_INDEX_DROP,
+ FLOW_CLASSIFY_NEXT_INDEX_N_NEXT,
+} flow_classify_next_index_t;
+
+typedef struct {
+ /* Classifier table vectors */
+ u32 * classify_table_index_by_sw_if_index [FLOW_CLASSIFY_N_TABLES];
+
+ /* Convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+ vnet_classify_main_t * vnet_classify_main;
+ vnet_config_main_t * vnet_config_main [FLOW_CLASSIFY_N_TABLES];
+} flow_classify_main_t;
+
+flow_classify_main_t flow_classify_main;
+
+int vnet_set_flow_classify_intfc (vlib_main_t * vm, u32 sw_if_index,
+ u32 ip4_table_index, u32 ip6_table_index,
+ u32 is_add);
+
+#endif /* __included_vnet_flow_classify_h__ */
diff --git a/src/vnet/classify/flow_classify_node.c b/src/vnet/classify/flow_classify_node.c
new file mode 100644
index 00000000000..d3261d33bca
--- /dev/null
+++ b/src/vnet/classify/flow_classify_node.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/classify/flow_classify.h>
+#include <vnet/classify/vnet_classify.h>
+
+typedef struct {
+ u32 sw_if_index;
+ u32 next_index;
+ u32 table_index;
+ u32 offset;
+} flow_classify_trace_t;
+
+static u8 *
+format_flow_classify_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ flow_classify_trace_t * t = va_arg (*args, flow_classify_trace_t *);
+
+ s = format (s, "FLOW_CLASSIFY: sw_if_index %d next %d table %d offset %d",
+ t->sw_if_index, t->next_index, t->table_index, t->offset);
+ return s;
+}
+
+#define foreach_flow_classify_error \
+_(MISS, "Flow classify misses") \
+_(HIT, "Flow classify hits") \
+_(CHAIN_HIT, "Flow classify hits after chain walk") \
+_(DROP, "Flow classify action drop")
+
+typedef enum {
+#define _(sym,str) FLOW_CLASSIFY_ERROR_##sym,
+ foreach_flow_classify_error
+#undef _
+ FLOW_CLASSIFY_N_ERROR,
+} flow_classify_error_t;
+
+static char * flow_classify_error_strings[] = {
+#define _(sym,string) string,
+ foreach_flow_classify_error
+#undef _
+};
+
+static inline uword
+flow_classify_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ flow_classify_table_id_t tid)
+{
+ u32 n_left_from, * from, * to_next;
+ flow_classify_next_index_t next_index;
+ flow_classify_main_t * fcm = &flow_classify_main;
+ vnet_classify_main_t * vcm = fcm->vnet_classify_main;
+ f64 now = vlib_time_now (vm);
+ u32 hits = 0;
+ u32 misses = 0;
+ u32 chain_hits = 0;
+ u32 drop = 0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ /* First pass: compute hashes and prefetch buckets, so the second
+ pass's table lookups hit warm cache */
+ while (n_left_from > 2)
+ {
+ vlib_buffer_t * b0, * b1;
+ u32 bi0, bi1;
+ u8 * h0, * h1;
+ u32 sw_if_index0, sw_if_index1;
+ u32 table_index0, table_index1;
+ vnet_classify_table_t * t0, * t1;
+
+ /* Prefetch next iteration */
+ {
+ vlib_buffer_t * p1, * p2;
+
+ p1 = vlib_get_buffer (vm, from[1]);
+ p2 = vlib_get_buffer (vm, from[2]);
+
+ vlib_prefetch_buffer_header (p1, STORE);
+ CLIB_PREFETCH (p1->data, CLIB_CACHE_LINE_BYTES, STORE);
+ vlib_prefetch_buffer_header (p2, STORE);
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = b0->data;
+
+ bi1 = from[1];
+ b1 = vlib_get_buffer (vm, bi1);
+ h1 = b1->data;
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ table_index0 = fcm->classify_table_index_by_sw_if_index[tid][sw_if_index0];
+
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ table_index1 = fcm->classify_table_index_by_sw_if_index[tid][sw_if_index1];
+
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ t1 = pool_elt_at_index (vcm->tables, table_index1);
+
+ vnet_buffer(b0)->l2_classify.hash =
+ vnet_classify_hash_packet (t0, (u8 *) h0);
+
+ vnet_classify_prefetch_bucket (t0, vnet_buffer(b0)->l2_classify.hash);
+
+ vnet_buffer(b1)->l2_classify.hash =
+ vnet_classify_hash_packet (t1, (u8 *) h1);
+
+ vnet_classify_prefetch_bucket (t1, vnet_buffer(b1)->l2_classify.hash);
+
+ vnet_buffer(b0)->l2_classify.table_index = table_index0;
+
+ vnet_buffer(b1)->l2_classify.table_index = table_index1;
+
+ from += 2;
+ n_left_from -= 2;
+ }
+
+ while (n_left_from > 0)
+ {
+ vlib_buffer_t * b0;
+ u32 bi0;
+ u8 * h0;
+ u32 sw_if_index0;
+ u32 table_index0;
+ vnet_classify_table_t * t0;
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = b0->data;
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ table_index0 = fcm->classify_table_index_by_sw_if_index[tid][sw_if_index0];
+
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+ vnet_buffer(b0)->l2_classify.hash =
+ vnet_classify_hash_packet (t0, (u8 *) h0);
+
+ vnet_buffer(b0)->l2_classify.table_index = table_index0;
+ vnet_classify_prefetch_bucket (t0, vnet_buffer(b0)->l2_classify.hash);
+
+ from++;
+ n_left_from--;
+ }
+
+ next_index = node->cached_next_index;
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Not enough load/store slots to dual loop... */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0 = FLOW_CLASSIFY_NEXT_INDEX_DROP;
+ u32 table_index0;
+ vnet_classify_table_t * t0;
+ vnet_classify_entry_t * e0;
+ u64 hash0;
+ u8 * h0;
+
+ /* Stride 3 seems to work best */
+ if (PREDICT_TRUE (n_left_from > 3))
+ {
+ vlib_buffer_t * p1 = vlib_get_buffer(vm, from[3]);
+ vnet_classify_table_t * tp1;
+ u32 table_index1;
+ u64 phash1;
+
+ table_index1 = vnet_buffer(p1)->l2_classify.table_index;
+
+ if (PREDICT_TRUE (table_index1 != ~0))
+ {
+ tp1 = pool_elt_at_index (vcm->tables, table_index1);
+ phash1 = vnet_buffer(p1)->l2_classify.hash;
+ vnet_classify_prefetch_entry (tp1, phash1);
+ }
+ }
+
+ /* Speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = b0->data;
+ table_index0 = vnet_buffer(b0)->l2_classify.table_index;
+ e0 = 0;
+ t0 = 0;
+
+ vnet_get_config_data (fcm->vnet_config_main[tid],
+ &b0->current_config_index,
+ &next0,
+ /* # bytes of config data */ 0);
+
+ if (PREDICT_TRUE(table_index0 != ~0))
+ {
+ hash0 = vnet_buffer(b0)->l2_classify.hash;
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+ e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
+ if (e0)
+ {
+ hits++;
+ }
+ else
+ {
+ misses++;
+ /* unseen flow: flow classify auto-learns by adding a session
+ keyed on this packet */
+ vnet_classify_add_del_session (vcm, table_index0,
+ h0, ~0, 0, 0, 0, 0, 1);
+ /* look up the session we just added, to bump its hit counter */
+ vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
+ }
+ }
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ flow_classify_trace_t * t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+ t->next_index = next0;
+ t->table_index = t0 ? t0 - vcm->tables : ~0;
+ t->offset = (t0 && e0) ? vnet_classify_get_offset (t0, e0): ~0;
+ }
+
+ /* Verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ FLOW_CLASSIFY_ERROR_MISS,
+ misses);
+ vlib_node_increment_counter (vm, node->node_index,
+ FLOW_CLASSIFY_ERROR_HIT,
+ hits);
+ vlib_node_increment_counter (vm, node->node_index,
+ FLOW_CLASSIFY_ERROR_CHAIN_HIT,
+ chain_hits);
+ vlib_node_increment_counter (vm, node->node_index,
+ FLOW_CLASSIFY_ERROR_DROP,
+ drop);
+
+ return frame->n_vectors;
+}
+
+static uword
+ip4_flow_classify (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return flow_classify_inline(vm, node, frame, FLOW_CLASSIFY_TABLE_IP4);
+}
+
+VLIB_REGISTER_NODE (ip4_flow_classify_node) = {
+ .function = ip4_flow_classify,
+ .name = "ip4-flow-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_flow_classify_trace,
+ .n_errors = ARRAY_LEN(flow_classify_error_strings),
+ .error_strings = flow_classify_error_strings,
+ .n_next_nodes = FLOW_CLASSIFY_NEXT_INDEX_N_NEXT,
+ .next_nodes = {
+ [FLOW_CLASSIFY_NEXT_INDEX_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_flow_classify_node, ip4_flow_classify);
+
+static uword
+ip6_flow_classify (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return flow_classify_inline(vm, node, frame, FLOW_CLASSIFY_TABLE_IP6);
+}
+
+VLIB_REGISTER_NODE (ip6_flow_classify_node) = {
+ .function = ip6_flow_classify,
+ .name = "ip6-flow-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_flow_classify_trace,
+ .n_errors = ARRAY_LEN(flow_classify_error_strings),
+ .error_strings = flow_classify_error_strings,
+ .n_next_nodes = FLOW_CLASSIFY_NEXT_INDEX_N_NEXT,
+ .next_nodes = {
+ [FLOW_CLASSIFY_NEXT_INDEX_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_flow_classify_node, ip6_flow_classify);
+
+
+static clib_error_t *
+flow_classify_init (vlib_main_t *vm)
+{
+ flow_classify_main_t * fcm = &flow_classify_main;
+
+ fcm->vlib_main = vm;
+ fcm->vnet_main = vnet_get_main();
+ fcm->vnet_classify_main = &vnet_classify_main;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (flow_classify_init);
diff --git a/src/vnet/classify/input_acl.c b/src/vnet/classify/input_acl.c
new file mode 100644
index 00000000000..c446f2d687c
--- /dev/null
+++ b/src/vnet/classify/input_acl.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/ip/ip.h>
+#include <vnet/classify/vnet_classify.h>
+#include <vnet/classify/input_acl.h>
+
+input_acl_main_t input_acl_main;
+
+static int
+vnet_inacl_ip_feature_enable (vlib_main_t * vnm,
+ input_acl_main_t *am,
+ u32 sw_if_index,
+ input_acl_table_id_t tid,
+ int feature_enable)
+{
+
+ if (tid == INPUT_ACL_TABLE_L2)
+ {
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_ACL,
+ feature_enable);
+ }
+ else
+ { /* IP[46] */
+ vnet_feature_config_main_t *fcm;
+ u8 arc;
+
+ if (tid == INPUT_ACL_TABLE_IP4)
+ {
+ vnet_feature_enable_disable ("ip4-unicast", "ip4-inacl",
+ sw_if_index, feature_enable, 0, 0);
+ arc = vnet_get_feature_arc_index ("ip4-unicast");
+ }
+ else
+ {
+ vnet_feature_enable_disable ("ip6-unicast", "ip6-inacl",
+ sw_if_index, feature_enable, 0, 0);
+ arc = vnet_get_feature_arc_index ("ip6-unicast");
+ }
+
+ fcm = vnet_get_feature_arc_config_main (arc);
+ am->vnet_config_main[tid] = &fcm->config_main;
+ }
+
+ return 0;
+}
+
+int vnet_set_input_acl_intfc (vlib_main_t * vm, u32 sw_if_index,
+ u32 ip4_table_index,
+ u32 ip6_table_index,
+ u32 l2_table_index, u32 is_add)
+{
+ input_acl_main_t * am = &input_acl_main;
+ vnet_classify_main_t * vcm = am->vnet_classify_main;
+ u32 acl[INPUT_ACL_N_TABLES] = {ip4_table_index, ip6_table_index,
+ l2_table_index};
+ u32 ti;
+
+ /* Assume that we've validated sw_if_index in the API layer */
+
+ for (ti = 0; ti < INPUT_ACL_N_TABLES; ti++)
+ {
+ if (acl[ti] == ~0)
+ continue;
+
+ if (pool_is_free_index (vcm->tables, acl[ti]))
+ return VNET_API_ERROR_NO_SUCH_TABLE;
+
+ vec_validate_init_empty
+ (am->classify_table_index_by_sw_if_index[ti], sw_if_index, ~0);
+
+ /* Reject any DEL operation with wrong sw_if_index */
+ if (!is_add &&
+ (acl[ti] != am->classify_table_index_by_sw_if_index[ti][sw_if_index]))
+ {
+ clib_warning ("Non-existent intf_idx=%d with table_index=%d for delete",
+ sw_if_index, acl[ti]);
+ return VNET_API_ERROR_NO_SUCH_TABLE;
+ }
+
+ /* Return ok on ADD operation if feature is already enabled */
+ if (is_add &&
+ am->classify_table_index_by_sw_if_index[ti][sw_if_index] != ~0)
+ return 0;
+
+ vnet_inacl_ip_feature_enable (vm, am, sw_if_index, ti, is_add);
+
+ if (is_add)
+ am->classify_table_index_by_sw_if_index[ti][sw_if_index] = acl[ti];
+ else
+ am->classify_table_index_by_sw_if_index[ti][sw_if_index] = ~0;
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+set_input_acl_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ u32 sw_if_index = ~0;
+ u32 ip4_table_index = ~0;
+ u32 ip6_table_index = ~0;
+ u32 l2_table_index = ~0;
+ u32 is_add = 1;
+ u32 idx_cnt = 0;
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "intfc %U", unformat_vnet_sw_interface,
+ vnm, &sw_if_index))
+ ;
+ else if (unformat (input, "ip4-table %d", &ip4_table_index))
+ idx_cnt++;
+ else if (unformat (input, "ip6-table %d", &ip6_table_index))
+ idx_cnt++;
+ else if (unformat (input, "l2-table %d", &l2_table_index))
+ idx_cnt++;
+ else if (unformat (input, "del"))
+ is_add = 0;
+ else
+ break;
+ }
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "Interface must be specified.");
+
+ if (!idx_cnt)
+ return clib_error_return (0, "Table index should be specified.");
+
+ if (idx_cnt > 1)
+ return clib_error_return (0, "Only one table index per API is allowed.");
+
+ rv = vnet_set_input_acl_intfc (vm, sw_if_index, ip4_table_index,
+ ip6_table_index, l2_table_index, is_add);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_NO_MATCHING_INTERFACE:
+ return clib_error_return (0, "No such interface");
+
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ case VNET_API_ERROR_NO_SUCH_TABLE:
+ return clib_error_return (0, "No such classifier table");
+
+ default:
+ return clib_error_return (0, "vnet_set_input_acl_intfc returned %d",
+ rv);
+ }
+ return 0;
+}
+
+/*
+ * Configure an interface to enable/disable the input ACL feature:
+ * intfc - interface name on which to configure input ACL
+ * ip4-table <index> [del] - enable/disable IP4 input ACL
+ * ip6-table <index> [del] - enable/disable IP6 input ACL
+ * l2-table <index> [del] - enable/disable Layer2 input ACL
+ *
+ * Note: Only one table index per API call is allowed.
+ *
+ */
+VLIB_CLI_COMMAND (set_input_acl_command, static) = {
+ .path = "set interface input acl",
+ .short_help =
+ "set interface input acl intfc <int> [ip4-table <index>]\n"
+ " [ip6-table <index>] [l2-table <index>] [del]",
+ .function = set_input_acl_command_fn,
+};
+
+clib_error_t *input_acl_init (vlib_main_t *vm)
+{
+ input_acl_main_t * am = &input_acl_main;
+ clib_error_t * error = 0;
+
+ if ((error = vlib_call_init_function (vm, ip_inacl_init)))
+ return error;
+
+ am->vlib_main = vm;
+ am->vnet_main = vnet_get_main();
+ am->vnet_classify_main = &vnet_classify_main;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (input_acl_init);
+
+uword unformat_acl_type (unformat_input_t * input, va_list * args)
+{
+ u32 * acl_type = va_arg (*args, u32 *);
+ u32 tid = INPUT_ACL_N_TABLES;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "ip4"))
+ tid = INPUT_ACL_TABLE_IP4;
+ else if (unformat (input, "ip6"))
+ tid = INPUT_ACL_TABLE_IP6;
+ else if (unformat (input, "l2"))
+ tid = INPUT_ACL_TABLE_L2;
+ else
+ break;
+ }
+
+ *acl_type = tid;
+ return 1;
+}
+
+u8 * format_vnet_inacl_info (u8 * s, va_list * va)
+{
+ input_acl_main_t * am = va_arg (*va, input_acl_main_t *);
+ int sw_if_idx = va_arg (*va, int);
+ u32 tid = va_arg (*va, u32);
+
+ if (tid == ~0)
+ {
+ s = format (s, "%10s%20s\t\t%s", "Intfc idx", "Classify table",
+ "Interface name");
+ return s;
+ }
+
+ s = format (s, "%10d%20d\t\t%U", sw_if_idx, tid,
+ format_vnet_sw_if_index_name, am->vnet_main, sw_if_idx);
+
+ return s;
+}
+
+static clib_error_t *
+show_inacl_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ input_acl_main_t * am = &input_acl_main;
+ u32 type = INPUT_ACL_N_TABLES;
+ int i;
+ u32 * vec_tbl;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "type %U", unformat_acl_type, &type))
+ ;
+ else
+ break;
+ }
+
+ if (type == INPUT_ACL_N_TABLES)
+ return clib_error_return (0, "Invalid input ACL table type.");
+
+ vec_tbl = am->classify_table_index_by_sw_if_index[type];
+
+ if (vec_len(vec_tbl))
+ vlib_cli_output (vm, "%U", format_vnet_inacl_info, am, ~0 /* hdr */, ~0);
+ else
+ vlib_cli_output (vm, "No input ACL tables configured");
+
+ for (i = 0; i < vec_len (vec_tbl); i++)
+ {
+ if (vec_elt(vec_tbl, i) == ~0)
+ continue;
+
+ vlib_cli_output (vm, "%U", format_vnet_inacl_info,
+ am, i, vec_elt(vec_tbl, i));
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (show_inacl_command, static) = {
+ .path = "show inacl",
+ .short_help = "show inacl type [ip4|ip6|l2]",
+ .function = show_inacl_command_fn,
+};
diff --git a/src/vnet/classify/input_acl.h b/src/vnet/classify/input_acl.h
new file mode 100644
index 00000000000..7ffc189f053
--- /dev/null
+++ b/src/vnet/classify/input_acl.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __included_vnet_input_acl_h__
+#define __included_vnet_input_acl_h__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/classify/vnet_classify.h>
+
+typedef enum {
+ INPUT_ACL_TABLE_IP4,
+ INPUT_ACL_TABLE_IP6,
+ INPUT_ACL_TABLE_L2,
+ INPUT_ACL_N_TABLES,
+} input_acl_table_id_t;
+
+typedef enum {
+ ACL_NEXT_INDEX_DENY,
+ ACL_NEXT_INDEX_N_NEXT,
+} acl_next_index_t;
+
+typedef struct {
+
+ /* classifier table vectors */
+ u32 * classify_table_index_by_sw_if_index [INPUT_ACL_N_TABLES];
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+ vnet_classify_main_t * vnet_classify_main;
+ vnet_config_main_t * vnet_config_main [INPUT_ACL_N_TABLES];
+} input_acl_main_t;
+
+extern input_acl_main_t input_acl_main;
+
+int vnet_set_input_acl_intfc (vlib_main_t * vm, u32 sw_if_index,
+ u32 ip4_table_index,
+ u32 ip6_table_index,
+ u32 l2_table_index, u32 is_add);
+
+#endif /* __included_vnet_input_acl_h__ */
diff --git a/src/vnet/classify/ip_classify.c b/src/vnet/classify/ip_classify.c
new file mode 100644
index 00000000000..44973ae5e99
--- /dev/null
+++ b/src/vnet/classify/ip_classify.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h> /* for ethernet_header_t */
+#include <vnet/classify/vnet_classify.h>
+#include <vnet/dpo/classify_dpo.h>
+
+typedef struct {
+ u32 next_index;
+ u32 table_index;
+ u32 entry_index;
+} ip_classify_trace_t;
+
+/* packet trace format function */
+static u8 * format_ip_classify_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ip_classify_trace_t * t = va_arg (*args, ip_classify_trace_t *);
+
+ s = format (s, "IP_CLASSIFY: next_index %d, table %d, entry %d",
+ t->next_index, t->table_index, t->entry_index);
+ return s;
+}
+
+vlib_node_registration_t ip4_classify_node;
+vlib_node_registration_t ip6_classify_node;
+
+#define foreach_ip_classify_error \
+_(MISS, "Classify misses") \
+_(HIT, "Classify hits") \
+_(CHAIN_HIT, "Classify hits after chain walk")
+
+typedef enum {
+#define _(sym,str) IP_CLASSIFY_ERROR_##sym,
+ foreach_ip_classify_error
+#undef _
+ IP_CLASSIFY_N_ERROR,
+} ip_classify_error_t;
+
+static char * ip_classify_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ip_classify_error
+#undef _
+};
+
+static inline uword
+ip_classify_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, int is_ip4)
+{
+ u32 n_left_from, * from, * to_next;
+ ip_lookup_next_t next_index;
+ vnet_classify_main_t * vcm = &vnet_classify_main;
+ f64 now = vlib_time_now (vm);
+ u32 hits = 0;
+ u32 misses = 0;
+ u32 chain_hits = 0;
+ u32 n_next;
+
+ if (is_ip4) {
+ n_next = IP4_LOOKUP_N_NEXT;
+ } else {
+ n_next = IP6_LOOKUP_N_NEXT;
+ }
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ /* First pass: compute hashes and prefetch buckets, so the second
+ pass's table lookups hit warm cache */
+
+ while (n_left_from > 2)
+ {
+ vlib_buffer_t * b0, * b1;
+ u32 bi0, bi1;
+ u8 * h0, * h1;
+ u32 cd_index0, cd_index1;
+ classify_dpo_t *cd0, * cd1;
+ u32 table_index0, table_index1;
+ vnet_classify_table_t * t0, * t1;
+
+ /* prefetch next iteration */
+ {
+ vlib_buffer_t * p1, * p2;
+
+ p1 = vlib_get_buffer (vm, from[1]);
+ p2 = vlib_get_buffer (vm, from[2]);
+
+ vlib_prefetch_buffer_header (p1, STORE);
+ CLIB_PREFETCH (p1->data, CLIB_CACHE_LINE_BYTES, STORE);
+ vlib_prefetch_buffer_header (p2, STORE);
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = (void *)vlib_buffer_get_current(b0) -
+ ethernet_buffer_header_size(b0);
+
+ bi1 = from[1];
+ b1 = vlib_get_buffer (vm, bi1);
+ h1 = (void *)vlib_buffer_get_current(b1) -
+ ethernet_buffer_header_size(b1);
+
+ cd_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+ cd0 = classify_dpo_get(cd_index0);
+ table_index0 = cd0->cd_table_index;
+
+ cd_index1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX];
+ cd1 = classify_dpo_get(cd_index1);
+ table_index1 = cd1->cd_table_index;
+
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ t1 = pool_elt_at_index (vcm->tables, table_index1);
+
+ vnet_buffer(b0)->l2_classify.hash =
+ vnet_classify_hash_packet (t0, (u8 *) h0);
+
+ vnet_classify_prefetch_bucket (t0, vnet_buffer(b0)->l2_classify.hash);
+
+ vnet_buffer(b1)->l2_classify.hash =
+ vnet_classify_hash_packet (t1, (u8 *) h1);
+
+ vnet_classify_prefetch_bucket (t1, vnet_buffer(b1)->l2_classify.hash);
+
+ vnet_buffer(b0)->l2_classify.table_index = table_index0;
+
+ vnet_buffer(b1)->l2_classify.table_index = table_index1;
+
+ from += 2;
+ n_left_from -= 2;
+ }
+
+ while (n_left_from > 0)
+ {
+ vlib_buffer_t * b0;
+ u32 bi0;
+ u8 * h0;
+ u32 cd_index0;
+ classify_dpo_t *cd0;
+ u32 table_index0;
+ vnet_classify_table_t * t0;
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = (void *)vlib_buffer_get_current(b0) -
+ ethernet_buffer_header_size(b0);
+
+ cd_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+ cd0 = classify_dpo_get(cd_index0);
+ table_index0 = cd0->cd_table_index;
+
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+ vnet_buffer(b0)->l2_classify.hash =
+ vnet_classify_hash_packet (t0, (u8 *) h0);
+
+ vnet_buffer(b0)->l2_classify.table_index = table_index0;
+ vnet_classify_prefetch_bucket (t0, vnet_buffer(b0)->l2_classify.hash);
+
+ from++;
+ n_left_from--;
+ }
+
+ next_index = node->cached_next_index;
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ /* Not enough load/store slots to dual loop... */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0 = IP_LOOKUP_NEXT_DROP;
+ u32 table_index0;
+ vnet_classify_table_t * t0;
+ vnet_classify_entry_t * e0;
+ u64 hash0;
+ u8 * h0;
+
+ /* Stride 3 seems to work best */
+ if (PREDICT_TRUE (n_left_from > 3))
+ {
+ vlib_buffer_t * p1 = vlib_get_buffer(vm, from[3]);
+ vnet_classify_table_t * tp1;
+ u32 table_index1;
+ u64 phash1;
+
+ table_index1 = vnet_buffer(p1)->l2_classify.table_index;
+
+ if (PREDICT_TRUE (table_index1 != ~0))
+ {
+ tp1 = pool_elt_at_index (vcm->tables, table_index1);
+ phash1 = vnet_buffer(p1)->l2_classify.hash;
+ vnet_classify_prefetch_entry (tp1, phash1);
+ }
+ }
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = b0->data;
+ table_index0 = vnet_buffer(b0)->l2_classify.table_index;
+ e0 = 0;
+ t0 = 0;
+ vnet_buffer(b0)->l2_classify.opaque_index = ~0;
+
+ if (PREDICT_TRUE(table_index0 != ~0))
+ {
+ hash0 = vnet_buffer(b0)->l2_classify.hash;
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0,
+ now);
+ if (e0)
+ {
+ vnet_buffer(b0)->l2_classify.opaque_index
+ = e0->opaque_index;
+ vlib_buffer_advance (b0, e0->advance);
+ next0 = (e0->next_index < node->n_next_nodes)?
+ e0->next_index:next0;
+ hits++;
+ }
+ else
+ {
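+ /* Miss: walk the chained tables until a hit, or until a table
+ with no successor supplies the miss disposition */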
+ while (1)
+ {
+ if (t0->next_table_index != ~0)
+ t0 = pool_elt_at_index (vcm->tables,
+ t0->next_table_index);
+ else
+ {
+ next0 = (t0->miss_next_index < n_next) ?
+ t0->miss_next_index : next0;
+ misses++;
+ break;
+ }
+
+ hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
+ e0 = vnet_classify_find_entry
+ (t0, (u8 *) h0, hash0, now);
+ if (e0)
+ {
+ vnet_buffer(b0)->l2_classify.opaque_index
+ = e0->opaque_index;
+ vlib_buffer_advance (b0, e0->advance);
+ next0 = (e0->next_index < node->n_next_nodes)?
+ e0->next_index:next0;
+ hits++;
+ chain_hits++;
+ break;
+ }
+ }
+ }
+ }
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ ip_classify_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->next_index = next0;
+ t->table_index = t0 ? t0 - vcm->tables : ~0;
+ t->entry_index = e0 ? e0 - t0->entries : ~0;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ IP_CLASSIFY_ERROR_MISS,
+ misses);
+ vlib_node_increment_counter (vm, node->node_index,
+ IP_CLASSIFY_ERROR_HIT,
+ hits);
+ vlib_node_increment_counter (vm, node->node_index,
+ IP_CLASSIFY_ERROR_CHAIN_HIT,
+ chain_hits);
+ return frame->n_vectors;
+}
+
+static uword
+ip4_classify (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return ip_classify_inline (vm, node, frame, 1 /* is_ip4 */);
+}
+
+
+VLIB_REGISTER_NODE (ip4_classify_node) = {
+ .function = ip4_classify,
+ .name = "ip4-classify",
+ .vector_size = sizeof (u32),
+ .sibling_of = "ip4-lookup",
+ .format_trace = format_ip_classify_trace,
+ .n_errors = ARRAY_LEN(ip_classify_error_strings),
+ .error_strings = ip_classify_error_strings,
+
+ .n_next_nodes = 0,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_classify_node, ip4_classify)
+
+static uword
+ip6_classify (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return ip_classify_inline (vm, node, frame, 0 /* is_ip4 */);
+}
+
+
+VLIB_REGISTER_NODE (ip6_classify_node) = {
+ .function = ip6_classify,
+ .name = "ip6-classify",
+ .vector_size = sizeof (u32),
+ .sibling_of = "ip6-lookup",
+ .format_trace = format_ip_classify_trace,
+ .n_errors = ARRAY_LEN(ip_classify_error_strings),
+ .error_strings = ip_classify_error_strings,
+
+ .n_next_nodes = 0,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_classify_node, ip6_classify)
+
+static clib_error_t *
+ip_classify_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ip_classify_init);
diff --git a/src/vnet/classify/policer_classify.c b/src/vnet/classify/policer_classify.c
new file mode 100644
index 00000000000..569234fba3b
--- /dev/null
+++ b/src/vnet/classify/policer_classify.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/classify/policer_classify.h>
+
+static void
+vnet_policer_classify_feature_enable (vlib_main_t * vm,
+ policer_classify_main_t * pcm,
+ u32 sw_if_index,
+ policer_classify_table_id_t tid,
+ int feature_enable)
+{
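+  /*
+   * L2 policer classify is toggled via the l2input feature bitmap;
+   * the IP4/IP6 variants go through the vnet feature arc mechanism.
+   */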
+ if (tid == POLICER_CLASSIFY_TABLE_L2)
+ {
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_POLICER_CLAS,
+ feature_enable);
+ }
+ else
+ {
+ vnet_feature_config_main_t * fcm;
+ u8 arc;
+
+ if (tid == POLICER_CLASSIFY_TABLE_IP4)
+ {
+ vnet_feature_enable_disable ("ip4-unicast", "ip4-policer-classify",
+ sw_if_index, feature_enable, 0, 0);
+ arc = vnet_get_feature_arc_index ("ip4-unicast");
+ }
+
+ else
+ {
+ vnet_feature_enable_disable ("ip6-unicast", "ip6-policer-classify",
+ sw_if_index, feature_enable, 0, 0);
+ arc = vnet_get_feature_arc_index ("ip6-unicast");
+ }
+
+ fcm = vnet_get_feature_arc_config_main (arc);
+ pcm->vnet_config_main[tid] = &fcm->config_main;
+ }
+}
+
+int vnet_set_policer_classify_intfc (vlib_main_t * vm, u32 sw_if_index,
+ u32 ip4_table_index, u32 ip6_table_index,
+ u32 l2_table_index, u32 is_add)
+{
+ policer_classify_main_t * pcm = &policer_classify_main;
+ vnet_classify_main_t * vcm = pcm->vnet_classify_main;
+ u32 pct[POLICER_CLASSIFY_N_TABLES] = {ip4_table_index, ip6_table_index,
+ l2_table_index};
+ u32 ti;
+
+ /* Assume that we've validated sw_if_index in the API layer */
+
+ for (ti = 0; ti < POLICER_CLASSIFY_N_TABLES; ti++)
+ {
+ if (pct[ti] == ~0)
+ continue;
+
+ if (pool_is_free_index (vcm->tables, pct[ti]))
+ return VNET_API_ERROR_NO_SUCH_TABLE;
+
+ vec_validate_init_empty
+ (pcm->classify_table_index_by_sw_if_index[ti], sw_if_index, ~0);
+
+ /* Reject any DEL operation with wrong sw_if_index */
+ if (!is_add &&
+ (pct[ti] != pcm->classify_table_index_by_sw_if_index[ti][sw_if_index]))
+ {
+ clib_warning ("Non-existent intf_idx=%d with table_index=%d for delete",
+ sw_if_index, pct[ti]);
+ return VNET_API_ERROR_NO_SUCH_TABLE;
+ }
+
+      /* Return ok on ADD operation if feature is already enabled */
+ if (is_add &&
+ pcm->classify_table_index_by_sw_if_index[ti][sw_if_index] != ~0)
+ return 0;
+
+ vnet_policer_classify_feature_enable (vm, pcm, sw_if_index, ti, is_add);
+
+ if (is_add)
+ pcm->classify_table_index_by_sw_if_index[ti][sw_if_index] = pct[ti];
+ else
+ pcm->classify_table_index_by_sw_if_index[ti][sw_if_index] = ~0;
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+set_policer_classify_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ u32 sw_if_index = ~0;
+ u32 ip4_table_index = ~0;
+ u32 ip6_table_index = ~0;
+ u32 l2_table_index = ~0;
+ u32 is_add = 1;
+ u32 idx_cnt = 0;
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "interface %U", unformat_vnet_sw_interface,
+ vnm, &sw_if_index))
+ ;
+ else if (unformat (input, "ip4-table %d", &ip4_table_index))
+ idx_cnt++;
+ else if (unformat (input, "ip6-table %d", &ip6_table_index))
+ idx_cnt++;
+ else if (unformat (input, "l2-table %d", &l2_table_index))
+ idx_cnt++;
+ else if (unformat (input, "del"))
+ is_add = 0;
+ else
+ break;
+ }
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "Interface must be specified.");
+
+ if (!idx_cnt)
+ return clib_error_return (0, "Table index should be specified.");
+
+ if (idx_cnt > 1)
+ return clib_error_return (0, "Only one table index per API is allowed.");
+
+ rv = vnet_set_policer_classify_intfc(vm, sw_if_index, ip4_table_index,
+ ip6_table_index, l2_table_index, is_add);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_NO_MATCHING_INTERFACE:
+ return clib_error_return (0, "No such interface");
+
+    case VNET_API_ERROR_NO_SUCH_ENTRY:
+    case VNET_API_ERROR_NO_SUCH_TABLE:
+      return clib_error_return (0, "No such classifier table");
+
+    default:
+      return clib_error_return
+        (0, "vnet_set_policer_classify_intfc returned %d", rv);
+    }
+ return 0;
+}
+
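+/*
+ * Example (interface name is illustrative):
+ *   set policer classify interface GigabitEthernet0/8/0 ip4-table 0
+ * attaches classify table 0 to the interface's ip4 policer-classify path.
+ */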
+VLIB_CLI_COMMAND (set_policer_classify_command, static) = {
+ .path = "set policer classify",
+ .short_help =
+ "set policer classify interface <int> [ip4-table <index>]\n"
+ " [ip6-table <index>] [l2-table <index>] [del]",
+ .function = set_policer_classify_command_fn,
+};
+
+static uword
+unformat_table_type (unformat_input_t * input, va_list * va)
+{
+ u32 * r = va_arg (*va, u32 *);
+ u32 tid;
+
+ if (unformat (input, "ip4"))
+ tid = POLICER_CLASSIFY_TABLE_IP4;
+ else if (unformat (input, "ip6"))
+ tid = POLICER_CLASSIFY_TABLE_IP6;
+ else if (unformat (input, "l2"))
+ tid = POLICER_CLASSIFY_TABLE_L2;
+ else
+ return 0;
+
+ *r = tid;
+ return 1;
+}
+static clib_error_t *
+show_policer_classify_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ policer_classify_main_t * pcm = &policer_classify_main;
+ u32 type = POLICER_CLASSIFY_N_TABLES;
+ u32 * vec_tbl;
+ int i;
+
+ if (unformat (input, "type %U", unformat_table_type, &type))
+ ;
+ else
+ return clib_error_return (0, "Type must be specified.");;
+
+ if (type == POLICER_CLASSIFY_N_TABLES)
+ return clib_error_return (0, "Invalid table type.");
+
+ vec_tbl = pcm->classify_table_index_by_sw_if_index[type];
+
+ if (vec_len(vec_tbl))
+ vlib_cli_output (vm, "%10s%20s\t\t%s", "Intfc idx", "Classify table",
+ "Interface name");
+ else
+ vlib_cli_output (vm, "No tables configured.");
+
+ for (i = 0; i < vec_len (vec_tbl); i++)
+ {
+ if (vec_elt(vec_tbl, i) == ~0)
+ continue;
+
+ vlib_cli_output (vm, "%10d%20d\t\t%U", i, vec_elt(vec_tbl, i),
+ format_vnet_sw_if_index_name, pcm->vnet_main, i);
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (show_policer_classify_command, static) = {
+ .path = "show classify policer",
+ .short_help = "show classify policer type [ip4|ip6|l2]",
+ .function = show_policer_classify_command_fn,
+};
diff --git a/src/vnet/classify/policer_classify.h b/src/vnet/classify/policer_classify.h
new file mode 100644
index 00000000000..3065644438d
--- /dev/null
+++ b/src/vnet/classify/policer_classify.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __included_vnet_policer_classify_h__
+#define __included_vnet_policer_classify_h__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/classify/vnet_classify.h>
+
+typedef enum {
+ POLICER_CLASSIFY_TABLE_IP4,
+ POLICER_CLASSIFY_TABLE_IP6,
+ POLICER_CLASSIFY_TABLE_L2,
+ POLICER_CLASSIFY_N_TABLES,
+} policer_classify_table_id_t;
+
+typedef enum {
+ POLICER_CLASSIFY_NEXT_INDEX_DROP,
+ POLICER_CLASSIFY_NEXT_INDEX_N_NEXT,
+} policer_classify_next_index_t;
+
+typedef struct {
+ /* Classifier table vectors */
+ u32 * classify_table_index_by_sw_if_index [POLICER_CLASSIFY_N_TABLES];
+
+ /* L2 next nodes for each feature */
+ u32 feat_next_node_index[32];
+
+ /* Convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+ vnet_classify_main_t * vnet_classify_main;
+ vnet_config_main_t * vnet_config_main [POLICER_CLASSIFY_N_TABLES];
+} policer_classify_main_t;
+
+policer_classify_main_t policer_classify_main;
+
+int vnet_set_policer_classify_intfc (vlib_main_t * vm, u32 sw_if_index,
+ u32 ip4_table_index, u32 ip6_table_index,
+ u32 l2_table_index, u32 is_add);
+
+#endif /* __included_vnet_policer_classify_h__ */
diff --git a/src/vnet/classify/vnet_classify.c b/src/vnet/classify/vnet_classify.c
new file mode 100644
index 00000000000..ce38f9f173e
--- /dev/null
+++ b/src/vnet/classify/vnet_classify.c
@@ -0,0 +1,2436 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/classify/vnet_classify.h>
+#include <vnet/classify/input_acl.h>
+#include <vnet/ip/ip.h>
+#include <vnet/api_errno.h> /* for API error numbers */
+#include <vnet/l2/l2_classify.h> /* for L2_INPUT_CLASSIFY_NEXT_xxx */
+#include <vnet/fib/fib_table.h>
+
+vnet_classify_main_t vnet_classify_main;
+
+#if VALIDATION_SCAFFOLDING
+/* Validation scaffolding */
+void mv (vnet_classify_table_t * t)
+{
+ void * oldheap;
+
+ oldheap = clib_mem_set_heap (t->mheap);
+ clib_mem_validate();
+ clib_mem_set_heap (oldheap);
+}
+
+void rogue (vnet_classify_table_t * t)
+{
+ int i, j, k;
+ vnet_classify_entry_t * v, * save_v;
+ u32 active_elements = 0;
+ vnet_classify_bucket_t * b;
+
+ for (i = 0; i < t->nbuckets; i++)
+ {
+ b = &t->buckets [i];
+ if (b->offset == 0)
+ continue;
+ save_v = vnet_classify_get_entry (t, b->offset);
+ for (j = 0; j < (1<<b->log2_pages); j++)
+ {
+ for (k = 0; k < t->entries_per_page; k++)
+ {
+ v = vnet_classify_entry_at_index
+ (t, save_v, j*t->entries_per_page + k);
+
+ if (vnet_classify_entry_is_busy (v))
+ active_elements++;
+ }
+ }
+ }
+
+ if (active_elements != t->active_elements)
+ clib_warning ("found %u expected %u elts", active_elements,
+ t->active_elements);
+}
+#else
+void mv (vnet_classify_table_t * t) { }
+void rogue (vnet_classify_table_t * t) { }
+#endif
+
+void vnet_classify_register_unformat_l2_next_index_fn (unformat_function_t * fn)
+{
+ vnet_classify_main_t * cm = &vnet_classify_main;
+
+ vec_add1 (cm->unformat_l2_next_index_fns, fn);
+}
+
+void vnet_classify_register_unformat_ip_next_index_fn (unformat_function_t * fn)
+{
+ vnet_classify_main_t * cm = &vnet_classify_main;
+
+ vec_add1 (cm->unformat_ip_next_index_fns, fn);
+}
+
+void
+vnet_classify_register_unformat_acl_next_index_fn (unformat_function_t * fn)
+{
+ vnet_classify_main_t * cm = &vnet_classify_main;
+
+ vec_add1 (cm->unformat_acl_next_index_fns, fn);
+}
+
+void
+vnet_classify_register_unformat_policer_next_index_fn (unformat_function_t * fn)
+{
+ vnet_classify_main_t * cm = &vnet_classify_main;
+
+ vec_add1 (cm->unformat_policer_next_index_fns, fn);
+}
+
+void vnet_classify_register_unformat_opaque_index_fn (unformat_function_t * fn)
+{
+ vnet_classify_main_t * cm = &vnet_classify_main;
+
+ vec_add1 (cm->unformat_opaque_index_fns, fn);
+}
+
+vnet_classify_table_t *
+vnet_classify_new_table (vnet_classify_main_t *cm,
+ u8 * mask, u32 nbuckets, u32 memory_size,
+ u32 skip_n_vectors,
+ u32 match_n_vectors)
+{
+ vnet_classify_table_t * t;
+ void * oldheap;
+
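+  /* Round up to a power of two so bucket selection can use
+     hash & (nbuckets - 1) */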
+ nbuckets = 1 << (max_log2 (nbuckets));
+
+ pool_get_aligned (cm->tables, t, CLIB_CACHE_LINE_BYTES);
+ memset(t, 0, sizeof (*t));
+
+ vec_validate_aligned (t->mask, match_n_vectors - 1, sizeof(u32x4));
+ clib_memcpy (t->mask, mask, match_n_vectors * sizeof (u32x4));
+
+ t->next_table_index = ~0;
+ t->nbuckets = nbuckets;
+ t->log2_nbuckets = max_log2 (nbuckets);
+ t->match_n_vectors = match_n_vectors;
+ t->skip_n_vectors = skip_n_vectors;
+ t->entries_per_page = 2;
+
+ t->mheap = mheap_alloc (0 /* use VM */, memory_size);
+
+ vec_validate_aligned (t->buckets, nbuckets - 1, CLIB_CACHE_LINE_BYTES);
+ oldheap = clib_mem_set_heap (t->mheap);
+
+ t->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+ CLIB_CACHE_LINE_BYTES);
+ t->writer_lock[0] = 0;
+
+ clib_mem_set_heap (oldheap);
+ return (t);
+}
+
+void vnet_classify_delete_table_index (vnet_classify_main_t *cm,
+ u32 table_index, int del_chain)
+{
+ vnet_classify_table_t * t;
+
+ /* Tolerate multiple frees, up to a point */
+ if (pool_is_free_index (cm->tables, table_index))
+ return;
+
+ t = pool_elt_at_index (cm->tables, table_index);
+ if (del_chain && t->next_table_index != ~0)
+ /* Recursively delete the entire chain */
+ vnet_classify_delete_table_index (cm, t->next_table_index, del_chain);
+
+ vec_free (t->mask);
+ vec_free (t->buckets);
+ mheap_free (t->mheap);
+
+ pool_put (cm->tables, t);
+}
+
+static vnet_classify_entry_t *
+vnet_classify_entry_alloc (vnet_classify_table_t * t, u32 log2_pages)
+{
+ vnet_classify_entry_t * rv = 0;
+#define _(size) \
+ vnet_classify_entry_##size##_t * rv##size = 0;
+ foreach_size_in_u32x4;
+#undef _
+
+ void * oldheap;
+
+ ASSERT (t->writer_lock[0]);
+ if (log2_pages >= vec_len (t->freelists) || t->freelists [log2_pages] == 0)
+ {
+ oldheap = clib_mem_set_heap (t->mheap);
+
+ vec_validate (t->freelists, log2_pages);
+
+ switch(t->match_n_vectors)
+ {
+ /* Euchre the vector allocator into allocating the right sizes */
+#define _(size) \
+ case size: \
+ vec_validate_aligned \
+ (rv##size, ((1<<log2_pages)*t->entries_per_page) - 1, \
+ CLIB_CACHE_LINE_BYTES); \
+ rv = (vnet_classify_entry_t *) rv##size; \
+ break;
+ foreach_size_in_u32x4;
+#undef _
+
+ default:
+ abort();
+ }
+
+ clib_mem_set_heap (oldheap);
+ goto initialize;
+ }
+ rv = t->freelists[log2_pages];
+ t->freelists[log2_pages] = rv->next_free;
+
+initialize:
+ ASSERT(rv);
+ ASSERT (vec_len(rv) == (1<<log2_pages)*t->entries_per_page);
+
+ switch (t->match_n_vectors)
+ {
+#define _(size) \
+ case size: \
+ if(vec_len(rv)) \
+ memset (rv, 0xff, sizeof (*rv##size) * vec_len(rv)); \
+ break;
+ foreach_size_in_u32x4;
+#undef _
+
+ default:
+ abort();
+ }
+
+ return rv;
+}
+
+static void
+vnet_classify_entry_free (vnet_classify_table_t * t,
+ vnet_classify_entry_t * v)
+{
+ u32 free_list_index;
+
+ ASSERT (t->writer_lock[0]);
+
+ free_list_index = min_log2(vec_len(v)/t->entries_per_page);
+
+ ASSERT(vec_len (t->freelists) > free_list_index);
+
+ v->next_free = t->freelists[free_list_index];
+ t->freelists[free_list_index] = v;
+}
+
+static inline void make_working_copy
+(vnet_classify_table_t * t, vnet_classify_bucket_t * b)
+{
+ vnet_classify_entry_t * v;
+ vnet_classify_bucket_t working_bucket __attribute__((aligned (8)));
+ void * oldheap;
+ vnet_classify_entry_t * working_copy;
+#define _(size) \
+ vnet_classify_entry_##size##_t * working_copy##size = 0;
+ foreach_size_in_u32x4;
+#undef _
+ u32 cpu_number = os_get_cpu_number();
+
+ if (cpu_number >= vec_len (t->working_copies))
+ {
+ oldheap = clib_mem_set_heap (t->mheap);
+ vec_validate (t->working_copies, cpu_number);
+ clib_mem_set_heap (oldheap);
+ }
+
+ /*
+ * working_copies are per-cpu so that near-simultaneous
+ * updates from multiple threads will not result in sporadic, spurious
+ * lookup failures.
+ */
+ working_copy = t->working_copies[cpu_number];
+
+ t->saved_bucket.as_u64 = b->as_u64;
+ oldheap = clib_mem_set_heap (t->mheap);
+
+ if ((1<<b->log2_pages)*t->entries_per_page > vec_len (working_copy))
+ {
+ switch(t->match_n_vectors)
+ {
+ /* Euchre the vector allocator into allocating the right sizes */
+#define _(size) \
+ case size: \
+ working_copy##size = (void *) working_copy; \
+ vec_validate_aligned \
+ (working_copy##size, \
+ ((1<<b->log2_pages)*t->entries_per_page) - 1, \
+ CLIB_CACHE_LINE_BYTES); \
+ working_copy = (void *) working_copy##size; \
+ break;
+ foreach_size_in_u32x4;
+#undef _
+
+ default:
+ abort();
+ }
+ t->working_copies[cpu_number] = working_copy;
+ }
+
+ _vec_len(working_copy) = (1<<b->log2_pages)*t->entries_per_page;
+ clib_mem_set_heap (oldheap);
+
+ v = vnet_classify_get_entry (t, b->offset);
+
+ switch(t->match_n_vectors)
+ {
+#define _(size) \
+ case size: \
+ clib_memcpy (working_copy, v, \
+ sizeof (vnet_classify_entry_##size##_t) \
+ * (1<<b->log2_pages) \
+ * (t->entries_per_page)); \
+ break;
+ foreach_size_in_u32x4 ;
+#undef _
+
+ default:
+ abort();
+ }
+
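+  /*
+   * Swing the bucket to the working copy with a single 64-bit store;
+   * concurrent readers see either the old pages or the complete copy,
+   * never a half-updated bucket.
+   */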
+ working_bucket.as_u64 = b->as_u64;
+ working_bucket.offset = vnet_classify_get_offset (t, working_copy);
+ CLIB_MEMORY_BARRIER();
+ b->as_u64 = working_bucket.as_u64;
+ t->working_copies[cpu_number] = working_copy;
+}
+
+static vnet_classify_entry_t *
+split_and_rehash (vnet_classify_table_t * t,
+ vnet_classify_entry_t * old_values,
+ u32 new_log2_pages)
+{
+ vnet_classify_entry_t * new_values, * v, * new_v;
+ int i, j, k;
+
+ new_values = vnet_classify_entry_alloc (t, new_log2_pages);
+
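+  /*
+   * Rehash every busy entry into the larger page array. The page slot
+   * comes from the hash bits just above the bucket-selection bits, so
+   * growing the array consumes one more hash bit per doubling.
+   */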
+ for (i = 0; i < (vec_len (old_values)/t->entries_per_page); i++)
+ {
+ u64 new_hash;
+
+ for (j = 0; j < t->entries_per_page; j++)
+ {
+ v = vnet_classify_entry_at_index
+ (t, old_values, i * t->entries_per_page + j);
+
+ if (vnet_classify_entry_is_busy (v))
+ {
+ /* Hack so we can use the packet hash routine */
+ u8 * key_minus_skip;
+ key_minus_skip = (u8 *) v->key;
+ key_minus_skip -= t->skip_n_vectors * sizeof (u32x4);
+
+ new_hash = vnet_classify_hash_packet (t, key_minus_skip);
+ new_hash >>= t->log2_nbuckets;
+ new_hash &= (1<<new_log2_pages) - 1;
+
+ for (k = 0; k < t->entries_per_page; k++)
+ {
+ new_v = vnet_classify_entry_at_index (t, new_values,
+ new_hash + k);
+
+ if (vnet_classify_entry_is_free (new_v))
+ {
+ clib_memcpy (new_v, v, sizeof (vnet_classify_entry_t)
+ + (t->match_n_vectors * sizeof (u32x4)));
+ new_v->flags &= ~(VNET_CLASSIFY_ENTRY_FREE);
+ goto doublebreak;
+ }
+ }
+ /* Crap. Tell caller to try again */
+ vnet_classify_entry_free (t, new_values);
+ return 0;
+ }
+ doublebreak:
+ ;
+ }
+ }
+ return new_values;
+}
+
+int vnet_classify_add_del (vnet_classify_table_t * t,
+ vnet_classify_entry_t * add_v,
+ int is_add)
+{
+ u32 bucket_index;
+ vnet_classify_bucket_t * b, tmp_b;
+ vnet_classify_entry_t * v, * new_v, * save_new_v, * working_copy, * save_v;
+ u32 value_index;
+ int rv = 0;
+ int i;
+ u64 hash, new_hash;
+ u32 new_log2_pages;
+ u32 cpu_number = os_get_cpu_number();
+ u8 * key_minus_skip;
+
+ ASSERT ((add_v->flags & VNET_CLASSIFY_ENTRY_FREE) == 0);
+
+ key_minus_skip = (u8 *) add_v->key;
+ key_minus_skip -= t->skip_n_vectors * sizeof (u32x4);
+
+ hash = vnet_classify_hash_packet (t, key_minus_skip);
+
+ bucket_index = hash & (t->nbuckets-1);
+ b = &t->buckets[bucket_index];
+
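+  /* The low log2_nbuckets bits chose the bucket; the remaining bits
+     select the entry page within the bucket. */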
+ hash >>= t->log2_nbuckets;
+
+ while (__sync_lock_test_and_set (t->writer_lock, 1))
+ ;
+
+ /* First elt in the bucket? */
+ if (b->offset == 0)
+ {
+ if (is_add == 0)
+ {
+ rv = -1;
+ goto unlock;
+ }
+
+ v = vnet_classify_entry_alloc (t, 0 /* new_log2_pages */);
+ clib_memcpy (v, add_v, sizeof (vnet_classify_entry_t) +
+ t->match_n_vectors * sizeof (u32x4));
+ v->flags &= ~(VNET_CLASSIFY_ENTRY_FREE);
+
+ tmp_b.as_u64 = 0;
+ tmp_b.offset = vnet_classify_get_offset (t, v);
+
+ b->as_u64 = tmp_b.as_u64;
+ t->active_elements ++;
+
+ goto unlock;
+ }
+
+ make_working_copy (t, b);
+
+ save_v = vnet_classify_get_entry (t, t->saved_bucket.offset);
+ value_index = hash & ((1<<t->saved_bucket.log2_pages)-1);
+
+ if (is_add)
+ {
+ /*
+ * For obvious (in hindsight) reasons, see if we're supposed to
+ * replace an existing key, then look for an empty slot.
+ */
+
+ for (i = 0; i < t->entries_per_page; i++)
+ {
+ v = vnet_classify_entry_at_index (t, save_v, value_index + i);
+
+ if (!memcmp (v->key, add_v->key, t->match_n_vectors * sizeof (u32x4)))
+ {
+ clib_memcpy (v, add_v, sizeof (vnet_classify_entry_t) +
+ t->match_n_vectors * sizeof(u32x4));
+ v->flags &= ~(VNET_CLASSIFY_ENTRY_FREE);
+
+ CLIB_MEMORY_BARRIER();
+ /* Restore the previous (k,v) pairs */
+ b->as_u64 = t->saved_bucket.as_u64;
+ goto unlock;
+ }
+ }
+ for (i = 0; i < t->entries_per_page; i++)
+ {
+ v = vnet_classify_entry_at_index (t, save_v, value_index + i);
+
+ if (vnet_classify_entry_is_free (v))
+ {
+ clib_memcpy (v, add_v, sizeof (vnet_classify_entry_t) +
+ t->match_n_vectors * sizeof(u32x4));
+ v->flags &= ~(VNET_CLASSIFY_ENTRY_FREE);
+ CLIB_MEMORY_BARRIER();
+ b->as_u64 = t->saved_bucket.as_u64;
+ t->active_elements ++;
+ goto unlock;
+ }
+ }
+ /* no room at the inn... split case... */
+ }
+ else
+ {
+ for (i = 0; i < t->entries_per_page; i++)
+ {
+ v = vnet_classify_entry_at_index (t, save_v, value_index + i);
+
+ if (!memcmp (v->key, add_v->key, t->match_n_vectors * sizeof (u32x4)))
+ {
+ memset (v, 0xff, sizeof (vnet_classify_entry_t) +
+ t->match_n_vectors * sizeof(u32x4));
+ v->flags |= VNET_CLASSIFY_ENTRY_FREE;
+ CLIB_MEMORY_BARRIER();
+ b->as_u64 = t->saved_bucket.as_u64;
+ t->active_elements --;
+ goto unlock;
+ }
+ }
+ rv = -3;
+ b->as_u64 = t->saved_bucket.as_u64;
+ goto unlock;
+ }
+
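+  /*
+   * Bucket full: double the page count and rehash into a larger array,
+   * retrying with ever-larger sizes until the new entry fits.
+   */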
+ new_log2_pages = t->saved_bucket.log2_pages + 1;
+
+ expand_again:
+ working_copy = t->working_copies[cpu_number];
+ new_v = split_and_rehash (t, working_copy, new_log2_pages);
+
+ if (new_v == 0)
+ {
+ new_log2_pages++;
+ goto expand_again;
+ }
+
+ /* Try to add the new entry */
+ save_new_v = new_v;
+
+ key_minus_skip = (u8 *) add_v->key;
+ key_minus_skip -= t->skip_n_vectors * sizeof (u32x4);
+
+ new_hash = vnet_classify_hash_packet_inline (t, key_minus_skip);
+ new_hash >>= t->log2_nbuckets;
+ new_hash &= (1<<min_log2((vec_len(new_v)/t->entries_per_page))) - 1;
+
+ for (i = 0; i < t->entries_per_page; i++)
+ {
+ new_v = vnet_classify_entry_at_index (t, save_new_v, new_hash + i);
+
+ if (vnet_classify_entry_is_free (new_v))
+ {
+ clib_memcpy (new_v, add_v, sizeof (vnet_classify_entry_t) +
+ t->match_n_vectors * sizeof(u32x4));
+ new_v->flags &= ~(VNET_CLASSIFY_ENTRY_FREE);
+ goto expand_ok;
+ }
+ }
+ /* Crap. Try again */
+ new_log2_pages++;
+ vnet_classify_entry_free (t, save_new_v);
+ goto expand_again;
+
+ expand_ok:
+ tmp_b.log2_pages = min_log2 (vec_len (save_new_v)/t->entries_per_page);
+ tmp_b.offset = vnet_classify_get_offset (t, save_new_v);
+ CLIB_MEMORY_BARRIER();
+ b->as_u64 = tmp_b.as_u64;
+ t->active_elements ++;
+ v = vnet_classify_get_entry (t, t->saved_bucket.offset);
+ vnet_classify_entry_free (t, v);
+
+ unlock:
+ CLIB_MEMORY_BARRIER();
+ t->writer_lock[0] = 0;
+
+ return rv;
+}
+
+typedef CLIB_PACKED(struct {
+ ethernet_header_t eh;
+ ip4_header_t ip;
+}) classify_data_or_mask_t;
+
+u64 vnet_classify_hash_packet (vnet_classify_table_t * t, u8 * h)
+{
+ return vnet_classify_hash_packet_inline (t, h);
+}
+
+vnet_classify_entry_t *
+vnet_classify_find_entry (vnet_classify_table_t * t,
+ u8 * h, u64 hash, f64 now)
+{
+ return vnet_classify_find_entry_inline (t, h, hash, now);
+}
+
+static u8 * format_classify_entry (u8 * s, va_list * args)
+{
+  vnet_classify_table_t * t = va_arg (*args, vnet_classify_table_t *);
+  vnet_classify_entry_t * e = va_arg (*args, vnet_classify_entry_t *);
+
+  s = format
+    (s, "[%u]: next_index %d advance %d opaque %d action %d metadata %d\n",
+     vnet_classify_get_offset (t, e), e->next_index, e->advance,
+     e->opaque_index, e->action, e->metadata);
+
+  s = format (s, " k: %U\n", format_hex_bytes, e->key,
+              t->match_n_vectors * sizeof(u32x4));
+
+  if (vnet_classify_entry_is_busy (e))
+    s = format (s, " hits %lld, last_heard %.2f\n",
+                e->hits, e->last_heard);
+  else
+    s = format (s, " entry is free\n");
+  return s;
+}
+
+u8 * format_classify_table (u8 * s, va_list * args)
+{
+ vnet_classify_table_t * t = va_arg (*args, vnet_classify_table_t *);
+ int verbose = va_arg (*args, int);
+ vnet_classify_bucket_t * b;
+ vnet_classify_entry_t * v, * save_v;
+ int i, j, k;
+ u64 active_elements = 0;
+
+ for (i = 0; i < t->nbuckets; i++)
+ {
+ b = &t->buckets [i];
+ if (b->offset == 0)
+ {
+ if (verbose > 1)
+ s = format (s, "[%d]: empty\n", i);
+ continue;
+ }
+
+ if (verbose)
+ {
+ s = format (s, "[%d]: heap offset %d, len %d\n", i,
+ b->offset, (1<<b->log2_pages));
+ }
+
+ save_v = vnet_classify_get_entry (t, b->offset);
+ for (j = 0; j < (1<<b->log2_pages); j++)
+ {
+ for (k = 0; k < t->entries_per_page; k++)
+ {
+
+ v = vnet_classify_entry_at_index (t, save_v,
+ j*t->entries_per_page + k);
+
+ if (vnet_classify_entry_is_free (v))
+ {
+ if (verbose > 1)
+ s = format (s, " %d: empty\n",
+ j * t->entries_per_page + k);
+ continue;
+ }
+ if (verbose)
+ {
+ s = format (s, " %d: %U\n",
+ j * t->entries_per_page + k,
+ format_classify_entry, t, v);
+ }
+ active_elements++;
+ }
+ }
+ }
+
+ s = format (s, " %lld active elements\n", active_elements);
+ s = format (s, " %d free lists\n", vec_len (t->freelists));
+ return s;
+}
+
+int vnet_classify_add_del_table (vnet_classify_main_t * cm,
+ u8 * mask,
+ u32 nbuckets,
+ u32 memory_size,
+ u32 skip,
+ u32 match,
+ u32 next_table_index,
+ u32 miss_next_index,
+ u32 * table_index,
+ u8 current_data_flag,
+ i16 current_data_offset,
+ int is_add,
+ int del_chain)
+{
+ vnet_classify_table_t * t;
+
+ if (is_add)
+ {
+ if (*table_index == ~0) /* add */
+ {
+ if (memory_size == 0)
+ return VNET_API_ERROR_INVALID_MEMORY_SIZE;
+
+ if (nbuckets == 0)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ t = vnet_classify_new_table (cm, mask, nbuckets, memory_size,
+ skip, match);
+ t->next_table_index = next_table_index;
+ t->miss_next_index = miss_next_index;
+ t->current_data_flag = current_data_flag;
+ t->current_data_offset = current_data_offset;
+ *table_index = t - cm->tables;
+ }
+ else /* update */
+ {
+ vnet_classify_main_t *cm = &vnet_classify_main;
+ t = pool_elt_at_index (cm->tables, *table_index);
+
+ t->next_table_index = next_table_index;
+ }
+ return 0;
+ }
+
+ vnet_classify_delete_table_index (cm, *table_index, del_chain);
+ return 0;
+}
+
+#define foreach_tcp_proto_field \
+_(src_port) \
+_(dst_port)
+
+#define foreach_udp_proto_field \
+_(src_port) \
+_(dst_port)
+
+#define foreach_ip4_proto_field \
+_(src_address) \
+_(dst_address) \
+_(tos) \
+_(length) \
+_(fragment_id) \
+_(ttl) \
+_(protocol) \
+_(checksum)
+
+uword unformat_tcp_mask (unformat_input_t * input, va_list * args)
+{
+ u8 ** maskp = va_arg (*args, u8 **);
+ u8 * mask = 0;
+ u8 found_something = 0;
+ tcp_header_t * tcp;
+
+#define _(a) u8 a=0;
+ foreach_tcp_proto_field;
+#undef _
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (0) ;
+#define _(a) else if (unformat (input, #a)) a=1;
+ foreach_tcp_proto_field
+#undef _
+ else
+ break;
+ }
+
+#define _(a) found_something += a;
+ foreach_tcp_proto_field;
+#undef _
+
+ if (found_something == 0)
+ return 0;
+
+ vec_validate (mask, sizeof (*tcp) - 1);
+
+ tcp = (tcp_header_t *) mask;
+
+#define _(a) if (a) memset (&tcp->a, 0xff, sizeof (tcp->a));
+ foreach_tcp_proto_field;
+#undef _
+
+ *maskp = mask;
+ return 1;
+}
+
+uword unformat_udp_mask (unformat_input_t * input, va_list * args)
+{
+ u8 ** maskp = va_arg (*args, u8 **);
+ u8 * mask = 0;
+ u8 found_something = 0;
+ udp_header_t * udp;
+
+#define _(a) u8 a=0;
+ foreach_udp_proto_field;
+#undef _
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (0) ;
+#define _(a) else if (unformat (input, #a)) a=1;
+ foreach_udp_proto_field
+#undef _
+ else
+ break;
+ }
+
+#define _(a) found_something += a;
+ foreach_udp_proto_field;
+#undef _
+
+ if (found_something == 0)
+ return 0;
+
+ vec_validate (mask, sizeof (*udp) - 1);
+
+ udp = (udp_header_t *) mask;
+
+#define _(a) if (a) memset (&udp->a, 0xff, sizeof (udp->a));
+ foreach_udp_proto_field;
+#undef _
+
+ *maskp = mask;
+ return 1;
+}
+
+typedef struct {
+ u16 src_port, dst_port;
+} tcpudp_header_t;
+
+uword unformat_l4_mask (unformat_input_t * input, va_list * args)
+{
+ u8 ** maskp = va_arg (*args, u8 **);
+ u16 src_port = 0, dst_port = 0;
+ tcpudp_header_t * tcpudp;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "tcp %U", unformat_tcp_mask, maskp))
+ return 1;
+ else if (unformat (input, "udp %U", unformat_udp_mask, maskp))
+ return 1;
+ else if (unformat (input, "src_port"))
+ src_port = 0xFFFF;
+ else if (unformat (input, "dst_port"))
+ dst_port = 0xFFFF;
+ else
+ return 0;
+ }
+
+ if (!src_port && !dst_port)
+ return 0;
+
+ u8 * mask = 0;
+ vec_validate (mask, sizeof (tcpudp_header_t) - 1);
+
+ tcpudp = (tcpudp_header_t *) mask;
+ tcpudp->src_port = src_port;
+ tcpudp->dst_port = dst_port;
+
+ *maskp = mask;
+
+ return 1;
+}
+
+uword unformat_ip4_mask (unformat_input_t * input, va_list * args)
+{
+ u8 ** maskp = va_arg (*args, u8 **);
+ u8 * mask = 0;
+ u8 found_something = 0;
+ ip4_header_t * ip;
+
+#define _(a) u8 a=0;
+ foreach_ip4_proto_field;
+#undef _
+ u8 version = 0;
+ u8 hdr_length = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "version"))
+ version = 1;
+ else if (unformat (input, "hdr_length"))
+ hdr_length = 1;
+ else if (unformat (input, "src"))
+ src_address = 1;
+ else if (unformat (input, "dst"))
+ dst_address = 1;
+ else if (unformat (input, "proto"))
+ protocol = 1;
+
+#define _(a) else if (unformat (input, #a)) a=1;
+ foreach_ip4_proto_field
+#undef _
+ else
+ break;
+ }
+
+#define _(a) found_something += a;
+ foreach_ip4_proto_field;
+#undef _
+
+ if (found_something == 0)
+ return 0;
+
+ vec_validate (mask, sizeof (*ip) - 1);
+
+ ip = (ip4_header_t *) mask;
+
+#define _(a) if (a) memset (&ip->a, 0xff, sizeof (ip->a));
+ foreach_ip4_proto_field;
+#undef _
+
+ ip->ip_version_and_header_length = 0;
+
+ if (version)
+ ip->ip_version_and_header_length |= 0xF0;
+
+ if (hdr_length)
+ ip->ip_version_and_header_length |= 0x0F;
+
+ *maskp = mask;
+ return 1;
+}
+
+#define foreach_ip6_proto_field \
+_(src_address) \
+_(dst_address) \
+_(payload_length) \
+_(hop_limit) \
+_(protocol)
+
+uword unformat_ip6_mask (unformat_input_t * input, va_list * args)
+{
+ u8 ** maskp = va_arg (*args, u8 **);
+ u8 * mask = 0;
+ u8 found_something = 0;
+ ip6_header_t * ip;
+ u32 ip_version_traffic_class_and_flow_label;
+
+#define _(a) u8 a=0;
+ foreach_ip6_proto_field;
+#undef _
+ u8 version = 0;
+ u8 traffic_class = 0;
+ u8 flow_label = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "version"))
+ version = 1;
+ else if (unformat (input, "traffic-class"))
+ traffic_class = 1;
+ else if (unformat (input, "flow-label"))
+ flow_label = 1;
+ else if (unformat (input, "src"))
+ src_address = 1;
+ else if (unformat (input, "dst"))
+ dst_address = 1;
+ else if (unformat (input, "proto"))
+ protocol = 1;
+
+#define _(a) else if (unformat (input, #a)) a=1;
+ foreach_ip6_proto_field
+#undef _
+ else
+ break;
+ }
+
+#define _(a) found_something += a;
+ foreach_ip6_proto_field;
+#undef _
+
+ if (found_something == 0)
+ return 0;
+
+ vec_validate (mask, sizeof (*ip) - 1);
+
+ ip = (ip6_header_t *) mask;
+
+#define _(a) if (a) memset (&ip->a, 0xff, sizeof (ip->a));
+ foreach_ip6_proto_field;
+#undef _
+
+ ip_version_traffic_class_and_flow_label = 0;
+
+ if (version)
+ ip_version_traffic_class_and_flow_label |= 0xF0000000;
+
+ if (traffic_class)
+ ip_version_traffic_class_and_flow_label |= 0x0FF00000;
+
+ if (flow_label)
+ ip_version_traffic_class_and_flow_label |= 0x000FFFFF;
+
+ ip->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 (ip_version_traffic_class_and_flow_label);
+
+ *maskp = mask;
+ return 1;
+}
+
+uword unformat_l3_mask (unformat_input_t * input, va_list * args)
+{
+ u8 ** maskp = va_arg (*args, u8 **);
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "ip4 %U", unformat_ip4_mask, maskp))
+ return 1;
+ else if (unformat (input, "ip6 %U", unformat_ip6_mask, maskp))
+ return 1;
+ else
+ break;
+ }
+ return 0;
+}
+
+uword unformat_l2_mask (unformat_input_t * input, va_list * args)
+{
+ u8 ** maskp = va_arg (*args, u8 **);
+ u8 * mask = 0;
+ u8 src = 0;
+ u8 dst = 0;
+ u8 proto = 0;
+ u8 tag1 = 0;
+ u8 tag2 = 0;
+ u8 ignore_tag1 = 0;
+ u8 ignore_tag2 = 0;
+ u8 cos1 = 0;
+ u8 cos2 = 0;
+ u8 dot1q = 0;
+ u8 dot1ad = 0;
+ int len = 14;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "src"))
+ src = 1;
+ else if (unformat (input, "dst"))
+ dst = 1;
+ else if (unformat (input, "proto"))
+ proto = 1;
+ else if (unformat (input, "tag1"))
+ tag1 = 1;
+ else if (unformat (input, "tag2"))
+ tag2 = 1;
+ else if (unformat (input, "ignore-tag1"))
+ ignore_tag1 = 1;
+ else if (unformat (input, "ignore-tag2"))
+ ignore_tag2 = 1;
+ else if (unformat (input, "cos1"))
+ cos1 = 1;
+ else if (unformat (input, "cos2"))
+ cos2 = 1;
+ else if (unformat (input, "dot1q"))
+ dot1q = 1;
+ else if (unformat (input, "dot1ad"))
+ dot1ad = 1;
+ else
+ break;
+ }
+ if ((src + dst + proto + tag1 + tag2 + dot1q + dot1ad +
+ ignore_tag1 + ignore_tag2 + cos1 + cos2) == 0)
+ return 0;
+
+ if (tag1 || ignore_tag1 || cos1 || dot1q)
+ len = 18;
+ if (tag2 || ignore_tag2 || cos2 || dot1ad)
+ len = 22;
+
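+  /*
+   * Byte layout being masked: dst 0-5, src 6-11, then the ethertype at
+   * 12-13 for untagged frames, 16-17 with one VLAN tag, 20-21 with two;
+   * the tag bytes themselves sit at 14-15 (outer) and 18-19 (inner).
+   */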
+ vec_validate (mask, len-1);
+
+ if (dst)
+ memset (mask, 0xff, 6);
+
+ if (src)
+ memset (mask + 6, 0xff, 6);
+
+ if (tag2 || dot1ad)
+ {
+ /* inner vlan tag */
+ if (tag2)
+ {
+ mask[19] = 0xff;
+ mask[18] = 0x0f;
+ }
+ if (cos2)
+ mask[18] |= 0xe0;
+ if (proto)
+ mask[21] = mask [20] = 0xff;
+ if (tag1)
+ {
+ mask [15] = 0xff;
+ mask [14] = 0x0f;
+ }
+ if (cos1)
+ mask[14] |= 0xe0;
+ *maskp = mask;
+ return 1;
+ }
+ if (tag1 | dot1q)
+ {
+ if (tag1)
+ {
+ mask [15] = 0xff;
+ mask [14] = 0x0f;
+ }
+ if (cos1)
+ mask[14] |= 0xe0;
+ if (proto)
+ mask[16] = mask [17] = 0xff;
+ *maskp = mask;
+ return 1;
+ }
+ if (cos2)
+ mask[18] |= 0xe0;
+ if (cos1)
+ mask[14] |= 0xe0;
+ if (proto)
+ mask[12] = mask [13] = 0xff;
+
+ *maskp = mask;
+ return 1;
+}
+
+uword unformat_classify_mask (unformat_input_t * input, va_list * args)
+{
+ vnet_classify_main_t * CLIB_UNUSED(cm)
+ = va_arg (*args, vnet_classify_main_t *);
+ u8 ** maskp = va_arg (*args, u8 **);
+ u32 * skipp = va_arg (*args, u32 *);
+ u32 * matchp = va_arg (*args, u32 *);
+ u32 match;
+ u8 * mask = 0;
+ u8 * l2 = 0;
+ u8 * l3 = 0;
+ u8 * l4 = 0;
+ int i;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "hex %U", unformat_hex_string, &mask))
+ ;
+ else if (unformat (input, "l2 %U", unformat_l2_mask, &l2))
+ ;
+ else if (unformat (input, "l3 %U", unformat_l3_mask, &l3))
+ ;
+ else if (unformat (input, "l4 %U", unformat_l4_mask, &l4))
+ ;
+ else
+ break;
+ }
+
+ if (l4 && !l3) {
+ vec_free (mask);
+ vec_free (l2);
+ vec_free (l4);
+ return 0;
+ }
+
+ if (mask || l2 || l3 || l4)
+ {
+ if (l2 || l3 || l4)
+ {
+ /* "With a free Ethernet header in every package" */
+ if (l2 == 0)
+ vec_validate (l2, 13);
+ mask = l2;
+ if (l3)
+ {
+ vec_append (mask, l3);
+ vec_free (l3);
+ }
+ if (l4)
+ {
+ vec_append (mask, l4);
+ vec_free (l4);
+ }
+ }
+
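+      /*
+       * Example: if the first significant mask byte is at offset 16,
+       * skip = 1 (one whole u32x4 skipped) and match counts the
+       * remaining 16-byte vectors that still contain significant bits.
+       */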
+ /* Scan forward looking for the first significant mask octet */
+ for (i = 0; i < vec_len (mask); i++)
+ if (mask[i])
+ break;
+
+ /* compute (skip, match) params */
+ *skipp = i / sizeof(u32x4);
+ vec_delete (mask, *skipp * sizeof(u32x4), 0);
+
+ /* Pad mask to an even multiple of the vector size */
+ while (vec_len (mask) % sizeof (u32x4))
+ vec_add1 (mask, 0);
+
+ match = vec_len (mask) / sizeof (u32x4);
+
+ for (i = match*sizeof(u32x4); i > 0; i-= sizeof(u32x4))
+ {
+ u64 *tmp = (u64 *)(mask + (i-sizeof(u32x4)));
+ if (*tmp || *(tmp+1))
+ break;
+ match--;
+ }
+ if (match == 0)
+ clib_warning ("BUG: match 0");
+
+ _vec_len (mask) = match * sizeof(u32x4);
+
+ *matchp = match;
+ *maskp = mask;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+#define foreach_l2_input_next \
+_(drop, DROP) \
+_(ethernet, ETHERNET_INPUT) \
+_(ip4, IP4_INPUT) \
+_(ip6, IP6_INPUT) \
+_(li, LI)
+
+uword unformat_l2_input_next_index (unformat_input_t * input, va_list * args)
+{
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ u32 * miss_next_indexp = va_arg (*args, u32 *);
+ u32 next_index = 0;
+ u32 tmp;
+ int i;
+
+ /* First try registered unformat fns, allowing override... */
+ for (i = 0; i < vec_len (cm->unformat_l2_next_index_fns); i++)
+ {
+ if (unformat (input, "%U", cm->unformat_l2_next_index_fns[i], &tmp))
+ {
+ next_index = tmp;
+ goto out;
+ }
+ }
+
+#define _(n,N) \
+ if (unformat (input, #n)) { next_index = L2_INPUT_CLASSIFY_NEXT_##N; goto out;}
+ foreach_l2_input_next;
+#undef _
+
+ if (unformat (input, "%d", &tmp))
+ {
+ next_index = tmp;
+ goto out;
+ }
+
+ return 0;
+
+ out:
+ *miss_next_indexp = next_index;
+ return 1;
+}
+
+#define foreach_l2_output_next \
+_(drop, DROP)
+
+uword unformat_l2_output_next_index (unformat_input_t * input, va_list * args)
+{
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ u32 * miss_next_indexp = va_arg (*args, u32 *);
+ u32 next_index = 0;
+ u32 tmp;
+ int i;
+
+ /* First try registered unformat fns, allowing override... */
+ for (i = 0; i < vec_len (cm->unformat_l2_next_index_fns); i++)
+ {
+ if (unformat (input, "%U", cm->unformat_l2_next_index_fns[i], &tmp))
+ {
+ next_index = tmp;
+ goto out;
+ }
+ }
+
+#define _(n,N) \
+ if (unformat (input, #n)) { next_index = L2_OUTPUT_CLASSIFY_NEXT_##N; goto out;}
+ foreach_l2_output_next;
+#undef _
+
+ if (unformat (input, "%d", &tmp))
+ {
+ next_index = tmp;
+ goto out;
+ }
+
+ return 0;
+
+ out:
+ *miss_next_indexp = next_index;
+ return 1;
+}
+
+#define foreach_ip_next \
+_(drop, DROP) \
+_(rewrite, REWRITE)
+
+uword unformat_ip_next_index (unformat_input_t * input, va_list * args)
+{
+ u32 * miss_next_indexp = va_arg (*args, u32 *);
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ u32 next_index = 0;
+ u32 tmp;
+ int i;
+
+ /* First try registered unformat fns, allowing override... */
+ for (i = 0; i < vec_len (cm->unformat_ip_next_index_fns); i++)
+ {
+ if (unformat (input, "%U", cm->unformat_ip_next_index_fns[i], &tmp))
+ {
+ next_index = tmp;
+ goto out;
+ }
+ }
+
+#define _(n,N) \
+ if (unformat (input, #n)) { next_index = IP_LOOKUP_NEXT_##N; goto out;}
+ foreach_ip_next;
+#undef _
+
+ if (unformat (input, "%d", &tmp))
+ {
+ next_index = tmp;
+ goto out;
+ }
+
+ return 0;
+
+ out:
+ *miss_next_indexp = next_index;
+ return 1;
+}
+
+#define foreach_acl_next \
+_(deny, DENY)
+
+uword unformat_acl_next_index (unformat_input_t * input, va_list * args)
+{
+ u32 * next_indexp = va_arg (*args, u32 *);
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ u32 next_index = 0;
+ u32 tmp;
+ int i;
+
+ /* First try registered unformat fns, allowing override... */
+ for (i = 0; i < vec_len (cm->unformat_acl_next_index_fns); i++)
+ {
+ if (unformat (input, "%U", cm->unformat_acl_next_index_fns[i], &tmp))
+ {
+ next_index = tmp;
+ goto out;
+ }
+ }
+
+#define _(n,N) \
+ if (unformat (input, #n)) { next_index = ACL_NEXT_INDEX_##N; goto out;}
+ foreach_acl_next;
+#undef _
+
+ if (unformat (input, "permit"))
+ {
+ next_index = ~0;
+ goto out;
+ }
+ else if (unformat (input, "%d", &tmp))
+ {
+ next_index = tmp;
+ goto out;
+ }
+
+ return 0;
+
+ out:
+ *next_indexp = next_index;
+ return 1;
+}
+
+uword unformat_policer_next_index (unformat_input_t * input, va_list * args)
+{
+ u32 * next_indexp = va_arg (*args, u32 *);
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ u32 next_index = 0;
+ u32 tmp;
+ int i;
+
+ /* First try registered unformat fns, allowing override... */
+ for (i = 0; i < vec_len (cm->unformat_policer_next_index_fns); i++)
+ {
+ if (unformat (input, "%U", cm->unformat_policer_next_index_fns[i], &tmp))
+ {
+ next_index = tmp;
+ goto out;
+ }
+ }
+
+ if (unformat (input, "%d", &tmp))
+ {
+ next_index = tmp;
+ goto out;
+ }
+
+ return 0;
+
+ out:
+ *next_indexp = next_index;
+ return 1;
+}
+
+static clib_error_t *
+classify_table_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ u32 nbuckets = 2;
+ u32 skip = ~0;
+ u32 match = ~0;
+ int is_add = 1;
+ int del_chain = 0;
+ u32 table_index = ~0;
+ u32 next_table_index = ~0;
+ u32 miss_next_index = ~0;
+ u32 memory_size = 2<<20;
+ u32 tmp;
+ u32 current_data_flag = 0;
+ int current_data_offset = 0;
+
+ u8 * mask = 0;
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "del"))
+ is_add = 0;
+ else if (unformat (input, "del-chain"))
+ {
+ is_add = 0;
+ del_chain = 1;
+ }
+ else if (unformat (input, "buckets %d", &nbuckets))
+ ;
+ else if (unformat (input, "skip %d", &skip))
+ ;
+ else if (unformat (input, "match %d", &match))
+ ;
+ else if (unformat (input, "table %d", &table_index))
+ ;
+ else if (unformat (input, "mask %U", unformat_classify_mask,
+ cm, &mask, &skip, &match))
+ ;
+ else if (unformat (input, "memory-size %uM", &tmp))
+ memory_size = tmp<<20;
+ else if (unformat (input, "memory-size %uG", &tmp))
+ memory_size = tmp<<30;
+ else if (unformat (input, "next-table %d", &next_table_index))
+ ;
+ else if (unformat (input, "miss-next %U", unformat_ip_next_index,
+ &miss_next_index))
+ ;
+ else if (unformat (input, "l2-input-miss-next %U", unformat_l2_input_next_index,
+ &miss_next_index))
+ ;
+ else if (unformat (input, "l2-output-miss-next %U", unformat_l2_output_next_index,
+ &miss_next_index))
+ ;
+ else if (unformat (input, "acl-miss-next %U", unformat_acl_next_index,
+ &miss_next_index))
+ ;
+ else if (unformat (input, "current-data-flag %d", &current_data_flag))
+ ;
+ else if (unformat (input, "current-data-offset %d", &current_data_offset))
+ ;
+ else
+ break;
+ }
+
+ if (is_add && mask == 0 && table_index == ~0)
+ return clib_error_return (0, "Mask required");
+
+ if (is_add && skip == ~0 && table_index == ~0)
+ return clib_error_return (0, "skip count required");
+
+ if (is_add && match == ~0 && table_index == ~0)
+ return clib_error_return (0, "match count required");
+
+ if (!is_add && table_index == ~0)
+ return clib_error_return (0, "table index required for delete");
+
+ rv = vnet_classify_add_del_table (cm, mask, nbuckets, memory_size,
+ skip, match, next_table_index, miss_next_index, &table_index,
+ current_data_flag, current_data_offset, is_add, del_chain);
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ default:
+ return clib_error_return (0, "vnet_classify_add_del_table returned %d",
+ rv);
+ }
+ return 0;
+}
+
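+/*
+ * Example: build a table that matches the IPv4 source address; the skip
+ * and match counts are computed from the mask:
+ *   classify table mask l3 ip4 src buckets 2
+ */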
+VLIB_CLI_COMMAND (classify_table, static) = {
+ .path = "classify table",
+ .short_help =
+ "classify table [miss-next|l2-miss_next|acl-miss-next <next_index>]"
+ "\n mask <mask-value> buckets <nn> [skip <n>] [match <n>]"
+ "\n [current-data-flag <n>] [current-data-offset <n>] [table <n>]"
+ "\n [del] [del-chain]",
+ .function = classify_table_command_fn,
+};
+
+static u8 * format_vnet_classify_table (u8 * s, va_list * args)
+{
+ vnet_classify_main_t * cm = va_arg (*args, vnet_classify_main_t *);
+ int verbose = va_arg (*args, int);
+ u32 index = va_arg (*args, u32);
+ vnet_classify_table_t * t;
+
+ if (index == ~0)
+ {
+ s = format (s, "%10s%10s%10s%10s", "TableIdx", "Sessions", "NextTbl",
+ "NextNode", verbose ? "Details" : "");
+ return s;
+ }
+
+ t = pool_elt_at_index (cm->tables, index);
+ s = format (s, "%10u%10d%10d%10d", index, t->active_elements,
+ t->next_table_index, t->miss_next_index);
+
+ s = format (s, "\n Heap: %U", format_mheap, t->mheap, 0 /*verbose*/);
+
+ s = format (s, "\n nbuckets %d, skip %d match %d flag %d offset %d",
+ t->nbuckets, t->skip_n_vectors, t->match_n_vectors,
+ t->current_data_flag, t->current_data_offset);
+ s = format (s, "\n mask %U", format_hex_bytes, t->mask,
+ t->match_n_vectors * sizeof (u32x4));
+
+ if (verbose == 0)
+ return s;
+
+ s = format (s, "\n%U", format_classify_table, t, verbose);
+
+ return s;
+}
+
+static clib_error_t *
+show_classify_tables_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ vnet_classify_table_t * t;
+ u32 match_index = ~0;
+ u32 * indices = 0;
+ int verbose = 0;
+ int i;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "index %d", &match_index))
+ ;
+ else if (unformat (input, "verbose %d", &verbose))
+ ;
+ else if (unformat (input, "verbose"))
+ verbose = 1;
+ else
+ break;
+ }
+
+ pool_foreach (t, cm->tables,
+ ({
+ if (match_index == ~0 || (match_index == t - cm->tables))
+ vec_add1 (indices, t - cm->tables);
+ }));
+
+ if (vec_len(indices))
+ {
+ vlib_cli_output (vm, "%U", format_vnet_classify_table, cm, verbose,
+ ~0 /* hdr */);
+ for (i = 0; i < vec_len (indices); i++)
+ vlib_cli_output (vm, "%U", format_vnet_classify_table, cm,
+ verbose, indices[i]);
+ }
+ else
+ vlib_cli_output (vm, "No classifier tables configured");
+
+ vec_free (indices);
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (show_classify_table_command, static) = {
+ .path = "show classify tables",
+ .short_help = "show classify tables [index <nn>]",
+ .function = show_classify_tables_command_fn,
+};
+
+uword unformat_l4_match (unformat_input_t * input, va_list * args)
+{
+ u8 ** matchp = va_arg (*args, u8 **);
+
+ u8 * proto_header = 0;
+ int src_port = 0;
+ int dst_port = 0;
+
+ tcpudp_header_t h;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "src_port %d", &src_port))
+ ;
+ else if (unformat (input, "dst_port %d", &dst_port))
+ ;
+ else
+ return 0;
+ }
+
+ h.src_port = clib_host_to_net_u16(src_port);
+ h.dst_port = clib_host_to_net_u16(dst_port);
+ vec_validate(proto_header, sizeof(h)-1);
+ memcpy(proto_header, &h, sizeof(h));
+
+ *matchp = proto_header;
+
+ return 1;
+}
+
+uword unformat_ip4_match (unformat_input_t * input, va_list * args)
+{
+ u8 ** matchp = va_arg (*args, u8 **);
+ u8 * match = 0;
+ ip4_header_t * ip;
+ int version = 0;
+ u32 version_val;
+ int hdr_length = 0;
+ u32 hdr_length_val;
+ int src = 0, dst = 0;
+ ip4_address_t src_val, dst_val;
+ int proto = 0;
+ u32 proto_val;
+ int tos = 0;
+ u32 tos_val;
+ int length = 0;
+ u32 length_val;
+ int fragment_id = 0;
+ u32 fragment_id_val;
+ int ttl = 0;
+ int ttl_val;
+ int checksum = 0;
+ u32 checksum_val;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "version %d", &version_val))
+ version = 1;
+ else if (unformat (input, "hdr_length %d", &hdr_length_val))
+ hdr_length = 1;
+ else if (unformat (input, "src %U", unformat_ip4_address, &src_val))
+ src = 1;
+ else if (unformat (input, "dst %U", unformat_ip4_address, &dst_val))
+ dst = 1;
+ else if (unformat (input, "proto %d", &proto_val))
+ proto = 1;
+ else if (unformat (input, "tos %d", &tos_val))
+ tos = 1;
+ else if (unformat (input, "length %d", &length_val))
+ length = 1;
+ else if (unformat (input, "fragment_id %d", &fragment_id_val))
+ fragment_id = 1;
+ else if (unformat (input, "ttl %d", &ttl_val))
+ ttl = 1;
+ else if (unformat (input, "checksum %d", &checksum_val))
+ checksum = 1;
+ else
+ break;
+ }
+
+ if (version + hdr_length + src + dst + proto + tos + length + fragment_id
+ + ttl + checksum == 0)
+ return 0;
+
+ /*
+ * Aligned because we use the real comparison functions
+ */
+ vec_validate_aligned (match, sizeof (*ip) - 1, sizeof(u32x4));
+
+ ip = (ip4_header_t *) match;
+
+ /* These are realistically matched in practice */
+ if (src)
+ ip->src_address.as_u32 = src_val.as_u32;
+
+ if (dst)
+ ip->dst_address.as_u32 = dst_val.as_u32;
+
+ if (proto)
+ ip->protocol = proto_val;
+
+ /* These are not, but they're included for completeness */
+ if (version)
+ ip->ip_version_and_header_length |= (version_val & 0xF)<<4;
+
+ if (hdr_length)
+ ip->ip_version_and_header_length |= (hdr_length_val & 0xF);
+
+ if (tos)
+ ip->tos = tos_val;
+
+ if (length)
+ ip->length = clib_host_to_net_u16 (length_val);
+
+ if (ttl)
+ ip->ttl = ttl_val;
+
+ if (checksum)
+ ip->checksum = clib_host_to_net_u16 (checksum_val);
+
+ *matchp = match;
+ return 1;
+}
+
+uword unformat_ip6_match (unformat_input_t * input, va_list * args)
+{
+ u8 ** matchp = va_arg (*args, u8 **);
+ u8 * match = 0;
+ ip6_header_t * ip;
+ int version = 0;
+ u32 version_val;
+ u8 traffic_class = 0;
+ u32 traffic_class_val;
+ u8 flow_label = 0;
+  u32 flow_label_val;
+ int src = 0, dst = 0;
+ ip6_address_t src_val, dst_val;
+ int proto = 0;
+ u32 proto_val;
+ int payload_length = 0;
+ u32 payload_length_val;
+ int hop_limit = 0;
+ int hop_limit_val;
+ u32 ip_version_traffic_class_and_flow_label;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "version %d", &version_val))
+ version = 1;
+ else if (unformat (input, "traffic_class %d", &traffic_class_val))
+ traffic_class = 1;
+ else if (unformat (input, "flow_label %d", &flow_label_val))
+ flow_label = 1;
+ else if (unformat (input, "src %U", unformat_ip6_address, &src_val))
+ src = 1;
+ else if (unformat (input, "dst %U", unformat_ip6_address, &dst_val))
+ dst = 1;
+ else if (unformat (input, "proto %d", &proto_val))
+ proto = 1;
+ else if (unformat (input, "payload_length %d", &payload_length_val))
+ payload_length = 1;
+ else if (unformat (input, "hop_limit %d", &hop_limit_val))
+ hop_limit = 1;
+ else
+ break;
+ }
+
+ if (version + traffic_class + flow_label + src + dst + proto +
+ payload_length + hop_limit == 0)
+ return 0;
+
+ /*
+ * Aligned because we use the real comparison functions
+ */
+ vec_validate_aligned (match, sizeof (*ip) - 1, sizeof(u32x4));
+
+ ip = (ip6_header_t *) match;
+
+ if (src)
+ clib_memcpy (&ip->src_address, &src_val, sizeof (ip->src_address));
+
+ if (dst)
+ clib_memcpy (&ip->dst_address, &dst_val, sizeof (ip->dst_address));
+
+ if (proto)
+ ip->protocol = proto_val;
+
+ ip_version_traffic_class_and_flow_label = 0;
+
+ if (version)
+ ip_version_traffic_class_and_flow_label |= (version_val & 0xF) << 28;
+
+ if (traffic_class)
+ ip_version_traffic_class_and_flow_label |= (traffic_class_val & 0xFF) << 20;
+
+ if (flow_label)
+ ip_version_traffic_class_and_flow_label |= (flow_label_val & 0xFFFFF);
+
+ ip->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 (ip_version_traffic_class_and_flow_label);
+
+ if (payload_length)
+ ip->payload_length = clib_host_to_net_u16 (payload_length_val);
+
+ if (hop_limit)
+ ip->hop_limit = hop_limit_val;
+
+ *matchp = match;
+ return 1;
+}
+
+uword unformat_l3_match (unformat_input_t * input, va_list * args)
+{
+ u8 ** matchp = va_arg (*args, u8 **);
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "ip4 %U", unformat_ip4_match, matchp))
+ return 1;
+ else if (unformat (input, "ip6 %U", unformat_ip6_match, matchp))
+ return 1;
+ /* $$$$ add mpls */
+ else
+ break;
+ }
+ return 0;
+}
+
+uword unformat_vlan_tag (unformat_input_t * input, va_list * args)
+{
+ u8 * tagp = va_arg (*args, u8 *);
+ u32 tag;
+
+ if (unformat(input, "%d", &tag))
+ {
+ tagp[0] = (tag>>8) & 0x0F;
+ tagp[1] = tag & 0xFF;
+ return 1;
+ }
+
+ return 0;
+}
+
+uword unformat_l2_match (unformat_input_t * input, va_list * args)
+{
+ u8 ** matchp = va_arg (*args, u8 **);
+ u8 * match = 0;
+ u8 src = 0;
+ u8 src_val[6];
+ u8 dst = 0;
+ u8 dst_val[6];
+ u8 proto = 0;
+ u16 proto_val;
+ u8 tag1 = 0;
+ u8 tag1_val [2];
+ u8 tag2 = 0;
+ u8 tag2_val [2];
+ int len = 14;
+ u8 ignore_tag1 = 0;
+ u8 ignore_tag2 = 0;
+ u8 cos1 = 0;
+ u8 cos2 = 0;
+ u32 cos1_val = 0;
+ u32 cos2_val = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "src %U", unformat_ethernet_address, &src_val))
+ src = 1;
+ else if (unformat (input, "dst %U", unformat_ethernet_address, &dst_val))
+ dst = 1;
+ else if (unformat (input, "proto %U",
+ unformat_ethernet_type_host_byte_order, &proto_val))
+ proto = 1;
+ else if (unformat (input, "tag1 %U", unformat_vlan_tag, tag1_val))
+ tag1 = 1;
+ else if (unformat (input, "tag2 %U", unformat_vlan_tag, tag2_val))
+ tag2 = 1;
+ else if (unformat (input, "ignore-tag1"))
+ ignore_tag1 = 1;
+ else if (unformat (input, "ignore-tag2"))
+ ignore_tag2 = 1;
+ else if (unformat (input, "cos1 %d", &cos1_val))
+ cos1 = 1;
+ else if (unformat (input, "cos2 %d", &cos2_val))
+ cos2 = 1;
+ else
+ break;
+ }
+ if ((src + dst + proto + tag1 + tag2 +
+ ignore_tag1 + ignore_tag2 + cos1 + cos2) == 0)
+ return 0;
+
+ if (tag1 || ignore_tag1 || cos1)
+ len = 18;
+ if (tag2 || ignore_tag2 || cos2)
+ len = 22;
+
+ vec_validate_aligned (match, len-1, sizeof(u32x4));
+
+ if (dst)
+ clib_memcpy (match, dst_val, 6);
+
+ if (src)
+ clib_memcpy (match + 6, src_val, 6);
+
+ if (tag2)
+ {
+ /* inner vlan tag */
+ match[19] = tag2_val[1];
+ match[18] = tag2_val[0];
+ if (cos2)
+ match [18] |= (cos2_val & 0x7) << 5;
+ if (proto)
+ {
+ match[21] = proto_val & 0xff;
+ match[20] = proto_val >> 8;
+ }
+ if (tag1)
+ {
+ match [15] = tag1_val[1];
+ match [14] = tag1_val[0];
+ }
+ if (cos1)
+ match [14] |= (cos1_val & 0x7) << 5;
+ *matchp = match;
+ return 1;
+ }
+ if (tag1)
+ {
+ match [15] = tag1_val[1];
+ match [14] = tag1_val[0];
+ if (proto)
+ {
+ match[17] = proto_val & 0xff;
+ match[16] = proto_val >> 8;
+ }
+ if (cos1)
+ match [14] |= (cos1_val & 0x7) << 5;
+
+ *matchp = match;
+ return 1;
+ }
+ if (cos2)
+ match [18] |= (cos2_val & 0x7) << 5;
+ if (cos1)
+ match [14] |= (cos1_val & 0x7) << 5;
+ if (proto)
+ {
+ match[13] = proto_val & 0xff;
+ match[12] = proto_val >> 8;
+ }
+
+ *matchp = match;
+ return 1;
+}
+
+
+uword unformat_classify_match (unformat_input_t * input, va_list * args)
+{
+ vnet_classify_main_t * cm = va_arg (*args, vnet_classify_main_t *);
+ u8 ** matchp = va_arg (*args, u8 **);
+ u32 table_index = va_arg (*args, u32);
+ vnet_classify_table_t * t;
+
+ u8 * match = 0;
+ u8 * l2 = 0;
+ u8 * l3 = 0;
+ u8 * l4 = 0;
+
+ if (pool_is_free_index (cm->tables, table_index))
+ return 0;
+
+ t = pool_elt_at_index (cm->tables, table_index);
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "hex %U", unformat_hex_string, &match))
+ ;
+ else if (unformat (input, "l2 %U", unformat_l2_match, &l2))
+ ;
+ else if (unformat (input, "l3 %U", unformat_l3_match, &l3))
+ ;
+ else if (unformat (input, "l4 %U", unformat_l4_match, &l4))
+ ;
+ else
+ break;
+ }
+
+ if (l4 && !l3) {
+ vec_free (match);
+ vec_free (l2);
+ vec_free (l4);
+ return 0;
+ }
+
+ if (match || l2 || l3 || l4)
+ {
+ if (l2 || l3 || l4)
+ {
+ /* "Win a free Ethernet header in every packet" */
+ if (l2 == 0)
+ vec_validate_aligned (l2, 13, sizeof(u32x4));
+ match = l2;
+ if (l3)
+ {
+ vec_append_aligned (match, l3, sizeof(u32x4));
+ vec_free (l3);
+ }
+ if (l4)
+ {
+ vec_append_aligned (match, l4, sizeof(u32x4));
+ vec_free (l4);
+ }
+ }
+
+ /* Make sure the vector is big enough even if key is all 0's */
+ vec_validate_aligned
+ (match, ((t->match_n_vectors + t->skip_n_vectors) * sizeof(u32x4)) - 1,
+ sizeof(u32x4));
+
+ /* Set size, include skipped vectors*/
+ _vec_len (match) = (t->match_n_vectors+t->skip_n_vectors) * sizeof(u32x4);
+
+ *matchp = match;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+int vnet_classify_add_del_session (vnet_classify_main_t * cm,
+ u32 table_index,
+ u8 * match,
+ u32 hit_next_index,
+ u32 opaque_index,
+ i32 advance,
+ u8 action,
+ u32 metadata,
+ int is_add)
+{
+ vnet_classify_table_t * t;
+ vnet_classify_entry_5_t _max_e __attribute__((aligned (16)));
+ vnet_classify_entry_t * e;
+ int i, rv;
+
+ if (pool_is_free_index (cm->tables, table_index))
+ return VNET_API_ERROR_NO_SUCH_TABLE;
+
+ t = pool_elt_at_index (cm->tables, table_index);
+
+ e = (vnet_classify_entry_t *)&_max_e;
+ e->next_index = hit_next_index;
+ e->opaque_index = opaque_index;
+ e->advance = advance;
+ e->hits = 0;
+ e->last_heard = 0;
+ e->flags = 0;
+ e->action = action;
+ if (e->action == CLASSIFY_ACTION_SET_IP4_FIB_INDEX)
+ e->metadata = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, metadata);
+ else if (e->action == CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
+ e->metadata = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, metadata);
+
+ /* Copy key data, honoring skip_n_vectors */
+ clib_memcpy (&e->key, match + t->skip_n_vectors * sizeof (u32x4),
+ t->match_n_vectors * sizeof (u32x4));
+
+ /* Clear don't-care bits; likely when dynamically creating sessions */
+ for (i = 0; i < t->match_n_vectors; i++)
+ e->key[i] &= t->mask[i];
+
+ rv = vnet_classify_add_del (t, e, is_add);
+ if (rv)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+ return 0;
+}
+
+static clib_error_t *
+classify_session_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ int is_add = 1;
+ u32 table_index = ~0;
+ u32 hit_next_index = ~0;
+ u64 opaque_index = ~0;
+ u8 * match = 0;
+ i32 advance = 0;
+ u32 action = 0;
+ u32 metadata = 0;
+ int i, rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "del"))
+ is_add = 0;
+ else if (unformat (input, "hit-next %U", unformat_ip_next_index,
+ &hit_next_index))
+ ;
+ else if (unformat (input, "l2-input-hit-next %U", unformat_l2_input_next_index,
+ &hit_next_index))
+ ;
+ else if (unformat (input, "l2-output-hit-next %U", unformat_l2_output_next_index,
+ &hit_next_index))
+ ;
+ else if (unformat (input, "acl-hit-next %U", unformat_acl_next_index,
+ &hit_next_index))
+ ;
+ else if (unformat (input, "policer-hit-next %U",
+ unformat_policer_next_index, &hit_next_index))
+ ;
+ else if (unformat (input, "opaque-index %lld", &opaque_index))
+ ;
+ else if (unformat (input, "match %U", unformat_classify_match,
+ cm, &match, table_index))
+ ;
+ else if (unformat (input, "advance %d", &advance))
+ ;
+ else if (unformat (input, "table-index %d", &table_index))
+ ;
+ else if (unformat (input, "action set-ip4-fib-id %d", &metadata))
+ action = 1;
+ else if (unformat (input, "action set-ip6-fib-id %d", &metadata))
+ action = 2;
+ else
+ {
+ /* Try registered opaque-index unformat fns */
+ for (i = 0; i < vec_len (cm->unformat_opaque_index_fns); i++)
+ {
+ if (unformat (input, "%U", cm->unformat_opaque_index_fns[i],
+ &opaque_index))
+ goto found_opaque;
+ }
+ break;
+ }
+ found_opaque:
+ ;
+ }
+
+ if (table_index == ~0)
+ return clib_error_return (0, "Table index required");
+
+ if (is_add && match == 0)
+ return clib_error_return (0, "Match value required");
+
+ rv = vnet_classify_add_del_session (cm, table_index, match,
+ hit_next_index,
+ opaque_index, advance,
+ action, metadata, is_add);
+
+ switch(rv)
+ {
+ case 0:
+ break;
+
+ default:
+ return clib_error_return (0, "vnet_classify_add_del_session returned %d",
+ rv);
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (classify_session_command, static) = {
+ .path = "classify session",
+ .short_help =
+ "classify session [hit-next|l2-input-hit-next|l2-output-hit-next|"
+ "acl-hit-next <next_index>|policer-hit-next <policer_name>]"
+ "\n table-index <nn> match [hex] [l2] [l3 ip4] [opaque-index <index>]"
+ "\n [action set-ip4-fib-id <n>] [action set-ip6-fib-id <n>] [del]",
+ .function = classify_session_command_fn,
+};
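+/*
+ * Example (sketch; assumes table 0 was built with an ip4 src mask):
+ *   classify session hit-next ip4-node error-drop table-index 0
+ *                    match l3 ip4 src 10.0.0.1
+ * The "ip4-node <node>" form is handled by unformat_ip_next_node below.
+ */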
+
+static uword
+unformat_opaque_sw_if_index (unformat_input_t * input, va_list * args)
+{
+ u64 * opaquep = va_arg (*args, u64 *);
+ u32 sw_if_index;
+
+ if (unformat (input, "opaque-sw_if_index %U", unformat_vnet_sw_interface,
+ vnet_get_main(), &sw_if_index))
+ {
+ *opaquep = sw_if_index;
+ return 1;
+ }
+ return 0;
+}
+
+static uword
+unformat_ip_next_node (unformat_input_t * input, va_list * args)
+{
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ u32 * next_indexp = va_arg (*args, u32 *);
+ u32 node_index;
+ u32 next_index = ~0;
+
+ if (unformat (input, "ip6-node %U", unformat_vlib_node,
+ cm->vlib_main, &node_index))
+ {
+ next_index = vlib_node_add_next (cm->vlib_main,
+ ip6_classify_node.index, node_index);
+ }
+ else if (unformat (input, "ip4-node %U", unformat_vlib_node,
+ cm->vlib_main, &node_index))
+ {
+ next_index = vlib_node_add_next (cm->vlib_main,
+ ip4_classify_node.index, node_index);
+ }
+ else
+ return 0;
+
+ *next_indexp = next_index;
+ return 1;
+}
+
+static uword
+unformat_acl_next_node (unformat_input_t * input, va_list * args)
+{
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ u32 * next_indexp = va_arg (*args, u32 *);
+ u32 node_index;
+ u32 next_index;
+
+ if (unformat (input, "ip6-node %U", unformat_vlib_node,
+ cm->vlib_main, &node_index))
+ {
+ next_index = vlib_node_add_next (cm->vlib_main,
+ ip6_inacl_node.index, node_index);
+ }
+ else if (unformat (input, "ip4-node %U", unformat_vlib_node,
+ cm->vlib_main, &node_index))
+ {
+ next_index = vlib_node_add_next (cm->vlib_main,
+ ip4_inacl_node.index, node_index);
+ }
+ else
+ return 0;
+
+ *next_indexp = next_index;
+ return 1;
+}
+
+static uword
+unformat_l2_input_next_node (unformat_input_t * input, va_list * args)
+{
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ u32 * next_indexp = va_arg (*args, u32 *);
+ u32 node_index;
+ u32 next_index;
+
+ if (unformat (input, "input-node %U", unformat_vlib_node,
+ cm->vlib_main, &node_index))
+ {
+ next_index = vlib_node_add_next
+ (cm->vlib_main, l2_input_classify_node.index, node_index);
+
+ *next_indexp = next_index;
+ return 1;
+ }
+ return 0;
+}
+
+static uword
+unformat_l2_output_next_node (unformat_input_t * input, va_list * args)
+{
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ u32 * next_indexp = va_arg (*args, u32 *);
+ u32 node_index;
+ u32 next_index;
+
+ if (unformat (input, "output-node %U", unformat_vlib_node,
+ cm->vlib_main, &node_index))
+ {
+ next_index = vlib_node_add_next
+ (cm->vlib_main, l2_output_classify_node.index, node_index);
+
+ *next_indexp = next_index;
+ return 1;
+ }
+ return 0;
+}
+
+static clib_error_t *
+vnet_classify_init (vlib_main_t * vm)
+{
+ vnet_classify_main_t * cm = &vnet_classify_main;
+
+ cm->vlib_main = vm;
+ cm->vnet_main = vnet_get_main();
+
+ vnet_classify_register_unformat_opaque_index_fn
+ (unformat_opaque_sw_if_index);
+
+ vnet_classify_register_unformat_ip_next_index_fn
+ (unformat_ip_next_node);
+
+ vnet_classify_register_unformat_l2_next_index_fn
+ (unformat_l2_input_next_node);
+
+ vnet_classify_register_unformat_l2_next_index_fn
+ (unformat_l2_output_next_node);
+
+ vnet_classify_register_unformat_acl_next_index_fn
+ (unformat_acl_next_node);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (vnet_classify_init);
+
+#define TEST_CODE 1
+
+#if TEST_CODE > 0
+static clib_error_t *
+test_classify_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ u32 buckets = 2;
+ u32 sessions = 10;
+ int i, rv;
+ vnet_classify_table_t * t = 0;
+ classify_data_or_mask_t * mask;
+ classify_data_or_mask_t * data;
+ u8 *mp = 0, *dp = 0;
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ vnet_classify_entry_t * e;
+ int is_add = 1;
+ u32 tmp;
+ u32 table_index = ~0;
+ ip4_address_t src;
+ u32 deleted = 0;
+ u32 memory_size = 64<<20;
+
+ /* Default starting address 1.0.0.10 */
+ src.as_u32 = clib_net_to_host_u32 (0x0100000A);
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "sessions %d", &sessions))
+ ;
+ else if (unformat (input, "src %U", unformat_ip4_address, &src))
+ ;
+ else if (unformat (input, "buckets %d", &buckets))
+ ;
+ else if (unformat (input, "memory-size %uM", &tmp))
+ memory_size = tmp<<20;
+ else if (unformat (input, "memory-size %uG", &tmp))
+ memory_size = tmp<<30;
+ else if (unformat (input, "del"))
+ is_add = 0;
+ else if (unformat (input, "table %d", &table_index))
+ ;
+ else
+ break;
+ }
+
+ vec_validate_aligned (mp, 3 * sizeof(u32x4), sizeof(u32x4));
+ vec_validate_aligned (dp, 3 * sizeof(u32x4), sizeof(u32x4));
+
+ mask = (classify_data_or_mask_t *) mp;
+ data = (classify_data_or_mask_t *) dp;
+
+ data->ip.src_address.as_u32 = src.as_u32;
+
+ /* Mask on src address */
+ memset (&mask->ip.src_address, 0xff, 4);
+
+ buckets = 1<<max_log2(buckets);
+
+ if (table_index != ~0)
+ {
+ if (pool_is_free_index (cm->tables, table_index))
+ {
+ vlib_cli_output (vm, "No such table %d", table_index);
+ goto out;
+ }
+ t = pool_elt_at_index (cm->tables, table_index);
+ }
+
+ if (is_add)
+ {
+ if (t == 0)
+ {
+ t = vnet_classify_new_table (cm, (u8 *)mask, buckets,
+ memory_size,
+ 0 /* skip */,
+ 3 /* vectors to match */);
+ t->miss_next_index = IP_LOOKUP_NEXT_DROP;
+ vlib_cli_output (vm, "Create table %d", t - cm->tables);
+ }
+
+ vlib_cli_output (vm, "Add %d sessions to %d buckets...",
+ sessions, buckets);
+
+ for (i = 0; i < sessions; i++)
+ {
+ rv = vnet_classify_add_del_session (cm, t - cm->tables, (u8 *) data,
+ IP_LOOKUP_NEXT_DROP,
+ i+100 /* opaque_index */,
+ 0 /* advance */, 0, 0,
+ 1 /* is_add */);
+
+ if (rv != 0)
+ clib_warning ("add: returned %d", rv);
+
+ tmp = clib_net_to_host_u32 (data->ip.src_address.as_u32) + 1;
+ data->ip.src_address.as_u32 = clib_net_to_host_u32 (tmp);
+ }
+ goto out;
+ }
+
+ if (t == 0)
+ {
+ vlib_cli_output (vm, "Must specify table index to delete sessions");
+ goto out;
+ }
+
+ vlib_cli_output (vm, "Try to delete %d sessions...", sessions);
+
+ for (i = 0; i < sessions; i++)
+ {
+ u8 * key_minus_skip;
+ u64 hash;
+
+ hash = vnet_classify_hash_packet (t, (u8 *) data);
+
+ e = vnet_classify_find_entry (t, (u8 *) data, hash, 0 /* time_now */);
+ /* Previous delete, perhaps... */
+ if (e == 0)
+ continue;
+ ASSERT (e->opaque_index == (i+100));
+
+ key_minus_skip = (u8 *)e->key;
+ key_minus_skip -= t->skip_n_vectors * sizeof (u32x4);
+
+ rv = vnet_classify_add_del_session (cm, t - cm->tables, key_minus_skip,
+ IP_LOOKUP_NEXT_DROP,
+ i+100 /* opaque_index */,
+ 0 /* advance */, 0, 0,
+ 0 /* is_add */);
+ if (rv != 0)
+ clib_warning ("del: returned %d", rv);
+
+ tmp = clib_net_to_host_u32 (data->ip.src_address.as_u32) + 1;
+ data->ip.src_address.as_u32 = clib_net_to_host_u32 (tmp);
+ deleted++;
+ }
+
+ vlib_cli_output (vm, "Deleted %d sessions...", deleted);
+
+ out:
+ vec_free (mp);
+ vec_free (dp);
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (test_classify_command, static) = {
+ .path = "test classify",
+ .short_help =
+ "test classify [src <ip>] [sessions <nn>] [buckets <nn>] [table <nn>] [del]",
+ .function = test_classify_command_fn,
+};
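+/* Example usage (sketch): "test classify sessions 1000 buckets 64"
+ creates a table and adds 1000 sessions; "test classify table <n>
+ sessions 1000 del" deletes them again. */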
+#endif /* TEST_CODE */
diff --git a/src/vnet/classify/vnet_classify.h b/src/vnet/classify/vnet_classify.h
new file mode 100644
index 00000000000..d0b896ed7d2
--- /dev/null
+++ b/src/vnet/classify/vnet_classify.h
@@ -0,0 +1,523 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_vnet_classify_h__
+#define __included_vnet_classify_h__
+
+#include <stdarg.h>
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ethernet/packet.h>
+#include <vnet/ip/ip_packet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vlib/cli.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/api_errno.h> /* for API error numbers */
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/cache.h>
+#include <vppinfra/xxhash.h>
+
+extern vlib_node_registration_t ip4_classify_node;
+extern vlib_node_registration_t ip6_classify_node;
+
+#define CLASSIFY_TRACE 0
+
+#if !defined( __aarch64__) && !defined(__arm__)
+#define CLASSIFY_USE_SSE //Allow usage of SSE operations
+#endif
+
+#define U32X4_ALIGNED(p) PREDICT_TRUE((((intptr_t)p) & 0xf) == 0)
+
+/*
+ * Classify table option to process packets
+ * CLASSIFY_FLAG_USE_CURR_DATA:
+ * - classify packets starting from VPP node’s current data pointer
+ */
+#define CLASSIFY_FLAG_USE_CURR_DATA 1
+
+/*
+ * Classify session action
+ * CLASSIFY_ACTION_SET_IP4_FIB_INDEX:
+ * - Classified IP packets will be looked up
+ * from the specified ipv4 fib table
+ * CLASSIFY_ACTION_SET_IP6_FIB_INDEX:
+ * - Classified IP packets will be looked up
+ * from the specified ipv6 fib table
+ */
+#define CLASSIFY_ACTION_SET_IP4_FIB_INDEX 1
+#define CLASSIFY_ACTION_SET_IP6_FIB_INDEX 2
+
+struct _vnet_classify_main;
+typedef struct _vnet_classify_main vnet_classify_main_t;
+
+#define foreach_size_in_u32x4 \
+_(1) \
+_(2) \
+_(3) \
+_(4) \
+_(5)
+
+typedef CLIB_PACKED(struct _vnet_classify_entry {
+ /* Graph node next index */
+ u32 next_index;
+
+ /* put into vnet_buffer(b)->l2_classify.opaque_index */
+ union {
+ struct {
+ u32 opaque_index;
+ /* advance on hit, note it's a signed quantity... */
+ i32 advance;
+ };
+ u64 opaque_count;
+ };
+
+ /* Really only need 1 bit */
+ u8 flags;
+#define VNET_CLASSIFY_ENTRY_FREE (1<<0)
+
+ u8 action;
+ u16 metadata;
+
+ /* Hit counter, last heard time */
+ union {
+ u64 hits;
+ struct _vnet_classify_entry * next_free;
+ };
+
+ f64 last_heard;
+
+ /* Must be aligned to a 16-octet boundary */
+ u32x4 key[0];
+}) vnet_classify_entry_t;
+
+static inline int vnet_classify_entry_is_free (vnet_classify_entry_t * e)
+{
+ return e->flags & VNET_CLASSIFY_ENTRY_FREE;
+}
+
+static inline int vnet_classify_entry_is_busy (vnet_classify_entry_t * e)
+{
+ return ((e->flags & VNET_CLASSIFY_ENTRY_FREE) == 0);
+}
+
+/* Need these to con the vector allocator */
+#define _(size) \
+typedef CLIB_PACKED(struct { \
+ u32 pad0[4]; \
+ u64 pad1[2]; \
+ u32x4 key[size]; \
+}) vnet_classify_entry_##size##_t;
+foreach_size_in_u32x4;
+#undef _
+
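+/* One hash bucket: mheap offset of the first entry page plus its log2
+ page count. Overlaying a single u64 (as_u64) lets the writer publish
+ a rebuilt bucket with one aligned store. */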
+typedef struct {
+ union {
+ struct {
+ u32 offset;
+ u8 pad[3];
+ u8 log2_pages;
+ };
+ u64 as_u64;
+ };
+} vnet_classify_bucket_t;
+
+typedef struct {
+ /* Mask to apply after skipping N vectors */
+ u32x4 *mask;
+ /* Buckets and entries */
+ vnet_classify_bucket_t * buckets;
+ vnet_classify_entry_t * entries;
+
+ /* Config parameters */
+ u32 match_n_vectors;
+ u32 skip_n_vectors;
+ u32 nbuckets;
+ u32 log2_nbuckets;
+ int entries_per_page;
+ u32 active_elements;
+ u32 current_data_flag;
+ int current_data_offset;
+ u32 data_offset;
+ /* Index of next table to try */
+ u32 next_table_index;
+
+ /* Miss next index, used when lookup misses and there is no next table */
+ u32 miss_next_index;
+
+ /* Per-bucket working copies, one per thread */
+ vnet_classify_entry_t ** working_copies;
+ vnet_classify_bucket_t saved_bucket;
+
+ /* Free entry freelists */
+ vnet_classify_entry_t **freelists;
+
+ u8 * name;
+
+ /* Private allocation arena, protected by the writer lock */
+ void * mheap;
+
+ /* Writer (only) lock for this table */
+ volatile u32 * writer_lock;
+
+} vnet_classify_table_t;
+
+struct _vnet_classify_main {
+ /* Table pool */
+ vnet_classify_table_t * tables;
+
+ /* Registered next-index, opaque unformat fcns */
+ unformat_function_t ** unformat_l2_next_index_fns;
+ unformat_function_t ** unformat_ip_next_index_fns;
+ unformat_function_t ** unformat_acl_next_index_fns;
+ unformat_function_t ** unformat_policer_next_index_fns;
+ unformat_function_t ** unformat_opaque_index_fns;
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+};
+
+extern vnet_classify_main_t vnet_classify_main;
+
+u8 * format_classify_table (u8 * s, va_list * args);
+
+u64 vnet_classify_hash_packet (vnet_classify_table_t * t, u8 * h);
+
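+/*
+ * Hash scheme: AND each 16-octet match vector with the table mask,
+ * XOR-fold the results into one 128-bit value, fold its two 64-bit
+ * halves together, and run the result through clib_xxhash. The SSE
+ * path requires 16-octet aligned data; the scalar path uses u64 halves.
+ */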
+static inline u64
+vnet_classify_hash_packet_inline (vnet_classify_table_t * t,
+ u8 * h)
+{
+ u32x4 *mask;
+
+ union {
+ u32x4 as_u32x4;
+ u64 as_u64[2];
+ } xor_sum __attribute__((aligned(sizeof(u32x4))));
+
+ ASSERT(t);
+ mask = t->mask;
+#ifdef CLASSIFY_USE_SSE
+ if (U32X4_ALIGNED(h)) { //SSE can't handle unaligned data
+ u32x4 *data = (u32x4 *)h;
+ xor_sum.as_u32x4 = data[0 + t->skip_n_vectors] & mask[0];
+ switch (t->match_n_vectors)
+ {
+ case 5:
+ xor_sum.as_u32x4 ^= data[4 + t->skip_n_vectors] & mask[4];
+ /* FALLTHROUGH */
+ case 4:
+ xor_sum.as_u32x4 ^= data[3 + t->skip_n_vectors] & mask[3];
+ /* FALLTHROUGH */
+ case 3:
+ xor_sum.as_u32x4 ^= data[2 + t->skip_n_vectors] & mask[2];
+ /* FALLTHROUGH */
+ case 2:
+ xor_sum.as_u32x4 ^= data[1 + t->skip_n_vectors] & mask[1];
+ /* FALLTHROUGH */
+ case 1:
+ break;
+ default:
+ abort();
+ }
+ } else
+#endif /* CLASSIFY_USE_SSE */
+ {
+ u32 skip_u64 = t->skip_n_vectors * 2;
+ u64 *data64 = (u64 *)h;
+ xor_sum.as_u64[0] = data64[0 + skip_u64] & ((u64 *)mask)[0];
+ xor_sum.as_u64[1] = data64[1 + skip_u64] & ((u64 *)mask)[1];
+ switch (t->match_n_vectors)
+ {
+ case 5:
+ xor_sum.as_u64[0] ^= data64[8 + skip_u64] & ((u64 *)mask)[8];
+ xor_sum.as_u64[1] ^= data64[9 + skip_u64] & ((u64 *)mask)[9];
+ /* FALLTHROUGH */
+ case 4:
+ xor_sum.as_u64[0] ^= data64[6 + skip_u64] & ((u64 *)mask)[6];
+ xor_sum.as_u64[1] ^= data64[7 + skip_u64] & ((u64 *)mask)[7];
+ /* FALLTHROUGH */
+ case 3:
+ xor_sum.as_u64[0] ^= data64[4 + skip_u64] & ((u64 *)mask)[4];
+ xor_sum.as_u64[1] ^= data64[5 + skip_u64] & ((u64 *)mask)[5];
+ /* FALLTHROUGH */
+ case 2:
+ xor_sum.as_u64[0] ^= data64[2 + skip_u64] & ((u64 *)mask)[2];
+ xor_sum.as_u64[1] ^= data64[3 + skip_u64] & ((u64 *)mask)[3];
+ /* FALLTHROUGH */
+ case 1:
+ break;
+
+ default:
+ abort();
+ }
+ }
+
+ return clib_xxhash (xor_sum.as_u64[0] ^ xor_sum.as_u64[1]);
+}
+
+static inline void
+vnet_classify_prefetch_bucket (vnet_classify_table_t * t, u64 hash)
+{
+ u32 bucket_index;
+
+ ASSERT (is_pow2(t->nbuckets));
+
+ bucket_index = hash & (t->nbuckets - 1);
+
+ CLIB_PREFETCH(&t->buckets[bucket_index], CLIB_CACHE_LINE_BYTES, LOAD);
+}
+
+static inline vnet_classify_entry_t *
+vnet_classify_get_entry (vnet_classify_table_t * t, uword offset)
+{
+ u8 * hp = t->mheap;
+ u8 * vp = hp + offset;
+
+ return (void *) vp;
+}
+
+static inline uword vnet_classify_get_offset (vnet_classify_table_t * t,
+ vnet_classify_entry_t * v)
+{
+ u8 * hp, * vp;
+
+ hp = (u8 *) t->mheap;
+ vp = (u8 *) v;
+
+ ASSERT((vp - hp) < 0x100000000ULL);
+ return vp - hp;
+}
+
+static inline vnet_classify_entry_t *
+vnet_classify_entry_at_index (vnet_classify_table_t * t,
+ vnet_classify_entry_t * e,
+ u32 index)
+{
+ u8 * eu8;
+
+ eu8 = (u8 *)e;
+
+ eu8 += index * (sizeof (vnet_classify_entry_t) +
+ (t->match_n_vectors * sizeof (u32x4)));
+
+ return (vnet_classify_entry_t *) eu8;
+}
+
+static inline void
+vnet_classify_prefetch_entry (vnet_classify_table_t * t,
+ u64 hash)
+{
+ u32 bucket_index;
+ u32 value_index;
+ vnet_classify_bucket_t * b;
+ vnet_classify_entry_t * e;
+
+ bucket_index = hash & (t->nbuckets - 1);
+
+ b = &t->buckets[bucket_index];
+
+ if (b->offset == 0)
+ return;
+
+ hash >>= t->log2_nbuckets;
+
+ e = vnet_classify_get_entry (t, b->offset);
+ value_index = hash & ((1<<b->log2_pages)-1);
+
+ e = vnet_classify_entry_at_index (t, e, value_index);
+
+ CLIB_PREFETCH(e, CLIB_CACHE_LINE_BYTES, LOAD);
+}
+
+vnet_classify_entry_t *
+vnet_classify_find_entry (vnet_classify_table_t * t,
+ u8 * h, u64 hash, f64 now);
+
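+/*
+ * Lookup: the low log2_nbuckets hash bits select the bucket, the next
+ * log2_pages bits select the starting entry, and up to entries_per_page
+ * consecutive entries are compared against the masked packet data.
+ * Passing now == 0 skips the hit-counter update (used by delete paths).
+ */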
+static inline vnet_classify_entry_t *
+vnet_classify_find_entry_inline (vnet_classify_table_t * t,
+ u8 * h, u64 hash, f64 now)
+ {
+ vnet_classify_entry_t * v;
+ u32x4 *mask, *key;
+ union {
+ u32x4 as_u32x4;
+ u64 as_u64[2];
+ } result __attribute__((aligned(sizeof(u32x4))));
+ vnet_classify_bucket_t * b;
+ u32 value_index;
+ u32 bucket_index;
+ int i;
+
+ bucket_index = hash & (t->nbuckets-1);
+ b = &t->buckets[bucket_index];
+ mask = t->mask;
+
+ if (b->offset == 0)
+ return 0;
+
+ hash >>= t->log2_nbuckets;
+
+ v = vnet_classify_get_entry (t, b->offset);
+ value_index = hash & ((1<<b->log2_pages)-1);
+ v = vnet_classify_entry_at_index (t, v, value_index);
+
+#ifdef CLASSIFY_USE_SSE
+ if (U32X4_ALIGNED(h)) {
+ u32x4 *data = (u32x4 *) h;
+ for (i = 0; i < t->entries_per_page; i++) {
+ key = v->key;
+ result.as_u32x4 = (data[0 + t->skip_n_vectors] & mask[0]) ^ key[0];
+ switch (t->match_n_vectors)
+ {
+ case 5:
+ result.as_u32x4 |= (data[4 + t->skip_n_vectors] & mask[4]) ^ key[4];
+ /* FALLTHROUGH */
+ case 4:
+ result.as_u32x4 |= (data[3 + t->skip_n_vectors] & mask[3]) ^ key[3];
+ /* FALLTHROUGH */
+ case 3:
+ result.as_u32x4 |= (data[2 + t->skip_n_vectors] & mask[2]) ^ key[2];
+ /* FALLTHROUGH */
+ case 2:
+ result.as_u32x4 |= (data[1 + t->skip_n_vectors] & mask[1]) ^ key[1];
+ /* FALLTHROUGH */
+ case 1:
+ break;
+ default:
+ abort();
+ }
+
+ if (u32x4_zero_byte_mask (result.as_u32x4) == 0xffff) {
+ if (PREDICT_TRUE(now)) {
+ v->hits++;
+ v->last_heard = now;
+ }
+ return (v);
+ }
+ v = vnet_classify_entry_at_index (t, v, 1);
+ }
+ } else
+#endif /* CLASSIFY_USE_SSE */
+ {
+ u32 skip_u64 = t->skip_n_vectors * 2;
+ u64 *data64 = (u64 *)h;
+ for (i = 0; i < t->entries_per_page; i++) {
+ key = v->key;
+
+ result.as_u64[0] = (data64[0 + skip_u64] & ((u64 *)mask)[0]) ^ ((u64 *)key)[0];
+ result.as_u64[1] = (data64[1 + skip_u64] & ((u64 *)mask)[1]) ^ ((u64 *)key)[1];
+ switch (t->match_n_vectors)
+ {
+ case 5:
+ result.as_u64[0] |= (data64[8 + skip_u64] & ((u64 *)mask)[8]) ^ ((u64 *)key)[8];
+ result.as_u64[1] |= (data64[9 + skip_u64] & ((u64 *)mask)[9]) ^ ((u64 *)key)[9];
+ /* FALLTHROUGH */
+ case 4:
+ result.as_u64[0] |= (data64[6 + skip_u64] & ((u64 *)mask)[6]) ^ ((u64 *)key)[6];
+ result.as_u64[1] |= (data64[7 + skip_u64] & ((u64 *)mask)[7]) ^ ((u64 *)key)[7];
+ /* FALLTHROUGH */
+ case 3:
+ result.as_u64[0] |= (data64[4 + skip_u64] & ((u64 *)mask)[4]) ^ ((u64 *)key)[4];
+ result.as_u64[1] |= (data64[5 + skip_u64] & ((u64 *)mask)[5]) ^ ((u64 *)key)[5];
+ /* FALLTHROUGH */
+ case 2:
+ result.as_u64[0] |= (data64[2 + skip_u64] & ((u64 *)mask)[2]) ^ ((u64 *)key)[2];
+ result.as_u64[1] |= (data64[3 + skip_u64] & ((u64 *)mask)[3]) ^ ((u64 *)key)[3];
+ /* FALLTHROUGH */
+ case 1:
+ break;
+ default:
+ abort();
+ }
+
+ if (result.as_u64[0] == 0 && result.as_u64[1] == 0) {
+ if (PREDICT_TRUE(now)) {
+ v->hits++;
+ v->last_heard = now;
+ }
+ return (v);
+ }
+
+ v = vnet_classify_entry_at_index (t, v, 1);
+ }
+ }
+ return 0;
+ }
+
+vnet_classify_table_t *
+vnet_classify_new_table (vnet_classify_main_t *cm,
+ u8 * mask, u32 nbuckets, u32 memory_size,
+ u32 skip_n_vectors,
+ u32 match_n_vectors);
+
+int vnet_classify_add_del_session (vnet_classify_main_t * cm,
+ u32 table_index,
+ u8 * match,
+ u32 hit_next_index,
+ u32 opaque_index,
+ i32 advance,
+ u8 action,
+ u32 metadata,
+ int is_add);
+
+int vnet_classify_add_del_table (vnet_classify_main_t * cm,
+ u8 * mask,
+ u32 nbuckets,
+ u32 memory_size,
+ u32 skip,
+ u32 match,
+ u32 next_table_index,
+ u32 miss_next_index,
+ u32 * table_index,
+ u8 current_data_flag,
+ i16 current_data_offset,
+ int is_add,
+ int del_chain);
+
+unformat_function_t unformat_ip4_mask;
+unformat_function_t unformat_ip6_mask;
+unformat_function_t unformat_l3_mask;
+unformat_function_t unformat_l2_mask;
+unformat_function_t unformat_classify_mask;
+unformat_function_t unformat_l2_next_index;
+unformat_function_t unformat_ip_next_index;
+unformat_function_t unformat_ip4_match;
+unformat_function_t unformat_ip6_match;
+unformat_function_t unformat_l3_match;
+unformat_function_t unformat_vlan_tag;
+unformat_function_t unformat_l2_match;
+unformat_function_t unformat_classify_match;
+
+void vnet_classify_register_unformat_ip_next_index_fn
+(unformat_function_t * fn);
+
+void vnet_classify_register_unformat_l2_next_index_fn
+(unformat_function_t * fn);
+
+void vnet_classify_register_unformat_acl_next_index_fn
+(unformat_function_t * fn);
+
+void vnet_classify_register_unformat_policer_next_index_fn
+(unformat_function_t * fn);
+
+void vnet_classify_register_unformat_opaque_index_fn (unformat_function_t * fn);
+
+#endif /* __included_vnet_classify_h__ */
diff --git a/src/vnet/config.c b/src/vnet/config.c
new file mode 100644
index 00000000000..03189d77cd5
--- /dev/null
+++ b/src/vnet/config.c
@@ -0,0 +1,361 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * config.c: feature configuration
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/vnet.h>
+
+static vnet_config_feature_t *
+duplicate_feature_vector (vnet_config_feature_t * feature_vector)
+{
+ vnet_config_feature_t *result, *f;
+
+ result = vec_dup (feature_vector);
+ vec_foreach (f, result) f->feature_config = vec_dup (f->feature_config);
+
+ return result;
+}
+
+static void
+free_feature_vector (vnet_config_feature_t * feature_vector)
+{
+ vnet_config_feature_t *f;
+
+ vec_foreach (f, feature_vector) vnet_config_feature_free (f);
+ vec_free (feature_vector);
+}
+
+static u32
+add_next (vlib_main_t * vm,
+ vnet_config_main_t * cm, u32 last_node_index, u32 this_node_index)
+{
+ u32 i, ni = ~0;
+
+ if (last_node_index != ~0)
+ return vlib_node_add_next (vm, last_node_index, this_node_index);
+
+ for (i = 0; i < vec_len (cm->start_node_indices); i++)
+ {
+ u32 tmp;
+ tmp =
+ vlib_node_add_next (vm, cm->start_node_indices[i], this_node_index);
+ if (ni == ~0)
+ ni = tmp;
+ /* Start nodes to first must agree on next indices. */
+ ASSERT (ni == tmp);
+ }
+
+ return ni;
+}
+
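+/*
+ * Build (or share) a config for the given feature vector. The config
+ * string is a u32 vector: for each feature, its next index followed by
+ * its opaque config data, terminated by the next index of the end
+ * node. Identical strings are deduplicated via config_string_hash.
+ */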
+static vnet_config_t *
+find_config_with_features (vlib_main_t * vm,
+ vnet_config_main_t * cm,
+ vnet_config_feature_t * feature_vector)
+{
+ u32 last_node_index = ~0;
+ vnet_config_feature_t *f;
+ u32 *config_string;
+ uword *p;
+ vnet_config_t *c;
+
+ config_string = cm->config_string_temp;
+ cm->config_string_temp = 0;
+ if (config_string)
+ _vec_len (config_string) = 0;
+
+ vec_foreach (f, feature_vector)
+ {
+ /* Connect node graph. */
+ f->next_index = add_next (vm, cm, last_node_index, f->node_index);
+ last_node_index = f->node_index;
+
+ /* Store next index in config string. */
+ vec_add1 (config_string, f->next_index);
+
+ /* Store feature config. */
+ vec_add (config_string, f->feature_config, vec_len (f->feature_config));
+ }
+
+ /* Terminate config string with next for end node. */
+ if (last_node_index == ~0 || last_node_index != cm->end_node_index)
+ {
+ u32 next_index = add_next (vm, cm, last_node_index, cm->end_node_index);
+ vec_add1 (config_string, next_index);
+ }
+
+ /* See if config string is unique. */
+ p = hash_get_mem (cm->config_string_hash, config_string);
+ if (p)
+ {
+ /* Not unique. Share existing config. */
+ cm->config_string_temp = config_string; /* we'll use it again later. */
+ free_feature_vector (feature_vector);
+ c = pool_elt_at_index (cm->config_pool, p[0]);
+ }
+ else
+ {
+ u32 *d;
+
+ pool_get (cm->config_pool, c);
+ c->index = c - cm->config_pool;
+ c->features = feature_vector;
+ c->config_string_vector = config_string;
+
+ /* Allocate copy of config string in heap.
+ VLIB buffers will maintain pointers to heap as they read out
+ configuration data. */
+ c->config_string_heap_index
+ = heap_alloc (cm->config_string_heap, vec_len (config_string) + 1,
+ c->config_string_heap_handle);
+
+ /* First element in heap points back to pool index. */
+ d =
+ vec_elt_at_index (cm->config_string_heap,
+ c->config_string_heap_index);
+ d[0] = c->index;
+ clib_memcpy (d + 1, config_string, vec_bytes (config_string));
+ hash_set_mem (cm->config_string_hash, config_string, c->index);
+
+ c->reference_count = 0; /* will be incremented by caller. */
+ }
+
+ return c;
+}
+
+void
+vnet_config_init (vlib_main_t * vm,
+ vnet_config_main_t * cm,
+ char *start_node_names[],
+ int n_start_node_names,
+ char *feature_node_names[], int n_feature_node_names)
+{
+ vlib_node_t *n;
+ u32 i;
+
+ memset (cm, 0, sizeof (cm[0]));
+
+ cm->config_string_hash =
+ hash_create_vec (0,
+ STRUCT_SIZE_OF (vnet_config_t, config_string_vector[0]),
+ sizeof (uword));
+
+ ASSERT (n_feature_node_names >= 1);
+
+ vec_resize (cm->start_node_indices, n_start_node_names);
+ for (i = 0; i < n_start_node_names; i++)
+ {
+ n = vlib_get_node_by_name (vm, (u8 *) start_node_names[i]);
+ /* Given node name must exist. */
+ ASSERT (n != 0);
+ cm->start_node_indices[i] = n->index;
+ }
+
+ vec_resize (cm->node_index_by_feature_index, n_feature_node_names);
+ for (i = 0; i < n_feature_node_names; i++)
+ {
+ if (!feature_node_names[i])
+ cm->node_index_by_feature_index[i] = ~0;
+ else
+ {
+ n = vlib_get_node_by_name (vm, (u8 *) feature_node_names[i]);
+ /* Given node may exist in plug-in library which is not present */
+ if (n)
+ {
+ if (i + 1 == n_feature_node_names)
+ cm->end_node_index = n->index;
+ cm->node_index_by_feature_index[i] = n->index;
+ }
+ else
+ cm->node_index_by_feature_index[i] = ~0;
+ }
+ }
+}
+
+static void
+remove_reference (vnet_config_main_t * cm, vnet_config_t * c)
+{
+ ASSERT (c->reference_count > 0);
+ c->reference_count -= 1;
+ if (c->reference_count == 0)
+ {
+ hash_unset (cm->config_string_hash, c->config_string_vector);
+ vnet_config_free (cm, c);
+ pool_put (cm->config_pool, c);
+ }
+}
+
+static int
+feature_cmp (void *a1, void *a2)
+{
+ vnet_config_feature_t *f1 = a1;
+ vnet_config_feature_t *f2 = a2;
+
+ return (int) f1->feature_index - f2->feature_index;
+}
+
+always_inline u32 *
+vnet_get_config_heap (vnet_config_main_t * cm, u32 ci)
+{
+ return heap_elt_at_index (cm->config_string_heap, ci);
+}
+
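+/*
+ * Note on indexing: callers receive config_string_heap_index + 1, and
+ * the heap element just below that (d[0] in find_config_with_features)
+ * holds the config pool index, so p[-1] on the user's pointer recovers
+ * the owning config.
+ */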
+u32
+vnet_config_add_feature (vlib_main_t * vm,
+ vnet_config_main_t * cm,
+ u32 config_string_heap_index,
+ u32 feature_index,
+ void *feature_config, u32 n_feature_config_bytes)
+{
+ vnet_config_t *old, *new;
+ vnet_config_feature_t *new_features, *f;
+ u32 n_feature_config_u32s;
+ u32 node_index = vec_elt (cm->node_index_by_feature_index, feature_index);
+
+ if (node_index == ~0) // feature node does not exist
+ return config_string_heap_index; // return original config index
+
+ if (config_string_heap_index == ~0)
+ {
+ old = 0;
+ new_features = 0;
+ }
+ else
+ {
+ u32 *p = vnet_get_config_heap (cm, config_string_heap_index);
+ old = pool_elt_at_index (cm->config_pool, p[-1]);
+ new_features = old->features;
+ if (new_features)
+ new_features = duplicate_feature_vector (new_features);
+ }
+
+ vec_add2 (new_features, f, 1);
+ f->feature_index = feature_index;
+ f->node_index = node_index;
+
+ n_feature_config_u32s =
+ round_pow2 (n_feature_config_bytes,
+ sizeof (f->feature_config[0])) /
+ sizeof (f->feature_config[0]);
+ vec_add (f->feature_config, feature_config, n_feature_config_u32s);
+
+ /* Sort (prioritize) features. */
+ if (vec_len (new_features) > 1)
+ vec_sort_with_function (new_features, feature_cmp);
+
+ if (old)
+ remove_reference (cm, old);
+
+ new = find_config_with_features (vm, cm, new_features);
+ new->reference_count += 1;
+
+ /*
+ * User gets pointer to config string first element
+ * (which defines the pool index
+ * this config string comes from).
+ */
+ vec_validate (cm->config_pool_index_by_user_index,
+ new->config_string_heap_index + 1);
+ cm->config_pool_index_by_user_index[new->config_string_heap_index + 1]
+ = new - cm->config_pool;
+ return new->config_string_heap_index + 1;
+}
+
+u32
+vnet_config_del_feature (vlib_main_t * vm,
+ vnet_config_main_t * cm,
+ u32 config_string_heap_index,
+ u32 feature_index,
+ void *feature_config, u32 n_feature_config_bytes)
+{
+ vnet_config_t *old, *new;
+ vnet_config_feature_t *new_features, *f;
+ u32 n_feature_config_u32s;
+
+ {
+ u32 *p = vnet_get_config_heap (cm, config_string_heap_index);
+
+ old = pool_elt_at_index (cm->config_pool, p[-1]);
+ }
+
+ n_feature_config_u32s =
+ round_pow2 (n_feature_config_bytes,
+ sizeof (f->feature_config[0])) /
+ sizeof (f->feature_config[0]);
+
+ /* Find feature with same index and opaque data. */
+ vec_foreach (f, old->features)
+ {
+ if (f->feature_index == feature_index
+ && vec_len (f->feature_config) == n_feature_config_u32s
+ && (n_feature_config_u32s == 0
+ || !memcmp (f->feature_config, feature_config,
+ n_feature_config_bytes)))
+ break;
+ }
+
+ /* Feature not found. */
+ if (f >= vec_end (old->features))
+ return config_string_heap_index; // return original config index
+
+ new_features = duplicate_feature_vector (old->features);
+ f = new_features + (f - old->features);
+ vnet_config_feature_free (f);
+ vec_delete (new_features, 1, f - new_features);
+
+ /* Must remove old from the config pool now: find_config_with_features()
+ below may need to add a new config (when no existing one matches),
+ and expanding the pool can move it in memory. */
+ remove_reference (cm, old);
+ new = find_config_with_features (vm, cm, new_features);
+ new->reference_count += 1;
+
+ vec_validate (cm->config_pool_index_by_user_index,
+ new->config_string_heap_index + 1);
+ cm->config_pool_index_by_user_index[new->config_string_heap_index + 1]
+ = new - cm->config_pool;
+ return new->config_string_heap_index + 1;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/config.h b/src/vnet/config.h
new file mode 100644
index 00000000000..b77a7794a6e
--- /dev/null
+++ b/src/vnet/config.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * config.h: feature configuration
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_vnet_config_h
+#define included_vnet_config_h
+
+#include <vlib/vlib.h>
+#include <vppinfra/heap.h>
+
+typedef struct
+{
+ /* Features are prioritized by index. Smaller indices get
+ performed first. */
+ u32 feature_index;
+
+ /* VLIB node which performs feature. */
+ u32 node_index;
+
+ /* Next index relative to previous node or main node. */
+ u32 next_index;
+
+ /* Opaque per feature configuration data. */
+ u32 *feature_config;
+} vnet_config_feature_t;
+
+always_inline void
+vnet_config_feature_free (vnet_config_feature_t * f)
+{
+ vec_free (f->feature_config);
+}
+
+typedef struct
+{
+ /* Sorted vector of features for this configuration. */
+ vnet_config_feature_t *features;
+
+ /* Config string as vector for hashing. */
+ u32 *config_string_vector;
+
+ /* Config string including all next indices and feature data as a vector. */
+ u32 config_string_heap_index, config_string_heap_handle;
+
+ /* Index in main pool. */
+ u32 index;
+
+ /* Number of interfaces/traffic classes that reference this config. */
+ u32 reference_count;
+} vnet_config_t;
+
+typedef struct
+{
+ /* Pool of configs. Index 0 is always null config and is never deleted. */
+ vnet_config_t *config_pool;
+
+ /* Hash table mapping vector config string to config pool index. */
+ uword *config_string_hash;
+
+ /* Global heap of configuration data. */
+ u32 *config_string_heap;
+
+ /* Node index which starts/ends feature processing. */
+ u32 *start_node_indices, end_node_index;
+
+ /* Interior feature processing nodes (not including start and end nodes). */
+ u32 *node_index_by_feature_index;
+
+ /* vnet_config pool index by user index */
+ u32 *config_pool_index_by_user_index;
+
+ /* Temporary vector for holding config strings. Used to avoid continually
+ allocating vectors. */
+ u32 *config_string_temp;
+} vnet_config_main_t;
+
+always_inline void
+vnet_config_free (vnet_config_main_t * cm, vnet_config_t * c)
+{
+ vnet_config_feature_t *f;
+ vec_foreach (f, c->features) vnet_config_feature_free (f);
+ vec_free (c->features);
+ heap_dealloc (cm->config_string_heap, c->config_string_heap_handle);
+ vec_free (c->config_string_vector);
+}
+
+always_inline void *
+vnet_get_config_data (vnet_config_main_t * cm,
+ u32 * config_index, u32 * next_index, u32 n_data_bytes)
+{
+ u32 i, n, *d;
+
+ i = *config_index;
+
+ d = heap_elt_at_index (cm->config_string_heap, i);
+
+ n = round_pow2 (n_data_bytes, sizeof (d[0])) / sizeof (d[0]);
+
+ /* Last 32 bits are next index. */
+ *next_index = d[n];
+
+ /* Advance config index to next config. */
+ *config_index = (i + n + 1);
+
+ /* Return config data to user for this feature. */
+ return (void *) d;
+}
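+
+/*
+ * Typical use in a feature node (sketch; my_config_t stands in for the
+ * feature's own config struct):
+ *
+ *   my_config_t *c = vnet_get_config_data (&ccm->config_main,
+ *                                          &config_index, &next0,
+ *                                          sizeof (c[0]));
+ *
+ * Each feature thus reads its own config and learns the next node.
+ */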
+
+void vnet_config_init (vlib_main_t * vm,
+ vnet_config_main_t * cm,
+ char *start_node_names[],
+ int n_start_node_names,
+ char *feature_node_names[], int n_feature_node_names);
+
+/* Calls to add/delete features from configurations. */
+u32 vnet_config_add_feature (vlib_main_t * vm,
+ vnet_config_main_t * cm,
+ u32 config_id,
+ u32 feature_index,
+ void *feature_config,
+ u32 n_feature_config_bytes);
+
+u32 vnet_config_del_feature (vlib_main_t * vm,
+ vnet_config_main_t * cm,
+ u32 config_id,
+ u32 feature_index,
+ void *feature_config,
+ u32 n_feature_config_bytes);
+
+u8 *vnet_config_format_features (vlib_main_t * vm,
+ vnet_config_main_t * cm,
+ u32 config_index, u8 * s);
+
+#endif /* included_vnet_config_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/cop/cop.c b/src/vnet/cop/cop.c
new file mode 100644
index 00000000000..465d6c97a2a
--- /dev/null
+++ b/src/vnet/cop/cop.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/cop/cop.h>
+
+cop_main_t cop_main;
+
+static clib_error_t *
+cop_sw_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add)
+{
+ cop_main_t * cm = &cop_main;
+ cop_config_data_t _data, *data = &_data;
+ vlib_main_t * vm = cm->vlib_main;
+ vnet_hw_interface_t * hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ cop_config_main_t * ccm;
+ int address_family;
+ u32 ci, default_next;
+
+ memset (data, 0, sizeof(*data));
+
+ /*
+ * Ignore local interface, pg interfaces. $$$ need a #define for the
+ * first "real" interface. The answer is 5 at the moment.
+ */
+ if (hi->dev_class_index == vnet_local_interface_device_class.index)
+ return 0;
+
+ for (address_family = VNET_COP_IP4; address_family < VNET_N_COPS;
+ address_family++)
+ {
+ ccm = &cm->cop_config_mains[address_family];
+
+ /*
+ * Once-only code to initialize the per-address-family
+ * cop feature subgraphs.
+ * Since the (single) start-node, cop-input, must be able
+ * to push pkts into three separate subgraphs, we
+ * use a unified cop_feature_type_t enumeration.
+ */
+
+ if (!(ccm->config_main.node_index_by_feature_index))
+ {
+ switch (address_family)
+ {
+ case VNET_COP_IP4:
+ {
+ static char * start_nodes[] = { "cop-input" };
+ static char * feature_nodes[] = {
+ [IP4_RX_COP_WHITELIST] = "ip4-cop-whitelist",
+ [IP4_RX_COP_INPUT] = "ip4-input",
+ };
+
+ vnet_config_init (vm, &ccm->config_main,
+ start_nodes, ARRAY_LEN(start_nodes),
+ feature_nodes, ARRAY_LEN(feature_nodes));
+ }
+ break;
+ case VNET_COP_IP6:
+ {
+ static char * start_nodes[] = { "cop-input" };
+ static char * feature_nodes[] = {
+ [IP6_RX_COP_WHITELIST] = "ip6-cop-whitelist",
+ [IP6_RX_COP_INPUT] = "ip6-input",
+ };
+ vnet_config_init (vm, &ccm->config_main,
+ start_nodes, ARRAY_LEN(start_nodes),
+ feature_nodes, ARRAY_LEN(feature_nodes));
+ }
+ break;
+
+ case VNET_COP_DEFAULT:
+ {
+ static char * start_nodes[] = { "cop-input" };
+ static char * feature_nodes[] = {
+ [DEFAULT_RX_COP_WHITELIST] = "default-cop-whitelist",
+ [DEFAULT_RX_COP_INPUT] = "ethernet-input",
+ };
+ vnet_config_init (vm, &ccm->config_main,
+ start_nodes, ARRAY_LEN(start_nodes),
+ feature_nodes, ARRAY_LEN(feature_nodes));
+ }
+ break;
+
+ default:
+ clib_warning ("bug");
+ break;
+ }
+ }
+ vec_validate_init_empty (ccm->config_index_by_sw_if_index, sw_if_index,
+ ~0);
+
+ ci = ccm->config_index_by_sw_if_index[sw_if_index];
+
+ /* Create a sensible initial config: send pkts to xxx-input */
+ if (address_family == VNET_COP_IP4)
+ default_next = IP4_RX_COP_INPUT;
+ else if (address_family == VNET_COP_IP6)
+ default_next = IP6_RX_COP_INPUT;
+ else
+ default_next = DEFAULT_RX_COP_INPUT;
+
+ if (is_add)
+ ci = vnet_config_add_feature (vm, &ccm->config_main,
+ ci,
+ default_next,
+ data, sizeof(*data));
+ else
+ ci = vnet_config_del_feature (vm, &ccm->config_main,
+ ci,
+ default_next,
+ data, sizeof(*data));
+
+ ccm->config_index_by_sw_if_index[sw_if_index] = ci;
+ }
+ return 0;
+}
+
+VNET_SW_INTERFACE_ADD_DEL_FUNCTION (cop_sw_interface_add_del);
+
+static clib_error_t *
+cop_init (vlib_main_t *vm)
+{
+ cop_main_t * cm = &cop_main;
+ clib_error_t * error;
+
+ if ((error = vlib_call_init_function (vm, ip4_whitelist_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, ip6_whitelist_init)))
+ return error;
+
+ cm->vlib_main = vm;
+ cm->vnet_main = vnet_get_main();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (cop_init);
+
+int cop_interface_enable_disable (u32 sw_if_index, int enable_disable)
+{
+ cop_main_t * cm = &cop_main;
+ vnet_sw_interface_t * sw;
+ int rv;
+ u32 node_index = enable_disable ? cop_input_node.index : ~0;
+
+ /* Not a physical port? */
+ sw = vnet_get_sw_interface (cm->vnet_main, sw_if_index);
+ if (sw->type != VNET_SW_INTERFACE_TYPE_HARDWARE)
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+
+ /*
+ * Redirect pkts from the driver to the cop node.
+ * Returns VNET_API_ERROR_UNIMPLEMENTED if the h/w driver
+ * doesn't implement the API.
+ *
+ * Node_index = ~0 => shut off redirection
+ */
+ rv = vnet_hw_interface_rx_redirect_to_node (cm->vnet_main, sw_if_index,
+ node_index);
+ return rv;
+}
+
+static clib_error_t *
+cop_enable_disable_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ cop_main_t * cm = &cop_main;
+ u32 sw_if_index = ~0;
+ int enable_disable = 1;
+
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "disable"))
+ enable_disable = 0;
+ else if (unformat (input, "%U", unformat_vnet_sw_interface,
+ cm->vnet_main, &sw_if_index))
+ ;
+ else
+ break;
+ }
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "Please specify an interface...");
+
+ rv = cop_interface_enable_disable (sw_if_index, enable_disable);
+
+ switch(rv) {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_INVALID_SW_IF_INDEX:
+ return clib_error_return
+ (0, "Invalid interface, only works on physical ports");
+ break;
+
+ case VNET_API_ERROR_UNIMPLEMENTED:
+ return clib_error_return (0, "Device driver doesn't support redirection");
+ break;
+
+ default:
+ return clib_error_return (0, "cop_interface_enable_disable returned %d",
+ rv);
+ }
+ return 0;
+}
+
+VLIB_CLI_COMMAND (cop_interface_command, static) = {
+ .path = "cop interface",
+ .short_help =
+ "cop interface <interface-name> [disable]",
+ .function = cop_enable_disable_command_fn,
+};
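+/* Example: "cop interface GigabitEthernet0/0/0" redirects that port's
+ rx packets to cop-input; add "disable" to restore normal input
+ (interface name illustrative). */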
+
+
+int cop_whitelist_enable_disable (cop_whitelist_enable_disable_args_t *a)
+{
+ cop_main_t * cm = &cop_main;
+ vlib_main_t * vm = cm->vlib_main;
+ ip4_main_t * im4 = &ip4_main;
+ ip6_main_t * im6 = &ip6_main;
+ int address_family;
+ int is_add;
+ cop_config_main_t * ccm;
+ u32 next_to_add_del = 0;
+ uword * p;
+ u32 fib_index = 0;
+ u32 ci;
+ cop_config_data_t _data, *data=&_data;
+
+ /*
+ * Enable / disable whitelist processing on the specified interface
+ */
+
+ for (address_family = VNET_COP_IP4; address_family < VNET_N_COPS;
+ address_family++)
+ {
+ ccm = &cm->cop_config_mains[address_family];
+
+ switch(address_family)
+ {
+ case VNET_COP_IP4:
+ is_add = (a->ip4 != 0);
+ next_to_add_del = IP4_RX_COP_WHITELIST;
+ /* configured opaque data must match, or no supper */
+ p = hash_get (im4->fib_index_by_table_id, a->fib_id);
+ if (p)
+ fib_index = p[0];
+ else
+ {
+ if (is_add)
+ return VNET_API_ERROR_NO_SUCH_FIB;
+ else
+ continue;
+ }
+ break;
+
+ case VNET_COP_IP6:
+ is_add = (a->ip6 != 0);
+ next_to_add_del = IP6_RX_COP_WHITELIST;
+ p = hash_get (im6->fib_index_by_table_id, a->fib_id);
+ if (p)
+ fib_index = p[0];
+ else
+ {
+ if (is_add)
+ return VNET_API_ERROR_NO_SUCH_FIB;
+ else
+ continue;
+ }
+ break;
+
+ case VNET_COP_DEFAULT:
+ is_add = (a->default_cop != 0);
+ next_to_add_del = DEFAULT_RX_COP_WHITELIST;
+ break;
+
+ default:
+ clib_warning ("BUG");
+ }
+
+ ci = ccm->config_index_by_sw_if_index[a->sw_if_index];
+ data->fib_index = fib_index;
+
+ if (is_add)
+ ci = vnet_config_add_feature (vm, &ccm->config_main,
+ ci,
+ next_to_add_del,
+ data, sizeof (*data));
+ else
+ ci = vnet_config_del_feature (vm, &ccm->config_main,
+ ci,
+ next_to_add_del,
+ data, sizeof (*data));
+
+ ccm->config_index_by_sw_if_index[a->sw_if_index] = ci;
+ }
+ return 0;
+}
+
+static clib_error_t *
+cop_whitelist_enable_disable_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ cop_main_t * cm = &cop_main;
+ u32 sw_if_index = ~0;
+ u8 ip4 = 0;
+ u8 ip6 = 0;
+ u8 default_cop = 0;
+ u32 fib_id = 0;
+ int rv;
+ cop_whitelist_enable_disable_args_t _a, * a = &_a;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "ip4"))
+ ip4 = 1;
+ else if (unformat (input, "ip6"))
+ ip6 = 1;
+ else if (unformat (input, "default"))
+ default_cop = 1;
+ else if (unformat (input, "%U", unformat_vnet_sw_interface,
+ cm->vnet_main, &sw_if_index))
+ ;
+ else if (unformat (input, "fib-id %d", &fib_id))
+ ;
+ else
+ break;
+ }
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "Please specify an interface...");
+
+ a->sw_if_index = sw_if_index;
+ a->ip4 = ip4;
+ a->ip6 = ip6;
+ a->default_cop = default_cop;
+ a->fib_id = fib_id;
+
+ rv = cop_whitelist_enable_disable (a);
+
+ switch(rv) {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_INVALID_SW_IF_INDEX:
+ return clib_error_return
+ (0, "Invalid interface, only works on physical ports");
+ break;
+
+ case VNET_API_ERROR_NO_SUCH_FIB:
+ return clib_error_return
+ (0, "Invalid fib");
+ break;
+
+ case VNET_API_ERROR_UNIMPLEMENTED:
+ return clib_error_return (0, "Device driver doesn't support redirection");
+ break;
+
+ default:
+ return clib_error_return (0, "cop_whitelist_enable_disable returned %d",
+ rv);
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (cop_whitelist_command, static) = {
+ .path = "cop whitelist",
+ .short_help =
+ "cop whitelist <interface-name> [ip4][ip6][default][fib-id <NN>][disable]",
+ .function = cop_whitelist_enable_disable_command_fn,
+};
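+/* Example: "cop whitelist GigabitEthernet0/0/0 ip4 fib-id 0" permits
+ ip4 sources that resolve to receive routes in fib 0 on that interface;
+ all other sources are dropped (interface name illustrative). */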
+
diff --git a/src/vnet/cop/cop.h b/src/vnet/cop/cop.h
new file mode 100644
index 00000000000..eb5f1dfd78e
--- /dev/null
+++ b/src/vnet/cop/cop.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __vnet_cop_h__
+#define __vnet_cop_h__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+
+typedef enum {
+ VNET_COP_IP4,
+ VNET_COP_IP6,
+ VNET_COP_DEFAULT,
+ VNET_N_COPS,
+} vnet_cop_t;
+
+typedef enum {
+ /* First check src address against whitelist */
+ IP4_RX_COP_WHITELIST,
+ IP6_RX_COP_WHITELIST,
+ DEFAULT_RX_COP_WHITELIST,
+
+ /* Pkts not otherwise dropped go to xxx-input */
+ IP4_RX_COP_INPUT,
+ IP6_RX_COP_INPUT,
+ DEFAULT_RX_COP_INPUT,
+
+ /* Going, going, gone... */
+ RX_COP_DROP,
+
+ COP_RX_N_FEATURES,
+} cop_feature_type_t;
+
+typedef struct {
+ vnet_config_main_t config_main;
+ u32 * config_index_by_sw_if_index;
+} cop_config_main_t;
+
+typedef struct {
+ u32 fib_index;
+} cop_config_data_t;
+
+typedef struct {
+ cop_config_main_t cop_config_mains[VNET_N_COPS];
+
+ /* convenience */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} cop_main_t;
+
+extern cop_main_t cop_main;
+
+extern vlib_node_registration_t cop_input_node;
+
+int cop_interface_enable_disable (u32 sw_if_index, int enable_disable);
+
+typedef struct {
+ u32 sw_if_index;
+ u8 ip4;
+ u8 ip6;
+ u8 default_cop;
+ u32 fib_id;
+} cop_whitelist_enable_disable_args_t;
+
+int cop_whitelist_enable_disable (cop_whitelist_enable_disable_args_t *a);
+
+#endif /* __vnet_cop_h__ */
diff --git a/src/vnet/cop/ip4_whitelist.c b/src/vnet/cop/ip4_whitelist.c
new file mode 100644
index 00000000000..d5121e72980
--- /dev/null
+++ b/src/vnet/cop/ip4_whitelist.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/cop/cop.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/dpo/load_balance.h>
+
+typedef struct {
+ u32 next_index;
+ u32 sw_if_index;
+} ip4_cop_whitelist_trace_t;
+
+/* packet trace format function */
+static u8 * format_ip4_cop_whitelist_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ip4_cop_whitelist_trace_t * t = va_arg (*args, ip4_cop_whitelist_trace_t *);
+
+ s = format (s, "IP4_COP_WHITELIST: sw_if_index %d, next index %d",
+ t->sw_if_index, t->next_index);
+ return s;
+}
+
+vlib_node_registration_t ip4_cop_whitelist_node;
+
+#define foreach_ip4_cop_whitelist_error \
+_(DROPPED, "ip4 cop whitelist packets dropped")
+
+typedef enum {
+#define _(sym,str) IP4_COP_WHITELIST_ERROR_##sym,
+ foreach_ip4_cop_whitelist_error
+#undef _
+ IP4_COP_WHITELIST_N_ERROR,
+} ip4_cop_whitelist_error_t;
+
+static char * ip4_cop_whitelist_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ip4_cop_whitelist_error
+#undef _
+};
+
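+/*
+ * Whitelist check: look up each packet's ip4 source address in the
+ * configured fib via the mtrie (four lookup steps, one per address
+ * byte). If the resulting load-balance bucket is not a receive DPO the
+ * packet is dropped; the matched load-balance's via counters are
+ * updated for every packet.
+ */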
+static uword
+ip4_cop_whitelist_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, * from, * to_next;
+ cop_feature_type_t next_index;
+ cop_main_t *cm = &cop_main;
+ vlib_combined_counter_main_t * vcm = &load_balance_main.lbm_via_counters;
+ u32 cpu_index = vm->cpu_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1;
+ ip4_header_t * ip0, * ip1;
+ cop_config_main_t * ccm0, * ccm1;
+ cop_config_data_t * c0, * c1;
+ ip4_fib_mtrie_t * mtrie0, * mtrie1;
+ ip4_fib_mtrie_leaf_t leaf0, leaf1;
+ u32 lb_index0, lb_index1;
+ const load_balance_t * lb0, *lb1;
+ const dpo_id_t *dpo0, *dpo1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+
+ ip0 = vlib_buffer_get_current (b0);
+
+ ccm0 = cm->cop_config_mains + VNET_COP_IP4;
+
+ c0 = vnet_get_config_data
+ (&ccm0->config_main,
+ &vnet_buffer (b0)->cop.current_config_index,
+ &next0,
+ sizeof (c0[0]));
+
+ mtrie0 = &ip4_fib_get (c0->fib_index)->mtrie;
+
+ leaf0 = IP4_FIB_MTRIE_LEAF_ROOT;
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0,
+ &ip0->src_address, 0);
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0,
+ &ip0->src_address, 1);
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0,
+ &ip0->src_address, 2);
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0,
+ &ip0->src_address, 3);
+
+ lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+
+ ASSERT (lb_index0
+ == ip4_fib_table_lookup_lb (ip4_fib_get(c0->fib_index),
+ &ip0->src_address));
+ lb0 = load_balance_get (lb_index0);
+ dpo0 = load_balance_get_bucket_i(lb0, 0);
+
+ if (PREDICT_FALSE(dpo0->dpoi_type != DPO_RECEIVE))
+ {
+ b0->error = node->errors[IP4_COP_WHITELIST_ERROR_DROPPED];
+ next0 = RX_COP_DROP;
+ }
+
+ b1 = vlib_get_buffer (vm, bi1);
+ sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
+
+ ip1 = vlib_buffer_get_current (b1);
+
+ ccm1 = cm->cop_config_mains + VNET_COP_IP4;
+
+ c1 = vnet_get_config_data
+ (&ccm1->config_main,
+ &vnet_buffer (b1)->cop.current_config_index,
+ &next1,
+ sizeof (c1[0]));
+ mtrie1 = &ip4_fib_get (c1->fib_index)->mtrie;
+
+ leaf1 = IP4_FIB_MTRIE_LEAF_ROOT;
+
+ leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1,
+ &ip1->src_address, 0);
+
+ leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1,
+ &ip1->src_address, 1);
+
+ leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1,
+ &ip1->src_address, 2);
+
+ leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1,
+ &ip1->src_address, 3);
+
+ lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
+ ASSERT (lb_index1
+ == ip4_fib_table_lookup_lb (ip4_fib_get(c1->fib_index),
+ &ip1->src_address));
+ lb1 = load_balance_get (lb_index1);
+ dpo1 = load_balance_get_bucket_i(lb1, 0);
+
+ vlib_increment_combined_counter
+ (vcm, cpu_index, lb_index0, 1,
+ vlib_buffer_length_in_chain (vm, b0)
+ + sizeof(ethernet_header_t));
+
+ vlib_increment_combined_counter
+ (vcm, cpu_index, lb_index1, 1,
+ vlib_buffer_length_in_chain (vm, b1)
+ + sizeof(ethernet_header_t));
+
+
+ if (PREDICT_FALSE(dpo1->dpoi_type != DPO_RECEIVE))
+ {
+ b1->error = node->errors[IP4_COP_WHITELIST_ERROR_DROPPED];
+ next1 = RX_COP_DROP;
+ }
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ ip4_cop_whitelist_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b1->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ ip4_cop_whitelist_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0;
+ u32 sw_if_index0;
+ ip4_header_t * ip0;
+ cop_config_main_t *ccm0;
+ cop_config_data_t *c0;
+ ip4_fib_mtrie_t * mtrie0;
+ ip4_fib_mtrie_leaf_t leaf0;
+ u32 lb_index0;
+ const load_balance_t * lb0;
+ const dpo_id_t *dpo0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+
+ ip0 = vlib_buffer_get_current (b0);
+
+ ccm0 = cm->cop_config_mains + VNET_COP_IP4;
+
+ c0 = vnet_get_config_data
+ (&ccm0->config_main,
+ &vnet_buffer (b0)->cop.current_config_index,
+ &next0,
+ sizeof (c0[0]));
+
+ mtrie0 = &ip4_fib_get (c0->fib_index)->mtrie;
+
+ leaf0 = IP4_FIB_MTRIE_LEAF_ROOT;
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0,
+ &ip0->src_address, 0);
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0,
+ &ip0->src_address, 1);
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0,
+ &ip0->src_address, 2);
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0,
+ &ip0->src_address, 3);
+
+ lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+
+ ASSERT (lb_index0
+ == ip4_fib_table_lookup_lb (ip4_fib_get(c0->fib_index),
+ &ip0->src_address));
+
+ lb0 = load_balance_get (lb_index0);
+ dpo0 = load_balance_get_bucket_i(lb0, 0);
+
+ vlib_increment_combined_counter
+ (vcm, cpu_index, lb_index0, 1,
+ vlib_buffer_length_in_chain (vm, b0)
+ + sizeof(ethernet_header_t));
+
+ if (PREDICT_FALSE(dpo0->dpoi_type != DPO_RECEIVE))
+ {
+ b0->error = node->errors[IP4_COP_WHITELIST_ERROR_DROPPED];
+ next0 = RX_COP_DROP;
+ }
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ ip4_cop_whitelist_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (ip4_cop_whitelist_node) = {
+ .function = ip4_cop_whitelist_node_fn,
+ .name = "ip4-cop-whitelist",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip4_cop_whitelist_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(ip4_cop_whitelist_error_strings),
+ .error_strings = ip4_cop_whitelist_error_strings,
+
+ .n_next_nodes = COP_RX_N_FEATURES,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [IP4_RX_COP_WHITELIST] = "ip4-cop-whitelist",
+ [IP6_RX_COP_WHITELIST] = "ip6-cop-whitelist",
+ [DEFAULT_RX_COP_WHITELIST] = "default-cop-whitelist",
+ [IP4_RX_COP_INPUT] = "ip4-input",
+ [IP6_RX_COP_INPUT] = "ip6-input",
+ [DEFAULT_RX_COP_INPUT] = "ethernet-input",
+ [RX_COP_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_cop_whitelist_node, ip4_cop_whitelist_node_fn)
+
+static clib_error_t *
+ip4_whitelist_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ip4_whitelist_init);
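
The four chained ip4_fib_mtrie_lookup_step calls above walk the mtrie one
source-address byte at a time. A minimal sketch of the same lookup in
isolation, assuming only the fib headers this file already includes:

/* Illustrative sketch, not part of the patch: longest-prefix match on a
 * source address via the ip4 mtrie, as the whitelist node does above. */
static u32
whitelist_lookup_src (u32 fib_index, ip4_address_t * src)
{
  ip4_fib_mtrie_t *mtrie = &ip4_fib_get (fib_index)->mtrie;
  ip4_fib_mtrie_leaf_t leaf = IP4_FIB_MTRIE_LEAF_ROOT;
  int i;

  /* one step per address byte; each step refines the match */
  for (i = 0; i < 4; i++)
    leaf = ip4_fib_mtrie_lookup_step (mtrie, leaf, src, i);

  /* the leaf encodes the index of the matching load-balance */
  return ip4_fib_mtrie_leaf_get_adj_index (leaf);
}

The node unrolls these steps manually (twice in the dual loop) rather than
looping, which is the usual vector-node style in this tree.
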
diff --git a/src/vnet/cop/ip6_whitelist.c b/src/vnet/cop/ip6_whitelist.c
new file mode 100644
index 00000000000..c2e16ccfe54
--- /dev/null
+++ b/src/vnet/cop/ip6_whitelist.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/cop/cop.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/dpo/load_balance.h>
+
+typedef struct {
+ u32 next_index;
+ u32 sw_if_index;
+} ip6_cop_whitelist_trace_t;
+
+/* packet trace format function */
+static u8 * format_ip6_cop_whitelist_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ip6_cop_whitelist_trace_t * t = va_arg (*args, ip6_cop_whitelist_trace_t *);
+
+ s = format (s, "IP6_COP_WHITELIST: sw_if_index %d, next index %d",
+ t->sw_if_index, t->next_index);
+ return s;
+}
+
+vlib_node_registration_t ip6_cop_whitelist_node;
+
+#define foreach_ip6_cop_whitelist_error \
+_(DROPPED, "ip6 cop whitelist packets dropped")
+
+typedef enum {
+#define _(sym,str) IP6_COP_WHITELIST_ERROR_##sym,
+ foreach_ip6_cop_whitelist_error
+#undef _
+ IP6_COP_WHITELIST_N_ERROR,
+} ip6_cop_whitelist_error_t;
+
+static char * ip6_cop_whitelist_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ip6_cop_whitelist_error
+#undef _
+};
+
+static uword
+ip6_cop_whitelist_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, * from, * to_next;
+ cop_feature_type_t next_index;
+ cop_main_t *cm = &cop_main;
+ ip6_main_t * im6 = &ip6_main;
+ vlib_combined_counter_main_t * vcm = &load_balance_main.lbm_via_counters;
+ u32 cpu_index = vm->cpu_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1;
+ ip6_header_t * ip0, * ip1;
+ cop_config_main_t * ccm0, * ccm1;
+ cop_config_data_t * c0, * c1;
+ u32 lb_index0, lb_index1;
+ const load_balance_t * lb0, *lb1;
+ const dpo_id_t *dpo0, *dpo1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+
+ ip0 = vlib_buffer_get_current (b0);
+
+ ccm0 = cm->cop_config_mains + VNET_COP_IP6;
+
+ c0 = vnet_get_config_data
+ (&ccm0->config_main,
+ &vnet_buffer (b0)->cop.current_config_index,
+ &next0,
+ sizeof (c0[0]));
+
+ lb_index0 = ip6_fib_table_fwding_lookup (im6, c0->fib_index,
+ &ip0->src_address);
+ lb0 = load_balance_get (lb_index0);
+ dpo0 = load_balance_get_bucket_i(lb0, 0);
+
+ if (PREDICT_FALSE(dpo0->dpoi_type != DPO_RECEIVE))
+ {
+ b0->error = node->errors[IP6_COP_WHITELIST_ERROR_DROPPED];
+ next0 = RX_COP_DROP;
+ }
+
+ b1 = vlib_get_buffer (vm, bi1);
+ sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
+
+ ip1 = vlib_buffer_get_current (b1);
+
+ ccm1 = cm->cop_config_mains + VNET_COP_IP6;
+
+ c1 = vnet_get_config_data
+ (&ccm1->config_main,
+ &vnet_buffer (b1)->cop.current_config_index,
+ &next1,
+ sizeof (c1[0]));
+
+ lb_index1 = ip6_fib_table_fwding_lookup (im6, c1->fib_index,
+ &ip1->src_address);
+
+ lb1 = load_balance_get (lb_index1);
+ dpo1 = load_balance_get_bucket_i(lb1, 0);
+
+ vlib_increment_combined_counter
+ (vcm, cpu_index, lb_index0, 1,
+ vlib_buffer_length_in_chain (vm, b0)
+ + sizeof(ethernet_header_t));
+
+ vlib_increment_combined_counter
+ (vcm, cpu_index, lb_index1, 1,
+ vlib_buffer_length_in_chain (vm, b1)
+ + sizeof(ethernet_header_t));
+
+ if (PREDICT_FALSE(dpo1->dpoi_type != DPO_RECEIVE))
+ {
+ b1->error = node->errors[IP6_COP_WHITELIST_ERROR_DROPPED];
+ next1 = RX_COP_DROP;
+ }
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ ip6_cop_whitelist_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b1->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ ip6_cop_whitelist_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0;
+ u32 sw_if_index0;
+ ip6_header_t * ip0;
+ cop_config_main_t *ccm0;
+ cop_config_data_t *c0;
+ u32 lb_index0;
+ const load_balance_t * lb0;
+ const dpo_id_t *dpo0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+
+ ip0 = vlib_buffer_get_current (b0);
+
+ ccm0 = cm->cop_config_mains + VNET_COP_IP6;
+
+ c0 = vnet_get_config_data
+ (&ccm0->config_main,
+ &vnet_buffer (b0)->cop.current_config_index,
+ &next0,
+ sizeof (c0[0]));
+
+ lb_index0 = ip6_fib_table_fwding_lookup (im6, c0->fib_index,
+ &ip0->src_address);
+
+ lb0 = load_balance_get (lb_index0);
+ dpo0 = load_balance_get_bucket_i(lb0, 0);
+
+ vlib_increment_combined_counter
+ (vcm, cpu_index, lb_index0, 1,
+ vlib_buffer_length_in_chain (vm, b0)
+ + sizeof(ethernet_header_t));
+
+ if (PREDICT_FALSE(dpo0->dpoi_type != DPO_RECEIVE))
+ {
+ b0->error = node->errors[IP6_COP_WHITELIST_ERROR_DROPPED];
+ next0 = RX_COP_DROP;
+ }
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ ip6_cop_whitelist_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (ip6_cop_whitelist_node) = {
+ .function = ip6_cop_whitelist_node_fn,
+ .name = "ip6-cop-whitelist",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip6_cop_whitelist_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(ip6_cop_whitelist_error_strings),
+ .error_strings = ip6_cop_whitelist_error_strings,
+
+ .n_next_nodes = COP_RX_N_FEATURES,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [IP4_RX_COP_WHITELIST] = "ip4-cop-whitelist",
+ [IP6_RX_COP_WHITELIST] = "ip6-cop-whitelist",
+ [DEFAULT_RX_COP_WHITELIST] = "default-cop-whitelist",
+ [IP4_RX_COP_INPUT] = "ip4-input",
+ [IP6_RX_COP_INPUT] = "ip6-input",
+ [DEFAULT_RX_COP_INPUT] = "ethernet-input",
+ [RX_COP_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_cop_whitelist_node, ip6_cop_whitelist_node_fn)
+
+static clib_error_t *
+ip6_whitelist_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ip6_whitelist_init);
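
Unlike the ip4 node, the ip6 node resolves the source address with a single
ip6_fib_table_fwding_lookup call; both nodes then apply the same pass/drop
test. A minimal sketch of that shared predicate, assuming the load-balance
API already used above:

/* Illustrative sketch, not part of the patch: the pass/drop predicate both
 * whitelist nodes share -- the first load-balance bucket must be a receive
 * DPO, i.e. the source is present in the whitelist FIB. */
static int
cop_whitelist_permits (u32 lb_index)
{
  const load_balance_t *lb = load_balance_get (lb_index);
  const dpo_id_t *dpo = load_balance_get_bucket_i (lb, 0);

  return dpo->dpoi_type == DPO_RECEIVE;
}

For ip6 the load-balance index comes straight from the fwding lookup; for
ip4 it is recovered from the mtrie leaf as shown earlier.
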
diff --git a/src/vnet/cop/node1.c b/src/vnet/cop/node1.c
new file mode 100644
index 00000000000..b448b531039
--- /dev/null
+++ b/src/vnet/cop/node1.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/cop/cop.h>
+
+typedef struct {
+ u32 next_index;
+ u32 sw_if_index;
+} cop_input_trace_t;
+
+/* packet trace format function */
+static u8 * format_cop_input_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ cop_input_trace_t * t = va_arg (*args, cop_input_trace_t *);
+
+ s = format (s, "COP_INPUT: sw_if_index %d, next index %d",
+ t->sw_if_index, t->next_index);
+ return s;
+}
+
+vlib_node_registration_t cop_input_node;
+
+#define foreach_cop_input_error \
+_(PROCESSED, "COP input packets processed")
+
+typedef enum {
+#define _(sym,str) COP_INPUT_ERROR_##sym,
+ foreach_cop_input_error
+#undef _
+ COP_INPUT_N_ERROR,
+} cop_input_error_t;
+
+static char * cop_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_cop_input_error
+#undef _
+};
+
+static uword
+cop_input_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, * from, * to_next;
+ cop_feature_type_t next_index;
+ cop_main_t *cm = &cop_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1;
+ ethernet_header_t * en0, * en1;
+ cop_config_main_t * ccm0, * ccm1;
+ u32 advance0, advance1;
+ int proto0, proto1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ en0 = vlib_buffer_get_current (b0);
+ en1 = vlib_buffer_get_current (b1);
+
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
+
+ proto0 = VNET_COP_DEFAULT;
+ proto1 = VNET_COP_DEFAULT;
+ advance0 = 0;
+ advance1 = 0;
+
+ if (en0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))
+ {
+ proto0 = VNET_COP_IP4;
+ advance0 = sizeof(ethernet_header_t);
+ }
+ else if (en0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP6))
+ {
+ proto0 = VNET_COP_IP6;
+ advance0 = sizeof(ethernet_header_t);
+ }
+
+ if (en1->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))
+ {
+ proto1 = VNET_COP_IP4;
+ advance1 = sizeof(ethernet_header_t);
+ }
+ else if (en1->type == clib_host_to_net_u16(ETHERNET_TYPE_IP6))
+ {
+ proto1 = VNET_COP_IP6;
+ advance1 = sizeof(ethernet_header_t);
+ }
+
+ ccm0 = cm->cop_config_mains + proto0;
+ ccm1 = cm->cop_config_mains + proto1;
+ vnet_buffer(b0)->cop.current_config_index =
+ ccm0->config_index_by_sw_if_index [sw_if_index0];
+
+ vnet_buffer(b1)->cop.current_config_index =
+ ccm1->config_index_by_sw_if_index [sw_if_index1];
+
+ vlib_buffer_advance (b0, advance0);
+ vlib_buffer_advance (b1, advance1);
+
+ vnet_get_config_data (&ccm0->config_main,
+ &vnet_buffer(b0)->cop.current_config_index,
+ &next0, 0 /* bytes of config data */);
+
+ vnet_get_config_data (&ccm1->config_main,
+ &vnet_buffer(b1)->cop.current_config_index,
+ &next1, 0 /* bytes of config data */);
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ cop_input_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b1->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ cop_input_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ }
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0;
+ u32 sw_if_index0;
+ ethernet_header_t *en0;
+ cop_config_main_t *ccm0;
+ u32 advance0;
+ int proto0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /*
+ * Direct from the driver, we should be at offset 0
+ * aka at &b0->data[0]
+ */
+ ASSERT (b0->current_data == 0);
+
+ en0 = vlib_buffer_get_current (b0);
+
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+
+ proto0 = VNET_COP_DEFAULT;
+ advance0 = 0;
+
+ if (en0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))
+ {
+ proto0 = VNET_COP_IP4;
+ advance0 = sizeof(ethernet_header_t);
+ }
+ else if (en0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP6))
+ {
+ proto0 = VNET_COP_IP6;
+ advance0 = sizeof(ethernet_header_t);
+ }
+
+ ccm0 = cm->cop_config_mains + proto0;
+ vnet_buffer(b0)->cop.current_config_index =
+ ccm0->config_index_by_sw_if_index [sw_if_index0];
+
+ vlib_buffer_advance (b0, advance0);
+
+ vnet_get_config_data (&ccm0->config_main,
+ &vnet_buffer(b0)->cop.current_config_index,
+ &next0, 0 /* bytes of config data */);
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ cop_input_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, cop_input_node.index,
+ COP_INPUT_ERROR_PROCESSED, frame->n_vectors);
+ return frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (cop_input_node) = {
+ .function = cop_input_node_fn,
+ .name = "cop-input",
+ .vector_size = sizeof (u32),
+ .format_trace = format_cop_input_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(cop_input_error_strings),
+ .error_strings = cop_input_error_strings,
+
+ .n_next_nodes = COP_RX_N_FEATURES,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [IP4_RX_COP_WHITELIST] = "ip4-cop-whitelist",
+ [IP6_RX_COP_WHITELIST] = "ip6-cop-whitelist",
+ [DEFAULT_RX_COP_WHITELIST] = "default-cop-whitelist",
+ [IP4_RX_COP_INPUT] = "ip4-input",
+ [IP6_RX_COP_INPUT] = "ip6-input",
+ [DEFAULT_RX_COP_INPUT] = "ethernet-input",
+ [RX_COP_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (cop_input_node, cop_input_node_fn)
+
+#define foreach_cop_stub \
+_(default-cop-whitelist, default_cop_whitelist)
+
+#define _(n,f) \
+ \
+static uword \
+f##_node_fn (vlib_main_t * vm, \
+ vlib_node_runtime_t * node, \
+ vlib_frame_t * frame) \
+{ \
+ clib_warning ("BUG: stub function called"); \
+ return 0; \
+} \
+ \
+VLIB_REGISTER_NODE (f##_input_node) = { \
+ .function = f##_node_fn, \
+ .name = #n, \
+ .vector_size = sizeof (u32), \
+ .type = VLIB_NODE_TYPE_INTERNAL, \
+ \
+ .n_errors = 0, \
+ .error_strings = 0, \
+ \
+ .n_next_nodes = 0, \
+};
+
+foreach_cop_stub;
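
cop-input itself only classifies the ethertype, advances past the L2 header
for IP traffic, and lets the per-protocol config chain choose the next node.
A sketch of the classification step that the dual and single loops above
both unroll:

/* Illustrative sketch, not part of the patch: ethertype -> COP protocol
 * classification as done (unrolled) in cop_input_node_fn above. */
static void
cop_classify (const ethernet_header_t * en, int *proto, u32 * advance)
{
  *proto = VNET_COP_DEFAULT;	/* unknown types stay on ethernet-input */
  *advance = 0;

  if (en->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP4))
    *proto = VNET_COP_IP4;
  else if (en->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
    *proto = VNET_COP_IP6;

  if (*proto != VNET_COP_DEFAULT)
    *advance = sizeof (ethernet_header_t);	/* strip L2 before L3 nodes */
}
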
diff --git a/src/vnet/devices/af_packet/af_packet.api b/src/vnet/devices/af_packet/af_packet.api
new file mode 100644
index 00000000000..9fb2a2070f2
--- /dev/null
+++ b/src/vnet/devices/af_packet/af_packet.api
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \brief Create host-interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param host_if_name - interface name
+ @param hw_addr - interface MAC
+ @param use_random_hw_addr - use random generated MAC
+*/
+define af_packet_create
+{
+ u32 client_index;
+ u32 context;
+
+ u8 host_if_name[64];
+ u8 hw_addr[6];
+ u8 use_random_hw_addr;
+};
+
+/** \brief Create host-interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define af_packet_create_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief Delete host-interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param host_if_name - interface name
+*/
+define af_packet_delete
+{
+ u32 client_index;
+ u32 context;
+
+ u8 host_if_name[64];
+};
+
+/** \brief Delete host-interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define af_packet_delete_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
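
A client drives this interface over the binary API. A hedged sketch of
populating the create request defined above; allocation via vl_msg_api_alloc
is an assumption here, and message-id setup and transport are left to the
client library:

/* Illustrative sketch, not part of the patch. */
vl_api_af_packet_create_t *mp = vl_msg_api_alloc (sizeof (*mp));

memset (mp, 0, sizeof (*mp));
strncpy ((char *) mp->host_if_name, "eth1", sizeof (mp->host_if_name) - 1);
mp->use_random_hw_addr = 1;	/* VPP picks a 02:fe:xx:xx:xx:xx address */
/* set client_index/context, byte-swap the message id, then send ... */
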
diff --git a/src/vnet/devices/af_packet/af_packet.c b/src/vnet/devices/af_packet/af_packet.c
new file mode 100644
index 00000000000..91c3988b439
--- /dev/null
+++ b/src/vnet/devices/af_packet/af_packet.c
@@ -0,0 +1,366 @@
+/*
+ *------------------------------------------------------------------
+ * af_packet.c - linux kernel packet interface
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <vnet/devices/af_packet/af_packet.h>
+
+#define AF_PACKET_DEBUG_SOCKET 0
+
+#define AF_PACKET_TX_FRAMES_PER_BLOCK 1024
+#define AF_PACKET_TX_FRAME_SIZE (2048 * 5)
+#define AF_PACKET_TX_BLOCK_NR 1
+#define AF_PACKET_TX_FRAME_NR (AF_PACKET_TX_BLOCK_NR * \
+ AF_PACKET_TX_FRAMES_PER_BLOCK)
+#define AF_PACKET_TX_BLOCK_SIZE (AF_PACKET_TX_FRAME_SIZE * \
+ AF_PACKET_TX_FRAMES_PER_BLOCK)
+
+#define AF_PACKET_RX_FRAMES_PER_BLOCK 1024
+#define AF_PACKET_RX_FRAME_SIZE (2048 * 5)
+#define AF_PACKET_RX_BLOCK_NR 1
+#define AF_PACKET_RX_FRAME_NR (AF_PACKET_RX_BLOCK_NR * \
+ AF_PACKET_RX_FRAMES_PER_BLOCK)
+#define AF_PACKET_RX_BLOCK_SIZE (AF_PACKET_RX_FRAME_SIZE * \
+ AF_PACKET_RX_FRAMES_PER_BLOCK)
+
+#if AF_PACKET_DEBUG_SOCKET == 1
+#define DBG_SOCK(args...) clib_warning(args);
+#else
+#define DBG_SOCK(args...)
+#endif
+
+/* defined in net/if.h but clashes with dpdk headers */
+unsigned int if_nametoindex (const char *ifname);
+
+typedef struct tpacket_req tpacket_req_t;
+
+static u32
+af_packet_eth_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi,
+ u32 flags)
+{
+ /* nothing for now */
+ return 0;
+}
+
+static clib_error_t *
+af_packet_fd_read_ready (unix_file_t * uf)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ af_packet_main_t *apm = &af_packet_main;
+ u32 idx = uf->private_data;
+
+ apm->pending_input_bitmap =
+ clib_bitmap_set (apm->pending_input_bitmap, idx, 1);
+
+ /* Schedule the rx node */
+ vlib_node_set_interrupt_pending (vm, af_packet_input_node.index);
+
+ return 0;
+}
+
+static int
+create_packet_v2_sock (u8 * name, tpacket_req_t * rx_req,
+ tpacket_req_t * tx_req, int *fd, u8 ** ring)
+{
+ int ret, err;
+ struct sockaddr_ll sll;
+ uint host_if_index;
+ int ver = TPACKET_V2;
+ socklen_t req_sz = sizeof (struct tpacket_req);
+ u32 ring_sz = rx_req->tp_block_size * rx_req->tp_block_nr +
+ tx_req->tp_block_size * tx_req->tp_block_nr;
+
+ host_if_index = if_nametoindex ((const char *) name);
+
+ if (!host_if_index)
+ {
+ DBG_SOCK ("Wrong host interface name");
+ ret = VNET_API_ERROR_INVALID_INTERFACE;
+ goto error;
+ }
+
+ if ((*fd = socket (AF_PACKET, SOCK_RAW, htons (ETH_P_ALL))) < 0)
+ {
+ DBG_SOCK ("Failed to create socket");
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ if ((err =
+ setsockopt (*fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof (ver))) < 0)
+ {
+ DBG_SOCK ("Failed to set rx packet interface version");
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ int opt = 1;
+ if ((err =
+ setsockopt (*fd, SOL_PACKET, PACKET_LOSS, &opt, sizeof (opt))) < 0)
+ {
+ DBG_SOCK ("Failed to set packet tx ring error handling option");
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ if ((err =
+ setsockopt (*fd, SOL_PACKET, PACKET_RX_RING, rx_req, req_sz)) < 0)
+ {
+ DBG_SOCK ("Failed to set packet rx ring options");
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ if ((err =
+ setsockopt (*fd, SOL_PACKET, PACKET_TX_RING, tx_req, req_sz)) < 0)
+ {
+ DBG_SOCK ("Failed to set packet rx ring options");
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ *ring =
+ mmap (NULL, ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, *fd,
+ 0);
+ if (*ring == MAP_FAILED)
+ {
+ DBG_SOCK ("mmap failure");
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ memset (&sll, 0, sizeof (sll));
+ sll.sll_family = PF_PACKET;
+ sll.sll_protocol = htons (ETH_P_ALL);
+ sll.sll_ifindex = host_if_index;
+
+ if ((err = bind (*fd, (struct sockaddr *) &sll, sizeof (sll))) < 0)
+ {
+ DBG_SOCK ("Failed to bind rx packet socket (error %d)", err);
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ return 0;
+error:
+ if (*fd >= 0)
+ close (*fd);
+ *fd = -1;
+ return ret;
+}
+
+int
+af_packet_create_if (vlib_main_t * vm, u8 * host_if_name, u8 * hw_addr_set,
+ u32 * sw_if_index)
+{
+ af_packet_main_t *apm = &af_packet_main;
+ int ret, fd = -1;
+ struct tpacket_req *rx_req = 0;
+ struct tpacket_req *tx_req = 0;
+ u8 *ring = 0;
+ af_packet_if_t *apif = 0;
+ u8 hw_addr[6];
+ clib_error_t *error;
+ vnet_sw_interface_t *sw;
+ vnet_main_t *vnm = vnet_get_main ();
+ uword *p;
+ uword if_index;
+ u8 *host_if_name_dup = vec_dup (host_if_name);
+
+ p = mhash_get (&apm->if_index_by_host_if_name, host_if_name);
+ if (p)
+ {
+ return VNET_API_ERROR_SUBIF_ALREADY_EXISTS;
+ }
+
+ vec_validate (rx_req, 0);
+ rx_req->tp_block_size = AF_PACKET_RX_BLOCK_SIZE;
+ rx_req->tp_frame_size = AF_PACKET_RX_FRAME_SIZE;
+ rx_req->tp_block_nr = AF_PACKET_RX_BLOCK_NR;
+ rx_req->tp_frame_nr = AF_PACKET_RX_FRAME_NR;
+
+ vec_validate (tx_req, 0);
+ tx_req->tp_block_size = AF_PACKET_TX_BLOCK_SIZE;
+ tx_req->tp_frame_size = AF_PACKET_TX_FRAME_SIZE;
+ tx_req->tp_block_nr = AF_PACKET_TX_BLOCK_NR;
+ tx_req->tp_frame_nr = AF_PACKET_TX_FRAME_NR;
+
+ ret = create_packet_v2_sock (host_if_name, rx_req, tx_req, &fd, &ring);
+
+ if (ret != 0)
+ goto error;
+
+ /* So far everything looks good, let's create interface */
+ pool_get (apm->interfaces, apif);
+ if_index = apif - apm->interfaces;
+
+ apif->fd = fd;
+ apif->rx_ring = ring;
+ apif->tx_ring = ring + rx_req->tp_block_size * rx_req->tp_block_nr;
+ apif->rx_req = rx_req;
+ apif->tx_req = tx_req;
+ apif->host_if_name = host_if_name_dup;
+ apif->per_interface_next_index = ~0;
+ apif->next_tx_frame = 0;
+ apif->next_rx_frame = 0;
+
+ {
+ unix_file_t template = { 0 };
+ template.read_function = af_packet_fd_read_ready;
+ template.file_descriptor = fd;
+ template.private_data = if_index;
+ template.flags = UNIX_FILE_EVENT_EDGE_TRIGGERED;
+ apif->unix_file_index = unix_file_add (&unix_main, &template);
+ }
+
+ /* use the configured MAC address, or generate a random one */
+ if (hw_addr_set)
+ clib_memcpy (hw_addr, hw_addr_set, 6);
+ else
+ {
+ f64 now = vlib_time_now (vm);
+ u32 rnd;
+ rnd = (u32) (now * 1e6);
+ rnd = random_u32 (&rnd);
+
+ clib_memcpy (hw_addr + 2, &rnd, sizeof (rnd));
+ hw_addr[0] = 2;
+ hw_addr[1] = 0xfe;
+ }
+
+ error = ethernet_register_interface (vnm, af_packet_device_class.index,
+ if_index, hw_addr, &apif->hw_if_index,
+ af_packet_eth_flag_change);
+
+ if (error)
+ {
+ memset (apif, 0, sizeof (*apif));
+ pool_put (apm->interfaces, apif);
+ clib_error_report (error);
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ sw = vnet_get_hw_sw_interface (vnm, apif->hw_if_index);
+ apif->sw_if_index = sw->sw_if_index;
+
+ vnet_hw_interface_set_flags (vnm, apif->hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+
+ mhash_set_mem (&apm->if_index_by_host_if_name, host_if_name_dup, &if_index,
+ 0);
+ if (sw_if_index)
+ *sw_if_index = apif->sw_if_index;
+ return 0;
+
+error:
+ vec_free (host_if_name_dup);
+ vec_free (rx_req);
+ vec_free (tx_req);
+ return ret;
+}
+
+int
+af_packet_delete_if (vlib_main_t * vm, u8 * host_if_name)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ af_packet_main_t *apm = &af_packet_main;
+ af_packet_if_t *apif;
+ uword *p;
+ uword if_index;
+ u32 ring_sz;
+
+ p = mhash_get (&apm->if_index_by_host_if_name, host_if_name);
+ if (p == NULL)
+ {
+ clib_warning ("Host interface %s does not exist", host_if_name);
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ }
+ apif = pool_elt_at_index (apm->interfaces, p[0]);
+ if_index = apif - apm->interfaces;
+
+ /* bring down the interface */
+ vnet_hw_interface_set_flags (vnm, apif->hw_if_index, 0);
+
+ /* clean up */
+ if (apif->unix_file_index != ~0)
+ {
+ unix_file_del (&unix_main, unix_main.file_pool + apif->unix_file_index);
+ apif->unix_file_index = ~0;
+ }
+ else
+ close (apif->fd);
+
+ ring_sz = apif->rx_req->tp_block_size * apif->rx_req->tp_block_nr +
+ apif->tx_req->tp_block_size * apif->tx_req->tp_block_nr;
+ if (munmap (apif->rx_ring, ring_sz))
+ clib_warning ("Host interface %s could not free rx/tx ring",
+ host_if_name);
+ apif->rx_ring = NULL;
+ apif->tx_ring = NULL;
+ apif->fd = -1;
+
+ vec_free (apif->rx_req);
+ apif->rx_req = NULL;
+ vec_free (apif->tx_req);
+ apif->tx_req = NULL;
+
+ vec_free (apif->host_if_name);
+ apif->host_if_name = NULL;
+
+ mhash_unset (&apm->if_index_by_host_if_name, host_if_name, &if_index);
+
+ ethernet_delete_interface (vnm, apif->hw_if_index);
+
+ pool_put (apm->interfaces, apif);
+
+ return 0;
+}
+
+static clib_error_t *
+af_packet_init (vlib_main_t * vm)
+{
+ af_packet_main_t *apm = &af_packet_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+
+ memset (apm, 0, sizeof (af_packet_main_t));
+
+ mhash_init_vec_string (&apm->if_index_by_host_if_name, sizeof (uword));
+
+ vec_validate_aligned (apm->rx_buffers, tm->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (af_packet_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
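
The single mmap in create_packet_v2_sock maps the rx and tx rings back to
back, which is why af_packet_create_if sets tx_ring to ring plus the rx
block span. A worked check of the sizes the defines above imply
(standalone C, plain types):

/* Illustrative arithmetic, not part of the patch. */
#include <assert.h>

int
main (void)
{
  unsigned frame_size = 2048 * 5;	/* 10240 bytes per frame */
  unsigned frames_per_block = 1024;
  unsigned block_size = frame_size * frames_per_block;	/* 10,485,760 */
  unsigned ring_size = 1 * block_size /* rx */ + 1 * block_size /* tx */;

  /* one mmap() covers both rings: 20 MB with these defaults */
  assert (ring_size == 20971520);
  return 0;
}
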
diff --git a/src/vnet/devices/af_packet/af_packet.h b/src/vnet/devices/af_packet/af_packet.h
new file mode 100644
index 00000000000..19e2523d6c9
--- /dev/null
+++ b/src/vnet/devices/af_packet/af_packet.h
@@ -0,0 +1,69 @@
+/*
+ *------------------------------------------------------------------
+ * af_packet.h - linux kernel packet interface header file
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u8 *host_if_name;
+ int fd;
+ struct tpacket_req *rx_req;
+ struct tpacket_req *tx_req;
+ u8 *rx_ring;
+ u8 *tx_ring;
+ u32 hw_if_index;
+ u32 sw_if_index;
+ u32 unix_file_index;
+
+ u32 next_rx_frame;
+ u32 next_tx_frame;
+
+ u32 per_interface_next_index;
+ u8 is_admin_up;
+} af_packet_if_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ af_packet_if_t *interfaces;
+
+ /* bitmap of pending rx interfaces */
+ uword *pending_input_bitmap;
+
+ /* rx buffer cache */
+ u32 **rx_buffers;
+
+ /* hash of host interface names */
+ mhash_t if_index_by_host_if_name;
+} af_packet_main_t;
+
+af_packet_main_t af_packet_main;
+extern vnet_device_class_t af_packet_device_class;
+extern vlib_node_registration_t af_packet_input_node;
+
+int af_packet_create_if (vlib_main_t * vm, u8 * host_if_name,
+ u8 * hw_addr_set, u32 * sw_if_index);
+int af_packet_delete_if (vlib_main_t * vm, u8 * host_if_name);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
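
A minimal sketch of driving the two exported calls, assuming a
NUL-terminated name vector as the CLI and API paths both construct:

/* Illustrative sketch, not part of the patch. */
static int
af_packet_example (vlib_main_t * vm)
{
  u32 sw_if_index;
  u8 *name = format (0, "eth1%c", 0);	/* NUL-terminated vector */
  int rv = af_packet_create_if (vm, name, 0 /* random MAC */, &sw_if_index);

  if (rv == 0)
    rv = af_packet_delete_if (vm, name);
  vec_free (name);
  return rv;
}
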
diff --git a/src/vnet/devices/af_packet/af_packet_api.c b/src/vnet/devices/af_packet/af_packet_api.c
new file mode 100644
index 00000000000..414c838cdf7
--- /dev/null
+++ b/src/vnet/devices/af_packet/af_packet_api.c
@@ -0,0 +1,143 @@
+/*
+ *------------------------------------------------------------------
+ * af_packet_api.c - af-packet api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/devices/af_packet/af_packet.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(AF_PACKET_CREATE, af_packet_create) \
+_(AF_PACKET_DELETE, af_packet_delete)
+
+static void
+vl_api_af_packet_create_t_handler (vl_api_af_packet_create_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_af_packet_create_reply_t *rmp;
+ int rv = 0;
+ u8 *host_if_name = NULL;
+ u32 sw_if_index;
+
+ host_if_name = format (0, "%s", mp->host_if_name);
+ vec_add1 (host_if_name, 0);
+
+ rv = af_packet_create_if (vm, host_if_name,
+ mp->use_random_hw_addr ? 0 : mp->hw_addr,
+ &sw_if_index);
+
+ vec_free (host_if_name);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_AF_PACKET_CREATE_REPLY,
+ ({
+ rmp->sw_if_index = clib_host_to_net_u32(sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_af_packet_delete_t_handler (vl_api_af_packet_delete_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_af_packet_delete_reply_t *rmp;
+ int rv = 0;
+ u8 *host_if_name = NULL;
+
+ host_if_name = format (0, "%s", mp->host_if_name);
+ vec_add1 (host_if_name, 0);
+
+ rv = af_packet_delete_if (vm, host_if_name);
+
+ vec_free (host_if_name);
+
+ REPLY_MACRO (VL_API_AF_PACKET_DELETE_REPLY);
+}
+
+/*
+ * af_packet_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has alread mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_af_packet;
+#undef _
+}
+
+static clib_error_t *
+af_packet_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (af_packet_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
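
Each entry in foreach_vpe_api_msg expands to one vl_msg_api_set_handlers
call. Written out for AF_PACKET_CREATE, the expansion of the `_` macro
above is:

/* Illustrative expansion, not part of the patch. */
vl_msg_api_set_handlers (VL_API_AF_PACKET_CREATE, "af_packet_create",
			 vl_api_af_packet_create_t_handler,
			 vl_noop_handler,
			 vl_api_af_packet_create_t_endian,
			 vl_api_af_packet_create_t_print,
			 sizeof (vl_api_af_packet_create_t), 1);
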
diff --git a/src/vnet/devices/af_packet/cli.c b/src/vnet/devices/af_packet/cli.c
new file mode 100644
index 00000000000..2cbd415289e
--- /dev/null
+++ b/src/vnet/devices/af_packet/cli.c
@@ -0,0 +1,144 @@
+/*
+ *------------------------------------------------------------------
+ * af_packet.c - linux kernel packet interface
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <fcntl.h> /* for open */
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/uio.h> /* for iovec */
+#include <netinet/in.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <vnet/devices/af_packet/af_packet.h>
+
+static clib_error_t *
+af_packet_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *host_if_name = NULL;
+ u8 hwaddr[6];
+ u8 *hw_addr_ptr = 0;
+ u32 sw_if_index;
+ int r;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "name %s", &host_if_name))
+ ;
+ else
+ if (unformat
+ (line_input, "hw-addr %U", unformat_ethernet_address, hwaddr))
+ hw_addr_ptr = hwaddr;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ if (host_if_name == NULL)
+ return clib_error_return (0, "missing host interface name");
+
+ r = af_packet_create_if (vm, host_if_name, hw_addr_ptr, &sw_if_index);
+ vec_free (host_if_name);
+
+ if (r == VNET_API_ERROR_SYSCALL_ERROR_1)
+ return clib_error_return (0, "%s (errno %d)", strerror (errno), errno);
+
+ if (r == VNET_API_ERROR_INVALID_INTERFACE)
+ return clib_error_return (0, "Invalid interface name");
+
+ if (r == VNET_API_ERROR_SUBIF_ALREADY_EXISTS)
+ return clib_error_return (0, "Interface elready exists");
+
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (),
+ sw_if_index);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (af_packet_create_command, static) = {
+ .path = "create host-interface",
+ .short_help = "create host-interface name <interface name> [hw-addr <mac>]",
+ .function = af_packet_create_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+af_packet_delete_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *host_if_name = NULL;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "name %s", &host_if_name))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ if (host_if_name == NULL)
+ return clib_error_return (0, "missing host interface name");
+
+ af_packet_delete_if (vm, host_if_name);
+ vec_free (host_if_name);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (af_packet_delete_command, static) = {
+ .path = "delete host-interface",
+ .short_help = "delete host-interface name <interface name>",
+ .function = af_packet_delete_command_fn,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+af_packet_cli_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (af_packet_cli_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
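
Given the .path and .short_help registrations above, a debug-CLI session
would look like the following (interface name and MAC illustrative); the
host-eth1 output comes from format_af_packet_device_name in device.c:

    vpp# create host-interface name eth1 hw-addr 02:fe:0a:0b:0c:0d
    host-eth1
    vpp# delete host-interface name eth1
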
diff --git a/src/vnet/devices/af_packet/device.c b/src/vnet/devices/af_packet/device.c
new file mode 100644
index 00000000000..1fb4000f6e6
--- /dev/null
+++ b/src/vnet/devices/af_packet/device.c
@@ -0,0 +1,250 @@
+/*
+ *------------------------------------------------------------------
+ * af_packet.c - linux kernel packet interface
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <linux/if_packet.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <vnet/devices/af_packet/af_packet.h>
+
+#define foreach_af_packet_tx_func_error \
+_(FRAME_NOT_READY, "tx frame not ready") \
+_(TXRING_EAGAIN, "tx sendto temporary failure") \
+_(TXRING_FATAL, "tx sendto fatal failure") \
+_(TXRING_OVERRUN, "tx ring overrun")
+
+typedef enum
+{
+#define _(f,s) AF_PACKET_TX_ERROR_##f,
+ foreach_af_packet_tx_func_error
+#undef _
+ AF_PACKET_TX_N_ERROR,
+} af_packet_tx_func_error_t;
+
+static char *af_packet_tx_func_error_strings[] = {
+#define _(n,s) s,
+ foreach_af_packet_tx_func_error
+#undef _
+};
+
+
+static u8 *
+format_af_packet_device_name (u8 * s, va_list * args)
+{
+ u32 i = va_arg (*args, u32);
+ af_packet_main_t *apm = &af_packet_main;
+ af_packet_if_t *apif = pool_elt_at_index (apm->interfaces, i);
+
+ s = format (s, "host-%s", apif->host_if_name);
+ return s;
+}
+
+static u8 *
+format_af_packet_device (u8 * s, va_list * args)
+{
+ s = format (s, "Linux PACKET socket interface");
+ return s;
+}
+
+static u8 *
+format_af_packet_tx_trace (u8 * s, va_list * args)
+{
+ s = format (s, "Unimplemented...");
+ return s;
+}
+
+static uword
+af_packet_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ af_packet_main_t *apm = &af_packet_main;
+ u32 *buffers = vlib_frame_args (frame);
+ u32 n_left = frame->n_vectors;
+ u32 n_sent = 0;
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ af_packet_if_t *apif =
+ pool_elt_at_index (apm->interfaces, rd->dev_instance);
+ int block = 0;
+ u32 block_size = apif->tx_req->tp_block_size;
+ u32 frame_size = apif->tx_req->tp_frame_size;
+ u32 frame_num = apif->tx_req->tp_frame_nr;
+ u8 *block_start = apif->tx_ring + block * block_size;
+ u32 tx_frame = apif->next_tx_frame;
+ struct tpacket2_hdr *tph;
+ u32 frame_not_ready = 0;
+
+ while (n_left > 0)
+ {
+ u32 len;
+ u32 offset = 0;
+ vlib_buffer_t *b0;
+ n_left--;
+ u32 bi = buffers[0];
+ buffers++;
+
+ tph = (struct tpacket2_hdr *) (block_start + tx_frame * frame_size);
+
+ if (PREDICT_FALSE
+ (tph->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)))
+ {
+ frame_not_ready++;
+ goto next;
+ }
+
+ do
+ {
+ b0 = vlib_get_buffer (vm, bi);
+ len = b0->current_length;
+ clib_memcpy ((u8 *) tph +
+ TPACKET_ALIGN (sizeof (struct tpacket2_hdr)) + offset,
+ vlib_buffer_get_current (b0), len);
+ offset += len;
+ }
+ while ((bi = b0->next_buffer));
+
+ tph->tp_len = tph->tp_snaplen = offset;
+ tph->tp_status = TP_STATUS_SEND_REQUEST;
+ n_sent++;
+ next:
+ /* check if we've exhausted the ring */
+ if (PREDICT_FALSE (frame_not_ready + n_sent == frame_num))
+ break;
+
+ tx_frame = (tx_frame + 1) % frame_num;
+ }
+
+ CLIB_MEMORY_BARRIER ();
+
+ if (PREDICT_TRUE (n_sent))
+ {
+ apif->next_tx_frame = tx_frame;
+
+ if (PREDICT_FALSE (sendto (apif->fd, NULL, 0,
+ MSG_DONTWAIT, NULL, 0) == -1))
+ {
+ /* Uh-oh, drop & move on, but count whether it was fatal or not.
+ * Note that we have no reliable way to properly determine the
+ * disposition of the packets we just enqueued for delivery.
+ */
+ vlib_error_count (vm, node->node_index,
+ unix_error_is_fatal (errno) ?
+ AF_PACKET_TX_ERROR_TXRING_FATAL :
+ AF_PACKET_TX_ERROR_TXRING_EAGAIN, n_sent);
+ }
+ }
+
+ if (PREDICT_FALSE (frame_not_ready))
+ vlib_error_count (vm, node->node_index,
+ AF_PACKET_TX_ERROR_FRAME_NOT_READY, frame_not_ready);
+
+ if (PREDICT_FALSE (frame_not_ready + n_sent == frame_num))
+ vlib_error_count (vm, node->node_index, AF_PACKET_TX_ERROR_TXRING_OVERRUN,
+ n_left);
+
+ vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
+ return frame->n_vectors;
+}
+
+static void
+af_packet_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
+ u32 node_index)
+{
+ af_packet_main_t *apm = &af_packet_main;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ af_packet_if_t *apif =
+ pool_elt_at_index (apm->interfaces, hw->dev_instance);
+
+ /* Shut off redirection */
+ if (node_index == ~0)
+ {
+ apif->per_interface_next_index = node_index;
+ return;
+ }
+
+ apif->per_interface_next_index =
+ vlib_node_add_next (vlib_get_main (), af_packet_input_node.index,
+ node_index);
+}
+
+static void
+af_packet_clear_hw_interface_counters (u32 instance)
+{
+ /* Nothing for now */
+}
+
+static clib_error_t *
+af_packet_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
+ u32 flags)
+{
+ af_packet_main_t *apm = &af_packet_main;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ af_packet_if_t *apif =
+ pool_elt_at_index (apm->interfaces, hw->dev_instance);
+ u32 hw_flags;
+
+ apif->is_admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+
+ if (apif->is_admin_up)
+ hw_flags = VNET_HW_INTERFACE_FLAG_LINK_UP;
+ else
+ hw_flags = 0;
+
+ vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags);
+
+ return 0;
+}
+
+static clib_error_t *
+af_packet_subif_add_del_function (vnet_main_t * vnm,
+ u32 hw_if_index,
+ struct vnet_sw_interface_t *st, int is_add)
+{
+ /* Nothing for now */
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (af_packet_device_class) = {
+ .name = "af-packet",
+ .tx_function = af_packet_interface_tx,
+ .format_device_name = format_af_packet_device_name,
+ .format_device = format_af_packet_device,
+ .format_tx_trace = format_af_packet_tx_trace,
+ .tx_function_n_errors = AF_PACKET_TX_N_ERROR,
+ .tx_function_error_strings = af_packet_tx_func_error_strings,
+ .rx_redirect_to_node = af_packet_set_interface_next_node,
+ .clear_counters = af_packet_clear_hw_interface_counters,
+ .admin_up_down_function = af_packet_interface_admin_up_down,
+ .subif_add_del_function = af_packet_subif_add_del_function,
+};
+
+VLIB_DEVICE_TX_FUNCTION_MULTIARCH (af_packet_device_class,
+ af_packet_interface_tx)
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
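
af_packet_interface_tx hands frames to the kernel through the tp_status
word of each ring slot plus a zero-length sendto kick. A condensed sketch
of that per-frame handshake, using only the tpacket2 flags referenced
above:

/* Illustrative sketch, not part of the patch. */
static int
tx_frame_ready (struct tpacket2_hdr *tph)
{
  /* frame still owned by the kernel (queued or being sent)? */
  return !(tph->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING));
}

/* after copying the packet into a ready frame:
 *   tph->tp_len = tph->tp_snaplen = len;
 *   tph->tp_status = TP_STATUS_SEND_REQUEST;       hand it to the kernel
 *   CLIB_MEMORY_BARRIER ();
 *   sendto (fd, NULL, 0, MSG_DONTWAIT, NULL, 0);   kick transmission
 */
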
diff --git a/src/vnet/devices/af_packet/node.c b/src/vnet/devices/af_packet/node.c
new file mode 100644
index 00000000000..72004320c67
--- /dev/null
+++ b/src/vnet/devices/af_packet/node.c
@@ -0,0 +1,288 @@
+/*
+ *------------------------------------------------------------------
+ * af_packet.c - linux kernel packet interface
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <linux/if_packet.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/devices.h>
+#include <vnet/feature/feature.h>
+
+#include <vnet/devices/af_packet/af_packet.h>
+
+#define foreach_af_packet_input_error
+
+typedef enum
+{
+#define _(f,s) AF_PACKET_INPUT_ERROR_##f,
+ foreach_af_packet_input_error
+#undef _
+ AF_PACKET_INPUT_N_ERROR,
+} af_packet_input_error_t;
+
+static char *af_packet_input_error_strings[] = {
+#define _(n,s) s,
+ foreach_af_packet_input_error
+#undef _
+};
+
+typedef struct
+{
+ u32 next_index;
+ u32 hw_if_index;
+ int block;
+ struct tpacket2_hdr tph;
+} af_packet_input_trace_t;
+
+static u8 *
+format_af_packet_input_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ af_packet_input_trace_t *t = va_arg (*args, af_packet_input_trace_t *);
+ uword indent = format_get_indent (s);
+
+ s = format (s, "af_packet: hw_if_index %d next-index %d",
+ t->hw_if_index, t->next_index);
+
+ s =
+ format (s,
+ "\n%Utpacket2_hdr:\n%Ustatus 0x%x len %u snaplen %u mac %u net %u"
+ "\n%Usec 0x%x nsec 0x%x vlan %U"
+#ifdef TP_STATUS_VLAN_TPID_VALID
+ " vlan_tpid %u"
+#endif
+ ,
+ format_white_space, indent + 2,
+ format_white_space, indent + 4,
+ t->tph.tp_status,
+ t->tph.tp_len,
+ t->tph.tp_snaplen,
+ t->tph.tp_mac,
+ t->tph.tp_net,
+ format_white_space, indent + 4,
+ t->tph.tp_sec,
+ t->tph.tp_nsec, format_ethernet_vlan_tci, t->tph.tp_vlan_tci
+#ifdef TP_STATUS_VLAN_TPID_VALID
+ , t->tph.tp_vlan_tpid
+#endif
+ );
+ return s;
+}
+
+always_inline void
+buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi, u32 prev_bi)
+{
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ vlib_buffer_t *first_b = vlib_get_buffer (vm, first_bi);
+ vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_bi);
+
+ /* update first buffer */
+ first_b->total_length_not_including_first_buffer += b->current_length;
+
+ /* update previous buffer */
+ prev_b->next_buffer = bi;
+ prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
+
+ /* update current buffer */
+ b->next_buffer = 0;
+}
+
+always_inline uword
+af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame, u32 device_idx)
+{
+ af_packet_main_t *apm = &af_packet_main;
+ af_packet_if_t *apif = pool_elt_at_index (apm->interfaces, device_idx);
+ struct tpacket2_hdr *tph;
+ u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ u32 block = 0;
+ u32 rx_frame;
+ u32 n_free_bufs;
+ u32 n_rx_packets = 0;
+ u32 n_rx_bytes = 0;
+ u32 *to_next = 0;
+ u32 block_size = apif->rx_req->tp_block_size;
+ u32 frame_size = apif->rx_req->tp_frame_size;
+ u32 frame_num = apif->rx_req->tp_frame_nr;
+ u8 *block_start = apif->rx_ring + block * block_size;
+ uword n_trace = vlib_get_trace_count (vm, node);
+ u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm,
+ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+ u32 min_bufs = apif->rx_req->tp_frame_size / n_buffer_bytes;
+ int cpu_index = node->cpu_index;
+
+ if (apif->per_interface_next_index != ~0)
+ next_index = apif->per_interface_next_index;
+
+ n_free_bufs = vec_len (apm->rx_buffers[cpu_index]);
+ if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE))
+ {
+ vec_validate (apm->rx_buffers[cpu_index],
+ VLIB_FRAME_SIZE + n_free_bufs - 1);
+ n_free_bufs +=
+ vlib_buffer_alloc (vm, &apm->rx_buffers[cpu_index][n_free_bufs],
+ VLIB_FRAME_SIZE);
+ _vec_len (apm->rx_buffers[cpu_index]) = n_free_bufs;
+ }
+
+ rx_frame = apif->next_rx_frame;
+ tph = (struct tpacket2_hdr *) (block_start + rx_frame * frame_size);
+ while ((tph->tp_status & TP_STATUS_USER) && (n_free_bufs > min_bufs))
+ {
+ vlib_buffer_t *b0 = 0, *first_b0 = 0;
+ u32 next0 = next_index;
+
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ while ((tph->tp_status & TP_STATUS_USER) && (n_free_bufs > min_bufs) &&
+ n_left_to_next)
+ {
+ u32 data_len = tph->tp_snaplen;
+ u32 offset = 0;
+ u32 bi0 = 0, first_bi0 = 0, prev_bi0;
+
+ while (data_len)
+ {
+ /* grab free buffer */
+ u32 last_empty_buffer =
+ vec_len (apm->rx_buffers[cpu_index]) - 1;
+ prev_bi0 = bi0;
+ bi0 = apm->rx_buffers[cpu_index][last_empty_buffer];
+ b0 = vlib_get_buffer (vm, bi0);
+ _vec_len (apm->rx_buffers[cpu_index]) = last_empty_buffer;
+ n_free_bufs--;
+
+ /* copy data */
+ u32 bytes_to_copy =
+ data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
+ b0->current_data = 0;
+ clib_memcpy (vlib_buffer_get_current (b0),
+ (u8 *) tph + tph->tp_mac + offset, bytes_to_copy);
+
+ /* fill buffer header */
+ b0->current_length = bytes_to_copy;
+
+ if (offset == 0)
+ {
+ b0->total_length_not_including_first_buffer = 0;
+ b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = apif->sw_if_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ first_bi0 = bi0;
+ first_b0 = vlib_get_buffer (vm, first_bi0);
+ }
+ else
+ buffer_add_to_chain (vm, bi0, first_bi0, prev_bi0);
+
+ offset += bytes_to_copy;
+ data_len -= bytes_to_copy;
+ }
+ n_rx_packets++;
+ n_rx_bytes += tph->tp_snaplen;
+ to_next[0] = first_bi0;
+ to_next += 1;
+ n_left_to_next--;
+
+ /* trace */
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);
+ if (PREDICT_FALSE (n_trace > 0))
+ {
+ af_packet_input_trace_t *tr;
+ vlib_trace_buffer (vm, node, next0, first_b0, /* follow_chain */
+ 0);
+ vlib_set_trace_count (vm, node, --n_trace);
+ tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->hw_if_index = apif->hw_if_index;
+ clib_memcpy (&tr->tph, tph, sizeof (struct tpacket2_hdr));
+ }
+
+	  /* redirect if feature path enabled; the feature arc must see
+	   * the head of the buffer chain, not the last segment */
+	  vnet_feature_start_device_input_x1 (apif->sw_if_index, &next0,
+					      first_b0, 0);
+
+	  /* enqueue and take next packet */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, first_bi0, next0);
+
+ /* next packet */
+ tph->tp_status = TP_STATUS_KERNEL;
+ rx_frame = (rx_frame + 1) % frame_num;
+ tph = (struct tpacket2_hdr *) (block_start + rx_frame * frame_size);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ apif->next_rx_frame = rx_frame;
+
+ vlib_increment_combined_counter
+ (vnet_get_main ()->interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ os_get_cpu_number (), apif->hw_if_index, n_rx_packets, n_rx_bytes);
+
+ return n_rx_packets;
+}
+
+static uword
+af_packet_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ int i;
+ u32 n_rx_packets = 0;
+
+ af_packet_main_t *apm = &af_packet_main;
+
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (i, apm->pending_input_bitmap,
+ ({
+ clib_bitmap_set (apm->pending_input_bitmap, i, 0);
+ n_rx_packets += af_packet_device_input_fn(vm, node, frame, i);
+ }));
+ /* *INDENT-ON* */
+
+ return n_rx_packets;
+}
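+
+/*
+ * The node below runs in interrupt mode, so something must set a bit in
+ * pending_input_bitmap and wake it when the PF_PACKET fd becomes
+ * readable. A minimal sketch of that wiring, with illustrative names
+ * (the real read-ready callback lives with the af_packet create code):
+ *
+ *   static clib_error_t *
+ *   af_packet_fd_read_ready (unix_file_t * uf)
+ *   {
+ *     af_packet_main_t *apm = &af_packet_main;
+ *
+ *     apm->pending_input_bitmap =
+ *       clib_bitmap_set (apm->pending_input_bitmap, uf->private_data, 1);
+ *     vlib_node_set_interrupt_pending (vlib_get_main (),
+ *                                      af_packet_input_node.index);
+ *     return 0;
+ *   }
+ */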
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (af_packet_input_node) = {
+ .function = af_packet_input_fn,
+ .name = "af-packet-input",
+ .sibling_of = "device-input",
+ .format_trace = format_af_packet_input_trace,
+ .type = VLIB_NODE_TYPE_INPUT,
+ .state = VLIB_NODE_STATE_INTERRUPT,
+ .n_errors = AF_PACKET_INPUT_N_ERROR,
+ .error_strings = af_packet_input_error_strings,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (af_packet_input_node, af_packet_input_fn)
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/devices.c b/src/vnet/devices/devices.c
new file mode 100644
index 00000000000..cd4386ebdca
--- /dev/null
+++ b/src/vnet/devices/devices.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/devices/devices.h>
+#include <vnet/feature/feature.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+static uword
+device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (device_input_node) = {
+ .function = device_input_fn,
+ .name = "device-input",
+ .type = VLIB_NODE_TYPE_INPUT,
+ .state = VLIB_NODE_STATE_DISABLED,
+ .n_next_nodes = VNET_DEVICE_INPUT_N_NEXT_NODES,
+ .next_nodes = VNET_DEVICE_INPUT_NEXT_NODES,
+};
+
+/* This table defines how far we need to advance the buffer's current data
+   pointer when a driver shortcuts directly to an l3 node */
+
+const u32 __attribute__((aligned (CLIB_CACHE_LINE_BYTES)))
+device_input_next_node_advance[((VNET_DEVICE_INPUT_N_NEXT_NODES /
+ CLIB_CACHE_LINE_BYTES) +1) * CLIB_CACHE_LINE_BYTES] =
+{
+ [VNET_DEVICE_INPUT_NEXT_IP4_INPUT] = sizeof (ethernet_header_t),
+ [VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT] = sizeof (ethernet_header_t),
+ [VNET_DEVICE_INPUT_NEXT_IP6_INPUT] = sizeof (ethernet_header_t),
+ [VNET_DEVICE_INPUT_NEXT_MPLS_INPUT] = sizeof (ethernet_header_t),
+};
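+
+/* A minimal usage sketch for the table above, assuming a driver that has
+   already picked an l3 next node for buffer b0 (names illustrative):
+
+     next0 = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
+     vlib_buffer_advance (b0, device_input_next_node_advance[next0]);
+
+   Next nodes not listed above keep an advance of 0, so the same indexed
+   load works for ethernet-input and error-drop without a branch. */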
+
+VNET_FEATURE_ARC_INIT (device_input, static) =
+{
+ .arc_name = "device-input",
+ .start_nodes = VNET_FEATURES ("device-input"),
+ .end_node = "ethernet-input",
+ .arc_index_ptr = &feature_main.device_input_feature_arc_index,
+};
+
+VNET_FEATURE_INIT (l2_patch, static) = {
+ .arc_name = "device-input",
+ .node_name = "l2-patch",
+ .runs_before = VNET_FEATURES ("ethernet-input"),
+};
+
+VNET_FEATURE_INIT (worker_handoff, static) = {
+ .arc_name = "device-input",
+ .node_name = "worker-handoff",
+ .runs_before = VNET_FEATURES ("ethernet-input"),
+};
+
+VNET_FEATURE_INIT (span_input, static) = {
+ .arc_name = "device-input",
+ .node_name = "span-input",
+ .runs_before = VNET_FEATURES ("ethernet-input"),
+};
+
+VNET_FEATURE_INIT (ethernet_input, static) = {
+ .arc_name = "device-input",
+ .node_name = "ethernet-input",
+ .runs_before = 0, /* not before any other features */
+};
+/* *INDENT-ON* */
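+
+/*
+ * The registrations above only order the features on the "device-input"
+ * arc; per interface they are toggled at run time. A minimal sketch,
+ * assuming a valid sw_if_index (1 enables, 0 disables; the last two
+ * arguments are an optional feature config blob and its size):
+ *
+ *   vnet_feature_enable_disable ("device-input", "span-input",
+ *                                sw_if_index, 1, 0, 0);
+ */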
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/devices.h b/src/vnet/devices/devices.h
new file mode 100644
index 00000000000..c46dab904c3
--- /dev/null
+++ b/src/vnet/devices/devices.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_vnet_vnet_device_h
+#define included_vnet_vnet_device_h
+
+#include <vnet/unix/pcap.h>
+#include <vnet/l3_types.h>
+
+typedef enum
+{
+ VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT,
+ VNET_DEVICE_INPUT_NEXT_IP4_INPUT,
+ VNET_DEVICE_INPUT_NEXT_IP6_INPUT,
+ VNET_DEVICE_INPUT_NEXT_MPLS_INPUT,
+ VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT,
+ VNET_DEVICE_INPUT_NEXT_DROP,
+ VNET_DEVICE_INPUT_N_NEXT_NODES,
+} vnet_device_input_next_t;
+
+#define VNET_DEVICE_INPUT_NEXT_NODES { \
+ [VNET_DEVICE_INPUT_NEXT_DROP] = "error-drop", \
+ [VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT] = "ethernet-input", \
+ [VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT] = "ip4-input-no-checksum", \
+ [VNET_DEVICE_INPUT_NEXT_IP4_INPUT] = "ip4-input", \
+ [VNET_DEVICE_INPUT_NEXT_IP6_INPUT] = "ip6-input", \
+ [VNET_DEVICE_INPUT_NEXT_MPLS_INPUT] = "mpls-input", \
+}
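+
+/* A device input node can either instantiate these next nodes itself, as
+   device-input does via .next_nodes = VNET_DEVICE_INPUT_NEXT_NODES, or
+   declare .sibling_of = "device-input" in its VLIB_REGISTER_NODE block to
+   share the same next-node indices (af-packet-input does the latter). */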
+
+extern vlib_node_registration_t device_input_node;
+extern const u32 device_input_next_node_advance[];
+
+#endif /* included_vnet_vnet_device_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/dpdk/cli.c b/src/vnet/devices/dpdk/cli.c
new file mode 100644
index 00000000000..538a00fd975
--- /dev/null
+++ b/src/vnet/devices/dpdk/cli.c
@@ -0,0 +1,1296 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/vnet.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/error.h>
+#include <vppinfra/format.h>
+#include <vppinfra/xxhash.h>
+
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/dpdk/dpdk.h>
+#include <vnet/classify/vnet_classify.h>
+#include <vnet/mpls/packet.h>
+
+#include "dpdk_priv.h"
+
+static clib_error_t *
+pcap_trace_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ dpdk_main_t *dm = &dpdk_main;
+ u8 *filename;
+ u32 max;
+ int matched = 0;
+ clib_error_t *error = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "on"))
+ {
+ if (dm->tx_pcap_enable == 0)
+ {
+ if (dm->pcap_filename == 0)
+ dm->pcap_filename = format (0, "/tmp/vpe.pcap%c", 0);
+
+ memset (&dm->pcap_main, 0, sizeof (dm->pcap_main));
+ dm->pcap_main.file_name = (char *) dm->pcap_filename;
+ dm->pcap_main.n_packets_to_capture = 100;
+ if (dm->pcap_pkts_to_capture)
+ dm->pcap_main.n_packets_to_capture = dm->pcap_pkts_to_capture;
+
+ dm->pcap_main.packet_type = PCAP_PACKET_TYPE_ethernet;
+	      dm->tx_pcap_enable = 1;
+	      vlib_cli_output (vm, "pcap tx capture on...");
+ }
+ else
+ {
+ vlib_cli_output (vm, "pcap tx capture already on...");
+ }
+ matched = 1;
+ }
+ else if (unformat (input, "off"))
+ {
+ if (dm->tx_pcap_enable)
+ {
+ vlib_cli_output (vm, "captured %d pkts...",
+ dm->pcap_main.n_packets_captured + 1);
+ if (dm->pcap_main.n_packets_captured)
+ {
+ dm->pcap_main.n_packets_to_capture =
+ dm->pcap_main.n_packets_captured;
+ error = pcap_write (&dm->pcap_main);
+ if (error)
+ clib_error_report (error);
+ else
+ vlib_cli_output (vm, "saved to %s...", dm->pcap_filename);
+ }
+ }
+ else
+ {
+ vlib_cli_output (vm, "pcap tx capture already off...");
+ }
+
+ dm->tx_pcap_enable = 0;
+ matched = 1;
+ }
+ else if (unformat (input, "max %d", &max))
+ {
+ dm->pcap_pkts_to_capture = max;
+ matched = 1;
+ }
+
+ else if (unformat (input, "intfc %U",
+ unformat_vnet_sw_interface, dm->vnet_main,
+ &dm->pcap_sw_if_index))
+ matched = 1;
+ else if (unformat (input, "intfc any"))
+ {
+ dm->pcap_sw_if_index = 0;
+ matched = 1;
+ }
+ else if (unformat (input, "file %s", &filename))
+ {
+ u8 *chroot_filename;
+ /* Brain-police user path input */
+ if (strstr ((char *) filename, "..")
+ || index ((char *) filename, '/'))
+ {
+ vlib_cli_output (vm, "illegal characters in filename '%s'",
+ filename);
+ continue;
+ }
+
+ chroot_filename = format (0, "/tmp/%s%c", filename, 0);
+ vec_free (filename);
+
+ if (dm->pcap_filename)
+ vec_free (dm->pcap_filename);
+ dm->pcap_filename = chroot_filename;
+ matched = 1;
+ }
+ else if (unformat (input, "status"))
+ {
+ if (dm->tx_pcap_enable == 0)
+ {
+ vlib_cli_output (vm, "pcap tx capture is off...");
+ continue;
+ }
+
+ vlib_cli_output (vm, "pcap tx capture: %d of %d pkts...",
+ dm->pcap_main.n_packets_captured,
+ dm->pcap_main.n_packets_to_capture);
+ matched = 1;
+ }
+
+ else
+ break;
+ }
+
+ if (matched == 0)
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (pcap_trace_command, static) = {
+ .path = "pcap tx trace",
+ .short_help =
+ "pcap tx trace on off max <nn> intfc <intfc> file <name> status",
+ .function = pcap_trace_command_fn,
+};
+/* *INDENT-ON* */
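+
+/*
+ * A typical session for the command above (interface name illustrative):
+ *
+ *   vpp# pcap tx trace on max 1000 intfc GigabitEthernet2/0/0 file vppcap
+ *   vpp# pcap tx trace status
+ *   vpp# pcap tx trace off
+ *
+ * The "file" argument is chroot-ed under /tmp, so this capture is
+ * written to /tmp/vppcap when tracing is switched off.
+ */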
+
+
+static clib_error_t *
+show_dpdk_buffer (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ struct rte_mempool *rmp;
+ int i;
+
+ for (i = 0; i < vec_len (vm->buffer_main->pktmbuf_pools); i++)
+ {
+ rmp = vm->buffer_main->pktmbuf_pools[i];
+ if (rmp)
+ {
+	  unsigned n_avail = rte_mempool_avail_count (rmp);
+	  unsigned n_in_use = rte_mempool_in_use_count (rmp);
+
+	  vlib_cli_output (vm,
+			   "name=\"%s\" available = %7d allocated = %7d total = %7d\n",
+			   rmp->name, (u32) n_avail, (u32) n_in_use,
+			   (u32) (n_avail + n_in_use));
+ }
+ else
+ {
+ vlib_cli_output (vm, "rte_mempool is NULL (!)\n");
+ }
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (cmd_show_dpdk_buffer,static) = {
+ .path = "show dpdk buffer",
+ .short_help = "show dpdk buffer state",
+ .function = show_dpdk_buffer,
+ .is_mp_safe = 1,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+test_dpdk_buffer (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ static u32 *allocated_buffers;
+ u32 n_alloc = 0;
+ u32 n_free = 0;
+ u32 first, actual_alloc;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "allocate %d", &n_alloc))
+ ;
+ else if (unformat (input, "free %d", &n_free))
+ ;
+ else
+ break;
+ }
+
+ if (n_free)
+ {
+ if (vec_len (allocated_buffers) < n_free)
+ return clib_error_return (0, "Can't free %d, only %d allocated",
+ n_free, vec_len (allocated_buffers));
+
+ first = vec_len (allocated_buffers) - n_free;
+ vlib_buffer_free (vm, allocated_buffers + first, n_free);
+ _vec_len (allocated_buffers) = first;
+ }
+ if (n_alloc)
+ {
+ first = vec_len (allocated_buffers);
+ vec_validate (allocated_buffers,
+ vec_len (allocated_buffers) + n_alloc - 1);
+
+ actual_alloc = vlib_buffer_alloc (vm, allocated_buffers + first,
+ n_alloc);
+ _vec_len (allocated_buffers) = first + actual_alloc;
+
+ if (actual_alloc < n_alloc)
+ vlib_cli_output (vm, "WARNING: only allocated %d buffers",
+ actual_alloc);
+ }
+
+ vlib_cli_output (vm, "Currently %d buffers allocated",
+ vec_len (allocated_buffers));
+
+ if (allocated_buffers && vec_len (allocated_buffers) == 0)
+ vec_free (allocated_buffers);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (cmd_test_dpdk_buffer,static) = {
+ .path = "test dpdk buffer",
+ .short_help = "test dpdk buffer [allocate <nn>][free <nn>]",
+ .function = test_dpdk_buffer,
+ .is_mp_safe = 1,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_dpdk_if_desc (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ dpdk_main_t *dm = &dpdk_main;
+ vnet_hw_interface_t *hw;
+ dpdk_device_t *xd;
+ u32 hw_if_index = (u32) ~ 0;
+ u32 nb_rx_desc = (u32) ~ 0;
+ u32 nb_tx_desc = (u32) ~ 0;
+ clib_error_t *rv;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (line_input, "%U", unformat_vnet_hw_interface, dm->vnet_main,
+ &hw_if_index))
+ ;
+ else if (unformat (line_input, "tx %d", &nb_tx_desc))
+ ;
+ else if (unformat (line_input, "rx %d", &nb_rx_desc))
+ ;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (hw_if_index == (u32) ~ 0)
+ return clib_error_return (0, "please specify valid interface name");
+
+ hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+ xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+
+ if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0)
+ return clib_error_return (0, "number of descriptors can be set only for "
+ "physical devices");
+
+ if ((nb_rx_desc == (u32) ~ 0 || nb_rx_desc == xd->nb_rx_desc) &&
+ (nb_tx_desc == (u32) ~ 0 || nb_tx_desc == xd->nb_tx_desc))
+ return clib_error_return (0, "nothing changed");
+
+ if (nb_rx_desc != (u32) ~ 0)
+ xd->nb_rx_desc = nb_rx_desc;
+
+ if (nb_tx_desc != (u32) ~ 0)
+ xd->nb_tx_desc = nb_tx_desc;
+
+ rv = dpdk_port_setup (dm, xd);
+
+ return rv;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (cmd_set_dpdk_if_desc,static) = {
+ .path = "set dpdk interface descriptors",
+ .short_help = "set dpdk interface descriptors <if-name> [rx <n>] [tx <n>]",
+ .function = set_dpdk_if_desc,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+show_dpdk_if_placement (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_and_queue_t *dq;
+ int cpu;
+
+ if (tm->n_vlib_mains == 1)
+    vlib_cli_output (vm, "All interfaces are handled by the main thread");
+
+ for (cpu = 0; cpu < vec_len (dm->devices_by_cpu); cpu++)
+ {
+ if (vec_len (dm->devices_by_cpu[cpu]))
+ vlib_cli_output (vm, "Thread %u (%s at lcore %u):", cpu,
+ vlib_worker_threads[cpu].name,
+ vlib_worker_threads[cpu].lcore_id);
+
+ /* *INDENT-OFF* */
+ vec_foreach(dq, dm->devices_by_cpu[cpu])
+ {
+ u32 hw_if_index = dm->devices[dq->device].vlib_hw_if_index;
+ vnet_hw_interface_t * hi = vnet_get_hw_interface(dm->vnet_main, hw_if_index);
+ vlib_cli_output(vm, " %v queue %u", hi->name, dq->queue_id);
+ }
+ /* *INDENT-ON* */
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (cmd_show_dpdk_if_placement,static) = {
+ .path = "show dpdk interface placement",
+ .short_help = "show dpdk interface placement",
+ .function = show_dpdk_if_placement,
+};
+/* *INDENT-ON* */
+
+static int
+dpdk_device_queue_sort (void *a1, void *a2)
+{
+ dpdk_device_and_queue_t *dq1 = a1;
+ dpdk_device_and_queue_t *dq2 = a2;
+
+ if (dq1->device > dq2->device)
+ return 1;
+ else if (dq1->device < dq2->device)
+ return -1;
+ else if (dq1->queue_id > dq2->queue_id)
+ return 1;
+ else if (dq1->queue_id < dq2->queue_id)
+ return -1;
+ else
+ return 0;
+}
+
+static clib_error_t *
+set_dpdk_if_placement (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_and_queue_t *dq;
+ vnet_hw_interface_t *hw;
+ dpdk_device_t *xd;
+ u32 hw_if_index = (u32) ~ 0;
+ u32 queue = (u32) 0;
+ u32 cpu = (u32) ~ 0;
+ int i;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (line_input, "%U", unformat_vnet_hw_interface, dm->vnet_main,
+ &hw_if_index))
+ ;
+ else if (unformat (line_input, "queue %d", &queue))
+ ;
+ else if (unformat (line_input, "thread %d", &cpu))
+ ;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (hw_if_index == (u32) ~ 0)
+ return clib_error_return (0, "please specify valid interface name");
+
+ if (cpu < dm->input_cpu_first_index ||
+ cpu >= (dm->input_cpu_first_index + dm->input_cpu_count))
+ return clib_error_return (0, "please specify valid thread id");
+
+ hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+ xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+
+ for (i = 0; i < vec_len (dm->devices_by_cpu); i++)
+ {
+ /* *INDENT-OFF* */
+ vec_foreach(dq, dm->devices_by_cpu[i])
+ {
+ if (hw_if_index == dm->devices[dq->device].vlib_hw_if_index &&
+ queue == dq->queue_id)
+ {
+ if (cpu == i) /* nothing to do */
+ return 0;
+
+ vec_del1(dm->devices_by_cpu[i], dq - dm->devices_by_cpu[i]);
+ vec_add2(dm->devices_by_cpu[cpu], dq, 1);
+ dq->queue_id = queue;
+ dq->device = xd->device_index;
+ xd->cpu_socket_id_by_queue[queue] =
+ rte_lcore_to_socket_id(vlib_worker_threads[cpu].lcore_id);
+
+ vec_sort_with_function(dm->devices_by_cpu[i],
+ dpdk_device_queue_sort);
+
+ vec_sort_with_function(dm->devices_by_cpu[cpu],
+ dpdk_device_queue_sort);
+
+ if (vec_len(dm->devices_by_cpu[i]) == 0)
+ vlib_node_set_state (vlib_mains[i], dpdk_input_node.index,
+ VLIB_NODE_STATE_DISABLED);
+
+ if (vec_len(dm->devices_by_cpu[cpu]) == 1)
+ vlib_node_set_state (vlib_mains[cpu], dpdk_input_node.index,
+ VLIB_NODE_STATE_POLLING);
+
+ return 0;
+ }
+ }
+ /* *INDENT-ON* */
+ }
+
+ return clib_error_return (0, "not found");
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (cmd_set_dpdk_if_placement,static) = {
+ .path = "set dpdk interface placement",
+ .short_help = "set dpdk interface placement <if-name> [queue <n>] thread <n>",
+ .function = set_dpdk_if_placement,
+};
+/* *INDENT-ON* */
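+
+/*
+ * Example, with an illustrative interface name: move rx queue 1 to
+ * worker thread 2, then verify the result:
+ *
+ *   vpp# set dpdk interface placement TenGigabitEthernet2/0/0 queue 1 thread 2
+ *   vpp# show dpdk interface placement
+ *
+ * As the code above shows, dpdk-input is switched to POLLING on a thread
+ * that gains its first queue and to DISABLED on a thread that loses its
+ * last one.
+ */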
+
+static clib_error_t *
+show_dpdk_if_hqos_placement (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_and_queue_t *dq;
+ int cpu;
+
+ if (tm->n_vlib_mains == 1)
+    vlib_cli_output (vm, "All interfaces are handled by the main thread");
+
+ for (cpu = 0; cpu < vec_len (dm->devices_by_hqos_cpu); cpu++)
+ {
+ if (vec_len (dm->devices_by_hqos_cpu[cpu]))
+ vlib_cli_output (vm, "Thread %u (%s at lcore %u):", cpu,
+ vlib_worker_threads[cpu].name,
+ vlib_worker_threads[cpu].lcore_id);
+
+ vec_foreach (dq, dm->devices_by_hqos_cpu[cpu])
+ {
+ u32 hw_if_index = dm->devices[dq->device].vlib_hw_if_index;
+ vnet_hw_interface_t *hi =
+ vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+ vlib_cli_output (vm, " %v queue %u", hi->name, dq->queue_id);
+ }
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (cmd_show_dpdk_if_hqos_placement, static) = {
+ .path = "show dpdk interface hqos placement",
+ .short_help = "show dpdk interface hqos placement",
+ .function = show_dpdk_if_hqos_placement,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_dpdk_if_hqos_placement (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_and_queue_t *dq;
+ vnet_hw_interface_t *hw;
+ dpdk_device_t *xd;
+ u32 hw_if_index = (u32) ~ 0;
+ u32 cpu = (u32) ~ 0;
+ int i;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (line_input, "%U", unformat_vnet_hw_interface, dm->vnet_main,
+ &hw_if_index))
+ ;
+ else if (unformat (line_input, "thread %d", &cpu))
+ ;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (hw_if_index == (u32) ~ 0)
+ return clib_error_return (0, "please specify valid interface name");
+
+ if (cpu < dm->hqos_cpu_first_index ||
+ cpu >= (dm->hqos_cpu_first_index + dm->hqos_cpu_count))
+ return clib_error_return (0, "please specify valid thread id");
+
+ hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+ xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+
+ for (i = 0; i < vec_len (dm->devices_by_hqos_cpu); i++)
+ {
+ vec_foreach (dq, dm->devices_by_hqos_cpu[i])
+ {
+ if (hw_if_index == dm->devices[dq->device].vlib_hw_if_index)
+ {
+ if (cpu == i) /* nothing to do */
+ return 0;
+
+ vec_del1 (dm->devices_by_hqos_cpu[i],
+ dq - dm->devices_by_hqos_cpu[i]);
+ vec_add2 (dm->devices_by_hqos_cpu[cpu], dq, 1);
+ dq->queue_id = 0;
+ dq->device = xd->device_index;
+
+ vec_sort_with_function (dm->devices_by_hqos_cpu[i],
+ dpdk_device_queue_sort);
+
+ vec_sort_with_function (dm->devices_by_hqos_cpu[cpu],
+ dpdk_device_queue_sort);
+
+ return 0;
+ }
+ }
+ }
+
+ return clib_error_return (0, "not found");
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (cmd_set_dpdk_if_hqos_placement, static) = {
+ .path = "set dpdk interface hqos placement",
+ .short_help = "set dpdk interface hqos placement <if-name> thread <n>",
+ .function = set_dpdk_if_hqos_placement,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_dpdk_if_hqos_pipe (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ dpdk_main_t *dm = &dpdk_main;
+ vnet_hw_interface_t *hw;
+ dpdk_device_t *xd;
+ u32 hw_if_index = (u32) ~ 0;
+ u32 subport_id = (u32) ~ 0;
+ u32 pipe_id = (u32) ~ 0;
+ u32 profile_id = (u32) ~ 0;
+ int rv;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (line_input, "%U", unformat_vnet_hw_interface, dm->vnet_main,
+ &hw_if_index))
+ ;
+ else if (unformat (line_input, "subport %d", &subport_id))
+ ;
+ else if (unformat (line_input, "pipe %d", &pipe_id))
+ ;
+ else if (unformat (line_input, "profile %d", &profile_id))
+ ;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (hw_if_index == (u32) ~ 0)
+ return clib_error_return (0, "please specify valid interface name");
+
+ hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+ xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+
+ rv =
+ rte_sched_pipe_config (xd->hqos_ht->hqos, subport_id, pipe_id,
+ profile_id);
+ if (rv)
+ return clib_error_return (0, "pipe configuration failed");
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (cmd_set_dpdk_if_hqos_pipe, static) =
+{
+ .path = "set dpdk interface hqos pipe",
+ .short_help = "set dpdk interface hqos pipe <if-name> subport <n> pipe <n> "
+ "profile <n>",
+ .function = set_dpdk_if_hqos_pipe,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_dpdk_if_hqos_subport (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ dpdk_main_t *dm = &dpdk_main;
+ vnet_hw_interface_t *hw;
+ dpdk_device_t *xd;
+ u32 hw_if_index = (u32) ~ 0;
+ u32 subport_id = (u32) ~ 0;
+ struct rte_sched_subport_params p = {
+ .tb_rate = 1250000000, /* 10GbE */
+ .tb_size = 1000000,
+ .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
+ .tc_period = 10,
+ };
+ int rv;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (line_input, "%U", unformat_vnet_hw_interface, dm->vnet_main,
+ &hw_if_index))
+ ;
+ else if (unformat (line_input, "subport %d", &subport_id))
+ ;
+ else if (unformat (line_input, "rate %d", &p.tb_rate))
+ {
+ p.tc_rate[0] = p.tb_rate;
+ p.tc_rate[1] = p.tb_rate;
+ p.tc_rate[2] = p.tb_rate;
+ p.tc_rate[3] = p.tb_rate;
+ }
+ else if (unformat (line_input, "bktsize %d", &p.tb_size))
+ ;
+ else if (unformat (line_input, "tc0 %d", &p.tc_rate[0]))
+ ;
+ else if (unformat (line_input, "tc1 %d", &p.tc_rate[1]))
+ ;
+ else if (unformat (line_input, "tc2 %d", &p.tc_rate[2]))
+ ;
+ else if (unformat (line_input, "tc3 %d", &p.tc_rate[3]))
+ ;
+ else if (unformat (line_input, "period %d", &p.tc_period))
+ ;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (hw_if_index == (u32) ~ 0)
+ return clib_error_return (0, "please specify valid interface name");
+
+ hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+ xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+
+ rv = rte_sched_subport_config (xd->hqos_ht->hqos, subport_id, &p);
+ if (rv)
+ return clib_error_return (0, "subport configuration failed");
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (cmd_set_dpdk_if_hqos_subport, static) = {
+ .path = "set dpdk interface hqos subport",
+ .short_help = "set dpdk interface hqos subport <if-name> subport <n> "
+ "[rate <n>] [bktsize <n>] [tc0 <n>] [tc1 <n>] [tc2 <n>] [tc3 <n>] "
+ "[period <n>]",
+ .function = set_dpdk_if_hqos_subport,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_dpdk_if_hqos_tctbl (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ dpdk_main_t *dm = &dpdk_main;
+ vnet_hw_interface_t *hw;
+ dpdk_device_t *xd;
+ u32 hw_if_index = (u32) ~ 0;
+ u32 tc = (u32) ~ 0;
+ u32 queue = (u32) ~ 0;
+ u32 entry = (u32) ~ 0;
+ u32 val, i;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (line_input, "%U", unformat_vnet_hw_interface, dm->vnet_main,
+ &hw_if_index))
+ ;
+ else if (unformat (line_input, "entry %d", &entry))
+ ;
+ else if (unformat (line_input, "tc %d", &tc))
+ ;
+ else if (unformat (line_input, "queue %d", &queue))
+ ;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (hw_if_index == (u32) ~ 0)
+ return clib_error_return (0, "please specify valid interface name");
+ if (entry >= 64)
+ return clib_error_return (0, "invalid entry");
+ if (tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ return clib_error_return (0, "invalid traffic class");
+  if (queue >= RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
+    return clib_error_return (0, "invalid queue");
+
+ hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+ xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+
+ /* Detect the set of worker threads */
+ uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+ /* Should never happen, shut up Coverity warning */
+ if (p == 0)
+ return clib_error_return (0, "no worker registrations?");
+
+ vlib_thread_registration_t *tr = (vlib_thread_registration_t *) p[0];
+ int worker_thread_first = tr->first_index;
+ int worker_thread_count = tr->count;
+
+ val = tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
+ for (i = 0; i < worker_thread_count; i++)
+ xd->hqos_wt[worker_thread_first + i].hqos_tc_table[entry] = val;
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (cmd_set_dpdk_if_hqos_tctbl, static) = {
+ .path = "set dpdk interface hqos tctbl",
+ .short_help = "set dpdk interface hqos tctbl <if-name> entry <n> tc <n> queue <n>",
+ .function = set_dpdk_if_hqos_tctbl,
+};
+/* *INDENT-ON* */
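+
+/*
+ * The table above maps a 6-bit packet field value (entry 0..63) to a
+ * traffic class / queue pair, stored as
+ * tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue. For example, assuming
+ * the classifier field is DSCP and EF (46) should land in the highest
+ * priority class:
+ *
+ *   vpp# set dpdk interface hqos tctbl TenGigabitEthernet2/0/0 entry 46 tc 0 queue 0
+ */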
+
+static clib_error_t *
+set_dpdk_if_hqos_pktfield (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ dpdk_main_t *dm = &dpdk_main;
+
+ /* Device specific data */
+ struct rte_eth_dev_info dev_info;
+ dpdk_device_config_t *devconf = 0;
+ vnet_hw_interface_t *hw;
+ dpdk_device_t *xd;
+ u32 hw_if_index = (u32) ~ 0;
+
+ /* Detect the set of worker threads */
+ uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+ /* Should never happen, shut up Coverity warning */
+ if (p == 0)
+ return clib_error_return (0, "no worker registrations?");
+
+ vlib_thread_registration_t *tr = (vlib_thread_registration_t *) p[0];
+ int worker_thread_first = tr->first_index;
+ int worker_thread_count = tr->count;
+
+ /* Packet field configuration */
+ u64 mask = (u64) ~ 0;
+ u32 id = (u32) ~ 0;
+ u32 offset = (u32) ~ 0;
+
+ /* HQoS params */
+ u32 n_subports_per_port, n_pipes_per_subport, tctbl_size;
+
+ u32 i;
+
+ /* Parse input arguments */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (line_input, "%U", unformat_vnet_hw_interface, dm->vnet_main,
+ &hw_if_index))
+ ;
+ else if (unformat (line_input, "id %d", &id))
+ ;
+ else if (unformat (line_input, "offset %d", &offset))
+ ;
+ else if (unformat (line_input, "mask %llx", &mask))
+ ;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ /* Get interface */
+ if (hw_if_index == (u32) ~ 0)
+ return clib_error_return (0, "please specify valid interface name");
+
+ hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+ xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+
+ rte_eth_dev_info_get (xd->device_index, &dev_info);
+ if (dev_info.pci_dev)
+ { /* bonded interface has no pci info */
+ vlib_pci_addr_t pci_addr;
+
+ pci_addr.domain = dev_info.pci_dev->addr.domain;
+ pci_addr.bus = dev_info.pci_dev->addr.bus;
+ pci_addr.slot = dev_info.pci_dev->addr.devid;
+ pci_addr.function = dev_info.pci_dev->addr.function;
+
+ p =
+ hash_get (dm->conf->device_config_index_by_pci_addr, pci_addr.as_u32);
+ }
+
+ if (p)
+ devconf = pool_elt_at_index (dm->conf->dev_confs, p[0]);
+ else
+ devconf = &dm->conf->default_devconf;
+
+ if (devconf->hqos_enabled == 0)
+ {
+ vlib_cli_output (vm, "HQoS disabled for this interface");
+ return 0;
+ }
+
+ n_subports_per_port = devconf->hqos.port.n_subports_per_port;
+ n_pipes_per_subport = devconf->hqos.port.n_pipes_per_subport;
+ tctbl_size = RTE_DIM (devconf->hqos.tc_table);
+
+ /* Validate packet field configuration: id, offset and mask */
+ if (id >= 3)
+ return clib_error_return (0, "invalid packet field id");
+
+ switch (id)
+ {
+ case 0:
+ if (dpdk_hqos_validate_mask (mask, n_subports_per_port) != 0)
+ return clib_error_return (0, "invalid subport ID mask "
+ "(n_subports_per_port = %u)",
+ n_subports_per_port);
+ break;
+ case 1:
+ if (dpdk_hqos_validate_mask (mask, n_pipes_per_subport) != 0)
+ return clib_error_return (0, "invalid pipe ID mask "
+ "(n_pipes_per_subport = %u)",
+ n_pipes_per_subport);
+ break;
+ case 2:
+ default:
+ if (dpdk_hqos_validate_mask (mask, tctbl_size) != 0)
+ return clib_error_return (0, "invalid TC table index mask "
+ "(TC table size = %u)", tctbl_size);
+ }
+
+ /* Propagate packet field configuration to all workers */
+ for (i = 0; i < worker_thread_count; i++)
+ switch (id)
+ {
+ case 0:
+ xd->hqos_wt[worker_thread_first + i].hqos_field0_slabpos = offset;
+ xd->hqos_wt[worker_thread_first + i].hqos_field0_slabmask = mask;
+ xd->hqos_wt[worker_thread_first + i].hqos_field0_slabshr =
+ __builtin_ctzll (mask);
+ break;
+ case 1:
+ xd->hqos_wt[worker_thread_first + i].hqos_field1_slabpos = offset;
+ xd->hqos_wt[worker_thread_first + i].hqos_field1_slabmask = mask;
+ xd->hqos_wt[worker_thread_first + i].hqos_field1_slabshr =
+ __builtin_ctzll (mask);
+ break;
+ case 2:
+ default:
+ xd->hqos_wt[worker_thread_first + i].hqos_field2_slabpos = offset;
+ xd->hqos_wt[worker_thread_first + i].hqos_field2_slabmask = mask;
+ xd->hqos_wt[worker_thread_first + i].hqos_field2_slabshr =
+ __builtin_ctzll (mask);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (cmd_set_dpdk_if_hqos_pktfield, static) = {
+ .path = "set dpdk interface hqos pktfield",
+ .short_help = "set dpdk interface hqos pktfield <if-name> id <n> offset <n> "
+ "mask <n>",
+ .function = set_dpdk_if_hqos_pktfield,
+};
+/* *INDENT-ON* */
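+
+/*
+ * How the slab position/mask/shift tuples stored above are consumed on
+ * the worker side, as a minimal sketch (names illustrative): each field
+ * is read as a 64-bit slab at byte offset slabpos, masked, then shifted
+ * down by the mask's trailing-zero count:
+ *
+ *   u64 slab = *(u64 *) (pkt_data + hqos_field0_slabpos);
+ *   u32 subport = (slab & hqos_field0_slabmask) >> hqos_field0_slabshr;
+ *
+ * which is why __builtin_ctzll (mask) is precomputed per field, and why
+ * dpdk_hqos_validate_mask () must reject masks that can produce values
+ * outside the configured table sizes.
+ */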
+
+static clib_error_t *
+show_dpdk_if_hqos (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ dpdk_main_t *dm = &dpdk_main;
+ vnet_hw_interface_t *hw;
+ dpdk_device_t *xd;
+ dpdk_device_config_hqos_t *cfg;
+ dpdk_device_hqos_per_hqos_thread_t *ht;
+ dpdk_device_hqos_per_worker_thread_t *wk;
+ u32 *tctbl;
+ u32 hw_if_index = (u32) ~ 0;
+ u32 profile_id, i;
+ struct rte_eth_dev_info dev_info;
+ dpdk_device_config_t *devconf = 0;
+ vlib_thread_registration_t *tr;
+ uword *p = 0;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (line_input, "%U", unformat_vnet_hw_interface, dm->vnet_main,
+ &hw_if_index))
+ ;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (hw_if_index == (u32) ~ 0)
+    return clib_error_return (0, "please specify valid interface name");
+
+ hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+ xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+
+ rte_eth_dev_info_get (xd->device_index, &dev_info);
+ if (dev_info.pci_dev)
+ { /* bonded interface has no pci info */
+ vlib_pci_addr_t pci_addr;
+
+ pci_addr.domain = dev_info.pci_dev->addr.domain;
+ pci_addr.bus = dev_info.pci_dev->addr.bus;
+ pci_addr.slot = dev_info.pci_dev->addr.devid;
+ pci_addr.function = dev_info.pci_dev->addr.function;
+
+ p =
+ hash_get (dm->conf->device_config_index_by_pci_addr, pci_addr.as_u32);
+ }
+
+ if (p)
+ devconf = pool_elt_at_index (dm->conf->dev_confs, p[0]);
+ else
+ devconf = &dm->conf->default_devconf;
+
+ if (devconf->hqos_enabled == 0)
+ {
+ vlib_cli_output (vm, "HQoS disabled for this interface");
+ return 0;
+ }
+
+ /* Detect the set of worker threads */
+ p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+
+ /* Should never happen, shut up Coverity warning */
+ if (p == 0)
+ return clib_error_return (0, "no worker registrations?");
+
+ tr = (vlib_thread_registration_t *) p[0];
+
+ cfg = &devconf->hqos;
+ ht = xd->hqos_ht;
+ wk = &xd->hqos_wt[tr->first_index];
+ tctbl = wk->hqos_tc_table;
+
+ vlib_cli_output (vm, " Thread:");
+ vlib_cli_output (vm, " Input SWQ size = %u packets", cfg->swq_size);
+ vlib_cli_output (vm, " Enqueue burst size = %u packets",
+ ht->hqos_burst_enq);
+ vlib_cli_output (vm, " Dequeue burst size = %u packets",
+ ht->hqos_burst_deq);
+
+ vlib_cli_output (vm,
+ " Packet field 0: slab position = %4u, slab bitmask = 0x%016llx",
+ wk->hqos_field0_slabpos, wk->hqos_field0_slabmask);
+ vlib_cli_output (vm,
+ " Packet field 1: slab position = %4u, slab bitmask = 0x%016llx",
+ wk->hqos_field1_slabpos, wk->hqos_field1_slabmask);
+ vlib_cli_output (vm,
+ " Packet field 2: slab position = %4u, slab bitmask = 0x%016llx",
+ wk->hqos_field2_slabpos, wk->hqos_field2_slabmask);
+ vlib_cli_output (vm, " Packet field 2 translation table:");
+ vlib_cli_output (vm, " [ 0 .. 15]: "
+ "%2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u",
+ tctbl[0], tctbl[1], tctbl[2], tctbl[3],
+ tctbl[4], tctbl[5], tctbl[6], tctbl[7],
+ tctbl[8], tctbl[9], tctbl[10], tctbl[11],
+ tctbl[12], tctbl[13], tctbl[14], tctbl[15]);
+ vlib_cli_output (vm, " [16 .. 31]: "
+ "%2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u",
+ tctbl[16], tctbl[17], tctbl[18], tctbl[19],
+ tctbl[20], tctbl[21], tctbl[22], tctbl[23],
+ tctbl[24], tctbl[25], tctbl[26], tctbl[27],
+ tctbl[28], tctbl[29], tctbl[30], tctbl[31]);
+ vlib_cli_output (vm, " [32 .. 47]: "
+ "%2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u",
+ tctbl[32], tctbl[33], tctbl[34], tctbl[35],
+ tctbl[36], tctbl[37], tctbl[38], tctbl[39],
+ tctbl[40], tctbl[41], tctbl[42], tctbl[43],
+ tctbl[44], tctbl[45], tctbl[46], tctbl[47]);
+ vlib_cli_output (vm, " [48 .. 63]: "
+ "%2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u %2u",
+ tctbl[48], tctbl[49], tctbl[50], tctbl[51],
+ tctbl[52], tctbl[53], tctbl[54], tctbl[55],
+ tctbl[56], tctbl[57], tctbl[58], tctbl[59],
+ tctbl[60], tctbl[61], tctbl[62], tctbl[63]);
+
+ vlib_cli_output (vm, " Port:");
+ vlib_cli_output (vm, " Rate = %u bytes/second", cfg->port.rate);
+ vlib_cli_output (vm, " MTU = %u bytes", cfg->port.mtu);
+ vlib_cli_output (vm, " Frame overhead = %u bytes",
+ cfg->port.frame_overhead);
+ vlib_cli_output (vm, " Number of subports = %u",
+ cfg->port.n_subports_per_port);
+ vlib_cli_output (vm, " Number of pipes per subport = %u",
+ cfg->port.n_pipes_per_subport);
+ vlib_cli_output (vm,
+ " Packet queue size: TC0 = %u, TC1 = %u, TC2 = %u, TC3 = %u packets",
+ cfg->port.qsize[0], cfg->port.qsize[1], cfg->port.qsize[2],
+ cfg->port.qsize[3]);
+ vlib_cli_output (vm, " Number of pipe profiles = %u",
+ cfg->port.n_pipe_profiles);
+
+ for (profile_id = 0; profile_id < vec_len (cfg->pipe); profile_id++)
+ {
+ vlib_cli_output (vm, " Pipe profile %u:", profile_id);
+ vlib_cli_output (vm, " Rate = %u bytes/second",
+ cfg->pipe[profile_id].tb_rate);
+ vlib_cli_output (vm, " Token bucket size = %u bytes",
+ cfg->pipe[profile_id].tb_size);
+ vlib_cli_output (vm,
+ " Traffic class rate: TC0 = %u, TC1 = %u, TC2 = %u, TC3 = %u bytes/second",
+ cfg->pipe[profile_id].tc_rate[0],
+ cfg->pipe[profile_id].tc_rate[1],
+ cfg->pipe[profile_id].tc_rate[2],
+ cfg->pipe[profile_id].tc_rate[3]);
+ vlib_cli_output (vm, " TC period = %u milliseconds",
+ cfg->pipe[profile_id].tc_period);
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ vlib_cli_output (vm, " TC3 oversubscription_weight = %u",
+ cfg->pipe[profile_id].tc_ov_weight);
+#endif
+
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ {
+ vlib_cli_output (vm,
+ " TC%u WRR weights: Q0 = %u, Q1 = %u, Q2 = %u, Q3 = %u",
+ i, cfg->pipe[profile_id].wrr_weights[i * 4],
+ cfg->pipe[profile_id].wrr_weights[i * 4 + 1],
+ cfg->pipe[profile_id].wrr_weights[i * 4 + 2],
+ cfg->pipe[profile_id].wrr_weights[i * 4 + 3]);
+ }
+ }
+
+#ifdef RTE_SCHED_RED
+ vlib_cli_output (vm, " Weighted Random Early Detection (WRED):");
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ {
+ vlib_cli_output (vm, " TC%u min: G = %u, Y = %u, R = %u", i,
+ cfg->port.red_params[i][e_RTE_METER_GREEN].min_th,
+ cfg->port.red_params[i][e_RTE_METER_YELLOW].min_th,
+ cfg->port.red_params[i][e_RTE_METER_RED].min_th);
+
+ vlib_cli_output (vm, " TC%u max: G = %u, Y = %u, R = %u", i,
+ cfg->port.red_params[i][e_RTE_METER_GREEN].max_th,
+ cfg->port.red_params[i][e_RTE_METER_YELLOW].max_th,
+ cfg->port.red_params[i][e_RTE_METER_RED].max_th);
+
+ vlib_cli_output (vm,
+ " TC%u inverted probability: G = %u, Y = %u, R = %u",
+ i, cfg->port.red_params[i][e_RTE_METER_GREEN].maxp_inv,
+ cfg->port.red_params[i][e_RTE_METER_YELLOW].maxp_inv,
+ cfg->port.red_params[i][e_RTE_METER_RED].maxp_inv);
+
+      vlib_cli_output (vm, "    TC%u weight: G = %u, Y = %u, R = %u", i,
+ cfg->port.red_params[i][e_RTE_METER_GREEN].wq_log2,
+ cfg->port.red_params[i][e_RTE_METER_YELLOW].wq_log2,
+ cfg->port.red_params[i][e_RTE_METER_RED].wq_log2);
+ }
+#endif
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (cmd_show_dpdk_if_hqos, static) = {
+ .path = "show dpdk interface hqos",
+ .short_help = "show dpdk interface hqos <if-name>",
+ .function = show_dpdk_if_hqos,
+};
+
+/* *INDENT-ON* */
+
+static clib_error_t *
+show_dpdk_hqos_queue_stats (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ dpdk_main_t *dm = &dpdk_main;
+ u32 hw_if_index = (u32) ~ 0;
+ u32 subport = (u32) ~ 0;
+ u32 pipe = (u32) ~ 0;
+ u32 tc = (u32) ~ 0;
+ u32 tc_q = (u32) ~ 0;
+ vnet_hw_interface_t *hw;
+ dpdk_device_t *xd;
+ uword *p = 0;
+ struct rte_eth_dev_info dev_info;
+ dpdk_device_config_t *devconf = 0;
+ u32 qindex;
+ struct rte_sched_queue_stats stats;
+ u16 qlen;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (line_input, "%U", unformat_vnet_hw_interface, dm->vnet_main,
+ &hw_if_index))
+ ;
+
+ else if (unformat (line_input, "subport %d", &subport))
+ ;
+
+ else if (unformat (line_input, "pipe %d", &pipe))
+ ;
+
+ else if (unformat (line_input, "tc %d", &tc))
+ ;
+
+ else if (unformat (line_input, "tc_q %d", &tc_q))
+ ;
+
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (hw_if_index == (u32) ~ 0)
+    return clib_error_return (0, "please specify valid interface name");
+
+ hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+ xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+
+ rte_eth_dev_info_get (xd->device_index, &dev_info);
+ if (dev_info.pci_dev)
+ { /* bonded interface has no pci info */
+ vlib_pci_addr_t pci_addr;
+
+ pci_addr.domain = dev_info.pci_dev->addr.domain;
+ pci_addr.bus = dev_info.pci_dev->addr.bus;
+ pci_addr.slot = dev_info.pci_dev->addr.devid;
+ pci_addr.function = dev_info.pci_dev->addr.function;
+
+ p =
+ hash_get (dm->conf->device_config_index_by_pci_addr, pci_addr.as_u32);
+ }
+
+ if (p)
+ devconf = pool_elt_at_index (dm->conf->dev_confs, p[0]);
+ else
+ devconf = &dm->conf->default_devconf;
+
+ if (devconf->hqos_enabled == 0)
+ {
+ vlib_cli_output (vm, "HQoS disabled for this interface");
+ return 0;
+ }
+
+ /*
+ * Figure out which queue to query. cf rte_sched_port_qindex. (Not sure why
+ * that method isn't made public by DPDK - how _should_ we get the queue ID?)
+ */
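+  /*
+   * Flattened, the computation below is:
+   *   qindex = ((subport * n_pipes_per_subport + pipe)
+   *             * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + tc)
+   *            * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_q
+   */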
+ qindex = subport * devconf->hqos.port.n_pipes_per_subport + pipe;
+ qindex = qindex * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + tc;
+ qindex = qindex * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_q;
+
+ if (rte_sched_queue_read_stats (xd->hqos_ht->hqos, qindex, &stats, &qlen) !=
+ 0)
+ return clib_error_return (0, "failed to read stats");
+
+ vlib_cli_output (vm, "%=24s%=16s", "Stats Parameter", "Value");
+ vlib_cli_output (vm, "%=24s%=16d", "Packets", stats.n_pkts);
+ vlib_cli_output (vm, "%=24s%=16d", "Packets dropped", stats.n_pkts_dropped);
+#ifdef RTE_SCHED_RED
+ vlib_cli_output (vm, "%=24s%=16d", "Packets dropped (RED)",
+ stats.n_pkts_red_dropped);
+#endif
+ vlib_cli_output (vm, "%=24s%=16d", "Bytes", stats.n_bytes);
+ vlib_cli_output (vm, "%=24s%=16d", "Bytes dropped", stats.n_bytes_dropped);
+
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (cmd_show_dpdk_hqos_queue_stats, static) = {
+ .path = "show dpdk hqos queue",
+ .short_help = "show dpdk hqos queue <if-name> subport <subport> pipe <pipe> tc <tc> tc_q <tc_q>",
+ .function = show_dpdk_hqos_queue_stats,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+dpdk_cli_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (dpdk_cli_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/dpdk/device.c b/src/vnet/devices/dpdk/device.c
new file mode 100644
index 00000000000..b22fbf2e69e
--- /dev/null
+++ b/src/vnet/devices/dpdk/device.c
@@ -0,0 +1,840 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/vnet.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/format.h>
+#include <vlib/unix/cj.h>
+#include <assert.h>
+
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/dpdk/dpdk.h>
+
+#include "dpdk_priv.h"
+#include <vppinfra/error.h>
+
+#define foreach_dpdk_tx_func_error \
+ _(BAD_RETVAL, "DPDK tx function returned an error") \
+ _(RING_FULL, "Tx packet drops (ring full)") \
+ _(PKT_DROP, "Tx packet drops (dpdk tx failure)") \
+ _(REPL_FAIL, "Tx packet drops (replication failure)")
+
+typedef enum
+{
+#define _(f,s) DPDK_TX_FUNC_ERROR_##f,
+ foreach_dpdk_tx_func_error
+#undef _
+ DPDK_TX_FUNC_N_ERROR,
+} dpdk_tx_func_error_t;
+
+static char *dpdk_tx_func_error_strings[] = {
+#define _(n,s) s,
+ foreach_dpdk_tx_func_error
+#undef _
+};
+
+clib_error_t *
+dpdk_set_mac_address (vnet_hw_interface_t * hi, char *address)
+{
+ int error;
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);
+
+ error = rte_eth_dev_default_mac_addr_set (xd->device_index,
+ (struct ether_addr *) address);
+
+ if (error)
+ {
+ return clib_error_return (0, "mac address set failed: %d", error);
+ }
+ else
+ {
+ return NULL;
+ }
+}
+
+clib_error_t *
+dpdk_set_mc_filter (vnet_hw_interface_t * hi,
+ struct ether_addr mc_addr_vec[], int naddr)
+{
+ int error;
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);
+
+ error = rte_eth_dev_set_mc_addr_list (xd->device_index, mc_addr_vec, naddr);
+
+ if (error)
+ {
+ return clib_error_return (0, "mc addr list failed: %d", error);
+ }
+ else
+ {
+ return NULL;
+ }
+}
+
+struct rte_mbuf *
+dpdk_replicate_packet_mb (vlib_buffer_t * b)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_buffer_main_t *bm = vm->buffer_main;
+ struct rte_mbuf **mbufs = 0, *s, *d;
+ u8 nb_segs;
+ unsigned socket_id = rte_socket_id ();
+ int i;
+
+ ASSERT (bm->pktmbuf_pools[socket_id]);
+ s = rte_mbuf_from_vlib_buffer (b);
+ nb_segs = s->nb_segs;
+ vec_validate (mbufs, nb_segs - 1);
+
+ if (rte_pktmbuf_alloc_bulk (bm->pktmbuf_pools[socket_id], mbufs, nb_segs))
+ {
+ vec_free (mbufs);
+ return 0;
+ }
+
+ d = mbufs[0];
+ d->nb_segs = s->nb_segs;
+ d->data_len = s->data_len;
+ d->pkt_len = s->pkt_len;
+ d->data_off = s->data_off;
+ clib_memcpy (d->buf_addr, s->buf_addr, RTE_PKTMBUF_HEADROOM + s->data_len);
+
+ for (i = 1; i < nb_segs; i++)
+ {
+ d->next = mbufs[i];
+ d = mbufs[i];
+ s = s->next;
+ d->data_len = s->data_len;
+ clib_memcpy (d->buf_addr, s->buf_addr,
+ RTE_PKTMBUF_HEADROOM + s->data_len);
+ }
+
+ d = mbufs[0];
+ vec_free (mbufs);
+ return d;
+}
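+
+/*
+ * This clone is what the recycle path in dpdk_interface_tx () relies on:
+ * when a buffer carries VLIB_BUFFER_RECYCLE, dpdk_buffer_recycle () below
+ * replicates the whole mbuf chain with this function and hands the clone
+ * to the PMD, so the original vlib buffer can be returned to its owner
+ * untouched.
+ */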
+
+static void
+dpdk_tx_trace_buffer (dpdk_main_t * dm,
+ vlib_node_runtime_t * node,
+ dpdk_device_t * xd,
+ u16 queue_id, u32 buffer_index, vlib_buffer_t * buffer)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ dpdk_tx_dma_trace_t *t0;
+ struct rte_mbuf *mb;
+
+ mb = rte_mbuf_from_vlib_buffer (buffer);
+
+ t0 = vlib_add_trace (vm, node, buffer, sizeof (t0[0]));
+ t0->queue_index = queue_id;
+ t0->device_index = xd->device_index;
+ t0->buffer_index = buffer_index;
+ clib_memcpy (&t0->mb, mb, sizeof (t0->mb));
+ clib_memcpy (&t0->buffer, buffer,
+ sizeof (buffer[0]) - sizeof (buffer->pre_data));
+ clib_memcpy (t0->buffer.pre_data, buffer->data + buffer->current_data,
+ sizeof (t0->buffer.pre_data));
+}
+
+static_always_inline void
+dpdk_validate_rte_mbuf (vlib_main_t * vm, vlib_buffer_t * b,
+ int maybe_multiseg)
+{
+ struct rte_mbuf *mb, *first_mb, *last_mb;
+
+ /* buffer is coming from non-dpdk source so we need to init
+ rte_mbuf header */
+ if (PREDICT_FALSE ((b->flags & VNET_BUFFER_RTE_MBUF_VALID) == 0))
+ {
+ vlib_buffer_t *b2 = b;
+ last_mb = mb = rte_mbuf_from_vlib_buffer (b2);
+ rte_pktmbuf_reset (mb);
+ while (maybe_multiseg && (b2->flags & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ b2 = vlib_get_buffer (vm, b2->next_buffer);
+ mb = rte_mbuf_from_vlib_buffer (b2);
+ last_mb->next = mb;
+ last_mb = mb;
+ rte_pktmbuf_reset (mb);
+ }
+ }
+
+ first_mb = mb = rte_mbuf_from_vlib_buffer (b);
+ first_mb->nb_segs = 1;
+ mb->data_len = b->current_length;
+ mb->pkt_len = maybe_multiseg ? vlib_buffer_length_in_chain (vm, b) :
+ b->current_length;
+ mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
+
+ while (maybe_multiseg && (b->flags & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ b = vlib_get_buffer (vm, b->next_buffer);
+ mb = rte_mbuf_from_vlib_buffer (b);
+ mb->data_len = b->current_length;
+ mb->pkt_len = b->current_length;
+ mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
+ first_mb->nb_segs++;
+ }
+}
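+
+/*
+ * A before/after sketch of what the function above establishes for a
+ * two-segment chain (sizes illustrative):
+ *
+ *   vlib: b0 (current_length 128) -> b1 (current_length 64)
+ *   mbuf: mb0 { nb_segs 2, data_len 128, pkt_len 192, next = mb1 }
+ *         mb1 { data_len 64, pkt_len 64, next = 0 }
+ *
+ * Only the head mbuf carries the chain totals (nb_segs, pkt_len); each
+ * segment carries its own data_len, mirroring the vlib chain.
+ */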
+
+/*
+ * This function calls the dpdk's tx_burst function to transmit the packets
+ * on the tx_vector. It manages a lock per-device if the device does not
+ * support multiple queues. It returns the number of packets untransmitted
+ * on the tx_vector. If all packets are transmitted (the normal case), the
+ * function returns 0.
+ *
+ * The function assumes there is at least one packet on the tx_vector.
+ */
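+/*
+ * Ring accounting, with a worked example: tx_head and tx_tail are free
+ * running and head - tail is the number of packets waiting. With
+ * ring->tx_tail = 0 and ring->tx_head = 3, the first burst submits
+ * tx_vector[0..2]; if the PMD accepts only 2 mbufs, tx_tail advances to
+ * 2 and the loop retries with the single remaining packet at index 2.
+ */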
+static_always_inline
+ u32 tx_burst_vector_internal (vlib_main_t * vm,
+ dpdk_device_t * xd,
+ struct rte_mbuf **tx_vector)
+{
+ dpdk_main_t *dm = &dpdk_main;
+ u32 n_packets;
+ u32 tx_head;
+ u32 tx_tail;
+ u32 n_retry;
+ int rv;
+ int queue_id;
+ tx_ring_hdr_t *ring;
+
+ ring = vec_header (tx_vector, sizeof (*ring));
+
+ n_packets = ring->tx_head - ring->tx_tail;
+
+ tx_head = ring->tx_head % xd->nb_tx_desc;
+
+ /*
+ * Ensure rte_eth_tx_burst is not called with 0 packets, which can lead to
+ * unpredictable results.
+ */
+ ASSERT (n_packets > 0);
+
+ /*
+ * Check for tx_vector overflow. If this fails it is a system configuration
+ * error. The ring should be sized big enough to handle the largest un-flowed
+ * off burst from a traffic manager. A larger size also helps performance
+ * a bit because it decreases the probability of having to issue two tx_burst
+ * calls due to a ring wrap.
+ */
+ ASSERT (n_packets < xd->nb_tx_desc);
+ ASSERT (ring->tx_tail == 0);
+
+ n_retry = 16;
+ queue_id = vm->cpu_index;
+
+ do
+ {
+ /* start the burst at the tail */
+ tx_tail = ring->tx_tail % xd->nb_tx_desc;
+
+ /*
+ * This device only supports one TX queue,
+ * and we're running multi-threaded...
+ */
+ if (PREDICT_FALSE (xd->lockp != 0))
+ {
+ queue_id = queue_id % xd->tx_q_used;
+ while (__sync_lock_test_and_set (xd->lockp[queue_id], 1))
+ /* zzzz */
+ queue_id = (queue_id + 1) % xd->tx_q_used;
+ }
+
+ if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_HQOS)) /* HQoS ON */
+ {
+ /* no wrap, transmit in one burst */
+ dpdk_device_hqos_per_worker_thread_t *hqos =
+ &xd->hqos_wt[vm->cpu_index];
+
+ ASSERT (hqos->swq != NULL);
+
+ dpdk_hqos_metadata_set (hqos,
+ &tx_vector[tx_tail], tx_head - tx_tail);
+ rv = rte_ring_sp_enqueue_burst (hqos->swq,
+ (void **) &tx_vector[tx_tail],
+ (uint16_t) (tx_head - tx_tail));
+ }
+ else if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD))
+ {
+ /* no wrap, transmit in one burst */
+ rv = rte_eth_tx_burst (xd->device_index,
+ (uint16_t) queue_id,
+ &tx_vector[tx_tail],
+ (uint16_t) (tx_head - tx_tail));
+ }
+ else
+ {
+ ASSERT (0);
+ rv = 0;
+ }
+
+ if (PREDICT_FALSE (xd->lockp != 0))
+ *xd->lockp[queue_id] = 0;
+
+ if (PREDICT_FALSE (rv < 0))
+ {
+	  /* emit non-fatal message, bump counter */
+ vnet_main_t *vnm = dm->vnet_main;
+ vnet_interface_main_t *im = &vnm->interface_main;
+ u32 node_index;
+
+ node_index = vec_elt_at_index (im->hw_interfaces,
+ xd->vlib_hw_if_index)->tx_node_index;
+
+ vlib_error_count (vm, node_index, DPDK_TX_FUNC_ERROR_BAD_RETVAL, 1);
+ clib_warning ("rte_eth_tx_burst[%d]: error %d", xd->device_index,
+ rv);
+	  return n_packets;	/* untransmitted packets */
+ }
+      ring->tx_tail += (u16) rv;
+      n_packets -= (uint16_t) rv;
+      n_retry--;		/* bound retries on repeated partial bursts */
+    }
+  while (rv && n_packets && (n_retry > 0));
+
+ return n_packets;
+}
+
+static_always_inline void
+dpdk_prefetch_buffer_by_index (vlib_main_t * vm, u32 bi)
+{
+ vlib_buffer_t *b;
+ struct rte_mbuf *mb;
+ b = vlib_get_buffer (vm, bi);
+ mb = rte_mbuf_from_vlib_buffer (b);
+ CLIB_PREFETCH (mb, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
+}
+
+static_always_inline void
+dpdk_buffer_recycle (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_buffer_t * b, u32 bi, struct rte_mbuf **mbp)
+{
+ dpdk_main_t *dm = &dpdk_main;
+ u32 my_cpu = vm->cpu_index;
+ struct rte_mbuf *mb_new;
+
+  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_RECYCLE) == 0))
+    return;
+
+ mb_new = dpdk_replicate_packet_mb (b);
+ if (PREDICT_FALSE (mb_new == 0))
+ {
+ vlib_error_count (vm, node->node_index,
+ DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
+ b->flags |= VLIB_BUFFER_REPL_FAIL;
+ }
+ else
+ *mbp = mb_new;
+
+ vec_add1 (dm->recycle[my_cpu], bi);
+}
+
+/*
+ * Transmits the packets on the frame to the interface associated with the
+ * node. It first copies packets on the frame to a tx_vector containing the
+ * rte_mbuf pointers. It then passes this vector to tx_burst_vector_internal
+ * which calls the dpdk tx_burst function.
+ */
+static uword
+dpdk_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * f)
+{
+ dpdk_main_t *dm = &dpdk_main;
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ dpdk_device_t *xd = vec_elt_at_index (dm->devices, rd->dev_instance);
+ u32 n_packets = f->n_vectors;
+ u32 n_left;
+ u32 *from;
+ struct rte_mbuf **tx_vector;
+ u16 i;
+ u16 nb_tx_desc = xd->nb_tx_desc;
+ int queue_id;
+ u32 my_cpu;
+ u32 tx_pkts = 0;
+ tx_ring_hdr_t *ring;
+ u32 n_on_ring;
+
+ my_cpu = vm->cpu_index;
+
+ queue_id = my_cpu;
+
+ tx_vector = xd->tx_vectors[queue_id];
+ ring = vec_header (tx_vector, sizeof (*ring));
+
+ n_on_ring = ring->tx_head - ring->tx_tail;
+ from = vlib_frame_vector_args (f);
+
+ ASSERT (n_packets <= VLIB_FRAME_SIZE);
+
+ if (PREDICT_FALSE (n_on_ring + n_packets > nb_tx_desc))
+ {
+ /*
+ * Overflowing the ring should never happen.
+ * If it does then drop the whole frame.
+ */
+ vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_RING_FULL,
+ n_packets);
+
+ while (n_packets--)
+ {
+ u32 bi0 = from[n_packets];
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
+ struct rte_mbuf *mb0 = rte_mbuf_from_vlib_buffer (b0);
+ rte_pktmbuf_free (mb0);
+ }
+ return n_on_ring;
+ }
+
+ if (PREDICT_FALSE (dm->tx_pcap_enable))
+ {
+ n_left = n_packets;
+ while (n_left > 0)
+ {
+ u32 bi0 = from[0];
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
+ if (dm->pcap_sw_if_index == 0 ||
+ dm->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_TX])
+ pcap_add_buffer (&dm->pcap_main, vm, bi0, 512);
+ from++;
+ n_left--;
+ }
+ }
+
+ from = vlib_frame_vector_args (f);
+ n_left = n_packets;
+ i = ring->tx_head % nb_tx_desc;
+
+ while (n_left >= 8)
+ {
+ u32 bi0, bi1, bi2, bi3;
+ struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ u32 or_flags;
+
+ dpdk_prefetch_buffer_by_index (vm, from[4]);
+ dpdk_prefetch_buffer_by_index (vm, from[5]);
+ dpdk_prefetch_buffer_by_index (vm, from[6]);
+ dpdk_prefetch_buffer_by_index (vm, from[7]);
+
+ bi0 = from[0];
+ bi1 = from[1];
+ bi2 = from[2];
+ bi3 = from[3];
+ from += 4;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ b2 = vlib_get_buffer (vm, bi2);
+ b3 = vlib_get_buffer (vm, bi3);
+
+ or_flags = b0->flags | b1->flags | b2->flags | b3->flags;
+
+ if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ dpdk_validate_rte_mbuf (vm, b0, 1);
+ dpdk_validate_rte_mbuf (vm, b1, 1);
+ dpdk_validate_rte_mbuf (vm, b2, 1);
+ dpdk_validate_rte_mbuf (vm, b3, 1);
+ }
+ else
+ {
+ dpdk_validate_rte_mbuf (vm, b0, 0);
+ dpdk_validate_rte_mbuf (vm, b1, 0);
+ dpdk_validate_rte_mbuf (vm, b2, 0);
+ dpdk_validate_rte_mbuf (vm, b3, 0);
+ }
+
+ mb0 = rte_mbuf_from_vlib_buffer (b0);
+ mb1 = rte_mbuf_from_vlib_buffer (b1);
+ mb2 = rte_mbuf_from_vlib_buffer (b2);
+ mb3 = rte_mbuf_from_vlib_buffer (b3);
+
+ if (PREDICT_FALSE (or_flags & VLIB_BUFFER_RECYCLE))
+ {
+ dpdk_buffer_recycle (vm, node, b0, bi0, &mb0);
+ dpdk_buffer_recycle (vm, node, b1, bi1, &mb1);
+ dpdk_buffer_recycle (vm, node, b2, bi2, &mb2);
+ dpdk_buffer_recycle (vm, node, b3, bi3, &mb3);
+
+ /* dont enqueue packets if replication failed as they must
+ be sent back to recycle */
+ if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
+ tx_vector[i++ % nb_tx_desc] = mb0;
+ if (PREDICT_TRUE ((b1->flags & VLIB_BUFFER_REPL_FAIL) == 0))
+ tx_vector[i++ % nb_tx_desc] = mb1;
+ if (PREDICT_TRUE ((b2->flags & VLIB_BUFFER_REPL_FAIL) == 0))
+ tx_vector[i++ % nb_tx_desc] = mb2;
+ if (PREDICT_TRUE ((b3->flags & VLIB_BUFFER_REPL_FAIL) == 0))
+ tx_vector[i++ % nb_tx_desc] = mb3;
+ }
+ else
+ {
+ if (PREDICT_FALSE (i + 3 >= nb_tx_desc))
+ {
+ tx_vector[i++ % nb_tx_desc] = mb0;
+ tx_vector[i++ % nb_tx_desc] = mb1;
+ tx_vector[i++ % nb_tx_desc] = mb2;
+ tx_vector[i++ % nb_tx_desc] = mb3;
+ i %= nb_tx_desc;
+ }
+ else
+ {
+ tx_vector[i++] = mb0;
+ tx_vector[i++] = mb1;
+ tx_vector[i++] = mb2;
+ tx_vector[i++] = mb3;
+ }
+ }
+
+
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi1, b1);
+ if (b2->flags & VLIB_BUFFER_IS_TRACED)
+ dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi2, b2);
+ if (b3->flags & VLIB_BUFFER_IS_TRACED)
+ dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi3, b3);
+ }
+
+ n_left -= 4;
+ }
+ while (n_left > 0)
+ {
+ u32 bi0;
+ struct rte_mbuf *mb0;
+ vlib_buffer_t *b0;
+
+ bi0 = from[0];
+ from++;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ dpdk_validate_rte_mbuf (vm, b0, 1);
+
+ mb0 = rte_mbuf_from_vlib_buffer (b0);
+ dpdk_buffer_recycle (vm, node, b0, bi0, &mb0);
+
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);
+
+ if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
+ {
+ tx_vector[i % nb_tx_desc] = mb0;
+ i++;
+ }
+ n_left--;
+ }
+
+ /* account for additional packets in the ring */
+ ring->tx_head += n_packets;
+ n_on_ring = ring->tx_head - ring->tx_tail;
+
+ /* transmit as many packets as possible */
+ n_packets = tx_burst_vector_internal (vm, xd, tx_vector);
+
+  /*
+   * tx_pkts is the number of packets successfully transmitted:
+   * the number originally on the ring minus the number remaining on it.
+   */
+ tx_pkts = n_on_ring - n_packets;
+
+ {
+ /* If there is no callback then drop any non-transmitted packets */
+ if (PREDICT_FALSE (n_packets))
+ {
+ vlib_simple_counter_main_t *cm;
+ vnet_main_t *vnm = vnet_get_main ();
+
+ cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
+ VNET_INTERFACE_COUNTER_TX_ERROR);
+
+ vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index,
+ n_packets);
+
+ vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
+ n_packets);
+
+ while (n_packets--)
+ rte_pktmbuf_free (tx_vector[ring->tx_tail + n_packets]);
+ }
+
+ /* Reset head/tail to avoid unnecessary wrap */
+ ring->tx_head = 0;
+ ring->tx_tail = 0;
+ }
+
+ /* Recycle replicated buffers */
+ if (PREDICT_FALSE (vec_len (dm->recycle[my_cpu])))
+ {
+ vlib_buffer_free (vm, dm->recycle[my_cpu],
+ vec_len (dm->recycle[my_cpu]));
+ _vec_len (dm->recycle[my_cpu]) = 0;
+ }
+
+ ASSERT (ring->tx_head >= ring->tx_tail);
+
+ return tx_pkts;
+}
+
+static void
+dpdk_clear_hw_interface_counters (u32 instance)
+{
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_t *xd = vec_elt_at_index (dm->devices, instance);
+
+ /*
+ * Set the "last_cleared_stats" to the current stats, so that
+ * things appear to clear from a display perspective.
+ */
+ dpdk_update_counters (xd, vlib_time_now (dm->vlib_main));
+
+ clib_memcpy (&xd->last_cleared_stats, &xd->stats, sizeof (xd->stats));
+ clib_memcpy (xd->last_cleared_xstats, xd->xstats,
+ vec_len (xd->last_cleared_xstats) *
+ sizeof (xd->last_cleared_xstats[0]));
+
+}
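+
+/*
+ * Display-side sketch (editor's illustration, not part of this commit):
+ * "clearing" is just a snapshot, so show commands report deltas instead of
+ * zeroing anything in hardware:
+ *
+ *   u64 shown = xd->stats.opackets - xd->last_cleared_stats.opackets;
+ */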
+
+static clib_error_t *
+dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+{
+ vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
+ uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_t *xd = vec_elt_at_index (dm->devices, hif->dev_instance);
+ int rv = 0;
+
+ if (is_up)
+ {
+ f64 now = vlib_time_now (dm->vlib_main);
+
+ if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
+ rv = rte_eth_dev_start (xd->device_index);
+
+ if (xd->flags & DPDK_DEVICE_FLAG_PROMISC)
+ rte_eth_promiscuous_enable (xd->device_index);
+ else
+ rte_eth_promiscuous_disable (xd->device_index);
+
+ rte_eth_allmulticast_enable (xd->device_index);
+ xd->flags |= DPDK_DEVICE_FLAG_ADMIN_UP;
+ dpdk_update_counters (xd, now);
+ dpdk_update_link_state (xd, now);
+ }
+ else
+ {
+ xd->flags &= ~DPDK_DEVICE_FLAG_ADMIN_UP;
+
+ rte_eth_allmulticast_disable (xd->device_index);
+ vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);
+ rte_eth_dev_stop (xd->device_index);
+
+ /* For bonded interface, stop slave links */
+ if (xd->pmd == VNET_DPDK_PMD_BOND)
+ {
+ u8 slink[16];
+ int nlink = rte_eth_bond_slaves_get (xd->device_index, slink, 16);
+ while (nlink >= 1)
+ {
+ u8 dpdk_port = slink[--nlink];
+ rte_eth_dev_stop (dpdk_port);
+ }
+ }
+ }
+
+ if (rv < 0)
+ clib_warning ("rte_eth_dev_%s error: %d", is_up ? "start" : "stop", rv);
+
+ return /* no error */ 0;
+}
+
+/*
+ * Dynamically redirect all pkts from a specific interface
+ * to the specified node
+ */
+static void
+dpdk_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
+ u32 node_index)
+{
+ dpdk_main_t *xm = &dpdk_main;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
+
+ /* Shut off redirection */
+ if (node_index == ~0)
+ {
+ xd->per_interface_next_index = node_index;
+ return;
+ }
+
+ xd->per_interface_next_index =
+ vlib_node_add_next (xm->vlib_main, dpdk_input_node.index, node_index);
+}
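+
+/*
+ * Usage sketch (editor's illustration; the node name is hypothetical): a
+ * feature steals the rx arc and later restores it through the generic
+ * interface call that dispatches to the function above:
+ *
+ *   vnet_hw_interface_rx_redirect_to_node (vnm, hw_if_index,
+ *                                          my_punt_node.index);
+ *   ...
+ *   vnet_hw_interface_rx_redirect_to_node (vnm, hw_if_index, ~0);
+ */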
+
+
+static clib_error_t *
+dpdk_subif_add_del_function (vnet_main_t * vnm,
+ u32 hw_if_index,
+ struct vnet_sw_interface_t *st, int is_add)
+{
+ dpdk_main_t *xm = &dpdk_main;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
+ vnet_sw_interface_t *t = (vnet_sw_interface_t *) st;
+ int r, vlan_offload;
+ u32 prev_subifs = xd->num_subifs;
+ clib_error_t *err = 0;
+
+ if (is_add)
+ xd->num_subifs++;
+ else if (xd->num_subifs)
+ xd->num_subifs--;
+
+ if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0)
+ goto done;
+
+  /* currently we program VLANs only for IXGBE VF and I40E VF */
+ if ((xd->pmd != VNET_DPDK_PMD_IXGBEVF) && (xd->pmd != VNET_DPDK_PMD_I40EVF))
+ goto done;
+
+ if (t->sub.eth.flags.no_tags == 1)
+ goto done;
+
+ if ((t->sub.eth.flags.one_tag != 1) || (t->sub.eth.flags.exact_match != 1))
+ {
+ xd->num_subifs = prev_subifs;
+ err = clib_error_return (0, "unsupported VLAN setup");
+ goto done;
+ }
+
+ vlan_offload = rte_eth_dev_get_vlan_offload (xd->device_index);
+ vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
+
+ if ((r = rte_eth_dev_set_vlan_offload (xd->device_index, vlan_offload)))
+ {
+ xd->num_subifs = prev_subifs;
+ err = clib_error_return (0, "rte_eth_dev_set_vlan_offload[%d]: err %d",
+ xd->device_index, r);
+ goto done;
+ }
+
+
+ if ((r =
+ rte_eth_dev_vlan_filter (xd->device_index, t->sub.eth.outer_vlan_id,
+ is_add)))
+ {
+ xd->num_subifs = prev_subifs;
+ err = clib_error_return (0, "rte_eth_dev_vlan_filter[%d]: err %d",
+ xd->device_index, r);
+ goto done;
+ }
+
+done:
+ if (xd->num_subifs)
+ xd->flags |= DPDK_DEVICE_FLAG_HAVE_SUBIF;
+ else
+ xd->flags &= ~DPDK_DEVICE_FLAG_HAVE_SUBIF;
+
+ return err;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (dpdk_device_class) = {
+ .name = "dpdk",
+ .tx_function = dpdk_interface_tx,
+ .tx_function_n_errors = DPDK_TX_FUNC_N_ERROR,
+ .tx_function_error_strings = dpdk_tx_func_error_strings,
+ .format_device_name = format_dpdk_device_name,
+ .format_device = format_dpdk_device,
+ .format_tx_trace = format_dpdk_tx_dma_trace,
+ .clear_counters = dpdk_clear_hw_interface_counters,
+ .admin_up_down_function = dpdk_interface_admin_up_down,
+ .subif_add_del_function = dpdk_subif_add_del_function,
+ .rx_redirect_to_node = dpdk_set_interface_next_node,
+ .mac_addr_change_function = dpdk_set_mac_address,
+};
+
+VLIB_DEVICE_TX_FUNCTION_MULTIARCH (dpdk_device_class, dpdk_interface_tx)
+/* *INDENT-ON* */
+
+#define UP_DOWN_FLAG_EVENT 1
+
+uword
+admin_up_down_process (vlib_main_t * vm,
+ vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ clib_error_t *error = 0;
+ uword event_type;
+ uword *event_data = 0;
+ u32 sw_if_index;
+ u32 flags;
+
+ while (1)
+ {
+ vlib_process_wait_for_event (vm);
+
+ event_type = vlib_process_get_events (vm, &event_data);
+
+ dpdk_main.admin_up_down_in_progress = 1;
+
+ switch (event_type)
+ {
+ case UP_DOWN_FLAG_EVENT:
+ {
+ if (vec_len (event_data) == 2)
+ {
+ sw_if_index = event_data[0];
+ flags = event_data[1];
+ error =
+ vnet_sw_interface_set_flags (vnet_get_main (), sw_if_index,
+ flags);
+ clib_error_report (error);
+ }
+ }
+ break;
+ }
+
+ vec_reset_length (event_data);
+
+ dpdk_main.admin_up_down_in_progress = 0;
+
+ }
+ return 0; /* or not */
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (admin_up_down_process_node,static) = {
+ .function = admin_up_down_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "admin-up-down-process",
+    .process_log2_n_stack_bytes = 17, // 128KB
+};
+/* *INDENT-ON* */
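+
+/*
+ * Sketch of how this process is driven (editor's illustration): a caller
+ * queues exactly two event data words, matching the
+ * vec_len (event_data) == 2 check in the loop above:
+ *
+ *   vlib_process_signal_event (vm, admin_up_down_process_node.index,
+ *                              UP_DOWN_FLAG_EVENT, sw_if_index);
+ *   vlib_process_signal_event (vm, admin_up_down_process_node.index,
+ *                              UP_DOWN_FLAG_EVENT, flags);
+ */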
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/dpdk/dpdk.h b/src/vnet/devices/dpdk/dpdk.h
new file mode 100644
index 00000000000..d8f378d2b54
--- /dev/null
+++ b/src/vnet/devices/dpdk/dpdk.h
@@ -0,0 +1,534 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_dpdk_h__
+#define __included_dpdk_h__
+
+/* $$$$ We should rename always_inline -> clib_always_inline */
+#undef always_inline
+
+#include <rte_config.h>
+
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_virtio_net.h>
+#include <rte_version.h>
+#include <rte_eth_bond.h>
+#include <rte_sched.h>
+
+#include <vnet/unix/pcap.h>
+#include <vnet/devices/devices.h>
+
+#if CLIB_DEBUG > 0
+#define always_inline static inline
+#else
+#define always_inline static inline __attribute__ ((__always_inline__))
+#endif
+
+#include <vlib/pci/pci.h>
+
+#define NB_MBUF (16<<10)
+
+extern vnet_device_class_t dpdk_device_class;
+extern vlib_node_registration_t dpdk_input_node;
+extern vlib_node_registration_t handoff_dispatch_node;
+
+#if RTE_VERSION >= RTE_VERSION_NUM(16, 11, 0, 0)
+#define foreach_dpdk_pmd \
+ _ ("net_thunderx", THUNDERX) \
+ _ ("net_e1000_em", E1000EM) \
+ _ ("net_e1000_igb", IGB) \
+ _ ("net_e1000_igb_vf", IGBVF) \
+ _ ("net_ixgbe", IXGBE) \
+ _ ("net_ixgbe_vf", IXGBEVF) \
+ _ ("net_i40e", I40E) \
+ _ ("net_i40e_vf", I40EVF) \
+ _ ("net_virtio", VIRTIO) \
+ _ ("net_enic", ENIC) \
+ _ ("net_vmxnet3", VMXNET3) \
+ _ ("net_af_packet", AF_PACKET) \
+ _ ("rte_bond_pmd", BOND) \
+ _ ("net_fm10k", FM10K) \
+ _ ("net_cxgbe", CXGBE) \
+ _ ("net_mlx5", MLX5) \
+ _ ("net_dpaa2", DPAA2)
+#else
+#define foreach_dpdk_pmd \
+ _ ("rte_nicvf_pmd", THUNDERX) \
+ _ ("rte_em_pmd", E1000EM) \
+ _ ("rte_igb_pmd", IGB) \
+ _ ("rte_igbvf_pmd", IGBVF) \
+ _ ("rte_ixgbe_pmd", IXGBE) \
+ _ ("rte_ixgbevf_pmd", IXGBEVF) \
+ _ ("rte_i40e_pmd", I40E) \
+ _ ("rte_i40evf_pmd", I40EVF) \
+ _ ("rte_virtio_pmd", VIRTIO) \
+ _ ("rte_enic_pmd", ENIC) \
+ _ ("rte_vmxnet3_pmd", VMXNET3) \
+ _ ("AF_PACKET PMD", AF_PACKET) \
+ _ ("rte_bond_pmd", BOND) \
+ _ ("rte_pmd_fm10k", FM10K) \
+ _ ("rte_cxgbe_pmd", CXGBE) \
+ _ ("rte_dpaa2_dpni", DPAA2)
+#endif
+
+typedef enum
+{
+ VNET_DPDK_PMD_NONE,
+#define _(s,f) VNET_DPDK_PMD_##f,
+ foreach_dpdk_pmd
+#undef _
+ VNET_DPDK_PMD_UNKNOWN, /* must be last */
+} dpdk_pmd_t;
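+
+/*
+ * Editor's illustration of the x-macro pattern above (not additional code):
+ * redefining _ before expanding foreach_dpdk_pmd keeps the enum and the
+ * driver-name strings in lockstep, e.g.
+ *
+ *   #define _(s,f) VNET_DPDK_PMD_##f,
+ *   foreach_dpdk_pmd   =>   VNET_DPDK_PMD_THUNDERX, VNET_DPDK_PMD_E1000EM, ...
+ *   #undef _
+ *
+ * and init code can reuse the same list to map a probed driver name string
+ * to its pmd value.
+ */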
+
+typedef enum
+{
+ VNET_DPDK_PORT_TYPE_ETH_1G,
+ VNET_DPDK_PORT_TYPE_ETH_10G,
+ VNET_DPDK_PORT_TYPE_ETH_40G,
+ VNET_DPDK_PORT_TYPE_ETH_100G,
+ VNET_DPDK_PORT_TYPE_ETH_BOND,
+ VNET_DPDK_PORT_TYPE_ETH_SWITCH,
+ VNET_DPDK_PORT_TYPE_AF_PACKET,
+ VNET_DPDK_PORT_TYPE_UNKNOWN,
+} dpdk_port_type_t;
+
+/*
+ * The header for the tx_vector in dpdk_device_t.
+ * Head and tail are indexes into the tx_vector and are of type
+ * u64 so they never overflow.
+ */
+typedef struct
+{
+ u64 tx_head;
+ u64 tx_tail;
+} tx_ring_hdr_t;
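+
+/*
+ * Illustrative sketch of how these counters are used (editor's note, values
+ * hypothetical): head and tail only ever increase, so occupancy and the
+ * physical slot fall out of simple arithmetic:
+ *
+ *   tx_ring_hdr_t *r = vec_header (tx_vector, sizeof (*r));
+ *   u64 n_on_ring = r->tx_head - r->tx_tail;   // packets awaiting tx
+ *   u16 slot = r->tx_head % nb_tx_desc;        // next physical ring slot
+ *
+ * At u64 width the counters would take centuries to wrap at any realistic
+ * packet rate, hence "they never overflow".
+ */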
+
+typedef struct
+{
+ struct rte_ring *swq;
+
+ u64 hqos_field0_slabmask;
+ u32 hqos_field0_slabpos;
+ u32 hqos_field0_slabshr;
+ u64 hqos_field1_slabmask;
+ u32 hqos_field1_slabpos;
+ u32 hqos_field1_slabshr;
+ u64 hqos_field2_slabmask;
+ u32 hqos_field2_slabpos;
+ u32 hqos_field2_slabshr;
+ u32 hqos_tc_table[64];
+} dpdk_device_hqos_per_worker_thread_t;
+
+typedef struct
+{
+ struct rte_ring **swq;
+ struct rte_mbuf **pkts_enq;
+ struct rte_mbuf **pkts_deq;
+ struct rte_sched_port *hqos;
+ u32 hqos_burst_enq;
+ u32 hqos_burst_deq;
+ u32 pkts_enq_len;
+ u32 swq_pos;
+ u32 flush_count;
+} dpdk_device_hqos_per_hqos_thread_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ volatile u32 **lockp;
+
+ /* Instance ID */
+ u32 device_index;
+
+ u32 vlib_hw_if_index;
+ u32 vlib_sw_if_index;
+
+ /* next node index if we decide to steal the rx graph arc */
+ u32 per_interface_next_index;
+
+ /* dpdk rte_mbuf rx and tx vectors, VLIB_FRAME_SIZE */
+ struct rte_mbuf ***tx_vectors; /* one per worker thread */
+ struct rte_mbuf ***rx_vectors;
+
+ /* vector of traced contexts, per device */
+ u32 **d_trace_buffers;
+
+ dpdk_pmd_t pmd:8;
+ i8 cpu_socket;
+
+ u16 flags;
+#define DPDK_DEVICE_FLAG_ADMIN_UP (1 << 0)
+#define DPDK_DEVICE_FLAG_PROMISC (1 << 1)
+#define DPDK_DEVICE_FLAG_PMD (1 << 2)
+#define DPDK_DEVICE_FLAG_PMD_SUPPORTS_PTYPE (1 << 3)
+#define DPDK_DEVICE_FLAG_MAYBE_MULTISEG (1 << 4)
+#define DPDK_DEVICE_FLAG_HAVE_SUBIF (1 << 5)
+#define DPDK_DEVICE_FLAG_HQOS (1 << 6)
+
+ u16 nb_tx_desc;
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
+
+ u8 *interface_name_suffix;
+
+ /* number of sub-interfaces */
+ u16 num_subifs;
+
+ /* PMD related */
+ u16 tx_q_used;
+ u16 rx_q_used;
+ u16 nb_rx_desc;
+ u16 *cpu_socket_id_by_queue;
+ struct rte_eth_conf port_conf;
+ struct rte_eth_txconf tx_conf;
+
+ /* HQoS related */
+ dpdk_device_hqos_per_worker_thread_t *hqos_wt;
+ dpdk_device_hqos_per_hqos_thread_t *hqos_ht;
+
+ /* af_packet */
+ u8 af_packet_port_id;
+
+ struct rte_eth_link link;
+ f64 time_last_link_update;
+
+ struct rte_eth_stats stats;
+ struct rte_eth_stats last_stats;
+ struct rte_eth_stats last_cleared_stats;
+ struct rte_eth_xstat *xstats;
+ struct rte_eth_xstat *last_cleared_xstats;
+ f64 time_last_stats_update;
+ dpdk_port_type_t port_type;
+} dpdk_device_t;
+
+#define DPDK_STATS_POLL_INTERVAL (10.0)
+#define DPDK_MIN_STATS_POLL_INTERVAL (0.001) /* 1msec */
+
+#define DPDK_LINK_POLL_INTERVAL (3.0)
+#define DPDK_MIN_LINK_POLL_INTERVAL (0.001) /* 1msec */
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+ /* total input packet counter */
+ u64 aggregate_rx_packets;
+} dpdk_worker_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+ /* total input packet counter */
+ u64 aggregate_rx_packets;
+} dpdk_hqos_thread_t;
+
+typedef struct
+{
+ u32 device;
+ u16 queue_id;
+} dpdk_device_and_queue_t;
+
+#ifndef DPDK_HQOS_DBG_BYPASS
+#define DPDK_HQOS_DBG_BYPASS 0
+#endif
+
+#ifndef HQOS_FLUSH_COUNT_THRESHOLD
+#define HQOS_FLUSH_COUNT_THRESHOLD 100000
+#endif
+
+typedef struct dpdk_device_config_hqos_t
+{
+ u32 hqos_thread;
+ u32 hqos_thread_valid;
+
+ u32 swq_size;
+ u32 burst_enq;
+ u32 burst_deq;
+
+ u32 pktfield0_slabpos;
+ u32 pktfield1_slabpos;
+ u32 pktfield2_slabpos;
+ u64 pktfield0_slabmask;
+ u64 pktfield1_slabmask;
+ u64 pktfield2_slabmask;
+ u32 tc_table[64];
+
+ struct rte_sched_port_params port;
+ struct rte_sched_subport_params *subport;
+ struct rte_sched_pipe_params *pipe;
+ uint32_t *pipe_map;
+} dpdk_device_config_hqos_t;
+
+int dpdk_hqos_validate_mask (u64 mask, u32 n);
+void dpdk_device_config_hqos_pipe_profile_default (dpdk_device_config_hqos_t *
+ hqos, u32 pipe_profile_id);
+void dpdk_device_config_hqos_default (dpdk_device_config_hqos_t * hqos);
+clib_error_t *dpdk_port_setup_hqos (dpdk_device_t * xd,
+ dpdk_device_config_hqos_t * hqos);
+void dpdk_hqos_metadata_set (dpdk_device_hqos_per_worker_thread_t * hqos,
+ struct rte_mbuf **pkts, u32 n_pkts);
+
+#define foreach_dpdk_device_config_item \
+ _ (num_rx_queues) \
+ _ (num_tx_queues) \
+ _ (num_rx_desc) \
+ _ (num_tx_desc) \
+ _ (rss_fn)
+
+typedef struct
+{
+ vlib_pci_addr_t pci_addr;
+ u8 is_blacklisted;
+ u8 vlan_strip_offload;
+#define DPDK_DEVICE_VLAN_STRIP_DEFAULT 0
+#define DPDK_DEVICE_VLAN_STRIP_OFF 1
+#define DPDK_DEVICE_VLAN_STRIP_ON 2
+
+#define _(x) uword x;
+ foreach_dpdk_device_config_item
+#undef _
+ clib_bitmap_t * workers;
+ u32 hqos_enabled;
+ dpdk_device_config_hqos_t hqos;
+} dpdk_device_config_t;
+
+typedef struct
+{
+
+ /* Config stuff */
+ u8 **eal_init_args;
+ u8 *eal_init_args_str;
+ u8 *uio_driver_name;
+ u8 no_multi_seg;
+ u8 enable_tcp_udp_checksum;
+
+ /* Required config parameters */
+ u8 coremask_set_manually;
+ u8 nchannels_set_manually;
+ u32 coremask;
+ u32 nchannels;
+ u32 num_mbufs;
+ u8 num_kni; /* while kni_init allows u32, port_id in callback fn is only u8 */
+
+ /*
+ * format interface names ala xxxEthernet%d/%d/%d instead of
+ * xxxEthernet%x/%x/%x.
+ */
+ u8 interface_name_format_decimal;
+
+ /* per-device config */
+ dpdk_device_config_t default_devconf;
+ dpdk_device_config_t *dev_confs;
+ uword *device_config_index_by_pci_addr;
+
+} dpdk_config_main_t;
+
+dpdk_config_main_t dpdk_config_main;
+
+typedef struct
+{
+
+ /* Devices */
+ dpdk_device_t *devices;
+ dpdk_device_and_queue_t **devices_by_cpu;
+ dpdk_device_and_queue_t **devices_by_hqos_cpu;
+
+ /* per-thread recycle lists */
+ u32 **recycle;
+
+ /* buffer flags template, configurable to enable/disable tcp / udp cksum */
+ u32 buffer_flags_template;
+
+ /* vlib buffer free list, must be same size as an rte_mbuf */
+ u32 vlib_buffer_free_list_index;
+
+ /* dpdk worker "threads" */
+ dpdk_worker_t *workers;
+
+ /* dpdk HQoS "threads" */
+ dpdk_hqos_thread_t *hqos_threads;
+
+ /* Ethernet input node index */
+ u32 ethernet_input_node_index;
+
+ /* pcap tracing [only works if (CLIB_DEBUG > 0)] */
+ int tx_pcap_enable;
+ pcap_main_t pcap_main;
+ u8 *pcap_filename;
+ u32 pcap_sw_if_index;
+ u32 pcap_pkts_to_capture;
+
+ /* hashes */
+ uword *dpdk_device_by_kni_port_id;
+ uword *vu_sw_if_index_by_listener_fd;
+ uword *vu_sw_if_index_by_sock_fd;
+ u32 *vu_inactive_interfaces_device_index;
+
+ /*
+ * flag indicating that a posted admin up/down
+ * (via post_sw_interface_set_flags) is in progress
+ */
+ u8 admin_up_down_in_progress;
+
+ u8 use_rss;
+
+ /* which cpus are running dpdk-input */
+ int input_cpu_first_index;
+ int input_cpu_count;
+
+ /* which cpus are running I/O TX */
+ int hqos_cpu_first_index;
+ int hqos_cpu_count;
+
+ /* control interval of dpdk link state and stat polling */
+ f64 link_state_poll_interval;
+ f64 stat_poll_interval;
+
+ /* Sleep for this many MS after each device poll */
+  /* Sleep for this many ms after each device poll */
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+ dpdk_config_main_t *conf;
+} dpdk_main_t;
+
+dpdk_main_t dpdk_main;
+
+typedef struct
+{
+ u32 buffer_index;
+ u16 device_index;
+ u8 queue_index;
+ struct rte_mbuf mb;
+ /* Copy of VLIB buffer; packet data stored in pre_data. */
+ vlib_buffer_t buffer;
+} dpdk_tx_dma_trace_t;
+
+typedef struct
+{
+ u32 buffer_index;
+ u16 device_index;
+ u16 queue_index;
+ struct rte_mbuf mb;
+ vlib_buffer_t buffer; /* Copy of VLIB buffer; pkt data stored in pre_data. */
+ u8 data[256]; /* First 256 data bytes, used for hexdump */
+} dpdk_rx_dma_trace_t;
+
+void vnet_buffer_needs_dpdk_mb (vlib_buffer_t * b);
+
+clib_error_t *dpdk_set_mac_address (vnet_hw_interface_t * hi, char *address);
+
+clib_error_t *dpdk_set_mc_filter (vnet_hw_interface_t * hi,
+ struct ether_addr mc_addr_vec[], int naddr);
+
+void dpdk_thread_input (dpdk_main_t * dm, dpdk_device_t * xd);
+
+clib_error_t *dpdk_port_setup (dpdk_main_t * dm, dpdk_device_t * xd);
+
+u32 dpdk_interface_tx_vector (vlib_main_t * vm, u32 dev_instance);
+
+struct rte_mbuf *dpdk_replicate_packet_mb (vlib_buffer_t * b);
+struct rte_mbuf *dpdk_zerocopy_replicate_packet_mb (vlib_buffer_t * b);
+
+#define foreach_dpdk_error \
+ _(NONE, "no error") \
+ _(RX_PACKET_ERROR, "Rx packet errors") \
+ _(RX_BAD_FCS, "Rx bad fcs") \
+ _(IP_CHECKSUM_ERROR, "Rx ip checksum errors") \
+ _(RX_ALLOC_FAIL, "rx buf alloc from free list failed") \
+ _(RX_ALLOC_NO_PHYSMEM, "rx buf alloc failed no physmem") \
+ _(RX_ALLOC_DROP_PKTS, "rx packets dropped due to alloc error")
+
+typedef enum
+{
+#define _(f,s) DPDK_ERROR_##f,
+ foreach_dpdk_error
+#undef _
+ DPDK_N_ERROR,
+} dpdk_error_t;
+
+int dpdk_set_stat_poll_interval (f64 interval);
+int dpdk_set_link_state_poll_interval (f64 interval);
+void dpdk_update_link_state (dpdk_device_t * xd, f64 now);
+void dpdk_device_lock_init (dpdk_device_t * xd);
+void dpdk_device_lock_free (dpdk_device_t * xd);
+
+static inline u64
+vnet_get_aggregate_rx_packets (void)
+{
+ dpdk_main_t *dm = &dpdk_main;
+ u64 sum = 0;
+ dpdk_worker_t *dw;
+
+ vec_foreach (dw, dm->workers) sum += dw->aggregate_rx_packets;
+
+ return sum;
+}
+
+void dpdk_rx_trace (dpdk_main_t * dm,
+ vlib_node_runtime_t * node,
+ dpdk_device_t * xd,
+ u16 queue_id, u32 * buffers, uword n_buffers);
+
+#define EFD_OPERATION_LESS_THAN 0
+#define EFD_OPERATION_GREATER_OR_EQUAL 1
+
+format_function_t format_dpdk_device_name;
+format_function_t format_dpdk_device;
+format_function_t format_dpdk_tx_dma_trace;
+format_function_t format_dpdk_rx_dma_trace;
+format_function_t format_dpdk_rte_mbuf;
+format_function_t format_dpdk_rx_rte_mbuf;
+unformat_function_t unformat_socket_mem;
+clib_error_t *unformat_rss_fn (unformat_input_t * input, uword * rss_fn);
+clib_error_t *unformat_hqos (unformat_input_t * input,
+ dpdk_device_config_hqos_t * hqos);
+
+uword
+admin_up_down_process (vlib_main_t * vm,
+ vlib_node_runtime_t * rt, vlib_frame_t * f);
+
+#endif /* __included_dpdk_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/dpdk/dpdk_priv.h b/src/vnet/devices/dpdk/dpdk_priv.h
new file mode 100644
index 00000000000..0c81dbc3beb
--- /dev/null
+++ b/src/vnet/devices/dpdk/dpdk_priv.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define DPDK_NB_RX_DESC_DEFAULT 1024
+#define DPDK_NB_TX_DESC_DEFAULT 1024
+#define DPDK_NB_RX_DESC_VIRTIO 256
+#define DPDK_NB_TX_DESC_VIRTIO 256
+
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_VF 0x154C
+
+/* These args appear by themselves */
+#define foreach_eal_double_hyphen_predicate_arg \
+_(no-shconf) \
+_(no-hpet) \
+_(no-huge) \
+_(vmware-tsc-map)
+
+#define foreach_eal_single_hyphen_mandatory_arg \
+_(coremask, c) \
+_(nchannels, n) \
+
+#define foreach_eal_single_hyphen_arg \
+_(blacklist, b) \
+_(mem-alloc-request, m) \
+_(force-ranks, r)
+
+/* These args are preceded by "--" and followed by a single string */
+#define foreach_eal_double_hyphen_arg \
+_(huge-dir) \
+_(proc-type) \
+_(file-prefix) \
+_(vdev)
+
+static inline void
+dpdk_get_xstats (dpdk_device_t * xd)
+{
+ int len;
+ if ((len = rte_eth_xstats_get (xd->device_index, NULL, 0)) > 0)
+ {
+ vec_validate (xd->xstats, len - 1);
+ vec_validate (xd->last_cleared_xstats, len - 1);
+
+ len =
+ rte_eth_xstats_get (xd->device_index, xd->xstats,
+ vec_len (xd->xstats));
+
+ ASSERT (vec_len (xd->xstats) == len);
+ ASSERT (vec_len (xd->last_cleared_xstats) == len);
+
+ _vec_len (xd->xstats) = len;
+ _vec_len (xd->last_cleared_xstats) = len;
+
+ }
+}
+
+
+static inline void
+dpdk_update_counters (dpdk_device_t * xd, f64 now)
+{
+ vlib_simple_counter_main_t *cm;
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 my_cpu = os_get_cpu_number ();
+ u64 rxerrors, last_rxerrors;
+
+ /* only update counters for PMD interfaces */
+ if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0)
+ return;
+
+ xd->time_last_stats_update = now ? now : xd->time_last_stats_update;
+ clib_memcpy (&xd->last_stats, &xd->stats, sizeof (xd->last_stats));
+ rte_eth_stats_get (xd->device_index, &xd->stats);
+
+ /* maybe bump interface rx no buffer counter */
+ if (PREDICT_FALSE (xd->stats.rx_nombuf != xd->last_stats.rx_nombuf))
+ {
+ cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
+ VNET_INTERFACE_COUNTER_RX_NO_BUF);
+
+ vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index,
+ xd->stats.rx_nombuf -
+ xd->last_stats.rx_nombuf);
+ }
+
+ /* missed pkt counter */
+ if (PREDICT_FALSE (xd->stats.imissed != xd->last_stats.imissed))
+ {
+ cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
+ VNET_INTERFACE_COUNTER_RX_MISS);
+
+ vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index,
+ xd->stats.imissed -
+ xd->last_stats.imissed);
+ }
+ rxerrors = xd->stats.ierrors;
+ last_rxerrors = xd->last_stats.ierrors;
+
+ if (PREDICT_FALSE (rxerrors != last_rxerrors))
+ {
+ cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
+ VNET_INTERFACE_COUNTER_RX_ERROR);
+
+ vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index,
+ rxerrors - last_rxerrors);
+ }
+
+ dpdk_get_xstats (xd);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/dpdk/format.c b/src/vnet/devices/dpdk/format.c
new file mode 100644
index 00000000000..ff7c7a5a41c
--- /dev/null
+++ b/src/vnet/devices/dpdk/format.c
@@ -0,0 +1,763 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/vnet.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/format.h>
+#include <vlib/unix/cj.h>
+#include <assert.h>
+
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/dpdk/dpdk.h>
+
+#include "dpdk_priv.h"
+#include <vppinfra/error.h>
+
+#define foreach_dpdk_counter \
+ _ (tx_frames_ok, opackets) \
+ _ (tx_bytes_ok, obytes) \
+ _ (tx_errors, oerrors) \
+ _ (rx_frames_ok, ipackets) \
+ _ (rx_bytes_ok, ibytes) \
+ _ (rx_errors, ierrors) \
+ _ (rx_missed, imissed) \
+ _ (rx_no_bufs, rx_nombuf)
+
+#define foreach_dpdk_q_counter \
+ _ (rx_frames_ok, q_ipackets) \
+ _ (tx_frames_ok, q_opackets) \
+ _ (rx_bytes_ok, q_ibytes) \
+ _ (tx_bytes_ok, q_obytes) \
+ _ (rx_errors, q_errors)
+
+#define foreach_dpdk_rss_hf \
+ _(ETH_RSS_FRAG_IPV4, "ipv4-frag") \
+ _(ETH_RSS_NONFRAG_IPV4_TCP, "ipv4-tcp") \
+ _(ETH_RSS_NONFRAG_IPV4_UDP, "ipv4-udp") \
+ _(ETH_RSS_NONFRAG_IPV4_SCTP, "ipv4-sctp") \
+ _(ETH_RSS_NONFRAG_IPV4_OTHER, "ipv4-other") \
+ _(ETH_RSS_IPV4, "ipv4") \
+ _(ETH_RSS_IPV6_TCP_EX, "ipv6-tcp-ex") \
+ _(ETH_RSS_IPV6_UDP_EX, "ipv6-udp-ex") \
+ _(ETH_RSS_FRAG_IPV6, "ipv6-frag") \
+ _(ETH_RSS_NONFRAG_IPV6_TCP, "ipv6-tcp") \
+ _(ETH_RSS_NONFRAG_IPV6_UDP, "ipv6-udp") \
+ _(ETH_RSS_NONFRAG_IPV6_SCTP, "ipv6-sctp") \
+ _(ETH_RSS_NONFRAG_IPV6_OTHER, "ipv6-other") \
+ _(ETH_RSS_L2_PAYLOAD, "l2-payload") \
+ _(ETH_RSS_IPV6_EX, "ipv6-ex") \
+ _(ETH_RSS_IPV6, "ipv6")
+
+
+#define foreach_dpdk_rx_offload_caps \
+ _(DEV_RX_OFFLOAD_VLAN_STRIP, "vlan-strip") \
+ _(DEV_RX_OFFLOAD_IPV4_CKSUM, "ipv4-cksum") \
+ _(DEV_RX_OFFLOAD_UDP_CKSUM , "udp-cksum") \
+ _(DEV_RX_OFFLOAD_TCP_CKSUM , "tcp-cksum") \
+  _(DEV_RX_OFFLOAD_TCP_LRO , "tcp-lro") \
+ _(DEV_RX_OFFLOAD_QINQ_STRIP, "qinq-strip")
+
+#define foreach_dpdk_tx_offload_caps \
+ _(DEV_TX_OFFLOAD_VLAN_INSERT, "vlan-insert") \
+ _(DEV_TX_OFFLOAD_IPV4_CKSUM, "ipv4-cksum") \
+ _(DEV_TX_OFFLOAD_UDP_CKSUM , "udp-cksum") \
+ _(DEV_TX_OFFLOAD_TCP_CKSUM , "tcp-cksum") \
+ _(DEV_TX_OFFLOAD_SCTP_CKSUM , "sctp-cksum") \
+ _(DEV_TX_OFFLOAD_TCP_TSO , "tcp-tso") \
+ _(DEV_TX_OFFLOAD_UDP_TSO , "udp-tso") \
+ _(DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, "outer-ipv4-cksum") \
+ _(DEV_TX_OFFLOAD_QINQ_INSERT, "qinq-insert")
+
+#if RTE_VERSION < RTE_VERSION_NUM(16, 11, 0, 0)
+/* New ol_flags bits added in DPDK-16.11 */
+#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
+#define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
+#endif
+
+#define foreach_dpdk_pkt_rx_offload_flag \
+  _ (PKT_RX_VLAN_PKT, "RX packet is an 802.1q VLAN packet") \
+ _ (PKT_RX_RSS_HASH, "RX packet with RSS hash result") \
+ _ (PKT_RX_FDIR, "RX packet with FDIR infos") \
+ _ (PKT_RX_L4_CKSUM_BAD, "L4 cksum of RX pkt. is not OK") \
+ _ (PKT_RX_IP_CKSUM_BAD, "IP cksum of RX pkt. is not OK") \
+ _ (PKT_RX_VLAN_STRIPPED, "RX packet VLAN tag stripped") \
+ _ (PKT_RX_IP_CKSUM_GOOD, "IP cksum of RX pkt. is valid") \
+ _ (PKT_RX_L4_CKSUM_GOOD, "L4 cksum of RX pkt. is valid") \
+ _ (PKT_RX_IEEE1588_PTP, "RX IEEE1588 L2 Ethernet PT Packet") \
+ _ (PKT_RX_IEEE1588_TMST, "RX IEEE1588 L2/L4 timestamped packet") \
+ _ (PKT_RX_QINQ_STRIPPED, "RX packet QinQ tags stripped")
+
+#if RTE_VERSION < RTE_VERSION_NUM(16, 11, 0, 0)
+/* PTYPE added in DPDK-16.11 */
+#define RTE_PTYPE_L2_ETHER_VLAN 0x00000006
+#define RTE_PTYPE_L2_ETHER_QINQ 0x00000007
+#endif
+
+#define foreach_dpdk_pkt_type \
+ _ (L2, ETHER, "Ethernet packet") \
+ _ (L2, ETHER_TIMESYNC, "Ethernet packet for time sync") \
+ _ (L2, ETHER_ARP, "ARP packet") \
+ _ (L2, ETHER_LLDP, "LLDP (Link Layer Discovery Protocol) packet") \
+ _ (L2, ETHER_NSH, "NSH (Network Service Header) packet") \
+ _ (L2, ETHER_VLAN, "VLAN packet") \
+ _ (L2, ETHER_QINQ, "QinQ packet") \
+ _ (L3, IPV4, "IPv4 packet without extension headers") \
+ _ (L3, IPV4_EXT, "IPv4 packet with extension headers") \
+ _ (L3, IPV4_EXT_UNKNOWN, "IPv4 packet with or without extension headers") \
+ _ (L3, IPV6, "IPv6 packet without extension headers") \
+ _ (L3, IPV6_EXT, "IPv6 packet with extension headers") \
+ _ (L3, IPV6_EXT_UNKNOWN, "IPv6 packet with or without extension headers") \
+ _ (L4, TCP, "TCP packet") \
+ _ (L4, UDP, "UDP packet") \
+ _ (L4, FRAG, "Fragmented IP packet") \
+ _ (L4, SCTP, "SCTP (Stream Control Transmission Protocol) packet") \
+ _ (L4, ICMP, "ICMP packet") \
+ _ (L4, NONFRAG, "Non-fragmented IP packet") \
+ _ (TUNNEL, GRE, "GRE tunneling packet") \
+ _ (TUNNEL, VXLAN, "VXLAN tunneling packet") \
+ _ (TUNNEL, NVGRE, "NVGRE Tunneling packet") \
+ _ (TUNNEL, GENEVE, "GENEVE Tunneling packet") \
+ _ (TUNNEL, GRENAT, "Teredo, VXLAN or GRE Tunneling packet") \
+ _ (INNER_L2, ETHER, "Inner Ethernet packet") \
+ _ (INNER_L2, ETHER_VLAN, "Inner Ethernet packet with VLAN") \
+ _ (INNER_L3, IPV4, "Inner IPv4 packet without extension headers") \
+ _ (INNER_L3, IPV4_EXT, "Inner IPv4 packet with extension headers") \
+ _ (INNER_L3, IPV4_EXT_UNKNOWN, "Inner IPv4 packet with or without extension headers") \
+ _ (INNER_L3, IPV6, "Inner IPv6 packet without extension headers") \
+ _ (INNER_L3, IPV6_EXT, "Inner IPv6 packet with extension headers") \
+ _ (INNER_L3, IPV6_EXT_UNKNOWN, "Inner IPv6 packet with or without extension headers") \
+ _ (INNER_L4, TCP, "Inner TCP packet") \
+ _ (INNER_L4, UDP, "Inner UDP packet") \
+  _ (INNER_L4, FRAG, "Inner fragmented IP packet") \
+ _ (INNER_L4, SCTP, "Inner SCTP (Stream Control Transmission Protocol) packet") \
+ _ (INNER_L4, ICMP, "Inner ICMP packet") \
+ _ (INNER_L4, NONFRAG, "Inner non-fragmented IP packet")
+
+#define foreach_dpdk_pkt_tx_offload_flag \
+  _ (PKT_TX_VLAN_PKT, "TX packet is an 802.1q VLAN packet") \
+ _ (PKT_TX_IP_CKSUM, "IP cksum of TX pkt. computed by NIC") \
+ _ (PKT_TX_TCP_CKSUM, "TCP cksum of TX pkt. computed by NIC") \
+ _ (PKT_TX_SCTP_CKSUM, "SCTP cksum of TX pkt. computed by NIC") \
+ _ (PKT_TX_IEEE1588_TMST, "TX IEEE1588 packet to timestamp")
+
+#define foreach_dpdk_pkt_offload_flag \
+ foreach_dpdk_pkt_rx_offload_flag \
+ foreach_dpdk_pkt_tx_offload_flag
+
+u8 *
+format_dpdk_device_name (u8 * s, va_list * args)
+{
+ dpdk_main_t *dm = &dpdk_main;
+ char *devname_format;
+ char *device_name;
+ u32 i = va_arg (*args, u32);
+ struct rte_eth_dev_info dev_info;
+ u8 *ret;
+
+ if (dm->conf->interface_name_format_decimal)
+ devname_format = "%s%d/%d/%d";
+ else
+ devname_format = "%s%x/%x/%x";
+
+ switch (dm->devices[i].port_type)
+ {
+ case VNET_DPDK_PORT_TYPE_ETH_1G:
+ device_name = "GigabitEthernet";
+ break;
+
+ case VNET_DPDK_PORT_TYPE_ETH_10G:
+ device_name = "TenGigabitEthernet";
+ break;
+
+ case VNET_DPDK_PORT_TYPE_ETH_40G:
+ device_name = "FortyGigabitEthernet";
+ break;
+
+ case VNET_DPDK_PORT_TYPE_ETH_100G:
+ device_name = "HundredGigabitEthernet";
+ break;
+
+ case VNET_DPDK_PORT_TYPE_ETH_BOND:
+ return format (s, "BondEthernet%d", dm->devices[i].device_index);
+
+ case VNET_DPDK_PORT_TYPE_ETH_SWITCH:
+ device_name = "EthernetSwitch";
+ break;
+
+ case VNET_DPDK_PORT_TYPE_AF_PACKET:
+ rte_eth_dev_info_get (i, &dev_info);
+ return format (s, "af_packet%d", dm->devices[i].af_packet_port_id);
+
+ default:
+ case VNET_DPDK_PORT_TYPE_UNKNOWN:
+ device_name = "UnknownEthernet";
+ break;
+ }
+
+ rte_eth_dev_info_get (i, &dev_info);
+
+ if (dev_info.pci_dev)
+ ret = format (s, devname_format, device_name, dev_info.pci_dev->addr.bus,
+ dev_info.pci_dev->addr.devid,
+ dev_info.pci_dev->addr.function);
+ else
+ ret = format (s, "%s%d", device_name, dm->devices[i].device_index);
+
+ if (dm->devices[i].interface_name_suffix)
+ return format (ret, "/%s", dm->devices[i].interface_name_suffix);
+ return ret;
+}
+
+static u8 *
+format_dpdk_device_type (u8 * s, va_list * args)
+{
+ dpdk_main_t *dm = &dpdk_main;
+ char *dev_type;
+ u32 i = va_arg (*args, u32);
+
+ switch (dm->devices[i].pmd)
+ {
+ case VNET_DPDK_PMD_E1000EM:
+ dev_type = "Intel 82540EM (e1000)";
+ break;
+
+ case VNET_DPDK_PMD_IGB:
+ dev_type = "Intel e1000";
+ break;
+
+ case VNET_DPDK_PMD_I40E:
+ dev_type = "Intel X710/XL710 Family";
+ break;
+
+ case VNET_DPDK_PMD_I40EVF:
+ dev_type = "Intel X710/XL710 Family VF";
+ break;
+
+ case VNET_DPDK_PMD_FM10K:
+ dev_type = "Intel FM10000 Family Ethernet Switch";
+ break;
+
+ case VNET_DPDK_PMD_IGBVF:
+ dev_type = "Intel e1000 VF";
+ break;
+
+ case VNET_DPDK_PMD_VIRTIO:
+ dev_type = "Red Hat Virtio";
+ break;
+
+ case VNET_DPDK_PMD_IXGBEVF:
+ dev_type = "Intel 82599 VF";
+ break;
+
+ case VNET_DPDK_PMD_IXGBE:
+ dev_type = "Intel 82599";
+ break;
+
+ case VNET_DPDK_PMD_ENIC:
+ dev_type = "Cisco VIC";
+ break;
+
+ case VNET_DPDK_PMD_CXGBE:
+ dev_type = "Chelsio T4/T5";
+ break;
+
+ case VNET_DPDK_PMD_MLX5:
+ dev_type = "Mellanox ConnectX-4 Family";
+ break;
+
+ case VNET_DPDK_PMD_VMXNET3:
+ dev_type = "VMware VMXNET3";
+ break;
+
+ case VNET_DPDK_PMD_AF_PACKET:
+ dev_type = "af_packet";
+ break;
+
+ case VNET_DPDK_PMD_BOND:
+ dev_type = "Ethernet Bonding";
+ break;
+
+ case VNET_DPDK_PMD_DPAA2:
+ dev_type = "NXP DPAA2 Mac";
+ break;
+
+ default:
+ case VNET_DPDK_PMD_UNKNOWN:
+ dev_type = "### UNKNOWN ###";
+ break;
+ }
+
+ return format (s, dev_type);
+}
+
+static u8 *
+format_dpdk_link_status (u8 * s, va_list * args)
+{
+ dpdk_device_t *xd = va_arg (*args, dpdk_device_t *);
+ struct rte_eth_link *l = &xd->link;
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, xd->vlib_hw_if_index);
+
+ s = format (s, "%s ", l->link_status ? "up" : "down");
+ if (l->link_status)
+ {
+ u32 promisc = rte_eth_promiscuous_get (xd->device_index);
+
+ s = format (s, "%s duplex ", (l->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ "full" : "half");
+ s = format (s, "speed %u mtu %d %s\n", l->link_speed,
+ hi->max_packet_bytes, promisc ? " promisc" : "");
+ }
+ else
+ s = format (s, "\n");
+
+ return s;
+}
+
+#define _line_len 72
+#define _(v, str) \
+if (bitmap & v) { \
+ if (format_get_indent (s) > next_split ) { \
+ next_split += _line_len; \
+ s = format(s,"\n%U", format_white_space, indent); \
+ } \
+ s = format(s, "%s ", str); \
+}
+
+static u8 *
+format_dpdk_rss_hf_name (u8 * s, va_list * args)
+{
+ u64 bitmap = va_arg (*args, u64);
+ int next_split = _line_len;
+ int indent = format_get_indent (s);
+
+ if (!bitmap)
+ return format (s, "none");
+
+ foreach_dpdk_rss_hf return s;
+}
+
+static u8 *
+format_dpdk_rx_offload_caps (u8 * s, va_list * args)
+{
+ u32 bitmap = va_arg (*args, u32);
+ int next_split = _line_len;
+ int indent = format_get_indent (s);
+
+ if (!bitmap)
+ return format (s, "none");
+
+ foreach_dpdk_rx_offload_caps return s;
+}
+
+static u8 *
+format_dpdk_tx_offload_caps (u8 * s, va_list * args)
+{
+ u32 bitmap = va_arg (*args, u32);
+ int next_split = _line_len;
+ int indent = format_get_indent (s);
+ if (!bitmap)
+ return format (s, "none");
+
+ foreach_dpdk_tx_offload_caps return s;
+}
+
+#undef _line_len
+#undef _
+
+u8 *
+format_dpdk_device (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ int verbose = va_arg (*args, int);
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
+ uword indent = format_get_indent (s);
+ f64 now = vlib_time_now (dm->vlib_main);
+ struct rte_eth_dev_info di;
+
+ dpdk_update_counters (xd, now);
+ dpdk_update_link_state (xd, now);
+
+ s = format (s, "%U\n%Ucarrier %U",
+ format_dpdk_device_type, xd->device_index,
+ format_white_space, indent + 2, format_dpdk_link_status, xd);
+
+ rte_eth_dev_info_get (xd->device_index, &di);
+
+ if (verbose > 1 && xd->flags & DPDK_DEVICE_FLAG_PMD)
+ {
+ struct rte_pci_device *pci;
+ struct rte_eth_rss_conf rss_conf;
+ int vlan_off;
+ int retval;
+
+ rss_conf.rss_key = 0;
+ retval = rte_eth_dev_rss_hash_conf_get (xd->device_index, &rss_conf);
+ if (retval < 0)
+ clib_warning ("rte_eth_dev_rss_hash_conf_get returned %d", retval);
+ pci = di.pci_dev;
+
+ if (pci)
+ s =
+ format (s,
+ "%Upci id: device %04x:%04x subsystem %04x:%04x\n"
+ "%Upci address: %04x:%02x:%02x.%02x\n",
+ format_white_space, indent + 2, pci->id.vendor_id,
+ pci->id.device_id, pci->id.subsystem_vendor_id,
+ pci->id.subsystem_device_id, format_white_space, indent + 2,
+ pci->addr.domain, pci->addr.bus, pci->addr.devid,
+ pci->addr.function);
+ s =
+ format (s, "%Umax rx packet len: %d\n", format_white_space,
+ indent + 2, di.max_rx_pktlen);
+ s =
+ format (s, "%Umax num of queues: rx %d tx %d\n", format_white_space,
+ indent + 2, di.max_rx_queues, di.max_tx_queues);
+ s =
+ format (s, "%Upromiscuous: unicast %s all-multicast %s\n",
+ format_white_space, indent + 2,
+ rte_eth_promiscuous_get (xd->device_index) ? "on" : "off",
+ rte_eth_promiscuous_get (xd->device_index) ? "on" : "off");
+ vlan_off = rte_eth_dev_get_vlan_offload (xd->device_index);
+ s = format (s, "%Uvlan offload: strip %s filter %s qinq %s\n",
+ format_white_space, indent + 2,
+ vlan_off & ETH_VLAN_STRIP_OFFLOAD ? "on" : "off",
+ vlan_off & ETH_VLAN_FILTER_OFFLOAD ? "on" : "off",
+ vlan_off & ETH_VLAN_EXTEND_OFFLOAD ? "on" : "off");
+ s = format (s, "%Urx offload caps: %U\n",
+ format_white_space, indent + 2,
+ format_dpdk_rx_offload_caps, di.rx_offload_capa);
+ s = format (s, "%Utx offload caps: %U\n",
+ format_white_space, indent + 2,
+ format_dpdk_tx_offload_caps, di.tx_offload_capa);
+ s = format (s, "%Urss active: %U\n"
+ "%Urss supported: %U\n",
+ format_white_space, indent + 2,
+ format_dpdk_rss_hf_name, rss_conf.rss_hf,
+ format_white_space, indent + 2,
+ format_dpdk_rss_hf_name, di.flow_type_rss_offloads);
+ }
+
+ s = format (s, "%Urx queues %d, rx desc %d, tx queues %d, tx desc %d\n",
+ format_white_space, indent + 2,
+ xd->rx_q_used, xd->nb_rx_desc, xd->tx_q_used, xd->nb_tx_desc);
+
+ if (xd->cpu_socket > -1)
+ s = format (s, "%Ucpu socket %d\n",
+ format_white_space, indent + 2, xd->cpu_socket);
+
+ /* $$$ MIB counters */
+ {
+#define _(N, V) \
+ if ((xd->stats.V - xd->last_cleared_stats.V) != 0) { \
+ s = format (s, "\n%U%-40U%16Ld", \
+ format_white_space, indent + 2, \
+ format_c_identifier, #N, \
+ xd->stats.V - xd->last_cleared_stats.V); \
+ } \
+
+ foreach_dpdk_counter
+#undef _
+ }
+
+ u8 *xs = 0;
+ u32 i = 0;
+ struct rte_eth_xstat *xstat, *last_xstat;
+ struct rte_eth_xstat_name *xstat_names = 0;
+ int len = rte_eth_xstats_get_names (xd->device_index, NULL, 0);
+ vec_validate (xstat_names, len - 1);
+ rte_eth_xstats_get_names (xd->device_index, xstat_names, len);
+
+ ASSERT (vec_len (xd->xstats) == vec_len (xd->last_cleared_xstats));
+
+ /* *INDENT-OFF* */
+ vec_foreach_index(i, xd->xstats)
+ {
+ u64 delta = 0;
+ xstat = vec_elt_at_index(xd->xstats, i);
+ last_xstat = vec_elt_at_index(xd->last_cleared_xstats, i);
+
+ delta = xstat->value - last_xstat->value;
+ if (verbose == 2 || (verbose && delta))
+ {
+	  /* format_c_identifier doesn't like C strings inside a vector */
+ u8 * name = format(0,"%s", xstat_names[i].name);
+ xs = format(xs, "\n%U%-38U%16Ld",
+ format_white_space, indent + 4,
+ format_c_identifier, name, delta);
+ vec_free(name);
+ }
+ }
+ /* *INDENT-ON* */
+
+ vec_free (xstat_names);
+
+ if (xs)
+ {
+ s = format (s, "\n%Uextended stats:%v",
+ format_white_space, indent + 2, xs);
+ vec_free (xs);
+ }
+
+ return s;
+}
+
+u8 *
+format_dpdk_tx_dma_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ CLIB_UNUSED (vnet_main_t * vnm) = vnet_get_main ();
+ dpdk_tx_dma_trace_t *t = va_arg (*va, dpdk_tx_dma_trace_t *);
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_t *xd = vec_elt_at_index (dm->devices, t->device_index);
+ uword indent = format_get_indent (s);
+ vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
+
+ s = format (s, "%U tx queue %d",
+ format_vnet_sw_interface_name, vnm, sw, t->queue_index);
+
+ s = format (s, "\n%Ubuffer 0x%x: %U",
+ format_white_space, indent,
+ t->buffer_index, format_vlib_buffer, &t->buffer);
+
+ s = format (s, "\n%U%U", format_white_space, indent,
+ format_ethernet_header_with_length, t->buffer.pre_data,
+ sizeof (t->buffer.pre_data));
+
+ return s;
+}
+
+u8 *
+format_dpdk_rx_dma_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ CLIB_UNUSED (vnet_main_t * vnm) = vnet_get_main ();
+ dpdk_rx_dma_trace_t *t = va_arg (*va, dpdk_rx_dma_trace_t *);
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_t *xd = vec_elt_at_index (dm->devices, t->device_index);
+ format_function_t *f;
+ uword indent = format_get_indent (s);
+ vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
+
+ s = format (s, "%U rx queue %d",
+ format_vnet_sw_interface_name, vnm, sw, t->queue_index);
+
+ s = format (s, "\n%Ubuffer 0x%x: %U",
+ format_white_space, indent,
+ t->buffer_index, format_vlib_buffer, &t->buffer);
+
+ s = format (s, "\n%U%U",
+ format_white_space, indent,
+ format_dpdk_rte_mbuf, &t->mb, &t->data);
+
+ if (vm->trace_main.verbose)
+ {
+ s = format (s, "\n%UPacket Dump%s", format_white_space, indent + 2,
+ t->mb.data_len > sizeof (t->data) ? " (truncated)" : "");
+ s = format (s, "\n%U%U", format_white_space, indent + 4,
+ format_hexdump, &t->data,
+ t->mb.data_len >
+ sizeof (t->data) ? sizeof (t->data) : t->mb.data_len);
+ }
+ f = node->format_buffer;
+ if (!f)
+ f = format_hex_bytes;
+ s = format (s, "\n%U%U", format_white_space, indent,
+ f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
+
+ return s;
+}
+
+
+static inline u8 *
+format_dpdk_pkt_types (u8 * s, va_list * va)
+{
+ u32 *pkt_types = va_arg (*va, u32 *);
+ uword indent __attribute__ ((unused)) = format_get_indent (s) + 2;
+
+ if (!*pkt_types)
+ return s;
+
+ s = format (s, "Packet Types");
+
+#define _(L, F, S) \
+ if ((*pkt_types & RTE_PTYPE_##L##_MASK) == RTE_PTYPE_##L##_##F) \
+ { \
+ s = format (s, "\n%U%s (0x%04x) %s", format_white_space, indent, \
+ "RTE_PTYPE_" #L "_" #F, RTE_PTYPE_##L##_##F, S); \
+ }
+
+ foreach_dpdk_pkt_type
+#undef _
+ return s;
+}
+
+static inline u8 *
+format_dpdk_pkt_offload_flags (u8 * s, va_list * va)
+{
+ u64 *ol_flags = va_arg (*va, u64 *);
+ uword indent = format_get_indent (s) + 2;
+
+ if (!*ol_flags)
+ return s;
+
+ s = format (s, "Packet Offload Flags");
+
+#define _(F, S) \
+ if (*ol_flags & F) \
+ { \
+ s = format (s, "\n%U%s (0x%04x) %s", \
+ format_white_space, indent, #F, F, S); \
+ }
+
+ foreach_dpdk_pkt_offload_flag
+#undef _
+ return s;
+}
+
+u8 *
+format_dpdk_rte_mbuf_vlan (u8 * s, va_list * va)
+{
+ ethernet_vlan_header_tv_t *vlan_hdr =
+ va_arg (*va, ethernet_vlan_header_tv_t *);
+
+ if (clib_net_to_host_u16 (vlan_hdr->type) == ETHERNET_TYPE_DOT1AD)
+ {
+ s = format (s, "%U 802.1q vlan ",
+ format_ethernet_vlan_tci,
+ clib_net_to_host_u16 (vlan_hdr->priority_cfi_and_id));
+ vlan_hdr++;
+ }
+
+ s = format (s, "%U",
+ format_ethernet_vlan_tci,
+ clib_net_to_host_u16 (vlan_hdr->priority_cfi_and_id));
+
+ return s;
+}
+
+u8 *
+format_dpdk_rte_mbuf (u8 * s, va_list * va)
+{
+ struct rte_mbuf *mb = va_arg (*va, struct rte_mbuf *);
+ ethernet_header_t *eth_hdr = va_arg (*va, ethernet_header_t *);
+ uword indent = format_get_indent (s) + 2;
+
+ s = format (s, "PKT MBUF: port %d, nb_segs %d, pkt_len %d"
+ "\n%Ubuf_len %d, data_len %d, ol_flags 0x%x, data_off %d, phys_addr 0x%x"
+ "\n%Upacket_type 0x%x",
+ mb->port, mb->nb_segs, mb->pkt_len,
+ format_white_space, indent,
+ mb->buf_len, mb->data_len, mb->ol_flags, mb->data_off,
+ mb->buf_physaddr, format_white_space, indent, mb->packet_type);
+
+ if (mb->ol_flags)
+ s = format (s, "\n%U%U", format_white_space, indent,
+ format_dpdk_pkt_offload_flags, &mb->ol_flags);
+
+ if ((mb->ol_flags & PKT_RX_VLAN_PKT) &&
+ ((mb->ol_flags & (PKT_RX_VLAN_STRIPPED | PKT_RX_QINQ_STRIPPED)) == 0))
+ {
+ ethernet_vlan_header_tv_t *vlan_hdr =
+ ((ethernet_vlan_header_tv_t *) & (eth_hdr->type));
+ s = format (s, " %U", format_dpdk_rte_mbuf_vlan, vlan_hdr);
+ }
+
+ if (mb->packet_type)
+ s = format (s, "\n%U%U", format_white_space, indent,
+ format_dpdk_pkt_types, &mb->packet_type);
+
+ return s;
+}
+
+uword
+unformat_socket_mem (unformat_input_t * input, va_list * va)
+{
+ uword **r = va_arg (*va, uword **);
+ int i = 0;
+ u32 mem;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, ","))
+ hash_set (*r, i, 1024);
+ else if (unformat (input, "%u,", &mem))
+ hash_set (*r, i, mem);
+ else if (unformat (input, "%u", &mem))
+ hash_set (*r, i, mem);
+ else
+ {
+ unformat_put_input (input);
+ goto done;
+ }
+ i++;
+ }
+
+done:
+ return 1;
+}
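+
+/*
+ * Example input (editor's illustration): "1024,,512" requests 1024 MB of
+ * socket-mem for socket 0, the 1024 MB default for socket 1 (empty field)
+ * and 512 MB for socket 2.
+ */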
+
+clib_error_t *
+unformat_rss_fn (unformat_input_t * input, uword * rss_fn)
+{
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (0)
+ ;
+#undef _
+#define _(f, s) \
+ else if (unformat (input, s)) \
+ *rss_fn |= f;
+
+ foreach_dpdk_rss_hf
+#undef _
+ else
+ {
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ }
+ return 0;
+}
+
+clib_error_t *
+unformat_hqos (unformat_input_t * input, dpdk_device_config_hqos_t * hqos)
+{
+ clib_error_t *error = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "hqos-thread %u", &hqos->hqos_thread))
+ hqos->hqos_thread_valid = 1;
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ break;
+ }
+ }
+
+ return error;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/dpdk/hqos.c b/src/vnet/devices/dpdk/hqos.c
new file mode 100644
index 00000000000..d68bc48f80b
--- /dev/null
+++ b/src/vnet/devices/dpdk/hqos.c
@@ -0,0 +1,775 @@
+/*
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include <vppinfra/vec.h>
+#include <vppinfra/error.h>
+#include <vppinfra/format.h>
+#include <vppinfra/bitmap.h>
+
+#include <vnet/vnet.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/dpdk/dpdk.h>
+
+#include <vlib/unix/physmem.h>
+#include <vlib/pci/pci.h>
+#include <vlibmemory/api.h>
+#include <vlibmemory/vl_memory_msg_enum.h> /* enumerate all vlib messages */
+
+#define vl_typedefs /* define message structures */
+#include <vlibmemory/vl_memory_api_h.h>
+#undef vl_typedefs
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vlibmemory/vl_memory_api_h.h>
+#undef vl_printfun
+
+#include "dpdk_priv.h"
+
+dpdk_main_t dpdk_main;
+
+/***
+ *
+ * HQoS default configuration values
+ *
+ ***/
+
+static dpdk_device_config_hqos_t hqos_params_default = {
+ .hqos_thread_valid = 0,
+
+ .swq_size = 4096,
+ .burst_enq = 256,
+ .burst_deq = 220,
+
+ /*
+ * Packet field to identify the subport.
+ *
+ * Default value: Since only one subport is defined by default (see below:
+ * n_subports_per_port = 1), the subport ID is hardcoded to 0.
+ */
+ .pktfield0_slabpos = 0,
+ .pktfield0_slabmask = 0,
+
+ /*
+ * Packet field to identify the pipe.
+ *
+ * Default value: Assuming Ethernet/IPv4/UDP packets, UDP payload bits 12 .. 23
+ */
+ .pktfield1_slabpos = 40,
+ .pktfield1_slabmask = 0x0000000FFF000000LLU,
+
+ /* Packet field used as index into TC translation table to identify the traffic
+ * class and queue.
+ *
+ * Default value: Assuming Ethernet/IPv4 packets, IPv4 DSCP field
+ */
+ .pktfield2_slabpos = 8,
+ .pktfield2_slabmask = 0x00000000000000FCLLU,
+ .tc_table = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ },
+
+ /* port */
+ .port = {
+ .name = NULL, /* Set at init */
+ .socket = 0, /* Set at init */
+ .rate = 1250000000, /* Assuming 10GbE port */
+ .mtu = 14 + 1500, /* Assuming Ethernet/IPv4 pkt (Ethernet FCS not included) */
+ .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
+ .n_subports_per_port = 1,
+ .n_pipes_per_subport = 4096,
+ .qsize = {64, 64, 64, 64},
+ .pipe_profiles = NULL, /* Set at config */
+ .n_pipe_profiles = 1,
+
+#ifdef RTE_SCHED_RED
+ .red_params = {
+ /* Traffic Class 0 Colors Green / Yellow / Red */
+ [0][0] = {.min_th = 48,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+ [0][1] = {.min_th = 40,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+ [0][2] = {.min_th = 32,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+
+ /* Traffic Class 1 - Colors Green / Yellow / Red */
+ [1][0] = {.min_th = 48,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+ [1][1] = {.min_th = 40,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+ [1][2] = {.min_th = 32,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+
+ /* Traffic Class 2 - Colors Green / Yellow / Red */
+ [2][0] = {.min_th = 48,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+ [2][1] = {.min_th = 40,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+ [2][2] = {.min_th = 32,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+
+ /* Traffic Class 3 - Colors Green / Yellow / Red */
+ [3][0] = {.min_th = 48,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+ [3][1] = {.min_th = 40,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+ [3][2] = {.min_th = 32,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9}
+ },
+#endif /* RTE_SCHED_RED */
+ },
+};
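+
+/*
+ * Extraction sketch for the slab fields above (editor's illustration,
+ * byte-order handling elided): each classifier field is an 8-byte read at
+ * slabpos, masked and shifted down to an index:
+ *
+ *   u64 slab = *(u64 *) (pkt_data + slabpos);
+ *   u32 index = (slab & slabmask) >> __builtin_ctzll (slabmask);
+ *
+ * With the defaults, 12 bits of UDP payload select one of the 4096 pipes
+ * and the IPv4 DSCP bits index the 64-entry tc_table.
+ */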
+
+static struct rte_sched_subport_params hqos_subport_params_default = {
+ .tb_rate = 1250000000, /* 10GbE line rate (measured in bytes/second) */
+ .tb_size = 1000000,
+ .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
+ .tc_period = 10,
+};
+
+static struct rte_sched_pipe_params hqos_pipe_params_default = {
+ .tb_rate = 305175, /* 10GbE line rate divided by 4K pipes */
+ .tb_size = 1000000,
+ .tc_rate = {305175, 305175, 305175, 305175},
+ .tc_period = 40,
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ .tc_ov_weight = 1,
+#endif
+ .wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+};
+
+/***
+ *
+ * HQoS configuration
+ *
+ ***/
+
+int
+dpdk_hqos_validate_mask (u64 mask, u32 n)
+{
+ int count = __builtin_popcountll (mask);
+ int pos_lead = sizeof (u64) * 8 - __builtin_clzll (mask);
+ int pos_trail = __builtin_ctzll (mask);
+ int count_expected = __builtin_popcount (n - 1);
+
+ /* Handle the exceptions */
+ if (n == 0)
+ return -1; /* Error */
+
+ if ((mask == 0) && (n == 1))
+ return 0; /* OK */
+
+ if (((mask == 0) && (n != 1)) || ((mask != 0) && (n == 1)))
+ return -2; /* Error */
+
+ /* Check that mask is contiguous */
+ if ((pos_lead - pos_trail) != count)
+ return -3; /* Error */
+
+ /* Check that mask contains the expected number of bits set */
+ if (count != count_expected)
+ return -4; /* Error */
+
+ return 0; /* OK */
+}
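+
+/*
+ * Usage sketch (editor's illustration): a valid mask is a contiguous run of
+ * exactly log2(n) bits, e.g. for the default 4096 pipes per subport:
+ *
+ *   dpdk_hqos_validate_mask (0x0000000FFF000000LLU, 4096);  // 0: ok
+ *   dpdk_hqos_validate_mask (0x0000000F0F000000LLU, 4096);  // -3: not contiguous
+ */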
+
+void
+dpdk_device_config_hqos_pipe_profile_default (dpdk_device_config_hqos_t *
+ hqos, u32 pipe_profile_id)
+{
+ memcpy (&hqos->pipe[pipe_profile_id], &hqos_pipe_params_default,
+ sizeof (hqos_pipe_params_default));
+}
+
+void
+dpdk_device_config_hqos_default (dpdk_device_config_hqos_t * hqos)
+{
+ struct rte_sched_subport_params *subport_params;
+ struct rte_sched_pipe_params *pipe_params;
+ u32 *pipe_map;
+ u32 i;
+
+ memcpy (hqos, &hqos_params_default, sizeof (hqos_params_default));
+
+ /* pipe */
+ vec_add2 (hqos->pipe, pipe_params, hqos->port.n_pipe_profiles);
+
+ for (i = 0; i < vec_len (hqos->pipe); i++)
+ memcpy (&pipe_params[i],
+ &hqos_pipe_params_default, sizeof (hqos_pipe_params_default));
+
+ hqos->port.pipe_profiles = hqos->pipe;
+
+ /* subport */
+ vec_add2 (hqos->subport, subport_params, hqos->port.n_subports_per_port);
+
+ for (i = 0; i < vec_len (hqos->subport); i++)
+ memcpy (&subport_params[i],
+ &hqos_subport_params_default,
+ sizeof (hqos_subport_params_default));
+
+ /* pipe profile */
+ vec_add2 (hqos->pipe_map,
+ pipe_map,
+ hqos->port.n_subports_per_port * hqos->port.n_pipes_per_subport);
+
+ for (i = 0; i < vec_len (hqos->pipe_map); i++)
+ pipe_map[i] = 0;
+}
+
+/***
+ *
+ * HQoS init
+ *
+ ***/
+
+clib_error_t *
+dpdk_port_setup_hqos (dpdk_device_t * xd, dpdk_device_config_hqos_t * hqos)
+{
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ char name[32];
+ u32 subport_id, i;
+ int rv;
+
+ /* Detect the set of worker threads */
+ int worker_thread_first = 0;
+ int worker_thread_count = 0;
+
+ uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+ vlib_thread_registration_t *tr =
+ p ? (vlib_thread_registration_t *) p[0] : 0;
+
+ if (tr && tr->count > 0)
+ {
+ worker_thread_first = tr->first_index;
+ worker_thread_count = tr->count;
+ }
+
+ /* Allocate the per-thread device data array */
+ vec_validate_aligned (xd->hqos_wt, tm->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
+ memset (xd->hqos_wt, 0, tm->n_vlib_mains * sizeof (xd->hqos_wt[0]));
+
+ vec_validate_aligned (xd->hqos_ht, 0, CLIB_CACHE_LINE_BYTES);
+ memset (xd->hqos_ht, 0, sizeof (xd->hqos_ht[0]));
+
+ /* Allocate space for one SWQ per worker thread in the I/O TX thread data structure */
+ vec_validate (xd->hqos_ht->swq, worker_thread_count);
+
+ /* SWQ */
+ for (i = 0; i < worker_thread_count + 1; i++)
+ {
+ u32 swq_flags = RING_F_SP_ENQ | RING_F_SC_DEQ;
+
+ snprintf (name, sizeof (name), "SWQ-worker%u-to-device%u", i,
+ xd->device_index);
+ xd->hqos_ht->swq[i] =
+ rte_ring_create (name, hqos->swq_size, xd->cpu_socket, swq_flags);
+ if (xd->hqos_ht->swq[i] == NULL)
+ return clib_error_return (0,
+ "SWQ-worker%u-to-device%u: rte_ring_create err",
+ i, xd->device_index);
+ }
+
+ /*
+ * HQoS
+ */
+
+ /* HQoS port */
+ snprintf (name, sizeof (name), "HQoS%u", xd->device_index);
+ hqos->port.name = strdup (name);
+ if (hqos->port.name == NULL)
+ return clib_error_return (0, "HQoS%u: strdup err", xd->device_index);
+
+ hqos->port.socket = rte_eth_dev_socket_id (xd->device_index);
+ if (hqos->port.socket == SOCKET_ID_ANY)
+ hqos->port.socket = 0;
+
+ xd->hqos_ht->hqos = rte_sched_port_config (&hqos->port);
+ if (xd->hqos_ht->hqos == NULL)
+ return clib_error_return (0, "HQoS%u: rte_sched_port_config err",
+ xd->device_index);
+
+ /* HQoS subport */
+ for (subport_id = 0; subport_id < hqos->port.n_subports_per_port;
+ subport_id++)
+ {
+ u32 pipe_id;
+
+ rv =
+ rte_sched_subport_config (xd->hqos_ht->hqos, subport_id,
+ &hqos->subport[subport_id]);
+ if (rv)
+ return clib_error_return (0,
+ "HQoS%u subport %u: rte_sched_subport_config err (%d)",
+ xd->device_index, subport_id, rv);
+
+ /* HQoS pipe */
+ for (pipe_id = 0; pipe_id < hqos->port.n_pipes_per_subport; pipe_id++)
+ {
+ u32 pos = subport_id * hqos->port.n_pipes_per_subport + pipe_id;
+ u32 profile_id = hqos->pipe_map[pos];
+
+ rv =
+ rte_sched_pipe_config (xd->hqos_ht->hqos, subport_id, pipe_id,
+ profile_id);
+ if (rv)
+ return clib_error_return (0,
+ "HQoS%u subport %u pipe %u: rte_sched_pipe_config err (%d)",
+ xd->device_index, subport_id, pipe_id,
+ rv);
+ }
+ }
+
+ /* Set up per-thread device data for the I/O TX thread */
+ xd->hqos_ht->hqos_burst_enq = hqos->burst_enq;
+ xd->hqos_ht->hqos_burst_deq = hqos->burst_deq;
+ vec_validate (xd->hqos_ht->pkts_enq, 2 * hqos->burst_enq - 1);
+ vec_validate (xd->hqos_ht->pkts_deq, hqos->burst_deq - 1);
+ xd->hqos_ht->pkts_enq_len = 0;
+ xd->hqos_ht->swq_pos = 0;
+ xd->hqos_ht->flush_count = 0;
+
+  /* Set up per-thread device data for the main thread and each worker thread */
+ for (i = 0; i < worker_thread_count + 1; i++)
+ {
+ u32 tid;
+ if (i)
+ tid = worker_thread_first + (i - 1);
+ else
+ tid = i;
+
+ xd->hqos_wt[tid].swq = xd->hqos_ht->swq[i];
+ xd->hqos_wt[tid].hqos_field0_slabpos = hqos->pktfield0_slabpos;
+ xd->hqos_wt[tid].hqos_field0_slabmask = hqos->pktfield0_slabmask;
+ xd->hqos_wt[tid].hqos_field0_slabshr =
+ __builtin_ctzll (hqos->pktfield0_slabmask);
+ xd->hqos_wt[tid].hqos_field1_slabpos = hqos->pktfield1_slabpos;
+ xd->hqos_wt[tid].hqos_field1_slabmask = hqos->pktfield1_slabmask;
+ xd->hqos_wt[tid].hqos_field1_slabshr =
+ __builtin_ctzll (hqos->pktfield1_slabmask);
+ xd->hqos_wt[tid].hqos_field2_slabpos = hqos->pktfield2_slabpos;
+ xd->hqos_wt[tid].hqos_field2_slabmask = hqos->pktfield2_slabmask;
+ xd->hqos_wt[tid].hqos_field2_slabshr =
+ __builtin_ctzll (hqos->pktfield2_slabmask);
+ memcpy (xd->hqos_wt[tid].hqos_tc_table, hqos->tc_table,
+ sizeof (hqos->tc_table));
+ }
+
+ return 0;
+}
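+
+/*
+ * Resulting hierarchy (sketch): one rte_sched port per device, with
+ * port.n_subports_per_port subports, each carrying
+ * port.n_pipes_per_subport pipes whose profiles are looked up in
+ * hqos->pipe_map[] as configured above.
+ */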
+
+/***
+ *
+ * HQoS run-time
+ *
+ ***/
+/*
+ * dpdk_hqos_thread_internal_hqos_dbg_bypass - Debug-only variant of the
+ * HQoS thread main loop that bypasses the rte_sched port and transmits
+ * SWQ bursts directly on the device TX queue.
+ *
+ * vm
+ *     vlib main structure of the current thread
+ */
+static_always_inline void
+dpdk_hqos_thread_internal_hqos_dbg_bypass (vlib_main_t * vm)
+{
+ dpdk_main_t *dm = &dpdk_main;
+ u32 cpu_index = vm->cpu_index;
+ u32 dev_pos;
+
+ dev_pos = 0;
+ while (1)
+ {
+ vlib_worker_thread_barrier_check ();
+
+      u32 n_devs = vec_len (dm->devices_by_hqos_cpu[cpu_index]);
+      if (PREDICT_FALSE (n_devs == 0))
+	{
+	  dev_pos = 0;
+	  continue;
+	}
+      if (dev_pos >= n_devs)
+	dev_pos = 0;
+
+ dpdk_device_and_queue_t *dq =
+ vec_elt_at_index (dm->devices_by_hqos_cpu[cpu_index], dev_pos);
+ dpdk_device_t *xd = vec_elt_at_index (dm->devices, dq->device);
+
+ dpdk_device_hqos_per_hqos_thread_t *hqos = xd->hqos_ht;
+ u32 device_index = xd->device_index;
+ u16 queue_id = dq->queue_id;
+
+ struct rte_mbuf **pkts_enq = hqos->pkts_enq;
+ u32 pkts_enq_len = hqos->pkts_enq_len;
+ u32 swq_pos = hqos->swq_pos;
+ u32 n_swq = vec_len (hqos->swq), i;
+ u32 flush_count = hqos->flush_count;
+
+ for (i = 0; i < n_swq; i++)
+ {
+ /* Get current SWQ for this device */
+ struct rte_ring *swq = hqos->swq[swq_pos];
+
+ /* Read SWQ burst to packet buffer of this device */
+ pkts_enq_len += rte_ring_sc_dequeue_burst (swq,
+ (void **)
+ &pkts_enq[pkts_enq_len],
+ hqos->hqos_burst_enq);
+
+ /* Get next SWQ for this device */
+ swq_pos++;
+ if (swq_pos >= n_swq)
+ swq_pos = 0;
+ hqos->swq_pos = swq_pos;
+
+ /* HWQ TX enqueue when burst available */
+ if (pkts_enq_len >= hqos->hqos_burst_enq)
+ {
+ u32 n_pkts = rte_eth_tx_burst (device_index,
+ (uint16_t) queue_id,
+ pkts_enq,
+ (uint16_t) pkts_enq_len);
+
+ for (; n_pkts < pkts_enq_len; n_pkts++)
+ rte_pktmbuf_free (pkts_enq[n_pkts]);
+
+ pkts_enq_len = 0;
+ flush_count = 0;
+ break;
+ }
+ }
+      if (pkts_enq_len)
+	{
+	  flush_count++;
+	  if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
+	    {
+	      /* This is the scheduler-bypass path, so flush straight to the
+	         device TX queue; enqueueing into the rte_sched port here
+	         would strand the packets, since bypass never dequeues it */
+	      u32 n_pkts = rte_eth_tx_burst (device_index,
+					     (uint16_t) queue_id,
+					     pkts_enq,
+					     (uint16_t) pkts_enq_len);
+
+	      for (; n_pkts < pkts_enq_len; n_pkts++)
+		rte_pktmbuf_free (pkts_enq[n_pkts]);
+
+	      pkts_enq_len = 0;
+	      flush_count = 0;
+	    }
+	}
+ hqos->pkts_enq_len = pkts_enq_len;
+ hqos->flush_count = flush_count;
+
+ /* Advance to next device */
+ dev_pos++;
+ }
+}
+
+static_always_inline void
+dpdk_hqos_thread_internal (vlib_main_t * vm)
+{
+ dpdk_main_t *dm = &dpdk_main;
+ u32 cpu_index = vm->cpu_index;
+ u32 dev_pos;
+
+ dev_pos = 0;
+ while (1)
+ {
+ vlib_worker_thread_barrier_check ();
+
+ u32 n_devs = vec_len (dm->devices_by_hqos_cpu[cpu_index]);
+ if (PREDICT_FALSE (n_devs == 0))
+ {
+ dev_pos = 0;
+ continue;
+ }
+ if (dev_pos >= n_devs)
+ dev_pos = 0;
+
+ dpdk_device_and_queue_t *dq =
+ vec_elt_at_index (dm->devices_by_hqos_cpu[cpu_index], dev_pos);
+ dpdk_device_t *xd = vec_elt_at_index (dm->devices, dq->device);
+
+ dpdk_device_hqos_per_hqos_thread_t *hqos = xd->hqos_ht;
+ u32 device_index = xd->device_index;
+ u16 queue_id = dq->queue_id;
+
+ struct rte_mbuf **pkts_enq = hqos->pkts_enq;
+ struct rte_mbuf **pkts_deq = hqos->pkts_deq;
+ u32 pkts_enq_len = hqos->pkts_enq_len;
+ u32 swq_pos = hqos->swq_pos;
+ u32 n_swq = vec_len (hqos->swq), i;
+ u32 flush_count = hqos->flush_count;
+
+ /*
+ * SWQ dequeue and HQoS enqueue for current device
+ */
+ for (i = 0; i < n_swq; i++)
+ {
+ /* Get current SWQ for this device */
+ struct rte_ring *swq = hqos->swq[swq_pos];
+
+ /* Read SWQ burst to packet buffer of this device */
+ pkts_enq_len += rte_ring_sc_dequeue_burst (swq,
+ (void **)
+ &pkts_enq[pkts_enq_len],
+ hqos->hqos_burst_enq);
+
+ /* Get next SWQ for this device */
+ swq_pos++;
+ if (swq_pos >= n_swq)
+ swq_pos = 0;
+ hqos->swq_pos = swq_pos;
+
+ /* HQoS enqueue when burst available */
+ if (pkts_enq_len >= hqos->hqos_burst_enq)
+ {
+ rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);
+
+ pkts_enq_len = 0;
+ flush_count = 0;
+ break;
+ }
+ }
+ if (pkts_enq_len)
+ {
+ flush_count++;
+ if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
+ {
+ rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);
+
+ pkts_enq_len = 0;
+ flush_count = 0;
+ }
+ }
+ hqos->pkts_enq_len = pkts_enq_len;
+ hqos->flush_count = flush_count;
+
+ /*
+ * HQoS dequeue and HWQ TX enqueue for current device
+ */
+ {
+ u32 pkts_deq_len, n_pkts;
+
+ pkts_deq_len = rte_sched_port_dequeue (hqos->hqos,
+ pkts_deq,
+ hqos->hqos_burst_deq);
+
+ for (n_pkts = 0; n_pkts < pkts_deq_len;)
+ n_pkts += rte_eth_tx_burst (device_index,
+ (uint16_t) queue_id,
+ &pkts_deq[n_pkts],
+ (uint16_t) (pkts_deq_len - n_pkts));
+ }
+
+ /* Advance to next device */
+ dev_pos++;
+ }
+}
+
+void
+dpdk_hqos_thread (vlib_worker_thread_t * w)
+{
+ vlib_main_t *vm;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ dpdk_main_t *dm = &dpdk_main;
+
+ vm = vlib_get_main ();
+
+ ASSERT (vm->cpu_index == os_get_cpu_number ());
+
+ clib_time_init (&vm->clib_time);
+ clib_mem_set_heap (w->thread_mheap);
+
+ /* Wait until the dpdk init sequence is complete */
+ while (tm->worker_thread_release == 0)
+ vlib_worker_thread_barrier_check ();
+
+  if (vec_len (dm->devices_by_hqos_cpu[vm->cpu_index]) == 0)
+    {
+      clib_error
+	("current I/O TX thread does not have any devices assigned to it");
+      return;
+    }
+
+ if (DPDK_HQOS_DBG_BYPASS)
+ dpdk_hqos_thread_internal_hqos_dbg_bypass (vm);
+ else
+ dpdk_hqos_thread_internal (vm);
+}
+
+void
+dpdk_hqos_thread_fn (void *arg)
+{
+ vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
+ vlib_worker_thread_init (w);
+ dpdk_hqos_thread (w);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_THREAD (hqos_thread_reg, static) =
+{
+ .name = "hqos-threads",
+ .short_name = "hqos-threads",
+ .function = dpdk_hqos_thread_fn,
+};
+/* *INDENT-ON* */
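+
+/*
+ * Note: with the registration above in place, HQoS threads are normally
+ * provisioned from the "cpu" startup section, e.g. (illustrative):
+ *
+ *   cpu { corelist-hqos-threads 2-3 }
+ */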
+
+/*
+ * HQoS run-time code to be called by the worker threads
+ */
+#define BITFIELD(byte_array, slab_pos, slab_mask, slab_shr) \
+({ \
+ u64 slab = *((u64 *) &byte_array[slab_pos]); \
+ u64 val = (rte_be_to_cpu_64(slab) & slab_mask) >> slab_shr; \
+ val; \
+})
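+
+/*
+ * Illustrative example: BITFIELD (data, 8, 0x00000000003F0000, 16) loads
+ * the 8 bytes at packet offset 8, converts them from network to host byte
+ * order and returns the 6-bit field at bit offset 16 of that slab.  Note
+ * that dpdk_port_setup_hqos() derives each slabshr as
+ * __builtin_ctzll (slabmask), so extracted fields are always right-aligned.
+ */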
+
+#define RTE_SCHED_PORT_HIERARCHY(subport, pipe, traffic_class, queue, color) \
+ ((((u64) (queue)) & 0x3) | \
+ ((((u64) (traffic_class)) & 0x3) << 2) | \
+ ((((u64) (color)) & 0x3) << 4) | \
+ ((((u64) (subport)) & 0xFFFF) << 16) | \
+ ((((u64) (pipe)) & 0xFFFFFFFF) << 32))
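+
+/*
+ * Resulting 64-bit layout, as packed above: bits 0..1 queue, bits 2..3
+ * traffic class, bits 4..5 color, bits 16..31 subport, bits 32..63 pipe.
+ * dpdk_hqos_metadata_set() below splits this value into
+ * mbuf->hash.sched.lo/hi.
+ */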
+
+void
+dpdk_hqos_metadata_set (dpdk_device_hqos_per_worker_thread_t * hqos,
+ struct rte_mbuf **pkts, u32 n_pkts)
+{
+ u32 i;
+
+ for (i = 0; i < (n_pkts & (~0x3)); i += 4)
+ {
+ struct rte_mbuf *pkt0 = pkts[i];
+ struct rte_mbuf *pkt1 = pkts[i + 1];
+ struct rte_mbuf *pkt2 = pkts[i + 2];
+ struct rte_mbuf *pkt3 = pkts[i + 3];
+
+ u8 *pkt0_data = rte_pktmbuf_mtod (pkt0, u8 *);
+ u8 *pkt1_data = rte_pktmbuf_mtod (pkt1, u8 *);
+ u8 *pkt2_data = rte_pktmbuf_mtod (pkt2, u8 *);
+ u8 *pkt3_data = rte_pktmbuf_mtod (pkt3, u8 *);
+
+ u64 pkt0_subport = BITFIELD (pkt0_data, hqos->hqos_field0_slabpos,
+ hqos->hqos_field0_slabmask,
+ hqos->hqos_field0_slabshr);
+ u64 pkt0_pipe = BITFIELD (pkt0_data, hqos->hqos_field1_slabpos,
+ hqos->hqos_field1_slabmask,
+ hqos->hqos_field1_slabshr);
+ u64 pkt0_dscp = BITFIELD (pkt0_data, hqos->hqos_field2_slabpos,
+ hqos->hqos_field2_slabmask,
+ hqos->hqos_field2_slabshr);
+ u32 pkt0_tc = hqos->hqos_tc_table[pkt0_dscp & 0x3F] >> 2;
+ u32 pkt0_tc_q = hqos->hqos_tc_table[pkt0_dscp & 0x3F] & 0x3;
+
+ u64 pkt1_subport = BITFIELD (pkt1_data, hqos->hqos_field0_slabpos,
+ hqos->hqos_field0_slabmask,
+ hqos->hqos_field0_slabshr);
+ u64 pkt1_pipe = BITFIELD (pkt1_data, hqos->hqos_field1_slabpos,
+ hqos->hqos_field1_slabmask,
+ hqos->hqos_field1_slabshr);
+ u64 pkt1_dscp = BITFIELD (pkt1_data, hqos->hqos_field2_slabpos,
+ hqos->hqos_field2_slabmask,
+ hqos->hqos_field2_slabshr);
+ u32 pkt1_tc = hqos->hqos_tc_table[pkt1_dscp & 0x3F] >> 2;
+ u32 pkt1_tc_q = hqos->hqos_tc_table[pkt1_dscp & 0x3F] & 0x3;
+
+ u64 pkt2_subport = BITFIELD (pkt2_data, hqos->hqos_field0_slabpos,
+ hqos->hqos_field0_slabmask,
+ hqos->hqos_field0_slabshr);
+ u64 pkt2_pipe = BITFIELD (pkt2_data, hqos->hqos_field1_slabpos,
+ hqos->hqos_field1_slabmask,
+ hqos->hqos_field1_slabshr);
+ u64 pkt2_dscp = BITFIELD (pkt2_data, hqos->hqos_field2_slabpos,
+ hqos->hqos_field2_slabmask,
+ hqos->hqos_field2_slabshr);
+ u32 pkt2_tc = hqos->hqos_tc_table[pkt2_dscp & 0x3F] >> 2;
+ u32 pkt2_tc_q = hqos->hqos_tc_table[pkt2_dscp & 0x3F] & 0x3;
+
+ u64 pkt3_subport = BITFIELD (pkt3_data, hqos->hqos_field0_slabpos,
+ hqos->hqos_field0_slabmask,
+ hqos->hqos_field0_slabshr);
+ u64 pkt3_pipe = BITFIELD (pkt3_data, hqos->hqos_field1_slabpos,
+ hqos->hqos_field1_slabmask,
+ hqos->hqos_field1_slabshr);
+ u64 pkt3_dscp = BITFIELD (pkt3_data, hqos->hqos_field2_slabpos,
+ hqos->hqos_field2_slabmask,
+ hqos->hqos_field2_slabshr);
+ u32 pkt3_tc = hqos->hqos_tc_table[pkt3_dscp & 0x3F] >> 2;
+ u32 pkt3_tc_q = hqos->hqos_tc_table[pkt3_dscp & 0x3F] & 0x3;
+
+ u64 pkt0_sched = RTE_SCHED_PORT_HIERARCHY (pkt0_subport,
+ pkt0_pipe,
+ pkt0_tc,
+ pkt0_tc_q,
+ 0);
+ u64 pkt1_sched = RTE_SCHED_PORT_HIERARCHY (pkt1_subport,
+ pkt1_pipe,
+ pkt1_tc,
+ pkt1_tc_q,
+ 0);
+ u64 pkt2_sched = RTE_SCHED_PORT_HIERARCHY (pkt2_subport,
+ pkt2_pipe,
+ pkt2_tc,
+ pkt2_tc_q,
+ 0);
+ u64 pkt3_sched = RTE_SCHED_PORT_HIERARCHY (pkt3_subport,
+ pkt3_pipe,
+ pkt3_tc,
+ pkt3_tc_q,
+ 0);
+
+ pkt0->hash.sched.lo = pkt0_sched & 0xFFFFFFFF;
+ pkt0->hash.sched.hi = pkt0_sched >> 32;
+ pkt1->hash.sched.lo = pkt1_sched & 0xFFFFFFFF;
+ pkt1->hash.sched.hi = pkt1_sched >> 32;
+ pkt2->hash.sched.lo = pkt2_sched & 0xFFFFFFFF;
+ pkt2->hash.sched.hi = pkt2_sched >> 32;
+ pkt3->hash.sched.lo = pkt3_sched & 0xFFFFFFFF;
+ pkt3->hash.sched.hi = pkt3_sched >> 32;
+ }
+
+ for (; i < n_pkts; i++)
+ {
+ struct rte_mbuf *pkt = pkts[i];
+
+ u8 *pkt_data = rte_pktmbuf_mtod (pkt, u8 *);
+
+ u64 pkt_subport = BITFIELD (pkt_data, hqos->hqos_field0_slabpos,
+ hqos->hqos_field0_slabmask,
+ hqos->hqos_field0_slabshr);
+ u64 pkt_pipe = BITFIELD (pkt_data, hqos->hqos_field1_slabpos,
+ hqos->hqos_field1_slabmask,
+ hqos->hqos_field1_slabshr);
+ u64 pkt_dscp = BITFIELD (pkt_data, hqos->hqos_field2_slabpos,
+ hqos->hqos_field2_slabmask,
+ hqos->hqos_field2_slabshr);
+ u32 pkt_tc = hqos->hqos_tc_table[pkt_dscp & 0x3F] >> 2;
+ u32 pkt_tc_q = hqos->hqos_tc_table[pkt_dscp & 0x3F] & 0x3;
+
+ u64 pkt_sched = RTE_SCHED_PORT_HIERARCHY (pkt_subport,
+ pkt_pipe,
+ pkt_tc,
+ pkt_tc_q,
+ 0);
+
+ pkt->hash.sched.lo = pkt_sched & 0xFFFFFFFF;
+ pkt->hash.sched.hi = pkt_sched >> 32;
+ }
+}
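+
+/*
+ * Note: the 64-bit sched value written above is what
+ * rte_sched_port_enqueue() later uses to classify each packet into its
+ * (subport, pipe, traffic class, queue), so this function must run on
+ * every packet handed to the HQoS thread over the SWQs.
+ */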
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/dpdk/init.c b/src/vnet/devices/dpdk/init.c
new file mode 100755
index 00000000000..693ca985130
--- /dev/null
+++ b/src/vnet/devices/dpdk/init.c
@@ -0,0 +1,1803 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/vnet.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/error.h>
+#include <vppinfra/format.h>
+#include <vppinfra/bitmap.h>
+
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/dpdk/dpdk.h>
+#include <vlib/unix/physmem.h>
+#include <vlib/pci/pci.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "dpdk_priv.h"
+
+dpdk_main_t dpdk_main;
+
+/* force linker to link functions used by vlib and declared weak */
+void *vlib_weakly_linked_functions[] = {
+ &rte_pktmbuf_init,
+ &rte_pktmbuf_pool_init,
+};
+
+#define LINK_STATE_ELOGS 0
+
+#define DEFAULT_HUGE_DIR "/run/vpp/hugepages"
+#define VPP_RUN_DIR "/run/vpp"
+
+/* Port configuration, mildly modified Intel app values */
+
+static struct rte_eth_conf port_conf_template = {
+ .rxmode = {
+ .split_hdr_size = 0,
+ .header_split = 0, /**< Header Split disabled */
+ .hw_ip_checksum = 0, /**< IP checksum offload disabled */
+ .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+    .hw_strip_crc = 0,		/**< CRC stripping by hardware disabled */
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,
+ },
+};
+
+clib_error_t *
+dpdk_port_setup (dpdk_main_t * dm, dpdk_device_t * xd)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_buffer_main_t *bm = vm->buffer_main;
+ int rv;
+ int j;
+
+ ASSERT (os_get_cpu_number () == 0);
+
+ if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
+ {
+ vnet_hw_interface_set_flags (dm->vnet_main, xd->vlib_hw_if_index, 0);
+ rte_eth_dev_stop (xd->device_index);
+ }
+
+ rv = rte_eth_dev_configure (xd->device_index, xd->rx_q_used,
+ xd->tx_q_used, &xd->port_conf);
+
+ if (rv < 0)
+ return clib_error_return (0, "rte_eth_dev_configure[%d]: err %d",
+ xd->device_index, rv);
+
+ /* Set up one TX-queue per worker thread */
+ for (j = 0; j < xd->tx_q_used; j++)
+ {
+ rv = rte_eth_tx_queue_setup (xd->device_index, j, xd->nb_tx_desc,
+ xd->cpu_socket, &xd->tx_conf);
+
+ /* retry with any other CPU socket */
+ if (rv < 0)
+ rv = rte_eth_tx_queue_setup (xd->device_index, j, xd->nb_tx_desc,
+ SOCKET_ID_ANY, &xd->tx_conf);
+ if (rv < 0)
+ break;
+ }
+
+ if (rv < 0)
+ return clib_error_return (0, "rte_eth_tx_queue_setup[%d]: err %d",
+ xd->device_index, rv);
+
+ for (j = 0; j < xd->rx_q_used; j++)
+ {
+
+ rv = rte_eth_rx_queue_setup (xd->device_index, j, xd->nb_rx_desc,
+ xd->cpu_socket, 0,
+ bm->
+ pktmbuf_pools[xd->cpu_socket_id_by_queue
+ [j]]);
+
+ /* retry with any other CPU socket */
+ if (rv < 0)
+ rv = rte_eth_rx_queue_setup (xd->device_index, j, xd->nb_rx_desc,
+ SOCKET_ID_ANY, 0,
+ bm->
+ pktmbuf_pools[xd->cpu_socket_id_by_queue
+ [j]]);
+ if (rv < 0)
+ return clib_error_return (0, "rte_eth_rx_queue_setup[%d]: err %d",
+ xd->device_index, rv);
+ }
+
+ if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
+ {
+ int rv;
+ rv = rte_eth_dev_start (xd->device_index);
+ if (rv < 0)
+ clib_warning ("rte_eth_dev_start %d returned %d",
+ xd->device_index, rv);
+ }
+ return 0;
+}
+
+static u32
+dpdk_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi, u32 flags)
+{
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);
+ u32 old = 0;
+
+ if (ETHERNET_INTERFACE_FLAG_CONFIG_PROMISC (flags))
+ {
+ old = (xd->flags & DPDK_DEVICE_FLAG_PROMISC) != 0;
+
+ if (flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
+ xd->flags |= DPDK_DEVICE_FLAG_PROMISC;
+ else
+ xd->flags &= ~DPDK_DEVICE_FLAG_PROMISC;
+
+ if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
+ {
+ if (xd->flags & DPDK_DEVICE_FLAG_PROMISC)
+ rte_eth_promiscuous_enable (xd->device_index);
+ else
+ rte_eth_promiscuous_disable (xd->device_index);
+ }
+ }
+ else if (ETHERNET_INTERFACE_FLAG_CONFIG_MTU (flags))
+ {
+ /*
+ * DAW-FIXME: The Cisco VIC firmware does not provide an api for a
+ * driver to dynamically change the mtu. If/when the
+ * VIC firmware gets fixed, then this should be removed.
+ */
+ if (xd->pmd == VNET_DPDK_PMD_ENIC)
+ {
+ struct rte_eth_dev_info dev_info;
+
+ /*
+ * Restore mtu to what has been set by CIMC in the firmware cfg.
+ */
+ rte_eth_dev_info_get (xd->device_index, &dev_info);
+ hi->max_packet_bytes = dev_info.max_rx_pktlen;
+
+ vlib_cli_output (vlib_get_main (),
+ "Cisco VIC mtu can only be changed "
+ "using CIMC then rebooting the server!");
+ }
+ else
+ {
+ int rv;
+
+ xd->port_conf.rxmode.max_rx_pkt_len = hi->max_packet_bytes;
+
+ if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
+ rte_eth_dev_stop (xd->device_index);
+
+ rv = rte_eth_dev_configure
+ (xd->device_index, xd->rx_q_used, xd->tx_q_used, &xd->port_conf);
+
+ if (rv < 0)
+ vlib_cli_output (vlib_get_main (),
+ "rte_eth_dev_configure[%d]: err %d",
+ xd->device_index, rv);
+
+ rte_eth_dev_set_mtu (xd->device_index, hi->max_packet_bytes);
+
+ if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
+ {
+ int rv = rte_eth_dev_start (xd->device_index);
+ if (rv < 0)
+ clib_warning ("rte_eth_dev_start %d returned %d",
+ xd->device_index, rv);
+ }
+ }
+ }
+ return old;
+}
+
+void
+dpdk_device_lock_init (dpdk_device_t * xd)
+{
+ int q;
+ vec_validate (xd->lockp, xd->tx_q_used - 1);
+ for (q = 0; q < xd->tx_q_used; q++)
+ {
+ xd->lockp[q] = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+ CLIB_CACHE_LINE_BYTES);
+ memset ((void *) xd->lockp[q], 0, CLIB_CACHE_LINE_BYTES);
+ }
+}
+
+void
+dpdk_device_lock_free (dpdk_device_t * xd)
+{
+ int q;
+
+ for (q = 0; q < vec_len (xd->lockp); q++)
+ clib_mem_free ((void *) xd->lockp[q]);
+ vec_free (xd->lockp);
+ xd->lockp = 0;
+}
+
+static clib_error_t *
+dpdk_lib_init (dpdk_main_t * dm)
+{
+ u32 nports;
+ u32 nb_desc = 0;
+ int i;
+ clib_error_t *error;
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ vnet_sw_interface_t *sw;
+ vnet_hw_interface_t *hi;
+ dpdk_device_t *xd;
+ vlib_pci_addr_t last_pci_addr;
+ u32 last_pci_addr_port = 0;
+ vlib_thread_registration_t *tr, *tr_hqos;
+ uword *p, *p_hqos;
+
+ u32 next_cpu = 0, next_hqos_cpu = 0;
+ u8 af_packet_port_id = 0;
+ last_pci_addr.as_u32 = ~0;
+
+ dm->input_cpu_first_index = 0;
+ dm->input_cpu_count = 1;
+
+ /* find out which cpus will be used for input */
+ p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+ tr = p ? (vlib_thread_registration_t *) p[0] : 0;
+
+ if (tr && tr->count > 0)
+ {
+ dm->input_cpu_first_index = tr->first_index;
+ dm->input_cpu_count = tr->count;
+ }
+
+ vec_validate_aligned (dm->devices_by_cpu, tm->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ vec_validate_aligned (dm->workers, tm->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ dm->hqos_cpu_first_index = 0;
+ dm->hqos_cpu_count = 0;
+
+ /* find out which cpus will be used for I/O TX */
+ p_hqos = hash_get_mem (tm->thread_registrations_by_name, "hqos-threads");
+ tr_hqos = p_hqos ? (vlib_thread_registration_t *) p_hqos[0] : 0;
+
+ if (tr_hqos && tr_hqos->count > 0)
+ {
+ dm->hqos_cpu_first_index = tr_hqos->first_index;
+ dm->hqos_cpu_count = tr_hqos->count;
+ }
+
+ vec_validate_aligned (dm->devices_by_hqos_cpu, tm->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ vec_validate_aligned (dm->hqos_threads, tm->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ nports = rte_eth_dev_count ();
+ if (nports < 1)
+ {
+ clib_warning ("DPDK drivers found no ports...");
+ }
+
+ if (CLIB_DEBUG > 0)
+ clib_warning ("DPDK drivers found %d ports...", nports);
+
+  /*
+   * All buffers are allocated from the same rte_mempool,
+   * so they all have the same number of data bytes.
+   */
+ dm->vlib_buffer_free_list_index =
+ vlib_buffer_get_or_create_free_list (vm,
+ VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES,
+ "dpdk rx");
+
+ if (dm->conf->enable_tcp_udp_checksum)
+ dm->buffer_flags_template &= ~(IP_BUFFER_L4_CHECKSUM_CORRECT
+ | IP_BUFFER_L4_CHECKSUM_COMPUTED);
+
+ for (i = 0; i < nports; i++)
+ {
+ u8 addr[6];
+ u8 vlan_strip = 0;
+ int j;
+ struct rte_eth_dev_info dev_info;
+ clib_error_t *rv;
+ struct rte_eth_link l;
+ dpdk_device_config_t *devconf = 0;
+ vlib_pci_addr_t pci_addr;
+ uword *p = 0;
+
+ rte_eth_dev_info_get (i, &dev_info);
+ if (dev_info.pci_dev) /* bonded interface has no pci info */
+ {
+ pci_addr.domain = dev_info.pci_dev->addr.domain;
+ pci_addr.bus = dev_info.pci_dev->addr.bus;
+ pci_addr.slot = dev_info.pci_dev->addr.devid;
+ pci_addr.function = dev_info.pci_dev->addr.function;
+ p =
+ hash_get (dm->conf->device_config_index_by_pci_addr,
+ pci_addr.as_u32);
+ }
+
+ if (p)
+ devconf = pool_elt_at_index (dm->conf->dev_confs, p[0]);
+ else
+ devconf = &dm->conf->default_devconf;
+
+ /* Create vnet interface */
+ vec_add2_aligned (dm->devices, xd, 1, CLIB_CACHE_LINE_BYTES);
+ xd->nb_rx_desc = DPDK_NB_RX_DESC_DEFAULT;
+ xd->nb_tx_desc = DPDK_NB_TX_DESC_DEFAULT;
+ xd->cpu_socket = (i8) rte_eth_dev_socket_id (i);
+
+      /* Handle interface naming for devices with multiple ports sharing the same PCI ID */
+ if (dev_info.pci_dev)
+ {
+ struct rte_eth_dev_info di = { 0 };
+ rte_eth_dev_info_get (i + 1, &di);
+ if (di.pci_dev && pci_addr.as_u32 != last_pci_addr.as_u32 &&
+ memcmp (&dev_info.pci_dev->addr, &di.pci_dev->addr,
+ sizeof (struct rte_pci_addr)) == 0)
+ {
+ xd->interface_name_suffix = format (0, "0");
+ last_pci_addr.as_u32 = pci_addr.as_u32;
+ last_pci_addr_port = i;
+ }
+ else if (pci_addr.as_u32 == last_pci_addr.as_u32)
+ {
+ xd->interface_name_suffix =
+ format (0, "%u", i - last_pci_addr_port);
+ }
+ else
+ {
+ last_pci_addr.as_u32 = ~0;
+ }
+ }
+ else
+ last_pci_addr.as_u32 = ~0;
+
+ clib_memcpy (&xd->tx_conf, &dev_info.default_txconf,
+ sizeof (struct rte_eth_txconf));
+ if (dm->conf->no_multi_seg)
+ {
+ xd->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+ port_conf_template.rxmode.jumbo_frame = 0;
+ }
+ else
+ {
+ xd->tx_conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS;
+ port_conf_template.rxmode.jumbo_frame = 1;
+ xd->flags |= DPDK_DEVICE_FLAG_MAYBE_MULTISEG;
+ }
+
+ clib_memcpy (&xd->port_conf, &port_conf_template,
+ sizeof (struct rte_eth_conf));
+
+ xd->tx_q_used = clib_min (dev_info.max_tx_queues, tm->n_vlib_mains);
+
+ if (devconf->num_tx_queues > 0
+ && devconf->num_tx_queues < xd->tx_q_used)
+ xd->tx_q_used = clib_min (xd->tx_q_used, devconf->num_tx_queues);
+
+ if (devconf->num_rx_queues > 1 && dm->use_rss == 0)
+ {
+ dm->use_rss = 1;
+ }
+
+ if (devconf->num_rx_queues > 1
+ && dev_info.max_rx_queues >= devconf->num_rx_queues)
+ {
+ xd->rx_q_used = devconf->num_rx_queues;
+ xd->port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+ if (devconf->rss_fn == 0)
+ xd->port_conf.rx_adv_conf.rss_conf.rss_hf =
+ ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP;
+ else
+ xd->port_conf.rx_adv_conf.rss_conf.rss_hf = devconf->rss_fn;
+ }
+ else
+ xd->rx_q_used = 1;
+
+ xd->flags |= DPDK_DEVICE_FLAG_PMD;
+
+ /* workaround for drivers not setting driver_name */
+ if ((!dev_info.driver_name) && (dev_info.pci_dev))
+#if RTE_VERSION < RTE_VERSION_NUM(16, 11, 0, 0)
+ dev_info.driver_name = dev_info.pci_dev->driver->name;
+#else
+ dev_info.driver_name = dev_info.pci_dev->driver->driver.name;
+#endif
+ ASSERT (dev_info.driver_name);
+
+ if (!xd->pmd)
+ {
+
+
+#define _(s,f) else if (dev_info.driver_name && \
+ !strcmp(dev_info.driver_name, s)) \
+ xd->pmd = VNET_DPDK_PMD_##f;
+ if (0)
+ ;
+ foreach_dpdk_pmd
+#undef _
+ else
+ xd->pmd = VNET_DPDK_PMD_UNKNOWN;
+
+ xd->port_type = VNET_DPDK_PORT_TYPE_UNKNOWN;
+ xd->nb_rx_desc = DPDK_NB_RX_DESC_DEFAULT;
+ xd->nb_tx_desc = DPDK_NB_TX_DESC_DEFAULT;
+
+ switch (xd->pmd)
+ {
+ /* 1G adapters */
+ case VNET_DPDK_PMD_E1000EM:
+ case VNET_DPDK_PMD_IGB:
+ case VNET_DPDK_PMD_IGBVF:
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_1G;
+ break;
+
+ /* 10G adapters */
+ case VNET_DPDK_PMD_IXGBE:
+ case VNET_DPDK_PMD_IXGBEVF:
+ case VNET_DPDK_PMD_THUNDERX:
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G;
+ break;
+ case VNET_DPDK_PMD_DPAA2:
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G;
+ break;
+
+ /* Cisco VIC */
+ case VNET_DPDK_PMD_ENIC:
+ rte_eth_link_get_nowait (i, &l);
+ xd->flags |= DPDK_DEVICE_FLAG_PMD_SUPPORTS_PTYPE;
+ if (l.link_speed == 40000)
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_40G;
+ else
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G;
+ break;
+
+ /* Intel Fortville */
+ case VNET_DPDK_PMD_I40E:
+ case VNET_DPDK_PMD_I40EVF:
+ xd->flags |= DPDK_DEVICE_FLAG_PMD_SUPPORTS_PTYPE;
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_40G;
+
+ switch (dev_info.pci_dev->id.device_id)
+ {
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_SFP_XL710:
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G;
+ break;
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_40G;
+ break;
+ case I40E_DEV_ID_VF:
+ rte_eth_link_get_nowait (i, &l);
+ xd->port_type = l.link_speed == 10000 ?
+ VNET_DPDK_PORT_TYPE_ETH_10G : VNET_DPDK_PORT_TYPE_ETH_40G;
+ break;
+ default:
+ xd->port_type = VNET_DPDK_PORT_TYPE_UNKNOWN;
+ }
+ break;
+
+ case VNET_DPDK_PMD_CXGBE:
+ switch (dev_info.pci_dev->id.device_id)
+ {
+ case 0x540d: /* T580-CR */
+ case 0x5410: /* T580-LP-cr */
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_40G;
+ break;
+ case 0x5403: /* T540-CR */
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G;
+ break;
+ default:
+ xd->port_type = VNET_DPDK_PORT_TYPE_UNKNOWN;
+ }
+ break;
+
+ case VNET_DPDK_PMD_MLX5:
+ {
+ char *pn_100g[] = { "MCX415A-CCAT", "MCX416A-CCAT", 0 };
+ char *pn_40g[] = { "MCX413A-BCAT", "MCX414A-BCAT",
+ "MCX415A-BCAT", "MCX416A-BCAT", "MCX4131A-BCAT", 0
+ };
+ char *pn_10g[] = { "MCX4111A-XCAT", "MCX4121A-XCAT", 0 };
+
+ vlib_pci_device_t *pd = vlib_get_pci_device (&pci_addr);
+ u8 *pn = 0;
+ char **c;
+ int found = 0;
+ pn = format (0, "%U%c",
+ format_vlib_pci_vpd, pd->vpd_r, "PN", 0);
+
+ if (!pn)
+ break;
+
+ c = pn_100g;
+ while (!found && c[0])
+ {
+ if (strncmp ((char *) pn, c[0], strlen (c[0])) == 0)
+ {
+		    xd->port_type = VNET_DPDK_PORT_TYPE_ETH_100G;
+		    found = 1;
+		    break;
+ }
+ c++;
+ }
+
+ c = pn_40g;
+ while (!found && c[0])
+ {
+ if (strncmp ((char *) pn, c[0], strlen (c[0])) == 0)
+ {
+		    xd->port_type = VNET_DPDK_PORT_TYPE_ETH_40G;
+		    found = 1;
+		    break;
+ }
+ c++;
+ }
+
+ c = pn_10g;
+ while (!found && c[0])
+ {
+ if (strncmp ((char *) pn, c[0], strlen (c[0])) == 0)
+ {
+		    xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G;
+		    found = 1;
+		    break;
+ }
+ c++;
+ }
+
+ vec_free (pn);
+ }
+
+ break;
+ /* Intel Red Rock Canyon */
+ case VNET_DPDK_PMD_FM10K:
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_SWITCH;
+ break;
+
+ /* virtio */
+ case VNET_DPDK_PMD_VIRTIO:
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_1G;
+ xd->nb_rx_desc = DPDK_NB_RX_DESC_VIRTIO;
+ xd->nb_tx_desc = DPDK_NB_TX_DESC_VIRTIO;
+ break;
+
+ /* vmxnet3 */
+ case VNET_DPDK_PMD_VMXNET3:
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_1G;
+ xd->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+ break;
+
+ case VNET_DPDK_PMD_AF_PACKET:
+ xd->port_type = VNET_DPDK_PORT_TYPE_AF_PACKET;
+ xd->af_packet_port_id = af_packet_port_id++;
+ break;
+
+ case VNET_DPDK_PMD_BOND:
+ xd->flags |= DPDK_DEVICE_FLAG_PMD_SUPPORTS_PTYPE;
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_BOND;
+ break;
+
+ default:
+ xd->port_type = VNET_DPDK_PORT_TYPE_UNKNOWN;
+ }
+
+ if (devconf->num_rx_desc)
+ xd->nb_rx_desc = devconf->num_rx_desc;
+
+ if (devconf->num_tx_desc)
+ xd->nb_tx_desc = devconf->num_tx_desc;
+ }
+
+ /*
+ * Ensure default mtu is not > the mtu read from the hardware.
+ * Otherwise rte_eth_dev_configure() will fail and the port will
+ * not be available.
+ */
+ if (ETHERNET_MAX_PACKET_BYTES > dev_info.max_rx_pktlen)
+ {
+	  /*
+	   * This device does not support the platform's max frame
+	   * size. Use its advertised MRU instead.
+	   */
+ xd->port_conf.rxmode.max_rx_pkt_len = dev_info.max_rx_pktlen;
+ }
+ else
+ {
+ xd->port_conf.rxmode.max_rx_pkt_len = ETHERNET_MAX_PACKET_BYTES;
+
+ /*
+ * Some platforms do not account for Ethernet FCS (4 bytes) in
+ * MTU calculations. To interop with them increase mru but only
+ * if the device's settings can support it.
+ */
+ if ((dev_info.max_rx_pktlen >= (ETHERNET_MAX_PACKET_BYTES + 4)) &&
+ xd->port_conf.rxmode.hw_strip_crc)
+ {
+ /*
+ * Allow additional 4 bytes (for Ethernet FCS). These bytes are
+ * stripped by h/w and so will not consume any buffer memory.
+ */
+ xd->port_conf.rxmode.max_rx_pkt_len += 4;
+ }
+ }
+
+ if (xd->pmd == VNET_DPDK_PMD_AF_PACKET)
+ {
+ f64 now = vlib_time_now (vm);
+ u32 rnd;
+ rnd = (u32) (now * 1e6);
+ rnd = random_u32 (&rnd);
+ clib_memcpy (addr + 2, &rnd, sizeof (rnd));
+ addr[0] = 2;
+ addr[1] = 0xfe;
+ }
+ else
+ rte_eth_macaddr_get (i, (struct ether_addr *) addr);
+
+ if (xd->tx_q_used < tm->n_vlib_mains)
+ dpdk_device_lock_init (xd);
+
+ xd->device_index = xd - dm->devices;
+ ASSERT (i == xd->device_index);
+ xd->per_interface_next_index = ~0;
+
+ /* assign interface to input thread */
+ dpdk_device_and_queue_t *dq;
+ int q;
+
+ if (devconf->workers)
+ {
+ int i;
+ q = 0;
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (i, devconf->workers, ({
+ int cpu = dm->input_cpu_first_index + i;
+ unsigned lcore = vlib_worker_threads[cpu].lcore_id;
+ vec_validate(xd->cpu_socket_id_by_queue, q);
+ xd->cpu_socket_id_by_queue[q] = rte_lcore_to_socket_id(lcore);
+ vec_add2(dm->devices_by_cpu[cpu], dq, 1);
+ dq->device = xd->device_index;
+ dq->queue_id = q++;
+ }));
+ /* *INDENT-ON* */
+ }
+ else
+ for (q = 0; q < xd->rx_q_used; q++)
+ {
+ int cpu = dm->input_cpu_first_index + next_cpu;
+ unsigned lcore = vlib_worker_threads[cpu].lcore_id;
+
+	    /*
+	     * numa node of the worker thread handling this queue;
+	     * needed for taking buffers from the right mempool
+	     */
+ vec_validate (xd->cpu_socket_id_by_queue, q);
+ xd->cpu_socket_id_by_queue[q] = rte_lcore_to_socket_id (lcore);
+
+ /*
+ * construct vector of (device,queue) pairs for each worker thread
+ */
+ vec_add2 (dm->devices_by_cpu[cpu], dq, 1);
+ dq->device = xd->device_index;
+ dq->queue_id = q;
+
+ next_cpu++;
+ if (next_cpu == dm->input_cpu_count)
+ next_cpu = 0;
+ }
+
+
+ if (devconf->hqos_enabled)
+ {
+ xd->flags |= DPDK_DEVICE_FLAG_HQOS;
+
+ if (devconf->hqos.hqos_thread_valid)
+ {
+ int cpu = dm->hqos_cpu_first_index + devconf->hqos.hqos_thread;
+
+ if (devconf->hqos.hqos_thread >= dm->hqos_cpu_count)
+ return clib_error_return (0, "invalid HQoS thread index");
+
+ vec_add2 (dm->devices_by_hqos_cpu[cpu], dq, 1);
+ dq->device = xd->device_index;
+ dq->queue_id = 0;
+ }
+ else
+ {
+ int cpu = dm->hqos_cpu_first_index + next_hqos_cpu;
+
+ if (dm->hqos_cpu_count == 0)
+ return clib_error_return (0, "no HQoS threads available");
+
+ vec_add2 (dm->devices_by_hqos_cpu[cpu], dq, 1);
+ dq->device = xd->device_index;
+ dq->queue_id = 0;
+
+ next_hqos_cpu++;
+ if (next_hqos_cpu == dm->hqos_cpu_count)
+ next_hqos_cpu = 0;
+
+ devconf->hqos.hqos_thread_valid = 1;
+ devconf->hqos.hqos_thread = cpu;
+ }
+ }
+
+ vec_validate_aligned (xd->tx_vectors, tm->n_vlib_mains,
+ CLIB_CACHE_LINE_BYTES);
+ for (j = 0; j < tm->n_vlib_mains; j++)
+ {
+ vec_validate_ha (xd->tx_vectors[j], xd->nb_tx_desc,
+ sizeof (tx_ring_hdr_t), CLIB_CACHE_LINE_BYTES);
+ vec_reset_length (xd->tx_vectors[j]);
+ }
+
+ vec_validate_aligned (xd->rx_vectors, xd->rx_q_used,
+ CLIB_CACHE_LINE_BYTES);
+ for (j = 0; j < xd->rx_q_used; j++)
+ {
+ vec_validate_aligned (xd->rx_vectors[j], VLIB_FRAME_SIZE - 1,
+ CLIB_CACHE_LINE_BYTES);
+ vec_reset_length (xd->rx_vectors[j]);
+ }
+
+ vec_validate_aligned (xd->d_trace_buffers, tm->n_vlib_mains,
+ CLIB_CACHE_LINE_BYTES);
+
+ rv = dpdk_port_setup (dm, xd);
+
+ if (rv)
+ return rv;
+
+ if (devconf->hqos_enabled)
+ {
+ rv = dpdk_port_setup_hqos (xd, &devconf->hqos);
+ if (rv)
+ return rv;
+ }
+
+ /* count the number of descriptors used for this device */
+ nb_desc += xd->nb_rx_desc + xd->nb_tx_desc * xd->tx_q_used;
+
+ error = ethernet_register_interface
+ (dm->vnet_main, dpdk_device_class.index, xd->device_index,
+ /* ethernet address */ addr,
+ &xd->vlib_hw_if_index, dpdk_flag_change);
+ if (error)
+ return error;
+
+ sw = vnet_get_hw_sw_interface (dm->vnet_main, xd->vlib_hw_if_index);
+ xd->vlib_sw_if_index = sw->sw_if_index;
+ hi = vnet_get_hw_interface (dm->vnet_main, xd->vlib_hw_if_index);
+
+ /*
+ * DAW-FIXME: The Cisco VIC firmware does not provide an api for a
+ * driver to dynamically change the mtu. If/when the
+ * VIC firmware gets fixed, then this should be removed.
+ */
+ if (xd->pmd == VNET_DPDK_PMD_ENIC)
+ {
+ /*
+ * Initialize mtu to what has been set by CIMC in the firmware cfg.
+ */
+ hi->max_packet_bytes = dev_info.max_rx_pktlen;
+ if (devconf->vlan_strip_offload != DPDK_DEVICE_VLAN_STRIP_OFF)
+ vlan_strip = 1; /* remove vlan tag from VIC port by default */
+ else
+ clib_warning ("VLAN strip disabled for interface\n");
+ }
+ else if (devconf->vlan_strip_offload == DPDK_DEVICE_VLAN_STRIP_ON)
+ vlan_strip = 1;
+
+ if (vlan_strip)
+ {
+ int vlan_off;
+ vlan_off = rte_eth_dev_get_vlan_offload (xd->device_index);
+ vlan_off |= ETH_VLAN_STRIP_OFFLOAD;
+ xd->port_conf.rxmode.hw_vlan_strip = vlan_off;
+ if (rte_eth_dev_set_vlan_offload (xd->device_index, vlan_off) == 0)
+ clib_warning ("VLAN strip enabled for interface\n");
+ else
+ clib_warning ("VLAN strip cannot be supported by interface\n");
+ }
+
+ hi->max_l3_packet_bytes[VLIB_RX] = hi->max_l3_packet_bytes[VLIB_TX] =
+ xd->port_conf.rxmode.max_rx_pkt_len - sizeof (ethernet_header_t);
+
+ rte_eth_dev_set_mtu (xd->device_index, hi->max_packet_bytes);
+ }
+
+ if (nb_desc > dm->conf->num_mbufs)
+ clib_warning ("%d mbufs allocated but total rx/tx ring size is %d\n",
+ dm->conf->num_mbufs, nb_desc);
+
+ return 0;
+}
+
+static void
+dpdk_bind_devices_to_uio (dpdk_config_main_t * conf)
+{
+ vlib_pci_main_t *pm = &pci_main;
+ clib_error_t *error;
+ vlib_pci_device_t *d;
+ u8 *pci_addr = 0;
+ int num_whitelisted = vec_len (conf->dev_confs);
+
+ /* *INDENT-OFF* */
+ pool_foreach (d, pm->pci_devs, ({
+ dpdk_device_config_t * devconf = 0;
+ vec_reset_length (pci_addr);
+ pci_addr = format (pci_addr, "%U%c", format_vlib_pci_addr, &d->bus_address, 0);
+
+ if (d->device_class != PCI_CLASS_NETWORK_ETHERNET)
+ continue;
+
+ if (num_whitelisted)
+ {
+ uword * p = hash_get (conf->device_config_index_by_pci_addr, d->bus_address.as_u32);
+
+ if (!p)
+ continue;
+
+ devconf = pool_elt_at_index (conf->dev_confs, p[0]);
+ }
+
+ /* virtio */
+ if (d->vendor_id == 0x1af4 && d->device_id == 0x1000)
+ ;
+ /* vmxnet3 */
+ else if (d->vendor_id == 0x15ad && d->device_id == 0x07b0)
+ ;
+ /* all Intel devices */
+ else if (d->vendor_id == 0x8086)
+ ;
+ /* Cisco VIC */
+ else if (d->vendor_id == 0x1137 && d->device_id == 0x0043)
+ ;
+ /* Chelsio T4/T5 */
+ else if (d->vendor_id == 0x1425 && (d->device_id & 0xe000) == 0x4000)
+ ;
+ else
+ {
+ clib_warning ("Unsupported Ethernet PCI device 0x%04x:0x%04x found "
+ "at PCI address %s\n", (u16) d->vendor_id, (u16) d->device_id,
+ pci_addr);
+ continue;
+ }
+
+ error = vlib_pci_bind_to_uio (d, (char *) conf->uio_driver_name);
+
+ if (error)
+ {
+ if (devconf == 0)
+ {
+ pool_get (conf->dev_confs, devconf);
+ hash_set (conf->device_config_index_by_pci_addr, d->bus_address.as_u32,
+ devconf - conf->dev_confs);
+ devconf->pci_addr.as_u32 = d->bus_address.as_u32;
+ }
+ devconf->is_blacklisted = 1;
+ clib_error_report (error);
+ }
+ }));
+ /* *INDENT-ON* */
+ vec_free (pci_addr);
+}
+
+static clib_error_t *
+dpdk_device_config (dpdk_config_main_t * conf, vlib_pci_addr_t pci_addr,
+ unformat_input_t * input, u8 is_default)
+{
+ clib_error_t *error = 0;
+ uword *p;
+ dpdk_device_config_t *devconf;
+ unformat_input_t sub_input;
+
+ if (is_default)
+ {
+ devconf = &conf->default_devconf;
+ }
+ else
+ {
+ p = hash_get (conf->device_config_index_by_pci_addr, pci_addr.as_u32);
+
+ if (!p)
+ {
+ pool_get (conf->dev_confs, devconf);
+ hash_set (conf->device_config_index_by_pci_addr, pci_addr.as_u32,
+ devconf - conf->dev_confs);
+ }
+ else
+ return clib_error_return (0,
+ "duplicate configuration for PCI address %U",
+ format_vlib_pci_addr, &pci_addr);
+ }
+
+ devconf->pci_addr.as_u32 = pci_addr.as_u32;
+ devconf->hqos_enabled = 0;
+ dpdk_device_config_hqos_default (&devconf->hqos);
+
+ if (!input)
+ return 0;
+
+ unformat_skip_white_space (input);
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "num-rx-queues %u", &devconf->num_rx_queues))
+ ;
+ else if (unformat (input, "num-tx-queues %u", &devconf->num_tx_queues))
+ ;
+ else if (unformat (input, "num-rx-desc %u", &devconf->num_rx_desc))
+ ;
+ else if (unformat (input, "num-tx-desc %u", &devconf->num_tx_desc))
+ ;
+ else if (unformat (input, "workers %U", unformat_bitmap_list,
+ &devconf->workers))
+ ;
+ else
+ if (unformat
+ (input, "rss %U", unformat_vlib_cli_sub_input, &sub_input))
+ {
+ error = unformat_rss_fn (&sub_input, &devconf->rss_fn);
+ if (error)
+ break;
+ }
+ else if (unformat (input, "vlan-strip-offload off"))
+ devconf->vlan_strip_offload = DPDK_DEVICE_VLAN_STRIP_OFF;
+ else if (unformat (input, "vlan-strip-offload on"))
+ devconf->vlan_strip_offload = DPDK_DEVICE_VLAN_STRIP_ON;
+ else
+ if (unformat
+ (input, "hqos %U", unformat_vlib_cli_sub_input, &sub_input))
+ {
+ devconf->hqos_enabled = 1;
+ error = unformat_hqos (&sub_input, &devconf->hqos);
+ if (error)
+ break;
+ }
+ else if (unformat (input, "hqos"))
+ {
+ devconf->hqos_enabled = 1;
+ }
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ break;
+ }
+ }
+
+ if (error)
+ return error;
+
+ if (devconf->workers && devconf->num_rx_queues == 0)
+ devconf->num_rx_queues = clib_bitmap_count_set_bits (devconf->workers);
+ else if (devconf->workers &&
+ clib_bitmap_count_set_bits (devconf->workers) !=
+ devconf->num_rx_queues)
+ error =
+ clib_error_return (0,
+			 "%U: number of worker threads must be "
+ "equal to number of rx queues", format_vlib_pci_addr,
+ &pci_addr);
+
+ return error;
+}
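+
+/*
+ * Example (illustrative) per-device stanza consumed by the parser above,
+ * as it would appear in the startup configuration:
+ *
+ *   dev 0000:02:00.0 {
+ *     num-rx-queues 2
+ *     num-tx-queues 2
+ *     workers 0-1
+ *     vlan-strip-offload on
+ *     hqos
+ *   }
+ */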
+
+static clib_error_t *
+dpdk_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ clib_error_t *error = 0;
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_config_main_t *conf = &dpdk_config_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ dpdk_device_config_t *devconf;
+ vlib_pci_addr_t pci_addr;
+ unformat_input_t sub_input;
+ u8 *s, *tmp = 0;
+ u8 *rte_cmd = 0, *ethname = 0;
+ u32 log_level;
+ int ret, i;
+ int num_whitelisted = 0;
+ u8 no_pci = 0;
+ u8 no_huge = 0;
+ u8 huge_dir = 0;
+ u8 file_prefix = 0;
+ u8 *socket_mem = 0;
+
+ conf->device_config_index_by_pci_addr = hash_create (0, sizeof (uword));
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ /* Prime the pump */
+ if (unformat (input, "no-hugetlb"))
+ {
+ vec_add1 (conf->eal_init_args, (u8 *) "no-huge");
+ no_huge = 1;
+ }
+
+ else if (unformat (input, "enable-tcp-udp-checksum"))
+ conf->enable_tcp_udp_checksum = 1;
+
+ else if (unformat (input, "decimal-interface-names"))
+ conf->interface_name_format_decimal = 1;
+
+ else if (unformat (input, "no-multi-seg"))
+ conf->no_multi_seg = 1;
+
+ else if (unformat (input, "dev default %U", unformat_vlib_cli_sub_input,
+ &sub_input))
+ {
+ error =
+ dpdk_device_config (conf, (vlib_pci_addr_t) (u32) ~ 1, &sub_input,
+ 1);
+
+ if (error)
+ return error;
+ }
+ else
+ if (unformat
+ (input, "dev %U %U", unformat_vlib_pci_addr, &pci_addr,
+ unformat_vlib_cli_sub_input, &sub_input))
+ {
+ error = dpdk_device_config (conf, pci_addr, &sub_input, 0);
+
+ if (error)
+ return error;
+
+ num_whitelisted++;
+ }
+ else if (unformat (input, "dev %U", unformat_vlib_pci_addr, &pci_addr))
+ {
+ error = dpdk_device_config (conf, pci_addr, 0, 0);
+
+ if (error)
+ return error;
+
+ num_whitelisted++;
+ }
+ else if (unformat (input, "num-mbufs %d", &conf->num_mbufs))
+ ;
+ else if (unformat (input, "kni %d", &conf->num_kni))
+ ;
+ else if (unformat (input, "uio-driver %s", &conf->uio_driver_name))
+ ;
+ else if (unformat (input, "socket-mem %s", &socket_mem))
+ ;
+ else if (unformat (input, "no-pci"))
+ {
+ no_pci = 1;
+ tmp = format (0, "--no-pci%c", 0);
+ vec_add1 (conf->eal_init_args, tmp);
+ }
+ else if (unformat (input, "poll-sleep %d", &dm->poll_sleep))
+ ;
+
+#define _(a) \
+ else if (unformat(input, #a)) \
+ { \
+ tmp = format (0, "--%s%c", #a, 0); \
+ vec_add1 (conf->eal_init_args, tmp); \
+ }
+ foreach_eal_double_hyphen_predicate_arg
+#undef _
+#define _(a) \
+ else if (unformat(input, #a " %s", &s)) \
+ { \
+ if (!strncmp(#a, "huge-dir", 8)) \
+ huge_dir = 1; \
+ else if (!strncmp(#a, "file-prefix", 11)) \
+ file_prefix = 1; \
+ tmp = format (0, "--%s%c", #a, 0); \
+ vec_add1 (conf->eal_init_args, tmp); \
+ vec_add1 (s, 0); \
+ vec_add1 (conf->eal_init_args, s); \
+ }
+ foreach_eal_double_hyphen_arg
+#undef _
+#define _(a,b) \
+ else if (unformat(input, #a " %s", &s)) \
+ { \
+ tmp = format (0, "-%s%c", #b, 0); \
+ vec_add1 (conf->eal_init_args, tmp); \
+ vec_add1 (s, 0); \
+ vec_add1 (conf->eal_init_args, s); \
+ }
+ foreach_eal_single_hyphen_arg
+#undef _
+#define _(a,b) \
+ else if (unformat(input, #a " %s", &s)) \
+ { \
+ tmp = format (0, "-%s%c", #b, 0); \
+ vec_add1 (conf->eal_init_args, tmp); \
+ vec_add1 (s, 0); \
+ vec_add1 (conf->eal_init_args, s); \
+ conf->a##_set_manually = 1; \
+ }
+ foreach_eal_single_hyphen_mandatory_arg
+#undef _
+ else if (unformat (input, "default"))
+ ;
+
+ else if (unformat_skip_white_space (input))
+ ;
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+
+ if (!conf->uio_driver_name)
+ conf->uio_driver_name = format (0, "igb_uio%c", 0);
+
+ /*
+ * Use 1G huge pages if available.
+ */
+ if (!no_huge && !huge_dir)
+ {
+ u32 x, *mem_by_socket = 0;
+ uword c = 0;
+ u8 use_1g = 1;
+ u8 use_2m = 1;
+ u8 less_than_1g = 1;
+ int rv;
+
+ umount (DEFAULT_HUGE_DIR);
+
+ /* Process "socket-mem" parameter value */
+ if (vec_len (socket_mem))
+ {
+ unformat_input_t in;
+ unformat_init_vector (&in, socket_mem);
+ while (unformat_check_input (&in) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (&in, "%u,", &x))
+ ;
+ else if (unformat (&in, "%u", &x))
+ ;
+ else if (unformat (&in, ","))
+ x = 0;
+ else
+ break;
+
+ vec_add1 (mem_by_socket, x);
+
+ if (x > 1023)
+ less_than_1g = 0;
+ }
+ /* Note: unformat_free vec_frees(in.buffer), aka socket_mem... */
+ unformat_free (&in);
+ socket_mem = 0;
+ }
+ else
+ {
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (c, tm->cpu_socket_bitmap, (
+ {
+ vec_validate(mem_by_socket, c);
+ mem_by_socket[c] = 256; /* default per-socket mem */
+ }
+ ));
+ /* *INDENT-ON* */
+ }
+
+      /* check whether enough 1G (or 2M) huge pages are available for each socket */
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (c, tm->cpu_socket_bitmap, (
+ {
+ int pages_avail, page_size, mem;
+
+ vec_validate(mem_by_socket, c);
+ mem = mem_by_socket[c];
+
+ page_size = 1024;
+ pages_avail = vlib_sysfs_get_free_hugepages(c, page_size * 1024);
+
+ if (pages_avail < 0 || page_size * pages_avail < mem)
+ use_1g = 0;
+
+ page_size = 2;
+ pages_avail = vlib_sysfs_get_free_hugepages(c, page_size * 1024);
+
+ if (pages_avail < 0 || page_size * pages_avail < mem)
+ use_2m = 0;
+ }));
+ /* *INDENT-ON* */
+
+ if (mem_by_socket == 0)
+ {
+ error = clib_error_return (0, "mem_by_socket NULL");
+ goto done;
+ }
+ _vec_len (mem_by_socket) = c + 1;
+
+ /* regenerate socket_mem string */
+ vec_foreach_index (x, mem_by_socket)
+ socket_mem = format (socket_mem, "%s%u",
+ socket_mem ? "," : "", mem_by_socket[x]);
+ socket_mem = format (socket_mem, "%c", 0);
+
+ vec_free (mem_by_socket);
+
+ rv = mkdir (VPP_RUN_DIR, 0755);
+ if (rv && errno != EEXIST)
+ {
+ error = clib_error_return (0, "mkdir '%s' failed errno %d",
+ VPP_RUN_DIR, errno);
+ goto done;
+ }
+
+ rv = mkdir (DEFAULT_HUGE_DIR, 0755);
+ if (rv && errno != EEXIST)
+ {
+ error = clib_error_return (0, "mkdir '%s' failed errno %d",
+ DEFAULT_HUGE_DIR, errno);
+ goto done;
+ }
+
+ if (use_1g && !(less_than_1g && use_2m))
+ {
+ rv =
+ mount ("none", DEFAULT_HUGE_DIR, "hugetlbfs", 0, "pagesize=1G");
+ }
+ else if (use_2m)
+ {
+ rv = mount ("none", DEFAULT_HUGE_DIR, "hugetlbfs", 0, NULL);
+ }
+ else
+ {
+ return clib_error_return (0, "not enough free huge pages");
+ }
+
+ if (rv)
+ {
+ error = clib_error_return (0, "mount failed %d", errno);
+ goto done;
+ }
+
+ tmp = format (0, "--huge-dir%c", 0);
+ vec_add1 (conf->eal_init_args, tmp);
+ tmp = format (0, "%s%c", DEFAULT_HUGE_DIR, 0);
+ vec_add1 (conf->eal_init_args, tmp);
+ if (!file_prefix)
+ {
+ tmp = format (0, "--file-prefix%c", 0);
+ vec_add1 (conf->eal_init_args, tmp);
+ tmp = format (0, "vpp%c", 0);
+ vec_add1 (conf->eal_init_args, tmp);
+ }
+ }
+
+ vec_free (rte_cmd);
+ vec_free (ethname);
+
+ if (error)
+ return error;
+
+ /* I'll bet that -c and -n must be the first and second args... */
+ if (!conf->coremask_set_manually)
+ {
+ vlib_thread_registration_t *tr;
+ uword *coremask = 0;
+ int i;
+
+ /* main thread core */
+ coremask = clib_bitmap_set (coremask, tm->main_lcore, 1);
+
+ for (i = 0; i < vec_len (tm->registrations); i++)
+ {
+ tr = tm->registrations[i];
+ coremask = clib_bitmap_or (coremask, tr->coremask);
+ }
+
+ vec_insert (conf->eal_init_args, 2, 1);
+ conf->eal_init_args[1] = (u8 *) "-c";
+ tmp = format (0, "%U%c", format_bitmap_hex, coremask, 0);
+ conf->eal_init_args[2] = tmp;
+ clib_bitmap_free (coremask);
+ }
+
+ if (!conf->nchannels_set_manually)
+ {
+ vec_insert (conf->eal_init_args, 2, 3);
+ conf->eal_init_args[3] = (u8 *) "-n";
+ tmp = format (0, "%d", conf->nchannels);
+ conf->eal_init_args[4] = tmp;
+ }
+
+ if (no_pci == 0 && geteuid () == 0)
+ dpdk_bind_devices_to_uio (conf);
+
+#define _(x) \
+ if (devconf->x == 0 && conf->default_devconf.x > 0) \
+ devconf->x = conf->default_devconf.x ;
+
+ /* *INDENT-OFF* */
+ pool_foreach (devconf, conf->dev_confs, ({
+
+ /* default per-device config items */
+ foreach_dpdk_device_config_item
+
+ /* add DPDK EAL whitelist/blacklist entry */
+ if (num_whitelisted > 0 && devconf->is_blacklisted == 0)
+ {
+ tmp = format (0, "-w%c", 0);
+ vec_add1 (conf->eal_init_args, tmp);
+ tmp = format (0, "%U%c", format_vlib_pci_addr, &devconf->pci_addr, 0);
+ vec_add1 (conf->eal_init_args, tmp);
+ }
+ else if (num_whitelisted == 0 && devconf->is_blacklisted != 0)
+ {
+ tmp = format (0, "-b%c", 0);
+ vec_add1 (conf->eal_init_args, tmp);
+ tmp = format (0, "%U%c", format_vlib_pci_addr, &devconf->pci_addr, 0);
+ vec_add1 (conf->eal_init_args, tmp);
+ }
+ }));
+ /* *INDENT-ON* */
+
+#undef _
+
+ /* set master-lcore */
+ tmp = format (0, "--master-lcore%c", 0);
+ vec_add1 (conf->eal_init_args, tmp);
+ tmp = format (0, "%u%c", tm->main_lcore, 0);
+ vec_add1 (conf->eal_init_args, tmp);
+
+ /* set socket-mem */
+ tmp = format (0, "--socket-mem%c", 0);
+ vec_add1 (conf->eal_init_args, tmp);
+ tmp = format (0, "%s%c", socket_mem, 0);
+ vec_add1 (conf->eal_init_args, tmp);
+
+ /* NULL terminate the "argv" vector, in case of stupidity */
+ vec_add1 (conf->eal_init_args, 0);
+ _vec_len (conf->eal_init_args) -= 1;
+
+ /* Set up DPDK eal and packet mbuf pool early. */
+
+ log_level = (CLIB_DEBUG > 0) ? RTE_LOG_DEBUG : RTE_LOG_NOTICE;
+
+ rte_set_log_level (log_level);
+
+ vm = vlib_get_main ();
+
+  /* make a copy of the args, as rte_eal_init tends to mess up the arg array */
+ for (i = 1; i < vec_len (conf->eal_init_args); i++)
+ conf->eal_init_args_str = format (conf->eal_init_args_str, "%s ",
+ conf->eal_init_args[i]);
+
+ ret =
+ rte_eal_init (vec_len (conf->eal_init_args),
+ (char **) conf->eal_init_args);
+
+ /* lazy umount hugepages */
+ umount2 (DEFAULT_HUGE_DIR, MNT_DETACH);
+
+ if (ret < 0)
+ return clib_error_return (0, "rte_eal_init returned %d", ret);
+
+ /* Dump the physical memory layout prior to creating the mbuf_pool */
+ fprintf (stdout, "DPDK physical memory layout:\n");
+ rte_dump_physmem_layout (stdout);
+
+ /* main thread 1st */
+ error = vlib_buffer_pool_create (vm, conf->num_mbufs, rte_socket_id ());
+ if (error)
+ return error;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ {
+ error = vlib_buffer_pool_create (vm, conf->num_mbufs,
+ rte_lcore_to_socket_id (i));
+ if (error)
+ return error;
+ }
+
+done:
+ return error;
+}
+
+VLIB_CONFIG_FUNCTION (dpdk_config, "dpdk");
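+
+/*
+ * Example (illustrative) "dpdk" section consumed by dpdk_config() above:
+ *
+ *   dpdk {
+ *     socket-mem 1024,1024
+ *     no-multi-seg
+ *     uio-driver igb_uio
+ *     dev default { num-rx-desc 512 }
+ *     dev 0000:02:00.0 { num-rx-queues 2 }
+ *   }
+ */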
+
+void
+dpdk_update_link_state (dpdk_device_t * xd, f64 now)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ struct rte_eth_link prev_link = xd->link;
+ u32 hw_flags = 0;
+ u8 hw_flags_chg = 0;
+
+ /* only update link state for PMD interfaces */
+ if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0)
+ return;
+
+ xd->time_last_link_update = now ? now : xd->time_last_link_update;
+ memset (&xd->link, 0, sizeof (xd->link));
+ rte_eth_link_get_nowait (xd->device_index, &xd->link);
+
+ if (LINK_STATE_ELOGS)
+ {
+ vlib_main_t *vm = vlib_get_main ();
+      ELOG_TYPE_DECLARE (e) =
+	{
+	  .format = "update-link-state: sw_if_index %d, admin_up %d, "
+	    "old link_state %d new link_state %d",
+	  .format_args = "i4i1i1i1",
+	};
+
+ struct
+ {
+ u32 sw_if_index;
+ u8 admin_up;
+ u8 old_link_state;
+ u8 new_link_state;
+ } *ed;
+ ed = ELOG_DATA (&vm->elog_main, e);
+ ed->sw_if_index = xd->vlib_sw_if_index;
+ ed->admin_up = (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) != 0;
+ ed->old_link_state = (u8)
+ vnet_hw_interface_is_link_up (vnm, xd->vlib_hw_if_index);
+ ed->new_link_state = (u8) xd->link.link_status;
+ }
+
+ if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) &&
+ ((xd->link.link_status != 0) ^
+ vnet_hw_interface_is_link_up (vnm, xd->vlib_hw_if_index)))
+ {
+ hw_flags_chg = 1;
+ hw_flags |= (xd->link.link_status ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
+ }
+
+ if (hw_flags_chg || (xd->link.link_duplex != prev_link.link_duplex))
+ {
+ hw_flags_chg = 1;
+ switch (xd->link.link_duplex)
+ {
+ case ETH_LINK_HALF_DUPLEX:
+ hw_flags |= VNET_HW_INTERFACE_FLAG_HALF_DUPLEX;
+ break;
+ case ETH_LINK_FULL_DUPLEX:
+ hw_flags |= VNET_HW_INTERFACE_FLAG_FULL_DUPLEX;
+ break;
+ default:
+ break;
+ }
+ }
+ if (hw_flags_chg || (xd->link.link_speed != prev_link.link_speed))
+ {
+ hw_flags_chg = 1;
+ switch (xd->link.link_speed)
+ {
+ case ETH_SPEED_NUM_10M:
+ hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_10M;
+ break;
+ case ETH_SPEED_NUM_100M:
+ hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_100M;
+ break;
+ case ETH_SPEED_NUM_1G:
+ hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_1G;
+ break;
+ case ETH_SPEED_NUM_10G:
+ hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_10G;
+ break;
+ case ETH_SPEED_NUM_40G:
+ hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_40G;
+ break;
+ case 0:
+ break;
+ default:
+ clib_warning ("unknown link speed %d", xd->link.link_speed);
+ break;
+ }
+ }
+ if (hw_flags_chg)
+ {
+ if (LINK_STATE_ELOGS)
+ {
+ vlib_main_t *vm = vlib_get_main ();
+
+	  ELOG_TYPE_DECLARE (e) =
+	    {
+	      .format = "update-link-state: sw_if_index %d, new flags %d",
+	      .format_args = "i4i4",
+	    };
+
+ struct
+ {
+ u32 sw_if_index;
+ u32 flags;
+ } *ed;
+ ed = ELOG_DATA (&vm->elog_main, e);
+ ed->sw_if_index = xd->vlib_sw_if_index;
+ ed->flags = hw_flags;
+ }
+ vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, hw_flags);
+ }
+}
+
+static uword
+dpdk_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ clib_error_t *error;
+ vnet_main_t *vnm = vnet_get_main ();
+ dpdk_main_t *dm = &dpdk_main;
+ ethernet_main_t *em = &ethernet_main;
+ dpdk_device_t *xd;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ int i;
+
+ error = dpdk_lib_init (dm);
+
+ /*
+ * Turn on the input node if we found some devices to drive
+ * and we're not running worker threads or i/o threads
+ */
+
+ if (error == 0 && vec_len (dm->devices) > 0)
+ {
+ if (tm->n_vlib_mains == 1)
+ vlib_node_set_state (vm, dpdk_input_node.index,
+ VLIB_NODE_STATE_POLLING);
+ else
+ for (i = 0; i < tm->n_vlib_mains; i++)
+ if (vec_len (dm->devices_by_cpu[i]) > 0)
+ vlib_node_set_state (vlib_mains[i], dpdk_input_node.index,
+ VLIB_NODE_STATE_POLLING);
+ }
+
+ if (error)
+ clib_error_report (error);
+
+ tm->worker_thread_release = 1;
+
+ f64 now = vlib_time_now (vm);
+ vec_foreach (xd, dm->devices)
+ {
+ dpdk_update_link_state (xd, now);
+ }
+
+ {
+    /*
+     * Extra set up for bond interfaces:
+     * 1. Set up MACs for bond interfaces and their slave links; these were
+     *    set in dpdk_port_setup() but need to be set again here to take
+     *    effect.
+     * 2. Set up info for bond interface related CLI support.
+     */
+ int nports = rte_eth_dev_count ();
+ if (nports > 0)
+ {
+ for (i = 0; i < nports; i++)
+ {
+ struct rte_eth_dev_info dev_info;
+ rte_eth_dev_info_get (i, &dev_info);
+ if (!dev_info.driver_name)
+#if RTE_VERSION < RTE_VERSION_NUM(16, 11, 0, 0)
+ dev_info.driver_name = dev_info.pci_dev->driver->name;
+#else
+ dev_info.driver_name = dev_info.pci_dev->driver->driver.name;
+#endif
+ ASSERT (dev_info.driver_name);
+ if (strncmp (dev_info.driver_name, "rte_bond_pmd", 12) == 0)
+ {
+ u8 addr[6];
+ u8 slink[16];
+ int nlink = rte_eth_bond_slaves_get (i, slink, 16);
+ if (nlink > 0)
+ {
+ vnet_hw_interface_t *bhi;
+ ethernet_interface_t *bei;
+ int rv;
+
+ /* Get MAC of 1st slave link */
+ rte_eth_macaddr_get (slink[0],
+ (struct ether_addr *) addr);
+		  /* Set MAC of bonded interface to that of 1st slave link */
+ rv =
+ rte_eth_bond_mac_address_set (i,
+ (struct ether_addr *)
+ addr);
+ if (rv < 0)
+ clib_warning ("Failed to set MAC address");
+
+ /* Populate MAC of bonded interface in VPP hw tables */
+ bhi =
+ vnet_get_hw_interface (vnm,
+ dm->devices[i].vlib_hw_if_index);
+ bei =
+ pool_elt_at_index (em->interfaces, bhi->hw_instance);
+ clib_memcpy (bhi->hw_address, addr, 6);
+ clib_memcpy (bei->address, addr, 6);
+ /* Init l3 packet size allowed on bonded interface */
+ bhi->max_packet_bytes = ETHERNET_MAX_PACKET_BYTES;
+ bhi->max_l3_packet_bytes[VLIB_RX] =
+ bhi->max_l3_packet_bytes[VLIB_TX] =
+ ETHERNET_MAX_PACKET_BYTES - sizeof (ethernet_header_t);
+ while (nlink >= 1)
+ { /* for all slave links */
+ int slave = slink[--nlink];
+ dpdk_device_t *sdev = &dm->devices[slave];
+ vnet_hw_interface_t *shi;
+ vnet_sw_interface_t *ssi;
+ /* Add MAC to all slave links except the first one */
+ if (nlink)
+ rte_eth_dev_mac_addr_add (slave,
+ (struct ether_addr *)
+ addr, 0);
+ /* Set slaves bitmap for bonded interface */
+ bhi->bond_info =
+ clib_bitmap_set (bhi->bond_info,
+ sdev->vlib_hw_if_index, 1);
+ /* Set slave link flags on slave interface */
+ shi =
+ vnet_get_hw_interface (vnm, sdev->vlib_hw_if_index);
+ ssi =
+ vnet_get_sw_interface (vnm, sdev->vlib_sw_if_index);
+ shi->bond_info = VNET_HW_INTERFACE_BOND_INFO_SLAVE;
+ ssi->flags |= VNET_SW_INTERFACE_FLAG_BOND_SLAVE;
+
+ /* Set l3 packet size allowed as the lowest of slave */
+ if (bhi->max_l3_packet_bytes[VLIB_RX] >
+ shi->max_l3_packet_bytes[VLIB_RX])
+ bhi->max_l3_packet_bytes[VLIB_RX] =
+ bhi->max_l3_packet_bytes[VLIB_TX] =
+ shi->max_l3_packet_bytes[VLIB_RX];
+
+ /* Set max packet size allowed as the lowest of slave */
+ if (bhi->max_packet_bytes > shi->max_packet_bytes)
+ bhi->max_packet_bytes = shi->max_packet_bytes;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ while (1)
+ {
+ /*
+ * check each time through the loop in case intervals are changed
+ */
+ f64 min_wait = dm->link_state_poll_interval < dm->stat_poll_interval ?
+ dm->link_state_poll_interval : dm->stat_poll_interval;
+
+ vlib_process_wait_for_event_or_clock (vm, min_wait);
+
+ if (dm->admin_up_down_in_progress)
+ /* skip the poll if an admin up/down is in progress (on any interface) */
+ continue;
+
+ vec_foreach (xd, dm->devices)
+ {
+ f64 now = vlib_time_now (vm);
+ if ((now - xd->time_last_stats_update) >= dm->stat_poll_interval)
+ dpdk_update_counters (xd, now);
+ if ((now - xd->time_last_link_update) >= dm->link_state_poll_interval)
+ dpdk_update_link_state (xd, now);
+
+ }
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (dpdk_process_node, static) = {
+ .function = dpdk_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "dpdk-process",
+ .process_log2_n_stack_bytes = 17,
+};
+/* *INDENT-ON* */
+
+int
+dpdk_set_stat_poll_interval (f64 interval)
+{
+ if (interval < DPDK_MIN_STATS_POLL_INTERVAL)
+ return (VNET_API_ERROR_INVALID_VALUE);
+
+ dpdk_main.stat_poll_interval = interval;
+
+ return 0;
+}
+
+int
+dpdk_set_link_state_poll_interval (f64 interval)
+{
+ if (interval < DPDK_MIN_LINK_POLL_INTERVAL)
+ return (VNET_API_ERROR_INVALID_VALUE);
+
+ dpdk_main.link_state_poll_interval = interval;
+
+ return 0;
+}
+
+clib_error_t *
+dpdk_init (vlib_main_t * vm)
+{
+ dpdk_main_t *dm = &dpdk_main;
+ vlib_node_t *ei;
+ clib_error_t *error = 0;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+
+ /* verify that structs are cacheline aligned */
+ STATIC_ASSERT (offsetof (dpdk_device_t, cacheline0) == 0,
+ "Cache line marker must be 1st element in dpdk_device_t");
+ STATIC_ASSERT (offsetof (dpdk_device_t, cacheline1) ==
+ CLIB_CACHE_LINE_BYTES,
+ "Data in cache line 0 is bigger than cache line size");
+ STATIC_ASSERT (offsetof (dpdk_worker_t, cacheline0) == 0,
+ "Cache line marker must be 1st element in dpdk_worker_t");
+ STATIC_ASSERT (offsetof (frame_queue_trace_t, cacheline0) == 0,
+ "Cache line marker must be 1st element in frame_queue_trace_t");
+
+ dm->vlib_main = vm;
+ dm->vnet_main = vnet_get_main ();
+ dm->conf = &dpdk_config_main;
+
+ ei = vlib_get_node_by_name (vm, (u8 *) "ethernet-input");
+ if (ei == 0)
+ return clib_error_return (0, "ethernet-input node AWOL");
+
+ dm->ethernet_input_node_index = ei->index;
+
+ dm->conf->nchannels = 4;
+ dm->conf->num_mbufs = dm->conf->num_mbufs ? dm->conf->num_mbufs : NB_MBUF;
+ vec_add1 (dm->conf->eal_init_args, (u8 *) "vnet");
+
+ dm->dpdk_device_by_kni_port_id = hash_create (0, sizeof (uword));
+ dm->vu_sw_if_index_by_listener_fd = hash_create (0, sizeof (uword));
+ dm->vu_sw_if_index_by_sock_fd = hash_create (0, sizeof (uword));
+
+ /* $$$ use n_thread_stacks since it's known-good at this point */
+ vec_validate (dm->recycle, tm->n_thread_stacks - 1);
+
+ /* Default vlib_buffer_t flags, DISABLES tcp/udp checksumming... */
+ dm->buffer_flags_template =
+ (VLIB_BUFFER_TOTAL_LENGTH_VALID | VNET_BUFFER_RTE_MBUF_VALID
+ | IP_BUFFER_L4_CHECKSUM_COMPUTED | IP_BUFFER_L4_CHECKSUM_CORRECT);
+
+ dm->stat_poll_interval = DPDK_STATS_POLL_INTERVAL;
+ dm->link_state_poll_interval = DPDK_LINK_POLL_INTERVAL;
+
+ /* init CLI */
+ if ((error = vlib_call_init_function (vm, dpdk_cli_init)))
+ return error;
+
+ return error;
+}
+
+VLIB_INIT_FUNCTION (dpdk_init);
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/dpdk/ipsec/cli.c b/src/vnet/devices/dpdk/ipsec/cli.c
new file mode 100644
index 00000000000..3b634e036da
--- /dev/null
+++ b/src/vnet/devices/dpdk/ipsec/cli.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/devices/dpdk/ipsec/ipsec.h>
+
+static void
+dpdk_ipsec_show_mapping (vlib_main_t * vm, u16 detail_display)
+{
+ dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ u32 i, skip_master;
+
+ if (detail_display)
+ vlib_cli_output (vm, "worker\t%10s\t%15s\tdir\tdev\tqp\n",
+ "cipher", "auth");
+ else
+ vlib_cli_output (vm, "worker\tcrypto device id(type)\n");
+
+ skip_master = vlib_num_workers () > 0;
+
+ for (i = 0; i < tm->n_vlib_mains; i++)
+ {
+ uword key, data;
+ u32 cpu_index = vlib_mains[i]->cpu_index;
+ crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index];
+ u8 *s = 0;
+
+ if (skip_master)
+ {
+ skip_master = 0;
+ continue;
+ }
+
+ if (!detail_display)
+ {
+ i32 last_cdev = -1;
+ crypto_qp_data_t *qpd;
+
+ s = format (s, "%u\t", cpu_index);
+
+ /* *INDENT-OFF* */
+ vec_foreach (qpd, cwm->qp_data)
+ {
+ u32 dev_id = qpd->dev_id;
+
+ if ((u16) last_cdev != dev_id)
+ {
+ struct rte_cryptodev_info cdev_info;
+
+ rte_cryptodev_info_get (dev_id, &cdev_info);
+
+ s = format(s, "%u(%s)\t", dev_id, cdev_info.feature_flags &
+ RTE_CRYPTODEV_FF_HW_ACCELERATED ? "HW" : "SW");
+ }
+ last_cdev = dev_id;
+ }
+ /* *INDENT-ON* */
+ vlib_cli_output (vm, "%s", s);
+ }
+ else
+ {
+ char cipher_str[15], auth_str[15];
+ struct rte_cryptodev_capabilities cap;
+ crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *) & key;
+ /* *INDENT-OFF* */
+ hash_foreach (key, data, cwm->algo_qp_map,
+ ({
+ cap.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cap.sym.cipher.algo = p_key->cipher_algo;
+ check_algo_is_supported (&cap, cipher_str);
+ cap.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ cap.sym.auth.algo = p_key->auth_algo;
+ check_algo_is_supported (&cap, auth_str);
+ vlib_cli_output (vm, "%u\t%10s\t%15s\t%3s\t%u\t%u\n",
+ vlib_mains[i]->cpu_index, cipher_str, auth_str,
+ p_key->is_outbound ? "out" : "in",
+ cwm->qp_data[data].dev_id,
+ cwm->qp_data[data].qp_id);
+ }));
+ /* *INDENT-ON* */
+ }
+ }
+}
+
+static clib_error_t *
+lcore_cryptodev_map_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u16 detail = 0;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "verbose"))
+ detail = 1;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ dpdk_ipsec_show_mapping (vm, detail);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lcore_cryptodev_map, static) = {
+ .path = "show crypto device mapping",
+  .short_help = "show crypto device mapping [verbose]",
+ .function = lcore_cryptodev_map_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/dpdk/ipsec/crypto_node.c b/src/vnet/devices/dpdk/ipsec/crypto_node.c
new file mode 100644
index 00000000000..7b32704ec05
--- /dev/null
+++ b/src/vnet/devices/dpdk/ipsec/crypto_node.c
@@ -0,0 +1,210 @@
+/*
+ *------------------------------------------------------------------
+ * crypto_node.c - DPDK Cryptodev input node
+ *
+ * Copyright (c) 2016 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ipsec/ipsec.h>
+
+#include <vnet/devices/dpdk/ipsec/ipsec.h>
+
+#define foreach_dpdk_crypto_input_next \
+ _(DROP, "error-drop") \
+ _(ENCRYPT_POST, "dpdk-esp-encrypt-post") \
+ _(DECRYPT_POST, "dpdk-esp-decrypt-post")
+
+typedef enum
+{
+#define _(f,s) DPDK_CRYPTO_INPUT_NEXT_##f,
+ foreach_dpdk_crypto_input_next
+#undef _
+ DPDK_CRYPTO_INPUT_N_NEXT,
+} dpdk_crypto_input_next_t;
+
+#define foreach_dpdk_crypto_input_error \
+ _(DQ_COPS, "Crypto ops dequeued") \
+ _(COP_FAILED, "Crypto op failed")
+
+typedef enum
+{
+#define _(f,s) DPDK_CRYPTO_INPUT_ERROR_##f,
+ foreach_dpdk_crypto_input_error
+#undef _
+ DPDK_CRYPTO_INPUT_N_ERROR,
+} dpdk_crypto_input_error_t;
+
+static char *dpdk_crypto_input_error_strings[] = {
+#define _(n, s) s,
+ foreach_dpdk_crypto_input_error
+#undef _
+};
+
+vlib_node_registration_t dpdk_crypto_input_node;
+
+typedef struct
+{
+ u32 cdev;
+ u32 qp;
+ u32 status;
+ u32 sa_idx;
+ u32 next_index;
+} dpdk_crypto_input_trace_t;
+
+static u8 *
+format_dpdk_crypto_input_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ dpdk_crypto_input_trace_t *t = va_arg (*args, dpdk_crypto_input_trace_t *);
+
+ s = format (s, "dpdk_crypto: cryptodev-id %u queue-pair %u next-index %d",
+ t->cdev, t->qp, t->next_index);
+
+ s = format (s, " status %u sa-idx %u\n", t->status, t->sa_idx);
+
+ return s;
+}
+
+static_always_inline u32
+dpdk_crypto_dequeue (vlib_main_t * vm, vlib_node_runtime_t * node,
+ crypto_qp_data_t * qpd)
+{
+ u32 n_deq, *to_next = 0, next_index, n_cops, def_next_index;
+ struct rte_crypto_op **cops = qpd->cops;
+
+ if (qpd->inflights == 0)
+ return 0;
+
+ if (qpd->is_outbound)
+ def_next_index = DPDK_CRYPTO_INPUT_NEXT_ENCRYPT_POST;
+ else
+ def_next_index = DPDK_CRYPTO_INPUT_NEXT_DECRYPT_POST;
+
+ n_cops = rte_cryptodev_dequeue_burst (qpd->dev_id, qpd->qp_id,
+ cops, VLIB_FRAME_SIZE);
+ n_deq = n_cops;
+ next_index = def_next_index;
+
+ qpd->inflights -= n_cops;
+ ASSERT (qpd->inflights >= 0);
+
+ while (n_cops > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_cops > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, next0;
+ vlib_buffer_t *b0 = 0;
+ struct rte_crypto_op *cop;
+ struct rte_crypto_sym_op *sym_cop;
+
+ cop = cops[0];
+ cops += 1;
+ n_cops -= 1;
+ n_left_to_next -= 1;
+
+ next0 = def_next_index;
+
+ if (PREDICT_FALSE (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
+ {
+ next0 = DPDK_CRYPTO_INPUT_NEXT_DROP;
+ vlib_node_increment_counter (vm, dpdk_crypto_input_node.index,
+ DPDK_CRYPTO_INPUT_ERROR_COP_FAILED,
+ 1);
+ }
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ sym_cop = (struct rte_crypto_sym_op *) (cop + 1);
+ b0 = vlib_buffer_from_rte_mbuf (sym_cop->m_src);
+ bi0 = vlib_get_buffer_index (vm, b0);
+
+ to_next[0] = bi0;
+ to_next += 1;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vlib_trace_next_frame (vm, node, next0);
+ dpdk_crypto_input_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->cdev = qpd->dev_id;
+ tr->qp = qpd->qp_id;
+ tr->status = cop->status;
+ tr->next_index = next0;
+ tr->sa_idx = vnet_buffer (b0)->ipsec.sad_index;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ crypto_free_cop (qpd, qpd->cops, n_deq);
+
+ vlib_node_increment_counter (vm, dpdk_crypto_input_node.index,
+ DPDK_CRYPTO_INPUT_ERROR_DQ_COPS, n_deq);
+ return n_deq;
+}
+
+static uword
+dpdk_crypto_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 cpu_index = os_get_cpu_number ();
+ dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
+ crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index];
+ crypto_qp_data_t *qpd;
+ u32 n_deq = 0;
+
+ /* *INDENT-OFF* */
+ vec_foreach (qpd, cwm->qp_data)
+ n_deq += dpdk_crypto_dequeue(vm, node, qpd);
+ /* *INDENT-ON* */
+
+ return n_deq;
+}
+
+VLIB_REGISTER_NODE (dpdk_crypto_input_node) = {
+  .function = dpdk_crypto_input_fn,
+  .name = "dpdk-crypto-input",
+  .format_trace = format_dpdk_crypto_input_trace,
+  .type = VLIB_NODE_TYPE_INPUT,
+  .state = VLIB_NODE_STATE_DISABLED,
+  .n_errors = DPDK_CRYPTO_INPUT_N_ERROR,
+  .error_strings = dpdk_crypto_input_error_strings,
+  .n_next_nodes = DPDK_CRYPTO_INPUT_N_NEXT,
+  .next_nodes = {
+#define _(s,n) [DPDK_CRYPTO_INPUT_NEXT_##s] = n,
+    foreach_dpdk_crypto_input_next
+#undef _
+  },
+};
+
+#if DPDK_CRYPTO == 1
+VLIB_NODE_FUNCTION_MULTIARCH (dpdk_crypto_input_node, dpdk_crypto_input_fn)
+#endif
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/dpdk/ipsec/dir.dox b/src/vnet/devices/dpdk/ipsec/dir.dox
new file mode 100644
index 00000000000..ffebfc4d62e
--- /dev/null
+++ b/src/vnet/devices/dpdk/ipsec/dir.dox
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2016 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ @dir vnet/vnet/devices/dpdk/ipsec
+ @brief IPSec ESP encrypt/decrypt using DPDK Cryptodev API
+*/
diff --git a/src/vnet/devices/dpdk/ipsec/dpdk_crypto_ipsec_doc.md b/src/vnet/devices/dpdk/ipsec/dpdk_crypto_ipsec_doc.md
new file mode 100644
index 00000000000..8089696f4a0
--- /dev/null
+++ b/src/vnet/devices/dpdk/ipsec/dpdk_crypto_ipsec_doc.md
@@ -0,0 +1,73 @@
+# VPP IPSec implementation using DPDK Cryptodev API {#dpdk_crypto_ipsec_doc}
+
+This document describes the implementation and usage of VPP IPSec with the DPDK Cryptodev API.
+
+
+## VPP IPsec with DPDK Cryptodev
+
+DPDK Cryptodev is an asynchronous crypto API that supports both Hardware and Software implementations (for more details refer to [DPDK Cryptography Device Library documentation](http://dpdk.org/doc/guides/prog_guide/cryptodev_lib.html)).
+
+When DPDK Cryptodev support is enabled, the node graph is modified by adding new nodes and replacing some existing ones.
+
+The following nodes are replaced:
+* esp-encrypt -> dpdk-esp-encrypt
+* esp-decrypt -> dpdk-esp-decrypt
+
+The following nodes are added:
+* dpdk-crypto-input : polling input node that dequeues completed crypto operations from the crypto devices.
+* dpdk-esp-encrypt-post : internal node.
+* dpdk-esp-decrypt-post : internal node.
+
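+One way to confirm at runtime that these nodes are present (assuming a running
+VPP instance and the vppctl tool) is:
+
+    vppctl show vlib graph | grep dpdk
+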
+
+### How to enable VPP IPSec with DPDK Cryptodev support
+
+To enable DPDK Cryptodev support (disabled by default), the following build option is required:
+
+ vpp_uses_dpdk_cryptodev=yes
+
+A couple of ways to achieve this:
+* uncomment/add it in the platforms config (i.e. build-data/platforms/vpp.mk)
+* set the option when building vpp (e.g. make vpp_uses_dpdk_cryptodev=yes build-release)
+
+
+### Crypto Resources allocation
+
+VPP allocates crypto resources using a best-effort approach:
+* hardware crypto resources are allocated first, then software.
+* if there are not enough crypto resources for all workers, any packet reaching the ESP encrypt/decrypt nodes is dropped with the following warning:
+
+ 0: dpdk_ipsec_init: not enough cryptodevs for ipsec
+
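+A minimal sketch of the hardware-first selection order (illustrative only; it
+assumes DPDK's rte_cryptodev API, and the real allocation logic lives in
+dpdk_ipsec_init()):
+
+```
+#include <rte_cryptodev.h>
+
+/* Return the first cryptodev of the requested class (HW or SW), or -1. */
+static int
+pick_next_cryptodev (int want_hw)
+{
+  struct rte_cryptodev_info info;
+  int id, n = rte_cryptodev_count ();
+
+  for (id = 0; id < n; id++)
+    {
+      rte_cryptodev_info_get (id, &info);
+      if (!!(info.feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED) == want_hw)
+        return id;
+    }
+  return -1;
+}
+```
+
+Calling this with want_hw=1 first and falling back to want_hw=0 mirrors the
+hardware-then-software order described above.
+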
+
+### Configuration example
+
+No special IPsec configuration is required.
+
+Once DPDK Cryptodev is enabled, the user only needs to provide cryptodevs in startup.conf.
+
+Example startup.conf:
+
+```
+dpdk {
+ socket-mem 1024,1024
+ num-mbufs 131072
+ dev 0000:81:00.0
+ dev 0000:81:00.1
+ dev 0000:85:01.0
+ dev 0000:85:01.1
+ vdev cryptodev_aesni_mb_pmd,socket_id=1
+ vdev cryptodev_aesni_mb_pmd,socket_id=1
+}
+```
+
+In the above configuration:
+* 0000:85:01.0 and 0000:85:01.1 are crypto BDFs; they require the same driver binding as DPDK Ethernet devices, but do not support any extra configuration options (see the binding example below).
+* Two AESNI-MB Software Cryptodev PMDs are created in NUMA node 1.
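+
+For instance, binding one of the crypto devices to a DPDK-compatible driver
+could look like the following (the script name and the igb_uio driver are
+assumptions that depend on your DPDK installation):
+
+    dpdk-devbind.py --bind=igb_uio 0000:85:01.0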
+
+For further details refer to the [DPDK Crypto Device Driver documentation](http://dpdk.org/doc/guides/cryptodevs/index.html).
+
+### Operational data
+
+The following CLI command displays the Cryptodev/Worker mapping:
+
+ show crypto device mapping [verbose]
diff --git a/src/vnet/devices/dpdk/ipsec/esp.h b/src/vnet/devices/dpdk/ipsec/esp.h
new file mode 100644
index 00000000000..7ef90c49816
--- /dev/null
+++ b/src/vnet/devices/dpdk/ipsec/esp.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2016 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __DPDK_ESP_H__
+#define __DPDK_ESP_H__
+
+#include <vnet/devices/dpdk/ipsec/ipsec.h>
+#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/esp.h>
+
+typedef struct
+{
+ enum rte_crypto_cipher_algorithm algo;
+ u8 key_len;
+ u8 iv_len;
+} dpdk_esp_crypto_alg_t;
+
+typedef struct
+{
+ enum rte_crypto_auth_algorithm algo;
+ u8 trunc_size;
+} dpdk_esp_integ_alg_t;
+
+typedef struct
+{
+ dpdk_esp_crypto_alg_t *esp_crypto_algs;
+ dpdk_esp_integ_alg_t *esp_integ_algs;
+} dpdk_esp_main_t;
+
+dpdk_esp_main_t dpdk_esp_main;
+
+static_always_inline void
+dpdk_esp_init ()
+{
+ dpdk_esp_main_t *em = &dpdk_esp_main;
+ dpdk_esp_integ_alg_t *i;
+ dpdk_esp_crypto_alg_t *c;
+
+ vec_validate (em->esp_crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
+
+ c = &em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128];
+ c->algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ c->key_len = 16;
+ c->iv_len = 16;
+
+ c = &em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192];
+ c->algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ c->key_len = 24;
+ c->iv_len = 16;
+
+ c = &em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256];
+ c->algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ c->key_len = 32;
+ c->iv_len = 16;
+
+ c = &em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_GCM_128];
+ c->algo = RTE_CRYPTO_CIPHER_AES_GCM;
+ c->key_len = 16;
+ c->iv_len = 8;
+
+ vec_validate (em->esp_integ_algs, IPSEC_INTEG_N_ALG - 1);
+
+ i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA1_96];
+ i->algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ i->trunc_size = 12;
+
+ i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_256_96];
+ i->algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ i->trunc_size = 12;
+
+ i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_256_128];
+ i->algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ i->trunc_size = 16;
+
+ i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_384_192];
+ i->algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ i->trunc_size = 24;
+
+ i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
+ i->algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ i->trunc_size = 32;
+
+ i = &em->esp_integ_algs[IPSEC_INTEG_ALG_AES_GCM_128];
+ i->algo = RTE_CRYPTO_AUTH_AES_GCM;
+ i->trunc_size = 16;
+}
+
+static_always_inline int
+add_del_sa_sess (u32 sa_index, u8 is_add)
+{
+ dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
+ crypto_worker_main_t *cwm;
+ u8 skip_master = vlib_num_workers () > 0;
+
+ /* *INDENT-OFF* */
+ vec_foreach (cwm, dcm->workers_main)
+ {
+ crypto_sa_session_t *sa_sess;
+ u8 is_outbound;
+
+ if (skip_master)
+ {
+ skip_master = 0;
+ continue;
+ }
+
+ for (is_outbound = 0; is_outbound < 2; is_outbound++)
+ {
+ if (is_add)
+ {
+ pool_get (cwm->sa_sess_d[is_outbound], sa_sess);
+ }
+ else
+ {
+ u8 dev_id;
+
+ sa_sess = pool_elt_at_index (cwm->sa_sess_d[is_outbound], sa_index);
+ dev_id = cwm->qp_data[sa_sess->qp_index].dev_id;
+
+ if (!sa_sess->sess)
+ continue;
+
+ if (rte_cryptodev_sym_session_free(dev_id, sa_sess->sess))
+ {
+ clib_warning("failed to free session");
+ return -1;
+ }
+ memset(sa_sess, 0, sizeof(sa_sess[0]));
+ }
+ }
+ }
+  /* *INDENT-ON* */
+
+ return 0;
+}
+
+static_always_inline int
+translate_crypto_algo(ipsec_crypto_alg_t crypto_algo,
+ struct rte_crypto_sym_xform *cipher_xform)
+{
+ switch (crypto_algo)
+ {
+ case IPSEC_CRYPTO_ALG_NONE:
+ cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ break;
+ case IPSEC_CRYPTO_ALG_AES_CBC_128:
+ case IPSEC_CRYPTO_ALG_AES_CBC_192:
+ case IPSEC_CRYPTO_ALG_AES_CBC_256:
+ cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case IPSEC_CRYPTO_ALG_AES_GCM_128:
+ cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
+ break;
+ default:
+ return -1;
+ }
+
+ cipher_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
+ return 0;
+}
+
+static_always_inline int
+translate_integ_algo(ipsec_integ_alg_t integ_alg,
+ struct rte_crypto_sym_xform *auth_xform, int use_esn)
+{
+ switch (integ_alg) {
+ case IPSEC_INTEG_ALG_NONE:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_NULL;
+ auth_xform->auth.digest_length = 0;
+ break;
+ case IPSEC_INTEG_ALG_SHA1_96:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ auth_xform->auth.digest_length = 12;
+ break;
+ case IPSEC_INTEG_ALG_SHA_256_96:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ auth_xform->auth.digest_length = 12;
+ break;
+ case IPSEC_INTEG_ALG_SHA_256_128:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ auth_xform->auth.digest_length = 16;
+ break;
+ case IPSEC_INTEG_ALG_SHA_384_192:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ auth_xform->auth.digest_length = 24;
+ break;
+ case IPSEC_INTEG_ALG_SHA_512_256:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ auth_xform->auth.digest_length = 32;
+ break;
+ case IPSEC_INTEG_ALG_AES_GCM_128:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_AES_GCM;
+ auth_xform->auth.digest_length = 16;
+      auth_xform->auth.add_auth_data_length = use_esn ? 12 : 8;
+ break;
+ default:
+ return -1;
+ }
+
+ auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+ return 0;
+}
+
+static_always_inline int
+create_sym_sess(ipsec_sa_t *sa, crypto_sa_session_t *sa_sess, u8 is_outbound)
+{
+ u32 cpu_index = os_get_cpu_number();
+ dpdk_crypto_main_t * dcm = &dpdk_crypto_main;
+ crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index];
+ struct rte_crypto_sym_xform cipher_xform = {0};
+ struct rte_crypto_sym_xform auth_xform = {0};
+ struct rte_crypto_sym_xform *xfs;
+ uword key = 0, *data;
+ crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *)&key;
+
+ if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+ {
+ sa->crypto_key_len -= 4;
+ clib_memcpy(&sa->salt, &sa->crypto_key[sa->crypto_key_len], 4);
+ }
+ else
+ {
+ sa->salt = (u32) rand();
+ }
+
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.cipher.key.data = sa->crypto_key;
+ cipher_xform.cipher.key.length = sa->crypto_key_len;
+
+ auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform.auth.key.data = sa->integ_key;
+ auth_xform.auth.key.length = sa->integ_key_len;
+
+ if (translate_crypto_algo(sa->crypto_alg, &cipher_xform) < 0)
+ return -1;
+ p_key->cipher_algo = cipher_xform.cipher.algo;
+
+ if (translate_integ_algo(sa->integ_alg, &auth_xform, sa->use_esn) < 0)
+ return -1;
+ p_key->auth_algo = auth_xform.auth.algo;
+
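+  /* Chain the transforms in processing order: cipher then auth
+     (encrypt-then-MAC) for outbound, auth then cipher for inbound. */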
+ if (is_outbound)
+ {
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ cipher_xform.next = &auth_xform;
+ xfs = &cipher_xform;
+ }
+ else
+ {
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
+ auth_xform.next = &cipher_xform;
+ xfs = &auth_xform;
+ }
+
+ p_key->is_outbound = is_outbound;
+
+ data = hash_get(cwm->algo_qp_map, key);
+ if (!data)
+ return -1;
+
+ sa_sess->sess =
+ rte_cryptodev_sym_session_create(cwm->qp_data[*data].dev_id, xfs);
+
+ if (!sa_sess->sess)
+ return -1;
+
+ sa_sess->qp_index = (u8)*data;
+
+ return 0;
+}
+
+#endif /* __DPDK_ESP_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/dpdk/ipsec/esp_decrypt.c b/src/vnet/devices/dpdk/ipsec/esp_decrypt.c
new file mode 100644
index 00000000000..89ab9f9bc43
--- /dev/null
+++ b/src/vnet/devices/dpdk/ipsec/esp_decrypt.c
@@ -0,0 +1,583 @@
+/*
+ * esp_decrypt.c : IPSec ESP Decrypt node using DPDK Cryptodev
+ *
+ * Copyright (c) 2016 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+
+#include <vnet/ipsec/ipsec.h>
+#include <vnet/devices/dpdk/ipsec/ipsec.h>
+#include <vnet/devices/dpdk/ipsec/esp.h>
+
+#define foreach_esp_decrypt_next \
+_(DROP, "error-drop") \
+_(IP4_INPUT, "ip4-input") \
+_(IP6_INPUT, "ip6-input")
+
+#define _(v, s) ESP_DECRYPT_NEXT_##v,
+typedef enum {
+ foreach_esp_decrypt_next
+#undef _
+ ESP_DECRYPT_N_NEXT,
+} esp_decrypt_next_t;
+
+#define foreach_esp_decrypt_error \
+ _(RX_PKTS, "ESP pkts received") \
+ _(DECRYPTION_FAILED, "ESP decryption failed") \
+ _(REPLAY, "SA replayed packet") \
+ _(NOT_IP, "Not IP packet (dropped)") \
+ _(ENQ_FAIL, "Enqueue failed (buffer full)") \
+ _(NO_CRYPTODEV, "Cryptodev not configured") \
+ _(BAD_LEN, "Invalid ciphertext length") \
+ _(UNSUPPORTED, "Cipher/Auth not supported")
+
+
+typedef enum {
+#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
+ foreach_esp_decrypt_error
+#undef _
+ ESP_DECRYPT_N_ERROR,
+} esp_decrypt_error_t;
+
+static char * esp_decrypt_error_strings[] = {
+#define _(sym,string) string,
+ foreach_esp_decrypt_error
+#undef _
+};
+
+vlib_node_registration_t dpdk_esp_decrypt_node;
+
+typedef struct {
+ ipsec_crypto_alg_t crypto_alg;
+ ipsec_integ_alg_t integ_alg;
+} esp_decrypt_trace_t;
+
+/* packet trace format function */
+static u8 * format_esp_decrypt_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ esp_decrypt_trace_t * t = va_arg (*args, esp_decrypt_trace_t *);
+
+ s = format (s, "esp: crypto %U integrity %U",
+ format_ipsec_crypto_alg, t->crypto_alg,
+ format_ipsec_integ_alg, t->integ_alg);
+ return s;
+}
+
+static uword
+dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, *from, *to_next, next_index;
+ ipsec_main_t *im = &ipsec_main;
+ u32 cpu_index = os_get_cpu_number();
+ dpdk_crypto_main_t * dcm = &dpdk_crypto_main;
+ dpdk_esp_main_t * em = &dpdk_esp_main;
+ u32 i;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ if (PREDICT_FALSE(!dcm->workers_main))
+ {
+ vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
+ ESP_DECRYPT_ERROR_NO_CRYPTODEV, n_left_from);
+ vlib_buffer_free(vm, from, n_left_from);
+ return n_left_from;
+ }
+
+ crypto_worker_main_t *cwm = vec_elt_at_index(dcm->workers_main, cpu_index);
+ u32 n_qps = vec_len(cwm->qp_data);
+ struct rte_crypto_op ** cops_to_enq[n_qps];
+ u32 n_cop_qp[n_qps], * bi_to_enq[n_qps];
+
+ for (i = 0; i < n_qps; i++)
+ {
+ bi_to_enq[i] = cwm->qp_data[i].bi;
+ cops_to_enq[i] = cwm->qp_data[i].cops;
+ }
+
+ memset(n_cop_qp, 0, n_qps * sizeof(u32));
+
+ crypto_alloc_cops();
+
+ next_index = ESP_DECRYPT_NEXT_DROP;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, sa_index0 = ~0, seq, icv_size, iv_size;
+ vlib_buffer_t * b0;
+ esp_header_t * esp0;
+ ipsec_sa_t * sa0;
+ struct rte_mbuf * mb0 = 0;
+ const int BLOCK_SIZE = 16;
+ crypto_sa_session_t * sa_sess;
+ void * sess;
+ u16 qp_index;
+ struct rte_crypto_op * cop = 0;
+
+ bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ esp0 = vlib_buffer_get_current (b0);
+
+ sa_index0 = vnet_buffer(b0)->ipsec.sad_index;
+ sa0 = pool_elt_at_index (im->sad, sa_index0);
+
+ seq = clib_host_to_net_u32(esp0->seq);
+
+ /* anti-replay check */
+ if (sa0->use_anti_replay)
+ {
+ int rv = 0;
+
+ if (PREDICT_TRUE(sa0->use_esn))
+ rv = esp_replay_check_esn(sa0, seq);
+ else
+ rv = esp_replay_check(sa0, seq);
+
+ if (PREDICT_FALSE(rv))
+ {
+ clib_warning ("anti-replay SPI %u seq %u", sa0->spi, seq);
+ vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
+ ESP_DECRYPT_ERROR_REPLAY, 1);
+ to_next[0] = bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+ goto trace;
+ }
+ }
+
+ if (PREDICT_FALSE(sa0->integ_alg == IPSEC_INTEG_ALG_NONE) ||
+ PREDICT_FALSE(sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE))
+ {
+ clib_warning ("SPI %u : only cipher + auth supported", sa0->spi);
+ vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
+ ESP_DECRYPT_ERROR_UNSUPPORTED, 1);
+ to_next[0] = bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+ goto trace;
+ }
+
+ sa_sess = pool_elt_at_index(cwm->sa_sess_d[0], sa_index0);
+
+ if (PREDICT_FALSE(!sa_sess->sess))
+ {
+ int ret = create_sym_sess(sa0, sa_sess, 0);
+ ASSERT(ret == 0);
+ }
+
+ sess = sa_sess->sess;
+ qp_index = sa_sess->qp_index;
+
+ ASSERT (vec_len (vec_elt (cwm->qp_data, qp_index).free_cops) > 0);
+ cop = vec_pop (vec_elt (cwm->qp_data, qp_index).free_cops);
+ ASSERT (cop->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED);
+
+ cops_to_enq[qp_index][0] = cop;
+ cops_to_enq[qp_index] += 1;
+ n_cop_qp[qp_index] += 1;
+ bi_to_enq[qp_index][0] = bi0;
+ bi_to_enq[qp_index] += 1;
+
+ rte_crypto_op_attach_sym_session(cop, sess);
+
+ icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
+ iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
+
+ /* Convert vlib buffer to mbuf */
+ mb0 = rte_mbuf_from_vlib_buffer(b0);
+ mb0->data_len = b0->current_length;
+ mb0->pkt_len = b0->current_length;
+ mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data;
+
+ /* Outer IP header has already been stripped */
+ u16 payload_len = rte_pktmbuf_pkt_len(mb0) - sizeof (esp_header_t) -
+ iv_size - icv_size;
+
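+	  /* The ciphertext must be a positive multiple of the 16-byte cipher
+	     block size; anything else indicates a malformed packet. */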
+ if ((payload_len & (BLOCK_SIZE - 1)) || (payload_len <= 0))
+ {
+ clib_warning ("payload %u not multiple of %d\n",
+ payload_len, BLOCK_SIZE);
+ vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
+ ESP_DECRYPT_ERROR_BAD_LEN, 1);
+ vec_add (vec_elt (cwm->qp_data, qp_index).free_cops, &cop, 1);
+ bi_to_enq[qp_index] -= 1;
+ cops_to_enq[qp_index] -= 1;
+ n_cop_qp[qp_index] -= 1;
+ to_next[0] = bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+ goto trace;
+ }
+
+ struct rte_crypto_sym_op *sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
+
+ sym_cop->m_src = mb0;
+ sym_cop->cipher.data.offset = sizeof (esp_header_t) + iv_size;
+ sym_cop->cipher.data.length = payload_len;
+
+ u8 *iv = rte_pktmbuf_mtod_offset(mb0, void*, sizeof (esp_header_t));
+ dpdk_cop_priv_t * priv = (dpdk_cop_priv_t *)(sym_cop + 1);
+
+ if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+ {
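+	      /* Per RFC 4106 the GCM nonce is the 4-byte salt from the SA
+	         followed by the 8-byte IV carried in the packet; the 32-bit
+	         block counter starts at 1. */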
+ dpdk_gcm_cnt_blk *icb = &priv->cb;
+ icb->salt = sa0->salt;
+ clib_memcpy(icb->iv, iv, 8);
+ icb->cnt = clib_host_to_net_u32(1);
+ sym_cop->cipher.iv.data = (u8 *)icb;
+ sym_cop->cipher.iv.phys_addr = cop->phys_addr +
+ (uintptr_t)icb - (uintptr_t)cop;
+ sym_cop->cipher.iv.length = 16;
+
+ u8 *aad = priv->aad;
+ clib_memcpy(aad, iv - sizeof(esp_header_t), 8);
+ sym_cop->auth.aad.data = aad;
+ sym_cop->auth.aad.phys_addr = cop->phys_addr +
+ (uintptr_t)aad - (uintptr_t)cop;
+ if (sa0->use_esn)
+ {
+ *((u32*)&aad[8]) = sa0->seq_hi;
+ sym_cop->auth.aad.length = 12;
+ }
+ else
+ {
+ sym_cop->auth.aad.length = 8;
+ }
+
+ sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(mb0, void*,
+ rte_pktmbuf_pkt_len(mb0) - icv_size);
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
+ rte_pktmbuf_pkt_len(mb0) - icv_size);
+ sym_cop->auth.digest.length = icv_size;
+
+ }
+ else
+ {
+ sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(mb0, void*,
+ sizeof (esp_header_t));
+ sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
+ sizeof (esp_header_t));
+ sym_cop->cipher.iv.length = iv_size;
+
+ if (sa0->use_esn)
+ {
+ dpdk_cop_priv_t* priv = (dpdk_cop_priv_t*) (sym_cop + 1);
+ u8* payload_end = rte_pktmbuf_mtod_offset(
+ mb0, u8*, sizeof(esp_header_t) + iv_size + payload_len);
+
+ clib_memcpy (priv->icv, payload_end, icv_size);
+ *((u32*) payload_end) = sa0->seq_hi;
+ sym_cop->auth.data.offset = 0;
+ sym_cop->auth.data.length = sizeof(esp_header_t) + iv_size
+ + payload_len + sizeof(sa0->seq_hi);
+ sym_cop->auth.digest.data = priv->icv;
+ sym_cop->auth.digest.phys_addr = cop->phys_addr
+ + (uintptr_t) priv->icv - (uintptr_t) cop;
+ sym_cop->auth.digest.length = icv_size;
+ }
+ else
+ {
+ sym_cop->auth.data.offset = 0;
+ sym_cop->auth.data.length = sizeof(esp_header_t) +
+ iv_size + payload_len;
+
+ sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(mb0, void*,
+ rte_pktmbuf_pkt_len(mb0) - icv_size);
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
+ rte_pktmbuf_pkt_len(mb0) - icv_size);
+ sym_cop->auth.digest.length = icv_size;
+ }
+ }
+
+trace:
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ esp_decrypt_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->crypto_alg = sa0->crypto_alg;
+ tr->integ_alg = sa0->integ_alg;
+ }
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
+ ESP_DECRYPT_ERROR_RX_PKTS,
+ from_frame->n_vectors);
+ crypto_qp_data_t *qpd;
+ /* *INDENT-OFF* */
+ vec_foreach_index (i, cwm->qp_data)
+ {
+ u32 enq;
+
+ qpd = vec_elt_at_index(cwm->qp_data, i);
+ enq = rte_cryptodev_enqueue_burst(qpd->dev_id, qpd->qp_id,
+ qpd->cops, n_cop_qp[i]);
+ qpd->inflights += enq;
+
+ if (PREDICT_FALSE(enq < n_cop_qp[i]))
+ {
+ crypto_free_cop (qpd, &qpd->cops[enq], n_cop_qp[i] - enq);
+ vlib_buffer_free (vm, &qpd->bi[enq], n_cop_qp[i] - enq);
+
+ vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
+ ESP_DECRYPT_ERROR_ENQ_FAIL,
+ n_cop_qp[i] - enq);
+ }
+ }
+ /* *INDENT-ON* */
+
+ return from_frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (dpdk_esp_decrypt_node) = {
+ .function = dpdk_esp_decrypt_node_fn,
+ .name = "dpdk-esp-decrypt",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_decrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
+ .error_strings = esp_decrypt_error_strings,
+
+ .n_next_nodes = ESP_DECRYPT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
+ foreach_esp_decrypt_next
+#undef _
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_decrypt_node, dpdk_esp_decrypt_node_fn)
+
+/*
+ * Decrypt Post Node
+ */
+
+#define foreach_esp_decrypt_post_error \
+ _(PKTS, "ESP post pkts")
+
+typedef enum {
+#define _(sym,str) ESP_DECRYPT_POST_ERROR_##sym,
+ foreach_esp_decrypt_post_error
+#undef _
+ ESP_DECRYPT_POST_N_ERROR,
+} esp_decrypt_post_error_t;
+
+static char * esp_decrypt_post_error_strings[] = {
+#define _(sym,string) string,
+ foreach_esp_decrypt_post_error
+#undef _
+};
+
+vlib_node_registration_t dpdk_esp_decrypt_post_node;
+
+static u8 * format_esp_decrypt_post_trace (u8 * s, va_list * args)
+{
+ return s;
+}
+
+static uword
+dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, *from, *to_next = 0, next_index;
+ ipsec_sa_t * sa0;
+ u32 sa_index0 = ~0;
+ ipsec_main_t *im = &ipsec_main;
+ dpdk_esp_main_t *em = &dpdk_esp_main;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ esp_footer_t * f0;
+ u32 bi0, next0, icv_size, iv_size;
+ vlib_buffer_t * b0 = 0;
+ ip4_header_t *ih4 = 0, *oh4 = 0;
+ ip6_header_t *ih6 = 0, *oh6 = 0;
+ u8 tunnel_mode = 1;
+ u8 transport_ip6 = 0;
+
+ next0 = ESP_DECRYPT_NEXT_DROP;
+
+ bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sa_index0 = vnet_buffer(b0)->ipsec.sad_index;
+ sa0 = pool_elt_at_index (im->sad, sa_index0);
+
+ to_next[0] = bi0;
+ to_next += 1;
+
+ icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
+ iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
+
+ if (sa0->use_anti_replay)
+ {
+ esp_header_t * esp0 = vlib_buffer_get_current (b0);
+ u32 seq;
+ seq = clib_host_to_net_u32(esp0->seq);
+ if (PREDICT_TRUE(sa0->use_esn))
+ esp_replay_advance_esn(sa0, seq);
+ else
+ esp_replay_advance(sa0, seq);
+ }
+
+ ih4 = (ip4_header_t *) (b0->data + sizeof(ethernet_header_t));
+ vlib_buffer_advance (b0, sizeof (esp_header_t) + iv_size);
+
+ b0->current_length -= (icv_size + 2);
+ b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ f0 = (esp_footer_t *) ((u8 *) vlib_buffer_get_current (b0) +
+ b0->current_length);
+ b0->current_length -= f0->pad_length;
+
+ /* transport mode */
+ if (PREDICT_FALSE(!sa0->is_tunnel && !sa0->is_tunnel_ip6))
+ {
+ tunnel_mode = 0;
+
+ if (PREDICT_TRUE((ih4->ip_version_and_header_length & 0xF0) != 0x40))
+ {
+ if (PREDICT_TRUE((ih4->ip_version_and_header_length & 0xF0) == 0x60))
+ transport_ip6 = 1;
+ else
+ {
+ clib_warning("next header: 0x%x", f0->next_header);
+ vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
+ ESP_DECRYPT_ERROR_NOT_IP, 1);
+ goto trace;
+ }
+ }
+ }
+
+ if (PREDICT_TRUE (tunnel_mode))
+ {
+ if (PREDICT_TRUE(f0->next_header == IP_PROTOCOL_IP_IN_IP))
+ next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
+ else if (f0->next_header == IP_PROTOCOL_IPV6)
+ next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
+ else
+ {
+ clib_warning("next header: 0x%x", f0->next_header);
+ vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
+ ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
+ 1);
+ goto trace;
+ }
+ }
+ /* transport mode */
+ else
+ {
+ if (PREDICT_FALSE(transport_ip6))
+ {
+ ih6 = (ip6_header_t *) (b0->data + sizeof(ethernet_header_t));
+ vlib_buffer_advance (b0, -sizeof(ip6_header_t));
+ oh6 = vlib_buffer_get_current (b0);
+ memmove(oh6, ih6, sizeof(ip6_header_t));
+
+ next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
+ oh6->protocol = f0->next_header;
+ oh6->payload_length =
+ clib_host_to_net_u16 (
+ vlib_buffer_length_in_chain(vm, b0) -
+ sizeof (ip6_header_t));
+ }
+ else
+ {
+ vlib_buffer_advance (b0, -sizeof(ip4_header_t));
+ oh4 = vlib_buffer_get_current (b0);
+ memmove(oh4, ih4, sizeof(ip4_header_t));
+
+ next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
+ oh4->ip_version_and_header_length = 0x45;
+ oh4->fragment_id = 0;
+ oh4->flags_and_fragment_offset = 0;
+ oh4->protocol = f0->next_header;
+ oh4->length = clib_host_to_net_u16 (
+ vlib_buffer_length_in_chain (vm, b0));
+ oh4->checksum = ip4_header_checksum (oh4);
+ }
+ }
+
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32)~0;
+
+trace:
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ esp_decrypt_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->crypto_alg = sa0->crypto_alg;
+ tr->integ_alg = sa0->integ_alg;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, dpdk_esp_decrypt_post_node.index,
+ ESP_DECRYPT_POST_ERROR_PKTS,
+ from_frame->n_vectors);
+
+ return from_frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (dpdk_esp_decrypt_post_node) = {
+ .function = dpdk_esp_decrypt_post_node_fn,
+ .name = "dpdk-esp-decrypt-post",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_decrypt_post_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(esp_decrypt_post_error_strings),
+ .error_strings = esp_decrypt_post_error_strings,
+
+ .n_next_nodes = ESP_DECRYPT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
+ foreach_esp_decrypt_next
+#undef _
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_decrypt_post_node, dpdk_esp_decrypt_post_node_fn)
diff --git a/src/vnet/devices/dpdk/ipsec/esp_encrypt.c b/src/vnet/devices/dpdk/ipsec/esp_encrypt.c
new file mode 100644
index 00000000000..10bb4616eef
--- /dev/null
+++ b/src/vnet/devices/dpdk/ipsec/esp_encrypt.c
@@ -0,0 +1,598 @@
+/*
+ * esp_encrypt.c : IPSec ESP encrypt node using DPDK Cryptodev
+ *
+ * Copyright (c) 2016 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+
+#include <vnet/ipsec/ipsec.h>
+#include <vnet/devices/dpdk/ipsec/ipsec.h>
+#include <vnet/devices/dpdk/ipsec/esp.h>
+
+#define foreach_esp_encrypt_next \
+_(DROP, "error-drop") \
+_(IP4_LOOKUP, "ip4-lookup") \
+_(IP6_LOOKUP, "ip6-lookup") \
+_(INTERFACE_OUTPUT, "interface-output")
+
+#define _(v, s) ESP_ENCRYPT_NEXT_##v,
+typedef enum
+{
+ foreach_esp_encrypt_next
+#undef _
+ ESP_ENCRYPT_N_NEXT,
+} esp_encrypt_next_t;
+
+#define foreach_esp_encrypt_error \
+ _(RX_PKTS, "ESP pkts received") \
+ _(SEQ_CYCLED, "sequence number cycled") \
+ _(ENQ_FAIL, "Enqueue failed (buffer full)") \
+ _(NO_CRYPTODEV, "Cryptodev not configured") \
+ _(UNSUPPORTED, "Cipher/Auth not supported")
+
+
+typedef enum
+{
+#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
+ foreach_esp_encrypt_error
+#undef _
+ ESP_ENCRYPT_N_ERROR,
+} esp_encrypt_error_t;
+
+static char *esp_encrypt_error_strings[] = {
+#define _(sym,string) string,
+ foreach_esp_encrypt_error
+#undef _
+};
+
+vlib_node_registration_t dpdk_esp_encrypt_node;
+
+typedef struct
+{
+ u32 spi;
+ u32 seq;
+ ipsec_crypto_alg_t crypto_alg;
+ ipsec_integ_alg_t integ_alg;
+} esp_encrypt_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_esp_encrypt_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);
+
+ s = format (s, "esp: spi %u seq %u crypto %U integrity %U",
+ t->spi, t->seq,
+ format_ipsec_crypto_alg, t->crypto_alg,
+ format_ipsec_integ_alg, t->integ_alg);
+ return s;
+}
+
+static uword
+dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, *from, *to_next, next_index;
+ ipsec_main_t *im = &ipsec_main;
+ u32 cpu_index = os_get_cpu_number ();
+ dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
+ dpdk_esp_main_t *em = &dpdk_esp_main;
+ u32 i;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ if (PREDICT_FALSE (!dcm->workers_main))
+ {
+ /* Likely there are not enough cryptodevs, so drop frame */
+ vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
+ ESP_ENCRYPT_ERROR_NO_CRYPTODEV,
+ n_left_from);
+ vlib_buffer_free (vm, from, n_left_from);
+ return n_left_from;
+ }
+
+ crypto_worker_main_t *cwm = vec_elt_at_index (dcm->workers_main, cpu_index);
+ u32 n_qps = vec_len (cwm->qp_data);
+ struct rte_crypto_op **cops_to_enq[n_qps];
+ u32 n_cop_qp[n_qps], *bi_to_enq[n_qps];
+
+ for (i = 0; i < n_qps; i++)
+ {
+ bi_to_enq[i] = cwm->qp_data[i].bi;
+ cops_to_enq[i] = cwm->qp_data[i].cops;
+ }
+
+ memset (n_cop_qp, 0, n_qps * sizeof (u32));
+
+ crypto_alloc_cops ();
+
+ next_index = ESP_ENCRYPT_NEXT_DROP;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, next0;
+ vlib_buffer_t *b0 = 0;
+ u32 sa_index0;
+ ipsec_sa_t *sa0;
+ ip4_and_esp_header_t *ih0, *oh0 = 0;
+ ip6_and_esp_header_t *ih6_0, *oh6_0 = 0;
+ struct rte_mbuf *mb0 = 0;
+ esp_footer_t *f0;
+ u8 is_ipv6;
+ u8 ip_hdr_size;
+ u8 next_hdr_type;
+ u8 transport_mode = 0;
+ const int BLOCK_SIZE = 16;
+ u32 iv_size;
+ u16 orig_sz;
+ crypto_sa_session_t *sa_sess;
+ void *sess;
+ struct rte_crypto_op *cop = 0;
+ u16 qp_index;
+
+ bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ sa_index0 = vnet_buffer (b0)->ipsec.sad_index;
+ sa0 = pool_elt_at_index (im->sad, sa_index0);
+
+ if (PREDICT_FALSE (esp_seq_advance (sa0)))
+ {
+ clib_warning ("sequence number counter has cycled SPI %u",
+ sa0->spi);
+ vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
+ ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
+	      /* TODO: rekey SA */
+ to_next[0] = bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+ goto trace;
+ }
+
+ sa_sess = pool_elt_at_index (cwm->sa_sess_d[1], sa_index0);
+ if (PREDICT_FALSE (!sa_sess->sess))
+ {
+ int ret = create_sym_sess (sa0, sa_sess, 1);
+ ASSERT (ret == 0);
+ }
+
+ qp_index = sa_sess->qp_index;
+ sess = sa_sess->sess;
+
+ ASSERT (vec_len (vec_elt (cwm->qp_data, qp_index).free_cops) > 0);
+ cop = vec_pop (vec_elt (cwm->qp_data, qp_index).free_cops);
+ ASSERT (cop->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED);
+
+ cops_to_enq[qp_index][0] = cop;
+ cops_to_enq[qp_index] += 1;
+ n_cop_qp[qp_index] += 1;
+ bi_to_enq[qp_index][0] = bi0;
+ bi_to_enq[qp_index] += 1;
+
+ ssize_t adv;
+ iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
+ ih0 = vlib_buffer_get_current (b0);
+ orig_sz = b0->current_length;
+ is_ipv6 = (ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60;
+ /* is ipv6 */
+ if (PREDICT_TRUE (sa0->is_tunnel))
+ {
+ if (PREDICT_TRUE (!is_ipv6))
+ adv = -sizeof (ip4_and_esp_header_t);
+ else
+ adv = -sizeof (ip6_and_esp_header_t);
+ }
+ else
+ {
+ adv = -sizeof (esp_header_t);
+ if (PREDICT_TRUE (!is_ipv6))
+ orig_sz -= sizeof (ip4_header_t);
+ else
+ orig_sz -= sizeof (ip6_header_t);
+ }
+
+ /*transport mode save the eth header before it is overwritten */
+ if (PREDICT_FALSE (!sa0->is_tunnel))
+ {
+ ethernet_header_t *ieh0 = (ethernet_header_t *)
+ ((u8 *) vlib_buffer_get_current (b0) -
+ sizeof (ethernet_header_t));
+ ethernet_header_t *oeh0 =
+ (ethernet_header_t *) ((u8 *) ieh0 + (adv - iv_size));
+ clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t));
+ }
+
+ vlib_buffer_advance (b0, adv - iv_size);
+
+ /* XXX IP6/ip4 and IP4/IP6 not supported, only IP4/IP4 and IP6/IP6 */
+
+ /* is ipv6 */
+ if (PREDICT_FALSE (is_ipv6))
+ {
+ ih6_0 = (ip6_and_esp_header_t *) ih0;
+ ip_hdr_size = sizeof (ip6_header_t);
+ oh6_0 = vlib_buffer_get_current (b0);
+
+ if (PREDICT_TRUE (sa0->is_tunnel))
+ {
+ next_hdr_type = IP_PROTOCOL_IPV6;
+ oh6_0->ip6.ip_version_traffic_class_and_flow_label =
+ ih6_0->ip6.ip_version_traffic_class_and_flow_label;
+ }
+ else
+ {
+ next_hdr_type = ih6_0->ip6.protocol;
+ memmove (oh6_0, ih6_0, sizeof (ip6_header_t));
+ }
+
+ oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
+ oh6_0->ip6.hop_limit = 254;
+ oh6_0->esp.spi = clib_net_to_host_u32 (sa0->spi);
+ oh6_0->esp.seq = clib_net_to_host_u32 (sa0->seq);
+ }
+ else
+ {
+ ip_hdr_size = sizeof (ip4_header_t);
+ oh0 = vlib_buffer_get_current (b0);
+
+ if (PREDICT_TRUE (sa0->is_tunnel))
+ {
+ next_hdr_type = IP_PROTOCOL_IP_IN_IP;
+ oh0->ip4.tos = ih0->ip4.tos;
+ }
+ else
+ {
+ next_hdr_type = ih0->ip4.protocol;
+ memmove (oh0, ih0, sizeof (ip4_header_t));
+ }
+
+ oh0->ip4.ip_version_and_header_length = 0x45;
+ oh0->ip4.fragment_id = 0;
+ oh0->ip4.flags_and_fragment_offset = 0;
+ oh0->ip4.ttl = 254;
+ oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
+ oh0->esp.spi = clib_net_to_host_u32 (sa0->spi);
+ oh0->esp.seq = clib_net_to_host_u32 (sa0->seq);
+ }
+
+ if (PREDICT_TRUE (sa0->is_tunnel && !sa0->is_tunnel_ip6))
+ {
+ oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
+ oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;
+
+ /* in tunnel mode send it back to FIB */
+ next0 = ESP_ENCRYPT_NEXT_IP4_LOOKUP;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ }
+ else if (sa0->is_tunnel && sa0->is_tunnel_ip6)
+ {
+ oh6_0->ip6.src_address.as_u64[0] =
+ sa0->tunnel_src_addr.ip6.as_u64[0];
+ oh6_0->ip6.src_address.as_u64[1] =
+ sa0->tunnel_src_addr.ip6.as_u64[1];
+ oh6_0->ip6.dst_address.as_u64[0] =
+ sa0->tunnel_dst_addr.ip6.as_u64[0];
+ oh6_0->ip6.dst_address.as_u64[1] =
+ sa0->tunnel_dst_addr.ip6.as_u64[1];
+
+ /* in tunnel mode send it back to FIB */
+ next0 = ESP_ENCRYPT_NEXT_IP6_LOOKUP;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ }
+ else
+ {
+ next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+ transport_mode = 1;
+ }
+
+ ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);
+ ASSERT (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE);
+
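+	  /* Round the payload plus the 2-byte ESP footer up to whole cipher
+	     blocks. Example: orig_sz = 60 gives blocks = 1 + 61/16 = 4 and
+	     pad_bytes = 4 * 16 - 2 - 60 = 2, so data + pad + footer = 64. */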
+ int blocks = 1 + (orig_sz + 1) / BLOCK_SIZE;
+
+ /* pad packet in input buffer */
+ u8 pad_bytes = BLOCK_SIZE * blocks - 2 - orig_sz;
+ u8 i;
+ u8 *padding = vlib_buffer_get_current (b0) + b0->current_length;
+
+ for (i = 0; i < pad_bytes; ++i)
+ padding[i] = i + 1;
+
+ f0 = vlib_buffer_get_current (b0) + b0->current_length + pad_bytes;
+ f0->pad_length = pad_bytes;
+ f0->next_header = next_hdr_type;
+ b0->current_length += pad_bytes + 2 +
+ em->esp_integ_algs[sa0->integ_alg].trunc_size;
+
+ b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
+
+ struct rte_crypto_sym_op *sym_cop;
+ sym_cop = (struct rte_crypto_sym_op *) (cop + 1);
+
+ dpdk_cop_priv_t *priv = (dpdk_cop_priv_t *) (sym_cop + 1);
+
+ vnet_buffer (b0)->unused[0] = next0;
+
+ mb0 = rte_mbuf_from_vlib_buffer (b0);
+ mb0->data_len = b0->current_length;
+ mb0->pkt_len = b0->current_length;
+ mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data;
+
+ rte_crypto_op_attach_sym_session (cop, sess);
+
+ sym_cop->m_src = mb0;
+
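+	  /* Build the per-packet IV block from the SA salt and the ESP
+	     sequence numbers; for AES-GCM the 32-bit block counter below is
+	     appended per RFC 4106. */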
+ dpdk_gcm_cnt_blk *icb = &priv->cb;
+ icb->salt = sa0->salt;
+ icb->iv[0] = sa0->seq;
+ icb->iv[1] = sa0->seq_hi;
+
+ if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+ {
+ icb->cnt = clib_host_to_net_u32 (1);
+ clib_memcpy (vlib_buffer_get_current (b0) + ip_hdr_size +
+ sizeof (esp_header_t), icb->iv, 8);
+ sym_cop->cipher.data.offset =
+ ip_hdr_size + sizeof (esp_header_t) + iv_size;
+ sym_cop->cipher.data.length = BLOCK_SIZE * blocks;
+ sym_cop->cipher.iv.length = 16;
+ }
+ else
+ {
+ sym_cop->cipher.data.offset =
+ ip_hdr_size + sizeof (esp_header_t);
+ sym_cop->cipher.data.length = BLOCK_SIZE * blocks + iv_size;
+ sym_cop->cipher.iv.length = iv_size;
+ }
+
+ sym_cop->cipher.iv.data = (u8 *) icb;
+ sym_cop->cipher.iv.phys_addr = cop->phys_addr + (uintptr_t) icb
+ - (uintptr_t) cop;
+
+
+ ASSERT (sa0->integ_alg < IPSEC_INTEG_N_ALG);
+ ASSERT (sa0->integ_alg != IPSEC_INTEG_ALG_NONE);
+
+ if (PREDICT_FALSE (sa0->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128))
+ {
+ u8 *aad = priv->aad;
+ clib_memcpy (aad, vlib_buffer_get_current (b0) + ip_hdr_size,
+ 8);
+ sym_cop->auth.aad.data = aad;
+ sym_cop->auth.aad.phys_addr = cop->phys_addr +
+ (uintptr_t) aad - (uintptr_t) cop;
+
+ if (PREDICT_FALSE (sa0->use_esn))
+ {
+ *((u32 *) & aad[8]) = sa0->seq_hi;
+ sym_cop->auth.aad.length = 12;
+ }
+ else
+ {
+ sym_cop->auth.aad.length = 8;
+ }
+ }
+ else
+ {
+ sym_cop->auth.data.offset = ip_hdr_size;
+ sym_cop->auth.data.length = b0->current_length - ip_hdr_size
+ - em->esp_integ_algs[sa0->integ_alg].trunc_size;
+
+ if (PREDICT_FALSE (sa0->use_esn))
+ {
+ u8 *payload_end =
+ vlib_buffer_get_current (b0) + b0->current_length;
+ *((u32 *) payload_end) = sa0->seq_hi;
+ sym_cop->auth.data.length += sizeof (sa0->seq_hi);
+ }
+ }
+ sym_cop->auth.digest.data = vlib_buffer_get_current (b0) +
+ b0->current_length -
+ em->esp_integ_algs[sa0->integ_alg].trunc_size;
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset (mb0,
+ b0->current_length
+ -
+ em->esp_integ_algs
+ [sa0->integ_alg].trunc_size);
+ sym_cop->auth.digest.length =
+ em->esp_integ_algs[sa0->integ_alg].trunc_size;
+
+
+ if (PREDICT_FALSE (is_ipv6))
+ {
+ oh6_0->ip6.payload_length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
+ sizeof (ip6_header_t));
+ }
+ else
+ {
+ oh0->ip4.length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+ oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
+ }
+
+ if (transport_mode)
+ vlib_buffer_advance (b0, -sizeof (ethernet_header_t));
+
+ trace:
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ esp_encrypt_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->spi = sa0->spi;
+ tr->seq = sa0->seq - 1;
+ tr->crypto_alg = sa0->crypto_alg;
+ tr->integ_alg = sa0->integ_alg;
+ }
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
+ ESP_ENCRYPT_ERROR_RX_PKTS,
+ from_frame->n_vectors);
+ crypto_qp_data_t *qpd;
+ /* *INDENT-OFF* */
+ vec_foreach_index (i, cwm->qp_data)
+ {
+ u32 enq;
+
+ qpd = vec_elt_at_index(cwm->qp_data, i);
+ enq = rte_cryptodev_enqueue_burst(qpd->dev_id, qpd->qp_id,
+ qpd->cops, n_cop_qp[i]);
+ qpd->inflights += enq;
+
+ if (PREDICT_FALSE(enq < n_cop_qp[i]))
+ {
+ crypto_free_cop (qpd, &qpd->cops[enq], n_cop_qp[i] - enq);
+ vlib_buffer_free (vm, &qpd->bi[enq], n_cop_qp[i] - enq);
+
+ vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
+ ESP_ENCRYPT_ERROR_ENQ_FAIL,
+ n_cop_qp[i] - enq);
+ }
+ }
+ /* *INDENT-ON* */
+
+ return from_frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (dpdk_esp_encrypt_node) = {
+  .function = dpdk_esp_encrypt_node_fn,
+  .name = "dpdk-esp-encrypt",
+  .flags = VLIB_NODE_FLAG_IS_OUTPUT,
+  .vector_size = sizeof (u32),
+  .format_trace = format_esp_encrypt_trace,
+  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
+  .error_strings = esp_encrypt_error_strings,
+  .n_next_nodes = 1,
+  .next_nodes = {
+    [ESP_ENCRYPT_NEXT_DROP] = "error-drop",
+  },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_encrypt_node, dpdk_esp_encrypt_node_fn)
+/*
+ * ESP Encrypt Post Node
+ */
+#define foreach_esp_encrypt_post_error \
+  _(PKTS, "ESP post pkts")
+
+typedef enum
+{
+#define _(sym,str) ESP_ENCRYPT_POST_ERROR_##sym,
+  foreach_esp_encrypt_post_error
+#undef _
+  ESP_ENCRYPT_POST_N_ERROR,
+} esp_encrypt_post_error_t;
+
+static char *esp_encrypt_post_error_strings[] = {
+#define _(sym,string) string,
+  foreach_esp_encrypt_post_error
+#undef _
+};
+
+vlib_node_registration_t dpdk_esp_encrypt_post_node;
+
+static u8 *
+format_esp_encrypt_post_trace (u8 * s, va_list * args)
+{
+ return s;
+}
+
+static uword
+dpdk_esp_encrypt_post_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, *from, *to_next = 0, next_index;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, next0;
+ vlib_buffer_t *b0 = 0;
+
+ bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ to_next[0] = bi0;
+ to_next += 1;
+
+ next0 = vnet_buffer (b0)->unused[0];
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, bi0,
+ next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, dpdk_esp_encrypt_post_node.index,
+ ESP_ENCRYPT_POST_ERROR_PKTS,
+ from_frame->n_vectors);
+
+ return from_frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (dpdk_esp_encrypt_post_node) = {
+  .function = dpdk_esp_encrypt_post_node_fn,
+  .name = "dpdk-esp-encrypt-post",
+  .vector_size = sizeof (u32),
+  .format_trace = format_esp_encrypt_post_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (esp_encrypt_post_error_strings),
+  .error_strings = esp_encrypt_post_error_strings,
+  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
+  .next_nodes = {
+#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
+    foreach_esp_encrypt_next
+#undef _
+  },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_encrypt_post_node,
+ dpdk_esp_encrypt_post_node_fn)
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/dpdk/ipsec/ipsec.c b/src/vnet/devices/dpdk/ipsec/ipsec.c
new file mode 100644
index 00000000000..de253f02636
--- /dev/null
+++ b/src/vnet/devices/dpdk/ipsec/ipsec.c
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2016 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/api_errno.h>
+#include <vnet/devices/dpdk/dpdk.h>
+#include <vnet/devices/dpdk/ipsec/ipsec.h>
+#include <vnet/devices/dpdk/ipsec/esp.h>
+#include <vnet/ipsec/ipsec.h>
+
+#define DPDK_CRYPTO_NB_OBJS 2048
+#define DPDK_CRYPTO_CACHE_SIZE 512
+#define DPDK_CRYPTO_PRIV_SIZE 128
+#define DPDK_CRYPTO_N_QUEUE_DESC 512
+#define DPDK_CRYPTO_NB_COPS (1024 * 4)
+
+/*
+ * return:
+ * -1: update failed
+ * 0: already exists
+ * 1: mapped
+ */
+static int
+update_qp_data (crypto_worker_main_t * cwm,
+ u8 cdev_id, u16 qp_id, u8 is_outbound, u16 * idx)
+{
+ crypto_qp_data_t *qpd;
+
+ /* *INDENT-OFF* */
+ vec_foreach_index (*idx, cwm->qp_data)
+ {
+ qpd = vec_elt_at_index(cwm->qp_data, *idx);
+
+ if (qpd->dev_id == cdev_id && qpd->qp_id == qp_id &&
+ qpd->is_outbound == is_outbound)
+ return 0;
+ }
+ /* *INDENT-ON* */
+
+ vec_add2 (cwm->qp_data, qpd, 1);
+
+ qpd->dev_id = cdev_id;
+ qpd->qp_id = qp_id;
+ qpd->is_outbound = is_outbound;
+
+ return 1;
+}
+
+/*
+ * return:
+ * -1: error
+ * 0: already exists
+ * 1: mapped
+ */
+static int
+add_mapping (crypto_worker_main_t * cwm,
+ u8 cdev_id, u16 qp, u8 is_outbound,
+ const struct rte_cryptodev_capabilities *cipher_cap,
+ const struct rte_cryptodev_capabilities *auth_cap)
+{
+ int mapped;
+ u16 qp_index;
+ uword key = 0, data, *ret;
+ crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *) & key;
+
+ p_key->cipher_algo = (u8) cipher_cap->sym.cipher.algo;
+ p_key->auth_algo = (u8) auth_cap->sym.auth.algo;
+ p_key->is_outbound = is_outbound;
+
+ ret = hash_get (cwm->algo_qp_map, key);
+ if (ret)
+ return 0;
+
+ mapped = update_qp_data (cwm, cdev_id, qp, is_outbound, &qp_index);
+ if (mapped < 0)
+ return -1;
+
+ data = (uword) qp_index;
+
+ ret = hash_set (cwm->algo_qp_map, key, data);
+ if (!ret)
+ rte_panic ("Failed to insert hash table\n");
+
+ return mapped;
+}
+
+/*
+ * return:
+ * 0: already exists
+ * 1: mapped
+ */
+static int
+add_cdev_mapping (crypto_worker_main_t * cwm,
+ struct rte_cryptodev_info *dev_info, u8 cdev_id,
+ u16 qp, u8 is_outbound)
+{
+ const struct rte_cryptodev_capabilities *i, *j;
+ u32 mapped = 0;
+
+ for (i = dev_info->capabilities; i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++)
+ {
+ if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ continue;
+
+ if (check_algo_is_supported (i, NULL) != 0)
+ continue;
+
+ for (j = dev_info->capabilities; j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
+ j++)
+ {
+ int status = 0;
+
+ if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ continue;
+
+ if (check_algo_is_supported (j, NULL) != 0)
+ continue;
+
+ status = add_mapping (cwm, cdev_id, qp, is_outbound, i, j);
+ if (status == 1)
+ mapped += 1;
+ if (status < 0)
+ return status;
+ }
+ }
+
+ return mapped;
+}
+
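+/* Verify that enough cryptodev queue pairs exist: each thread doing
+ * crypto work needs one queue pair for inbound traffic and one for
+ * outbound traffic. */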
+static int
+check_cryptodev_queues (void)
+{
+ u32 n_qs = 0;
+ u8 cdev_id;
+ u32 n_req_qs = 2;
+
+ if (vlib_num_workers () > 0)
+ n_req_qs = vlib_num_workers () * 2;
+
+ for (cdev_id = 0; cdev_id < rte_cryptodev_count (); cdev_id++)
+ {
+ struct rte_cryptodev_info cdev_info;
+
+ rte_cryptodev_info_get (cdev_id, &cdev_info);
+
+      if (!(cdev_info.feature_flags &
+	    RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
+	continue;
+
+ n_qs += cdev_info.max_nb_queue_pairs;
+ }
+
+ if (n_qs >= n_req_qs)
+ return 0;
+ else
+ return -1;
+}
+
+static clib_error_t *
+dpdk_ipsec_init (vlib_main_t * vm)
+{
+ dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ struct rte_cryptodev_config dev_conf;
+ struct rte_cryptodev_qp_conf qp_conf;
+ struct rte_cryptodev_info cdev_info;
+ struct rte_mempool *rmp;
+ i32 dev_id, ret;
+ u32 i, skip_master;
+
+ if (check_cryptodev_queues () < 0)
+ return clib_error_return (0, "not enough cryptodevs for ipsec");
+
+ vec_alloc (dcm->workers_main, tm->n_vlib_mains);
+ _vec_len (dcm->workers_main) = tm->n_vlib_mains;
+
+ fprintf (stdout, "DPDK Cryptodevs info:\n");
+ fprintf (stdout, "dev_id\tn_qp\tnb_obj\tcache_size\n");
+ /* HW cryptodevs have higher dev_id, use HW first */
+ for (dev_id = rte_cryptodev_count () - 1; dev_id >= 0; dev_id--)
+ {
+ u16 max_nb_qp, qp = 0;
+ skip_master = vlib_num_workers () > 0;
+
+ rte_cryptodev_info_get (dev_id, &cdev_info);
+
+      if (!(cdev_info.feature_flags &
+	    RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
+	continue;
+
+ max_nb_qp = cdev_info.max_nb_queue_pairs;
+
+ for (i = 0; i < tm->n_vlib_mains; i++)
+ {
+ u8 is_outbound;
+ crypto_worker_main_t *cwm;
+ uword *map;
+
+ if (skip_master)
+ {
+ skip_master = 0;
+ continue;
+ }
+
+ cwm = vec_elt_at_index (dcm->workers_main, i);
+ map = cwm->algo_qp_map;
+
+ if (!map)
+ {
+ map = hash_create (0, sizeof (crypto_worker_qp_key_t));
+ if (!map)
+ return clib_error_return (0, "unable to create hash table "
+ "for worker %u",
+ vlib_mains[i]->cpu_index);
+ cwm->algo_qp_map = map;
+ }
+
+ for (is_outbound = 0; is_outbound < 2 && qp < max_nb_qp;
+ is_outbound++)
+ {
+ int mapped = add_cdev_mapping (cwm, &cdev_info,
+ dev_id, qp, is_outbound);
+ if (mapped > 0)
+ qp++;
+
+ if (mapped < 0)
+ return clib_error_return (0,
+ "too many queues for one worker");
+ }
+ }
+
+ if (qp == 0)
+ continue;
+
+ dev_conf.socket_id = rte_cryptodev_socket_id (dev_id);
+ dev_conf.nb_queue_pairs = cdev_info.max_nb_queue_pairs;
+ dev_conf.session_mp.nb_objs = DPDK_CRYPTO_NB_OBJS;
+ dev_conf.session_mp.cache_size = DPDK_CRYPTO_CACHE_SIZE;
+
+ ret = rte_cryptodev_configure (dev_id, &dev_conf);
+ if (ret < 0)
+ return clib_error_return (0, "cryptodev %u config error", dev_id);
+
+ qp_conf.nb_descriptors = DPDK_CRYPTO_N_QUEUE_DESC;
+ for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
+ {
+ ret = rte_cryptodev_queue_pair_setup (dev_id, qp, &qp_conf,
+ dev_conf.socket_id);
+ if (ret < 0)
+ return clib_error_return (0, "cryptodev %u qp %u setup error",
+ dev_id, qp);
+ }
+ fprintf (stdout, "%u\t%u\t%u\t%u\n", dev_id, dev_conf.nb_queue_pairs,
+ DPDK_CRYPTO_NB_OBJS, DPDK_CRYPTO_CACHE_SIZE);
+ }
+
+ u32 socket_id = rte_socket_id ();
+
+ vec_validate_aligned (dcm->cop_pools, socket_id, CLIB_CACHE_LINE_BYTES);
+
+ /* pool already exists, nothing to do */
+ if (dcm->cop_pools[socket_id])
+ return 0;
+
+ u8 *pool_name = format (0, "crypto_op_pool_socket%u%c", socket_id, 0);
+
+ rmp = rte_crypto_op_pool_create ((char *) pool_name,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ DPDK_CRYPTO_NB_COPS *
+ (1 + vlib_num_workers ()),
+ DPDK_CRYPTO_CACHE_SIZE,
+ DPDK_CRYPTO_PRIV_SIZE, socket_id);
+ vec_free (pool_name);
+
+ if (!rmp)
+ return clib_error_return (0, "failed to allocate mempool on socket %u",
+ socket_id);
+ dcm->cop_pools[socket_id] = rmp;
+
+ dpdk_esp_init ();
+
+ if (vec_len (vlib_mains) == 0)
+ vlib_node_set_state (&vlib_global_main, dpdk_crypto_input_node.index,
+ VLIB_NODE_STATE_POLLING);
+ else
+ for (i = 1; i < tm->n_vlib_mains; i++)
+ vlib_node_set_state (vlib_mains[i], dpdk_crypto_input_node.index,
+ VLIB_NODE_STATE_POLLING);
+
+ return 0;
+}
+
+VLIB_MAIN_LOOP_ENTER_FUNCTION (dpdk_ipsec_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/dpdk/ipsec/ipsec.h b/src/vnet/devices/dpdk/ipsec/ipsec.h
new file mode 100644
index 00000000000..e6c7498c0d3
--- /dev/null
+++ b/src/vnet/devices/dpdk/ipsec/ipsec.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2016 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __DPDK_IPSEC_H__
+#define __DPDK_IPSEC_H__
+
+#include <vnet/vnet.h>
+
+#undef always_inline
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+
+#if CLIB_DEBUG > 0
+#define always_inline static inline
+#else
+#define always_inline static inline __attribute__ ((__always_inline__))
+#endif
+
+
+#define MAX_QP_PER_LCORE 16
+
+typedef struct
+{
+ u32 salt;
+ u32 iv[2];
+ u32 cnt;
+} dpdk_gcm_cnt_blk;
+
+typedef struct
+{
+ dpdk_gcm_cnt_blk cb;
+ union
+ {
+ u8 aad[12];
+ u8 icv[64];
+ };
+} dpdk_cop_priv_t;
+
+typedef struct
+{
+ u8 cipher_algo;
+ u8 auth_algo;
+ u8 is_outbound;
+} crypto_worker_qp_key_t;
+
+typedef struct
+{
+ u16 dev_id;
+ u16 qp_id;
+ u16 is_outbound;
+ i16 inflights;
+ u32 bi[VLIB_FRAME_SIZE];
+ struct rte_crypto_op *cops[VLIB_FRAME_SIZE];
+ struct rte_crypto_op **free_cops;
+} crypto_qp_data_t;
+
+typedef struct
+{
+ u8 qp_index;
+ void *sess;
+} crypto_sa_session_t;
+
+typedef struct
+{
+ crypto_sa_session_t *sa_sess_d[2];
+ crypto_qp_data_t *qp_data;
+ uword *algo_qp_map;
+} crypto_worker_main_t;
+
+typedef struct
+{
+ struct rte_mempool **cop_pools;
+ crypto_worker_main_t *workers_main;
+} dpdk_crypto_main_t;
+
+dpdk_crypto_main_t dpdk_crypto_main;
+
+extern vlib_node_registration_t dpdk_crypto_input_node;
+
+#define CRYPTO_N_FREE_COPS (VLIB_FRAME_SIZE * 3)
+
+static_always_inline void
+crypto_alloc_cops ()
+{
+ dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
+ u32 cpu_index = os_get_cpu_number ();
+ crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index];
+ unsigned socket_id = rte_socket_id ();
+ crypto_qp_data_t *qpd;
+
+ /* *INDENT-OFF* */
+ vec_foreach (qpd, cwm->qp_data)
+ {
+ u32 l = vec_len (qpd->free_cops);
+
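+      /* Top up the per-queue-pair cache whenever it drops below one
+       * frame's worth of crypto ops, so the data path avoids
+       * per-packet mempool round trips. */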
+ if (PREDICT_FALSE (l < VLIB_FRAME_SIZE))
+ {
+ u32 n_alloc;
+
+ if (PREDICT_FALSE (!qpd->free_cops))
+ vec_alloc (qpd->free_cops, CRYPTO_N_FREE_COPS);
+
+ n_alloc = rte_crypto_op_bulk_alloc (dcm->cop_pools[socket_id],
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ &qpd->free_cops[l],
+ CRYPTO_N_FREE_COPS - l - 1);
+
+ _vec_len (qpd->free_cops) = l + n_alloc;
+ }
+ }
+ /* *INDENT-ON* */
+}
+
+static_always_inline void
+crypto_free_cop (crypto_qp_data_t * qpd, struct rte_crypto_op **cops, u32 n)
+{
+ u32 l = vec_len (qpd->free_cops);
+
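+  /* If caching n more ops would overflow the free list, flush one
+   * frame's worth back to the mempool first to make room. */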
+ if (l + n >= CRYPTO_N_FREE_COPS)
+ {
+ l -= VLIB_FRAME_SIZE;
+ rte_mempool_put_bulk (cops[0]->mempool,
+ (void **) &qpd->free_cops[l], VLIB_FRAME_SIZE);
+ }
+ clib_memcpy (&qpd->free_cops[l], cops, sizeof (*cops) * n);
+
+ _vec_len (qpd->free_cops) = l + n;
+}
+
+static_always_inline int
+check_algo_is_supported (const struct rte_cryptodev_capabilities *cap,
+ char *name)
+{
+ struct
+ {
+ uint8_t cipher_algo;
+ enum rte_crypto_sym_xform_type type;
+ union
+ {
+ enum rte_crypto_auth_algorithm auth;
+ enum rte_crypto_cipher_algorithm cipher;
+ };
+ char *name;
+  } supported_algo[] = {
+    {.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+     .cipher = RTE_CRYPTO_CIPHER_NULL, .name = "NULL"},
+    {.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+     .cipher = RTE_CRYPTO_CIPHER_AES_CBC, .name = "AES_CBC"},
+    {.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+     .cipher = RTE_CRYPTO_CIPHER_AES_CTR, .name = "AES_CTR"},
+    {.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+     .cipher = RTE_CRYPTO_CIPHER_3DES_CBC, .name = "3DES-CBC"},
+    {.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+     .cipher = RTE_CRYPTO_CIPHER_AES_GCM, .name = "AES-GCM"},
+    {.type = RTE_CRYPTO_SYM_XFORM_AUTH,
+     .auth = RTE_CRYPTO_AUTH_SHA1_HMAC, .name = "HMAC-SHA1"},
+    {.type = RTE_CRYPTO_SYM_XFORM_AUTH,
+     .auth = RTE_CRYPTO_AUTH_SHA256_HMAC, .name = "HMAC-SHA256"},
+    {.type = RTE_CRYPTO_SYM_XFORM_AUTH,
+     .auth = RTE_CRYPTO_AUTH_SHA384_HMAC, .name = "HMAC-SHA384"},
+    {.type = RTE_CRYPTO_SYM_XFORM_AUTH,
+     .auth = RTE_CRYPTO_AUTH_SHA512_HMAC, .name = "HMAC-SHA512"},
+    {.type = RTE_CRYPTO_SYM_XFORM_AUTH,
+     .auth = RTE_CRYPTO_AUTH_AES_XCBC_MAC, .name = "AES-XCBC-MAC"},
+    {.type = RTE_CRYPTO_SYM_XFORM_AUTH,
+     .auth = RTE_CRYPTO_AUTH_AES_GCM, .name = "AES-GCM"},
+    /* tail */
+    {.type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED},
+  };
+ uint32_t i = 0;
+
+ if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ return -1;
+
+ while (supported_algo[i].type != RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED)
+ {
+ if (cap->sym.xform_type == supported_algo[i].type)
+ {
+ if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ cap->sym.cipher.algo == supported_algo[i].cipher) ||
+ (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ cap->sym.auth.algo == supported_algo[i].auth))
+ {
+ if (name)
+ strcpy (name, supported_algo[i].name);
+ return 0;
+ }
+ }
+
+ i++;
+ }
+
+ return -1;
+}
+
+#endif /* __DPDK_IPSEC_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/dpdk/node.c b/src/vnet/devices/dpdk/node.c
new file mode 100644
index 00000000000..e541cdbcbd2
--- /dev/null
+++ b/src/vnet/devices/dpdk/node.c
@@ -0,0 +1,687 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/vnet.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/error.h>
+#include <vppinfra/format.h>
+#include <vppinfra/xxhash.h>
+
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/dpdk/dpdk.h>
+#include <vnet/classify/vnet_classify.h>
+#include <vnet/mpls/packet.h>
+#include <vnet/handoff.h>
+#include <vnet/devices/devices.h>
+#include <vnet/feature/feature.h>
+
+#include "dpdk_priv.h"
+
+static char *dpdk_error_strings[] = {
+#define _(n,s) s,
+ foreach_dpdk_error
+#undef _
+};
+
+always_inline int
+vlib_buffer_is_ip4 (vlib_buffer_t * b)
+{
+ ethernet_header_t *h = (ethernet_header_t *) b->data;
+ return (h->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP4));
+}
+
+always_inline int
+vlib_buffer_is_ip6 (vlib_buffer_t * b)
+{
+ ethernet_header_t *h = (ethernet_header_t *) b->data;
+ return (h->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6));
+}
+
+always_inline int
+vlib_buffer_is_mpls (vlib_buffer_t * b)
+{
+ ethernet_header_t *h = (ethernet_header_t *) b->data;
+ return (h->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS_UNICAST));
+}
+
+#if RTE_VERSION < RTE_VERSION_NUM(16, 11, 0, 0)
+/* New ol_flags bits added in DPDK-16.11 */
+#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
+#endif
+
+always_inline u32
+dpdk_rx_next_from_etype (struct rte_mbuf * mb, vlib_buffer_t * b0)
+{
+ if (PREDICT_TRUE (vlib_buffer_is_ip4 (b0)))
+ if (PREDICT_TRUE ((mb->ol_flags & PKT_RX_IP_CKSUM_GOOD) != 0))
+ return VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
+ else
+ return VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
+ else if (PREDICT_TRUE (vlib_buffer_is_ip6 (b0)))
+ return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
+ else if (PREDICT_TRUE (vlib_buffer_is_mpls (b0)))
+ return VNET_DEVICE_INPUT_NEXT_MPLS_INPUT;
+ else
+ return VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+}
+
+always_inline int
+dpdk_mbuf_is_vlan (struct rte_mbuf *mb)
+{
+#if RTE_VERSION >= RTE_VERSION_NUM(16, 11, 0, 0)
+ return (mb->packet_type & RTE_PTYPE_L2_ETHER_VLAN) ==
+ RTE_PTYPE_L2_ETHER_VLAN;
+#else
+ return
+ (mb->ol_flags &
+ (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED | PKT_RX_QINQ_STRIPPED)) ==
+ PKT_RX_VLAN_PKT;
+#endif
+}
+
+always_inline int
+dpdk_mbuf_is_ip4 (struct rte_mbuf *mb)
+{
+ return RTE_ETH_IS_IPV4_HDR (mb->packet_type) != 0;
+}
+
+always_inline int
+dpdk_mbuf_is_ip6 (struct rte_mbuf *mb)
+{
+ return RTE_ETH_IS_IPV6_HDR (mb->packet_type) != 0;
+}
+
+always_inline u32
+dpdk_rx_next_from_mb (struct rte_mbuf * mb, vlib_buffer_t * b0)
+{
+ if (PREDICT_FALSE (dpdk_mbuf_is_vlan (mb)))
+ return VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ else if (PREDICT_TRUE (dpdk_mbuf_is_ip4 (mb)))
+ return VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
+ else if (PREDICT_TRUE (dpdk_mbuf_is_ip6 (mb)))
+ return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
+ else if (PREDICT_TRUE (vlib_buffer_is_mpls (b0)))
+ return VNET_DEVICE_INPUT_NEXT_MPLS_INPUT;
+ else
+ return dpdk_rx_next_from_etype (mb, b0);
+}
+
+always_inline void
+dpdk_rx_error_from_mb (struct rte_mbuf *mb, u32 * next, u8 * error)
+{
+ if (mb->ol_flags & PKT_RX_IP_CKSUM_BAD)
+ {
+ *error = DPDK_ERROR_IP_CHECKSUM_ERROR;
+ *next = VNET_DEVICE_INPUT_NEXT_DROP;
+ }
+ else
+ *error = DPDK_ERROR_NONE;
+}
+
+void
+dpdk_rx_trace (dpdk_main_t * dm,
+ vlib_node_runtime_t * node,
+ dpdk_device_t * xd,
+ u16 queue_id, u32 * buffers, uword n_buffers)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ u32 *b, n_left;
+ u32 next0;
+
+ n_left = n_buffers;
+ b = buffers;
+
+ while (n_left >= 1)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ dpdk_rx_dma_trace_t *t0;
+ struct rte_mbuf *mb;
+ u8 error0;
+
+ bi0 = b[0];
+ n_left -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ mb = rte_mbuf_from_vlib_buffer (b0);
+
+ if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
+ next0 = xd->per_interface_next_index;
+ else if (PREDICT_TRUE
+ ((xd->flags & DPDK_DEVICE_FLAG_PMD_SUPPORTS_PTYPE) != 0))
+ next0 = dpdk_rx_next_from_mb (mb, b0);
+ else
+ next0 = dpdk_rx_next_from_etype (mb, b0);
+
+ dpdk_rx_error_from_mb (mb, &next0, &error0);
+
+ vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
+ t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t0->queue_index = queue_id;
+ t0->device_index = xd->device_index;
+ t0->buffer_index = bi0;
+
+ clib_memcpy (&t0->mb, mb, sizeof (t0->mb));
+ clib_memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
+ clib_memcpy (t0->buffer.pre_data, b0->data,
+ sizeof (t0->buffer.pre_data));
+ clib_memcpy (&t0->data, mb->buf_addr + mb->data_off, sizeof (t0->data));
+
+ b += 1;
+ }
+}
+
+static inline u32
+dpdk_rx_burst (dpdk_main_t * dm, dpdk_device_t * xd, u16 queue_id)
+{
+ u32 n_buffers;
+ u32 n_left;
+ u32 n_this_chunk;
+
+ n_left = VLIB_FRAME_SIZE;
+ n_buffers = 0;
+
+ if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD))
+ {
+ while (n_left)
+ {
+ n_this_chunk = rte_eth_rx_burst (xd->device_index, queue_id,
+ xd->rx_vectors[queue_id] +
+ n_buffers, n_left);
+ n_buffers += n_this_chunk;
+ n_left -= n_this_chunk;
+
+ /* Empirically, DPDK r1.8 produces vectors w/ 32 or fewer elts */
+ if (n_this_chunk < 32)
+ break;
+ }
+ }
+ else
+ {
+ ASSERT (0);
+ }
+
+ return n_buffers;
+}
+
+
+static_always_inline void
+dpdk_process_subseq_segs (vlib_main_t * vm, vlib_buffer_t * b,
+ struct rte_mbuf *mb, vlib_buffer_free_list_t * fl)
+{
+ u8 nb_seg = 1;
+ struct rte_mbuf *mb_seg = 0;
+ vlib_buffer_t *b_seg, *b_chain = 0;
+ mb_seg = mb->next;
+ b_chain = b;
+
+ while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
+ {
+ ASSERT (mb_seg != 0);
+
+ b_seg = vlib_buffer_from_rte_mbuf (mb_seg);
+ vlib_buffer_init_for_free_list (b_seg, fl);
+
+ ASSERT ((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
+ ASSERT (b_seg->current_data == 0);
+
+ /*
+ * The driver (e.g. virtio) may not put the packet data at the start
+ * of the segment, so don't assume b_seg->current_data == 0 is correct.
+ */
+ b_seg->current_data =
+ (mb_seg->buf_addr + mb_seg->data_off) - (void *) b_seg->data;
+
+ b_seg->current_length = mb_seg->data_len;
+ b->total_length_not_including_first_buffer += mb_seg->data_len;
+
+ b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
+ b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);
+
+ b_chain = b_seg;
+ mb_seg = mb_seg->next;
+ nb_seg++;
+ }
+}
+
+static_always_inline void
+dpdk_prefetch_buffer (struct rte_mbuf *mb)
+{
+ vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
+ CLIB_PREFETCH (mb, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, STORE);
+}
+
+/*
+ * This function is used when there are no worker threads.
+ * The main thread performs IO and forwards the packets.
+ */
+static_always_inline u32
+dpdk_device_input (dpdk_main_t * dm, dpdk_device_t * xd,
+ vlib_node_runtime_t * node, u32 cpu_index, u16 queue_id)
+{
+ u32 n_buffers;
+ u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ u32 n_left_to_next, *to_next;
+ u32 mb_index;
+ vlib_main_t *vm = vlib_get_main ();
+ uword n_rx_bytes = 0;
+ u32 n_trace, trace_cnt __attribute__ ((unused));
+ vlib_buffer_free_list_t *fl;
+ u32 buffer_flags_template;
+
+ if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
+ return 0;
+
+ n_buffers = dpdk_rx_burst (dm, xd, queue_id);
+
+ if (n_buffers == 0)
+ {
+ return 0;
+ }
+
+ buffer_flags_template = dm->buffer_flags_template;
+
+ vec_reset_length (xd->d_trace_buffers[cpu_index]);
+ trace_cnt = n_trace = vlib_get_trace_count (vm, node);
+
+ if (n_trace > 0)
+ {
+ u32 n = clib_min (n_trace, n_buffers);
+ mb_index = 0;
+
+ while (n--)
+ {
+ struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index++];
+ vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
+ vec_add1 (xd->d_trace_buffers[cpu_index],
+ vlib_get_buffer_index (vm, b));
+ }
+ }
+
+ fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+
+ mb_index = 0;
+
+ while (n_buffers > 0)
+ {
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ u32 bi0, next0, l3_offset0;
+ u32 bi1, next1, l3_offset1;
+ u32 bi2, next2, l3_offset2;
+ u32 bi3, next3, l3_offset3;
+ u8 error0, error1, error2, error3;
+ u64 or_ol_flags;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_buffers > 8 && n_left_to_next > 4)
+ {
+ struct rte_mbuf *mb0 = xd->rx_vectors[queue_id][mb_index];
+ struct rte_mbuf *mb1 = xd->rx_vectors[queue_id][mb_index + 1];
+ struct rte_mbuf *mb2 = xd->rx_vectors[queue_id][mb_index + 2];
+ struct rte_mbuf *mb3 = xd->rx_vectors[queue_id][mb_index + 3];
+
+ dpdk_prefetch_buffer (xd->rx_vectors[queue_id][mb_index + 4]);
+ dpdk_prefetch_buffer (xd->rx_vectors[queue_id][mb_index + 5]);
+ dpdk_prefetch_buffer (xd->rx_vectors[queue_id][mb_index + 6]);
+ dpdk_prefetch_buffer (xd->rx_vectors[queue_id][mb_index + 7]);
+
+ if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG)
+ {
+ if (PREDICT_FALSE (mb0->nb_segs > 1))
+ dpdk_prefetch_buffer (mb0->next);
+ if (PREDICT_FALSE (mb1->nb_segs > 1))
+ dpdk_prefetch_buffer (mb1->next);
+ if (PREDICT_FALSE (mb2->nb_segs > 1))
+ dpdk_prefetch_buffer (mb2->next);
+ if (PREDICT_FALSE (mb3->nb_segs > 1))
+ dpdk_prefetch_buffer (mb3->next);
+ }
+
+ ASSERT (mb0);
+ ASSERT (mb1);
+ ASSERT (mb2);
+ ASSERT (mb3);
+
+ or_ol_flags = (mb0->ol_flags | mb1->ol_flags |
+ mb2->ol_flags | mb3->ol_flags);
+ b0 = vlib_buffer_from_rte_mbuf (mb0);
+ b1 = vlib_buffer_from_rte_mbuf (mb1);
+ b2 = vlib_buffer_from_rte_mbuf (mb2);
+ b3 = vlib_buffer_from_rte_mbuf (mb3);
+
+ vlib_buffer_init_for_free_list (b0, fl);
+ vlib_buffer_init_for_free_list (b1, fl);
+ vlib_buffer_init_for_free_list (b2, fl);
+ vlib_buffer_init_for_free_list (b3, fl);
+
+ bi0 = vlib_get_buffer_index (vm, b0);
+ bi1 = vlib_get_buffer_index (vm, b1);
+ bi2 = vlib_get_buffer_index (vm, b2);
+ bi3 = vlib_get_buffer_index (vm, b3);
+
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ to_next[2] = bi2;
+ to_next[3] = bi3;
+ to_next += 4;
+ n_left_to_next -= 4;
+
+ if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
+ {
+ next0 = next1 = next2 = next3 = xd->per_interface_next_index;
+ }
+ else if (PREDICT_TRUE
+ ((xd->flags & DPDK_DEVICE_FLAG_PMD_SUPPORTS_PTYPE) != 0))
+ {
+ next0 = dpdk_rx_next_from_mb (mb0, b0);
+ next1 = dpdk_rx_next_from_mb (mb1, b1);
+ next2 = dpdk_rx_next_from_mb (mb2, b2);
+ next3 = dpdk_rx_next_from_mb (mb3, b3);
+ }
+ else
+ {
+ next0 = dpdk_rx_next_from_etype (mb0, b0);
+ next1 = dpdk_rx_next_from_etype (mb1, b1);
+ next2 = dpdk_rx_next_from_etype (mb2, b2);
+ next3 = dpdk_rx_next_from_etype (mb3, b3);
+ }
+
+ if (PREDICT_FALSE (or_ol_flags & PKT_RX_IP_CKSUM_BAD))
+ {
+ dpdk_rx_error_from_mb (mb0, &next0, &error0);
+ dpdk_rx_error_from_mb (mb1, &next1, &error1);
+ dpdk_rx_error_from_mb (mb2, &next2, &error2);
+ dpdk_rx_error_from_mb (mb3, &next3, &error3);
+ b0->error = node->errors[error0];
+ b1->error = node->errors[error1];
+ b2->error = node->errors[error2];
+ b3->error = node->errors[error3];
+ }
+ else
+ {
+ b0->error = b1->error = node->errors[DPDK_ERROR_NONE];
+ b2->error = b3->error = node->errors[DPDK_ERROR_NONE];
+ }
+
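+	  /* device_input_next_node_advance[] gives the number of bytes
+	   * (the L2 header) to skip when dispatching straight to an L3
+	   * node such as ip4-input-no-checksum. */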
+ l3_offset0 = device_input_next_node_advance[next0];
+ l3_offset1 = device_input_next_node_advance[next1];
+ l3_offset2 = device_input_next_node_advance[next2];
+ l3_offset3 = device_input_next_node_advance[next3];
+
+ b0->current_data = l3_offset0 + mb0->data_off;
+ b1->current_data = l3_offset1 + mb1->data_off;
+ b2->current_data = l3_offset2 + mb2->data_off;
+ b3->current_data = l3_offset3 + mb3->data_off;
+
+ b0->current_data -= RTE_PKTMBUF_HEADROOM;
+ b1->current_data -= RTE_PKTMBUF_HEADROOM;
+ b2->current_data -= RTE_PKTMBUF_HEADROOM;
+ b3->current_data -= RTE_PKTMBUF_HEADROOM;
+
+ b0->current_length = mb0->data_len - l3_offset0;
+ b1->current_length = mb1->data_len - l3_offset1;
+ b2->current_length = mb2->data_len - l3_offset2;
+ b3->current_length = mb3->data_len - l3_offset3;
+
+ b0->flags = buffer_flags_template;
+ b1->flags = buffer_flags_template;
+ b2->flags = buffer_flags_template;
+ b3->flags = buffer_flags_template;
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
+ vnet_buffer (b1)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
+ vnet_buffer (b2)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
+ vnet_buffer (b3)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
+
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ vnet_buffer (b2)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ vnet_buffer (b3)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+ n_rx_bytes += mb0->pkt_len;
+ n_rx_bytes += mb1->pkt_len;
+ n_rx_bytes += mb2->pkt_len;
+ n_rx_bytes += mb3->pkt_len;
+
+ /* Process subsequent segments of multi-segment packets */
+ if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG)
+ {
+ dpdk_process_subseq_segs (vm, b0, mb0, fl);
+ dpdk_process_subseq_segs (vm, b1, mb1, fl);
+ dpdk_process_subseq_segs (vm, b2, mb2, fl);
+ dpdk_process_subseq_segs (vm, b3, mb3, fl);
+ }
+
+ /*
+ * Turn this on if you run into
+ * "bad monkey" contexts, and you want to know exactly
+ * which nodes they've visited... See main.c...
+ */
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3);
+
+ /* Do we have any driver RX features configured on the interface? */
+ vnet_feature_start_device_input_x4 (xd->vlib_sw_if_index,
+ &next0, &next1, &next2, &next3,
+ b0, b1, b2, b3,
+ l3_offset0, l3_offset1,
+ l3_offset2, l3_offset3);
+
+ vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, bi2, bi3,
+ next0, next1, next2, next3);
+ n_buffers -= 4;
+ mb_index += 4;
+ }
+ while (n_buffers > 0 && n_left_to_next > 0)
+ {
+ struct rte_mbuf *mb0 = xd->rx_vectors[queue_id][mb_index];
+
+ ASSERT (mb0);
+
+ b0 = vlib_buffer_from_rte_mbuf (mb0);
+
+ /* Prefetch one next segment if it exists. */
+ if (PREDICT_FALSE (mb0->nb_segs > 1))
+ dpdk_prefetch_buffer (mb0->next);
+
+ vlib_buffer_init_for_free_list (b0, fl);
+
+ bi0 = vlib_get_buffer_index (vm, b0);
+
+ to_next[0] = bi0;
+ to_next++;
+ n_left_to_next--;
+
+ if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
+ next0 = xd->per_interface_next_index;
+ else if (PREDICT_TRUE
+ ((xd->flags & DPDK_DEVICE_FLAG_PMD_SUPPORTS_PTYPE) != 0))
+ next0 = dpdk_rx_next_from_mb (mb0, b0);
+ else
+ next0 = dpdk_rx_next_from_etype (mb0, b0);
+
+ dpdk_rx_error_from_mb (mb0, &next0, &error0);
+ b0->error = node->errors[error0];
+
+ l3_offset0 = device_input_next_node_advance[next0];
+
+ b0->current_data = l3_offset0;
+ b0->current_data += mb0->data_off - RTE_PKTMBUF_HEADROOM;
+ b0->current_length = mb0->data_len - l3_offset0;
+
+ b0->flags = buffer_flags_template;
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ n_rx_bytes += mb0->pkt_len;
+
+ /* Process subsequent segments of multi-segment packets */
+ dpdk_process_subseq_segs (vm, b0, mb0, fl);
+
+ /*
+ * Turn this on if you run into
+ * "bad monkey" contexts, and you want to know exactly
+ * which nodes they've visited... See main.c...
+ */
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+
+ /* Do we have any driver RX features configured on the interface? */
+ vnet_feature_start_device_input_x1 (xd->vlib_sw_if_index, &next0,
+ b0, l3_offset0);
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ n_buffers--;
+ mb_index++;
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ if (PREDICT_FALSE (vec_len (xd->d_trace_buffers[cpu_index]) > 0))
+ {
+ dpdk_rx_trace (dm, node, xd, queue_id, xd->d_trace_buffers[cpu_index],
+ vec_len (xd->d_trace_buffers[cpu_index]));
+ vlib_set_trace_count (vm, node, n_trace -
+ vec_len (xd->d_trace_buffers[cpu_index]));
+ }
+
+ vlib_increment_combined_counter
+ (vnet_get_main ()->interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ cpu_index, xd->vlib_sw_if_index, mb_index, n_rx_bytes);
+
+ dpdk_worker_t *dw = vec_elt_at_index (dm->workers, cpu_index);
+ dw->aggregate_rx_packets += mb_index;
+
+ return mb_index;
+}
+
+static inline void
+poll_rate_limit (dpdk_main_t * dm)
+{
+ /* Limit the poll rate by sleeping for N msec between polls */
+ if (PREDICT_FALSE (dm->poll_sleep != 0))
+ {
+ struct timespec ts, tsrem;
+
+ ts.tv_sec = 0;
+      ts.tv_nsec = 1000 * 1000 * dm->poll_sleep;	/* poll_sleep is in msec */
+
+ while (nanosleep (&ts, &tsrem) < 0)
+ {
+ ts = tsrem;
+ }
+ }
+}
+
+/** \brief Main DPDK input node
+ @node dpdk-input
+
+ This is the main DPDK input node: across each assigned interface,
+ call rte_eth_rx_burst(...) or similar to obtain a vector of
+ packets to process. Handle early packet discard. Derive @c
+   vlib_buffer_t metadata from <code>struct rte_mbuf</code> metadata.
+   Depending on the resulting metadata, adjust <code>b->current_data,
+   b->current_length</code> and dispatch directly to
+   ip4-input-no-checksum or ip6-input. Trace the packet if required.
+
+ @param vm vlib_main_t corresponding to the current thread
+ @param node vlib_node_runtime_t
+ @param f vlib_frame_t input-node, not used.
+
+ @par Graph mechanics: buffer metadata, next index usage
+
+ @em Uses:
+ - <code>struct rte_mbuf mb->ol_flags</code>
+ - PKT_RX_IP_CKSUM_BAD
+ - <code> RTE_ETH_IS_xxx_HDR(mb->packet_type) </code>
+ - packet classification result
+
+ @em Sets:
+ - <code>b->error</code> if the packet is to be dropped immediately
+ - <code>b->current_data, b->current_length</code>
+ - adjusted as needed to skip the L2 header in direct-dispatch cases
+ - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
+ - rx interface sw_if_index
+ - <code>vnet_buffer(b)->sw_if_index[VLIB_TX] = ~0</code>
+ - required by ipX-lookup
+ - <code>b->flags</code>
+ - to indicate multi-segment pkts (VLIB_BUFFER_NEXT_PRESENT), etc.
+
+ <em>Next Nodes:</em>
+ - Static arcs to: error-drop, ethernet-input,
+ ip4-input-no-checksum, ip6-input, mpls-input
+ - per-interface redirection, controlled by
+ <code>xd->per_interface_next_index</code>
+*/
+
+static uword
+dpdk_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f)
+{
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_t *xd;
+ uword n_rx_packets = 0;
+ dpdk_device_and_queue_t *dq;
+ u32 cpu_index = os_get_cpu_number ();
+
+ /*
+ * Poll all devices on this cpu for input/interrupts.
+ */
+ /* *INDENT-OFF* */
+ vec_foreach (dq, dm->devices_by_cpu[cpu_index])
+ {
+ xd = vec_elt_at_index(dm->devices, dq->device);
+ n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id);
+ }
+ /* *INDENT-ON* */
+
+ poll_rate_limit (dm);
+
+ return n_rx_packets;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (dpdk_input_node) = {
+ .function = dpdk_input,
+ .type = VLIB_NODE_TYPE_INPUT,
+ .name = "dpdk-input",
+ .sibling_of = "device-input",
+
+ /* Will be enabled if/when hardware is detected. */
+ .state = VLIB_NODE_STATE_DISABLED,
+
+ .format_buffer = format_ethernet_header_with_length,
+ .format_trace = format_dpdk_rx_dma_trace,
+
+ .n_errors = DPDK_N_ERROR,
+ .error_strings = dpdk_error_strings,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (dpdk_input_node, dpdk_input);
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/dpdk/qos_doc.md b/src/vnet/devices/dpdk/qos_doc.md
new file mode 100644
index 00000000000..9bd0659d616
--- /dev/null
+++ b/src/vnet/devices/dpdk/qos_doc.md
@@ -0,0 +1,404 @@
+# QoS Hierarchical Scheduler {#qos_doc}
+
+The Quality-of-Service (QoS) scheduler performs egress traffic management by
+prioritizing the transmission of packets belonging to different service types
+and subscribers according to their Service Level Agreements (SLAs). The QoS
+scheduler can be enabled on one or more NIC output interfaces as required.
+
+
+## Overview
+
+The QoS scheduler supports a number of scheduling and shaping levels which
+together form a hierarchical tree. The first level in the hierarchy is the
+port (i.e. the physical interface), which constitutes the root node of the
+tree. The next level is the subport, which represents a group of
+users/subscribers. An individual user/subscriber is represented by a pipe at
+the following level. Each user can carry several traffic types, distinguished
+by their loss-rate, jitter, and latency requirements; these are represented
+at the traffic-class level. The last level contains the queues, grouped so
+that each queue holds the packets of one specific traffic class. A sketch of
+building this hierarchy follows below.
+
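+The fragment below is a minimal sketch of building this hierarchy with the
+underlying DPDK `rte_sched` library. It is illustrative only, not the VPP
+code path: the helper name and the assumption that the parameter structures
+are pre-filled with the defaults listed later in this document are
+hypothetical.
+
+```
+#include <stddef.h>
+#include <rte_sched.h>
+
+/* Hypothetical helper: build the port -> subport -> pipe hierarchy.
+ * port_params and subport_params are assumed to be pre-filled with
+ * the default values listed in the sections below. */
+static struct rte_sched_port *
+hqos_build_hierarchy (struct rte_sched_port_params *port_params,
+                      struct rte_sched_subport_params *subport_params)
+{
+  struct rte_sched_port *port;
+  uint32_t pipe;
+
+  port = rte_sched_port_config (port_params);
+  if (port == NULL)
+    return NULL;
+
+  /* Single subport 0, running at 100% of the port rate. */
+  if (rte_sched_subport_config (port, 0, subport_params) != 0)
+    return NULL;
+
+  /* 4K pipes (users/subscribers), all using pipe profile 0. */
+  for (pipe = 0; pipe < 4096; pipe++)
+    if (rte_sched_pipe_config (port, 0, pipe, 0) != 0)
+      return NULL;
+
+  return port;
+}
+```
+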
+The QoS scheduler implementation requires flow classification, enqueue and
+dequeue operations. Flow classification is a mandatory stage for HQoS: each
+incoming packet is classified by mapping its packet-field information to a
+5-tuple (HQoS subport, pipe, traffic class, queue within traffic class, and
+color), which is stored in the mbuf sched field. The enqueue operation uses
+this information to determine the queue for storing the packet; at this
+stage, if the specific queue is full, QoS drops the packet. The dequeue
+operation consists of scheduling the packet based on its length and the
+available credits, and handing the scheduled packet over to the output
+interface.
+
+For more information on the QoS scheduler, please refer to the DPDK
+Programmer's Guide:
+http://dpdk.org/doc/guides/prog_guide/qos_framework.html
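+
+The sketch below expresses these three stages directly against the DPDK
+`rte_sched` API (signatures as of the DPDK version targeted by this tree).
+The helper name `hqos_sched_burst` and the hard-coded classification values
+are illustrative assumptions, not the VPP code path.
+
+```
+#include <rte_mbuf.h>
+#include <rte_meter.h>
+#include <rte_sched.h>
+
+/* Hypothetical helper: classify a burst, enqueue it into the HQoS
+ * hierarchy, then dequeue whatever the scheduler releases. */
+static int
+hqos_sched_burst (struct rte_sched_port *port,
+                  struct rte_mbuf **pkts, uint32_t n_pkts)
+{
+  uint32_t i;
+
+  /* Classification: store the 5-tuple in the mbuf sched field.
+   * Subport/pipe/tc/queue 0 and color green are placeholders. */
+  for (i = 0; i < n_pkts; i++)
+    rte_sched_port_pkt_write (pkts[i], 0, 0, 0, 0, e_RTE_METER_GREEN);
+
+  /* Enqueue: packets aimed at a full queue are dropped here. */
+  rte_sched_port_enqueue (port, pkts, n_pkts);
+
+  /* Dequeue: only packets covered by available credits come back;
+   * these are then handed to the NIC output interface. */
+  return rte_sched_port_dequeue (port, pkts, n_pkts);
+}
+```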
+
+
+### QoS Scheduler Parameters
+
+The following illustrates the default HQoS configuration for each 10GbE
+output port:
+
+Single subport (subport 0):
+ - Subport rate set to 100% of port rate
+ - Each of the 4 traffic classes has rate set to 100% of port rate
+
+4K pipes per subport 0 (pipes 0 .. 4095) with identical configuration:
+   - Pipe rate set to 1/4K of the port rate (1250000000 / 4096 ~= 305175
+     bytes/second, which matches pipe profile 0 below)
+ - Each of the 4 traffic classes has rate set to 100% of pipe rate
+ - Within each traffic class, the byte-level WRR weights for the 4 queues are set to 1:1:1:1
+
+
+#### Port configuration
+
+```
+port {
+ rate 1250000000 /* Assuming 10GbE port */
+ frame_overhead 24 /* Overhead fields per Ethernet frame:
+ * 7B (Preamble) +
+ * 1B (Start of Frame Delimiter (SFD)) +
+ * 4B (Frame Check Sequence (FCS)) +
+ * 12B (Inter Frame Gap (IFG))
+ */
+ mtu 1522 /* Assuming Ethernet/IPv4 pkt (FCS not included) */
+ n_subports_per_port 1 /* Number of subports per output interface */
+ n_pipes_per_subport 4096 /* Number of pipes (users/subscribers) */
+ queue_sizes 64 64 64 64 /* Packet queue size for each traffic class.
+ * All queues within the same pipe traffic class
+ * have the same size. Queues from different
+ * pipes serving the same traffic class have
+ * the same size. */
+}
+```
+
+
+#### Subport configuration
+
+```
+subport 0 {
+ tb_rate 1250000000 /* Subport level token bucket rate (bytes per second) */
+ tb_size 1000000 /* Subport level token bucket size (bytes) */
+ tc0_rate 1250000000 /* Subport level token bucket rate for traffic class 0 (bytes per second) */
+ tc1_rate 1250000000 /* Subport level token bucket rate for traffic class 1 (bytes per second) */
+ tc2_rate 1250000000 /* Subport level token bucket rate for traffic class 2 (bytes per second) */
+ tc3_rate 1250000000 /* Subport level token bucket rate for traffic class 3 (bytes per second) */
+ tc_period 10 /* Time interval for refilling the token bucket associated with traffic class (Milliseconds) */
+ pipe 0 4095 profile 0 /* pipes (users/subscribers) configured with pipe profile 0 */
+}
+```
+
+
+#### Pipe configuration
+
+```
+pipe_profile 0 {
+ tb_rate 305175 /* Pipe level token bucket rate (bytes per second) */
+ tb_size 1000000 /* Pipe level token bucket size (bytes) */
+ tc0_rate 305175 /* Pipe level token bucket rate for traffic class 0 (bytes per second) */
+ tc1_rate 305175 /* Pipe level token bucket rate for traffic class 1 (bytes per second) */
+ tc2_rate 305175 /* Pipe level token bucket rate for traffic class 2 (bytes per second) */
+ tc3_rate 305175 /* Pipe level token bucket rate for traffic class 3 (bytes per second) */
+ tc_period 40 /* Time interval for refilling the token bucket associated with traffic class at pipe level (Milliseconds) */
+ tc3_oversubscription_weight 1 /* Weight traffic class 3 oversubscription */
+ tc0_wrr_weights 1 1 1 1 /* Pipe queues WRR weights for traffic class 0 */
+ tc1_wrr_weights 1 1 1 1 /* Pipe queues WRR weights for traffic class 1 */
+ tc2_wrr_weights 1 1 1 1 /* Pipe queues WRR weights for traffic class 2 */
+ tc3_wrr_weights 1 1 1 1 /* Pipe queues WRR weights for traffic class 3 */
+}
+```
+
+
+#### Random Early Detection (RED) parameters per traffic class and color (Green / Yellow / Red)
+
+```
+red {
+ tc0_wred_min 48 40 32 /* Minimum threshold for traffic class 0 queue (min_th) in number of packets */
+ tc0_wred_max 64 64 64 /* Maximum threshold for traffic class 0 queue (max_th) in number of packets */
+ tc0_wred_inv_prob 10 10 10 /* Inverse of packet marking probability for traffic class 0 queue (maxp = 1 / maxp_inv) */
+ tc0_wred_weight 9 9 9 /* Traffic Class 0 queue weight */
+ tc1_wred_min 48 40 32 /* Minimum threshold for traffic class 1 queue (min_th) in number of packets */
+ tc1_wred_max 64 64 64 /* Maximum threshold for traffic class 1 queue (max_th) in number of packets */
+ tc1_wred_inv_prob 10 10 10 /* Inverse of packet marking probability for traffic class 1 queue (maxp = 1 / maxp_inv) */
+ tc1_wred_weight 9 9 9 /* Traffic Class 1 queue weight */
+ tc2_wred_min 48 40 32 /* Minimum threshold for traffic class 2 queue (min_th) in number of packets */
+ tc2_wred_max 64 64 64 /* Maximum threshold for traffic class 2 queue (max_th) in number of packets */
+ tc2_wred_inv_prob 10 10 10 /* Inverse of packet marking probability for traffic class 2 queue (maxp = 1 / maxp_inv) */
+ tc2_wred_weight 9 9 9 /* Traffic Class 2 queue weight */
+ tc3_wred_min 48 40 32 /* Minimum threshold for traffic class 3 queue (min_th) in number of packets */
+ tc3_wred_max 64 64 64 /* Maximum threshold for traffic class 3 queue (max_th) in number of packets */
+ tc3_wred_inv_prob 10 10 10 /* Inverse of packet marking probability for traffic class 3 queue (maxp = 1 / maxp_inv) */
+ tc3_wred_weight 9 9 9 /* Traffic Class 3 queue weight */
+}
+```
+
+
+### DPDK QoS Scheduler Integration in VPP
+
+The Hierarchical Quality-of-Service (HQoS) scheduler object can be viewed as
+part of the logical NIC output interface. To enable HQoS on a specific
+output interface, the vpp startup.conf file has to be configured
+accordingly. An output interface that requires HQoS should have the "hqos"
+parameter specified in the dpdk section. Another optional parameter,
+"hqos-thread", can be used to associate the output interface with a specific
+HQoS thread. In the cpu section of the config file, "corelist-hqos-threads"
+is introduced to assign logical cpu cores to run the HQoS threads. An HQoS
+thread can run multiple HQoS objects, each associated with a different
+output interface. Instead of writing packets to the NIC TX queue directly,
+worker threads write them to software queues. The HQoS threads read the
+software queues, enqueue the packets to HQoS objects, and also dequeue
+packets from HQoS objects and write them to NIC output interfaces. Since
+every worker thread must be able to send packets to any output interface,
+each HQoS object associated with a NIC output interface has one software
+queue per worker thread.
+
+The following sample startup configuration runs 4 worker threads feeding 2
+HQoS threads, each of which handles the QoS scheduler for one output
+interface.
+
+```
+dpdk {
+ socket-mem 16384,16384
+
+ dev 0000:02:00.0 {
+ num-rx-queues 2
+ hqos
+ }
+ dev 0000:06:00.0 {
+ num-rx-queues 2
+ hqos
+ }
+
+ num-mbufs 1000000
+}
+
+cpu {
+ main-core 0
+ corelist-workers 1, 2, 3, 4
+ corelist-hqos-threads 5, 6
+}
+```
+
+
+### QoS Scheduler CLI Commands
+
+Each QoS scheduler instance is initialised with the default parameters
+required to configure the hqos port, subport, pipe and queues. Some of the
+parameters can be re-configured at run time through CLI commands.
+
+
+#### Configuration
+
+The following commands can be used to configure QoS scheduler parameters.
+
+The command below can be used to set the subport level parameters such as
+token bucket rate (bytes per second), token bucket size (bytes), traffic
+class rates (bytes per second) and token update period (milliseconds).
+
+```
+set dpdk interface hqos subport <if-name> subport <n> [rate <n>]
+ [bktsize <n>] [tc0 <n>] [tc1 <n>] [tc2 <n>] [tc3 <n>] [period <n>]
+```
+
+For setting the pipe profile, the following command can be used.
+
+```
+set dpdk interface hqos pipe <if-name> subport <n> pipe <n> profile <n>
+```
+
+To assign a QoS scheduler instance to a specific thread, the following
+command can be used.
+
+```
+set dpdk interface hqos placement <if-name> thread <n>
+```
+
+The command below is used to set the packet fields required for classifying
+an incoming packet. As a result of the classification process, the packet
+field information is mapped to a 5-tuple (subport, pipe, traffic class,
+queue within traffic class, color) and stored in the packet mbuf.
+
+```
+set dpdk interface hqos pktfield <if-name> id <n> offset <n> mask <n>
+```
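+
+As an illustration only (the interface name and values here are
+hypothetical, chosen simply to mirror the "show" output later in this
+section), mapping packet field 2 onto the DSCP bits of the packet header
+could look like:
+
+```
+set dpdk interface hqos pktfield TenGigabitEthernet2/0/0 id 2 offset 8
+ mask 0x00000000000000fc
+```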
+
+The DSCP table entries used for identifying the traffic class and queue can
+be set using the command below:
+
+```
+set dpdk interface hqos tctbl <if-name> entry <n> tc <n> queue <n>
+```
+
+
+#### Show Command
+
+The QoS Scheduler configuration can be displayed using the command below.
+
+```
+ vpp# show dpdk interface hqos TenGigabitEthernet2/0/0
+ Thread:
+ Input SWQ size = 4096 packets
+ Enqueue burst size = 256 packets
+ Dequeue burst size = 220 packets
+ Packet field 0: slab position = 0, slab bitmask = 0x0000000000000000
+ Packet field 1: slab position = 40, slab bitmask = 0x0000000fff000000
+ Packet field 2: slab position = 8, slab bitmask = 0x00000000000000fc
+ Packet field 2 translation table:
+ [ 0 .. 15]: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ [16 .. 31]: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ [32 .. 47]: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ [48 .. 63]: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ Port:
+ Rate = 1250000000 bytes/second
+ MTU = 1514 bytes
+ Frame overhead = 24 bytes
+ Number of subports = 1
+ Number of pipes per subport = 4096
+ Packet queue size: TC0 = 64, TC1 = 64, TC2 = 64, TC3 = 64 packets
+ Number of pipe profiles = 1
+ Pipe profile 0:
+ Rate = 305175 bytes/second
+ Token bucket size = 1000000 bytes
+ Traffic class rate: TC0 = 305175, TC1 = 305175, TC2 = 305175, TC3 = 305175 bytes/second
+ TC period = 40 milliseconds
+ TC0 WRR weights: Q0 = 1, Q1 = 1, Q2 = 1, Q3 = 1
+ TC1 WRR weights: Q0 = 1, Q1 = 1, Q2 = 1, Q3 = 1
+ TC2 WRR weights: Q0 = 1, Q1 = 1, Q2 = 1, Q3 = 1
+ TC3 WRR weights: Q0 = 1, Q1 = 1, Q2 = 1, Q3 = 1
+```
+
+The QoS Scheduler placement over the logical cpu cores can be displayed
+using the command below.
+
+```
+ vpp# show dpdk interface hqos placement
+ Thread 5 (vpp_hqos-threads_0 at lcore 5):
+ TenGigabitEthernet2/0/0 queue 0
+ Thread 6 (vpp_hqos-threads_1 at lcore 6):
+ TenGigabitEthernet4/0/1 queue 0
+```
+
+
+### QoS Scheduler Binary APIs
+
+This section explains the available binary APIs for configuring QoS
+scheduler parameters at run time.
+
+The following API can be used to set the pipe profile of a pipe that belongs
+to a given subport:
+
+```
+sw_interface_set_dpdk_hqos_pipe rx <intfc> | sw_if_index <id>
+ subport <subport-id> pipe <pipe-id> profile <profile-id>
+```
+
+The data structures used to set the pipe profile parameter are as follows:
+
+```
+ /** \\brief DPDK interface HQoS pipe profile set request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - the interface
+ @param subport - subport ID
+ @param pipe - pipe ID within its subport
+ @param profile - pipe profile ID
+ */
+ define sw_interface_set_dpdk_hqos_pipe {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 subport;
+ u32 pipe;
+ u32 profile;
+ };
+
+ /** \\brief DPDK interface HQoS pipe profile set reply
+ @param context - sender context, to match reply w/ request
+ @param retval - request return code
+ */
+ define sw_interface_set_dpdk_hqos_pipe_reply {
+ u32 context;
+ i32 retval;
+ };
+```
+
+The following API can be used to set the subport level parameters, for
+example token bucket rate (bytes per second), token bucket size (bytes),
+traffic class rates (bytes per second) and the token update period.
+
+```
+sw_interface_set_dpdk_hqos_subport rx <intfc> | sw_if_index <id>
+ subport <subport-id> [rate <n>] [bktsize <n>]
+ [tc0 <n>] [tc1 <n>] [tc2 <n>] [tc3 <n>] [period <n>]
+```
+
+The data structures used to set the subport level parameters are as follows:
+
+```
+ /** \\brief DPDK interface HQoS subport parameters set request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - the interface
+ @param subport - subport ID
+ @param tb_rate - subport token bucket rate (measured in bytes/second)
+ @param tb_size - subport token bucket size (measured in credits)
+ @param tc_rate - subport traffic class 0 .. 3 rates (measured in bytes/second)
+ @param tc_period - enforcement period for rates (measured in milliseconds)
+ */
+ define sw_interface_set_dpdk_hqos_subport {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 subport;
+ u32 tb_rate;
+ u32 tb_size;
+ u32 tc_rate[4];
+ u32 tc_period;
+ };
+
+ /** \\brief DPDK interface HQoS subport parameters set reply
+ @param context - sender context, to match reply w/ request
+ @param retval - request return code
+ */
+ define sw_interface_set_dpdk_hqos_subport_reply {
+ u32 context;
+ i32 retval;
+ };
+```
+
+The following API can be used to set a DSCP table entry. The DSCP table has
+64 entries mapping the packet DSCP field onto a traffic class and HQoS
+input queue.
+
+```
+sw_interface_set_dpdk_hqos_tctbl rx <intfc> | sw_if_index <id>
+ entry <n> tc <n> queue <n>
+```
+
+The data structures used for setting DSCP table entries are given below.
+
+```
+ /** \\brief DPDK interface HQoS tctbl entry set request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - the interface
+ @param entry - entry index ID
+ @param tc - traffic class (0 .. 3)
+ @param queue - traffic class queue (0 .. 3)
+ */
+ define sw_interface_set_dpdk_hqos_tctbl {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 entry;
+ u32 tc;
+ u32 queue;
+ };
+
+ /** \\brief DPDK interface HQoS tctbl entry set reply
+ @param context - sender context, to match reply w/ request
+ @param retval - request return code
+ */
+ define sw_interface_set_dpdk_hqos_tctbl_reply {
+ u32 context;
+ i32 retval;
+ };
+```
diff --git a/src/vnet/devices/netmap/cli.c b/src/vnet/devices/netmap/cli.c
new file mode 100644
index 00000000000..68695250506
--- /dev/null
+++ b/src/vnet/devices/netmap/cli.c
@@ -0,0 +1,146 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+#include <stdint.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <vnet/devices/netmap/net_netmap.h>
+#include <vnet/devices/netmap/netmap.h>
+
+static clib_error_t *
+netmap_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *host_if_name = NULL;
+ u8 hwaddr[6];
+ u8 *hw_addr_ptr = 0;
+ int r;
+ u8 is_pipe = 0;
+ u8 is_master = 0;
+ u32 sw_if_index = ~0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "name %s", &host_if_name))
+ ;
+ else
+ if (unformat
+ (line_input, "hw-addr %U", unformat_ethernet_address, hwaddr))
+ hw_addr_ptr = hwaddr;
+ else if (unformat (line_input, "pipe"))
+ is_pipe = 1;
+ else if (unformat (line_input, "master"))
+ is_master = 1;
+ else if (unformat (line_input, "slave"))
+ is_master = 0;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ if (host_if_name == NULL)
+ return clib_error_return (0, "missing host interface name");
+
+ r =
+ netmap_create_if (vm, host_if_name, hw_addr_ptr, is_pipe, is_master,
+ &sw_if_index);
+
+ if (r == VNET_API_ERROR_SYSCALL_ERROR_1)
+ return clib_error_return (0, "%s (errno %d)", strerror (errno), errno);
+
+ if (r == VNET_API_ERROR_INVALID_INTERFACE)
+ return clib_error_return (0, "Invalid interface name");
+
+ if (r == VNET_API_ERROR_SUBIF_ALREADY_EXISTS)
+ return clib_error_return (0, "Interface already exists");
+
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (),
+ sw_if_index);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (netmap_create_command, static) = {
+ .path = "create netmap",
+ .short_help = "create netmap name [<intf name>|valeXXX:YYY] "
+ "[hw-addr <mac>] [pipe] [master|slave]",
+ .function = netmap_create_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+netmap_delete_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *host_if_name = NULL;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "name %s", &host_if_name))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ if (host_if_name == NULL)
+ return clib_error_return (0, "missing host interface name");
+
+ netmap_delete_if (vm, host_if_name);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (netmap_delete_command, static) = {
+ .path = "delete netmap",
+ .short_help = "delete netmap name <interface name>",
+ .function = netmap_delete_command_fn,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+netmap_cli_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (netmap_cli_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/netmap/device.c b/src/vnet/devices/netmap/device.c
new file mode 100644
index 00000000000..2152824f733
--- /dev/null
+++ b/src/vnet/devices/netmap/device.c
@@ -0,0 +1,261 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdint.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <vnet/devices/netmap/net_netmap.h>
+#include <vnet/devices/netmap/netmap.h>
+
+#define foreach_netmap_tx_func_error \
+_(NO_FREE_SLOTS, "no free tx slots") \
+_(PENDING_MSGS, "pending msgs in tx ring")
+
+typedef enum
+{
+#define _(f,s) NETMAP_TX_ERROR_##f,
+ foreach_netmap_tx_func_error
+#undef _
+ NETMAP_TX_N_ERROR,
+} netmap_tx_func_error_t;
+
+static char *netmap_tx_func_error_strings[] = {
+#define _(n,s) s,
+ foreach_netmap_tx_func_error
+#undef _
+};
+
+
+static u8 *
+format_netmap_device_name (u8 * s, va_list * args)
+{
+ u32 i = va_arg (*args, u32);
+ netmap_main_t *apm = &netmap_main;
+ netmap_if_t *nif = pool_elt_at_index (apm->interfaces, i);
+
+ s = format (s, "netmap-%s", nif->host_if_name);
+ return s;
+}
+
+static u8 *
+format_netmap_device (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ int verbose = va_arg (*args, int);
+ netmap_main_t *nm = &netmap_main;
+ netmap_if_t *nif = vec_elt_at_index (nm->interfaces, dev_instance);
+ uword indent = format_get_indent (s);
+
+ s = format (s, "NETMAP interface");
+ if (verbose)
+ {
+ s = format (s, "\n%U version %d flags 0x%x"
+ "\n%U region %u memsize 0x%x offset 0x%x"
+ "\n%U tx_slots %u rx_slots %u tx_rings %u rx_rings %u",
+ format_white_space, indent + 2,
+ nif->req->nr_version,
+ nif->req->nr_flags,
+ format_white_space, indent + 2,
+ nif->mem_region,
+ nif->req->nr_memsize,
+ nif->req->nr_offset,
+ format_white_space, indent + 2,
+ nif->req->nr_tx_slots,
+ nif->req->nr_rx_slots,
+ nif->req->nr_tx_rings, nif->req->nr_rx_rings);
+ }
+ return s;
+}
+
+static u8 *
+format_netmap_tx_trace (u8 * s, va_list * args)
+{
+ s = format (s, "Unimplemented...");
+ return s;
+}
+
+static uword
+netmap_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ netmap_main_t *nm = &netmap_main;
+ u32 *buffers = vlib_frame_args (frame);
+ u32 n_left = frame->n_vectors;
+ f64 const time_constant = 1e3;
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ netmap_if_t *nif = pool_elt_at_index (nm->interfaces, rd->dev_instance);
+ int cur_ring;
+
+ if (PREDICT_FALSE (nif->lockp != 0))
+ {
+ while (__sync_lock_test_and_set (nif->lockp, 1))
+ ;
+ }
+
+ cur_ring = nif->first_tx_ring;
+
+ while (n_left && cur_ring <= nif->last_tx_ring)
+ {
+ struct netmap_ring *ring = NETMAP_TXRING (nif->nifp, cur_ring);
+ int n_free_slots = nm_ring_space (ring);
+ uint cur = ring->cur;
+
+ if (nm_tx_pending (ring))
+ {
+ if (ioctl (nif->fd, NIOCTXSYNC, NULL) < 0)
+ clib_unix_warning ("NIOCTXSYNC");
+ clib_cpu_time_wait (time_constant);
+
+ if (nm_tx_pending (ring) && !n_free_slots)
+ {
+ cur_ring++;
+ continue;
+ }
+ }
+
+ while (n_left && n_free_slots)
+ {
+ vlib_buffer_t *b0 = 0;
+ u32 bi = buffers[0];
+ u32 len;
+ u32 offset = 0;
+ buffers++;
+
+ struct netmap_slot *slot = &ring->slot[cur];
+
+ do
+ {
+ b0 = vlib_get_buffer (vm, bi);
+ len = b0->current_length;
+	      /* gather the whole vlib buffer chain into this netmap slot */
+ clib_memcpy ((u8 *) NETMAP_BUF (ring, slot->buf_idx) + offset,
+ vlib_buffer_get_current (b0), len);
+ offset += len;
+ }
+	  while ((b0->flags & VLIB_BUFFER_NEXT_PRESENT)
+		 && (bi = b0->next_buffer));
+
+ slot->len = offset;
+ cur = (cur + 1) % ring->num_slots;
+ n_free_slots--;
+ n_left--;
+ }
+ CLIB_MEMORY_BARRIER ();
+ ring->head = ring->cur = cur;
+ }
+
+ if (n_left < frame->n_vectors)
+ ioctl (nif->fd, NIOCTXSYNC, NULL);
+
+ if (PREDICT_FALSE (nif->lockp != 0))
+ *nif->lockp = 0;
+
+ if (n_left)
+ vlib_error_count (vm, node->node_index,
+ (n_left ==
+ frame->n_vectors ? NETMAP_TX_ERROR_PENDING_MSGS :
+ NETMAP_TX_ERROR_NO_FREE_SLOTS), n_left);
+
+ vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
+ return frame->n_vectors;
+}
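
The tx function above flattens each (possibly chained) vlib buffer into a single netmap slot, publishes head/cur together, and kicks the kernel with NIOCTXSYNC. Outside VPP, the same ring protocol reduces to a few lines. A minimal sketch, assuming fd was bound with NIOCREGIF, nifp points into the mmap()ed region, and len fits in one netmap buffer (send_one_frame is a hypothetical helper, not part of this patch):

    #include <stdint.h>
    #include <string.h>
    #include <net/if.h>
    #include <sys/ioctl.h>
    #include <vnet/devices/netmap/net_netmap.h>

    static int
    send_one_frame (int fd, struct netmap_if *nifp, const void *pkt,
                    uint16_t len)
    {
      /* tx ring 0, resolved via its offset (cf. the NETMAP_TXRING helper) */
      struct netmap_ring *ring =
        (struct netmap_ring *) ((char *) nifp + nifp->ring_ofs[0]);
      uint32_t cur = ring->cur;

      if (cur == ring->tail)            /* no free tx slots */
        return -1;

      struct netmap_slot *slot = &ring->slot[cur];
      memcpy ((char *) ring + ring->buf_ofs
              + (size_t) slot->buf_idx * ring->nr_buf_size, pkt, len);
      slot->len = len;

      cur = (cur + 1 == ring->num_slots) ? 0 : cur + 1;
      ring->head = ring->cur = cur;     /* hand the slot to the kernel */
      return ioctl (fd, NIOCTXSYNC, NULL);
    }
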
+
+static void
+netmap_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
+ u32 node_index)
+{
+ netmap_main_t *apm = &netmap_main;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ netmap_if_t *nif = pool_elt_at_index (apm->interfaces, hw->dev_instance);
+
+ /* Shut off redirection */
+ if (node_index == ~0)
+ {
+ nif->per_interface_next_index = node_index;
+ return;
+ }
+
+ nif->per_interface_next_index =
+ vlib_node_add_next (vlib_get_main (), netmap_input_node.index,
+ node_index);
+}
+
+static void
+netmap_clear_hw_interface_counters (u32 instance)
+{
+ /* Nothing for now */
+}
+
+static clib_error_t *
+netmap_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+{
+ netmap_main_t *apm = &netmap_main;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ netmap_if_t *nif = pool_elt_at_index (apm->interfaces, hw->dev_instance);
+ u32 hw_flags;
+
+ nif->is_admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+
+ if (nif->is_admin_up)
+ hw_flags = VNET_HW_INTERFACE_FLAG_LINK_UP;
+ else
+ hw_flags = 0;
+
+ vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags);
+
+ return 0;
+}
+
+static clib_error_t *
+netmap_subif_add_del_function (vnet_main_t * vnm,
+ u32 hw_if_index,
+ struct vnet_sw_interface_t *st, int is_add)
+{
+ /* Nothing for now */
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (netmap_device_class) = {
+ .name = "netmap",
+ .tx_function = netmap_interface_tx,
+ .format_device_name = format_netmap_device_name,
+ .format_device = format_netmap_device,
+ .format_tx_trace = format_netmap_tx_trace,
+ .tx_function_n_errors = NETMAP_TX_N_ERROR,
+ .tx_function_error_strings = netmap_tx_func_error_strings,
+ .rx_redirect_to_node = netmap_set_interface_next_node,
+ .clear_counters = netmap_clear_hw_interface_counters,
+ .admin_up_down_function = netmap_interface_admin_up_down,
+ .subif_add_del_function = netmap_subif_add_del_function,
+};
+
+VLIB_DEVICE_TX_FUNCTION_MULTIARCH(netmap_device_class,
+ netmap_interface_tx)
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/netmap/net_netmap.h b/src/vnet/devices/netmap/net_netmap.h
new file mode 100644
index 00000000000..fd4253b7c0c
--- /dev/null
+++ b/src/vnet/devices/netmap/net_netmap.h
@@ -0,0 +1,650 @@
+/*
+ * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD: head/sys/net/netmap.h 251139 2013-05-30 14:07:14Z luigi $
+ *
+ * Definitions of constants and the structures used by the netmap
+ * framework, for the part visible to both kernel and userspace.
+ * Detailed info on netmap is available with "man netmap" or at
+ *
+ * http://info.iet.unipi.it/~luigi/netmap/
+ *
+ * This API is also used to communicate with the VALE software switch
+ */
+
+#ifndef _NET_NETMAP_H_
+#define _NET_NETMAP_H_
+
+#define NETMAP_API 11 /* current API version */
+
+#define NETMAP_MIN_API 11 /* min and max versions accepted */
+#define NETMAP_MAX_API 15
+/*
+ * Some fields should be cache-aligned to reduce contention.
+ * The alignment is architecture and OS dependent, but rather than
+ * digging into OS headers to find the exact value we use an estimate
+ * that should cover most architectures.
+ */
+#define NM_CACHE_ALIGN 128
+
+/*
+ * --- Netmap data structures ---
+ *
+ * The userspace data structures used by netmap are shown below.
+ * They are allocated by the kernel and mmap()ed by userspace threads.
+ * Pointers are implemented as memory offsets or indexes,
+ * so that they can be easily dereferenced in kernel and userspace.
+
+ KERNEL (opaque, obviously)
+
+ ====================================================================
+ |
+ USERSPACE | struct netmap_ring
+ +---->+---------------+
+ / | head,cur,tail |
+ struct netmap_if (nifp, 1 per fd) / | buf_ofs |
+ +---------------+ / | other fields |
+ | ni_tx_rings | / +===============+
+ | ni_rx_rings | / | buf_idx, len | slot[0]
+ | | / | flags, ptr |
+ | | / +---------------+
+ +===============+ / | buf_idx, len | slot[1]
+ | txring_ofs[0] | (rel.to nifp)--' | flags, ptr |
+ | txring_ofs[1] | +---------------+
+ (tx+1 entries) (num_slots entries)
+ | txring_ofs[t] | | buf_idx, len | slot[n-1]
+ +---------------+ | flags, ptr |
+ | rxring_ofs[0] | +---------------+
+ | rxring_ofs[1] |
+ (rx+1 entries)
+ | rxring_ofs[r] |
+ +---------------+
+
+ * For each "interface" (NIC, host stack, PIPE, VALE switch port) bound to
+ * a file descriptor, the mmap()ed region contains a (logically readonly)
+ * struct netmap_if pointing to struct netmap_ring's.
+ *
+ * There is one netmap_ring per physical NIC ring, plus one tx/rx ring
+ * pair attached to the host stack (this pair is unused for non-NIC ports).
+ *
+ * All physical/host stack ports share the same memory region,
+ * so that zero-copy can be implemented between them.
+ * VALE switch ports instead have separate memory regions.
+ *
+ * The netmap_ring is the userspace-visible replica of the NIC ring.
+ * Each slot has the index of a buffer (MTU-sized and residing in the
+ * mmapped region), its length and some flags. An extra 64-bit pointer
+ * is provided for user-supplied buffers in the tx path.
+ *
+ * In user space, the buffer address is computed as
+ * (char *)ring + buf_ofs + index * NETMAP_BUF_SIZE
+ *
+ * Added in NETMAP_API 11:
+ *
+ * + NIOCREGIF can request the allocation of extra spare buffers from
+ * the same memory pool. The desired number of buffers must be in
+ * nr_arg3. The ioctl may return fewer buffers, depending on memory
+ * availability. nr_arg3 will return the actual value, and, once
+ * mapped, nifp->ni_bufs_head will be the index of the first buffer.
+ *
+ * The buffers are linked to each other using the first uint32_t
+ * as the index. On close, ni_bufs_head must point to the list of
+ * buffers to be released.
+ *
+ * + NIOCREGIF can request space for extra rings (and buffers)
+ * allocated in the same memory space. The number of extra rings
+ * is in nr_arg1, and is advisory. This is a no-op on NICs where
+ * the size of the memory space is fixed.
+ *
+ * + NIOCREGIF can attach to PIPE rings sharing the same memory
+ * space with a parent device. The ifname indicates the parent device,
+ * which must already exist. Flags in nr_flags indicate if we want to
+ * bind the master or slave side, the index (from nr_ringid)
+ * is just a cookie and does not need to be sequential.
+ *
+ * + NIOCREGIF can also attach to 'monitor' rings that replicate
+ * the content of specific rings, also from the same memory space.
+ *
+ * Extra flags in nr_flags support the above functions.
+ * Application libraries may use the following naming scheme:
+ * netmap:foo all NIC ring pairs
+ * netmap:foo^ only host ring pair
+ * netmap:foo+ all NIC ring + host ring pairs
+ * netmap:foo-k the k-th NIC ring pair
+ * netmap:foo{k PIPE ring pair k, master side
+ * netmap:foo}k PIPE ring pair k, slave side
+ */
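
Because pointers are offsets, userspace resolves rings and buffers relative to the mapped region. A sketch of the arithmetic, equivalent to the usual NETMAP_TXRING()/NETMAP_BUF() helpers (nifp, r and i are assumed to be in scope):

    struct netmap_ring *ring =
      (struct netmap_ring *) ((char *) nifp + nifp->ring_ofs[r]);
    char *buf = (char *) ring + ring->buf_ofs
      + (size_t) ring->slot[i].buf_idx * ring->nr_buf_size;
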
+
+/*
+ * struct netmap_slot is a buffer descriptor
+ */
+struct netmap_slot {
+ uint32_t buf_idx; /* buffer index */
+ uint16_t len; /* length for this slot */
+ uint16_t flags; /* buf changed, etc. */
+ uint64_t ptr; /* pointer for indirect buffers */
+};
+
+/*
+ * The following flags control how the slot is used
+ */
+
+#define NS_BUF_CHANGED 0x0001 /* buf_idx changed */
+ /*
+ * must be set whenever buf_idx is changed (as it might be
+ * necessary to recompute the physical address and mapping)
+ *
+ * It is also set by the kernel whenever the buf_idx is
+ * changed internally (e.g., by pipes). Applications may
+ * use this information to know when they can reuse the
+ * contents of previously prepared buffers.
+ */
+
+#define NS_REPORT 0x0002 /* ask the hardware to report results */
+ /*
+ * Request notification when slot is used by the hardware.
+ * Normally transmit completions are handled lazily and
+ * may be unreported. This flag lets us know when a slot
+ * has been sent (e.g. to terminate the sender).
+ */
+
+#define NS_FORWARD 0x0004 /* pass packet 'forward' */
+ /*
+ * (Only for physical ports, rx rings with NR_FORWARD set).
+ * Slot released to the kernel (i.e. before ring->head) with
+ * this flag set are passed to the peer ring (host/NIC),
+ * thus restoring the host-NIC connection for these slots.
+ * This supports efficient traffic monitoring or firewalling.
+ */
+
+#define NS_NO_LEARN 0x0008 /* disable bridge learning */
+ /*
+ * On a VALE switch, do not 'learn' the source port for
+ * this buffer.
+ */
+
+#define NS_INDIRECT 0x0010 /* userspace buffer */
+ /*
+ * (VALE tx rings only) data is in a userspace buffer,
+ * whose address is in the 'ptr' field in the slot.
+ */
+
+#define NS_MOREFRAG 0x0020 /* packet has more fragments */
+ /*
+ * (VALE ports only)
+ * Set on all but the last slot of a multi-segment packet.
+ * The 'len' field refers to the individual fragment.
+ */
+
+#define NS_PORT_SHIFT 8
+#define NS_PORT_MASK (0xff << NS_PORT_SHIFT)
+ /*
+ * The high 8 bits of the flag, if not zero, indicate the
+ * destination port for the VALE switch, overriding
+ * the lookup table.
+ */
+
+#define NS_RFRAGS(_slot) ( ((_slot)->flags >> 8) & 0xff)
+ /*
+ * (VALE rx rings only) the high 8 bits
+ * are the number of fragments.
+ */
+
+
+/*
+ * struct netmap_ring
+ *
+ * Netmap representation of a TX or RX ring (also known as "queue").
+ * This is a queue implemented as a fixed-size circular array.
+ * At the software level the important fields are: head, cur, tail.
+ *
+ * In TX rings:
+ *
+ * head first slot available for transmission.
+ * cur wakeup point. select() and poll() will unblock
+ * when 'tail' moves past 'cur'
+ * tail (readonly) first slot reserved to the kernel
+ *
+ * [head .. tail-1] can be used for new packets to send;
+ * 'head' and 'cur' must be incremented as slots are filled
+ * with new packets to be sent;
+ * 'cur' can be moved further ahead if we need more space
+ * for new transmissions. XXX todo (2014-03-12)
+ *
+ * In RX rings:
+ *
+ * head first valid received packet
+ * cur wakeup point. select() and poll() will unblock
+ * when 'tail' moves past 'cur'
+ * tail (readonly) first slot reserved to the kernel
+ *
+ * [head .. tail-1] contain received packets;
+ * 'head' and 'cur' must be incremented as slots are consumed
+ * and can be returned to the kernel;
+ * 'cur' can be moved further ahead if we want to wait for
+ * new packets without returning the previous ones.
+ *
+ * DATA OWNERSHIP/LOCKING:
+ * The netmap_ring, and all slots and buffers in the range
+ * [head .. tail-1] are owned by the user program;
+ * the kernel only accesses them during a netmap system call
+ * and in the user thread context.
+ *
+ * Other slots and buffers are reserved for use by the kernel
+ */
+struct netmap_ring {
+ /*
+ * buf_ofs is meant to be used through macros.
+ * It contains the offset of the buffer region from this
+ * descriptor.
+ */
+ const int64_t buf_ofs;
+ const uint32_t num_slots; /* number of slots in the ring. */
+ const uint32_t nr_buf_size;
+ const uint16_t ringid;
+ const uint16_t dir; /* 0: tx, 1: rx */
+
+ uint32_t head; /* (u) first user slot */
+ uint32_t cur; /* (u) wakeup point */
+ uint32_t tail; /* (k) first kernel slot */
+
+ uint32_t flags;
+
+ struct timeval ts; /* (k) time of last *sync() */
+
+ /* opaque room for a mutex or similar object */
+#if !defined(_WIN32) || defined(__CYGWIN__)
+ uint8_t __attribute__((__aligned__(NM_CACHE_ALIGN))) sem[128];
+#else
+ uint8_t __declspec(align(NM_CACHE_ALIGN)) sem[128];
+#endif
+
+ /* the slots follow. This struct has variable size */
+ struct netmap_slot slot[0]; /* array of slots. */
+};
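
A sketch of the rx-side ownership protocol described above, draining everything the kernel handed us in [head .. tail-1]; ring is assumed resolved as in the earlier sketch, fd is the bound descriptor, and consume_frame() is a placeholder:

    while (ring->head != ring->tail)
      {
        struct netmap_slot *slot = &ring->slot[ring->head];
        char *buf = (char *) ring + ring->buf_ofs
          + (size_t) slot->buf_idx * ring->nr_buf_size;
        consume_frame (buf, slot->len);
        ring->head = ring->cur =
          (ring->head + 1 == ring->num_slots) ? 0 : ring->head + 1;
      }
    ioctl (fd, NIOCRXSYNC, NULL);   /* return the slots to the kernel */
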
+
+
+/*
+ * RING FLAGS
+ */
+#define NR_TIMESTAMP 0x0002 /* set timestamp on *sync() */
+ /*
+ * updates the 'ts' field on each netmap syscall. This saves
+ * a separate gettimeofday(), and is not much worse than
+ * software timestamps generated in the interrupt handler.
+ */
+
+#define NR_FORWARD 0x0004 /* enable NS_FORWARD for ring */
+ /*
+ * Enables the NS_FORWARD slot flag for the ring.
+ */
+
+
+/*
+ * Netmap representation of an interface and its queue(s).
+ * This is initialized by the kernel when binding a file
+ * descriptor to a port, and should be considered as readonly
+ * by user programs. The kernel never uses it.
+ *
+ * There is one netmap_if for each file descriptor on which we want
+ * to select/poll.
+ * select/poll operates on one or all pairs depending on the value of
+ * nmr_queueid passed on the ioctl.
+ */
+struct netmap_if {
+ char ni_name[IFNAMSIZ]; /* name of the interface. */
+ const uint32_t ni_version; /* API version, currently unused */
+ const uint32_t ni_flags; /* properties */
+#define NI_PRIV_MEM 0x1 /* private memory region */
+
+ /*
+ * The number of packet rings available in netmap mode.
+ * Physical NICs can have different numbers of tx and rx rings.
+ * Physical NICs also have a 'host' ring pair.
+ * Additionally, clients can request additional ring pairs to
+ * be used for internal communication.
+ */
+ const uint32_t ni_tx_rings; /* number of HW tx rings */
+ const uint32_t ni_rx_rings; /* number of HW rx rings */
+
+ uint32_t ni_bufs_head; /* head index for extra bufs */
+ uint32_t ni_spare1[5];
+ /*
+ * The following array contains the offset of each netmap ring
+ * from this structure, in the following order:
+ * NIC tx rings (ni_tx_rings); host tx ring (1); extra tx rings;
+ * NIC rx rings (ni_rx_rings); host rx ring (1); extra rx rings.
+ *
+ * The area is filled up by the kernel on NIOCREGIF,
+ * and then only read by userspace code.
+ */
+ const ssize_t ring_ofs[0];
+};
+
+
+#ifndef NIOCREGIF
+/*
+ * ioctl names and related fields
+ *
+ * NIOCTXSYNC, NIOCRXSYNC synchronize tx or rx queues,
+ * whose identity is set in NIOCREGIF through nr_ringid.
+ * These are non blocking and take no argument.
+ *
+ * NIOCGINFO takes a struct ifreq, the interface name is the input,
+ * the outputs are number of queues and number of descriptor
+ * for each queue (useful to set number of threads etc.).
+ * The info returned is only advisory and may change before
+ * the interface is bound to a file descriptor.
+ *
+ * NIOCREGIF takes an interface name within a struct nmre,
+ * and activates netmap mode on the interface (if possible).
+ *
+ * The argument to NIOCGINFO/NIOCREGIF overlays struct ifreq so we
+ * can pass it down to other NIC-related ioctls.
+ *
+ * The actual argument (struct nmreq) has a number of options to request
+ * different functions.
+ * The following are used in NIOCREGIF when nr_cmd == 0:
+ *
+ * nr_name (in)
+ * The name of the port (em0, valeXXX:YYY, etc.)
+ * limited to IFNAMSIZ for backward compatibility.
+ *
+ * nr_version (in/out)
+ * Must match NETMAP_API as used in the kernel, error otherwise.
+ * Always returns the desired value on output.
+ *
+ * nr_tx_slots, nr_rx_slots, nr_tx_rings, nr_rx_rings (in/out)
+ * On input, non-zero values may be used to reconfigure the port
+ * according to the requested values, but this is not guaranteed.
+ * On output the actual values in use are reported.
+ *
+ * nr_ringid (in)
+ * Indicates how rings should be bound to the file descriptors.
+ * If nr_flags != 0, then the low bits (in NETMAP_RING_MASK)
+ * are used to indicate the ring number, and nr_flags specifies
+ * the actual rings to bind. NETMAP_NO_TX_POLL is unaffected.
+ *
+ * NOTE: THE FOLLOWING (nr_flags == 0) IS DEPRECATED:
+ * If nr_flags == 0, NETMAP_HW_RING and NETMAP_SW_RING control
+ * the binding as follows:
+ * 0 (default) binds all physical rings
+ * NETMAP_HW_RING | ring number binds a single ring pair
+ * NETMAP_SW_RING binds only the host tx/rx rings
+ *
+ * NETMAP_NO_TX_POLL can be OR-ed to make select()/poll() push
+ * packets on tx rings only if POLLOUT is set.
+ * The default is to push any pending packet.
+ *
+ * NETMAP_DO_RX_POLL can be OR-ed to make select()/poll() release
+ * packets on rx rings also when POLLIN is NOT set.
+ * The default is to touch the rx ring only with POLLIN.
+ * Note that this is the opposite of TX because it
+ * reflects the common usage.
+ *
+ * NOTE: NETMAP_PRIV_MEM IS DEPRECATED, use nr_arg2 instead.
+ * NETMAP_PRIV_MEM is set on return for ports that do not use
+ * the global memory allocator.
+ * This information is not significant and applications
+ * should look at the region id in nr_arg2
+ *
+ * nr_flags is the recommended mode to indicate which rings should
+ * be bound to a file descriptor. Values are NR_REG_*
+ *
+ * nr_arg1 (in) The number of extra rings to be reserved.
+ * Especially when allocating a VALE port the system only
+ * allocates the amount of memory needed for the port.
+ * If more shared memory rings are desired (e.g. for pipes),
+ * the first invocation for the same basename/allocator
+ * should specify a suitable number. Memory cannot be
+ * extended after the first allocation without closing
+ * all ports on the same region.
+ *
+ * nr_arg2 (in/out) The identity of the memory region used.
+ * On input, 0 means the system decides autonomously,
+ * other values may try to select a specific region.
+ * On return the actual value is reported.
+ * Region '1' is the global allocator, normally shared
+ * by all interfaces. Other values are private regions.
+ * If two ports use the same region, zero-copy is possible.
+ *
+ * nr_arg3 (in/out) number of extra buffers to be allocated.
+ *
+ * nr_cmd (in) if non-zero indicates a special command:
+ * NETMAP_BDG_ATTACH and nr_name = vale*:ifname
+ * attaches the NIC to the switch; nr_ringid specifies
+ * which rings to use. Used by vale-ctl -a ...
+ * nr_arg1 = NETMAP_BDG_HOST also attaches the host port
+ * as in vale-ctl -h ...
+ *
+ * NETMAP_BDG_DETACH and nr_name = vale*:ifname
+ * disconnects a previously attached NIC.
+ * Used by vale-ctl -d ...
+ *
+ * NETMAP_BDG_LIST
+ * list the configuration of VALE switches.
+ *
+ * NETMAP_BDG_VNET_HDR
+ * Set the virtio-net header length used by the client
+ * of a VALE switch port.
+ *
+ * NETMAP_BDG_NEWIF
+ * create a persistent VALE port with name nr_name.
+ * Used by vale-ctl -n ...
+ *
+ * NETMAP_BDG_DELIF
+ * delete a persistent VALE port. Used by vale-ctl -d ...
+ *
+ * nr_arg1, nr_arg2, nr_arg3 (in/out) command specific
+ */
+
+
+/*
+ * struct nmreq overlays a struct ifreq (just the name)
+ */
+struct nmreq {
+ char nr_name[IFNAMSIZ];
+ uint32_t nr_version; /* API version */
+ uint32_t nr_offset; /* nifp offset in the shared region */
+ uint32_t nr_memsize; /* size of the shared region */
+ uint32_t nr_tx_slots; /* slots in tx rings */
+ uint32_t nr_rx_slots; /* slots in rx rings */
+ uint16_t nr_tx_rings; /* number of tx rings */
+ uint16_t nr_rx_rings; /* number of rx rings */
+
+ uint16_t nr_ringid; /* ring(s) we care about */
+#define NETMAP_HW_RING 0x4000 /* single NIC ring pair */
+#define NETMAP_SW_RING 0x2000 /* only host ring pair */
+
+#define NETMAP_RING_MASK 0x0fff /* the ring number */
+
+#define NETMAP_NO_TX_POLL 0x1000 /* no automatic txsync on poll */
+
+#define NETMAP_DO_RX_POLL 0x8000 /* DO automatic rxsync on poll */
+
+ uint16_t nr_cmd;
+#define NETMAP_BDG_ATTACH 1 /* attach the NIC */
+#define NETMAP_BDG_DETACH 2 /* detach the NIC */
+#define NETMAP_BDG_REGOPS 3 /* register bridge callbacks */
+#define NETMAP_BDG_LIST 4 /* get bridge's info */
+#define NETMAP_BDG_VNET_HDR 5 /* set the port virtio-net-hdr length */
+#define NETMAP_BDG_OFFSET NETMAP_BDG_VNET_HDR /* deprecated alias */
+#define NETMAP_BDG_NEWIF 6 /* create a virtual port */
+#define NETMAP_BDG_DELIF 7 /* destroy a virtual port */
+#define NETMAP_PT_HOST_CREATE 8 /* create ptnetmap kthreads */
+#define NETMAP_PT_HOST_DELETE 9 /* delete ptnetmap kthreads */
+#define NETMAP_BDG_POLLING_ON 10	/* start polling kthread */
+#define NETMAP_BDG_POLLING_OFF 11 /* delete polling kthread */
+#define NETMAP_VNET_HDR_GET 12 /* get the port virtio-net-hdr length */
+ uint16_t nr_arg1; /* reserve extra rings in NIOCREGIF */
+#define NETMAP_BDG_HOST 1 /* attach the host stack on ATTACH */
+
+ uint16_t nr_arg2;
+ uint32_t nr_arg3; /* req. extra buffers in NIOCREGIF */
+ uint32_t nr_flags;
+ /* various modes, extends nr_ringid */
+ uint32_t spare2[1];
+};
+
+#define NR_REG_MASK 0xf /* values for nr_flags */
+enum { NR_REG_DEFAULT = 0, /* backward compat, should not be used. */
+ NR_REG_ALL_NIC = 1,
+ NR_REG_SW = 2,
+ NR_REG_NIC_SW = 3,
+ NR_REG_ONE_NIC = 4,
+ NR_REG_PIPE_MASTER = 5,
+ NR_REG_PIPE_SLAVE = 6,
+};
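
Putting the fields above together, registration is a short sequence; a sketch with error handling elided ("em0" is an arbitrary example port, and netmap_create_if() later in this patch follows the same steps):

    int fd = open ("/dev/netmap", O_RDWR);
    struct nmreq req;

    memset (&req, 0, sizeof (req));
    req.nr_version = NETMAP_API;
    req.nr_flags = NR_REG_ALL_NIC;      /* bind all NIC ring pairs */
    strncpy (req.nr_name, "em0", IFNAMSIZ - 1);
    ioctl (fd, NIOCREGIF, &req);        /* kernel fills sizes and offsets */

    void *mem = mmap (NULL, req.nr_memsize, PROT_READ | PROT_WRITE,
                      MAP_SHARED, fd, 0);
    struct netmap_if *nifp =
      (struct netmap_if *) ((char *) mem + req.nr_offset);
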
+/* monitor uses the NR_REG to select the rings to monitor */
+#define NR_MONITOR_TX 0x100
+#define NR_MONITOR_RX 0x200
+#define NR_ZCOPY_MON 0x400
+/* request exclusive access to the selected rings */
+#define NR_EXCLUSIVE 0x800
+/* request ptnetmap host support */
+#define NR_PASSTHROUGH_HOST NR_PTNETMAP_HOST /* deprecated */
+#define NR_PTNETMAP_HOST 0x1000
+#define NR_RX_RINGS_ONLY 0x2000
+#define NR_TX_RINGS_ONLY 0x4000
+/* Applications set this flag if they are able to deal with virtio-net headers,
+ * that is send/receive frames that start with a virtio-net header.
+ * If not set, NIOCREGIF will fail with netmap ports that require applications
+ * to use those headers. If the flag is set, the application can use the
+ * NETMAP_VNET_HDR_GET command to figure out the header length. */
+#define NR_ACCEPT_VNET_HDR 0x8000
+
+
+/*
+ * Windows does not have _IOWR(). _IO(), _IOW() and _IOR() are defined
+ * in ws2def.h, but not necessarily in the form we need, so we
+ * redefine them in a form convenient for DeviceIoControl signatures.
+ */
+#ifdef _WIN32
+#undef _IO // ws2def.h
+#define _WIN_NM_IOCTL_TYPE 40000
+#define _IO(_c, _n) CTL_CODE(_WIN_NM_IOCTL_TYPE, ((_n) + 0x800) , \
+ METHOD_BUFFERED, FILE_ANY_ACCESS )
+#define _IO_direct(_c, _n) CTL_CODE(_WIN_NM_IOCTL_TYPE, ((_n) + 0x800) , \
+ METHOD_OUT_DIRECT, FILE_ANY_ACCESS )
+
+#define _IOWR(_c, _n, _s) _IO(_c, _n)
+
+/* We have some internal sysctls in addition to the externally visible ones */
+#define NETMAP_MMAP _IO_direct('i', 160) // note METHOD_OUT_DIRECT
+#define NETMAP_POLL _IO('i', 162)
+
+/* and also two setsockopt for sysctl emulation */
+#define NETMAP_SETSOCKOPT _IO('i', 140)
+#define NETMAP_GETSOCKOPT _IO('i', 141)
+
+
+// These link names are for the Netmap Core Driver
+#define NETMAP_NT_DEVICE_NAME L"\\Device\\NETMAP"
+#define NETMAP_DOS_DEVICE_NAME L"\\DosDevices\\netmap"
+
+// Structure used to pass a virtual address within an IOCTL
+typedef struct _MEMORY_ENTRY {
+ PVOID pUsermodeVirtualAddress;
+} MEMORY_ENTRY, *PMEMORY_ENTRY;
+
+typedef struct _POLL_REQUEST_DATA {
+ int events;
+ int timeout;
+ int revents;
+} POLL_REQUEST_DATA;
+
+#endif /* _WIN32 */
+
+/*
+ * FreeBSD uses the size value embedded in the _IOWR to determine
+ * how much to copy in/out. So we need it to match the actual
+ * data structure we pass. We put some spares in the structure
+ * to ease compatibility with other versions
+ */
+#define NIOCGINFO _IOWR('i', 145, struct nmreq) /* return IF info */
+#define NIOCREGIF _IOWR('i', 146, struct nmreq) /* interface register */
+#define NIOCTXSYNC _IO('i', 148) /* sync tx queues */
+#define NIOCRXSYNC _IO('i', 149) /* sync rx queues */
+#define NIOCCONFIG	_IOWR('i', 150, struct nm_ifreq) /* for ext. modules */
+#endif /* !NIOCREGIF */
+
+
+/*
+ * Helper functions for kernel and userspace
+ */
+
+/*
+ * check if space is available in the ring.
+ */
+static inline int
+nm_ring_empty(struct netmap_ring *ring)
+{
+ return (ring->cur == ring->tail);
+}
+
+/*
+ * Opaque structure that is passed to an external kernel
+ * module via ioctl(fd, NIOCCONFIG, req) for a user-owned
+ * bridge port (at this point ephemeral VALE interface).
+ */
+#define NM_IFRDATA_LEN 256
+struct nm_ifreq {
+ char nifr_name[IFNAMSIZ];
+ char data[NM_IFRDATA_LEN];
+};
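
A sketch of passing a module-specific blob through this structure (the payload layout is defined by the external module, not by netmap; fd is the bound descriptor):

    struct nm_ifreq ifr;

    memset (&ifr, 0, sizeof (ifr));
    strncpy (ifr.nifr_name, "valeA:port0", IFNAMSIZ - 1);
    /* fill ifr.data as the receiving kernel module expects */
    ioctl (fd, NIOCCONFIG, &ifr);
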
+
+/*
+ * netmap kernel thread configuration
+ */
+/* bhyve/vmm.ko MSIX parameters for IOCTL */
+struct ptn_vmm_ioctl_msix {
+ uint64_t msg;
+ uint64_t addr;
+};
+
+/* IOCTL parameters */
+struct nm_kth_ioctl {
+ u_long com;
+ union {
+ struct ptn_vmm_ioctl_msix msix;
+ } data;
+};
+
+/* Configuration of a ptnetmap ring */
+struct ptnet_ring_cfg {
+ uint64_t ioeventfd; /* eventfd in linux, tsleep() parameter in FreeBSD */
+ uint64_t irqfd; /* eventfd in linux, ioctl fd in FreeBSD */
+ struct nm_kth_ioctl ioctl; /* ioctl parameter to send irq (only used in bhyve/FreeBSD) */
+};
+#endif /* _NET_NETMAP_H_ */
diff --git a/src/vnet/devices/netmap/netmap.api b/src/vnet/devices/netmap/netmap.api
new file mode 100644
index 00000000000..377ccffda4c
--- /dev/null
+++ b/src/vnet/devices/netmap/netmap.api
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \brief Create netmap
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param netmap_if_name - interface name
+ @param hw_addr - interface MAC
+    @param use_random_hw_addr - use a randomly generated MAC
+ @param is_pipe - is pipe
+ @param is_master - 0=slave, 1=master
+*/
+define netmap_create
+{
+ u32 client_index;
+ u32 context;
+
+ u8 netmap_if_name[64];
+ u8 hw_addr[6];
+ u8 use_random_hw_addr;
+ u8 is_pipe;
+ u8 is_master;
+};
+
+/** \brief Create netmap response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define netmap_create_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Delete netmap
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param netmap_if_name - interface name
+*/
+define netmap_delete
+{
+ u32 client_index;
+ u32 context;
+
+ u8 netmap_if_name[64];
+};
+
+/** \brief Delete netmap response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define netmap_delete_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
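
On the client side, sending these messages follows the usual VPP shared-memory API pattern. A sketch, assuming an already-attached binary API client (my_client_index and input_q come from the connection setup, which is not shown here):

    vl_api_netmap_create_t *mp = vl_msg_api_alloc (sizeof (*mp));

    memset (mp, 0, sizeof (*mp));
    mp->_vl_msg_id = ntohs (VL_API_NETMAP_CREATE);
    mp->client_index = my_client_index;
    mp->context = 0x1234;
    strncpy ((char *) mp->netmap_if_name, "vale0:1",
             sizeof (mp->netmap_if_name) - 1);
    mp->use_random_hw_addr = 1;
    vl_msg_api_send_shmem (input_q, (u8 *) & mp);
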
diff --git a/src/vnet/devices/netmap/netmap.c b/src/vnet/devices/netmap/netmap.c
new file mode 100644
index 00000000000..3bdb442dda2
--- /dev/null
+++ b/src/vnet/devices/netmap/netmap.c
@@ -0,0 +1,316 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdint.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <vnet/devices/netmap/net_netmap.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/netmap/netmap.h>
+
+static u32
+netmap_eth_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi,
+ u32 flags)
+{
+ /* nothing for now */
+ return 0;
+}
+
+static clib_error_t *
+netmap_fd_read_ready (unix_file_t * uf)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ netmap_main_t *nm = &netmap_main;
+ u32 idx = uf->private_data;
+
+ nm->pending_input_bitmap =
+ clib_bitmap_set (nm->pending_input_bitmap, idx, 1);
+
+ /* Schedule the rx node */
+ vlib_node_set_interrupt_pending (vm, netmap_input_node.index);
+
+ return 0;
+}
+
+static void
+close_netmap_if (netmap_main_t * nm, netmap_if_t * nif)
+{
+ if (nif->unix_file_index != ~0)
+ {
+ unix_file_del (&unix_main, unix_main.file_pool + nif->unix_file_index);
+ nif->unix_file_index = ~0;
+ }
+ else if (nif->fd > -1)
+ close (nif->fd);
+
+ if (nif->mem_region)
+ {
+ netmap_mem_region_t *reg = &nm->mem_regions[nif->mem_region];
+ if (--reg->refcnt == 0)
+ {
+ munmap (reg->mem, reg->region_size);
+ reg->region_size = 0;
+ }
+ }
+
+
+ mhash_unset (&nm->if_index_by_host_if_name, nif->host_if_name,
+ &nif->if_index);
+ vec_free (nif->host_if_name);
+ vec_free (nif->req);
+
+ memset (nif, 0, sizeof (*nif));
+ pool_put (nm->interfaces, nif);
+}
+
+int
+netmap_worker_thread_enable ()
+{
+ /* if worker threads are enabled, switch to polling mode */
+ foreach_vlib_main ((
+ {
+ vlib_node_set_state (this_vlib_main,
+ netmap_input_node.index,
+ VLIB_NODE_STATE_POLLING);
+ }));
+
+ return 0;
+}
+
+int
+netmap_worker_thread_disable ()
+{
+ foreach_vlib_main ((
+ {
+ vlib_node_set_state (this_vlib_main,
+ netmap_input_node.index,
+ VLIB_NODE_STATE_INTERRUPT);
+ }));
+
+ return 0;
+}
+
+int
+netmap_create_if (vlib_main_t * vm, u8 * if_name, u8 * hw_addr_set,
+ u8 is_pipe, u8 is_master, u32 * sw_if_index)
+{
+ netmap_main_t *nm = &netmap_main;
+ int ret = 0;
+ netmap_if_t *nif = 0;
+ u8 hw_addr[6];
+ clib_error_t *error = 0;
+ vnet_sw_interface_t *sw;
+ vnet_main_t *vnm = vnet_get_main ();
+ uword *p;
+ struct nmreq *req = 0;
+ netmap_mem_region_t *reg;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ int fd;
+
+ p = mhash_get (&nm->if_index_by_host_if_name, if_name);
+ if (p)
+ return VNET_API_ERROR_SUBIF_ALREADY_EXISTS;
+
+ fd = open ("/dev/netmap", O_RDWR);
+ if (fd < 0)
+    return VNET_API_ERROR_SYSCALL_ERROR_1;
+
+ pool_get (nm->interfaces, nif);
+ nif->if_index = nif - nm->interfaces;
+ nif->fd = fd;
+ nif->unix_file_index = ~0;
+
+ vec_validate (req, 0);
+ nif->req = req;
+ req->nr_version = NETMAP_API;
+
+ if (is_pipe)
+ req->nr_flags = is_master ? NR_REG_PIPE_MASTER : NR_REG_PIPE_SLAVE;
+ else
+ req->nr_flags = NR_REG_ALL_NIC;
+
+ req->nr_flags |= NR_ACCEPT_VNET_HDR;
+ snprintf (req->nr_name, IFNAMSIZ, "%s", if_name);
+ req->nr_name[IFNAMSIZ - 1] = 0;
+
+ if (ioctl (nif->fd, NIOCREGIF, req))
+ {
+ ret = VNET_API_ERROR_NOT_CONNECTED;
+ goto error;
+ }
+
+ nif->mem_region = req->nr_arg2;
+ vec_validate (nm->mem_regions, nif->mem_region);
+ reg = &nm->mem_regions[nif->mem_region];
+ if (reg->region_size == 0)
+ {
+ reg->mem = mmap (NULL, req->nr_memsize, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ clib_warning ("mem %p", reg->mem);
+ if (reg->mem == MAP_FAILED)
+ {
+ ret = VNET_API_ERROR_NOT_CONNECTED;
+ goto error;
+ }
+ reg->region_size = req->nr_memsize;
+ }
+ reg->refcnt++;
+
+ nif->nifp = NETMAP_IF (reg->mem, req->nr_offset);
+ nif->first_rx_ring = 0;
+ nif->last_rx_ring = 0;
+ nif->first_tx_ring = 0;
+ nif->last_tx_ring = 0;
+  /* keep a private copy; callers are free to release their name vector */
+  nif->host_if_name = vec_dup (if_name);
+ nif->per_interface_next_index = ~0;
+
+ if (tm->n_vlib_mains > 1)
+ {
+ nif->lockp = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+ CLIB_CACHE_LINE_BYTES);
+ memset ((void *) nif->lockp, 0, CLIB_CACHE_LINE_BYTES);
+ }
+
+ {
+ unix_file_t template = { 0 };
+ template.read_function = netmap_fd_read_ready;
+ template.file_descriptor = nif->fd;
+ template.private_data = nif->if_index;
+ nif->unix_file_index = unix_file_add (&unix_main, &template);
+ }
+
+  /* use the configured MAC address or generate a random one */
+ if (hw_addr_set)
+ memcpy (hw_addr, hw_addr_set, 6);
+ else
+ {
+ f64 now = vlib_time_now (vm);
+ u32 rnd;
+ rnd = (u32) (now * 1e6);
+ rnd = random_u32 (&rnd);
+
+ memcpy (hw_addr + 2, &rnd, sizeof (rnd));
+ hw_addr[0] = 2;
+ hw_addr[1] = 0xfe;
+ }
+
+ error = ethernet_register_interface (vnm, netmap_device_class.index,
+ nif->if_index, hw_addr,
+ &nif->hw_if_index,
+ netmap_eth_flag_change);
+
+ if (error)
+ {
+ clib_error_report (error);
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ sw = vnet_get_hw_sw_interface (vnm, nif->hw_if_index);
+ nif->sw_if_index = sw->sw_if_index;
+
+ mhash_set_mem (&nm->if_index_by_host_if_name, if_name, &nif->if_index, 0);
+
+ if (sw_if_index)
+ *sw_if_index = nif->sw_if_index;
+
+ if (tm->n_vlib_mains > 1 && pool_elts (nm->interfaces) == 1)
+ netmap_worker_thread_enable ();
+
+ return 0;
+
+error:
+ close_netmap_if (nm, nif);
+ return ret;
+}
+
+int
+netmap_delete_if (vlib_main_t * vm, u8 * host_if_name)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ netmap_main_t *nm = &netmap_main;
+ netmap_if_t *nif;
+ uword *p;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+
+ p = mhash_get (&nm->if_index_by_host_if_name, host_if_name);
+ if (p == NULL)
+ {
+ clib_warning ("Host interface %s does not exist", host_if_name);
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ }
+ nif = pool_elt_at_index (nm->interfaces, p[0]);
+
+ /* bring down the interface */
+ vnet_hw_interface_set_flags (vnm, nif->hw_if_index, 0);
+
+ ethernet_delete_interface (vnm, nif->hw_if_index);
+
+ close_netmap_if (nm, nif);
+
+ if (tm->n_vlib_mains > 1 && pool_elts (nm->interfaces) == 0)
+ netmap_worker_thread_disable ();
+
+ return 0;
+}
+
+static clib_error_t *
+netmap_init (vlib_main_t * vm)
+{
+ netmap_main_t *nm = &netmap_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ vlib_thread_registration_t *tr;
+ uword *p;
+
+ memset (nm, 0, sizeof (netmap_main_t));
+
+ nm->input_cpu_first_index = 0;
+ nm->input_cpu_count = 1;
+
+ /* find out which cpus will be used for input */
+ p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+ tr = p ? (vlib_thread_registration_t *) p[0] : 0;
+
+ if (tr && tr->count > 0)
+ {
+ nm->input_cpu_first_index = tr->first_index;
+ nm->input_cpu_count = tr->count;
+ }
+
+ mhash_init_vec_string (&nm->if_index_by_host_if_name, sizeof (uword));
+
+ vec_validate_aligned (nm->rx_buffers, tm->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (netmap_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/netmap/netmap.h b/src/vnet/devices/netmap/netmap.h
new file mode 100644
index 00000000000..39a94043c3c
--- /dev/null
+++ b/src/vnet/devices/netmap/netmap.h
@@ -0,0 +1,164 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+/*
+ * Copyright (C) 2011-2014 Universita` di Pisa. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ volatile u32 *lockp;
+ u8 *host_if_name;
+ uword if_index;
+ u32 hw_if_index;
+ u32 sw_if_index;
+ u32 unix_file_index;
+
+ u32 per_interface_next_index;
+ u8 is_admin_up;
+
+ /* netmap */
+ struct nmreq *req;
+ u16 mem_region;
+ int fd;
+ struct netmap_if *nifp;
+ u16 first_tx_ring;
+ u16 last_tx_ring;
+ u16 first_rx_ring;
+ u16 last_rx_ring;
+
+} netmap_if_t;
+
+typedef struct
+{
+ char *mem;
+ u32 region_size;
+ int refcnt;
+} netmap_mem_region_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ netmap_if_t *interfaces;
+
+ /* bitmap of pending rx interfaces */
+ uword *pending_input_bitmap;
+
+ /* rx buffer cache */
+ u32 **rx_buffers;
+
+ /* hash of host interface names */
+ mhash_t if_index_by_host_if_name;
+
+ /* vector of memory regions */
+ netmap_mem_region_t *mem_regions;
+
+ /* first cpu index */
+ u32 input_cpu_first_index;
+
+ /* total cpu count */
+ u32 input_cpu_count;
+} netmap_main_t;
+
+netmap_main_t netmap_main;
+extern vnet_device_class_t netmap_device_class;
+extern vlib_node_registration_t netmap_input_node;
+
+int netmap_create_if (vlib_main_t * vm, u8 * host_if_name, u8 * hw_addr_set,
+ u8 is_pipe, u8 is_master, u32 * sw_if_index);
+int netmap_delete_if (vlib_main_t * vm, u8 * host_if_name);
+
+
+/* Macros and helper functions from sys/net/netmap_user.h */
+
+#ifdef _NET_NETMAP_H_
+
+#define _NETMAP_OFFSET(type, ptr, offset) \
+ ((type)(void *)((char *)(ptr) + (offset)))
+
+#define NETMAP_IF(_base, _ofs) _NETMAP_OFFSET(struct netmap_if *, _base, _ofs)
+
+#define NETMAP_TXRING(nifp, index) _NETMAP_OFFSET(struct netmap_ring *, \
+ nifp, (nifp)->ring_ofs[index] )
+
+#define NETMAP_RXRING(nifp, index) _NETMAP_OFFSET(struct netmap_ring *, \
+ nifp, (nifp)->ring_ofs[index + (nifp)->ni_tx_rings + 1] )
+
+#define NETMAP_BUF(ring, index) \
+ ((char *)(ring) + (ring)->buf_ofs + ((index)*(ring)->nr_buf_size))
+
+#define NETMAP_BUF_IDX(ring, buf) \
+ ( ((char *)(buf) - ((char *)(ring) + (ring)->buf_ofs) ) / \
+ (ring)->nr_buf_size )
+
+static inline uint32_t
+nm_ring_next (struct netmap_ring *ring, uint32_t i)
+{
+ return (PREDICT_FALSE (i + 1 == ring->num_slots) ? 0 : i + 1);
+}
+
+
+/*
+ * Return 1 if we have pending transmissions in the tx ring.
+ * When everything is complete, ring->head = ring->tail + 1 (modulo ring size).
+ */
+static inline int
+nm_tx_pending (struct netmap_ring *ring)
+{
+ return nm_ring_next (ring, ring->tail) != ring->head;
+}
+
+static inline uint32_t
+nm_ring_space (struct netmap_ring *ring)
+{
+ int ret = ring->tail - ring->cur;
+ if (ret < 0)
+ ret += ring->num_slots;
+ return ret;
+}
+#endif
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
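
A sketch of programmatic interface creation against the declarations above, e.g. from a CLI handler ("vale0:vpp" is an arbitrary example name; the handler in netmap_api.c NUL-terminates the name vector the same way):

    u32 sw_if_index = ~0;
    u8 *if_name = format (0, "vale0:vpp");

    vec_add1 (if_name, 0);      /* netmap_create_if expects a C string */
    if (netmap_create_if (vlib_get_main (), if_name, /* hw_addr */ 0,
                          /* is_pipe */ 0, /* is_master */ 0, &sw_if_index))
      clib_warning ("netmap_create_if failed");
    vec_free (if_name);
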
diff --git a/src/vnet/devices/netmap/netmap_api.c b/src/vnet/devices/netmap/netmap_api.c
new file mode 100644
index 00000000000..9a393b1fda4
--- /dev/null
+++ b/src/vnet/devices/netmap/netmap_api.c
@@ -0,0 +1,137 @@
+/*
+ *------------------------------------------------------------------
+ * netmap_api.c - netmap api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/devices/netmap/netmap.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(NETMAP_CREATE, netmap_create) \
+_(NETMAP_DELETE, netmap_delete)
+
+static void
+vl_api_netmap_create_t_handler (vl_api_netmap_create_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_netmap_create_reply_t *rmp;
+ int rv = 0;
+ u8 *if_name = NULL;
+
+ if_name = format (0, "%s", mp->netmap_if_name);
+ vec_add1 (if_name, 0);
+
+ rv =
+ netmap_create_if (vm, if_name, mp->use_random_hw_addr ? 0 : mp->hw_addr,
+ mp->is_pipe, mp->is_master, 0);
+
+ vec_free (if_name);
+
+ REPLY_MACRO (VL_API_NETMAP_CREATE_REPLY);
+}
+
+static void
+vl_api_netmap_delete_t_handler (vl_api_netmap_delete_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_netmap_delete_reply_t *rmp;
+ int rv = 0;
+ u8 *if_name = NULL;
+
+ if_name = format (0, "%s", mp->netmap_if_name);
+ vec_add1 (if_name, 0);
+
+ rv = netmap_delete_if (vm, if_name);
+
+ vec_free (if_name);
+
+ REPLY_MACRO (VL_API_NETMAP_DELETE_REPLY);
+}
+
+/*
+ * netmap_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_netmap;
+#undef _
+}
+
+static clib_error_t *
+netmap_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (netmap_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/netmap/node.c b/src/vnet/devices/netmap/node.c
new file mode 100644
index 00000000000..19895e4754a
--- /dev/null
+++ b/src/vnet/devices/netmap/node.c
@@ -0,0 +1,300 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdint.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/devices.h>
+#include <vnet/feature/feature.h>
+
+#include <vnet/devices/netmap/net_netmap.h>
+#include <vnet/devices/netmap/netmap.h>
+
+#define foreach_netmap_input_error
+
+typedef enum
+{
+#define _(f,s) NETMAP_INPUT_ERROR_##f,
+ foreach_netmap_input_error
+#undef _
+ NETMAP_INPUT_N_ERROR,
+} netmap_input_error_t;
+
+static char *netmap_input_error_strings[] = {
+#define _(n,s) s,
+ foreach_netmap_input_error
+#undef _
+};
+
+typedef struct
+{
+ u32 next_index;
+ u32 hw_if_index;
+ struct netmap_slot slot;
+} netmap_input_trace_t;
+
+static u8 *
+format_netmap_input_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ netmap_input_trace_t *t = va_arg (*args, netmap_input_trace_t *);
+ uword indent = format_get_indent (s);
+
+ s = format (s, "netmap: hw_if_index %d next-index %d",
+ t->hw_if_index, t->next_index);
+ s = format (s, "\n%Uslot: flags 0x%x len %u buf_idx %u",
+ format_white_space, indent + 2,
+ t->slot.flags, t->slot.len, t->slot.buf_idx);
+ return s;
+}
+
+always_inline void
+buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi, u32 prev_bi)
+{
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ vlib_buffer_t *first_b = vlib_get_buffer (vm, first_bi);
+ vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_bi);
+
+ /* update first buffer */
+ first_b->total_length_not_including_first_buffer += b->current_length;
+
+ /* update previous buffer */
+ prev_b->next_buffer = bi;
+ prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
+
+ /* update current buffer */
+ b->next_buffer = 0;
+}
+
+always_inline uword
+netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame, netmap_if_t * nif)
+{
+ u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ uword n_trace = vlib_get_trace_count (vm, node);
+ netmap_main_t *nm = &netmap_main;
+ u32 n_rx_packets = 0;
+ u32 n_rx_bytes = 0;
+ u32 *to_next = 0;
+ u32 n_free_bufs;
+ struct netmap_ring *ring;
+ int cur_ring;
+ u32 cpu_index = os_get_cpu_number ();
+ u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm,
+ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+
+ if (nif->per_interface_next_index != ~0)
+ next_index = nif->per_interface_next_index;
+
+ n_free_bufs = vec_len (nm->rx_buffers[cpu_index]);
+ if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE))
+ {
+ vec_validate (nm->rx_buffers[cpu_index],
+ VLIB_FRAME_SIZE + n_free_bufs - 1);
+ n_free_bufs +=
+ vlib_buffer_alloc (vm, &nm->rx_buffers[cpu_index][n_free_bufs],
+ VLIB_FRAME_SIZE);
+ _vec_len (nm->rx_buffers[cpu_index]) = n_free_bufs;
+ }
+
+ cur_ring = nif->first_rx_ring;
+ while (cur_ring <= nif->last_rx_ring && n_free_bufs)
+ {
+ int r = 0;
+ u32 cur_slot_index;
+ ring = NETMAP_RXRING (nif->nifp, cur_ring);
+ r = nm_ring_space (ring);
+
+ if (!r)
+ {
+ cur_ring++;
+ continue;
+ }
+
+ if (r > n_free_bufs)
+ r = n_free_bufs;
+
+ cur_slot_index = ring->cur;
+ while (r)
+ {
+ u32 n_left_to_next;
+ u32 next0 = next_index;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (r && n_left_to_next)
+ {
+ vlib_buffer_t *first_b0 = 0;
+ u32 offset = 0;
+ u32 bi0 = 0, first_bi0 = 0, prev_bi0;
+ u32 next_slot_index = (cur_slot_index + 1) % ring->num_slots;
+ u32 next2_slot_index = (cur_slot_index + 2) % ring->num_slots;
+ struct netmap_slot *slot = &ring->slot[cur_slot_index];
+ u32 data_len = slot->len;
+
+ /* prefetch 2 slots in advance */
+ CLIB_PREFETCH (&ring->slot[next2_slot_index],
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ /* prefetch start of next packet */
+ CLIB_PREFETCH (NETMAP_BUF
+ (ring, ring->slot[next_slot_index].buf_idx),
+ CLIB_CACHE_LINE_BYTES, LOAD);
+
+ while (data_len && n_free_bufs)
+ {
+ vlib_buffer_t *b0;
+ /* grab free buffer */
+ u32 last_empty_buffer =
+ vec_len (nm->rx_buffers[cpu_index]) - 1;
+ prev_bi0 = bi0;
+ bi0 = nm->rx_buffers[cpu_index][last_empty_buffer];
+ b0 = vlib_get_buffer (vm, bi0);
+ _vec_len (nm->rx_buffers[cpu_index]) = last_empty_buffer;
+ n_free_bufs--;
+
+ /* copy data */
+ u32 bytes_to_copy =
+ data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
+ b0->current_data = 0;
+ clib_memcpy (vlib_buffer_get_current (b0),
+ (u8 *) NETMAP_BUF (ring,
+ slot->buf_idx) + offset,
+ bytes_to_copy);
+
+ /* fill buffer header */
+ b0->current_length = bytes_to_copy;
+
+ if (offset == 0)
+ {
+ b0->total_length_not_including_first_buffer = 0;
+ b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] =
+ nif->sw_if_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ first_bi0 = bi0;
+ first_b0 = vlib_get_buffer (vm, first_bi0);
+ }
+ else
+ buffer_add_to_chain (vm, bi0, first_bi0, prev_bi0);
+
+ offset += bytes_to_copy;
+ data_len -= bytes_to_copy;
+ }
+
+ /* trace */
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);
+ if (PREDICT_FALSE (n_trace > 0))
+ {
+ if (PREDICT_TRUE (first_b0 != 0))
+ {
+ netmap_input_trace_t *tr;
+ vlib_trace_buffer (vm, node, next0, first_b0,
+ /* follow_chain */ 0);
+ vlib_set_trace_count (vm, node, --n_trace);
+ tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->hw_if_index = nif->hw_if_index;
+ memcpy (&tr->slot, slot, sizeof (struct netmap_slot));
+ }
+ }
+
+ /* redirect if feature path enabled */
+ vnet_feature_start_device_input_x1 (nif->sw_if_index, &next0,
+ first_b0, 0);
+
+	      /* enqueue the packet, then validate/fix up its next node */
+	      to_next[0] = first_bi0;
+	      to_next += 1;
+	      n_left_to_next--;
+	      vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+					       n_left_to_next, first_bi0,
+					       next0);
+
+	      /* bookkeeping, then move to the next packet */
+	      n_rx_packets++;
+	      n_rx_bytes += slot->len;
+	      cur_slot_index = next_slot_index;
+
+ r--;
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ ring->head = ring->cur = cur_slot_index;
+ cur_ring++;
+ }
+
+ if (n_rx_packets)
+ ioctl (nif->fd, NIOCRXSYNC, NULL);
+
+ vlib_increment_combined_counter
+ (vnet_get_main ()->interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ os_get_cpu_number (), nif->hw_if_index, n_rx_packets, n_rx_bytes);
+
+ return n_rx_packets;
+}
+
+static uword
+netmap_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ int i;
+ u32 n_rx_packets = 0;
+ u32 cpu_index = os_get_cpu_number ();
+ netmap_main_t *nm = &netmap_main;
+ netmap_if_t *nmi;
+
+ for (i = 0; i < vec_len (nm->interfaces); i++)
+ {
+ nmi = vec_elt_at_index (nm->interfaces, i);
+ if (nmi->is_admin_up &&
+ (i % nm->input_cpu_count) ==
+ (cpu_index - nm->input_cpu_first_index))
+ n_rx_packets += netmap_device_input_fn (vm, node, frame, nmi);
+ }
+
+ return n_rx_packets;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (netmap_input_node) = {
+ .function = netmap_input_fn,
+ .name = "netmap-input",
+ .sibling_of = "device-input",
+ .format_trace = format_netmap_input_trace,
+ .type = VLIB_NODE_TYPE_INPUT,
+ /* default state is INTERRUPT mode, switch to POLLING if worker threads are enabled */
+ .state = VLIB_NODE_STATE_INTERRUPT,
+ .n_errors = NETMAP_INPUT_N_ERROR,
+ .error_strings = netmap_input_error_strings,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (netmap_input_node, netmap_input_fn)
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/nic/ixge.c b/src/vnet/devices/nic/ixge.c
new file mode 100644
index 00000000000..d4c4c6b7414
--- /dev/null
+++ b/src/vnet/devices/nic/ixge.c
@@ -0,0 +1,2938 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * WARNING!
+ * This driver is not intended for production use and is unsupported.
+ * It is provided for educational use only.
+ * Please use the supported DPDK driver instead.
+ */
+
+#if __x86_64__
+#include <vppinfra/vector.h>
+
+#ifndef CLIB_HAVE_VEC128
+#warning HACK: ixge driver won't really work, missing u32x4
+typedef unsigned long long u32x4;
+#endif
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlib/pci/pci.h>
+#include <vnet/vnet.h>
+#include <vnet/devices/nic/ixge.h>
+#include <vnet/ethernet/ethernet.h>
+
+#define IXGE_ALWAYS_POLL 0
+
+#define EVENT_SET_FLAGS 0
+#define IXGE_HWBP_RACE_ELOG 0
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+
+/* 10 GIG E (XGE) PHY IEEE 802.3 clause 45 definitions. */
+#define XGE_PHY_DEV_TYPE_PMA_PMD 1
+#define XGE_PHY_DEV_TYPE_PHY_XS 4
+#define XGE_PHY_ID1 0x2
+#define XGE_PHY_ID2 0x3
+#define XGE_PHY_CONTROL 0x0
+#define XGE_PHY_CONTROL_RESET (1 << 15)
+
+ixge_main_t ixge_main;
+static vlib_node_registration_t ixge_input_node;
+static vlib_node_registration_t ixge_process_node;
+
+static void
+ixge_semaphore_get (ixge_device_t * xd)
+{
+ ixge_main_t *xm = &ixge_main;
+ vlib_main_t *vm = xm->vlib_main;
+ ixge_regs_t *r = xd->regs;
+ u32 i;
+
+ i = 0;
+ while (!(r->software_semaphore & (1 << 0)))
+ {
+ if (i > 0)
+ vlib_process_suspend (vm, 100e-6);
+ i++;
+ }
+ do
+ {
+ r->software_semaphore |= 1 << 1;
+ }
+ while (!(r->software_semaphore & (1 << 1)));
+}
+
+static void
+ixge_semaphore_release (ixge_device_t * xd)
+{
+ ixge_regs_t *r = xd->regs;
+ r->software_semaphore &= ~3;
+}
+
+static void
+ixge_software_firmware_sync (ixge_device_t * xd, u32 sw_mask)
+{
+ ixge_main_t *xm = &ixge_main;
+ vlib_main_t *vm = xm->vlib_main;
+ ixge_regs_t *r = xd->regs;
+ u32 fw_mask = sw_mask << 5;
+ u32 m, done = 0;
+
+ while (!done)
+ {
+ ixge_semaphore_get (xd);
+ m = r->software_firmware_sync;
+ done = (m & fw_mask) == 0;
+ if (done)
+ r->software_firmware_sync = m | sw_mask;
+ ixge_semaphore_release (xd);
+ if (!done)
+ vlib_process_suspend (vm, 10e-3);
+ }
+}
+
+static void
+ixge_software_firmware_sync_release (ixge_device_t * xd, u32 sw_mask)
+{
+ ixge_regs_t *r = xd->regs;
+ ixge_semaphore_get (xd);
+ r->software_firmware_sync &= ~sw_mask;
+ ixge_semaphore_release (xd);
+}
+
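+/* Clause 45 MDIO access is a two-phase transaction: an address cycle
+   first latches device type, register index and MDIO address, then a
+   separate command cycle (2 = read, 1 = write) moves the data. Both
+   cycles busy-wait on the same busy bit in the phy_command register. */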
+u32
+ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index,
+ u32 v, u32 is_read)
+{
+ ixge_regs_t *r = xd->regs;
+ const u32 busy_bit = 1 << 30;
+ u32 x;
+
+ ASSERT (xd->phy_index < 2);
+ ixge_software_firmware_sync (xd, 1 << (1 + xd->phy_index));
+
+ ASSERT (reg_index < (1 << 16));
+ ASSERT (dev_type < (1 << 5));
+ if (!is_read)
+ r->xge_mac.phy_data = v;
+
+ /* Address cycle. */
+  x = (reg_index | (dev_type << 16)
+       | (xd->phys[xd->phy_index].mdio_address << 21));
+ r->xge_mac.phy_command = x | busy_bit;
+ /* Busy wait timed to take 28e-6 secs. No suspend. */
+ while (r->xge_mac.phy_command & busy_bit)
+ ;
+
+ r->xge_mac.phy_command = x | ((is_read ? 2 : 1) << 26) | busy_bit;
+ while (r->xge_mac.phy_command & busy_bit)
+ ;
+
+ if (is_read)
+ v = r->xge_mac.phy_data >> 16;
+
+ ixge_software_firmware_sync_release (xd, 1 << (1 + xd->phy_index));
+
+ return v;
+}
+
+static u32
+ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
+{
+ return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0, /* is_read */
+ 1);
+}
+
+static void
+ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
+{
+ (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v, /* is_read */
+ 0);
+}
+
+static void
+ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
+{
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
+ u32 v;
+
+ v = 0;
+ v |= (sda != 0) << 3;
+ v |= (scl != 0) << 1;
+ xd->regs->i2c_control = v;
+}
+
+static void
+ixge_i2c_get_bits (i2c_bus_t * b, int *scl, int *sda)
+{
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
+ u32 v;
+
+ v = xd->regs->i2c_control;
+ *sda = (v & (1 << 2)) != 0;
+ *scl = (v & (1 << 0)) != 0;
+}
+
+static u16
+ixge_read_eeprom (ixge_device_t * xd, u32 address)
+{
+ ixge_regs_t *r = xd->regs;
+ u32 v;
+ r->eeprom_read = (( /* start bit */ (1 << 0)) | (address << 2));
+ /* Wait for done bit. */
+ while (!((v = r->eeprom_read) & (1 << 1)))
+ ;
+ return v >> 16;
+}
+
+static void
+ixge_sfp_enable_disable_laser (ixge_device_t * xd, uword enable)
+{
+ u32 tx_disable_bit = 1 << 3;
+ if (enable)
+ xd->regs->sdp_control &= ~tx_disable_bit;
+ else
+ xd->regs->sdp_control |= tx_disable_bit;
+}
+
+static void
+ixge_sfp_enable_disable_10g (ixge_device_t * xd, uword enable)
+{
+ u32 is_10g_bit = 1 << 5;
+ if (enable)
+ xd->regs->sdp_control |= is_10g_bit;
+ else
+ xd->regs->sdp_control &= ~is_10g_bit;
+}
+
+static clib_error_t *
+ixge_sfp_phy_init_from_eeprom (ixge_device_t * xd, u16 sfp_type)
+{
+ u16 a, id, reg_values_addr = 0;
+
+ a = ixge_read_eeprom (xd, 0x2b);
+ if (a == 0 || a == 0xffff)
+ return clib_error_create ("no init sequence in eeprom");
+
+ while (1)
+ {
+ id = ixge_read_eeprom (xd, ++a);
+ if (id == 0xffff)
+ break;
+ reg_values_addr = ixge_read_eeprom (xd, ++a);
+ if (id == sfp_type)
+ break;
+ }
+ if (id != sfp_type)
+ return clib_error_create ("failed to find id 0x%x", sfp_type);
+
+ ixge_software_firmware_sync (xd, 1 << 3);
+ while (1)
+ {
+ u16 v = ixge_read_eeprom (xd, ++reg_values_addr);
+ if (v == 0xffff)
+ break;
+ xd->regs->core_analog_config = v;
+ }
+ ixge_software_firmware_sync_release (xd, 1 << 3);
+
+ /* Make sure laser is off. We'll turn on the laser when
+ the interface is brought up. */
+ ixge_sfp_enable_disable_laser (xd, /* enable */ 0);
+ ixge_sfp_enable_disable_10g (xd, /* is_10g */ 1);
+
+ return 0;
+}
+
+static void
+ixge_sfp_device_up_down (ixge_device_t * xd, uword is_up)
+{
+ u32 v;
+
+ if (is_up)
+ {
+ /* pma/pmd 10g serial SFI. */
+ xd->regs->xge_mac.auto_negotiation_control2 &= ~(3 << 16);
+ xd->regs->xge_mac.auto_negotiation_control2 |= 2 << 16;
+
+ v = xd->regs->xge_mac.auto_negotiation_control;
+ v &= ~(7 << 13);
+ v |= (0 << 13);
+ /* Restart autoneg. */
+ v |= (1 << 12);
+ xd->regs->xge_mac.auto_negotiation_control = v;
+
+ while (!(xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
+ ;
+
+ v = xd->regs->xge_mac.auto_negotiation_control;
+
+ /* link mode 10g sfi serdes */
+ v &= ~(7 << 13);
+ v |= (3 << 13);
+
+ /* Restart autoneg. */
+ v |= (1 << 12);
+ xd->regs->xge_mac.auto_negotiation_control = v;
+
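+      /* Dummy read; link_status is presumably latched and reading it
+         here clears the stale value before the link comes up. */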
+ xd->regs->xge_mac.link_status;
+ }
+
+ ixge_sfp_enable_disable_laser (xd, /* enable */ is_up);
+
+ /* Give time for link partner to notice that we're up. */
+ if (is_up && vlib_in_process_context (vlib_get_main ()))
+ {
+ vlib_process_suspend (vlib_get_main (), 300e-3);
+ }
+}
+
+always_inline ixge_dma_regs_t *
+get_dma_regs (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 qi)
+{
+ ixge_regs_t *r = xd->regs;
+ ASSERT (qi < 128);
+ if (rt == VLIB_RX)
+ return qi < 64 ? &r->rx_dma0[qi] : &r->rx_dma1[qi - 64];
+ else
+ return &r->tx_dma[qi];
+}
+
+static clib_error_t *
+ixge_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+{
+ vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
+ uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, hif->dev_instance);
+ ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
+
+ if (is_up)
+ {
+ xd->regs->rx_enable |= 1;
+ xd->regs->tx_dma_control |= 1;
+ dr->control |= 1 << 25;
+ while (!(dr->control & (1 << 25)))
+ ;
+ }
+ else
+ {
+ xd->regs->rx_enable &= ~1;
+ xd->regs->tx_dma_control &= ~1;
+ }
+
+ ixge_sfp_device_up_down (xd, is_up);
+
+ return /* no error */ 0;
+}
+
+static void
+ixge_sfp_phy_init (ixge_device_t * xd)
+{
+ ixge_phy_t *phy = xd->phys + xd->phy_index;
+ i2c_bus_t *ib = &xd->i2c_bus;
+
+ ib->private_data = xd->device_index;
+ ib->put_bits = ixge_i2c_put_bits;
+ ib->get_bits = ixge_i2c_get_bits;
+ vlib_i2c_init (ib);
+
+ vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) & xd->sfp_eeprom);
+
+ if (vlib_i2c_bus_timed_out (ib) || !sfp_eeprom_is_valid (&xd->sfp_eeprom))
+ xd->sfp_eeprom.id = SFP_ID_unknown;
+ else
+ {
+ /* FIXME 5 => SR/LR eeprom ID. */
+ clib_error_t *e =
+ ixge_sfp_phy_init_from_eeprom (xd, 5 + xd->pci_function);
+ if (e)
+ clib_error_report (e);
+ }
+
+ phy->mdio_address = ~0;
+}
+
+static void
+ixge_phy_init (ixge_device_t * xd)
+{
+ ixge_main_t *xm = &ixge_main;
+ vlib_main_t *vm = xm->vlib_main;
+ ixge_phy_t *phy = xd->phys + xd->phy_index;
+
+ switch (xd->device_id)
+ {
+ case IXGE_82599_sfp:
+ case IXGE_82599_sfp_em:
+ case IXGE_82599_sfp_fcoe:
+ /* others? */
+ return ixge_sfp_phy_init (xd);
+
+ default:
+ break;
+ }
+
+ /* Probe address of phy. */
+ {
+ u32 i, v;
+
+ phy->mdio_address = ~0;
+ for (i = 0; i < 32; i++)
+ {
+ phy->mdio_address = i;
+ v = ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1);
+ if (v != 0xffff && v != 0)
+ break;
+ }
+
+ /* No PHY found? */
+ if (i >= 32)
+ return;
+ }
+
+ phy->id =
+ ((ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1) << 16) |
+ ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID2));
+
+ {
+    ELOG_TYPE_DECLARE (e) = {
+      .function = (char *) __FUNCTION__,
+      .format = "ixge %d, phy id 0x%x mdio address %d",
+      .format_args = "i4i4i4",
+    };
+ struct
+ {
+ u32 instance, id, address;
+ } *ed;
+ ed = ELOG_DATA (&vm->elog_main, e);
+ ed->instance = xd->device_index;
+ ed->id = phy->id;
+ ed->address = phy->mdio_address;
+ }
+
+ /* Reset phy. */
+ ixge_write_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL,
+ XGE_PHY_CONTROL_RESET);
+
+  /* Wait for self-clearing reset bit to clear. */
+ do
+ {
+ vlib_process_suspend (vm, 1e-3);
+ }
+ while (ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL) &
+ XGE_PHY_CONTROL_RESET);
+}
+
+static u8 *
+format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
+{
+ ixge_rx_from_hw_descriptor_t *d =
+ va_arg (*va, ixge_rx_from_hw_descriptor_t *);
+ u32 s0 = d->status[0], s2 = d->status[2];
+ u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
+ uword indent = format_get_indent (s);
+
+ s = format (s, "%s-owned",
+ (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE) ? "sw" :
+ "hw");
+ s =
+ format (s, ", length this descriptor %d, l3 offset %d",
+ d->n_packet_bytes_this_descriptor,
+ IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s0));
+ if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET)
+ s = format (s, ", end-of-packet");
+
+ s = format (s, "\n%U", format_white_space, indent);
+
+ if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_ETHERNET_ERROR)
+ s = format (s, "layer2 error");
+
+ if (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_LAYER2)
+ {
+ s = format (s, "layer 2 type %d", (s0 & 0x1f));
+ return s;
+ }
+
+ if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_VLAN)
+ s = format (s, "vlan header 0x%x\n%U", d->vlan_tag,
+ format_white_space, indent);
+
+ if ((is_ip4 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4)))
+ {
+ s = format (s, "ip4%s",
+ (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" :
+ "");
+ if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED)
+ s = format (s, " checksum %s",
+ (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR) ?
+ "bad" : "ok");
+ }
+ if ((is_ip6 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6)))
+ s = format (s, "ip6%s",
+ (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" :
+ "");
+ is_tcp = is_udp = 0;
+ if ((is_ip = (is_ip4 | is_ip6)))
+ {
+ is_tcp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP) != 0;
+ is_udp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP) != 0;
+ if (is_tcp)
+ s = format (s, ", tcp");
+ if (is_udp)
+ s = format (s, ", udp");
+ }
+
+ if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED)
+ s = format (s, ", tcp checksum %s",
+ (s2 & IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR) ? "bad" :
+ "ok");
+ if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED)
+ s = format (s, ", udp checksum %s",
+ (s2 & IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR) ? "bad" :
+ "ok");
+
+ return s;
+}
+
+static u8 *
+format_ixge_tx_descriptor (u8 * s, va_list * va)
+{
+ ixge_tx_descriptor_t *d = va_arg (*va, ixge_tx_descriptor_t *);
+ u32 s0 = d->status0, s1 = d->status1;
+ uword indent = format_get_indent (s);
+ u32 v;
+
+ s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
+ d->buffer_address, s1 >> 14, d->n_bytes_this_buffer);
+
+ s = format (s, "\n%U", format_white_space, indent);
+
+ if ((v = (s0 >> 0) & 3))
+ s = format (s, "reserved 0x%x, ", v);
+
+ if ((v = (s0 >> 2) & 3))
+ s = format (s, "mac 0x%x, ", v);
+
+ if ((v = (s0 >> 4) & 0xf) != 3)
+ s = format (s, "type 0x%x, ", v);
+
+ s = format (s, "%s%s%s%s%s%s%s%s",
+ (s0 & (1 << 8)) ? "eop, " : "",
+ (s0 & (1 << 9)) ? "insert-fcs, " : "",
+ (s0 & (1 << 10)) ? "reserved26, " : "",
+ (s0 & (1 << 11)) ? "report-status, " : "",
+ (s0 & (1 << 12)) ? "reserved28, " : "",
+ (s0 & (1 << 13)) ? "is-advanced, " : "",
+ (s0 & (1 << 14)) ? "vlan-enable, " : "",
+ (s0 & (1 << 15)) ? "tx-segmentation, " : "");
+
+ if ((v = s1 & 0xf) != 0)
+ s = format (s, "status 0x%x, ", v);
+
+ if ((v = (s1 >> 4) & 0xf))
+ s = format (s, "context 0x%x, ", v);
+
+ if ((v = (s1 >> 8) & 0x3f))
+ s = format (s, "options 0x%x, ", v);
+
+ return s;
+}
+
+typedef struct
+{
+ ixge_descriptor_t before, after;
+
+ u32 buffer_index;
+
+ u16 device_index;
+
+ u8 queue_index;
+
+ u8 is_start_of_packet;
+
+ /* Copy of VLIB buffer; packet data stored in pre_data. */
+ vlib_buffer_t buffer;
+} ixge_rx_dma_trace_t;
+
+static u8 *
+format_ixge_rx_dma_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ vlib_node_t *node = va_arg (*va, vlib_node_t *);
+ vnet_main_t *vnm = vnet_get_main ();
+ ixge_rx_dma_trace_t *t = va_arg (*va, ixge_rx_dma_trace_t *);
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
+ format_function_t *f;
+ uword indent = format_get_indent (s);
+
+ {
+ vnet_sw_interface_t *sw =
+ vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
+ s =
+ format (s, "%U rx queue %d", format_vnet_sw_interface_name, vnm, sw,
+ t->queue_index);
+ }
+
+ s = format (s, "\n%Ubefore: %U",
+ format_white_space, indent,
+ format_ixge_rx_from_hw_descriptor, &t->before);
+ s = format (s, "\n%Uafter : head/tail address 0x%Lx/0x%Lx",
+ format_white_space, indent,
+ t->after.rx_to_hw.head_address, t->after.rx_to_hw.tail_address);
+
+ s = format (s, "\n%Ubuffer 0x%x: %U",
+ format_white_space, indent,
+ t->buffer_index, format_vlib_buffer, &t->buffer);
+
+ s = format (s, "\n%U", format_white_space, indent);
+
+ f = node->format_buffer;
+ if (!f || !t->is_start_of_packet)
+ f = format_hex_bytes;
+ s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
+
+ return s;
+}
+
+#define foreach_ixge_error \
+ _ (none, "no error") \
+ _ (tx_full_drops, "tx ring full drops") \
+ _ (ip4_checksum_error, "ip4 checksum errors") \
+ _ (rx_alloc_fail, "rx buf alloc from free list failed") \
+ _ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")
+
+typedef enum
+{
+#define _(f,s) IXGE_ERROR_##f,
+ foreach_ixge_error
+#undef _
+ IXGE_N_ERROR,
+} ixge_error_t;
+
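+/* Map hardware RX descriptor status bits to a vlib next node, error
+   code and buffer flags: ip4/ip6 packets are steered directly to the
+   ip4/ip6 input nodes (bypassing ethernet-input), checksum errors are
+   redirected to drop, and L4 checksum flags are set so that the
+   TCP/UDP code can skip software checksum validation. */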
+always_inline void
+ixge_rx_next_and_error_from_status_x1 (ixge_device_t * xd,
+ u32 s00, u32 s02,
+ u8 * next0, u8 * error0, u32 * flags0)
+{
+ u8 is0_ip4, is0_ip6, n0, e0;
+ u32 f0;
+
+ e0 = IXGE_ERROR_none;
+ n0 = IXGE_RX_NEXT_ETHERNET_INPUT;
+
+ is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
+ n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
+
+ e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
+ ? IXGE_ERROR_ip4_checksum_error : e0);
+
+ is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
+ n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
+
+ n0 = (xd->per_interface_next_index != ~0) ?
+ xd->per_interface_next_index : n0;
+
+ /* Check for error. */
+ n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
+
+ f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
+ | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
+ ? IP_BUFFER_L4_CHECKSUM_COMPUTED : 0);
+
+ f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
+ | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
+ ? 0 : IP_BUFFER_L4_CHECKSUM_CORRECT);
+
+ *error0 = e0;
+ *next0 = n0;
+ *flags0 = f0;
+}
+
+always_inline void
+ixge_rx_next_and_error_from_status_x2 (ixge_device_t * xd,
+ u32 s00, u32 s02,
+ u32 s10, u32 s12,
+ u8 * next0, u8 * error0, u32 * flags0,
+ u8 * next1, u8 * error1, u32 * flags1)
+{
+ u8 is0_ip4, is0_ip6, n0, e0;
+ u8 is1_ip4, is1_ip6, n1, e1;
+ u32 f0, f1;
+
+ e0 = e1 = IXGE_ERROR_none;
+  n0 = n1 = IXGE_RX_NEXT_ETHERNET_INPUT;
+
+ is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
+ is1_ip4 = s12 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
+
+ n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
+ n1 = is1_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n1;
+
+ e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
+ ? IXGE_ERROR_ip4_checksum_error : e0);
+ e1 = (is1_ip4 && (s12 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
+ ? IXGE_ERROR_ip4_checksum_error : e1);
+
+ is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
+ is1_ip6 = s10 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
+
+ n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
+ n1 = is1_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n1;
+
+ n0 = (xd->per_interface_next_index != ~0) ?
+ xd->per_interface_next_index : n0;
+ n1 = (xd->per_interface_next_index != ~0) ?
+ xd->per_interface_next_index : n1;
+
+ /* Check for error. */
+ n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
+ n1 = e1 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n1;
+
+ *error0 = e0;
+ *error1 = e1;
+
+ *next0 = n0;
+ *next1 = n1;
+
+ f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
+ | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
+ ? IP_BUFFER_L4_CHECKSUM_COMPUTED : 0);
+ f1 = ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
+ | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
+ ? IP_BUFFER_L4_CHECKSUM_COMPUTED : 0);
+
+ f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
+ | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
+ ? 0 : IP_BUFFER_L4_CHECKSUM_CORRECT);
+ f1 |= ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
+ | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
+ ? 0 : IP_BUFFER_L4_CHECKSUM_CORRECT);
+
+ *flags0 = f0;
+ *flags1 = f1;
+}
+
+static void
+ixge_rx_trace (ixge_main_t * xm,
+ ixge_device_t * xd,
+ ixge_dma_queue_t * dq,
+ ixge_descriptor_t * before_descriptors,
+ u32 * before_buffers,
+ ixge_descriptor_t * after_descriptors, uword n_descriptors)
+{
+ vlib_main_t *vm = xm->vlib_main;
+ vlib_node_runtime_t *node = dq->rx.node;
+ ixge_rx_from_hw_descriptor_t *bd;
+ ixge_rx_to_hw_descriptor_t *ad;
+ u32 *b, n_left, is_sop, next_index_sop;
+
+ n_left = n_descriptors;
+ b = before_buffers;
+ bd = &before_descriptors->rx_from_hw;
+ ad = &after_descriptors->rx_to_hw;
+ is_sop = dq->rx.is_start_of_packet;
+ next_index_sop = dq->rx.saved_start_of_packet_next_index;
+
+ while (n_left >= 2)
+ {
+ u32 bi0, bi1, flags0, flags1;
+ vlib_buffer_t *b0, *b1;
+ ixge_rx_dma_trace_t *t0, *t1;
+ u8 next0, error0, next1, error1;
+
+ bi0 = b[0];
+ bi1 = b[1];
+ n_left -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ ixge_rx_next_and_error_from_status_x2 (xd,
+ bd[0].status[0], bd[0].status[2],
+ bd[1].status[0], bd[1].status[2],
+ &next0, &error0, &flags0,
+ &next1, &error1, &flags1);
+
+ next_index_sop = is_sop ? next0 : next_index_sop;
+ vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
+ t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t0->is_start_of_packet = is_sop;
+ is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
+
+ next_index_sop = is_sop ? next1 : next_index_sop;
+ vlib_trace_buffer (vm, node, next_index_sop, b1, /* follow_chain */ 0);
+ t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
+ t1->is_start_of_packet = is_sop;
+ is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
+
+ t0->queue_index = dq->queue_index;
+ t1->queue_index = dq->queue_index;
+ t0->device_index = xd->device_index;
+ t1->device_index = xd->device_index;
+ t0->before.rx_from_hw = bd[0];
+ t1->before.rx_from_hw = bd[1];
+ t0->after.rx_to_hw = ad[0];
+ t1->after.rx_to_hw = ad[1];
+ t0->buffer_index = bi0;
+ t1->buffer_index = bi1;
+ memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
+ memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
+ memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
+ sizeof (t0->buffer.pre_data));
+ memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
+ sizeof (t1->buffer.pre_data));
+
+ b += 2;
+ bd += 2;
+ ad += 2;
+ }
+
+ while (n_left >= 1)
+ {
+ u32 bi0, flags0;
+ vlib_buffer_t *b0;
+ ixge_rx_dma_trace_t *t0;
+ u8 next0, error0;
+
+ bi0 = b[0];
+ n_left -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ ixge_rx_next_and_error_from_status_x1 (xd,
+ bd[0].status[0], bd[0].status[2],
+ &next0, &error0, &flags0);
+
+ next_index_sop = is_sop ? next0 : next_index_sop;
+ vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
+ t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t0->is_start_of_packet = is_sop;
+ is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
+
+ t0->queue_index = dq->queue_index;
+ t0->device_index = xd->device_index;
+ t0->before.rx_from_hw = bd[0];
+ t0->after.rx_to_hw = ad[0];
+ t0->buffer_index = bi0;
+ memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
+ memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
+ sizeof (t0->buffer.pre_data));
+
+ b += 1;
+ bd += 1;
+ ad += 1;
+ }
+}
+
+typedef struct
+{
+ ixge_tx_descriptor_t descriptor;
+
+ u32 buffer_index;
+
+ u16 device_index;
+
+ u8 queue_index;
+
+ u8 is_start_of_packet;
+
+ /* Copy of VLIB buffer; packet data stored in pre_data. */
+ vlib_buffer_t buffer;
+} ixge_tx_dma_trace_t;
+
+static u8 *
+format_ixge_tx_dma_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ ixge_tx_dma_trace_t *t = va_arg (*va, ixge_tx_dma_trace_t *);
+ vnet_main_t *vnm = vnet_get_main ();
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
+ format_function_t *f;
+ uword indent = format_get_indent (s);
+
+ {
+ vnet_sw_interface_t *sw =
+ vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
+ s =
+ format (s, "%U tx queue %d", format_vnet_sw_interface_name, vnm, sw,
+ t->queue_index);
+ }
+
+ s = format (s, "\n%Udescriptor: %U",
+ format_white_space, indent,
+ format_ixge_tx_descriptor, &t->descriptor);
+
+ s = format (s, "\n%Ubuffer 0x%x: %U",
+ format_white_space, indent,
+ t->buffer_index, format_vlib_buffer, &t->buffer);
+
+ s = format (s, "\n%U", format_white_space, indent);
+
+ f = format_ethernet_header_with_length;
+ if (!f || !t->is_start_of_packet)
+ f = format_hex_bytes;
+ s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
+
+ return s;
+}
+
+typedef struct
+{
+ vlib_node_runtime_t *node;
+
+ u32 is_start_of_packet;
+
+ u32 n_bytes_in_packet;
+
+ ixge_tx_descriptor_t *start_of_packet_descriptor;
+} ixge_tx_state_t;
+
+static void
+ixge_tx_trace (ixge_main_t * xm,
+ ixge_device_t * xd,
+ ixge_dma_queue_t * dq,
+ ixge_tx_state_t * tx_state,
+ ixge_tx_descriptor_t * descriptors,
+ u32 * buffers, uword n_descriptors)
+{
+ vlib_main_t *vm = xm->vlib_main;
+ vlib_node_runtime_t *node = tx_state->node;
+ ixge_tx_descriptor_t *d;
+ u32 *b, n_left, is_sop;
+
+ n_left = n_descriptors;
+ b = buffers;
+ d = descriptors;
+ is_sop = tx_state->is_start_of_packet;
+
+ while (n_left >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ ixge_tx_dma_trace_t *t0, *t1;
+
+ bi0 = b[0];
+ bi1 = b[1];
+ n_left -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t0->is_start_of_packet = is_sop;
+ is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
+
+ t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
+ t1->is_start_of_packet = is_sop;
+ is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
+
+ t0->queue_index = dq->queue_index;
+ t1->queue_index = dq->queue_index;
+ t0->device_index = xd->device_index;
+ t1->device_index = xd->device_index;
+ t0->descriptor = d[0];
+ t1->descriptor = d[1];
+ t0->buffer_index = bi0;
+ t1->buffer_index = bi1;
+ memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
+ memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
+ memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
+ sizeof (t0->buffer.pre_data));
+ memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
+ sizeof (t1->buffer.pre_data));
+
+ b += 2;
+ d += 2;
+ }
+
+ while (n_left >= 1)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ ixge_tx_dma_trace_t *t0;
+
+ bi0 = b[0];
+ n_left -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t0->is_start_of_packet = is_sop;
+ is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
+
+ t0->queue_index = dq->queue_index;
+ t0->device_index = xd->device_index;
+ t0->descriptor = d[0];
+ t0->buffer_index = bi0;
+ memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
+ memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
+ sizeof (t0->buffer.pre_data));
+
+ b += 1;
+ d += 1;
+ }
+}
+
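+/* Descriptor ring index arithmetic, modulo q->n_descriptors:
+   ixge_ring_sub computes the distance from i0 forward to i1 (e.g. the
+   number of in-use slots between head and tail); ixge_ring_add
+   advances an index with wrap-around. */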
+always_inline uword
+ixge_ring_sub (ixge_dma_queue_t * q, u32 i0, u32 i1)
+{
+ i32 d = i1 - i0;
+ ASSERT (i0 < q->n_descriptors);
+ ASSERT (i1 < q->n_descriptors);
+ return d < 0 ? q->n_descriptors + d : d;
+}
+
+always_inline uword
+ixge_ring_add (ixge_dma_queue_t * q, u32 i0, u32 i1)
+{
+ u32 d = i0 + i1;
+ ASSERT (i0 < q->n_descriptors);
+ ASSERT (i1 < q->n_descriptors);
+ d -= d >= q->n_descriptors ? q->n_descriptors : 0;
+ return d;
+}
+
+always_inline uword
+ixge_tx_descriptor_matches_template (ixge_main_t * xm,
+ ixge_tx_descriptor_t * d)
+{
+ u32 cmp;
+
+ cmp = ((d->status0 & xm->tx_descriptor_template_mask.status0)
+ ^ xm->tx_descriptor_template.status0);
+ if (cmp)
+ return 0;
+ cmp = ((d->status1 & xm->tx_descriptor_template_mask.status1)
+ ^ xm->tx_descriptor_template.status1);
+ if (cmp)
+ return 0;
+
+ return 1;
+}
+
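+/* Fill a contiguous run of TX descriptors (no ring wrap). Each slot of
+   descriptor_buffer_indices is swapped as we go: the buffer index that
+   was there (already transmitted) is appended to
+   tx_buffers_pending_free and replaced by the buffer being queued now,
+   so completed buffers are reclaimed lazily without a separate scan. */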
+static uword
+ixge_tx_no_wrap (ixge_main_t * xm,
+ ixge_device_t * xd,
+ ixge_dma_queue_t * dq,
+ u32 * buffers,
+ u32 start_descriptor_index,
+ u32 n_descriptors, ixge_tx_state_t * tx_state)
+{
+ vlib_main_t *vm = xm->vlib_main;
+ ixge_tx_descriptor_t *d, *d_sop;
+ u32 n_left = n_descriptors;
+ u32 *to_free = vec_end (xm->tx_buffers_pending_free);
+ u32 *to_tx =
+ vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
+ u32 is_sop = tx_state->is_start_of_packet;
+ u32 len_sop = tx_state->n_bytes_in_packet;
+ u16 template_status = xm->tx_descriptor_template.status0;
+ u32 descriptor_prefetch_rotor = 0;
+
+ ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
+ d = &dq->descriptors[start_descriptor_index].tx;
+ d_sop = is_sop ? d : tx_state->start_of_packet_descriptor;
+
+ while (n_left >= 4)
+ {
+ vlib_buffer_t *b0, *b1;
+ u32 bi0, fi0, len0;
+ u32 bi1, fi1, len1;
+ u8 is_eop0, is_eop1;
+
+ /* Prefetch next iteration. */
+ vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
+ vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);
+
+ if ((descriptor_prefetch_rotor & 0x3) == 0)
+ CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);
+
+ descriptor_prefetch_rotor += 2;
+
+ bi0 = buffers[0];
+ bi1 = buffers[1];
+
+ to_free[0] = fi0 = to_tx[0];
+ to_tx[0] = bi0;
+ to_free += fi0 != 0;
+
+ to_free[0] = fi1 = to_tx[1];
+ to_tx[1] = bi1;
+ to_free += fi1 != 0;
+
+ buffers += 2;
+ n_left -= 2;
+ to_tx += 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
+ is_eop1 = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
+
+ len0 = b0->current_length;
+ len1 = b1->current_length;
+
+ ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
+ ASSERT (ixge_tx_descriptor_matches_template (xm, d + 1));
+
+ d[0].buffer_address =
+ vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
+ d[1].buffer_address =
+ vlib_get_buffer_data_physical_address (vm, bi1) + b1->current_data;
+
+ d[0].n_bytes_this_buffer = len0;
+ d[1].n_bytes_this_buffer = len1;
+
+ d[0].status0 =
+ template_status | (is_eop0 <<
+ IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
+ d[1].status0 =
+ template_status | (is_eop1 <<
+ IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
+
+ len_sop = (is_sop ? 0 : len_sop) + len0;
+ d_sop[0].status1 =
+ IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
+ d += 1;
+ d_sop = is_eop0 ? d : d_sop;
+
+ is_sop = is_eop0;
+
+ len_sop = (is_sop ? 0 : len_sop) + len1;
+ d_sop[0].status1 =
+ IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
+ d += 1;
+ d_sop = is_eop1 ? d : d_sop;
+
+ is_sop = is_eop1;
+ }
+
+ while (n_left > 0)
+ {
+ vlib_buffer_t *b0;
+ u32 bi0, fi0, len0;
+ u8 is_eop0;
+
+ bi0 = buffers[0];
+
+ to_free[0] = fi0 = to_tx[0];
+ to_tx[0] = bi0;
+ to_free += fi0 != 0;
+
+ buffers += 1;
+ n_left -= 1;
+ to_tx += 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
+
+ len0 = b0->current_length;
+
+ ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
+
+ d[0].buffer_address =
+ vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
+
+ d[0].n_bytes_this_buffer = len0;
+
+ d[0].status0 =
+ template_status | (is_eop0 <<
+ IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
+
+ len_sop = (is_sop ? 0 : len_sop) + len0;
+ d_sop[0].status1 =
+ IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
+ d += 1;
+ d_sop = is_eop0 ? d : d_sop;
+
+ is_sop = is_eop0;
+ }
+
+ if (tx_state->node->flags & VLIB_NODE_FLAG_TRACE)
+ {
+ to_tx =
+ vec_elt_at_index (dq->descriptor_buffer_indices,
+ start_descriptor_index);
+ ixge_tx_trace (xm, xd, dq, tx_state,
+ &dq->descriptors[start_descriptor_index].tx, to_tx,
+ n_descriptors);
+ }
+
+ _vec_len (xm->tx_buffers_pending_free) =
+ to_free - xm->tx_buffers_pending_free;
+
+ /* When we are done d_sop can point to end of ring. Wrap it if so. */
+ {
+ ixge_tx_descriptor_t *d_start = &dq->descriptors[0].tx;
+
+ ASSERT (d_sop - d_start <= dq->n_descriptors);
+ d_sop = d_sop - d_start == dq->n_descriptors ? d_start : d_sop;
+ }
+
+ tx_state->is_start_of_packet = is_sop;
+ tx_state->start_of_packet_descriptor = d_sop;
+ tx_state->n_bytes_in_packet = len_sop;
+
+ return n_descriptors;
+}
+
+static uword
+ixge_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * f)
+{
+ ixge_main_t *xm = &ixge_main;
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, rd->dev_instance);
+ ixge_dma_queue_t *dq;
+ u32 *from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
+ u32 queue_index = 0; /* fixme parameter */
+ ixge_tx_state_t tx_state;
+
+ tx_state.node = node;
+ tx_state.is_start_of_packet = 1;
+ tx_state.start_of_packet_descriptor = 0;
+ tx_state.n_bytes_in_packet = 0;
+
+ from = vlib_frame_vector_args (f);
+
+ dq = vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
+
+ dq->head_index = dq->tx.head_index_write_back[0];
+
+  /* Since head == tail means the ring is empty, we can send at most dq->n_descriptors - 1. */
+ n_left_tx = dq->n_descriptors - 1;
+ n_left_tx -= ixge_ring_sub (dq, dq->head_index, dq->tail_index);
+
+ _vec_len (xm->tx_buffers_pending_free) = 0;
+
+ n_descriptors_to_tx = f->n_vectors;
+ n_tail_drop = 0;
+ if (PREDICT_FALSE (n_descriptors_to_tx > n_left_tx))
+ {
+ i32 i, n_ok, i_eop, i_sop;
+
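+      /* The ring cannot hold the whole frame. Walk backwards from the
+         last descriptor which fits, looking for an end-of-packet
+         boundary, so that only whole packets are sent; everything past
+         that boundary is dropped and counted below. */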
+ i_sop = i_eop = ~0;
+ for (i = n_left_tx - 1; i >= 0; i--)
+ {
+ vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
+ if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ if (i_sop != ~0 && i_eop != ~0)
+ break;
+ i_eop = i;
+ i_sop = i + 1;
+ }
+ }
+ if (i == 0)
+ n_ok = 0;
+ else
+ n_ok = i_eop + 1;
+
+ {
+	ELOG_TYPE_DECLARE (e) = {
+	  .function = (char *) __FUNCTION__,
+	  .format = "ixge %d, ring full to tx %d head %d tail %d",
+	  .format_args = "i2i2i2i2",
+	};
+ struct
+ {
+ u16 instance, to_tx, head, tail;
+ } *ed;
+ ed = ELOG_DATA (&vm->elog_main, e);
+ ed->instance = xd->device_index;
+ ed->to_tx = n_descriptors_to_tx;
+ ed->head = dq->head_index;
+ ed->tail = dq->tail_index;
+ }
+
+ if (n_ok < n_descriptors_to_tx)
+ {
+ n_tail_drop = n_descriptors_to_tx - n_ok;
+ vec_add (xm->tx_buffers_pending_free, from + n_ok, n_tail_drop);
+ vlib_error_count (vm, ixge_input_node.index,
+ IXGE_ERROR_tx_full_drops, n_tail_drop);
+ }
+
+ n_descriptors_to_tx = n_ok;
+ }
+
+ dq->tx.n_buffers_on_ring += n_descriptors_to_tx;
+
+ /* Process from tail to end of descriptor ring. */
+ if (n_descriptors_to_tx > 0 && dq->tail_index < dq->n_descriptors)
+ {
+ u32 n =
+ clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
+ n = ixge_tx_no_wrap (xm, xd, dq, from, dq->tail_index, n, &tx_state);
+ from += n;
+ n_descriptors_to_tx -= n;
+ dq->tail_index += n;
+ ASSERT (dq->tail_index <= dq->n_descriptors);
+ if (dq->tail_index == dq->n_descriptors)
+ dq->tail_index = 0;
+ }
+
+ if (n_descriptors_to_tx > 0)
+ {
+ u32 n =
+ ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
+ from += n;
+ ASSERT (n == n_descriptors_to_tx);
+ dq->tail_index += n;
+ ASSERT (dq->tail_index <= dq->n_descriptors);
+ if (dq->tail_index == dq->n_descriptors)
+ dq->tail_index = 0;
+ }
+
+ /* We should only get full packets. */
+ ASSERT (tx_state.is_start_of_packet);
+
+ /* Report status when last descriptor is done. */
+ {
+ u32 i = dq->tail_index == 0 ? dq->n_descriptors - 1 : dq->tail_index - 1;
+ ixge_tx_descriptor_t *d = &dq->descriptors[i].tx;
+ d->status0 |= IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS;
+ }
+
+ /* Give new descriptors to hardware. */
+ {
+ ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_TX, queue_index);
+
+ CLIB_MEMORY_BARRIER ();
+
+ dr->tail_index = dq->tail_index;
+ }
+
+ /* Free any buffers that are done. */
+ {
+ u32 n = _vec_len (xm->tx_buffers_pending_free);
+ if (n > 0)
+ {
+ vlib_buffer_free_no_next (vm, xm->tx_buffers_pending_free, n);
+ _vec_len (xm->tx_buffers_pending_free) = 0;
+ ASSERT (dq->tx.n_buffers_on_ring >= n);
+ dq->tx.n_buffers_on_ring -= (n - n_tail_drop);
+ }
+ }
+
+ return f->n_vectors;
+}
+
+static uword
+ixge_rx_queue_no_wrap (ixge_main_t * xm,
+ ixge_device_t * xd,
+ ixge_dma_queue_t * dq,
+ u32 start_descriptor_index, u32 n_descriptors)
+{
+ vlib_main_t *vm = xm->vlib_main;
+ vlib_node_runtime_t *node = dq->rx.node;
+ ixge_descriptor_t *d;
+ static ixge_descriptor_t *d_trace_save;
+ static u32 *d_trace_buffers;
+ u32 n_descriptors_left = n_descriptors;
+ u32 *to_rx =
+ vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
+ u32 *to_add;
+ u32 bi_sop = dq->rx.saved_start_of_packet_buffer_index;
+ u32 bi_last = dq->rx.saved_last_buffer_index;
+ u32 next_index_sop = dq->rx.saved_start_of_packet_next_index;
+ u32 is_sop = dq->rx.is_start_of_packet;
+ u32 next_index, n_left_to_next, *to_next;
+ u32 n_packets = 0;
+ u32 n_bytes = 0;
+ u32 n_trace = vlib_get_trace_count (vm, node);
+ vlib_buffer_t *b_last, b_dummy;
+
+ ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
+ d = &dq->descriptors[start_descriptor_index];
+
+ b_last = bi_last != ~0 ? vlib_get_buffer (vm, bi_last) : &b_dummy;
+ next_index = dq->rx.next_index;
+
+ if (n_trace > 0)
+ {
+ u32 n = clib_min (n_trace, n_descriptors);
+ if (d_trace_save)
+ {
+ _vec_len (d_trace_save) = 0;
+ _vec_len (d_trace_buffers) = 0;
+ }
+ vec_add (d_trace_save, (ixge_descriptor_t *) d, n);
+ vec_add (d_trace_buffers, to_rx, n);
+ }
+
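+  /* Replenish the driver's RX buffer cache when it cannot cover this
+     batch. We aim for 2 * n_descriptors buffers so the next call
+     usually finds enough; on partial allocation the batch is shrunk
+     rather than leaving descriptors without replacement buffers. */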
+ {
+ uword l = vec_len (xm->rx_buffers_to_add);
+
+ if (l < n_descriptors_left)
+ {
+ u32 n_to_alloc = 2 * dq->n_descriptors - l;
+ u32 n_allocated;
+
+ vec_resize (xm->rx_buffers_to_add, n_to_alloc);
+
+ _vec_len (xm->rx_buffers_to_add) = l;
+ n_allocated = vlib_buffer_alloc_from_free_list
+ (vm, xm->rx_buffers_to_add + l, n_to_alloc,
+ xm->vlib_buffer_free_list_index);
+ _vec_len (xm->rx_buffers_to_add) += n_allocated;
+
+ /* Handle transient allocation failure */
+ if (PREDICT_FALSE (l + n_allocated <= n_descriptors_left))
+ {
+ if (n_allocated == 0)
+ vlib_error_count (vm, ixge_input_node.index,
+ IXGE_ERROR_rx_alloc_no_physmem, 1);
+ else
+ vlib_error_count (vm, ixge_input_node.index,
+ IXGE_ERROR_rx_alloc_fail, 1);
+
+ n_descriptors_left = l + n_allocated;
+ }
+ n_descriptors = n_descriptors_left;
+ }
+
+ /* Add buffers from end of vector going backwards. */
+ to_add = vec_end (xm->rx_buffers_to_add) - 1;
+ }
+
+ while (n_descriptors_left > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_descriptors_left >= 4 && n_left_to_next >= 2)
+ {
+ vlib_buffer_t *b0, *b1;
+ u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
+ u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
+ u8 is_eop0, error0, next0;
+ u8 is_eop1, error1, next1;
+ ixge_descriptor_t d0, d1;
+
+ vlib_prefetch_buffer_with_index (vm, to_rx[2], STORE);
+ vlib_prefetch_buffer_with_index (vm, to_rx[3], STORE);
+
+ CLIB_PREFETCH (d + 2, 32, STORE);
+
+ d0.as_u32x4 = d[0].as_u32x4;
+ d1.as_u32x4 = d[1].as_u32x4;
+
+ s20 = d0.rx_from_hw.status[2];
+ s21 = d1.rx_from_hw.status[2];
+
+ s00 = d0.rx_from_hw.status[0];
+ s01 = d1.rx_from_hw.status[0];
+
+	  if (!((s20 & s21) & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
+ goto found_hw_owned_descriptor_x2;
+
+ bi0 = to_rx[0];
+ bi1 = to_rx[1];
+
+ ASSERT (to_add - 1 >= xm->rx_buffers_to_add);
+ fi0 = to_add[0];
+ fi1 = to_add[-1];
+
+ to_rx[0] = fi0;
+ to_rx[1] = fi1;
+ to_rx += 2;
+ to_add -= 2;
+
+ ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+ vlib_buffer_is_known (vm, bi0));
+ ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+ vlib_buffer_is_known (vm, bi1));
+ ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+ vlib_buffer_is_known (vm, fi0));
+ ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+ vlib_buffer_is_known (vm, fi1));
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /*
+ * Turn this on if you run into
+ * "bad monkey" contexts, and you want to know exactly
+ * which nodes they've visited... See main.c...
+ */
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
+
+ CLIB_PREFETCH (b0->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD);
+
+ is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
+ is_eop1 = (s21 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
+
+ ixge_rx_next_and_error_from_status_x2 (xd, s00, s20, s01, s21,
+ &next0, &error0, &flags0,
+ &next1, &error1, &flags1);
+
+ next0 = is_sop ? next0 : next_index_sop;
+ next1 = is_eop0 ? next1 : next0;
+ next_index_sop = next1;
+
+ b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
+ b1->flags |= flags1 | (!is_eop1 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
+ vnet_buffer (b1)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+ b0->error = node->errors[error0];
+ b1->error = node->errors[error1];
+
+ len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
+ len1 = d1.rx_from_hw.n_packet_bytes_this_descriptor;
+ n_bytes += len0 + len1;
+ n_packets += is_eop0 + is_eop1;
+
+ /* Give new buffers to hardware. */
+ d0.rx_to_hw.tail_address =
+ vlib_get_buffer_data_physical_address (vm, fi0);
+ d1.rx_to_hw.tail_address =
+ vlib_get_buffer_data_physical_address (vm, fi1);
+ d0.rx_to_hw.head_address = d[0].rx_to_hw.tail_address;
+ d1.rx_to_hw.head_address = d[1].rx_to_hw.tail_address;
+ d[0].as_u32x4 = d0.as_u32x4;
+ d[1].as_u32x4 = d1.as_u32x4;
+
+ d += 2;
+ n_descriptors_left -= 2;
+
+ /* Point to either l2 or l3 header depending on next. */
+ l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
+ ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
+ l3_offset1 = (is_eop0 && (next1 != IXGE_RX_NEXT_ETHERNET_INPUT))
+ ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s01) : 0;
+
+ b0->current_length = len0 - l3_offset0;
+ b1->current_length = len1 - l3_offset1;
+ b0->current_data = l3_offset0;
+ b1->current_data = l3_offset1;
+
+ b_last->next_buffer = is_sop ? ~0 : bi0;
+ b0->next_buffer = is_eop0 ? ~0 : bi1;
+ bi_last = bi1;
+ b_last = b1;
+
+ if (CLIB_DEBUG > 0)
+ {
+ u32 bi_sop0 = is_sop ? bi0 : bi_sop;
+ u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
+
+ if (is_eop0)
+ {
+ u8 *msg = vlib_validate_buffer (vm, bi_sop0,
+ /* follow_buffer_next */ 1);
+ ASSERT (!msg);
+ }
+ if (is_eop1)
+ {
+ u8 *msg = vlib_validate_buffer (vm, bi_sop1,
+ /* follow_buffer_next */ 1);
+ ASSERT (!msg);
+ }
+ }
+ if (0) /* "Dave" version */
+ {
+ u32 bi_sop0 = is_sop ? bi0 : bi_sop;
+ u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
+
+ if (is_eop0)
+ {
+ to_next[0] = bi_sop0;
+ to_next++;
+ n_left_to_next--;
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi_sop0, next0);
+ }
+ if (is_eop1)
+ {
+ to_next[0] = bi_sop1;
+ to_next++;
+ n_left_to_next--;
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi_sop1, next1);
+ }
+ is_sop = is_eop1;
+ bi_sop = bi_sop1;
+ }
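+	  /* The "Eliot" version enqueues speculatively to the cached
+	     next frame: the buffer index is stored and the to_next
+	     cursor advanced by is_eop (0 or 1), with no branch. Only
+	     when a packet's next node differs from the cached
+	     next_index is the speculation undone and the buffer
+	     re-enqueued on the correct frame. */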
+ if (1) /* "Eliot" version */
+ {
+ /* Speculatively enqueue to cached next. */
+ u8 saved_is_sop = is_sop;
+ u32 bi_sop_save = bi_sop;
+
+ bi_sop = saved_is_sop ? bi0 : bi_sop;
+ to_next[0] = bi_sop;
+ to_next += is_eop0;
+ n_left_to_next -= is_eop0;
+
+ bi_sop = is_eop0 ? bi1 : bi_sop;
+ to_next[0] = bi_sop;
+ to_next += is_eop1;
+ n_left_to_next -= is_eop1;
+
+ is_sop = is_eop1;
+
+ if (PREDICT_FALSE
+ (!(next0 == next_index && next1 == next_index)))
+ {
+ /* Undo speculation. */
+ to_next -= is_eop0 + is_eop1;
+ n_left_to_next += is_eop0 + is_eop1;
+
+ /* Re-do both descriptors being careful about where we enqueue. */
+ bi_sop = saved_is_sop ? bi0 : bi_sop_save;
+ if (is_eop0)
+ {
+ if (next0 != next_index)
+ vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
+ else
+ {
+ to_next[0] = bi_sop;
+ to_next += 1;
+ n_left_to_next -= 1;
+ }
+ }
+
+ bi_sop = is_eop0 ? bi1 : bi_sop;
+ if (is_eop1)
+ {
+ if (next1 != next_index)
+ vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
+ else
+ {
+ to_next[0] = bi_sop;
+ to_next += 1;
+ n_left_to_next -= 1;
+ }
+ }
+
+ /* Switch cached next index when next for both packets is the same. */
+ if (is_eop0 && is_eop1 && next0 == next1)
+ {
+ vlib_put_next_frame (vm, node, next_index,
+ n_left_to_next);
+ next_index = next0;
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+ }
+ }
+ }
+ }
+
+ /* Bail out of dual loop and proceed with single loop. */
+ found_hw_owned_descriptor_x2:
+
+ while (n_descriptors_left > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *b0;
+ u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
+ u8 is_eop0, error0, next0;
+ ixge_descriptor_t d0;
+
+ d0.as_u32x4 = d[0].as_u32x4;
+
+ s20 = d0.rx_from_hw.status[2];
+ s00 = d0.rx_from_hw.status[0];
+
+ if (!(s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
+ goto found_hw_owned_descriptor_x1;
+
+ bi0 = to_rx[0];
+ ASSERT (to_add >= xm->rx_buffers_to_add);
+ fi0 = to_add[0];
+
+ to_rx[0] = fi0;
+ to_rx += 1;
+ to_add -= 1;
+
+ ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+ vlib_buffer_is_known (vm, bi0));
+ ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+ vlib_buffer_is_known (vm, fi0));
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /*
+ * Turn this on if you run into
+ * "bad monkey" contexts, and you want to know exactly
+ * which nodes they've visited...
+ */
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+
+ is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
+ ixge_rx_next_and_error_from_status_x1
+ (xd, s00, s20, &next0, &error0, &flags0);
+
+ next0 = is_sop ? next0 : next_index_sop;
+ next_index_sop = next0;
+
+ b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+ b0->error = node->errors[error0];
+
+ len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
+ n_bytes += len0;
+ n_packets += is_eop0;
+
+ /* Give new buffer to hardware. */
+ d0.rx_to_hw.tail_address =
+ vlib_get_buffer_data_physical_address (vm, fi0);
+ d0.rx_to_hw.head_address = d0.rx_to_hw.tail_address;
+ d[0].as_u32x4 = d0.as_u32x4;
+
+ d += 1;
+ n_descriptors_left -= 1;
+
+ /* Point to either l2 or l3 header depending on next. */
+ l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
+ ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
+ b0->current_length = len0 - l3_offset0;
+ b0->current_data = l3_offset0;
+
+ b_last->next_buffer = is_sop ? ~0 : bi0;
+ bi_last = bi0;
+ b_last = b0;
+
+ bi_sop = is_sop ? bi0 : bi_sop;
+
+ if (CLIB_DEBUG > 0 && is_eop0)
+ {
+ u8 *msg =
+ vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
+ ASSERT (!msg);
+ }
+
+ if (0) /* "Dave" version */
+ {
+ if (is_eop0)
+ {
+ to_next[0] = bi_sop;
+ to_next++;
+ n_left_to_next--;
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi_sop, next0);
+ }
+ }
+ if (1) /* "Eliot" version */
+ {
+ if (PREDICT_TRUE (next0 == next_index))
+ {
+ to_next[0] = bi_sop;
+ to_next += is_eop0;
+ n_left_to_next -= is_eop0;
+ }
+ else
+ {
+ if (next0 != next_index && is_eop0)
+ vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ next_index = next0;
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+ }
+ }
+ is_sop = is_eop0;
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+found_hw_owned_descriptor_x1:
+ if (n_descriptors_left > 0)
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+
+ _vec_len (xm->rx_buffers_to_add) = (to_add + 1) - xm->rx_buffers_to_add;
+
+ {
+ u32 n_done = n_descriptors - n_descriptors_left;
+
+ if (n_trace > 0 && n_done > 0)
+ {
+ u32 n = clib_min (n_trace, n_done);
+ ixge_rx_trace (xm, xd, dq,
+ d_trace_save,
+ d_trace_buffers,
+ &dq->descriptors[start_descriptor_index], n);
+ vlib_set_trace_count (vm, node, n_trace - n);
+ }
+ if (d_trace_save)
+ {
+ _vec_len (d_trace_save) = 0;
+ _vec_len (d_trace_buffers) = 0;
+ }
+
+    /* Don't keep a reference to b_last if we don't have to.
+       Otherwise we can overwrite a next_buffer pointer after having
+       already enqueued a packet. */
+ if (is_sop)
+ {
+ b_last->next_buffer = ~0;
+ bi_last = ~0;
+ }
+
+ dq->rx.n_descriptors_done_this_call = n_done;
+ dq->rx.n_descriptors_done_total += n_done;
+ dq->rx.is_start_of_packet = is_sop;
+ dq->rx.saved_start_of_packet_buffer_index = bi_sop;
+ dq->rx.saved_last_buffer_index = bi_last;
+ dq->rx.saved_start_of_packet_next_index = next_index_sop;
+ dq->rx.next_index = next_index;
+ dq->rx.n_bytes += n_bytes;
+
+ return n_packets;
+ }
+}
+
+static uword
+ixge_rx_queue (ixge_main_t * xm,
+ ixge_device_t * xd,
+ vlib_node_runtime_t * node, u32 queue_index)
+{
+ ixge_dma_queue_t *dq =
+ vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
+ ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, dq->queue_index);
+ uword n_packets = 0;
+ u32 hw_head_index, sw_head_index;
+
+ /* One time initialization. */
+ if (!dq->rx.node)
+ {
+ dq->rx.node = node;
+ dq->rx.is_start_of_packet = 1;
+ dq->rx.saved_start_of_packet_buffer_index = ~0;
+ dq->rx.saved_last_buffer_index = ~0;
+ }
+
+ dq->rx.next_index = node->cached_next_index;
+
+ dq->rx.n_descriptors_done_total = 0;
+ dq->rx.n_descriptors_done_this_call = 0;
+ dq->rx.n_bytes = 0;
+
+ /* Fetch head from hardware and compare to where we think we are. */
+ hw_head_index = dr->head_index;
+ sw_head_index = dq->head_index;
+
+ if (hw_head_index == sw_head_index)
+ goto done;
+
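+  /* Process the ring in at most two contiguous chunks so that the
+     inner loops never see a wrap: first from the software head to the
+     end of the ring, then from slot 0 up to the hardware head. */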
+ if (hw_head_index < sw_head_index)
+ {
+ u32 n_tried = dq->n_descriptors - sw_head_index;
+ n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
+ sw_head_index =
+ ixge_ring_add (dq, sw_head_index,
+ dq->rx.n_descriptors_done_this_call);
+
+ if (dq->rx.n_descriptors_done_this_call != n_tried)
+ goto done;
+ }
+ if (hw_head_index >= sw_head_index)
+ {
+ u32 n_tried = hw_head_index - sw_head_index;
+ n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
+ sw_head_index =
+ ixge_ring_add (dq, sw_head_index,
+ dq->rx.n_descriptors_done_this_call);
+ }
+
+done:
+ dq->head_index = sw_head_index;
+ dq->tail_index =
+ ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);
+
+ /* Give tail back to hardware. */
+ CLIB_MEMORY_BARRIER ();
+
+ dr->tail_index = dq->tail_index;
+
+  vlib_increment_combined_counter
+    (vnet_main.interface_main.combined_sw_if_counters
+     + VNET_INTERFACE_COUNTER_RX,
+     0 /* cpu_index */ ,
+     xd->vlib_sw_if_index, n_packets, dq->rx.n_bytes);
+
+ return n_packets;
+}
+
+static void
+ixge_interrupt (ixge_main_t * xm, ixge_device_t * xd, u32 i)
+{
+ vlib_main_t *vm = xm->vlib_main;
+ ixge_regs_t *r = xd->regs;
+
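+  /* Interrupt cause 20 is link-status-change; the other causes in
+     16..31 map to the enum strings in the event-log type below
+     (ed->index = i - 16). */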
+ if (i != 20)
+ {
+      ELOG_TYPE_DECLARE (e) = {
+	.function = (char *) __FUNCTION__,
+	.format = "ixge %d, %s",
+	.format_args = "i1t1",
+	.n_enum_strings = 16,
+	.enum_strings = {
+	  "flow director",
+	  "rx miss",
+	  "pci exception",
+	  "mailbox",
+	  "link status change",
+	  "linksec key exchange",
+	  "manageability event",
+	  "reserved23",
+	  "sdp0",
+	  "sdp1",
+	  "sdp2",
+	  "sdp3",
+	  "ecc",
+	  "descriptor handler error",
+	  "tcp timer",
+	  "other",
+	},
+      };
+ struct
+ {
+ u8 instance;
+ u8 index;
+ } *ed;
+ ed = ELOG_DATA (&vm->elog_main, e);
+ ed->instance = xd->device_index;
+ ed->index = i - 16;
+ }
+ else
+ {
+ u32 v = r->xge_mac.link_status;
+ uword is_up = (v & (1 << 30)) != 0;
+
+      ELOG_TYPE_DECLARE (e) = {
+	.function = (char *) __FUNCTION__,
+	.format = "ixge %d, link status change 0x%x",
+	.format_args = "i4i4",
+      };
+ struct
+ {
+ u32 instance, link_status;
+ } *ed;
+ ed = ELOG_DATA (&vm->elog_main, e);
+ ed->instance = xd->device_index;
+ ed->link_status = v;
+ xd->link_status_at_last_link_change = v;
+
+ vlib_process_signal_event (vm, ixge_process_node.index,
+ EVENT_SET_FLAGS,
+ ((is_up << 31) | xd->vlib_hw_if_index));
+ }
+}
+
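+/* Compact a block of descriptor buffer indices: copy each non-zero
+   index from b[] to t[], zeroing b[] as we go, and return the number
+   of indices copied. The unrolled loop advances t by (bi != 0) to
+   stay branch-free. */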
+always_inline u32
+clean_block (u32 * b, u32 * t, u32 n_left)
+{
+ u32 *t0 = t;
+
+ while (n_left >= 4)
+ {
+ u32 bi0, bi1, bi2, bi3;
+
+ t[0] = bi0 = b[0];
+ b[0] = 0;
+ t += bi0 != 0;
+
+ t[0] = bi1 = b[1];
+ b[1] = 0;
+ t += bi1 != 0;
+
+ t[0] = bi2 = b[2];
+ b[2] = 0;
+ t += bi2 != 0;
+
+ t[0] = bi3 = b[3];
+ b[3] = 0;
+ t += bi3 != 0;
+
+ b += 4;
+ n_left -= 4;
+ }
+
+ while (n_left > 0)
+ {
+ u32 bi0;
+
+ t[0] = bi0 = b[0];
+ b[0] = 0;
+ t += bi0 != 0;
+ b += 1;
+ n_left -= 1;
+ }
+
+ return t - t0;
+}
+
+static void
+ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
+{
+ vlib_main_t *vm = xm->vlib_main;
+ ixge_dma_queue_t *dq =
+ vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
+ u32 n_clean, *b, *t, *t0;
+ i32 n_hw_owned_descriptors;
+ i32 first_to_clean, last_to_clean;
+ u64 hwbp_race = 0;
+
+ /* Handle case where head write back pointer update
+ * arrives after the interrupt during high PCI bus loads.
+ */
+ while ((dq->head_index == dq->tx.head_index_write_back[0]) &&
+ dq->tx.n_buffers_on_ring && (dq->head_index != dq->tail_index))
+ {
+ hwbp_race++;
+ if (IXGE_HWBP_RACE_ELOG && (hwbp_race == 1))
+ {
+	  ELOG_TYPE_DECLARE (e) = {
+	    .function = (char *) __FUNCTION__,
+	    .format = "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",
+	    .format_args = "i4i4i4i4",
+	  };
+ struct
+ {
+ u32 instance, head_index, tail_index, n_buffers_on_ring;
+ } *ed;
+ ed = ELOG_DATA (&vm->elog_main, e);
+ ed->instance = xd->device_index;
+ ed->head_index = dq->head_index;
+ ed->tail_index = dq->tail_index;
+ ed->n_buffers_on_ring = dq->tx.n_buffers_on_ring;
+ }
+ }
+
+ dq->head_index = dq->tx.head_index_write_back[0];
+ n_hw_owned_descriptors = ixge_ring_sub (dq, dq->head_index, dq->tail_index);
+ ASSERT (dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
+ n_clean = dq->tx.n_buffers_on_ring - n_hw_owned_descriptors;
+
+ if (IXGE_HWBP_RACE_ELOG && hwbp_race)
+ {
+      ELOG_TYPE_DECLARE (e) = {
+	.function = (char *) __FUNCTION__,
+	.format = "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",
+	.format_args = "i4i4i4i4i4",
+      };
+ struct
+ {
+ u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries;
+ } *ed;
+ ed = ELOG_DATA (&vm->elog_main, e);
+ ed->instance = xd->device_index;
+ ed->head_index = dq->head_index;
+ ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
+ ed->n_clean = n_clean;
+ ed->retries = hwbp_race;
+ }
+
+ /*
+ * This function used to wait until hardware owned zero descriptors.
+ * At high PPS rates, that doesn't happen until the TX ring is
+ * completely full of descriptors which need to be cleaned up.
+ * That, in turn, causes TX ring-full drops and/or long RX service
+ * interruptions.
+ */
+ if (n_clean == 0)
+ return;
+
+ /* Clean the n_clean descriptors prior to the reported hardware head */
+ last_to_clean = dq->head_index - 1;
+ last_to_clean = (last_to_clean < 0) ? last_to_clean + dq->n_descriptors :
+ last_to_clean;
+
+ first_to_clean = (last_to_clean) - (n_clean - 1);
+ first_to_clean = (first_to_clean < 0) ? first_to_clean + dq->n_descriptors :
+ first_to_clean;
+
+ vec_resize (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
+ t0 = t = xm->tx_buffers_pending_free;
+ b = dq->descriptor_buffer_indices + first_to_clean;
+
+ /* Wrap case: clean from first to end, then start to last */
+ if (first_to_clean > last_to_clean)
+ {
+ t += clean_block (b, t, (dq->n_descriptors - 1) - first_to_clean);
+ first_to_clean = 0;
+ b = dq->descriptor_buffer_indices;
+ }
+
+ /* Typical case: clean from first to last */
+ if (first_to_clean <= last_to_clean)
+ t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);
+
+ if (t > t0)
+ {
+ u32 n = t - t0;
+ vlib_buffer_free_no_next (vm, t0, n);
+ ASSERT (dq->tx.n_buffers_on_ring >= n);
+ dq->tx.n_buffers_on_ring -= n;
+ _vec_len (xm->tx_buffers_pending_free) = 0;
+ }
+}
+
+/* RX queue interrupts 0 thru 7; TX 8 thru 15. */
+always_inline uword
+ixge_interrupt_is_rx_queue (uword i)
+{
+ return i < 8;
+}
+
+always_inline uword
+ixge_interrupt_is_tx_queue (uword i)
+{
+ return i >= 8 && i < 16;
+}
+
+always_inline uword
+ixge_tx_queue_to_interrupt (uword i)
+{
+ return 8 + i;
+}
+
+always_inline uword
+ixge_rx_queue_to_interrupt (uword i)
+{
+ return 0 + i;
+}
+
+always_inline uword
+ixge_interrupt_rx_queue (uword i)
+{
+ ASSERT (ixge_interrupt_is_rx_queue (i));
+ return i - 0;
+}
+
+always_inline uword
+ixge_interrupt_tx_queue (uword i)
+{
+ ASSERT (ixge_interrupt_is_tx_queue (i));
+ return i - 8;
+}
+
+static uword
+ixge_device_input (ixge_main_t * xm,
+ ixge_device_t * xd, vlib_node_runtime_t * node)
+{
+ ixge_regs_t *r = xd->regs;
+ u32 i, s;
+ uword n_rx_packets = 0;
+
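+  /* Reading status_write_1_to_set returns the pending interrupt
+     causes; writing the same mask to the write-1-to-clear register
+     acknowledges exactly the causes we are about to service. */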
+ s = r->interrupt.status_write_1_to_set;
+ if (s)
+ r->interrupt.status_write_1_to_clear = s;
+
+ /* *INDENT-OFF* */
+ foreach_set_bit (i, s, ({
+ if (ixge_interrupt_is_rx_queue (i))
+ n_rx_packets += ixge_rx_queue (xm, xd, node, ixge_interrupt_rx_queue (i));
+
+ else if (ixge_interrupt_is_tx_queue (i))
+ ixge_tx_queue (xm, xd, ixge_interrupt_tx_queue (i));
+
+ else
+ ixge_interrupt (xm, xd, i);
+ }));
+ /* *INDENT-ON* */
+
+ return n_rx_packets;
+}
+
+static uword
+ixge_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f)
+{
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd;
+ uword n_rx_packets = 0;
+
+ if (node->state == VLIB_NODE_STATE_INTERRUPT)
+ {
+ uword i;
+
+ /* Loop over devices with interrupts. */
+ /* *INDENT-OFF* */
+ foreach_set_bit (i, node->runtime_data[0], ({
+ xd = vec_elt_at_index (xm->devices, i);
+ n_rx_packets += ixge_device_input (xm, xd, node);
+
+ /* Re-enable interrupts since we're going to stay in interrupt mode. */
+ if (! (node->flags & VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
+ xd->regs->interrupt.enable_write_1_to_set = ~0;
+ }));
+ /* *INDENT-ON* */
+
+ /* Clear mask of devices with pending interrupts. */
+ node->runtime_data[0] = 0;
+ }
+ else
+ {
+ /* Poll all devices for input/interrupts. */
+ vec_foreach (xd, xm->devices)
+ {
+ n_rx_packets += ixge_device_input (xm, xd, node);
+
+ /* Re-enable interrupts when switching out of polling mode. */
+ if (node->flags &
+ VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
+ xd->regs->interrupt.enable_write_1_to_set = ~0;
+ }
+ }
+
+ return n_rx_packets;
+}
+
+static char *ixge_error_strings[] = {
+#define _(n,s) s,
+ foreach_ixge_error
+#undef _
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ixge_input_node, static) = {
+ .function = ixge_input,
+ .type = VLIB_NODE_TYPE_INPUT,
+ .name = "ixge-input",
+
+ /* Will be enabled if/when hardware is detected. */
+ .state = VLIB_NODE_STATE_DISABLED,
+
+ .format_buffer = format_ethernet_header_with_length,
+ .format_trace = format_ixge_rx_dma_trace,
+
+ .n_errors = IXGE_N_ERROR,
+ .error_strings = ixge_error_strings,
+
+ .n_next_nodes = IXGE_RX_N_NEXT,
+ .next_nodes = {
+ [IXGE_RX_NEXT_DROP] = "error-drop",
+ [IXGE_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
+ [IXGE_RX_NEXT_IP4_INPUT] = "ip4-input",
+ [IXGE_RX_NEXT_IP6_INPUT] = "ip6-input",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH_CLONE (ixge_input)
+CLIB_MULTIARCH_SELECT_FN (ixge_input)
+/* *INDENT-ON* */
+
+static u8 *
+format_ixge_device_name (u8 * s, va_list * args)
+{
+ u32 i = va_arg (*args, u32);
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, i);
+ return format (s, "TenGigabitEthernet%U",
+ format_vlib_pci_handle, &xd->pci_device.bus_address);
+}
+
+#define IXGE_COUNTER_IS_64_BIT (1 << 0)
+#define IXGE_COUNTER_NOT_CLEAR_ON_READ (1 << 1)
+
+static u8 ixge_counter_flags[] = {
+#define _(a,f) 0,
+#define _64(a,f) IXGE_COUNTER_IS_64_BIT,
+ foreach_ixge_counter
+#undef _
+#undef _64
+};
+
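+/* Accumulate hardware statistics into the per-device soft counters.
+   Most counter registers are clear-on-read; those that are not are
+   zeroed explicitly, and 64-bit counters pick up their high half from
+   the adjacent register. */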
+static void
+ixge_update_counters (ixge_device_t * xd)
+{
+ /* Byte offset for counter registers. */
+ static u32 reg_offsets[] = {
+#define _(a,f) (a) / sizeof (u32),
+#define _64(a,f) _(a,f)
+ foreach_ixge_counter
+#undef _
+#undef _64
+ };
+ volatile u32 *r = (volatile u32 *) xd->regs;
+ int i;
+
+ for (i = 0; i < ARRAY_LEN (xd->counters); i++)
+ {
+ u32 o = reg_offsets[i];
+ xd->counters[i] += r[o];
+ if (ixge_counter_flags[i] & IXGE_COUNTER_NOT_CLEAR_ON_READ)
+ r[o] = 0;
+ if (ixge_counter_flags[i] & IXGE_COUNTER_IS_64_BIT)
+ xd->counters[i] += (u64) r[o + 1] << (u64) 32;
+ }
+}
+
+static u8 *
+format_ixge_device_id (u8 * s, va_list * args)
+{
+ u32 device_id = va_arg (*args, u32);
+ char *t = 0;
+ switch (device_id)
+ {
+#define _(f,n) case n: t = #f; break;
+ foreach_ixge_pci_device_id;
+#undef _
+ default:
+ t = 0;
+ break;
+ }
+ if (t == 0)
+ s = format (s, "unknown 0x%x", device_id);
+ else
+ s = format (s, "%s", t);
+ return s;
+}
+
+static u8 *
+format_ixge_link_status (u8 * s, va_list * args)
+{
+ ixge_device_t *xd = va_arg (*args, ixge_device_t *);
+ u32 v = xd->link_status_at_last_link_change;
+
+ s = format (s, "%s", (v & (1 << 30)) ? "up" : "down");
+
+ {
+ char *modes[] = {
+ "1g", "10g parallel", "10g serial", "autoneg",
+ };
+ char *speeds[] = {
+ "unknown", "100m", "1g", "10g",
+ };
+ s = format (s, ", mode %s, speed %s",
+ modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
+ }
+
+ return s;
+}
+
+static u8 *
+format_ixge_device (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ CLIB_UNUSED (int verbose) = va_arg (*args, int);
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, dev_instance);
+ ixge_phy_t *phy = xd->phys + xd->phy_index;
+ uword indent = format_get_indent (s);
+
+ ixge_update_counters (xd);
+ xd->link_status_at_last_link_change = xd->regs->xge_mac.link_status;
+
+ s = format (s, "Intel 8259X: id %U\n%Ulink %U",
+ format_ixge_device_id, xd->device_id,
+ format_white_space, indent + 2, format_ixge_link_status, xd);
+
+ s = format (s, "\n%UPCIe %U", format_white_space, indent + 2,
+ format_vlib_pci_link_speed, &xd->pci_device);
+
+ s = format (s, "\n%U", format_white_space, indent + 2);
+ if (phy->mdio_address != ~0)
+ s = format (s, "PHY address %d, id 0x%x", phy->mdio_address, phy->id);
+ else if (xd->sfp_eeprom.id == SFP_ID_sfp)
+ s = format (s, "SFP %U", format_sfp_eeprom, &xd->sfp_eeprom);
+ else
+ s = format (s, "PHY not found");
+
+ /* FIXME */
+ {
+ ixge_dma_queue_t *dq = vec_elt_at_index (xd->dma_queues[VLIB_RX], 0);
+ ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
+ u32 hw_head_index = dr->head_index;
+ u32 sw_head_index = dq->head_index;
+ u32 nitems;
+
+ nitems = ixge_ring_sub (dq, hw_head_index, sw_head_index);
+ s = format (s, "\n%U%d unprocessed, %d total buffers on rx queue 0 ring",
+ format_white_space, indent + 2, nitems, dq->n_descriptors);
+
+ s = format (s, "\n%U%d buffers in driver rx cache",
+ format_white_space, indent + 2,
+ vec_len (xm->rx_buffers_to_add));
+
+ s = format (s, "\n%U%d buffers on tx queue 0 ring",
+ format_white_space, indent + 2,
+ xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
+ }
+ {
+ u32 i;
+ u64 v;
+ static char *names[] = {
+#define _(a,f) #f,
+#define _64(a,f) _(a,f)
+ foreach_ixge_counter
+#undef _
+#undef _64
+ };
+
+ for (i = 0; i < ARRAY_LEN (names); i++)
+ {
+ v = xd->counters[i] - xd->counters_last_clear[i];
+ if (v != 0)
+ s = format (s, "\n%U%-40U%16Ld",
+ format_white_space, indent + 2,
+ format_c_identifier, names[i], v);
+ }
+ }
+
+ return s;
+}
+
+static void
+ixge_clear_hw_interface_counters (u32 instance)
+{
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, instance);
+ ixge_update_counters (xd);
+ memcpy (xd->counters_last_clear, xd->counters, sizeof (xd->counters));
+}
+
+/*
+ * Dynamically redirect all pkts from a specific interface
+ * to the specified node
+ */
+static void
+ixge_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
+ u32 node_index)
+{
+ ixge_main_t *xm = &ixge_main;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
+
+ /* Shut off redirection */
+ if (node_index == ~0)
+ {
+ xd->per_interface_next_index = node_index;
+ return;
+ }
+
+ xd->per_interface_next_index =
+ vlib_node_add_next (xm->vlib_main, ixge_input_node.index, node_index);
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (ixge_device_class) = {
+ .name = "ixge",
+ .tx_function = ixge_interface_tx,
+ .format_device_name = format_ixge_device_name,
+ .format_device = format_ixge_device,
+ .format_tx_trace = format_ixge_tx_dma_trace,
+ .clear_counters = ixge_clear_hw_interface_counters,
+ .admin_up_down_function = ixge_interface_admin_up_down,
+ .rx_redirect_to_node = ixge_set_interface_next_node,
+ .flatten_output_chains = 1,
+};
+/* *INDENT-ON* */
+
+#define IXGE_N_BYTES_IN_RX_BUFFER (2048) // DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
+
+static clib_error_t *
+ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
+{
+ ixge_main_t *xm = &ixge_main;
+ vlib_main_t *vm = xm->vlib_main;
+ ixge_dma_queue_t *dq;
+ clib_error_t *error = 0;
+
+ vec_validate (xd->dma_queues[rt], queue_index);
+ dq = vec_elt_at_index (xd->dma_queues[rt], queue_index);
+
+ if (!xm->n_descriptors_per_cache_line)
+ xm->n_descriptors_per_cache_line =
+ CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);
+
+ if (!xm->n_bytes_in_rx_buffer)
+ xm->n_bytes_in_rx_buffer = IXGE_N_BYTES_IN_RX_BUFFER;
+ xm->n_bytes_in_rx_buffer = round_pow2 (xm->n_bytes_in_rx_buffer, 1024);
+ if (!xm->vlib_buffer_free_list_index)
+ {
+ xm->vlib_buffer_free_list_index =
+ vlib_buffer_get_or_create_free_list (vm, xm->n_bytes_in_rx_buffer,
+ "ixge rx");
+ ASSERT (xm->vlib_buffer_free_list_index != 0);
+ }
+
+ if (!xm->n_descriptors[rt])
+ xm->n_descriptors[rt] = 4 * VLIB_FRAME_SIZE;
+
+ dq->queue_index = queue_index;
+ dq->n_descriptors =
+ round_pow2 (xm->n_descriptors[rt], xm->n_descriptors_per_cache_line);
+ dq->head_index = dq->tail_index = 0;
+
+ dq->descriptors = vlib_physmem_alloc_aligned (vm, &error,
+ dq->n_descriptors *
+ sizeof (dq->descriptors[0]),
+ 128 /* per chip spec */ );
+ if (error)
+ return error;
+
+ memset (dq->descriptors, 0,
+ dq->n_descriptors * sizeof (dq->descriptors[0]));
+ vec_resize (dq->descriptor_buffer_indices, dq->n_descriptors);
+
+ if (rt == VLIB_RX)
+ {
+ u32 n_alloc, i;
+
+ n_alloc = vlib_buffer_alloc_from_free_list
+ (vm, dq->descriptor_buffer_indices,
+ vec_len (dq->descriptor_buffer_indices),
+ xm->vlib_buffer_free_list_index);
+ ASSERT (n_alloc == vec_len (dq->descriptor_buffer_indices));
+ for (i = 0; i < n_alloc; i++)
+ {
+ vlib_buffer_t *b =
+ vlib_get_buffer (vm, dq->descriptor_buffer_indices[i]);
+ dq->descriptors[i].rx_to_hw.tail_address =
+ vlib_physmem_virtual_to_physical (vm, b->data);
+ }
+ }
+ else
+ {
+ u32 i;
+
+ dq->tx.head_index_write_back =
+ vlib_physmem_alloc (vm, &error, CLIB_CACHE_LINE_BYTES);
+
+ for (i = 0; i < dq->n_descriptors; i++)
+ dq->descriptors[i].tx = xm->tx_descriptor_template;
+
+ vec_validate (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
+ }
+
+ {
+ ixge_dma_regs_t *dr = get_dma_regs (xd, rt, queue_index);
+ u64 a;
+
+ a = vlib_physmem_virtual_to_physical (vm, dq->descriptors);
+ dr->descriptor_address[0] = a & 0xFFFFFFFF;
+ dr->descriptor_address[1] = a >> (u64) 32;
+ dr->n_descriptor_bytes = dq->n_descriptors * sizeof (dq->descriptors[0]);
+ dq->head_index = dq->tail_index = 0;
+
+ if (rt == VLIB_RX)
+ {
+ ASSERT ((xm->n_bytes_in_rx_buffer / 1024) < 32);
+ dr->rx_split_control =
+ ( /* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
+ | ( /* lo free descriptor threshold (units of 64 descriptors) */
+ (1 << 22)) | ( /* descriptor type: advanced one buffer */
+ (1 << 25)) | ( /* drop if no descriptors available */
+ (1 << 28)));
+
+ /* Give hardware all but last 16 cache lines' worth of descriptors. */
+ dq->tail_index = dq->n_descriptors -
+ 16 * xm->n_descriptors_per_cache_line;
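+ /* E.g. with the default 1024 rx descriptors, 16-byte descriptors and
+ 64-byte cache lines (4 descriptors per line, assumed here for
+ illustration), tail_index = 1024 - 16 * 4 = 960. */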
+ }
+ else
+ {
+ /* Make sure it's initialized before hardware can get to it. */
+ dq->tx.head_index_write_back[0] = dq->head_index;
+
+ a =
+ vlib_physmem_virtual_to_physical (vm, dq->tx.head_index_write_back);
+ dr->tx.head_index_write_back_address[0] = /* enable bit */ 1 | a;
+ dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
+ }
+
+ /* DMA on the 82599 does not work with bit [13] (rx data write relaxed
+ ordering) or the undocumented bit [12] set. */
+ if (rt == VLIB_RX)
+ dr->dca_control &= ~((1 << 13) | (1 << 12));
+
+ CLIB_MEMORY_BARRIER ();
+
+ if (rt == VLIB_TX)
+ {
+ xd->regs->tx_dma_control |= (1 << 0);
+ dr->control |= ((32 << 0) /* prefetch threshold */
+ | (64 << 8) /* host threshold */
+ | (0 << 16) /* writeback threshold */ );
+ }
+
+ /* Enable this queue and wait for hardware to initialize
+ before adding to tail. */
+ if (rt == VLIB_TX)
+ {
+ dr->control |= 1 << 25;
+ while (!(dr->control & (1 << 25)))
+ ;
+ }
+
+ /* Set head/tail indices and enable DMA. */
+ dr->head_index = dq->head_index;
+ dr->tail_index = dq->tail_index;
+ }
+
+ return error;
+}
+
+static u32
+ixge_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
+{
+ ixge_device_t *xd;
+ ixge_regs_t *r;
+ u32 old;
+ ixge_main_t *xm = &ixge_main;
+
+ xd = vec_elt_at_index (xm->devices, hw->dev_instance);
+ r = xd->regs;
+
+ old = r->filter_control;
+
+ if (flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
+ r->filter_control = old | (1 << 9) /* unicast promiscuous */ ;
+ else
+ r->filter_control = old & ~(1 << 9);
+
+ return old;
+}
+
+static void
+ixge_device_init (ixge_main_t * xm)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ixge_device_t *xd;
+
+ /* Reset chip(s). */
+ vec_foreach (xd, xm->devices)
+ {
+ ixge_regs_t *r = xd->regs;
+ const u32 reset_bit = (1 << 26) | (1 << 3);
+
+ r->control |= reset_bit;
+
+ /* No need to suspend. Timed to take ~1e-6 secs */
+ while (r->control & reset_bit)
+ ;
+
+ /* Software loaded. */
+ r->extended_control |= (1 << 28);
+
+ ixge_phy_init (xd);
+
+ /* Register ethernet interface. */
+ {
+ u8 addr8[6];
+ u32 i, addr32[2];
+ clib_error_t *error;
+
+ addr32[0] = r->rx_ethernet_address0[0][0];
+ addr32[1] = r->rx_ethernet_address0[0][1];
+ for (i = 0; i < 6; i++)
+ addr8[i] = addr32[i / 4] >> ((i % 4) * 8);
+
+ error = ethernet_register_interface
+ (vnm, ixge_device_class.index, xd->device_index,
+ /* ethernet address */ addr8,
+ &xd->vlib_hw_if_index, ixge_flag_change);
+ if (error)
+ clib_error_report (error);
+ }
+
+ {
+ vnet_sw_interface_t *sw =
+ vnet_get_hw_sw_interface (vnm, xd->vlib_hw_if_index);
+ xd->vlib_sw_if_index = sw->sw_if_index;
+ }
+
+ ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);
+
+ xm->n_descriptors[VLIB_TX] = 20 * VLIB_FRAME_SIZE;
+
+ ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);
+
+ /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
+ r->interrupt.queue_mapping[0] = (( /* valid bit */ (1 << 7) |
+ ixge_rx_queue_to_interrupt (0)) << 0);
+
+ r->interrupt.queue_mapping[0] |= (( /* valid bit */ (1 << 7) |
+ ixge_tx_queue_to_interrupt (0)) << 8);
+
+ /* No use in getting too many interrupts: limit them to one per 3/4
+ of the ring size at line rate with min sized packets. Disabled
+ below since the kernel/vlib main loop already provides an adequate
+ interrupt limiting scheme. */
+ if (0)
+ {
+ f64 line_rate_max_pps =
+ 10e9 / (8 * (64 + /* interframe padding */ 20));
+ ixge_throttle_queue_interrupt (r, 0,
+ .75 * xm->n_descriptors[VLIB_RX] /
+ line_rate_max_pps);
+ }
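+ /* Worked numbers (a sketch, assuming VLIB_FRAME_SIZE == 256 so the
+ rx ring has 1024 entries): line_rate_max_pps = 10e9 / (8 * 84)
+ ~= 14.88e6 pps, so the throttle interval would be
+ 0.75 * 1024 / 14.88e6 ~= 52e-6 secs. */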
+
+ /* Accept all multicast and broadcast packets. Should really add them
+ to the dst_ethernet_address register array. */
+ r->filter_control |= (1 << 10) | (1 << 8);
+
+ /* Enable frames up to size in mac frame size register. */
+ r->xge_mac.control |= 1 << 2;
+ r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;
+
+ /* Enable all interrupts. */
+ if (!IXGE_ALWAYS_POLL)
+ r->interrupt.enable_write_1_to_set = ~0;
+ }
+}
+
+static uword
+ixge_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd;
+ uword event_type, *event_data = 0;
+ f64 timeout, link_debounce_deadline;
+
+ ixge_device_init (xm);
+
+ /* Clear all counters. */
+ vec_foreach (xd, xm->devices)
+ {
+ ixge_update_counters (xd);
+ memset (xd->counters, 0, sizeof (xd->counters));
+ }
+
+ timeout = 30.0;
+ link_debounce_deadline = 1e70;
+
+ while (1)
+ {
+ /* 36 bit stat counters could overflow in ~50 secs.
+ We poll every 30 secs to be conservative. */
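+ /* (At 10 Gbps a 36-bit byte counter wraps in roughly
+ 2^36 bytes / 1.25e9 bytes/sec ~= 55 secs.) */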
+ vlib_process_wait_for_event_or_clock (vm, timeout);
+
+ event_type = vlib_process_get_events (vm, &event_data);
+
+ switch (event_type)
+ {
+ case EVENT_SET_FLAGS:
+ /* 1 ms */
+ link_debounce_deadline = vlib_time_now (vm) + 1e-3;
+ timeout = 1e-3;
+ break;
+
+ case ~0:
+ /* No events found: timer expired. */
+ if (vlib_time_now (vm) > link_debounce_deadline)
+ {
+ vec_foreach (xd, xm->devices)
+ {
+ ixge_regs_t *r = xd->regs;
+ u32 v = r->xge_mac.link_status;
+ uword is_up = (v & (1 << 30)) != 0;
+
+ vnet_hw_interface_set_flags
+ (vnm, xd->vlib_hw_if_index,
+ is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
+ }
+ link_debounce_deadline = 1e70;
+ timeout = 30.0;
+ }
+ break;
+
+ default:
+ ASSERT (0);
+ }
+
+ if (event_data)
+ _vec_len (event_data) = 0;
+
+ /* Query stats every 30 secs. */
+ {
+ f64 now = vlib_time_now (vm);
+ if (now - xm->time_last_stats_update > 30)
+ {
+ xm->time_last_stats_update = now;
+ vec_foreach (xd, xm->devices) ixge_update_counters (xd);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static vlib_node_registration_t ixge_process_node = {
+ .function = ixge_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "ixge-process",
+};
+
+clib_error_t *
+ixge_init (vlib_main_t * vm)
+{
+ ixge_main_t *xm = &ixge_main;
+ clib_error_t *error;
+
+ xm->vlib_main = vm;
+ memset (&xm->tx_descriptor_template, 0,
+ sizeof (xm->tx_descriptor_template));
+ memset (&xm->tx_descriptor_template_mask, 0,
+ sizeof (xm->tx_descriptor_template_mask));
+ xm->tx_descriptor_template.status0 =
+ (IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED |
+ IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED |
+ IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS);
+ xm->tx_descriptor_template_mask.status0 = 0xffff;
+ xm->tx_descriptor_template_mask.status1 = 0x00003fff;
+
+ xm->tx_descriptor_template_mask.status0 &=
+ ~(IXGE_TX_DESCRIPTOR_STATUS0_IS_END_OF_PACKET
+ | IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS);
+ xm->tx_descriptor_template_mask.status1 &=
+ ~(IXGE_TX_DESCRIPTOR_STATUS1_DONE);
+
+ error = vlib_call_init_function (vm, pci_bus_init);
+
+ return error;
+}
+
+VLIB_INIT_FUNCTION (ixge_init);
+
+static void
+ixge_pci_intr_handler (vlib_pci_device_t * dev)
+{
+ ixge_main_t *xm = &ixge_main;
+ vlib_main_t *vm = xm->vlib_main;
+
+ vlib_node_set_interrupt_pending (vm, ixge_input_node.index);
+
+ /* Let node know which device is interrupting. */
+ {
+ vlib_node_runtime_t *rt =
+ vlib_node_get_runtime (vm, ixge_input_node.index);
+ rt->runtime_data[0] |= 1 << dev->private_data;
+ }
+}
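+
+/*
+ * Example: if the device registered with private_data == 3 interrupts,
+ * bit 3 of runtime_data[0] is set here; ixge_input later walks (and
+ * clears) that mask with foreach_set_bit.
+ */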
+
+static clib_error_t *
+ixge_pci_init (vlib_main_t * vm, vlib_pci_device_t * dev)
+{
+ ixge_main_t *xm = &ixge_main;
+ clib_error_t *error;
+ void *r;
+ ixge_device_t *xd;
+
+ /* Device found: make sure we have dma memory. */
+ if (unix_physmem_is_fake (vm))
+ return clib_error_return (0, "no physical memory available");
+
+ error = vlib_pci_map_resource (dev, 0, &r);
+ if (error)
+ return error;
+
+ vec_add2 (xm->devices, xd, 1);
+
+ if (vec_len (xm->devices) == 1)
+ {
+ ixge_input_node.function = ixge_input_multiarch_select ();
+ }
+
+ xd->pci_device = dev[0];
+ xd->device_id = xd->pci_device.config0.header.device_id;
+ xd->regs = r;
+ xd->device_index = xd - xm->devices;
+ xd->pci_function = dev->bus_address.function;
+ xd->per_interface_next_index = ~0;
+
+ /* Chip found so enable node. */
+ {
+ vlib_node_set_state (vm, ixge_input_node.index,
+ (IXGE_ALWAYS_POLL
+ ? VLIB_NODE_STATE_POLLING
+ : VLIB_NODE_STATE_INTERRUPT));
+
+ dev->private_data = xd->device_index;
+ }
+
+ if (vec_len (xm->devices) == 1)
+ {
+ vlib_register_node (vm, &ixge_process_node);
+ xm->process_node_index = ixge_process_node.index;
+ }
+
+ error = vlib_pci_bus_master_enable (dev);
+
+ if (error)
+ return error;
+
+ return vlib_pci_intr_enable (dev);
+}
+
+/* *INDENT-OFF* */
+PCI_REGISTER_DEVICE (ixge_pci_device_registration,static) = {
+ .init_function = ixge_pci_init,
+ .interrupt_handler = ixge_pci_intr_handler,
+ .supported_devices = {
+#define _(t,i) { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = i, },
+ foreach_ixge_pci_device_id
+#undef _
+ { 0 },
+ },
+};
+/* *INDENT-ON* */
+
+void
+ixge_set_next_node (ixge_rx_next_t next, char *name)
+{
+ vlib_node_registration_t *r = &ixge_input_node;
+
+ switch (next)
+ {
+ case IXGE_RX_NEXT_IP4_INPUT:
+ case IXGE_RX_NEXT_IP6_INPUT:
+ case IXGE_RX_NEXT_ETHERNET_INPUT:
+ r->next_nodes[next] = name;
+ break;
+
+ default:
+ clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
+ break;
+ }
+}
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/nic/ixge.h b/src/vnet/devices/nic/ixge.h
new file mode 100644
index 00000000000..a8e652dcdab
--- /dev/null
+++ b/src/vnet/devices/nic/ixge.h
@@ -0,0 +1,1293 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_ixge_h
+#define included_ixge_h
+
+#include <vnet/vnet.h>
+#include <vlib/pci/pci.h>
+#include <vlib/i2c.h>
+#include <vnet/devices/nic/sfp.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+
+typedef volatile struct
+{
+ /* [31:7] 128 byte aligned. */
+ u32 descriptor_address[2];
+ u32 n_descriptor_bytes;
+
+ /* [5] rx/tx descriptor dca enable
+ [6] rx packet head dca enable
+ [7] rx packet tail dca enable
+ [9] rx/tx descriptor relaxed order
+ [11] rx/tx descriptor write back relaxed order
+ [13] rx/tx data write/read relaxed order
+ [15] rx head data write relaxed order
+ [31:24] apic id for cpu's cache. */
+ u32 dca_control;
+
+ u32 head_index;
+
+ /* [4:0] tail buffer size (in 1k byte units)
+ [13:8] head buffer size (in 64 byte units)
+ [24:22] lo free descriptors threshold (units of 64 descriptors)
+ [27:25] descriptor type 0 = legacy, 1 = advanced one buffer (e.g. tail),
+ 2 = advanced header splitting (head + tail), 5 = advanced header
+ splitting (head only).
+ [28] drop if no descriptors available. */
+ u32 rx_split_control;
+
+ u32 tail_index;
+ CLIB_PAD_FROM_TO (0x1c, 0x28);
+
+ /* [7:0] rx/tx prefetch threshold
+ [15:8] rx/tx host threshold
+ [24:16] rx/tx write back threshold
+ [25] rx/tx enable
+ [26] tx descriptor writeback flush
+ [30] rx strip vlan enable */
+ u32 control;
+
+ u32 rx_coallesce_control;
+
+ union
+ {
+ struct
+ {
+ /* packets bytes lo hi */
+ u32 stats[3];
+
+ u32 unused;
+ } rx;
+
+ struct
+ {
+ u32 unused[2];
+
+ /* [0] enables head write back. */
+ u32 head_index_write_back_address[2];
+ } tx;
+ };
+} ixge_dma_regs_t;
+
+/* Only advanced descriptors are supported. */
+typedef struct
+{
+ u64 tail_address;
+ u64 head_address;
+} ixge_rx_to_hw_descriptor_t;
+
+typedef struct
+{
+ u32 status[3];
+ u16 n_packet_bytes_this_descriptor;
+ u16 vlan_tag;
+} ixge_rx_from_hw_descriptor_t;
+
+#define IXGE_RX_DESCRIPTOR_STATUS0_IS_LAYER2 (1 << (4 + 11))
+/* Valid if not layer2. */
+#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4 (1 << (4 + 0))
+#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT (1 << (4 + 1))
+#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6 (1 << (4 + 2))
+#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT (1 << (4 + 3))
+#define IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP (1 << (4 + 4))
+#define IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP (1 << (4 + 5))
+#define IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET(s) (((s) >> 21) & 0x3ff)
+
+#define IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE (1 << (0 + 0))
+#define IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET (1 << (0 + 1))
+#define IXGE_RX_DESCRIPTOR_STATUS2_IS_VLAN (1 << (0 + 3))
+#define IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED (1 << (0 + 4))
+#define IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED (1 << (0 + 5))
+#define IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED (1 << (0 + 6))
+#define IXGE_RX_DESCRIPTOR_STATUS2_NOT_UNICAST (1 << (0 + 7))
+#define IXGE_RX_DESCRIPTOR_STATUS2_IS_DOUBLE_VLAN (1 << (0 + 9))
+#define IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR (1 << (0 + 10))
+#define IXGE_RX_DESCRIPTOR_STATUS2_ETHERNET_ERROR (1 << (20 + 9))
+#define IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR (1 << (20 + 10))
+#define IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR (1 << (20 + 11))
+
+/* For layer2 packets stats0 bottom 3 bits give ether type index from filter. */
+#define IXGE_RX_DESCRIPTOR_STATUS0_LAYER2_ETHERNET_TYPE(s) ((s) & 7)
+
+typedef struct
+{
+ u64 buffer_address;
+ u16 n_bytes_this_buffer;
+ u16 status0;
+ u32 status1;
+#define IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED (3 << 4)
+#define IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED (1 << (8 + 5))
+#define IXGE_TX_DESCRIPTOR_STATUS0_LOG2_REPORT_STATUS (8 + 3)
+#define IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS (1 << IXGE_TX_DESCRIPTOR_STATUS0_LOG2_REPORT_STATUS)
+#define IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS (1 << (8 + 1))
+#define IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET (8 + 0)
+#define IXGE_TX_DESCRIPTOR_STATUS0_IS_END_OF_PACKET (1 << IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET)
+#define IXGE_TX_DESCRIPTOR_STATUS1_DONE (1 << 0)
+#define IXGE_TX_DESCRIPTOR_STATUS1_CONTEXT(i) (/* valid */ (1 << 7) | ((i) << 4))
+#define IXGE_TX_DESCRIPTOR_STATUS1_IPSEC_OFFLOAD (1 << (8 + 2))
+#define IXGE_TX_DESCRIPTOR_STATUS1_INSERT_TCP_UDP_CHECKSUM (1 << (8 + 1))
+#define IXGE_TX_DESCRIPTOR_STATUS1_INSERT_IP4_CHECKSUM (1 << (8 + 0))
+#define IXGE_TX_DESCRIPTOR_STATUS0_N_BYTES_THIS_BUFFER(l) ((l) << 0)
+#define IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET(l) ((l) << 14)
+} ixge_tx_descriptor_t;
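+
+/*
+ * Sketch of how these bits compose (illustrative only): a one-buffer
+ * packet would use the ixge_init template (ADVANCED | IS_ADVANCED |
+ * INSERT_FCS) in status0, additionally set
+ * IXGE_TX_DESCRIPTOR_STATUS0_IS_END_OF_PACKET on its last descriptor,
+ * put the buffer length in n_bytes_this_buffer, and encode the total
+ * packet length with IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET.
+ */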
+
+typedef struct
+{
+ struct
+ {
+ u8 checksum_start_offset;
+ u8 checksum_insert_offset;
+ u16 checksum_end_offset;
+ } ip, tcp;
+ u32 status0;
+
+ u8 status1;
+
+ /* Byte offset after UDP/TCP header. */
+ u8 payload_offset;
+
+ u16 max_tcp_segment_size;
+} __attribute__ ((packed)) ixge_tx_context_descriptor_t;
+
+typedef union
+{
+ ixge_rx_to_hw_descriptor_t rx_to_hw;
+ ixge_rx_from_hw_descriptor_t rx_from_hw;
+ ixge_tx_descriptor_t tx;
+ u32x4 as_u32x4;
+} ixge_descriptor_t;
+
+typedef volatile struct
+{
+ /* [2] pcie master disable
+ [3] mac reset
+ [26] global device reset */
+ u32 control;
+ u32 control_alias;
+ /* [3:2] device id (0 or 1 for dual port chips)
+ [7] link is up
+ [17:10] num vfs
+ [18] io active
+ [19] pcie master enable status */
+ u32 status_read_only;
+ CLIB_PAD_FROM_TO (0xc, 0x18);
+ /* [14] pf reset done
+ [17] relaxed ordering disable
+ [26] extended vlan enable
+ [28] driver loaded */
+ u32 extended_control;
+ CLIB_PAD_FROM_TO (0x1c, 0x20);
+
+ /* software definable pins.
+ sdp_data [7:0]
+ sdp_is_output [15:8]
+ sdp_is_native [23:16]
+ sdp_function [31:24].
+ */
+ u32 sdp_control;
+ CLIB_PAD_FROM_TO (0x24, 0x28);
+
+ /* [0] i2c clock in
+ [1] i2c clock out
+ [2] i2c data in
+ [3] i2c data out */
+ u32 i2c_control;
+ CLIB_PAD_FROM_TO (0x2c, 0x4c);
+ u32 tcp_timer;
+
+ CLIB_PAD_FROM_TO (0x50, 0x200);
+
+ u32 led_control;
+
+ CLIB_PAD_FROM_TO (0x204, 0x600);
+ u32 core_spare;
+ CLIB_PAD_FROM_TO (0x604, 0x700);
+
+ struct
+ {
+ u32 vflr_events_clear[4];
+ u32 mailbox_interrupt_status[4];
+ u32 mailbox_interrupt_enable[4];
+ CLIB_PAD_FROM_TO (0x730, 0x800);
+ } pf_foo;
+
+ struct
+ {
+ u32 status_write_1_to_clear;
+ CLIB_PAD_FROM_TO (0x804, 0x808);
+ u32 status_write_1_to_set;
+ CLIB_PAD_FROM_TO (0x80c, 0x810);
+ u32 status_auto_clear_enable;
+ CLIB_PAD_FROM_TO (0x814, 0x820);
+
+ /* [11:3] minimum inter-interrupt interval
+ (2e-6 units; 20e-6 units for fast ethernet).
+ [15] low-latency interrupt moderation enable
+ [20:16] low-latency interrupt credit
+ [27:21] interval counter
+ [31] write disable for credit and counter (write only). */
+ u32 throttle0[24];
+
+ u32 enable_write_1_to_set;
+ CLIB_PAD_FROM_TO (0x884, 0x888);
+ u32 enable_write_1_to_clear;
+ CLIB_PAD_FROM_TO (0x88c, 0x890);
+ u32 enable_auto_clear;
+ u32 msi_to_eitr_select;
+ /* [3:0] spd 0-3 interrupt detection enable
+ [4] msi-x enable
+ [5] other clear disable (makes other bits in status not clear on read)
+ etc. */
+ u32 control;
+ CLIB_PAD_FROM_TO (0x89c, 0x900);
+
+ /* Defines interrupt mapping for 128 rx + 128 tx queues.
+ 64 x 4 8 bit entries.
+ For register [i]:
+ [5:0] bit in interrupt status for rx queue 2*i + 0
+ [7] valid bit
+ [13:8] bit for tx queue 2*i + 0
+ [15] valid bit
+ similar for rx 2*i + 1 and tx 2*i + 1. */
+ u32 queue_mapping[64];
+
+ /* tcp timer [7:0] and other interrupts [15:8] */
+ u32 misc_mapping;
+ CLIB_PAD_FROM_TO (0xa04, 0xa90);
+
+ /* 64 interrupts determined by mappings. */
+ u32 status1_write_1_to_clear[4];
+ u32 enable1_write_1_to_set[4];
+ u32 enable1_write_1_to_clear[4];
+ CLIB_PAD_FROM_TO (0xac0, 0xad0);
+ u32 status1_enable_auto_clear[4];
+ CLIB_PAD_FROM_TO (0xae0, 0x1000);
+ } interrupt;
+
+ ixge_dma_regs_t rx_dma0[64];
+
+ CLIB_PAD_FROM_TO (0x2000, 0x2140);
+ u32 dcb_rx_packet_plane_t4_config[8];
+ u32 dcb_rx_packet_plane_t4_status[8];
+ CLIB_PAD_FROM_TO (0x2180, 0x2300);
+
+ /* reg i defines mapping for 4 rx queues starting at 4*i + 0. */
+ u32 rx_queue_stats_mapping[32];
+ u32 rx_queue_stats_control;
+
+ CLIB_PAD_FROM_TO (0x2384, 0x2410);
+ u32 fc_user_descriptor_ptr[2];
+ u32 fc_buffer_control;
+ CLIB_PAD_FROM_TO (0x241c, 0x2420);
+ u32 fc_rx_dma;
+ CLIB_PAD_FROM_TO (0x2424, 0x2430);
+ u32 dcb_packet_plane_control;
+ CLIB_PAD_FROM_TO (0x2434, 0x2f00);
+
+ u32 rx_dma_control;
+ u32 pf_queue_drop_enable;
+ CLIB_PAD_FROM_TO (0x2f08, 0x2f20);
+ u32 rx_dma_descriptor_cache_config;
+ CLIB_PAD_FROM_TO (0x2f24, 0x3000);
+
+ /* 1 bit. */
+ u32 rx_enable;
+ CLIB_PAD_FROM_TO (0x3004, 0x3008);
+ /* [15:0] ether type (little endian)
+ [31:16] opcode (big endian) */
+ u32 flow_control_control;
+ CLIB_PAD_FROM_TO (0x300c, 0x3020);
+ /* 3 bit traffic class for each of 8 priorities. */
+ u32 rx_priority_to_traffic_class;
+ CLIB_PAD_FROM_TO (0x3024, 0x3028);
+ u32 rx_coallesce_data_buffer_control;
+ CLIB_PAD_FROM_TO (0x302c, 0x3190);
+ u32 rx_packet_buffer_flush_detect;
+ CLIB_PAD_FROM_TO (0x3194, 0x3200);
+ u32 flow_control_tx_timers[4]; /* 2 timer values */
+ CLIB_PAD_FROM_TO (0x3210, 0x3220);
+ u32 flow_control_rx_threshold_lo[8];
+ CLIB_PAD_FROM_TO (0x3240, 0x3260);
+ u32 flow_control_rx_threshold_hi[8];
+ CLIB_PAD_FROM_TO (0x3280, 0x32a0);
+ u32 flow_control_refresh_threshold;
+ CLIB_PAD_FROM_TO (0x32a4, 0x3c00);
+ /* For each of 8 traffic classes (units of bytes). */
+ u32 rx_packet_buffer_size[8];
+ CLIB_PAD_FROM_TO (0x3c20, 0x3d00);
+ u32 flow_control_config;
+ CLIB_PAD_FROM_TO (0x3d04, 0x4200);
+
+ struct
+ {
+ u32 pcs_config;
+ CLIB_PAD_FROM_TO (0x4204, 0x4208);
+ u32 link_control;
+ u32 link_status;
+ u32 pcs_debug[2];
+ u32 auto_negotiation;
+ u32 link_partner_ability;
+ u32 auto_negotiation_tx_next_page;
+ u32 auto_negotiation_link_partner_next_page;
+ CLIB_PAD_FROM_TO (0x4228, 0x4240);
+ } gige_mac;
+
+ struct
+ {
+ /* [0] tx crc enable
+ [2] enable frames up to max frame size register [31:16]
+ [10] pad frames < 64 bytes if specified by user
+ [15] loopback enable
+ [16] mdc hi speed
+ [17] turn off mdc between mdio packets */
+ u32 control;
+
+ /* [5] rx symbol error (all bits clear on read)
+ [6] rx illegal symbol
+ [7] rx idle error
+ [8] rx local fault
+ [9] rx remote fault */
+ u32 status;
+
+ u32 pause_and_pace_control;
+ CLIB_PAD_FROM_TO (0x424c, 0x425c);
+ u32 phy_command;
+ u32 phy_data;
+ CLIB_PAD_FROM_TO (0x4264, 0x4268);
+
+ /* [31:16] max frame size in bytes. */
+ u32 rx_max_frame_size;
+ CLIB_PAD_FROM_TO (0x426c, 0x4288);
+
+ /* [0]
+ [2] pcs receive link up? (latch lo)
+ [7] local fault
+ [1]
+ [0] pcs 10g base r capable
+ [1] pcs 10g base x capable
+ [2] pcs 10g base w capable
+ [10] rx local fault
+ [11] tx local fault
+ [15:14] 2 => device present at this address (else not present) */
+ u32 xgxs_status[2];
+
+ u32 base_x_pcs_status;
+
+ /* [0] pass unrecognized flow control frames
+ [1] discard pause frames
+ [2] rx priority flow control enable (only in dcb mode)
+ [3] rx flow control enable. */
+ u32 flow_control;
+
+ /* [3:0] tx lanes change polarity
+ [7:4] rx lanes change polarity
+ [11:8] swizzle tx lanes
+ [15:12] swizzle rx lanes
+ 4 x 2 bit tx lane swap
+ 4 x 2 bit rx lane swap. */
+ u32 serdes_control;
+
+ u32 fifo_control;
+
+ /* [0] force link up
+ [1] autoneg ack2 bit to transmit
+ [6:2] autoneg selector field to transmit
+ [8:7] 10g pma/pmd type 0 => xaui, 1 kx4, 2 cx4
+ [9] 1g pma/pmd type 0 => sfi, 1 => kx/bx
+ [10] disable 10g on without main power
+ [11] restart autoneg on transition to dx power state
+ [12] restart autoneg
+ [15:13] link mode:
+ 0 => 1g no autoneg
+ 1 => 10g kx4 parallel link no autoneg
+ 2 => 1g bx autoneg
+ 3 => 10g sfi serdes
+ 4 => kx4/kx/kr
+ 5 => xgmii 1g/100m
+ 6 => kx4/kx/kr 1g an
+ 7 kx4/kx/kr sgmii.
+ [16] kr support
+ [17] fec requested
+ [18] fec ability
+ etc. */
+ u32 auto_negotiation_control;
+
+ /* [0] signal detect 1g/100m
+ [1] fec signal detect
+ [2] 10g serial pcs fec block lock
+ [3] 10g serial high error rate
+ [4] 10g serial pcs block lock
+ [5] kx/kx4/kr autoneg next page received
+ [6] kx/kx4/kr backplane autoneg next page received
+ [7] link status clear to read
+ [11:8] 10g signal detect (4 lanes) (for serial just lane 0)
+ [12] 10g serial signal detect
+ [16:13] 10g parallel lane sync status
+ [17] 10g parallel align status
+ [18] 1g sync status
+ [19] kx/kx4/kr backplane autoneg is idle
+ [20] 1g autoneg enabled
+ [21] 1g pcs enabled for sgmii
+ [22] 10g xgxs enabled
+ [23] 10g serial fec enabled (forward error detection)
+ [24] 10g kr pcs enabled
+ [25] sgmii enabled
+ [27:26] mac link mode
+ 0 => 1g
+ 1 => 10g parallel
+ 2 => 10g serial
+ 3 => autoneg
+ [29:28] link speed
+ 1 => 100m
+ 2 => 1g
+ 3 => 10g
+ [30] link is up
+ [31] kx/kx4/kr backplane autoneg completed successfully. */
+ u32 link_status;
+
+ /* [17:16] pma/pmd for 10g serial
+ 0 => kr, 2 => sfi
+ [18] disable dme pages */
+ u32 auto_negotiation_control2;
+
+ CLIB_PAD_FROM_TO (0x42ac, 0x42b0);
+ u32 link_partner_ability[2];
+ CLIB_PAD_FROM_TO (0x42b8, 0x42d0);
+ u32 manageability_control;
+ u32 link_partner_next_page[2];
+ CLIB_PAD_FROM_TO (0x42dc, 0x42e0);
+ u32 kr_pcs_control;
+ u32 kr_pcs_status;
+ u32 fec_status[2];
+ CLIB_PAD_FROM_TO (0x42f0, 0x4314);
+ u32 sgmii_control;
+ CLIB_PAD_FROM_TO (0x4318, 0x4324);
+ u32 link_status2;
+ CLIB_PAD_FROM_TO (0x4328, 0x4900);
+ } xge_mac;
+
+ u32 tx_dcb_control;
+ u32 tx_dcb_descriptor_plane_queue_select;
+ u32 tx_dcb_descriptor_plane_t1_config;
+ u32 tx_dcb_descriptor_plane_t1_status;
+ CLIB_PAD_FROM_TO (0x4910, 0x4950);
+
+ /* For each TC in units of 1k bytes. */
+ u32 tx_packet_buffer_thresholds[8];
+ CLIB_PAD_FROM_TO (0x4970, 0x4980);
+ struct
+ {
+ u32 mmw;
+ u32 config;
+ u32 status;
+ u32 rate_drift;
+ } dcb_tx_rate_scheduler;
+ CLIB_PAD_FROM_TO (0x4990, 0x4a80);
+ u32 tx_dma_control;
+ CLIB_PAD_FROM_TO (0x4a84, 0x4a88);
+ u32 tx_dma_tcp_flags_control[2];
+ CLIB_PAD_FROM_TO (0x4a90, 0x4b00);
+ u32 pf_mailbox[64];
+ CLIB_PAD_FROM_TO (0x4c00, 0x5000);
+
+ /* RX */
+ u32 checksum_control;
+ CLIB_PAD_FROM_TO (0x5004, 0x5008);
+ u32 rx_filter_control;
+ CLIB_PAD_FROM_TO (0x500c, 0x5010);
+ u32 management_vlan_tag[8];
+ u32 management_udp_tcp_ports[8];
+ CLIB_PAD_FROM_TO (0x5050, 0x5078);
+ /* little endian. */
+ u32 extended_vlan_ether_type;
+ CLIB_PAD_FROM_TO (0x507c, 0x5080);
+ /* [1] store/dma bad packets
+ [8] accept all multicast
+ [9] accept all unicast
+ [10] accept all broadcast. */
+ u32 filter_control;
+ CLIB_PAD_FROM_TO (0x5084, 0x5088);
+ /* [15:0] vlan ethernet type (0x8100) little endian
+ [28] cfi bit expected
+ [29] drop packets with unexpected cfi bit
+ [30] vlan filter enable. */
+ u32 vlan_control;
+ CLIB_PAD_FROM_TO (0x508c, 0x5090);
+ /* [1:0] hi bit of ethernet address for 12 bit index into multicast table
+ 0 => 47, 1 => 46, 2 => 45, 3 => 43.
+ [2] enable multicast filter
+ */
+ u32 multicast_control;
+ CLIB_PAD_FROM_TO (0x5094, 0x5100);
+ u32 fcoe_rx_control;
+ CLIB_PAD_FROM_TO (0x5104, 0x5108);
+ u32 fc_flt_context;
+ CLIB_PAD_FROM_TO (0x510c, 0x5110);
+ u32 fc_filter_control;
+ CLIB_PAD_FROM_TO (0x5114, 0x5120);
+ u32 rx_message_type_lo;
+ CLIB_PAD_FROM_TO (0x5124, 0x5128);
+ /* [15:0] ethernet type (little endian)
+ [18:16] match pri in vlan tag
+ [19] priority match enable
+ [25:20] virtualization pool
+ [26] pool enable
+ [27] is fcoe
+ [30] ieee 1588 timestamp enable
+ [31] filter enable.
+ (See ethernet_type_queue_select.) */
+ u32 ethernet_type_queue_filter[8];
+ CLIB_PAD_FROM_TO (0x5148, 0x5160);
+ /* [7:0] l2 ethernet type and
+ [15:8] l2 ethernet type or */
+ u32 management_decision_filters1[8];
+ u32 vf_vm_tx_switch_loopback_enable[2];
+ u32 rx_time_sync_control;
+ CLIB_PAD_FROM_TO (0x518c, 0x5190);
+ u32 management_ethernet_type_filters[4];
+ u32 rx_timestamp_attributes_lo;
+ u32 rx_timestamp_hi;
+ u32 rx_timestamp_attributes_hi;
+ CLIB_PAD_FROM_TO (0x51ac, 0x51b0);
+ u32 pf_virtual_control;
+ CLIB_PAD_FROM_TO (0x51b4, 0x51d8);
+ u32 fc_offset_parameter;
+ CLIB_PAD_FROM_TO (0x51dc, 0x51e0);
+ u32 vf_rx_enable[2];
+ u32 rx_timestamp_lo;
+ CLIB_PAD_FROM_TO (0x51ec, 0x5200);
+ /* 12 bits determined by multicast_control
+ lookup bits in this vector. */
+ u32 multicast_enable[128];
+
+ /* [0] ethernet address [31:0]
+ [1] [15:0] ethernet address [47:32]
+ [31] valid bit.
+ Index 0 is read from eeprom after reset. */
+ u32 rx_ethernet_address0[16][2];
+
+ CLIB_PAD_FROM_TO (0x5480, 0x5800);
+ u32 wake_up_control;
+ CLIB_PAD_FROM_TO (0x5804, 0x5808);
+ u32 wake_up_filter_control;
+ CLIB_PAD_FROM_TO (0x580c, 0x5818);
+ u32 multiple_rx_queue_command_82598;
+ CLIB_PAD_FROM_TO (0x581c, 0x5820);
+ u32 management_control;
+ u32 management_filter_control;
+ CLIB_PAD_FROM_TO (0x5828, 0x5838);
+ u32 wake_up_ip4_address_valid;
+ CLIB_PAD_FROM_TO (0x583c, 0x5840);
+ u32 wake_up_ip4_address_table[4];
+ u32 management_control_to_host;
+ CLIB_PAD_FROM_TO (0x5854, 0x5880);
+ u32 wake_up_ip6_address_table[4];
+
+ /* unicast_and broadcast_and vlan_and ip_address_and
+ etc. */
+ u32 management_decision_filters[8];
+
+ u32 management_ip4_or_ip6_address_filters[4][4];
+ CLIB_PAD_FROM_TO (0x58f0, 0x5900);
+ u32 wake_up_packet_length;
+ CLIB_PAD_FROM_TO (0x5904, 0x5910);
+ u32 management_ethernet_address_filters[4][2];
+ CLIB_PAD_FROM_TO (0x5930, 0x5a00);
+ u32 wake_up_packet_memory[32];
+ CLIB_PAD_FROM_TO (0x5a80, 0x5c00);
+ u32 redirection_table_82598[32];
+ u32 rss_random_keys_82598[10];
+ CLIB_PAD_FROM_TO (0x5ca8, 0x6000);
+
+ ixge_dma_regs_t tx_dma[128];
+
+ u32 pf_vm_vlan_insert[64];
+ u32 tx_dma_tcp_max_alloc_size_requests;
+ CLIB_PAD_FROM_TO (0x8104, 0x8110);
+ u32 vf_tx_enable[2];
+ CLIB_PAD_FROM_TO (0x8118, 0x8120);
+ /* [0] dcb mode enable
+ [1] virtualization mode enable
+ [3:2] number of tcs/qs per pool. */
+ u32 multiple_tx_queues_command;
+ CLIB_PAD_FROM_TO (0x8124, 0x8200);
+ u32 pf_vf_anti_spoof[8];
+ u32 pf_dma_tx_switch_control;
+ CLIB_PAD_FROM_TO (0x8224, 0x82e0);
+ u32 tx_strict_low_latency_queues[4];
+ CLIB_PAD_FROM_TO (0x82f0, 0x8600);
+ u32 tx_queue_stats_mapping_82599[32];
+ u32 tx_queue_packet_counts[32];
+ u32 tx_queue_byte_counts[32][2];
+
+ struct
+ {
+ u32 control;
+ u32 status;
+ u32 buffer_almost_full;
+ CLIB_PAD_FROM_TO (0x880c, 0x8810);
+ u32 buffer_min_ifg;
+ CLIB_PAD_FROM_TO (0x8814, 0x8900);
+ } tx_security;
+
+ struct
+ {
+ u32 index;
+ u32 salt;
+ u32 key[4];
+ CLIB_PAD_FROM_TO (0x8918, 0x8a00);
+ } tx_ipsec;
+
+ struct
+ {
+ u32 capabilities;
+ u32 control;
+ u32 tx_sci[2];
+ u32 sa;
+ u32 sa_pn[2];
+ u32 key[2][4];
+ /* untagged packets, encrypted packets, protected packets,
+ encrypted bytes, protected bytes */
+ u32 stats[5];
+ CLIB_PAD_FROM_TO (0x8a50, 0x8c00);
+ } tx_link_security;
+
+ struct
+ {
+ u32 control;
+ u32 timestamp_value[2];
+ u32 system_time[2];
+ u32 increment_attributes;
+ u32 time_adjustment_offset[2];
+ u32 aux_control;
+ u32 target_time[2][2];
+ CLIB_PAD_FROM_TO (0x8c34, 0x8c3c);
+ u32 aux_time_stamp[2][2];
+ CLIB_PAD_FROM_TO (0x8c4c, 0x8d00);
+ } tx_timesync;
+
+ struct
+ {
+ u32 control;
+ u32 status;
+ CLIB_PAD_FROM_TO (0x8d08, 0x8e00);
+ } rx_security;
+
+ struct
+ {
+ u32 index;
+ u32 ip_address[4];
+ u32 spi;
+ u32 ip_index;
+ u32 key[4];
+ u32 salt;
+ u32 mode;
+ CLIB_PAD_FROM_TO (0x8e34, 0x8f00);
+ } rx_ipsec;
+
+ struct
+ {
+ u32 capabilities;
+ u32 control;
+ u32 sci[2];
+ u32 sa[2];
+ u32 sa_pn[2];
+ u32 key[2][4];
+ /* see datasheet */
+ u32 stats[17];
+ CLIB_PAD_FROM_TO (0x8f84, 0x9000);
+ } rx_link_security;
+
+ /* 4 wake up, 2 management, 2 wake up. */
+ u32 flexible_filters[8][16][4];
+ CLIB_PAD_FROM_TO (0x9800, 0xa000);
+
+ /* 4096 bits. */
+ u32 vlan_filter[128];
+
+ /* [0] ethernet address [31:0]
+ [1] [15:0] ethernet address [47:32]
+ [31] valid bit.
+ Index 0 is read from eeprom after reset. */
+ u32 rx_ethernet_address1[128][2];
+
+ /* select one of 64 pools for each rx address. */
+ u32 rx_ethernet_address_pool_select[128][2];
+ CLIB_PAD_FROM_TO (0xaa00, 0xc800);
+ u32 tx_priority_to_traffic_class;
+ CLIB_PAD_FROM_TO (0xc804, 0xcc00);
+
+ /* In bytes units of 1k. Total packet buffer is 160k. */
+ u32 tx_packet_buffer_size[8];
+
+ CLIB_PAD_FROM_TO (0xcc20, 0xcd10);
+ u32 tx_manageability_tc_mapping;
+ CLIB_PAD_FROM_TO (0xcd14, 0xcd20);
+ u32 dcb_tx_packet_plane_t2_config[8];
+ u32 dcb_tx_packet_plane_t2_status[8];
+ CLIB_PAD_FROM_TO (0xcd60, 0xce00);
+
+ u32 tx_flow_control_status;
+ CLIB_PAD_FROM_TO (0xce04, 0xd000);
+
+ ixge_dma_regs_t rx_dma1[64];
+
+ struct
+ {
+ /* Bigendian ip4 src/dst address. */
+ u32 src_address[128];
+ u32 dst_address[128];
+
+ /* TCP/UDP ports [15:0] src [31:16] dst; bigendian. */
+ u32 tcp_udp_port[128];
+
+ /* [1:0] protocol tcp, udp, sctp, other
+ [4:2] match priority (highest wins)
+ [13:8] pool
+ [25] src address match disable
+ [26] dst address match disable
+ [27] src port match disable
+ [28] dst port match disable
+ [29] protocol match disable
+ [30] pool match disable
+ [31] enable. */
+ u32 control[128];
+
+ /* [12] size bypass
+ [19:13] must be 0x80
+ [20] low-latency interrupt
+ [27:21] rx queue. */
+ u32 interrupt[128];
+ } ip4_filters;
+
+ CLIB_PAD_FROM_TO (0xea00, 0xeb00);
+ /* 4 bit rss output index indexed by 7 bit hash.
+ 128 8 bit fields = 32 registers. */
+ u32 redirection_table_82599[32];
+
+ u32 rss_random_key_82599[10];
+ CLIB_PAD_FROM_TO (0xeba8, 0xec00);
+ /* [15:0] reserved
+ [22:16] rx queue index
+ [29] low-latency interrupt on match
+ [31] enable */
+ u32 ethernet_type_queue_select[8];
+ CLIB_PAD_FROM_TO (0xec20, 0xec30);
+ u32 syn_packet_queue_filter;
+ CLIB_PAD_FROM_TO (0xec34, 0xec60);
+ u32 immediate_interrupt_rx_vlan_priority;
+ CLIB_PAD_FROM_TO (0xec64, 0xec70);
+ u32 rss_queues_per_traffic_class;
+ CLIB_PAD_FROM_TO (0xec74, 0xec90);
+ u32 lli_size_threshold;
+ CLIB_PAD_FROM_TO (0xec94, 0xed00);
+
+ struct
+ {
+ u32 control;
+ CLIB_PAD_FROM_TO (0xed04, 0xed10);
+ u32 table[8];
+ CLIB_PAD_FROM_TO (0xed30, 0xee00);
+ } fcoe_redirection;
+
+ struct
+ {
+ /* [1:0] packet buffer allocation 0 => disabled, else 64k*2^(f-1)
+ [3] packet buffer initialization done
+ [4] perfetch match mode
+ [5] report status in rss field of rx descriptors
+ [7] report status always
+ [14:8] drop queue
+ [20:16] flex 2 byte packet offset (units of 2 bytes)
+ [27:24] max linked list length
+ [31:28] full threshold. */
+ u32 control;
+ CLIB_PAD_FROM_TO (0xee04, 0xee0c);
+
+ u32 data[8];
+
+ /* [1:0] 0 => no action, 1 => add, 2 => remove, 3 => query.
+ [2] valid filter found by query command
+ [3] filter update override
+ [4] ip6 address table
+ [6:5] l4 protocol reserved, udp, tcp, sctp
+ [7] is ip6
+ [8] clear head/tail
+ [9] packet drop action
+ [10] matched packet generates low-latency interrupt
+ [11] last in linked list
+ [12] collision
+ [15] rx queue enable
+ [22:16] rx queue
+ [29:24] pool. */
+ u32 command;
+
+ CLIB_PAD_FROM_TO (0xee30, 0xee3c);
+ /* ip4 dst/src address, tcp ports, udp ports.
+ set bits mean bit is ignored. */
+ u32 ip4_masks[4];
+ u32 filter_length;
+ u32 usage_stats;
+ u32 failed_usage_stats;
+ u32 filters_match_stats;
+ u32 filters_miss_stats;
+ CLIB_PAD_FROM_TO (0xee60, 0xee68);
+ /* Lookup, signature. */
+ u32 hash_keys[2];
+ /* [15:0] ip6 src address 1 bit per byte
+ [31:16] ip6 dst address. */
+ u32 ip6_mask;
+ /* [0] vlan id
+ [1] vlan priority
+ [2] pool
+ [3] ip protocol
+ [4] flex
+ [5] dst ip6. */
+ u32 other_mask;
+ CLIB_PAD_FROM_TO (0xee78, 0xf000);
+ } flow_director;
+
+ struct
+ {
+ u32 l2_control[64];
+ u32 vlan_pool_filter[64];
+ u32 vlan_pool_filter_bitmap[128];
+ u32 dst_ethernet_address[128];
+ u32 mirror_rule[4];
+ u32 mirror_rule_vlan[8];
+ u32 mirror_rule_pool[8];
+ CLIB_PAD_FROM_TO (0xf650, 0x10010);
+ } pf_bar;
+
+ u32 eeprom_flash_control;
+ /* [0] start
+ [1] done
+ [15:2] address
+ [31:16] read data. */
+ u32 eeprom_read;
+ CLIB_PAD_FROM_TO (0x10018, 0x1001c);
+ u32 flash_access;
+ CLIB_PAD_FROM_TO (0x10020, 0x10114);
+ u32 flash_data;
+ u32 flash_control;
+ u32 flash_read_data;
+ CLIB_PAD_FROM_TO (0x10120, 0x1013c);
+ u32 flash_opcode;
+ u32 software_semaphore;
+ CLIB_PAD_FROM_TO (0x10144, 0x10148);
+ u32 firmware_semaphore;
+ CLIB_PAD_FROM_TO (0x1014c, 0x10160);
+ u32 software_firmware_sync;
+ CLIB_PAD_FROM_TO (0x10164, 0x10200);
+ u32 general_rx_control;
+ CLIB_PAD_FROM_TO (0x10204, 0x11000);
+
+ struct
+ {
+ u32 control;
+ CLIB_PAD_FROM_TO (0x11004, 0x11010);
+ /* [3:0] enable counters
+ [7:4] leaky bucket counter mode
+ [29] reset
+ [30] stop
+ [31] start. */
+ u32 counter_control;
+ /* [7:0],[15:8],[23:16],[31:24] event for counters 0-3.
+ event codes:
+ 0x0 bad tlp
+ 0x10 reqs that reached timeout
+ etc. */
+ u32 counter_event;
+ CLIB_PAD_FROM_TO (0x11018, 0x11020);
+ u32 counters_clear_on_read[4];
+ u32 counter_config[4];
+ struct
+ {
+ u32 address;
+ u32 data;
+ } indirect_access;
+ CLIB_PAD_FROM_TO (0x11048, 0x11050);
+ u32 extended_control;
+ CLIB_PAD_FROM_TO (0x11054, 0x11064);
+ u32 mirrored_revision_id;
+ CLIB_PAD_FROM_TO (0x11068, 0x11070);
+ u32 dca_requester_id_information;
+
+ /* [0] global disable
+ [4:1] mode: 0 => legacy, 1 => dca 1.0. */
+ u32 dca_control;
+ CLIB_PAD_FROM_TO (0x11078, 0x110b0);
+ /* [0] pci completion abort
+ [1] unsupported i/o address
+ [2] wrong byte enable
+ [3] pci timeout */
+ u32 pcie_interrupt_status;
+ CLIB_PAD_FROM_TO (0x110b4, 0x110b8);
+ u32 pcie_interrupt_enable;
+ CLIB_PAD_FROM_TO (0x110bc, 0x110c0);
+ u32 msi_x_pba_clear[8];
+ CLIB_PAD_FROM_TO (0x110e0, 0x12300);
+ } pcie;
+
+ u32 interrupt_throttle1[128 - 24];
+ CLIB_PAD_FROM_TO (0x124a0, 0x14f00);
+
+ u32 core_analog_config;
+ CLIB_PAD_FROM_TO (0x14f04, 0x14f10);
+ u32 core_common_config;
+ CLIB_PAD_FROM_TO (0x14f14, 0x15f14);
+
+ u32 link_sec_software_firmware_interface;
+} ixge_regs_t;
+
+typedef union
+{
+ struct
+ {
+ /* Addresses bigendian. */
+ union
+ {
+ struct
+ {
+ ip6_address_t src_address;
+ u32 unused[1];
+ } ip6;
+ struct
+ {
+ u32 unused[3];
+ ip4_address_t src_address, dst_address;
+ } ip4;
+ };
+
+ /* [15:0] src port (little endian).
+ [31:16] dst port. */
+ u32 tcp_udp_ports;
+
+ /* [15:0] vlan (cfi bit set to 0).
+ [31:16] flex bytes. bigendian. */
+ u32 vlan_and_flex_word;
+
+ /* [14:0] hash
+ [15] bucket valid
+ [31:16] signature (signature filters)/sw-index (perfect match). */
+ u32 hash;
+ };
+
+ u32 as_u32[8];
+} ixge_flow_director_key_t;
+
+always_inline void
+ixge_throttle_queue_interrupt (ixge_regs_t * r,
+ u32 queue_interrupt_index,
+ f64 inter_interrupt_interval_in_secs)
+{
+ ASSERT (queue_interrupt_index < 128);
+ /* throttle0 covers queue interrupts 0-23; interrupt_throttle1 holds
+ entries 24-127, hence the index adjustment. */
+ volatile u32 *tr =
+ (queue_interrupt_index < ARRAY_LEN (r->interrupt.throttle0)
+ ? &r->interrupt.throttle0[queue_interrupt_index]
+ : &r->interrupt_throttle1[queue_interrupt_index
+ - ARRAY_LEN (r->interrupt.throttle0)]);
+ u32 v;
+ i32 i, mask = (1 << 9) - 1;
+
+ i = flt_round_nearest (inter_interrupt_interval_in_secs / 2e-6);
+ i = i < 1 ? 1 : i;
+ i = i >= mask ? mask : i;
+
+ v = tr[0];
+ v &= ~(mask << 3);
+ v |= i << 3;
+ tr[0] = v;
+}
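+
+/*
+ * For example, inter_interrupt_interval_in_secs = 1e-3 yields
+ * i = flt_round_nearest (1e-3 / 2e-6) = 500, clamped to [1, 511] and
+ * written to throttle register bits [11:3].
+ */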
+
+#define foreach_ixge_counter \
+ _ (0x40d0, rx_total_packets) \
+ _64 (0x40c0, rx_total_bytes) \
+ _ (0x41b0, rx_good_packets_before_filtering) \
+ _64 (0x41b4, rx_good_bytes_before_filtering) \
+ _ (0x2f50, rx_dma_good_packets) \
+ _64 (0x2f54, rx_dma_good_bytes) \
+ _ (0x2f5c, rx_dma_duplicated_good_packets) \
+ _64 (0x2f60, rx_dma_duplicated_good_bytes) \
+ _ (0x2f68, rx_dma_good_loopback_packets) \
+ _64 (0x2f6c, rx_dma_good_loopback_bytes) \
+ _ (0x2f74, rx_dma_good_duplicated_loopback_packets) \
+ _64 (0x2f78, rx_dma_good_duplicated_loopback_bytes) \
+ _ (0x4074, rx_good_packets) \
+ _64 (0x4088, rx_good_bytes) \
+ _ (0x407c, rx_multicast_packets) \
+ _ (0x4078, rx_broadcast_packets) \
+ _ (0x405c, rx_64_byte_packets) \
+ _ (0x4060, rx_65_127_byte_packets) \
+ _ (0x4064, rx_128_255_byte_packets) \
+ _ (0x4068, rx_256_511_byte_packets) \
+ _ (0x406c, rx_512_1023_byte_packets) \
+ _ (0x4070, rx_gt_1023_byte_packets) \
+ _ (0x4000, rx_crc_errors) \
+ _ (0x4120, rx_ip_checksum_errors) \
+ _ (0x4004, rx_illegal_symbol_errors) \
+ _ (0x4008, rx_error_symbol_errors) \
+ _ (0x4034, rx_mac_local_faults) \
+ _ (0x4038, rx_mac_remote_faults) \
+ _ (0x4040, rx_length_errors) \
+ _ (0x41a4, rx_xons) \
+ _ (0x41a8, rx_xoffs) \
+ _ (0x40a4, rx_undersize_packets) \
+ _ (0x40a8, rx_fragments) \
+ _ (0x40ac, rx_oversize_packets) \
+ _ (0x40b0, rx_jabbers) \
+ _ (0x40b4, rx_management_packets) \
+ _ (0x40b8, rx_management_drops) \
+ _ (0x3fa0, rx_missed_packets_pool_0) \
+ _ (0x40d4, tx_total_packets) \
+ _ (0x4080, tx_good_packets) \
+ _64 (0x4090, tx_good_bytes) \
+ _ (0x40f0, tx_multicast_packets) \
+ _ (0x40f4, tx_broadcast_packets) \
+ _ (0x87a0, tx_dma_good_packets) \
+ _64 (0x87a4, tx_dma_good_bytes) \
+ _ (0x40d8, tx_64_byte_packets) \
+ _ (0x40dc, tx_65_127_byte_packets) \
+ _ (0x40e0, tx_128_255_byte_packets) \
+ _ (0x40e4, tx_256_511_byte_packets) \
+ _ (0x40e8, tx_512_1023_byte_packets) \
+ _ (0x40ec, tx_gt_1023_byte_packets) \
+ _ (0x4010, tx_undersize_drops) \
+ _ (0x8780, switch_security_violation_packets) \
+ _ (0x5118, fc_crc_errors) \
+ _ (0x241c, fc_rx_drops) \
+ _ (0x2424, fc_last_error_count) \
+ _ (0x2428, fcoe_rx_packets) \
+ _ (0x242c, fcoe_rx_dwords) \
+ _ (0x8784, fcoe_tx_packets) \
+ _ (0x8788, fcoe_tx_dwords) \
+ _ (0x1030, queue_0_rx_count) \
+ _ (0x1430, queue_0_drop_count) \
+ _ (0x1070, queue_1_rx_count) \
+ _ (0x1470, queue_1_drop_count) \
+ _ (0x10b0, queue_2_rx_count) \
+ _ (0x14b0, queue_2_drop_count) \
+ _ (0x10f0, queue_3_rx_count) \
+ _ (0x14f0, queue_3_drop_count) \
+ _ (0x1130, queue_4_rx_count) \
+ _ (0x1530, queue_4_drop_count) \
+ _ (0x1170, queue_5_rx_count) \
+ _ (0x1570, queue_5_drop_count) \
+ _ (0x11b0, queue_6_rx_count) \
+ _ (0x15b0, queue_6_drop_count) \
+ _ (0x11f0, queue_7_rx_count) \
+ _ (0x15f0, queue_7_drop_count) \
+ _ (0x1230, queue_8_rx_count) \
+ _ (0x1630, queue_8_drop_count) \
+ _ (0x1270, queue_9_rx_count) \
+ _ (0x1670, queue_9_drop_count)
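+
+/*
+ * The _ / _64 X-macro pairs above expand several ways: the enum below
+ * turns e.g. _ (0x40d0, rx_total_packets) into
+ * IXGE_COUNTER_rx_total_packets, while ixge.c reuses the same list to
+ * build the parallel reg_offsets[], ixge_counter_flags[] and name
+ * tables.
+ */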
+
+typedef enum
+{
+#define _(a,f) IXGE_COUNTER_##f,
+#define _64(a,f) _(a,f)
+ foreach_ixge_counter
+#undef _
+#undef _64
+ IXGE_N_COUNTER,
+} ixge_counter_type_t;
+
+typedef struct
+{
+ u32 mdio_address;
+
+ /* 32 bit ID read from ID registers. */
+ u32 id;
+} ixge_phy_t;
+
+typedef struct
+{
+ /* Cache aligned descriptors. */
+ ixge_descriptor_t *descriptors;
+
+ /* Number of descriptors in table. */
+ u32 n_descriptors;
+
+ /* Software head and tail pointers into descriptor ring. */
+ u32 head_index, tail_index;
+
+ /* Index into dma_queues vector. */
+ u32 queue_index;
+
+ /* Buffer indices corresponding to each active descriptor. */
+ u32 *descriptor_buffer_indices;
+
+ union
+ {
+ struct
+ {
+ u32 *volatile head_index_write_back;
+
+ u32 n_buffers_on_ring;
+ } tx;
+
+ struct
+ {
+ /* Buffer indices to use to replenish each descriptor. */
+ u32 *replenish_buffer_indices;
+
+ vlib_node_runtime_t *node;
+ u32 next_index;
+
+ u32 saved_start_of_packet_buffer_index;
+
+ u32 saved_start_of_packet_next_index;
+ u32 saved_last_buffer_index;
+
+ u32 is_start_of_packet;
+
+ u32 n_descriptors_done_total;
+
+ u32 n_descriptors_done_this_call;
+
+ u32 n_bytes;
+ } rx;
+ };
+} ixge_dma_queue_t;
+
+#define foreach_ixge_pci_device_id \
+ _ (82598, 0x10b6) \
+ _ (82598_bx, 0x1508) \
+ _ (82598af_dual_port, 0x10c6) \
+ _ (82598af_single_port, 0x10c7) \
+ _ (82598at, 0x10c8) \
+ _ (82598at2, 0x150b) \
+ _ (82598eb_sfp_lom, 0x10db) \
+ _ (82598eb_cx4, 0x10dd) \
+ _ (82598_cx4_dual_port, 0x10ec) \
+ _ (82598_da_dual_port, 0x10f1) \
+ _ (82598_sr_dual_port_em, 0x10e1) \
+ _ (82598eb_xf_lr, 0x10f4) \
+ _ (82599_kx4, 0x10f7) \
+ _ (82599_kx4_mezz, 0x1514) \
+ _ (82599_kr, 0x1517) \
+ _ (82599_combo_backplane, 0x10f8) \
+ _ (82599_cx4, 0x10f9) \
+ _ (82599_sfp, 0x10fb) \
+ _ (82599_backplane_fcoe, 0x152a) \
+ _ (82599_sfp_fcoe, 0x1529) \
+ _ (82599_sfp_em, 0x1507) \
+ _ (82599_xaui_lom, 0x10fc) \
+ _ (82599_t3_lom, 0x151c) \
+ _ (x540t, 0x1528)
+
+typedef enum
+{
+#define _(f,n) IXGE_##f = n,
+ foreach_ixge_pci_device_id
+#undef _
+} ixge_pci_device_id_t;
+
+typedef struct
+{
+ /* registers */
+ ixge_regs_t *regs;
+
+ /* Specific next index when using dynamic redirection */
+ u32 per_interface_next_index;
+
+ /* PCI bus info. */
+ vlib_pci_device_t pci_device;
+
+ /* From PCI config space header. */
+ ixge_pci_device_id_t device_id;
+
+ u16 device_index;
+
+ /* 0 or 1. */
+ u16 pci_function;
+
+ /* VLIB interface for this instance. */
+ u32 vlib_hw_if_index, vlib_sw_if_index;
+
+ ixge_dma_queue_t *dma_queues[VLIB_N_RX_TX];
+
+ /* Phy index (0 or 1) and address on MDI bus. */
+ u32 phy_index;
+ ixge_phy_t phys[2];
+
+ /* Value of link_status register at last link change. */
+ u32 link_status_at_last_link_change;
+
+ i2c_bus_t i2c_bus;
+ sfp_eeprom_t sfp_eeprom;
+
+ /* Counters. */
+ u64 counters[IXGE_N_COUNTER], counters_last_clear[IXGE_N_COUNTER];
+} ixge_device_t;
+
+typedef struct
+{
+ vlib_main_t *vlib_main;
+
+ /* Vector of devices. */
+ ixge_device_t *devices;
+
+ /* Descriptor ring sizes. */
+ u32 n_descriptors[VLIB_N_RX_TX];
+
+ /* RX buffer size. Must be at least 1k; will be rounded to
+ next largest 1k size. */
+ u32 n_bytes_in_rx_buffer;
+
+ u32 n_descriptors_per_cache_line;
+
+ u32 vlib_buffer_free_list_index;
+
+ u32 process_node_index;
+
+ /* Template and mask for initializing/validating TX descriptors. */
+ ixge_tx_descriptor_t tx_descriptor_template, tx_descriptor_template_mask;
+
+ /* Vector of buffers for which TX is done and can be freed. */
+ u32 *tx_buffers_pending_free;
+
+ u32 *rx_buffers_to_add;
+
+ f64 time_last_stats_update;
+} ixge_main_t;
+
+ixge_main_t ixge_main;
+vnet_device_class_t ixge_device_class;
+
+typedef enum
+{
+ IXGE_RX_NEXT_IP4_INPUT,
+ IXGE_RX_NEXT_IP6_INPUT,
+ IXGE_RX_NEXT_ETHERNET_INPUT,
+ IXGE_RX_NEXT_DROP,
+ IXGE_RX_N_NEXT,
+} ixge_rx_next_t;
+
+void ixge_set_next_node (ixge_rx_next_t, char *);
+
+#endif /* included_ixge_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/nic/sfp.c b/src/vnet/devices/nic/sfp.c
new file mode 100644
index 00000000000..9e9c008dc15
--- /dev/null
+++ b/src/vnet/devices/nic/sfp.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/devices/nic/sfp.h>
+
+static u8 *
+format_space_terminated (u8 * s, va_list * args)
+{
+ u32 l = va_arg (*args, u32);
+ u8 *v = va_arg (*args, u8 *);
+ u8 *p;
+
+ for (p = v + l - 1; p >= v && p[0] == ' '; p--)
+ ;
+ vec_add (s, v, clib_min (p - v + 1, l));
+ return s;
+}
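+
+/*
+ * SFP eeprom string fields (vendor_name etc.) are fixed width and
+ * space padded; e.g. a 16-byte field "ACME            " formats as
+ * "ACME".
+ */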
+
+static u8 *
+format_sfp_id (u8 * s, va_list * args)
+{
+ u32 id = va_arg (*args, u32);
+ char *t = 0;
+ switch (id)
+ {
+#define _(f) case SFP_ID_##f: t = #f; break;
+ foreach_sfp_id
+#undef _
+ default:
+ return format (s, "unknown 0x%x", id);
+ }
+ return format (s, "%s", t);
+}
+
+static u8 *
+format_sfp_compatibility (u8 * s, va_list * args)
+{
+ u32 c = va_arg (*args, u32);
+ char *t = 0;
+ switch (c)
+ {
+#define _(a,b,f) case SFP_COMPATIBILITY_##f: t = #f; break;
+ foreach_sfp_compatibility
+#undef _
+ default:
+ return format (s, "unknown 0x%x", c);
+ }
+ return format (s, "%s", t);
+}
+
+u32
+sfp_is_compatible (sfp_eeprom_t * e, sfp_compatibility_t c)
+{
+ static struct
+ {
+ u8 byte, bit;
+ } t[] =
+ {
+#define _(a,b,f) { .byte = a, .bit = b, },
+ foreach_sfp_compatibility
+#undef _
+ };
+
+ ASSERT (c < ARRAY_LEN (t));
+ return (e->compatibility[t[c].byte] & (1 << t[c].bit)) != 0;
+}
+
+u8 *
+format_sfp_eeprom (u8 * s, va_list * args)
+{
+ sfp_eeprom_t *e = va_arg (*args, sfp_eeprom_t *);
+ uword indent = format_get_indent (s);
+ int i;
+
+ if (e->id != SFP_ID_sfp)
+ s = format (s, "id %U, ", format_sfp_id, e->id);
+
+ s = format (s, "compatibility:");
+ for (i = 0; i < SFP_N_COMPATIBILITY; i++)
+ if (sfp_is_compatible (e, i))
+ s = format (s, " %U", format_sfp_compatibility, i);
+
+ s = format (s, "\n%Uvendor: %U, part %U",
+ format_white_space, indent,
+ format_space_terminated, sizeof (e->vendor_name),
+ e->vendor_name, format_space_terminated,
+ sizeof (e->vendor_part_number), e->vendor_part_number);
+ s =
+ format (s, "\n%Urevision: %U, serial: %U, date code: %U",
+ format_white_space, indent, format_space_terminated,
+ sizeof (e->vendor_revision), e->vendor_revision,
+ format_space_terminated, sizeof (e->vendor_serial_number),
+ e->vendor_serial_number, format_space_terminated,
+ sizeof (e->vendor_date_code), e->vendor_date_code);
+
+ return s;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/nic/sfp.h b/src/vnet/devices/nic/sfp.h
new file mode 100644
index 00000000000..a1ac7997a44
--- /dev/null
+++ b/src/vnet/devices/nic/sfp.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_vnet_optics_sfp_h
+#define included_vnet_optics_sfp_h
+
+#include <vppinfra/format.h>
+
+#define foreach_sfp_id \
+ _ (unknown) \
+ _ (gbic) \
+ _ (on_motherboard) \
+ _ (sfp)
+
+typedef enum
+{
+#define _(f) SFP_ID_##f,
+ foreach_sfp_id
+#undef _
+} sfp_id_t;
+
+typedef struct
+{
+ u8 id;
+ u8 extended_id;
+ u8 connector_type;
+ u8 compatibility[8];
+ u8 encoding;
+ u8 nominal_bit_rate_100mbits_per_sec;
+ u8 reserved13;
+ u8 link_length[5];
+ u8 reserved19;
+ u8 vendor_name[16];
+ u8 reserved36;
+ u8 vendor_oui[3];
+ u8 vendor_part_number[16];
+ u8 vendor_revision[4];
+ /* 16 bit value network byte order. */
+ u8 laser_wavelength_in_nm[2];
+ u8 reserved62;
+ u8 checksum_0_to_62;
+
+ u8 options[2];
+ u8 max_bit_rate_margin_percent;
+ u8 min_bit_rate_margin_percent;
+ u8 vendor_serial_number[16];
+ u8 vendor_date_code[8];
+ u8 reserved92[3];
+ u8 checksum_63_to_94;
+ u8 vendor_specific[32];
+ u8 reserved128[384];
+
+ /* Vendor specific data follows. */
+ u8 vendor_specific1[0];
+} sfp_eeprom_t;
+
+always_inline uword
+sfp_eeprom_is_valid (sfp_eeprom_t * e)
+{
+ int i;
+ u8 sum = 0;
+ for (i = 0; i < 63; i++)
+ sum += ((u8 *) e)[i];
+ return sum == e->checksum_0_to_62;
+}
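+
+/*
+ * The serial-ID checksum is the low 8 bits of the byte sum over bytes
+ * 0..62, stored at byte 63 (checksum_0_to_62); since sum is a u8 the
+ * mod-256 wrap is implicit.
+ */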
+
+/* _ (byte_index, bit_index, name) */
+#define foreach_sfp_compatibility \
+ _ (0, 4, 10g_base_sr) \
+ _ (0, 5, 10g_base_lr) \
+ _ (1, 2, oc48_long_reach) \
+ _ (1, 1, oc48_intermediate_reach) \
+ _ (1, 0, oc48_short_reach) \
+ _ (2, 6, oc12_long_reach) \
+ _ (2, 5, oc12_intermediate_reach) \
+ _ (2, 4, oc12_short_reach) \
+ _ (2, 2, oc3_long_reach) \
+ _ (2, 1, oc3_intermediate_reach) \
+ _ (2, 0, oc3_short_reach) \
+ _ (3, 3, 1g_base_t) \
+ _ (3, 2, 1g_base_cx) \
+ _ (3, 1, 1g_base_lx) \
+ _ (3, 0, 1g_base_sx)
+
+typedef enum
+{
+#define _(a,b,f) SFP_COMPATIBILITY_##f,
+ foreach_sfp_compatibility
+#undef _
+ SFP_N_COMPATIBILITY,
+} sfp_compatibility_t;
+
+u32 sfp_is_compatible (sfp_eeprom_t * e, sfp_compatibility_t c);
+
+format_function_t format_sfp_eeprom;
+
+#endif /* included_vnet_optics_sfp_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/ssvm/node.c b/src/vnet/devices/ssvm/node.c
new file mode 100644
index 00000000000..3a695b1d8c0
--- /dev/null
+++ b/src/vnet/devices/ssvm/node.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "ssvm_eth.h"
+
+vlib_node_registration_t ssvm_eth_input_node;
+
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+} ssvm_eth_input_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_ssvm_eth_input_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ssvm_eth_input_trace_t *t = va_arg (*args, ssvm_eth_input_trace_t *);
+
+ s = format (s, "SSVM_ETH_INPUT: sw_if_index %d, next index %d",
+ t->sw_if_index, t->next_index);
+ return s;
+}
+
+#define foreach_ssvm_eth_input_error \
+_(NO_BUFFERS, "Rx packet drops (no buffers)")
+
+typedef enum
+{
+#define _(sym,str) SSVM_ETH_INPUT_ERROR_##sym,
+ foreach_ssvm_eth_input_error
+#undef _
+ SSVM_ETH_INPUT_N_ERROR,
+} ssvm_eth_input_error_t;
+
+static char *ssvm_eth_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ssvm_eth_input_error
+#undef _
+};
+
+typedef enum
+{
+ SSVM_ETH_INPUT_NEXT_DROP,
+ SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT,
+ SSVM_ETH_INPUT_NEXT_IP4_INPUT,
+ SSVM_ETH_INPUT_NEXT_IP6_INPUT,
+ SSVM_ETH_INPUT_NEXT_MPLS_INPUT,
+ SSVM_ETH_INPUT_N_NEXT,
+} ssvm_eth_input_next_t;
+
+static inline uword
+ssvm_eth_device_input (ssvm_eth_main_t * em,
+ ssvm_private_t * intfc, vlib_node_runtime_t * node)
+{
+ ssvm_shared_header_t *sh = intfc->sh;
+ vlib_main_t *vm = em->vlib_main;
+ unix_shared_memory_queue_t *q;
+ ssvm_eth_queue_elt_t *elt, *elts;
+ u32 elt_index;
+ u32 my_pid = intfc->my_pid;
+ int rx_queue_index;
+ u32 n_to_alloc = VLIB_FRAME_SIZE * 2;
+ u32 n_allocated, n_present_in_cache;
+ u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ vlib_buffer_free_list_t *fl;
+ u32 n_left_to_next, *to_next;
+ u32 next0;
+ u32 n_buffers;
+ u32 n_available;
+ u32 bi0, saved_bi0;
+ vlib_buffer_t *b0, *prev;
+ u32 saved_cache_size = 0;
+ ethernet_header_t *eh0;
+ u16 type0;
+ u32 n_rx_bytes = 0, l3_offset0;
+ u32 cpu_index = os_get_cpu_number ();
+ u32 trace_cnt __attribute__ ((unused)) = vlib_get_trace_count (vm, node);
+ volatile u32 *lock;
+ u32 *elt_indices;
+ uword n_trace = vlib_get_trace_count (vm, node);
+
+ /* Either side down? buh-bye... */
+ if (pointer_to_uword (sh->opaque[MASTER_ADMIN_STATE_INDEX]) == 0 ||
+ pointer_to_uword (sh->opaque[SLAVE_ADMIN_STATE_INDEX]) == 0)
+ return 0;
+
+ if (intfc->i_am_master)
+ q = (unix_shared_memory_queue_t *) (sh->opaque[TO_MASTER_Q_INDEX]);
+ else
+ q = (unix_shared_memory_queue_t *) (sh->opaque[TO_SLAVE_Q_INDEX]);
+
+ /* Nothing to do? */
+ if (q->cursize == 0)
+ return 0;
+
+ fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+
+ vec_reset_length (intfc->rx_queue);
+
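+  /*
+   * Drain the rx element queue under a simple test-and-set spinlock:
+   * grab the lock, move every element index into a private vector, then
+   * release, so the shared queue is held as briefly as possible.
+   */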
+ lock = (u32 *) q;
+ while (__sync_lock_test_and_set (lock, 1))
+ ;
+ while (q->cursize > 0)
+ {
+ unix_shared_memory_queue_sub_raw (q, (u8 *) & elt_index);
+ ASSERT (elt_index < 2048);
+ vec_add1 (intfc->rx_queue, elt_index);
+ }
+ CLIB_MEMORY_BARRIER ();
+ *lock = 0;
+
+ n_present_in_cache = vec_len (em->buffer_cache);
+
+ if (vec_len (em->buffer_cache) < vec_len (intfc->rx_queue) * 2)
+ {
+ vec_validate (em->buffer_cache,
+ n_to_alloc + vec_len (em->buffer_cache) - 1);
+ n_allocated =
+ vlib_buffer_alloc (vm, &em->buffer_cache[n_present_in_cache],
+ n_to_alloc);
+
+ n_present_in_cache += n_allocated;
+ _vec_len (em->buffer_cache) = n_present_in_cache;
+ }
+
+ elts = (ssvm_eth_queue_elt_t *) (sh->opaque[CHUNK_POOL_INDEX]);
+
+ n_buffers = vec_len (intfc->rx_queue);
+ rx_queue_index = 0;
+
+ while (n_buffers > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_buffers > 0 && n_left_to_next > 0)
+ {
+ elt = elts + intfc->rx_queue[rx_queue_index];
+
+ saved_cache_size = n_present_in_cache;
+ if (PREDICT_FALSE (saved_cache_size == 0))
+ {
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ goto out;
+ }
+ saved_bi0 = bi0 = em->buffer_cache[--n_present_in_cache];
+ b0 = vlib_get_buffer (vm, bi0);
+ prev = 0;
+
+ while (1)
+ {
+ vlib_buffer_init_for_free_list (b0, fl);
+
+ b0->current_data = elt->current_data_hint;
+ b0->current_length = elt->length_this_buffer;
+ b0->total_length_not_including_first_buffer =
+ elt->total_length_not_including_first_buffer;
+
+ clib_memcpy (b0->data + b0->current_data, elt->data,
+ b0->current_length);
+
+ if (PREDICT_FALSE (prev != 0))
+ prev->next_buffer = bi0;
+
+ if (PREDICT_FALSE (elt->flags & SSVM_BUFFER_NEXT_PRESENT))
+ {
+ prev = b0;
+ if (PREDICT_FALSE (n_present_in_cache == 0))
+ {
+ vlib_put_next_frame (vm, node, next_index,
+ n_left_to_next);
+ goto out;
+ }
+ bi0 = em->buffer_cache[--n_present_in_cache];
+ b0 = vlib_get_buffer (vm, bi0);
+ }
+ else
+ break;
+ }
+
+ saved_cache_size = n_present_in_cache;
+
+ to_next[0] = saved_bi0;
+ to_next++;
+ n_left_to_next--;
+
+ b0 = vlib_get_buffer (vm, saved_bi0);
+ eh0 = vlib_buffer_get_current (b0);
+
+ type0 = clib_net_to_host_u16 (eh0->type);
+
+ next0 = SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT;
+
+ if (type0 == ETHERNET_TYPE_IP4)
+ next0 = SSVM_ETH_INPUT_NEXT_IP4_INPUT;
+ else if (type0 == ETHERNET_TYPE_IP6)
+ next0 = SSVM_ETH_INPUT_NEXT_IP6_INPUT;
+ else if (type0 == ETHERNET_TYPE_MPLS_UNICAST)
+ next0 = SSVM_ETH_INPUT_NEXT_MPLS_INPUT;
+
+ l3_offset0 = ((next0 == SSVM_ETH_INPUT_NEXT_IP4_INPUT ||
+ next0 == SSVM_ETH_INPUT_NEXT_IP6_INPUT ||
+ next0 == SSVM_ETH_INPUT_NEXT_MPLS_INPUT) ?
+ sizeof (ethernet_header_t) : 0);
+
+ n_rx_bytes += b0->current_length
+ + b0->total_length_not_including_first_buffer;
+
+ b0->current_data += l3_offset0;
+ b0->current_length -= l3_offset0;
+ b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = intfc->vlib_hw_if_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+ /*
+ * Turn this on if you run into
+ * "bad monkey" contexts, and you want to know exactly
+ * which nodes they've visited... See main.c...
+ */
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+
+ if (PREDICT_FALSE (n_trace > 0))
+ {
+ ssvm_eth_input_trace_t *tr;
+
+ vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 1);
+ vlib_set_trace_count (vm, node, --n_trace);
+
+ tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
+
+ tr->next_index = next0;
+ tr->sw_if_index = intfc->vlib_hw_if_index;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ n_buffers--;
+ rx_queue_index++;
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+out:
+ if (em->buffer_cache)
+ _vec_len (em->buffer_cache) = saved_cache_size;
+ else
+ ASSERT (saved_cache_size == 0);
+
+ ssvm_lock (sh, my_pid, 2);
+
+ ASSERT (vec_len (intfc->rx_queue) > 0);
+
+ n_available = (u32) pointer_to_uword (sh->opaque[CHUNK_POOL_NFREE]);
+ elt_indices = (u32 *) (sh->opaque[CHUNK_POOL_FREELIST_INDEX]);
+
+ clib_memcpy (&elt_indices[n_available], intfc->rx_queue,
+ vec_len (intfc->rx_queue) * sizeof (u32));
+
+ n_available += vec_len (intfc->rx_queue);
+ sh->opaque[CHUNK_POOL_NFREE] = uword_to_pointer (n_available, void *);
+
+ ssvm_unlock (sh);
+
+ vlib_error_count (vm, node->node_index, SSVM_ETH_INPUT_ERROR_NO_BUFFERS,
+ n_buffers);
+
+ vlib_increment_combined_counter
+ (vnet_get_main ()->interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX, cpu_index,
+ intfc->vlib_hw_if_index, rx_queue_index, n_rx_bytes);
+
+ return rx_queue_index;
+}
+
+static uword
+ssvm_eth_input_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+ ssvm_private_t *intfc;
+ uword n_rx_packets = 0;
+
+ vec_foreach (intfc, em->intfcs)
+ {
+ n_rx_packets += ssvm_eth_device_input (em, intfc, node);
+ }
+
+ return n_rx_packets;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ssvm_eth_input_node) = {
+ .function = ssvm_eth_input_node_fn,
+ .name = "ssvm_eth_input",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ssvm_eth_input_trace,
+ .type = VLIB_NODE_TYPE_INPUT,
+ .state = VLIB_NODE_STATE_DISABLED,
+
+ .n_errors = ARRAY_LEN(ssvm_eth_input_error_strings),
+ .error_strings = ssvm_eth_input_error_strings,
+
+ .n_next_nodes = SSVM_ETH_INPUT_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [SSVM_ETH_INPUT_NEXT_DROP] = "error-drop",
+ [SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT] = "ethernet-input",
+ [SSVM_ETH_INPUT_NEXT_IP4_INPUT] = "ip4-input",
+ [SSVM_ETH_INPUT_NEXT_IP6_INPUT] = "ip6-input",
+ [SSVM_ETH_INPUT_NEXT_MPLS_INPUT] = "mpls-input",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (ssvm_eth_input_node, ssvm_eth_input_node_fn)
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/ssvm/ssvm_eth.c b/src/vnet/devices/ssvm/ssvm_eth.c
new file mode 100644
index 00000000000..db4fafa9a14
--- /dev/null
+++ b/src/vnet/devices/ssvm/ssvm_eth.c
@@ -0,0 +1,491 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "ssvm_eth.h"
+
+ssvm_eth_main_t ssvm_eth_main;
+
+#define foreach_ssvm_eth_tx_func_error \
+_(RING_FULL, "Tx packet drops (ring full)") \
+_(NO_BUFFERS, "Tx packet drops (no buffers)") \
+_(ADMIN_DOWN, "Tx packet drops (admin down)")
+
+typedef enum
+{
+#define _(f,s) SSVM_ETH_TX_ERROR_##f,
+ foreach_ssvm_eth_tx_func_error
+#undef _
+ SSVM_ETH_TX_N_ERROR,
+} ssvm_eth_tx_func_error_t;
+
+static u32 ssvm_eth_flag_change (vnet_main_t * vnm,
+ vnet_hw_interface_t * hi, u32 flags);
+
+int
+ssvm_eth_create (ssvm_eth_main_t * em, u8 * name, int is_master)
+{
+ ssvm_private_t *intfc;
+ void *oldheap;
+ clib_error_t *e;
+ unix_shared_memory_queue_t *q;
+ ssvm_shared_header_t *sh;
+ ssvm_eth_queue_elt_t *elts;
+ u32 *elt_indices;
+ u8 enet_addr[6];
+ int i, rv;
+
+ vec_add2 (em->intfcs, intfc, 1);
+
+ intfc->ssvm_size = em->segment_size;
+ intfc->i_am_master = 1;
+ intfc->name = name;
+ intfc->my_pid = getpid ();
+ if (is_master == 0)
+ {
+ rv = ssvm_slave_init (intfc, 20 /* timeout in seconds */ );
+ if (rv < 0)
+ return rv;
+ goto create_vnet_interface;
+ }
+
+ intfc->requested_va = em->next_base_va;
+ em->next_base_va += em->segment_size;
+ rv = ssvm_master_init (intfc, intfc - em->intfcs /* master index */ );
+
+ if (rv < 0)
+ return rv;
+
+ /* OK, segment created, set up queues and so forth. */
+
+ sh = intfc->sh;
+ oldheap = ssvm_push_heap (sh);
+
+ q = unix_shared_memory_queue_init (em->queue_elts, sizeof (u32),
+ 0 /* consumer pid not interesting */ ,
+ 0 /* signal not sent */ );
+ sh->opaque[TO_MASTER_Q_INDEX] = (void *) q;
+ q = unix_shared_memory_queue_init (em->queue_elts, sizeof (u32),
+ 0 /* consumer pid not interesting */ ,
+ 0 /* signal not sent */ );
+ sh->opaque[TO_SLAVE_Q_INDEX] = (void *) q;
+
+  /*
+   * Preallocate the requested number of buffer chunks.
+   * There is probably a better way to do this; add some slop to avoid
+   * pool reallocation, which would not go well in a shared segment.
+   */
+ elts = 0;
+ elt_indices = 0;
+
+ vec_validate_aligned (elts, em->nbuffers - 1, CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (elt_indices, em->nbuffers - 1, CLIB_CACHE_LINE_BYTES);
+
+ for (i = 0; i < em->nbuffers; i++)
+ elt_indices[i] = i;
+
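+  /*
+   * Publish the chunk pool, its freelist and the free count through the
+   * shared header's opaque[] slots (see ssvm_eth_opaque_index_t), so the
+   * peer process can find them without any further rendezvous.
+   */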
+ sh->opaque[CHUNK_POOL_INDEX] = (void *) elts;
+ sh->opaque[CHUNK_POOL_FREELIST_INDEX] = (void *) elt_indices;
+ sh->opaque[CHUNK_POOL_NFREE] = (void *) (uword) em->nbuffers;
+
+ ssvm_pop_heap (oldheap);
+
+create_vnet_interface:
+
+ sh = intfc->sh;
+
+ memset (enet_addr, 0, sizeof (enet_addr));
+ enet_addr[0] = 2;
+ enet_addr[1] = 0xFE;
+ enet_addr[2] = is_master;
+ enet_addr[5] = sh->master_index;
+
+ e = ethernet_register_interface
+ (em->vnet_main, ssvm_eth_device_class.index, intfc - em->intfcs,
+ /* ethernet address */ enet_addr,
+ &intfc->vlib_hw_if_index, ssvm_eth_flag_change);
+
+ if (e)
+ {
+ clib_error_report (e);
+ /* $$$$ unmap offending region? */
+ return VNET_API_ERROR_INVALID_INTERFACE;
+ }
+
+ /* Declare link up */
+ vnet_hw_interface_set_flags (em->vnet_main, intfc->vlib_hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+
+ /* Let the games begin... */
+ if (is_master)
+ sh->ready = 1;
+ return 0;
+}
+
+static clib_error_t *
+ssvm_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ u8 *name;
+ int is_master = 1;
+ int i, rv;
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "base-va %llx", &em->next_base_va))
+ ;
+ else if (unformat (input, "segment-size %lld", &em->segment_size))
+ em->segment_size = 1ULL << (max_log2 (em->segment_size));
+ else if (unformat (input, "nbuffers %lld", &em->nbuffers))
+ ;
+ else if (unformat (input, "queue-elts %lld", &em->queue_elts))
+ ;
+ else if (unformat (input, "slave"))
+ is_master = 0;
+ else if (unformat (input, "%s", &name))
+ vec_add1 (em->names, name);
+ else
+ break;
+ }
+
+ /* No configured instances, we're done... */
+ if (vec_len (em->names) == 0)
+ return 0;
+
+ for (i = 0; i < vec_len (em->names); i++)
+ {
+ rv = ssvm_eth_create (em, em->names[i], is_master);
+ if (rv < 0)
+ return clib_error_return (0, "ssvm_eth_create '%s' failed, error %d",
+ em->names[i], rv);
+ }
+
+ vlib_node_set_state (vm, ssvm_eth_input_node.index,
+ VLIB_NODE_STATE_POLLING);
+
+ return 0;
+}
+
+VLIB_CONFIG_FUNCTION (ssvm_config, "ssvm_eth");
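+
+/*
+ * Illustrative startup.conf stanza consumed by the parser above; the
+ * interface name "eth0" and the values shown are hypothetical:
+ *
+ *   ssvm_eth {
+ *     segment-size 8388608
+ *     nbuffers 1024
+ *     queue-elts 512
+ *     eth0
+ *   }
+ */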
+
+
+static clib_error_t *
+ssvm_eth_init (vlib_main_t * vm)
+{
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+
+ if (((sizeof (ssvm_eth_queue_elt_t) / CLIB_CACHE_LINE_BYTES)
+ * CLIB_CACHE_LINE_BYTES) != sizeof (ssvm_eth_queue_elt_t))
+ clib_warning ("ssvm_eth_queue_elt_t size %d not a multiple of %d",
+ sizeof (ssvm_eth_queue_elt_t), CLIB_CACHE_LINE_BYTES);
+
+ em->vlib_main = vm;
+ em->vnet_main = vnet_get_main ();
+ em->elog_main = &vm->elog_main;
+
+ /* default config param values... */
+
+ em->next_base_va = 0x600000000ULL;
+  /*
+   * Two full superframes in each direction (256 x 2 x 2 x 2048 bytes)
+   * come to 2 MB; reserve 8 MB so there is plenty of headroom.
+   */
+ em->segment_size = 8 << 20;
+ em->nbuffers = 1024;
+ em->queue_elts = 512;
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ssvm_eth_init);
+
+static char *ssvm_eth_tx_func_error_strings[] = {
+#define _(n,s) s,
+ foreach_ssvm_eth_tx_func_error
+#undef _
+};
+
+static u8 *
+format_ssvm_eth_device_name (u8 * s, va_list * args)
+{
+ u32 i = va_arg (*args, u32);
+
+ s = format (s, "ssvmEthernet%d", i);
+ return s;
+}
+
+static u8 *
+format_ssvm_eth_device (u8 * s, va_list * args)
+{
+ s = format (s, "SSVM Ethernet");
+ return s;
+}
+
+static u8 *
+format_ssvm_eth_tx_trace (u8 * s, va_list * args)
+{
+ s = format (s, "Unimplemented...");
+ return s;
+}
+
+
+static uword
+ssvm_eth_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * f)
+{
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ ssvm_private_t *intfc = vec_elt_at_index (em->intfcs, rd->dev_instance);
+ ssvm_shared_header_t *sh = intfc->sh;
+ unix_shared_memory_queue_t *q;
+ u32 *from;
+ u32 n_left;
+ ssvm_eth_queue_elt_t *elts, *elt, *prev_elt;
+ u32 my_pid = intfc->my_pid;
+ vlib_buffer_t *b0;
+ u32 bi0;
+ u32 size_this_buffer;
+ u32 chunks_this_buffer;
+ u8 i_am_master = intfc->i_am_master;
+ u32 elt_index;
+ int is_ring_full, interface_down;
+ int i;
+ volatile u32 *queue_lock;
+ u32 n_to_alloc = VLIB_FRAME_SIZE;
+ u32 n_allocated, n_present_in_cache, n_available;
+ u32 *elt_indices;
+
+ if (i_am_master)
+ q = (unix_shared_memory_queue_t *) sh->opaque[TO_SLAVE_Q_INDEX];
+ else
+ q = (unix_shared_memory_queue_t *) sh->opaque[TO_MASTER_Q_INDEX];
+
+ queue_lock = (u32 *) q;
+
+ from = vlib_frame_vector_args (f);
+ n_left = f->n_vectors;
+ is_ring_full = 0;
+ interface_down = 0;
+
+ n_present_in_cache = vec_len (em->chunk_cache);
+
+ /* admin / link up/down check */
+ if (sh->opaque[MASTER_ADMIN_STATE_INDEX] == 0 ||
+ sh->opaque[SLAVE_ADMIN_STATE_INDEX] == 0)
+ {
+ interface_down = 1;
+ goto out;
+ }
+
+ ssvm_lock (sh, my_pid, 1);
+
+ elts = (ssvm_eth_queue_elt_t *) (sh->opaque[CHUNK_POOL_INDEX]);
+ elt_indices = (u32 *) (sh->opaque[CHUNK_POOL_FREELIST_INDEX]);
+ n_available = (u32) pointer_to_uword (sh->opaque[CHUNK_POOL_NFREE]);
+
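+  /*
+   * Refill the private chunk cache from the shared freelist while the
+   * segment lock is held; the n_left * 2 headroom is a heuristic for
+   * packets that chain across multiple chunks.
+   */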
+ if (n_present_in_cache < n_left * 2)
+ {
+ vec_validate (em->chunk_cache, n_to_alloc + n_present_in_cache - 1);
+
+ n_allocated = n_to_alloc < n_available ? n_to_alloc : n_available;
+
+ if (PREDICT_TRUE (n_allocated > 0))
+ {
+ clib_memcpy (&em->chunk_cache[n_present_in_cache],
+ &elt_indices[n_available - n_allocated],
+ sizeof (u32) * n_allocated);
+ }
+
+ n_present_in_cache += n_allocated;
+ n_available -= n_allocated;
+ sh->opaque[CHUNK_POOL_NFREE] = uword_to_pointer (n_available, void *);
+ _vec_len (em->chunk_cache) = n_present_in_cache;
+ }
+
+ ssvm_unlock (sh);
+
+ while (n_left)
+ {
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+
+ size_this_buffer = vlib_buffer_length_in_chain (vm, b0);
+ chunks_this_buffer = (size_this_buffer + (SSVM_BUFFER_SIZE - 1))
+ / SSVM_BUFFER_SIZE;
+
+ /* If we're not going to be able to enqueue the buffer, tail drop. */
+ if (q->cursize >= q->maxsize)
+ {
+ is_ring_full = 1;
+ break;
+ }
+
+ prev_elt = 0;
+ elt_index = ~0;
+ for (i = 0; i < chunks_this_buffer; i++)
+ {
+ if (PREDICT_FALSE (n_present_in_cache == 0))
+ goto out;
+
+ elt_index = em->chunk_cache[--n_present_in_cache];
+ elt = elts + elt_index;
+
+ elt->type = SSVM_PACKET_TYPE;
+ elt->flags = 0;
+ elt->total_length_not_including_first_buffer =
+ b0->total_length_not_including_first_buffer;
+ elt->length_this_buffer = b0->current_length;
+ elt->current_data_hint = b0->current_data;
+ elt->owner = !i_am_master;
+ elt->tag = 1;
+
+ clib_memcpy (elt->data, b0->data + b0->current_data,
+ b0->current_length);
+
+ if (PREDICT_FALSE (prev_elt != 0))
+ prev_elt->next_index = elt - elts;
+
+ if (PREDICT_FALSE (i < (chunks_this_buffer - 1)))
+ {
+ elt->flags = SSVM_BUFFER_NEXT_PRESENT;
+ ASSERT (b0->flags & VLIB_BUFFER_NEXT_PRESENT);
+ b0 = vlib_get_buffer (vm, b0->next_buffer);
+ }
+ prev_elt = elt;
+ }
+
+ while (__sync_lock_test_and_set (queue_lock, 1))
+ ;
+
+ unix_shared_memory_queue_add_raw (q, (u8 *) & elt_index);
+ CLIB_MEMORY_BARRIER ();
+ *queue_lock = 0;
+
+ from++;
+ n_left--;
+ }
+
+out:
+ if (PREDICT_FALSE (n_left))
+ {
+ if (is_ring_full)
+ vlib_error_count (vm, node->node_index, SSVM_ETH_TX_ERROR_RING_FULL,
+ n_left);
+ else if (interface_down)
+ vlib_error_count (vm, node->node_index, SSVM_ETH_TX_ERROR_ADMIN_DOWN,
+ n_left);
+ else
+ vlib_error_count (vm, node->node_index, SSVM_ETH_TX_ERROR_NO_BUFFERS,
+ n_left);
+
+ vlib_buffer_free (vm, from, n_left);
+ }
+ else
+ vlib_buffer_free (vm, vlib_frame_vector_args (f), f->n_vectors);
+
+ if (PREDICT_TRUE (vec_len (em->chunk_cache)))
+ _vec_len (em->chunk_cache) = n_present_in_cache;
+
+ return f->n_vectors;
+}
+
+static void
+ssvm_eth_clear_hw_interface_counters (u32 instance)
+{
+ /* Nothing for now */
+}
+
+static clib_error_t *
+ssvm_eth_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
+ u32 flags)
+{
+ vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
+ uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+ ssvm_private_t *intfc = vec_elt_at_index (em->intfcs, hif->dev_instance);
+ ssvm_shared_header_t *sh;
+
+  /* publish link state in shared memory, to discourage wasting buffers */
+ sh = intfc->sh;
+ if (intfc->i_am_master)
+ sh->opaque[MASTER_ADMIN_STATE_INDEX] = (void *) is_up;
+ else
+ sh->opaque[SLAVE_ADMIN_STATE_INDEX] = (void *) is_up;
+
+ return 0;
+}
+
+static clib_error_t *
+ssvm_eth_subif_add_del_function (vnet_main_t * vnm,
+ u32 hw_if_index,
+ struct vnet_sw_interface_t *st, int is_add)
+{
+ /* Nothing for now */
+ return 0;
+}
+
+/*
+ * Dynamically redirect all pkts from a specific interface
+ * to the specified node
+ */
+static void
+ssvm_eth_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
+ u32 node_index)
+{
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ ssvm_private_t *intfc = pool_elt_at_index (em->intfcs, hw->dev_instance);
+
+ /* Shut off redirection */
+ if (node_index == ~0)
+ {
+ intfc->per_interface_next_index = node_index;
+ return;
+ }
+
+ intfc->per_interface_next_index =
+ vlib_node_add_next (em->vlib_main, ssvm_eth_input_node.index, node_index);
+}
+
+static u32
+ssvm_eth_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi, u32 flags)
+{
+ /* nothing for now */
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (ssvm_eth_device_class) = {
+ .name = "ssvm-eth",
+ .tx_function = ssvm_eth_interface_tx,
+ .tx_function_n_errors = SSVM_ETH_TX_N_ERROR,
+ .tx_function_error_strings = ssvm_eth_tx_func_error_strings,
+ .format_device_name = format_ssvm_eth_device_name,
+ .format_device = format_ssvm_eth_device,
+ .format_tx_trace = format_ssvm_eth_tx_trace,
+ .clear_counters = ssvm_eth_clear_hw_interface_counters,
+ .admin_up_down_function = ssvm_eth_interface_admin_up_down,
+ .subif_add_del_function = ssvm_eth_subif_add_del_function,
+ .rx_redirect_to_node = ssvm_eth_set_interface_next_node,
+};
+
+VLIB_DEVICE_TX_FUNCTION_MULTIARCH (ssvm_eth_device_class,
+ ssvm_eth_interface_tx)
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/ssvm/ssvm_eth.h b/src/vnet/devices/ssvm/ssvm_eth.h
new file mode 100644
index 00000000000..f877df3cd33
--- /dev/null
+++ b/src/vnet/devices/ssvm/ssvm_eth.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_ssvm_eth_h__
+#define __included_ssvm_eth_h__
+
+#include <vnet/vnet.h>
+
+#include <vppinfra/elog.h>
+#include <vppinfra/error.h>
+#include <vppinfra/format.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/elog.h>
+#include <vlib/vlib.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/devices.h>
+#include <vnet/ip/ip.h>
+#include <vnet/pg/pg.h>
+#include <vlibmemory/unix_shared_memory_queue.h>
+
+#include <svm/ssvm.h>
+
+extern vnet_device_class_t ssvm_eth_device_class;
+extern vlib_node_registration_t ssvm_eth_input_node;
+
+#define SSVM_BUFFER_SIZE \
+ (VLIB_BUFFER_DATA_SIZE + VLIB_BUFFER_PRE_DATA_SIZE)
+#define SSVM_PACKET_TYPE 1
+
+typedef struct
+{
+ /* Type of queue element */
+ u8 type;
+ u8 flags;
+#define SSVM_BUFFER_NEXT_PRESENT (1<<0)
+ u8 owner;
+ u8 tag;
+ i16 current_data_hint;
+ u16 length_this_buffer;
+ u16 total_length_not_including_first_buffer;
+ u16 pad;
+ u32 next_index;
+ /* offset 16 */
+ u8 data[SSVM_BUFFER_SIZE];
+ /* pad to an even multiple of 64 octets */
+ u8 pad2[CLIB_CACHE_LINE_BYTES - 16];
+} ssvm_eth_queue_elt_t;
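+
+/*
+ * Layout note: the fields above form a 16-octet header in front of the
+ * data area, and pad2 rounds the element out; ssvm_eth_init() warns at
+ * startup if sizeof (ssvm_eth_queue_elt_t) is not a cache-line multiple.
+ */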
+
+typedef struct
+{
+ /* vector of point-to-point connections */
+ ssvm_private_t *intfcs;
+
+ u32 *buffer_cache;
+ u32 *chunk_cache;
+
+ /* Configurable parameters */
+ /* base address for next placement */
+ u64 next_base_va;
+ u64 segment_size;
+ u64 nbuffers;
+ u64 queue_elts;
+
+ /* Segment names */
+ u8 **names;
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+ elog_main_t *elog_main;
+} ssvm_eth_main_t;
+
+ssvm_eth_main_t ssvm_eth_main;
+
+typedef enum
+{
+ CHUNK_POOL_FREELIST_INDEX = 0,
+ CHUNK_POOL_INDEX,
+ CHUNK_POOL_NFREE,
+ TO_MASTER_Q_INDEX,
+ TO_SLAVE_Q_INDEX,
+ MASTER_ADMIN_STATE_INDEX,
+ SLAVE_ADMIN_STATE_INDEX,
+} ssvm_eth_opaque_index_t;
+
+/*
+ * debug scaffolding.
+ */
+static inline void
+ssvm_eth_validate_freelists (int need_lock)
+{
+#if CLIB_DEBUG > 0
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+ ssvm_private_t *intfc;
+ ssvm_shared_header_t *sh;
+ u32 *elt_indices;
+ u32 n_available;
+ int i;
+
+ for (i = 0; i < vec_len (em->intfcs); i++)
+ {
+ intfc = em->intfcs + i;
+ sh = intfc->sh;
+ u32 my_pid = intfc->my_pid;
+
+ if (need_lock)
+ ssvm_lock (sh, my_pid, 15);
+
+ elt_indices = (u32 *) (sh->opaque[CHUNK_POOL_FREELIST_INDEX]);
+ n_available = (u32) (uword) (sh->opaque[CHUNK_POOL_NFREE]);
+
+ for (i = 0; i < n_available; i++)
+ ASSERT (elt_indices[i] < 2048);
+
+ if (need_lock)
+ ssvm_unlock (sh);
+ }
+#endif
+}
+
+#endif /* __included_ssvm_eth_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/virtio/dir.dox b/src/vnet/devices/virtio/dir.dox
new file mode 100644
index 00000000000..50150799e62
--- /dev/null
+++ b/src/vnet/devices/virtio/dir.dox
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Doxygen directory documentation */
+
+/**
+@dir
+@brief vHost User Interface Implementation.
+
+This directory contains the source code for vHost User driver.
+
+*/
+/*? %%clicmd:group_label vHost User %% ?*/
+/*? %%syscfg:group_label vHost User %% ?*/
diff --git a/src/vnet/devices/virtio/vhost-user.c b/src/vnet/devices/virtio/vhost-user.c
new file mode 100644
index 00000000000..bde8106c501
--- /dev/null
+++ b/src/vnet/devices/virtio/vhost-user.c
@@ -0,0 +1,3314 @@
+/*
+ *------------------------------------------------------------------
+ * vhost.c - vhost-user
+ *
+ * Copyright (c) 2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <fcntl.h> /* for open */
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/uio.h> /* for iovec */
+#include <netinet/in.h>
+#include <sys/vfs.h>
+
+#include <linux/if_arp.h>
+#include <linux/if_tun.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+
+#include <vnet/ip/ip.h>
+
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/devices.h>
+#include <vnet/feature/feature.h>
+
+#include <vnet/devices/virtio/vhost-user.h>
+
+/**
+ * @file
+ * @brief vHost User Device Driver.
+ *
+ * This file contains the source code for vHost User interface.
+ */
+
+
+#define VHOST_USER_DEBUG_SOCKET 0
+#define VHOST_DEBUG_VQ 0
+
+#if VHOST_USER_DEBUG_SOCKET == 1
+#define DBG_SOCK(args...) clib_warning(args);
+#else
+#define DBG_SOCK(args...)
+#endif
+
+#if VHOST_DEBUG_VQ == 1
+#define DBG_VQ(args...) clib_warning(args);
+#else
+#define DBG_VQ(args...)
+#endif
+
+/*
+ * When an RX queue is down but active, received packets
+ * must be discarded. This value caps how many packets
+ * are discarded during each round.
+ */
+#define VHOST_USER_DOWN_DISCARD_COUNT 256
+
+/*
+ * When the number of available buffers gets under this threshold,
+ * RX node will start discarding packets.
+ */
+#define VHOST_USER_RX_BUFFER_STARVATION 32
+
+/*
+ * On the receive side, the host should free descriptors as soon
+ * as possible in order to avoid TX drop in the VM.
+ * This value controls how many copy operations are batched
+ * before they are all executed and the descriptors are handed back
+ * to the guest.
+ * The value 64 was obtained by testing (48 and 128 were not as good).
+ */
+#define VHOST_USER_RX_COPY_THRESHOLD 64
+
+#define UNIX_GET_FD(unixfd_idx) \
+  (((unixfd_idx) != ~0) ? \
+   pool_elt_at_index (unix_main.file_pool, \
+                      (unixfd_idx))->file_descriptor : -1)
+
+#define foreach_virtio_trace_flags \
+ _ (SIMPLE_CHAINED, 0, "Simple descriptor chaining") \
+ _ (SINGLE_DESC, 1, "Single descriptor packet") \
+ _ (INDIRECT, 2, "Indirect descriptor") \
+ _ (MAP_ERROR, 4, "Memory mapping error")
+
+typedef enum
+{
+#define _(n,i,s) VIRTIO_TRACE_F_##n,
+ foreach_virtio_trace_flags
+#undef _
+} virtio_trace_flag_t;
+
+vlib_node_registration_t vhost_user_input_node;
+
+#define foreach_vhost_user_tx_func_error \
+ _(NONE, "no error") \
+ _(NOT_READY, "vhost vring not ready") \
+ _(DOWN, "vhost interface is down") \
+ _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)") \
+ _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)") \
+ _(MMAP_FAIL, "mmap failure") \
+ _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")
+
+typedef enum
+{
+#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
+ foreach_vhost_user_tx_func_error
+#undef _
+ VHOST_USER_TX_FUNC_N_ERROR,
+} vhost_user_tx_func_error_t;
+
+static char *vhost_user_tx_func_error_strings[] = {
+#define _(n,s) s,
+ foreach_vhost_user_tx_func_error
+#undef _
+};
+
+#define foreach_vhost_user_input_func_error \
+ _(NO_ERROR, "no error") \
+ _(NO_BUFFER, "no available buffer") \
+ _(MMAP_FAIL, "mmap failure") \
+ _(INDIRECT_OVERFLOW, "indirect descriptor overflows table") \
+ _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \
+ _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)")
+
+typedef enum
+{
+#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,
+ foreach_vhost_user_input_func_error
+#undef _
+ VHOST_USER_INPUT_FUNC_N_ERROR,
+} vhost_user_input_func_error_t;
+
+static char *vhost_user_input_func_error_strings[] = {
+#define _(n,s) s,
+ foreach_vhost_user_input_func_error
+#undef _
+};
+
+/* *INDENT-OFF* */
+static vhost_user_main_t vhost_user_main = {
+ .mtu_bytes = 1518,
+};
+
+VNET_HW_INTERFACE_CLASS (vhost_interface_class, static) = {
+ .name = "vhost-user",
+};
+/* *INDENT-ON* */
+
+static u8 *
+format_vhost_user_interface_name (u8 * s, va_list * args)
+{
+ u32 i = va_arg (*args, u32);
+ u32 show_dev_instance = ~0;
+ vhost_user_main_t *vum = &vhost_user_main;
+
+ if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
+ show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];
+
+ if (show_dev_instance != ~0)
+ i = show_dev_instance;
+
+ s = format (s, "VirtualEthernet0/0/%d", i);
+ return s;
+}
+
+static int
+vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
+{
+ // FIXME: check if the new dev instance is already used
+ vhost_user_main_t *vum = &vhost_user_main;
+ vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
+ hi->dev_instance, ~0);
+
+ vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
+ new_dev_instance;
+
+ DBG_SOCK ("renumbered vhost-user interface dev_instance %d to %d",
+ hi->dev_instance, new_dev_instance);
+
+ return 0;
+}
+
+static_always_inline void *
+map_guest_mem (vhost_user_intf_t * vui, uword addr, u32 * hint)
+{
+ int i = *hint;
+ if (PREDICT_TRUE ((vui->regions[i].guest_phys_addr <= addr) &&
+ ((vui->regions[i].guest_phys_addr +
+ vui->regions[i].memory_size) > addr)))
+ {
+ return (void *) (vui->region_mmap_addr[i] + addr -
+ vui->regions[i].guest_phys_addr);
+ }
+#if __SSE4_2__
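+  /*
+   * SIMD fast path: compare addr against the lo/hi bounds of all eight
+   * possible regions, two regions per 128-bit load, merge the per-pair
+   * results into one mask and take the index of the first hit.
+   */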
+ __m128i rl, rh, al, ah, r;
+ al = _mm_set1_epi64x (addr + 1);
+ ah = _mm_set1_epi64x (addr);
+
+ rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[0]);
+ rl = _mm_cmpgt_epi64 (al, rl);
+ rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[0]);
+ rh = _mm_cmpgt_epi64 (rh, ah);
+ r = _mm_and_si128 (rl, rh);
+
+ rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[2]);
+ rl = _mm_cmpgt_epi64 (al, rl);
+ rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[2]);
+ rh = _mm_cmpgt_epi64 (rh, ah);
+ r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x22);
+
+ rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[4]);
+ rl = _mm_cmpgt_epi64 (al, rl);
+ rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[4]);
+ rh = _mm_cmpgt_epi64 (rh, ah);
+ r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x44);
+
+ rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[6]);
+ rl = _mm_cmpgt_epi64 (al, rl);
+ rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[6]);
+ rh = _mm_cmpgt_epi64 (rh, ah);
+ r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x88);
+
+ r = _mm_shuffle_epi8 (r, _mm_set_epi64x (0, 0x0e060c040a020800));
+ i = __builtin_ctzll (_mm_movemask_epi8 (r));
+
+ if (i < vui->nregions)
+ {
+ *hint = i;
+ return (void *) (vui->region_mmap_addr[i] + addr -
+ vui->regions[i].guest_phys_addr);
+ }
+
+#else
+ for (i = 0; i < vui->nregions; i++)
+ {
+ if ((vui->regions[i].guest_phys_addr <= addr) &&
+ ((vui->regions[i].guest_phys_addr + vui->regions[i].memory_size) >
+ addr))
+ {
+ *hint = i;
+ return (void *) (vui->region_mmap_addr[i] + addr -
+ vui->regions[i].guest_phys_addr);
+ }
+ }
+#endif
+ DBG_VQ ("failed to map guest mem addr %llx", addr);
+ *hint = 0;
+ return 0;
+}
+
+static inline void *
+map_user_mem (vhost_user_intf_t * vui, uword addr)
+{
+ int i;
+ for (i = 0; i < vui->nregions; i++)
+ {
+ if ((vui->regions[i].userspace_addr <= addr) &&
+ ((vui->regions[i].userspace_addr + vui->regions[i].memory_size) >
+ addr))
+ {
+ return (void *) (vui->region_mmap_addr[i] + addr -
+ vui->regions[i].userspace_addr);
+ }
+ }
+ return 0;
+}
+
+static long
+get_huge_page_size (int fd)
+{
+ struct statfs s;
+ fstatfs (fd, &s);
+ return s.f_bsize;
+}
+
+static void
+unmap_all_mem_regions (vhost_user_intf_t * vui)
+{
+ int i, r;
+ for (i = 0; i < vui->nregions; i++)
+ {
+ if (vui->region_mmap_addr[i] != (void *) -1)
+ {
+
+ long page_sz = get_huge_page_size (vui->region_mmap_fd[i]);
+
+ ssize_t map_sz = (vui->regions[i].memory_size +
+ vui->regions[i].mmap_offset +
+ page_sz) & ~(page_sz - 1);
+
+ r =
+ munmap (vui->region_mmap_addr[i] - vui->regions[i].mmap_offset,
+ map_sz);
+
+ DBG_SOCK
+ ("unmap memory region %d addr 0x%lx len 0x%lx page_sz 0x%x", i,
+ vui->region_mmap_addr[i], map_sz, page_sz);
+
+ vui->region_mmap_addr[i] = (void *) -1;
+
+ if (r == -1)
+ {
+ clib_warning ("failed to unmap memory region (errno %d)",
+ errno);
+ }
+ close (vui->region_mmap_fd[i]);
+ }
+ }
+ vui->nregions = 0;
+}
+
+static void
+vhost_user_tx_thread_placement (vhost_user_intf_t * vui)
+{
+ //Let's try to assign one queue to each thread
+ u32 qid = 0;
+ u32 cpu_index = 0;
+ vui->use_tx_spinlock = 0;
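+  /*
+   * Assign tx queues to vlib threads round-robin.  If there are fewer
+   * enabled queues than threads, wrap around; several threads then share
+   * a queue and the tx spinlock must be taken.
+   */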
+ while (1)
+ {
+ for (qid = 0; qid < VHOST_VRING_MAX_N / 2; qid++)
+ {
+ vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
+ if (!rxvq->started || !rxvq->enabled)
+ continue;
+
+ vui->per_cpu_tx_qid[cpu_index] = qid;
+ cpu_index++;
+ if (cpu_index == vlib_get_thread_main ()->n_vlib_mains)
+ return;
+ }
+ //We need to loop, meaning the spinlock has to be used
+ vui->use_tx_spinlock = 1;
+ if (cpu_index == 0)
+ {
+ //Could not find a single valid one
+ for (cpu_index = 0;
+ cpu_index < vlib_get_thread_main ()->n_vlib_mains; cpu_index++)
+ {
+ vui->per_cpu_tx_qid[cpu_index] = 0;
+ }
+ return;
+ }
+ }
+}
+
+static void
+vhost_user_rx_thread_placement ()
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ vhost_cpu_t *vhc;
+ u32 *workers = 0;
+
+  //Let's list all worker cpu indices
+ u32 i;
+ for (i = vum->input_cpu_first_index;
+ i < vum->input_cpu_first_index + vum->input_cpu_count; i++)
+ {
+ vlib_node_set_state (vlib_mains ? vlib_mains[i] : &vlib_global_main,
+ vhost_user_input_node.index,
+ VLIB_NODE_STATE_DISABLED);
+ vec_add1 (workers, i);
+ }
+
+ vec_foreach (vhc, vum->cpus)
+ {
+ vec_reset_length (vhc->rx_queues);
+ }
+
+ i = 0;
+ vhost_iface_and_queue_t iaq;
+ /* *INDENT-OFF* */
+ pool_foreach (vui, vum->vhost_user_interfaces, {
+ u32 *vui_workers = vec_len (vui->workers) ? vui->workers : workers;
+ u32 qid;
+ for (qid = 0; qid < VHOST_VRING_MAX_N / 2; qid++)
+ {
+ vhost_user_vring_t *txvq =
+ &vui->vrings[VHOST_VRING_IDX_TX (qid)];
+ if (!txvq->started)
+ continue;
+
+ i %= vec_len (vui_workers);
+ u32 cpu_index = vui_workers[i];
+ i++;
+ vhc = &vum->cpus[cpu_index];
+
+ iaq.qid = qid;
+ iaq.vhost_iface_index = vui - vum->vhost_user_interfaces;
+ vec_add1 (vhc->rx_queues, iaq);
+ vlib_node_set_state (vlib_mains ? vlib_mains[cpu_index] :
+ &vlib_global_main, vhost_user_input_node.index,
+ VLIB_NODE_STATE_POLLING);
+ }
+ });
+ /* *INDENT-ON* */
+}
+
+static int
+vhost_user_thread_placement (u32 sw_if_index, u32 worker_thread_index, u8 del)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ vnet_hw_interface_t *hw;
+
+ if (worker_thread_index < vum->input_cpu_first_index ||
+ worker_thread_index >=
+ vum->input_cpu_first_index + vum->input_cpu_count)
+ return -1;
+
+ if (!(hw = vnet_get_sup_hw_interface (vnet_get_main (), sw_if_index)))
+ return -2;
+
+ vui = pool_elt_at_index (vum->vhost_user_interfaces, hw->dev_instance);
+ u32 found = ~0, *w;
+ vec_foreach (w, vui->workers)
+ {
+ if (*w == worker_thread_index)
+ {
+ found = w - vui->workers;
+ break;
+ }
+ }
+
+ if (del)
+ {
+ if (found == ~0)
+ return -3;
+ vec_del1 (vui->workers, found);
+ }
+ else if (found == ~0)
+ {
+ vec_add1 (vui->workers, worker_thread_index);
+ }
+
+ vhost_user_rx_thread_placement ();
+ return 0;
+}
+
+/** @brief Returns whether at least one TX and one RX vring are enabled */
+int
+vhost_user_intf_ready (vhost_user_intf_t * vui)
+{
+ int i, found[2] = { }; //RX + TX
+
+ for (i = 0; i < VHOST_VRING_MAX_N; i++)
+ if (vui->vrings[i].started && vui->vrings[i].enabled)
+ found[i & 1] = 1;
+
+ return found[0] && found[1];
+}
+
+static void
+vhost_user_update_iface_state (vhost_user_intf_t * vui)
+{
+ /* if we have pointers to descriptor table, go up */
+ int is_up = vhost_user_intf_ready (vui);
+ if (is_up != vui->is_up)
+ {
+ DBG_SOCK ("interface %d %s", vui->sw_if_index,
+ is_up ? "ready" : "down");
+ vnet_hw_interface_set_flags (vnet_get_main (), vui->hw_if_index,
+ is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP :
+ 0);
+ vui->is_up = is_up;
+ }
+ vhost_user_rx_thread_placement ();
+ vhost_user_tx_thread_placement (vui);
+}
+
+static clib_error_t *
+vhost_user_callfd_read_ready (unix_file_t * uf)
+{
+ __attribute__ ((unused)) int n;
+ u8 buff[8];
+ n = read (uf->file_descriptor, ((char *) &buff), 8);
+ return 0;
+}
+
+static clib_error_t *
+vhost_user_kickfd_read_ready (unix_file_t * uf)
+{
+ __attribute__ ((unused)) int n;
+ u8 buff[8];
+ vhost_user_intf_t *vui =
+ pool_elt_at_index (vhost_user_main.vhost_user_interfaces,
+ uf->private_data >> 8);
+ u32 qid = uf->private_data & 0xff;
+ n = read (uf->file_descriptor, ((char *) &buff), 8);
+ DBG_SOCK ("if %d KICK queue %d", uf->private_data >> 8, qid);
+
+ vlib_worker_thread_barrier_sync (vlib_get_main ());
+ vui->vrings[qid].started = 1;
+ vhost_user_update_iface_state (vui);
+ vlib_worker_thread_barrier_release (vlib_get_main ());
+ return 0;
+}
+
+/**
+ * @brief Try once to lock the vring
+ * @return 0 on success, non-zero on failure.
+ */
+static inline int
+vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
+{
+ return __sync_lock_test_and_set (vui->vring_locks[qid], 1);
+}
+
+/**
+ * @brief Spin until the vring is successfully locked
+ */
+static inline void
+vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
+{
+ while (vhost_user_vring_try_lock (vui, qid))
+ ;
+}
+
+/**
+ * @brief Unlock the vring lock
+ */
+static inline void
+vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
+{
+ *vui->vring_locks[qid] = 0;
+}
+
+static inline void
+vhost_user_vring_init (vhost_user_intf_t * vui, u32 qid)
+{
+ vhost_user_vring_t *vring = &vui->vrings[qid];
+ memset (vring, 0, sizeof (*vring));
+ vring->kickfd_idx = ~0;
+ vring->callfd_idx = ~0;
+ vring->errfd = -1;
+
+  /*
+   * We have a bug with some qemu 2.5 builds, and this may be the fix.
+   * It feels like interpreting holy text, but this is from vhost-user.txt:
+   * "
+   * One queue pair is enabled initially. More queues are enabled
+   * dynamically, by sending message VHOST_USER_SET_VRING_ENABLE.
+   * "
+   * It is unclear who is right, but this is what DPDK does.
+   */
+ if (qid == 0 || qid == 1)
+ vring->enabled = 1;
+}
+
+static inline void
+vhost_user_vring_close (vhost_user_intf_t * vui, u32 qid)
+{
+ vhost_user_vring_t *vring = &vui->vrings[qid];
+ if (vring->kickfd_idx != ~0)
+ {
+ unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
+ vring->kickfd_idx);
+ unix_file_del (&unix_main, uf);
+ vring->kickfd_idx = ~0;
+ }
+ if (vring->callfd_idx != ~0)
+ {
+ unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
+ vring->callfd_idx);
+ unix_file_del (&unix_main, uf);
+ vring->callfd_idx = ~0;
+ }
+ if (vring->errfd != -1)
+ close (vring->errfd);
+ vhost_user_vring_init (vui, qid);
+}
+
+static inline void
+vhost_user_if_disconnect (vhost_user_intf_t * vui)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ int q;
+
+ vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
+
+ if (vui->unix_file_index != ~0)
+ {
+ unix_file_del (&unix_main, unix_main.file_pool + vui->unix_file_index);
+ vui->unix_file_index = ~0;
+ }
+
+ vui->is_up = 0;
+
+ for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ vhost_user_vring_close (vui, q);
+
+ unmap_all_mem_regions (vui);
+ DBG_SOCK ("interface ifindex %d disconnected", vui->sw_if_index);
+}
+
+#define VHOST_LOG_PAGE 0x1000
+static_always_inline void
+vhost_user_log_dirty_pages_2 (vhost_user_intf_t * vui,
+ u64 addr, u64 len, u8 is_host_address)
+{
+ if (PREDICT_TRUE (vui->log_base_addr == 0
+ || !(vui->features & (1 << FEAT_VHOST_F_LOG_ALL))))
+ {
+ return;
+ }
+ if (is_host_address)
+ {
+ addr = (u64) map_user_mem (vui, (uword) addr);
+ }
+ if (PREDICT_FALSE ((addr + len - 1) / VHOST_LOG_PAGE / 8 >= vui->log_size))
+ {
+ DBG_SOCK ("vhost_user_log_dirty_pages(): out of range\n");
+ return;
+ }
+
+ CLIB_MEMORY_BARRIER ();
+ u64 page = addr / VHOST_LOG_PAGE;
+ while (page * VHOST_LOG_PAGE < addr + len)
+ {
+ ((u8 *) vui->log_base_addr)[page / 8] |= 1 << page % 8;
+ page++;
+ }
+}
+
+static_always_inline void
+vhost_user_log_dirty_pages (vhost_user_intf_t * vui, u64 addr, u64 len)
+{
+ vhost_user_log_dirty_pages_2 (vui, addr, len, 0);
+}
+
+#define vhost_user_log_dirty_ring(vui, vq, member) \
+ if (PREDICT_FALSE(vq->log_used)) { \
+ vhost_user_log_dirty_pages(vui, vq->log_guest_addr + STRUCT_OFFSET_OF(vring_used_t, member), \
+ sizeof(vq->used->member)); \
+ }
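+
+/*
+ * Usage sketch (hypothetical call site): after updating the used ring,
+ * the touched field is logged so a live-migrating guest observes it:
+ *
+ *   txvq->used->idx = new_idx;
+ *   vhost_user_log_dirty_ring (vui, txvq, idx);
+ */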
+
+static clib_error_t *
+vhost_user_socket_read (unix_file_t * uf)
+{
+ int n, i;
+ int fd, number_of_fds = 0;
+ int fds[VHOST_MEMORY_MAX_NREGIONS];
+ vhost_user_msg_t msg;
+ struct msghdr mh;
+ struct iovec iov[1];
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ struct cmsghdr *cmsg;
+ u8 q;
+ unix_file_t template = { 0 };
+ vnet_main_t *vnm = vnet_get_main ();
+
+ vui = pool_elt_at_index (vum->vhost_user_interfaces, uf->private_data);
+
+ char control[CMSG_SPACE (VHOST_MEMORY_MAX_NREGIONS * sizeof (int))];
+
+ memset (&mh, 0, sizeof (mh));
+ memset (control, 0, sizeof (control));
+
+ for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++)
+ fds[i] = -1;
+
+ /* set the payload */
+ iov[0].iov_base = (void *) &msg;
+ iov[0].iov_len = VHOST_USER_MSG_HDR_SZ;
+
+ mh.msg_iov = iov;
+ mh.msg_iovlen = 1;
+ mh.msg_control = control;
+ mh.msg_controllen = sizeof (control);
+
+ n = recvmsg (uf->file_descriptor, &mh, 0);
+
+ /* Stop workers to avoid end of the world */
+ vlib_worker_thread_barrier_sync (vlib_get_main ());
+
+ if (n != VHOST_USER_MSG_HDR_SZ)
+ {
+ if (n == -1)
+ {
+ DBG_SOCK ("recvmsg returned error %d %s", errno, strerror (errno));
+ }
+ else
+ {
+ DBG_SOCK ("n (%d) != VHOST_USER_MSG_HDR_SZ (%d)",
+ n, VHOST_USER_MSG_HDR_SZ);
+ }
+ goto close_socket;
+ }
+
+ if (mh.msg_flags & MSG_CTRUNC)
+ {
+ DBG_SOCK ("MSG_CTRUNC is set");
+ goto close_socket;
+ }
+
+ cmsg = CMSG_FIRSTHDR (&mh);
+
+ if (cmsg && (cmsg->cmsg_len > 0) && (cmsg->cmsg_level == SOL_SOCKET) &&
+ (cmsg->cmsg_type == SCM_RIGHTS) &&
+ (cmsg->cmsg_len - CMSG_LEN (0) <=
+ VHOST_MEMORY_MAX_NREGIONS * sizeof (int)))
+ {
+ number_of_fds = (cmsg->cmsg_len - CMSG_LEN (0)) / sizeof (int);
+ clib_memcpy (fds, CMSG_DATA (cmsg), number_of_fds * sizeof (int));
+ }
+
+ /* version 1, no reply bit set */
+ if ((msg.flags & 7) != 1)
+ {
+ DBG_SOCK ("malformed message received. closing socket");
+ goto close_socket;
+ }
+
+ {
+ int rv;
+ rv =
+ read (uf->file_descriptor, ((char *) &msg) + VHOST_USER_MSG_HDR_SZ,
+ msg.size);
+ if (rv < 0)
+ {
+ DBG_SOCK ("read failed %s", strerror (errno));
+ goto close_socket;
+ }
+ else if (rv != msg.size)
+ {
+ DBG_SOCK ("message too short (read %dB should be %dB)", rv, msg.size);
+ goto close_socket;
+ }
+ }
+
+ switch (msg.request)
+ {
+ case VHOST_USER_GET_FEATURES:
+ msg.flags |= 4;
+ msg.u64 = (1ULL << FEAT_VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << FEAT_VIRTIO_NET_F_CTRL_VQ) |
+ (1ULL << FEAT_VIRTIO_F_ANY_LAYOUT) |
+ (1ULL << FEAT_VIRTIO_F_INDIRECT_DESC) |
+ (1ULL << FEAT_VHOST_F_LOG_ALL) |
+ (1ULL << FEAT_VIRTIO_NET_F_GUEST_ANNOUNCE) |
+ (1ULL << FEAT_VIRTIO_NET_F_MQ) |
+ (1ULL << FEAT_VHOST_USER_F_PROTOCOL_FEATURES) |
+ (1ULL << FEAT_VIRTIO_F_VERSION_1);
+ msg.u64 &= vui->feature_mask;
+ msg.size = sizeof (msg.u64);
+ DBG_SOCK ("if %d msg VHOST_USER_GET_FEATURES - reply 0x%016llx",
+ vui->hw_if_index, msg.u64);
+ break;
+
+ case VHOST_USER_SET_FEATURES:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_FEATURES features 0x%016llx",
+ vui->hw_if_index, msg.u64);
+
+ vui->features = msg.u64;
+
+ if (vui->features &
+ ((1 << FEAT_VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << FEAT_VIRTIO_F_VERSION_1)))
+ vui->virtio_net_hdr_sz = 12;
+ else
+ vui->virtio_net_hdr_sz = 10;
+
+ vui->is_any_layout =
+ (vui->features & (1 << FEAT_VIRTIO_F_ANY_LAYOUT)) ? 1 : 0;
+
+ ASSERT (vui->virtio_net_hdr_sz < VLIB_BUFFER_PRE_DATA_SIZE);
+ vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
+ vui->is_up = 0;
+
+ /*for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ vhost_user_vring_close(&vui->vrings[q]); */
+
+ break;
+
+ case VHOST_USER_SET_MEM_TABLE:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_MEM_TABLE nregions %d",
+ vui->hw_if_index, msg.memory.nregions);
+
+ if ((msg.memory.nregions < 1) ||
+ (msg.memory.nregions > VHOST_MEMORY_MAX_NREGIONS))
+ {
+
+ DBG_SOCK ("number of mem regions must be between 1 and %i",
+ VHOST_MEMORY_MAX_NREGIONS);
+
+ goto close_socket;
+ }
+
+ if (msg.memory.nregions != number_of_fds)
+ {
+	  DBG_SOCK ("each memory region must have an fd");
+ goto close_socket;
+ }
+ unmap_all_mem_regions (vui);
+ for (i = 0; i < msg.memory.nregions; i++)
+ {
+ clib_memcpy (&(vui->regions[i]), &msg.memory.regions[i],
+ sizeof (vhost_user_memory_region_t));
+
+ long page_sz = get_huge_page_size (fds[i]);
+
+	  /* align size to the region's huge page size */
+ ssize_t map_sz = (vui->regions[i].memory_size +
+ vui->regions[i].mmap_offset +
+ page_sz) & ~(page_sz - 1);
+
+ vui->region_mmap_addr[i] = mmap (0, map_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fds[i], 0);
+ vui->region_guest_addr_lo[i] = vui->regions[i].guest_phys_addr;
+ vui->region_guest_addr_hi[i] = vui->regions[i].guest_phys_addr +
+ vui->regions[i].memory_size;
+
+ DBG_SOCK
+ ("map memory region %d addr 0 len 0x%lx fd %d mapped 0x%lx "
+ "page_sz 0x%x", i, map_sz, fds[i], vui->region_mmap_addr[i],
+ page_sz);
+
+ if (vui->region_mmap_addr[i] == MAP_FAILED)
+ {
+ clib_warning ("failed to map memory. errno is %d", errno);
+ goto close_socket;
+ }
+ vui->region_mmap_addr[i] += vui->regions[i].mmap_offset;
+ vui->region_mmap_fd[i] = fds[i];
+ }
+ vui->nregions = msg.memory.nregions;
+ break;
+
+ case VHOST_USER_SET_VRING_NUM:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_NUM idx %d num %d",
+ vui->hw_if_index, msg.state.index, msg.state.num);
+
+ if ((msg.state.num > 32768) || /* maximum ring size is 32768 */
+ (msg.state.num == 0) || /* it cannot be zero */
+ ((msg.state.num - 1) & msg.state.num)) /* must be power of 2 */
+ goto close_socket;
+ vui->vrings[msg.state.index].qsz = msg.state.num;
+ break;
+
+ case VHOST_USER_SET_VRING_ADDR:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_ADDR idx %d",
+ vui->hw_if_index, msg.state.index);
+
+ if (msg.state.index >= VHOST_VRING_MAX_N)
+ {
+ DBG_SOCK ("invalid vring index VHOST_USER_SET_VRING_ADDR:"
+ " %d >= %d", msg.state.index, VHOST_VRING_MAX_N);
+ goto close_socket;
+ }
+
+ if (msg.size < sizeof (msg.addr))
+ {
+ DBG_SOCK ("vhost message is too short (%d < %d)",
+ msg.size, sizeof (msg.addr));
+ goto close_socket;
+ }
+
+ vui->vrings[msg.state.index].desc = (vring_desc_t *)
+ map_user_mem (vui, msg.addr.desc_user_addr);
+ vui->vrings[msg.state.index].used = (vring_used_t *)
+ map_user_mem (vui, msg.addr.used_user_addr);
+ vui->vrings[msg.state.index].avail = (vring_avail_t *)
+ map_user_mem (vui, msg.addr.avail_user_addr);
+
+ if ((vui->vrings[msg.state.index].desc == NULL) ||
+ (vui->vrings[msg.state.index].used == NULL) ||
+ (vui->vrings[msg.state.index].avail == NULL))
+ {
+ DBG_SOCK ("failed to map user memory for hw_if_index %d",
+ vui->hw_if_index);
+ goto close_socket;
+ }
+
+ vui->vrings[msg.state.index].log_guest_addr = msg.addr.log_guest_addr;
+ vui->vrings[msg.state.index].log_used =
+ (msg.addr.flags & (1 << VHOST_VRING_F_LOG)) ? 1 : 0;
+
+ /* Spec says: If VHOST_USER_F_PROTOCOL_FEATURES has not been negotiated,
+ the ring is initialized in an enabled state. */
+ if (!(vui->features & (1 << FEAT_VHOST_USER_F_PROTOCOL_FEATURES)))
+ {
+ vui->vrings[msg.state.index].enabled = 1;
+ }
+
+ vui->vrings[msg.state.index].last_used_idx =
+ vui->vrings[msg.state.index].last_avail_idx =
+ vui->vrings[msg.state.index].used->idx;
+
+ /* tell driver that we don't want interrupts */
+ vui->vrings[msg.state.index].used->flags = VRING_USED_F_NO_NOTIFY;
+ break;
+
+ case VHOST_USER_SET_OWNER:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_OWNER", vui->hw_if_index);
+ break;
+
+ case VHOST_USER_RESET_OWNER:
+ DBG_SOCK ("if %d msg VHOST_USER_RESET_OWNER", vui->hw_if_index);
+ break;
+
+ case VHOST_USER_SET_VRING_CALL:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_CALL u64 %d",
+ vui->hw_if_index, msg.u64);
+
+ q = (u8) (msg.u64 & 0xFF);
+
+ /* if there is old fd, delete and close it */
+ if (vui->vrings[q].callfd_idx != ~0)
+ {
+ unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
+ vui->vrings[q].callfd_idx);
+ unix_file_del (&unix_main, uf);
+ vui->vrings[q].callfd_idx = ~0;
+ }
+
+ if (!(msg.u64 & 0x100))
+ {
+ if (number_of_fds != 1)
+ {
+	      DBG_SOCK ("More than one fd received!");
+ goto close_socket;
+ }
+
+ template.read_function = vhost_user_callfd_read_ready;
+ template.file_descriptor = fds[0];
+ template.private_data =
+ ((vui - vhost_user_main.vhost_user_interfaces) << 8) + q;
+ vui->vrings[q].callfd_idx = unix_file_add (&unix_main, &template);
+ }
+ else
+ vui->vrings[q].callfd_idx = ~0;
+ break;
+
+ case VHOST_USER_SET_VRING_KICK:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_KICK u64 %d",
+ vui->hw_if_index, msg.u64);
+
+ q = (u8) (msg.u64 & 0xFF);
+
+ if (vui->vrings[q].kickfd_idx != ~0)
+ {
+ unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
+ vui->vrings[q].kickfd_idx);
+ unix_file_del (&unix_main, uf);
+ vui->vrings[q].kickfd_idx = ~0;
+ }
+
+ if (!(msg.u64 & 0x100))
+ {
+ if (number_of_fds != 1)
+ {
+	      DBG_SOCK ("More than one fd received!");
+ goto close_socket;
+ }
+
+ template.read_function = vhost_user_kickfd_read_ready;
+ template.file_descriptor = fds[0];
+ template.private_data =
+ (((uword) (vui - vhost_user_main.vhost_user_interfaces)) << 8) +
+ q;
+ vui->vrings[q].kickfd_idx = unix_file_add (&unix_main, &template);
+ }
+ else
+ {
+ //When no kickfd is set, the queue is initialized as started
+ vui->vrings[q].kickfd_idx = ~0;
+ vui->vrings[q].started = 1;
+ }
+
+ break;
+
+ case VHOST_USER_SET_VRING_ERR:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_ERR u64 %d",
+ vui->hw_if_index, msg.u64);
+
+ q = (u8) (msg.u64 & 0xFF);
+
+ if (vui->vrings[q].errfd != -1)
+ close (vui->vrings[q].errfd);
+
+ if (!(msg.u64 & 0x100))
+ {
+ if (number_of_fds != 1)
+ goto close_socket;
+
+ vui->vrings[q].errfd = fds[0];
+ }
+ else
+ vui->vrings[q].errfd = -1;
+
+ break;
+
+ case VHOST_USER_SET_VRING_BASE:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_BASE idx %d num %d",
+ vui->hw_if_index, msg.state.index, msg.state.num);
+
+ vui->vrings[msg.state.index].last_avail_idx = msg.state.num;
+ break;
+
+ case VHOST_USER_GET_VRING_BASE:
+ DBG_SOCK ("if %d msg VHOST_USER_GET_VRING_BASE idx %d num %d",
+ vui->hw_if_index, msg.state.index, msg.state.num);
+
+ if (msg.state.index >= VHOST_VRING_MAX_N)
+ {
+ DBG_SOCK ("invalid vring index VHOST_USER_GET_VRING_BASE:"
+ " %d >= %d", msg.state.index, VHOST_VRING_MAX_N);
+ goto close_socket;
+ }
+
+ /* Spec says: Client must [...] stop ring upon receiving VHOST_USER_GET_VRING_BASE. */
+ vhost_user_vring_close (vui, msg.state.index);
+
+ msg.state.num = vui->vrings[msg.state.index].last_avail_idx;
+ msg.flags |= 4;
+ msg.size = sizeof (msg.state);
+ break;
+
+ case VHOST_USER_NONE:
+ DBG_SOCK ("if %d msg VHOST_USER_NONE", vui->hw_if_index);
+
+ break;
+
+ case VHOST_USER_SET_LOG_BASE:
+ {
+ DBG_SOCK ("if %d msg VHOST_USER_SET_LOG_BASE", vui->hw_if_index);
+
+ if (msg.size != sizeof (msg.log))
+ {
+ DBG_SOCK
+ ("invalid msg size for VHOST_USER_SET_LOG_BASE: %d instead of %d",
+ msg.size, sizeof (msg.log));
+ goto close_socket;
+ }
+
+ if (!
+ (vui->protocol_features & (1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD)))
+ {
+ DBG_SOCK
+ ("VHOST_USER_PROTOCOL_F_LOG_SHMFD not set but VHOST_USER_SET_LOG_BASE received");
+ goto close_socket;
+ }
+
+ fd = fds[0];
+      /* align size to the log fd's huge page size */
+ long page_sz = get_huge_page_size (fd);
+ ssize_t map_sz =
+ (msg.log.size + msg.log.offset + page_sz) & ~(page_sz - 1);
+
+ vui->log_base_addr = mmap (0, map_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+
+ DBG_SOCK
+ ("map log region addr 0 len 0x%lx off 0x%lx fd %d mapped 0x%lx",
+ map_sz, msg.log.offset, fd, vui->log_base_addr);
+
+ if (vui->log_base_addr == MAP_FAILED)
+ {
+ clib_warning ("failed to map memory. errno is %d", errno);
+ goto close_socket;
+ }
+
+ vui->log_base_addr += msg.log.offset;
+ vui->log_size = msg.log.size;
+
+ msg.flags |= 4;
+ msg.size = sizeof (msg.u64);
+
+ break;
+ }
+
+ case VHOST_USER_SET_LOG_FD:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_LOG_FD", vui->hw_if_index);
+
+ break;
+
+ case VHOST_USER_GET_PROTOCOL_FEATURES:
+ DBG_SOCK ("if %d msg VHOST_USER_GET_PROTOCOL_FEATURES",
+ vui->hw_if_index);
+
+ msg.flags |= 4;
+ msg.u64 = (1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |
+ (1 << VHOST_USER_PROTOCOL_F_MQ);
+ msg.size = sizeof (msg.u64);
+ break;
+
+ case VHOST_USER_SET_PROTOCOL_FEATURES:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_PROTOCOL_FEATURES features 0x%lx",
+ vui->hw_if_index, msg.u64);
+
+ vui->protocol_features = msg.u64;
+
+ break;
+
+ case VHOST_USER_GET_QUEUE_NUM:
+ DBG_SOCK ("if %d msg VHOST_USER_GET_QUEUE_NUM", vui->hw_if_index);
+ msg.flags |= 4;
+ msg.u64 = VHOST_VRING_MAX_N;
+ msg.size = sizeof (msg.u64);
+ break;
+
+ case VHOST_USER_SET_VRING_ENABLE:
+ DBG_SOCK ("if %d VHOST_USER_SET_VRING_ENABLE: %s queue %d",
+ vui->hw_if_index, msg.state.num ? "enable" : "disable",
+ msg.state.index);
+ if (msg.state.index >= VHOST_VRING_MAX_N)
+ {
+ DBG_SOCK ("invalid vring index VHOST_USER_SET_VRING_ENABLE:"
+ " %d >= %d", msg.state.index, VHOST_VRING_MAX_N);
+ goto close_socket;
+ }
+
+ vui->vrings[msg.state.index].enabled = msg.state.num;
+ break;
+
+ default:
+ DBG_SOCK ("unknown vhost-user message %d received. closing socket",
+ msg.request);
+ goto close_socket;
+ }
+
+ /* if we need to reply */
+ if (msg.flags & 4)
+ {
+ n =
+ send (uf->file_descriptor, &msg, VHOST_USER_MSG_HDR_SZ + msg.size, 0);
+ if (n != (msg.size + VHOST_USER_MSG_HDR_SZ))
+ {
+ DBG_SOCK ("could not send message response");
+ goto close_socket;
+ }
+ }
+
+ vhost_user_update_iface_state (vui);
+ vlib_worker_thread_barrier_release (vlib_get_main ());
+ return 0;
+
+close_socket:
+ vhost_user_if_disconnect (vui);
+ vhost_user_update_iface_state (vui);
+ vlib_worker_thread_barrier_release (vlib_get_main ());
+ return 0;
+}
+
+static clib_error_t *
+vhost_user_socket_error (unix_file_t * uf)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui =
+ pool_elt_at_index (vum->vhost_user_interfaces, uf->private_data);
+
+ DBG_SOCK ("socket error on if %d", vui->sw_if_index);
+ vlib_worker_thread_barrier_sync (vm);
+ vhost_user_if_disconnect (vui);
+ vhost_user_rx_thread_placement ();
+ vlib_worker_thread_barrier_release (vm);
+ return 0;
+}
+
+static clib_error_t *
+vhost_user_socksvr_accept_ready (unix_file_t * uf)
+{
+ int client_fd, client_len;
+ struct sockaddr_un client;
+ unix_file_t template = { 0 };
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+
+ vui = pool_elt_at_index (vum->vhost_user_interfaces, uf->private_data);
+
+ client_len = sizeof (client);
+ client_fd = accept (uf->file_descriptor,
+ (struct sockaddr *) &client,
+ (socklen_t *) & client_len);
+
+ if (client_fd < 0)
+ return clib_error_return_unix (0, "accept");
+
+ DBG_SOCK ("New client socket for vhost interface %d", vui->sw_if_index);
+ template.read_function = vhost_user_socket_read;
+ template.error_function = vhost_user_socket_error;
+ template.file_descriptor = client_fd;
+ template.private_data = vui - vhost_user_main.vhost_user_interfaces;
+ vui->unix_file_index = unix_file_add (&unix_main, &template);
+ return 0;
+}
+
+static clib_error_t *
+vhost_user_init (vlib_main_t * vm)
+{
+ clib_error_t *error;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ vlib_thread_registration_t *tr;
+ uword *p;
+
+ error = vlib_call_init_function (vm, ip4_init);
+ if (error)
+ return error;
+
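+  /* interrupt coalescing defaults: signal the guest after 32 pending
+   * frames, or after 1 ms at the latest (see vhost_user_send_call) */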
+ vum->coalesce_frames = 32;
+ vum->coalesce_time = 1e-3;
+
+ vec_validate (vum->cpus, tm->n_vlib_mains - 1);
+
+ vhost_cpu_t *cpu;
+ vec_foreach (cpu, vum->cpus)
+ {
+    /* This is actually not necessary, as vec_validate already zeroes it.
+     * Just keeping the loop here for later, because I am lazy. */
+ cpu->rx_buffers_len = 0;
+ }
+
+ /* find out which cpus will be used for input */
+ vum->input_cpu_first_index = 0;
+ vum->input_cpu_count = 1;
+ p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+ tr = p ? (vlib_thread_registration_t *) p[0] : 0;
+
+ if (tr && tr->count > 0)
+ {
+ vum->input_cpu_first_index = tr->first_index;
+ vum->input_cpu_count = tr->count;
+ }
+
+ vum->random = random_default_seed ();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (vhost_user_init);
+
+static clib_error_t *
+vhost_user_exit (vlib_main_t * vm)
+{
+ /* TODO cleanup */
+ return 0;
+}
+
+VLIB_MAIN_LOOP_EXIT_FUNCTION (vhost_user_exit);
+
+static u8 *
+format_vhost_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ CLIB_UNUSED (vnet_main_t * vnm) = vnet_get_main ();
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_trace_t *t = va_arg (*va, vhost_trace_t *);
+ vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
+ t->device_index);
+
+ vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, vui->sw_if_index);
+
+ uword indent = format_get_indent (s);
+
+ s = format (s, "%U %U queue %d\n", format_white_space, indent,
+ format_vnet_sw_interface_name, vnm, sw, t->qid);
+
+ s = format (s, "%U virtio flags:\n", format_white_space, indent);
+#define _(n,i,st) \
+ if (t->virtio_ring_flags & (1 << VIRTIO_TRACE_F_##n)) \
+ s = format (s, "%U %s %s\n", format_white_space, indent, #n, st);
+ foreach_virtio_trace_flags
+#undef _
+ s = format (s, "%U virtio_net_hdr first_desc_len %u\n",
+ format_white_space, indent, t->first_desc_len);
+
+ s = format (s, "%U flags 0x%02x gso_type %u\n",
+ format_white_space, indent,
+ t->hdr.hdr.flags, t->hdr.hdr.gso_type);
+
+ if (vui->virtio_net_hdr_sz == 12)
+ s = format (s, "%U num_buff %u",
+ format_white_space, indent, t->hdr.num_buffers);
+
+ return s;
+}
+
+void
+vhost_user_rx_trace (vhost_trace_t * t,
+ vhost_user_intf_t * vui, u16 qid,
+ vlib_buffer_t * b, vhost_user_vring_t * txvq)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ u32 qsz_mask = txvq->qsz - 1;
+ u32 last_avail_idx = txvq->last_avail_idx;
+ u32 desc_current = txvq->avail->ring[last_avail_idx & qsz_mask];
+ vring_desc_t *hdr_desc = 0;
+ virtio_net_hdr_mrg_rxbuf_t *hdr;
+ u32 hint = 0;
+
+ memset (t, 0, sizeof (*t));
+ t->device_index = vui - vum->vhost_user_interfaces;
+ t->qid = qid;
+
+ hdr_desc = &txvq->desc[desc_current];
+ if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
+ {
+ t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
+      /* The header is in the first descriptor here */
+ hdr_desc = map_guest_mem (vui, txvq->desc[desc_current].addr, &hint);
+ }
+ if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
+ {
+ t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
+ }
+ if (!(txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
+ !(txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
+ {
+ t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
+ }
+
+ t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
+
+ if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
+ {
+ t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
+ }
+ else
+ {
+ u32 len = vui->virtio_net_hdr_sz;
+ memcpy (&t->hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
+ }
+}
+
+static inline void
+vhost_user_send_call (vlib_main_t * vm, vhost_user_vring_t * vq)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
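+  /* signal the guest through the call eventfd;
+   * an eventfd write must be exactly 8 bytes, hence the u64 */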
+ u64 x = 1;
+ int fd = UNIX_GET_FD (vq->callfd_idx);
+ int rv __attribute__ ((unused));
+ /* TODO: pay attention to rv */
+ rv = write (fd, &x, sizeof (x));
+ vq->n_since_last_int = 0;
+ vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
+}
+
+static_always_inline u32
+vhost_user_input_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
+ u16 copy_len, u32 * map_hint)
+{
+ void *src0, *src1, *src2, *src3;
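+  /* software-pipelined copy loop: guest addresses are mapped and
+   * prefetched two entries ahead of the pair currently being copied */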
+ if (PREDICT_TRUE (copy_len >= 4))
+ {
+ if (PREDICT_FALSE (!(src2 = map_guest_mem (vui, cpy[0].src, map_hint))))
+ return 1;
+ if (PREDICT_FALSE (!(src3 = map_guest_mem (vui, cpy[1].src, map_hint))))
+ return 1;
+
+ while (PREDICT_TRUE (copy_len >= 4))
+ {
+ src0 = src2;
+ src1 = src3;
+
+ if (PREDICT_FALSE
+ (!(src2 = map_guest_mem (vui, cpy[2].src, map_hint))))
+ return 1;
+ if (PREDICT_FALSE
+ (!(src3 = map_guest_mem (vui, cpy[3].src, map_hint))))
+ return 1;
+
+ CLIB_PREFETCH (src2, 64, LOAD);
+ CLIB_PREFETCH (src3, 64, LOAD);
+
+ clib_memcpy ((void *) cpy[0].dst, src0, cpy[0].len);
+ clib_memcpy ((void *) cpy[1].dst, src1, cpy[1].len);
+ copy_len -= 2;
+ cpy += 2;
+ }
+ }
+ while (copy_len)
+ {
+ if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
+ return 1;
+ clib_memcpy ((void *) cpy->dst, src0, cpy->len);
+ copy_len -= 1;
+ cpy += 1;
+ }
+ return 0;
+}
+
+/**
+ * Try to discard packets from the tx ring (VPP RX path).
+ * Returns the number of discarded packets.
+ */
+u32
+vhost_user_rx_discard_packet (vlib_main_t * vm,
+ vhost_user_intf_t * vui,
+ vhost_user_vring_t * txvq, u32 discard_max)
+{
+ /*
+ * On the RX side, each packet corresponds to one descriptor
+   * (whether it is a simple descriptor, a chained one, or an indirect one).
+ * Therefore, discarding a packet is like discarding a descriptor.
+ */
+ u32 discarded_packets = 0;
+ u32 avail_idx = txvq->avail->idx;
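+  /* qsz is always a power of 2, so qsz - 1 works as an index mask */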
+ u16 qsz_mask = txvq->qsz - 1;
+ while (discarded_packets != discard_max)
+ {
+ if (avail_idx == txvq->last_avail_idx)
+ goto out;
+
+ u16 desc_chain_head =
+ txvq->avail->ring[txvq->last_avail_idx & qsz_mask];
+ txvq->last_avail_idx++;
+ txvq->used->ring[txvq->last_used_idx & qsz_mask].id = desc_chain_head;
+ txvq->used->ring[txvq->last_used_idx & qsz_mask].len = 0;
+ vhost_user_log_dirty_ring (vui, txvq,
+ ring[txvq->last_used_idx & qsz_mask]);
+ txvq->last_used_idx++;
+ discarded_packets++;
+ }
+
+out:
+ CLIB_MEMORY_BARRIER ();
+ txvq->used->idx = txvq->last_used_idx;
+ vhost_user_log_dirty_ring (vui, txvq, idx);
+ return discarded_packets;
+}
+
+/*
+ * In case of overflow, we need to rewind the array of allocated buffers.
+ */
+static void
+vhost_user_input_rewind_buffers (vlib_main_t * vm,
+ vhost_cpu_t * cpu, vlib_buffer_t * b_head)
+{
+ u32 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
+ vlib_buffer_t *b_current = vlib_get_buffer (vm, bi_current);
+ b_current->current_length = 0;
+ b_current->flags = 0;
+ while (b_current != b_head)
+ {
+ cpu->rx_buffers_len++;
+ bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
+ b_current = vlib_get_buffer (vm, bi_current);
+ b_current->current_length = 0;
+ b_current->flags = 0;
+ }
+}
+
+static u32
+vhost_user_if_input (vlib_main_t * vm,
+ vhost_user_main_t * vum,
+ vhost_user_intf_t * vui,
+ u16 qid, vlib_node_runtime_t * node)
+{
+ vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
+ u16 n_rx_packets = 0;
+ u32 n_rx_bytes = 0;
+ u16 n_left;
+ u32 n_left_to_next, *to_next;
+ u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ u32 n_trace = vlib_get_trace_count (vm, node);
+ u16 qsz_mask;
+ u32 map_hint = 0;
+ u16 cpu_index = os_get_cpu_number ();
+ u16 copy_len = 0;
+
+ {
+    /* do we have pending interrupts? */
+ vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
+ f64 now = vlib_time_now (vm);
+
+ if ((txvq->n_since_last_int) && (txvq->int_deadline < now))
+ vhost_user_send_call (vm, txvq);
+
+ if ((rxvq->n_since_last_int) && (rxvq->int_deadline < now))
+ vhost_user_send_call (vm, rxvq);
+ }
+
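+  /* only bit 0 (VRING_AVAIL_F_NO_INTERRUPT) is defined in avail->flags;
+   * any other bit set suggests the ring is not in a usable state */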
+ if (PREDICT_FALSE (txvq->avail->flags & 0xFFFE))
+ return 0;
+
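+  /* ring indices are free-running u16 counters,
+   * so this subtraction wraps around correctly */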
+ n_left = (u16) (txvq->avail->idx - txvq->last_avail_idx);
+
+ /* nothing to do */
+ if (PREDICT_FALSE (n_left == 0))
+ return 0;
+
+ if (PREDICT_FALSE (!vui->admin_up || !(txvq->enabled)))
+ {
+ /*
+ * Discard input packet if interface is admin down or vring is not
+ * enabled.
+ * "For example, for a networking device, in the disabled state
+ * client must not supply any new RX packets, but must process
+ * and discard any TX packets."
+ */
+ vhost_user_rx_discard_packet (vm, vui, txvq,
+ VHOST_USER_DOWN_DISCARD_COUNT);
+ return 0;
+ }
+
+ if (PREDICT_FALSE (n_left == txvq->qsz))
+ {
+ /*
+ * Informational error logging when VPP is not
+ * receiving packets fast enough.
+ */
+ vlib_error_count (vm, node->node_index,
+ VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
+ }
+
+ qsz_mask = txvq->qsz - 1;
+
+ if (n_left > VLIB_FRAME_SIZE)
+ n_left = VLIB_FRAME_SIZE;
+
+ /*
+ * For small packets (<2kB), we will not need more than one vlib buffer
+   * per packet. In case packets are bigger, we will just yield at some point
+   * in the loop and come back later. This is not an issue as for big packets,
+   * the processing cost really comes from the memory copy.
+ */
+ if (PREDICT_FALSE (vum->cpus[cpu_index].rx_buffers_len < n_left + 1))
+ {
+ u32 curr_len = vum->cpus[cpu_index].rx_buffers_len;
+ vum->cpus[cpu_index].rx_buffers_len +=
+ vlib_buffer_alloc_from_free_list (vm,
+ vum->cpus[cpu_index].rx_buffers +
+ curr_len,
+ VHOST_USER_RX_BUFFERS_N - curr_len,
+ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+
+ if (PREDICT_FALSE
+ (vum->cpus[cpu_index].rx_buffers_len <
+ VHOST_USER_RX_BUFFER_STARVATION))
+ {
+ /* In case of buffer starvation, discard some packets from the queue
+ * and log the event.
+ * We keep doing best effort for the remaining packets. */
+ u32 flush = (n_left + 1 > vum->cpus[cpu_index].rx_buffers_len) ?
+ n_left + 1 - vum->cpus[cpu_index].rx_buffers_len : 1;
+ flush = vhost_user_rx_discard_packet (vm, vui, txvq, flush);
+
+ n_left -= flush;
+ vlib_increment_simple_counter (vnet_main.
+ interface_main.sw_if_counters +
+ VNET_INTERFACE_COUNTER_DROP,
+ os_get_cpu_number (),
+ vui->sw_if_index, flush);
+
+ vlib_error_count (vm, vhost_user_input_node.index,
+ VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
+ }
+ }
+
+ while (n_left > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *b_head, *b_current;
+ u32 bi_current;
+ u16 desc_current;
+ u32 desc_data_offset;
+ vring_desc_t *desc_table = txvq->desc;
+
+ if (PREDICT_FALSE (vum->cpus[cpu_index].rx_buffers_len <= 1))
+ {
+ /* Not enough rx_buffers
+	       * Note: We yield on 1 so we don't need to do an additional
+ * check for the next buffer prefetch.
+ */
+ n_left = 0;
+ break;
+ }
+
+ desc_current = txvq->avail->ring[txvq->last_avail_idx & qsz_mask];
+ vum->cpus[cpu_index].rx_buffers_len--;
+ bi_current = (vum->cpus[cpu_index].rx_buffers)
+ [vum->cpus[cpu_index].rx_buffers_len];
+ b_head = b_current = vlib_get_buffer (vm, bi_current);
+ to_next[0] = bi_current; //We do that now so we can forget about bi_current
+ to_next++;
+ n_left_to_next--;
+
+ vlib_prefetch_buffer_with_index (vm,
+ (vum->cpus[cpu_index].rx_buffers)
+ [vum->cpus[cpu_index].
+ rx_buffers_len - 1], LOAD);
+
+ /* Just preset the used descriptor id and length for later */
+ txvq->used->ring[txvq->last_used_idx & qsz_mask].id = desc_current;
+ txvq->used->ring[txvq->last_used_idx & qsz_mask].len = 0;
+ vhost_user_log_dirty_ring (vui, txvq,
+ ring[txvq->last_used_idx & qsz_mask]);
+
+ /* The buffer should already be initialized */
+ b_head->total_length_not_including_first_buffer = 0;
+ b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
+
+ if (PREDICT_FALSE (n_trace))
+ {
+ //TODO: next_index is not exactly known at that point
+ vlib_trace_buffer (vm, node, next_index, b_head,
+ /* follow_chain */ 0);
+ vhost_trace_t *t0 =
+ vlib_add_trace (vm, node, b_head, sizeof (t0[0]));
+ vhost_user_rx_trace (t0, vui, qid, b_head, txvq);
+ n_trace--;
+ vlib_set_trace_count (vm, node, n_trace);
+ }
+
+	  /* This depends on the setup but is very consistent,
+	   * so the CPU branch predictor should do a pretty good job
+	   * at optimizing the decision. */
+ if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
+ {
+ desc_table = map_guest_mem (vui, txvq->desc[desc_current].addr,
+ &map_hint);
+ desc_current = 0;
+ if (PREDICT_FALSE (desc_table == 0))
+ {
+		      //FIXME: Handle the error by shutting down the queue
+ goto out;
+ }
+ }
+
+ if (PREDICT_TRUE (vui->is_any_layout) ||
+ (!(desc_table[desc_current].flags & VIRTQ_DESC_F_NEXT)))
+ {
+ /* ANYLAYOUT or single buffer */
+ desc_data_offset = vui->virtio_net_hdr_sz;
+ }
+ else
+ {
+ /* CSR case without ANYLAYOUT, skip 1st buffer */
+ desc_data_offset = desc_table[desc_current].len;
+ }
+
+ while (1)
+ {
+ /* Get more input if necessary. Or end of packet. */
+ if (desc_data_offset == desc_table[desc_current].len)
+ {
+ if (PREDICT_FALSE (desc_table[desc_current].flags &
+ VIRTQ_DESC_F_NEXT))
+ {
+ desc_current = desc_table[desc_current].next;
+ desc_data_offset = 0;
+ }
+ else
+ {
+ goto out;
+ }
+ }
+
+ /* Get more output if necessary. Or end of packet. */
+ if (PREDICT_FALSE
+ (b_current->current_length == VLIB_BUFFER_DATA_SIZE))
+ {
+ if (PREDICT_FALSE
+ (vum->cpus[cpu_index].rx_buffers_len == 0))
+ {
+ /*
+		     * Check whether there are any buffers left.
+ * If not, just rewind the used buffers and stop.
+ * Note: Scheduled copies are not cancelled. This is
+ * not an issue as they would still be valid. Useless,
+ * but valid.
+ */
+ vhost_user_input_rewind_buffers (vm,
+ &vum->cpus[cpu_index],
+ b_head);
+ n_left = 0;
+ goto stop;
+ }
+
+ /* Get next output */
+ vum->cpus[cpu_index].rx_buffers_len--;
+ u32 bi_next =
+ (vum->cpus[cpu_index].rx_buffers)[vum->cpus
+ [cpu_index].rx_buffers_len];
+ b_current->next_buffer = bi_next;
+ b_current->flags |= VLIB_BUFFER_NEXT_PRESENT;
+ bi_current = bi_next;
+ b_current = vlib_get_buffer (vm, bi_current);
+ }
+
+	      /* Prepare a copy order to be executed later for the data */
+ vhost_copy_t *cpy = &vum->cpus[cpu_index].copy[copy_len];
+ copy_len++;
+ u32 desc_data_l =
+ desc_table[desc_current].len - desc_data_offset;
+ cpy->len = VLIB_BUFFER_DATA_SIZE - b_current->current_length;
+ cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
+ cpy->dst = (uword) vlib_buffer_get_current (b_current);
+ cpy->src = desc_table[desc_current].addr + desc_data_offset;
+
+ desc_data_offset += cpy->len;
+
+ b_current->current_length += cpy->len;
+ b_head->total_length_not_including_first_buffer += cpy->len;
+ }
+
+ out:
+ CLIB_PREFETCH (&n_left, sizeof (n_left), LOAD);
+
+ n_rx_bytes += b_head->total_length_not_including_first_buffer;
+ n_rx_packets++;
+
+ b_head->total_length_not_including_first_buffer -=
+ b_head->current_length;
+
+ /* consume the descriptor and return it as used */
+ txvq->last_avail_idx++;
+ txvq->last_used_idx++;
+
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);
+
+ vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
+ vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ b_head->error = 0;
+
+ {
+ u32 next0 = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+
+ /* redirect if feature path enabled */
+ vnet_feature_start_device_input_x1 (vui->sw_if_index, &next0,
+ b_head, 0);
+
+ u32 bi = to_next[-1]; //Cannot use to_next[-1] in the macro
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi, next0);
+ }
+
+ n_left--;
+
+ /*
+ * Although separating memory copies from virtio ring parsing
+	   * is beneficial, it pays to perform the copies from time
+ * to time in order to free some space in the ring.
+ */
+ if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
+ {
+ if (PREDICT_FALSE
+ (vhost_user_input_copy (vui, vum->cpus[cpu_index].copy,
+ copy_len, &map_hint)))
+ {
+ clib_warning
+ ("Memory mapping error on interface hw_if_index=%d "
+ "(Shutting down - Switch interface down and up to restart)",
+ vui->hw_if_index);
+ vui->admin_up = 0;
+ copy_len = 0;
+ break;
+ }
+ copy_len = 0;
+
+ /* give buffers back to driver */
+ CLIB_MEMORY_BARRIER ();
+ txvq->used->idx = txvq->last_used_idx;
+ vhost_user_log_dirty_ring (vui, txvq, idx);
+ }
+ }
+ stop:
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ /* Do the memory copies */
+ if (PREDICT_FALSE
+ (vhost_user_input_copy (vui, vum->cpus[cpu_index].copy,
+ copy_len, &map_hint)))
+ {
+ clib_warning ("Memory mapping error on interface hw_if_index=%d "
+ "(Shutting down - Switch interface down and up to restart)",
+ vui->hw_if_index);
+ vui->admin_up = 0;
+ }
+
+ /* give buffers back to driver */
+ CLIB_MEMORY_BARRIER ();
+ txvq->used->idx = txvq->last_used_idx;
+ vhost_user_log_dirty_ring (vui, txvq, idx);
+
+ /* interrupt (call) handling */
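+  /* avail->flags bit 0 is VRING_AVAIL_F_NO_INTERRUPT:
+   * the guest sets it while polling to suppress interrupts */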
+ if ((txvq->callfd_idx != ~0) && !(txvq->avail->flags & 1))
+ {
+ txvq->n_since_last_int += n_rx_packets;
+
+ if (txvq->n_since_last_int > vum->coalesce_frames)
+ vhost_user_send_call (vm, txvq);
+ }
+
+ /* increase rx counters */
+ vlib_increment_combined_counter
+ (vnet_main.interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ os_get_cpu_number (), vui->sw_if_index, n_rx_packets, n_rx_bytes);
+
+ return n_rx_packets;
+}
+
+static uword
+vhost_user_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * f)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ uword n_rx_packets = 0;
+ u32 cpu_index = os_get_cpu_number ();
+
+
+ vhost_iface_and_queue_t *vhiq;
+ vec_foreach (vhiq, vum->cpus[cpu_index].rx_queues)
+ {
+ vhost_user_intf_t *vui =
+ &vum->vhost_user_interfaces[vhiq->vhost_iface_index];
+ n_rx_packets += vhost_user_if_input (vm, vum, vui, vhiq->qid, node);
+ }
+
+ return n_rx_packets;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (vhost_user_input_node) = {
+ .function = vhost_user_input,
+ .type = VLIB_NODE_TYPE_INPUT,
+ .name = "vhost-user-input",
+ .sibling_of = "device-input",
+
+ /* Will be enabled if/when hardware is detected. */
+ .state = VLIB_NODE_STATE_DISABLED,
+
+ .format_buffer = format_ethernet_header_with_length,
+ .format_trace = format_vhost_trace,
+
+ .n_errors = VHOST_USER_INPUT_FUNC_N_ERROR,
+ .error_strings = vhost_user_input_func_error_strings,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (vhost_user_input_node, vhost_user_input)
+/* *INDENT-ON* */
+
+
+void
+vhost_user_tx_trace (vhost_trace_t * t,
+ vhost_user_intf_t * vui, u16 qid,
+ vlib_buffer_t * b, vhost_user_vring_t * rxvq)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ u32 qsz_mask = rxvq->qsz - 1;
+ u32 last_avail_idx = rxvq->last_avail_idx;
+ u32 desc_current = rxvq->avail->ring[last_avail_idx & qsz_mask];
+ vring_desc_t *hdr_desc = 0;
+ u32 hint = 0;
+
+ memset (t, 0, sizeof (*t));
+ t->device_index = vui - vum->vhost_user_interfaces;
+ t->qid = qid;
+
+ hdr_desc = &rxvq->desc[desc_current];
+ if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
+ {
+ t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
+      /* The header is in the first descriptor here */
+ hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
+ }
+ if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
+ {
+ t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
+ }
+ if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
+ !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
+ {
+ t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
+ }
+
+ t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
+}
+
+static_always_inline u32
+vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
+ u16 copy_len, u32 * map_hint)
+{
+ void *dst0, *dst1, *dst2, *dst3;
+ if (PREDICT_TRUE (copy_len >= 4))
+ {
+ if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
+ return 1;
+ if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
+ return 1;
+ while (PREDICT_TRUE (copy_len >= 4))
+ {
+ dst0 = dst2;
+ dst1 = dst3;
+
+ if (PREDICT_FALSE
+ (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
+ return 1;
+ if (PREDICT_FALSE
+ (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
+ return 1;
+
+ CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
+ CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);
+
+ clib_memcpy (dst0, (void *) cpy[0].src, cpy[0].len);
+ clib_memcpy (dst1, (void *) cpy[1].src, cpy[1].len);
+
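+	  /* record the written guest pages in the live-migration dirty log
+	   * (effectively a no-op unless logging was negotiated) */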
+ vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
+ vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
+ copy_len -= 2;
+ cpy += 2;
+ }
+ }
+ while (copy_len)
+ {
+ if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
+ return 1;
+ clib_memcpy (dst0, (void *) cpy->src, cpy->len);
+ vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
+ copy_len -= 1;
+ cpy += 1;
+ }
+ return 0;
+}
+
+
+static uword
+vhost_user_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 *buffers = vlib_frame_args (frame);
+ u32 n_left = frame->n_vectors;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ vhost_user_intf_t *vui =
+ pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
+ u32 qid = ~0;
+ vhost_user_vring_t *rxvq;
+ u16 qsz_mask;
+ u8 error;
+ u32 cpu_index = os_get_cpu_number ();
+ u32 map_hint = 0;
+ u8 retry = 8;
+ u16 copy_len;
+ u16 tx_headers_len;
+
+ if (PREDICT_FALSE (!vui->admin_up))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_DOWN;
+ goto done3;
+ }
+
+ if (PREDICT_FALSE (!vui->is_up))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
+ goto done3;
+ }
+
+ qid =
+ VHOST_VRING_IDX_RX (*vec_elt_at_index
+ (vui->per_cpu_tx_qid, os_get_cpu_number ()));
+ rxvq = &vui->vrings[qid];
+ if (PREDICT_FALSE (vui->use_tx_spinlock))
+ vhost_user_vring_lock (vui, qid);
+
+ qsz_mask = rxvq->qsz - 1; /* qsz is always power of 2 */
+
+retry:
+ error = VHOST_USER_TX_FUNC_ERROR_NONE;
+ tx_headers_len = 0;
+ copy_len = 0;
+ while (n_left > 0)
+ {
+ vlib_buffer_t *b0, *current_b0;
+ u16 desc_head, desc_index, desc_len;
+ vring_desc_t *desc_table;
+ uword buffer_map_addr;
+ u32 buffer_len;
+ u16 bytes_left;
+
+ if (PREDICT_TRUE (n_left > 1))
+ vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);
+
+ b0 = vlib_get_buffer (vm, buffers[0]);
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vum->cpus[cpu_index].current_trace =
+ vlib_add_trace (vm, node, b0,
+ sizeof (*vum->cpus[cpu_index].current_trace));
+ vhost_user_tx_trace (vum->cpus[cpu_index].current_trace,
+ vui, qid / 2, b0, rxvq);
+ }
+
+ if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
+ goto done;
+ }
+
+ desc_table = rxvq->desc;
+ desc_head = desc_index =
+ rxvq->avail->ring[rxvq->last_avail_idx & qsz_mask];
+
+      /* Go deeper in case of an indirect descriptor.
+       * I don't know of any driver providing indirect descriptors for RX. */
+ if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
+ {
+ if (PREDICT_FALSE
+ (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
+ goto done;
+ }
+ if (PREDICT_FALSE
+ (!(desc_table =
+ map_guest_mem (vui, rxvq->desc[desc_index].addr,
+ &map_hint))))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
+ goto done;
+ }
+ desc_index = 0;
+ }
+
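+      /* virtio_net_hdr_sz is 12 (virtio_net_hdr_mrg_rxbuf_t) when
+       * VIRTIO_NET_F_MRG_RXBUF was negotiated, 10 otherwise */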
+ desc_len = vui->virtio_net_hdr_sz;
+ buffer_map_addr = desc_table[desc_index].addr;
+ buffer_len = desc_table[desc_index].len;
+
+ {
+ // Get a header from the header array
+ virtio_net_hdr_mrg_rxbuf_t *hdr =
+ &vum->cpus[cpu_index].tx_headers[tx_headers_len];
+ tx_headers_len++;
+ hdr->hdr.flags = 0;
+ hdr->hdr.gso_type = 0;
+ hdr->num_buffers = 1; //This is local, no need to check
+
+	    // Prepare a copy order to be executed later for the header
+ vhost_copy_t *cpy = &vum->cpus[cpu_index].copy[copy_len];
+ copy_len++;
+ cpy->len = vui->virtio_net_hdr_sz;
+ cpy->dst = buffer_map_addr;
+ cpy->src = (uword) hdr;
+ }
+
+ buffer_map_addr += vui->virtio_net_hdr_sz;
+ buffer_len -= vui->virtio_net_hdr_sz;
+ bytes_left = b0->current_length;
+ current_b0 = b0;
+ while (1)
+ {
+ if (buffer_len == 0)
+ { //Get new output
+ if (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT)
+ {
+ //Next one is chained
+ desc_index = desc_table[desc_index].next;
+ buffer_map_addr = desc_table[desc_index].addr;
+ buffer_len = desc_table[desc_index].len;
+ }
+ else if (vui->virtio_net_hdr_sz == 12) //MRG is available
+ {
+ virtio_net_hdr_mrg_rxbuf_t *hdr =
+ &vum->cpus[cpu_index].tx_headers[tx_headers_len - 1];
+
+ //Move from available to used buffer
+ rxvq->used->ring[rxvq->last_used_idx & qsz_mask].id =
+ desc_head;
+ rxvq->used->ring[rxvq->last_used_idx & qsz_mask].len =
+ desc_len;
+ vhost_user_log_dirty_ring (vui, rxvq,
+ ring[rxvq->last_used_idx &
+ qsz_mask]);
+
+ rxvq->last_avail_idx++;
+ rxvq->last_used_idx++;
+ hdr->num_buffers++;
+ desc_len = 0;
+
+ if (PREDICT_FALSE
+ (rxvq->last_avail_idx == rxvq->avail->idx))
+ {
+ //Dequeue queued descriptors for this packet
+ rxvq->last_used_idx -= hdr->num_buffers - 1;
+ rxvq->last_avail_idx -= hdr->num_buffers - 1;
+ error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
+ goto done;
+ }
+
+ desc_table = rxvq->desc;
+ desc_head = desc_index =
+ rxvq->avail->ring[rxvq->last_avail_idx & qsz_mask];
+ if (PREDICT_FALSE
+ (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
+ {
+		      //It is seriously unlikely that a driver will put an
+		      //indirect descriptor after a non-indirect one.
+ if (PREDICT_FALSE
+ (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
+ goto done;
+ }
+ if (PREDICT_FALSE
+ (!(desc_table =
+ map_guest_mem (vui,
+ rxvq->desc[desc_index].addr,
+ &map_hint))))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
+ goto done;
+ }
+ desc_index = 0;
+ }
+ buffer_map_addr = desc_table[desc_index].addr;
+ buffer_len = desc_table[desc_index].len;
+ }
+ else
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
+ goto done;
+ }
+ }
+
+ {
+ vhost_copy_t *cpy = &vum->cpus[cpu_index].copy[copy_len];
+ copy_len++;
+ cpy->len = bytes_left;
+ cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
+ cpy->dst = buffer_map_addr;
+ cpy->src = (uword) vlib_buffer_get_current (current_b0) +
+ current_b0->current_length - bytes_left;
+
+ bytes_left -= cpy->len;
+ buffer_len -= cpy->len;
+ buffer_map_addr += cpy->len;
+ desc_len += cpy->len;
+
+ CLIB_PREFETCH (&rxvq->desc, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+	    // Check if the vlib buffer has more data. If not, get more or break.
+ if (PREDICT_TRUE (!bytes_left))
+ {
+ if (PREDICT_FALSE
+ (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
+ bytes_left = current_b0->current_length;
+ }
+ else
+ {
+ //End of packet
+ break;
+ }
+ }
+ }
+
+ //Move from available to used ring
+ rxvq->used->ring[rxvq->last_used_idx & qsz_mask].id = desc_head;
+ rxvq->used->ring[rxvq->last_used_idx & qsz_mask].len = desc_len;
+ vhost_user_log_dirty_ring (vui, rxvq,
+ ring[rxvq->last_used_idx & qsz_mask]);
+ rxvq->last_avail_idx++;
+ rxvq->last_used_idx++;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vum->cpus[cpu_index].current_trace->hdr =
+ vum->cpus[cpu_index].tx_headers[tx_headers_len - 1];
+ }
+
+	  n_left--;	//Decremented last so error counting stays correct when 'goto done' is invoked
+ buffers++;
+ }
+
+done:
+ //Do the memory copies
+ if (PREDICT_FALSE
+ (vhost_user_tx_copy (vui, vum->cpus[cpu_index].copy,
+ copy_len, &map_hint)))
+ {
+ clib_warning ("Memory mapping error on interface hw_if_index=%d "
+ "(Shutting down - Switch interface down and up to restart)",
+ vui->hw_if_index);
+ vui->admin_up = 0;
+ }
+
+ CLIB_MEMORY_BARRIER ();
+ rxvq->used->idx = rxvq->last_used_idx;
+ vhost_user_log_dirty_ring (vui, rxvq, idx);
+
+ /*
+ * When n_left is set, error is always set to something too.
+   * In case the error is due to a lack of remaining buffers, we go back up
+   * and retry.
+   * The idea is that it is better to waste some time on packets
+   * that have already been processed than to drop them and get
+   * fresh packets with a good likelihood that they will be dropped too.
+   * This technique also gives the VM driver more time to pick up packets.
+   * In case the traffic flows from physical to virtual interfaces, this
+   * technique will end up leveraging the physical NIC buffers in order to
+   * absorb the VM's CPU jitter.
+ */
+ if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
+ {
+ retry--;
+ goto retry;
+ }
+
+ /* interrupt (call) handling */
+ if ((rxvq->callfd_idx != ~0) && !(rxvq->avail->flags & 1))
+ {
+ rxvq->n_since_last_int += frame->n_vectors - n_left;
+
+ if (rxvq->n_since_last_int > vum->coalesce_frames)
+ vhost_user_send_call (vm, rxvq);
+ }
+
+ vhost_user_vring_unlock (vui, qid);
+
+done3:
+ if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
+ {
+ vlib_error_count (vm, node->node_index, error, n_left);
+ vlib_increment_simple_counter
+ (vnet_main.interface_main.sw_if_counters
+ + VNET_INTERFACE_COUNTER_DROP,
+ os_get_cpu_number (), vui->sw_if_index, n_left);
+ }
+
+ vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
+ return frame->n_vectors;
+}
+
+static clib_error_t *
+vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
+ u32 flags)
+{
+ vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
+ uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui =
+ pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
+
+ vui->admin_up = is_up;
+
+ if (is_up)
+ vnet_hw_interface_set_flags (vnm, vui->hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+
+ return /* no error */ 0;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (vhost_user_dev_class,static) = {
+ .name = "vhost-user",
+ .tx_function = vhost_user_tx,
+ .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
+ .tx_function_error_strings = vhost_user_tx_func_error_strings,
+ .format_device_name = format_vhost_user_interface_name,
+ .name_renumber = vhost_user_name_renumber,
+ .admin_up_down_function = vhost_user_interface_admin_up_down,
+ .format_tx_trace = format_vhost_trace,
+};
+
+VLIB_DEVICE_TX_FUNCTION_MULTIARCH (vhost_user_dev_class,
+ vhost_user_tx)
+/* *INDENT-ON* */
+
+static uword
+vhost_user_process (vlib_main_t * vm,
+ vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ struct sockaddr_un sun;
+ int sockfd;
+ unix_file_t template = { 0 };
+ f64 timeout = 3153600000.0 /* 100 years */ ;
+ uword *event_data = 0;
+
+ sockfd = socket (AF_UNIX, SOCK_STREAM, 0);
+ sun.sun_family = AF_UNIX;
+ template.read_function = vhost_user_socket_read;
+ template.error_function = vhost_user_socket_error;
+
+ if (sockfd < 0)
+ return 0;
+
+ while (1)
+ {
+ vlib_process_wait_for_event_or_clock (vm, timeout);
+ vlib_process_get_events (vm, &event_data);
+ vec_reset_length (event_data);
+
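+      /* after the first wakeup, poll every 3 seconds to retry pending
+       * client connections and to health-check connected sockets */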
+ timeout = 3.0;
+
+ /* *INDENT-OFF* */
+ pool_foreach (vui, vum->vhost_user_interfaces, {
+
+ if (vui->unix_server_index == ~0) { //Nothing to do for server sockets
+ if (vui->unix_file_index == ~0)
+ {
+ /* try to connect */
+ strncpy (sun.sun_path, (char *) vui->sock_filename,
+ sizeof (sun.sun_path) - 1);
+
+ if (connect (sockfd, (struct sockaddr *) &sun,
+ sizeof (struct sockaddr_un)) == 0)
+ {
+ vui->sock_errno = 0;
+ template.file_descriptor = sockfd;
+ template.private_data =
+ vui - vhost_user_main.vhost_user_interfaces;
+ vui->unix_file_index = unix_file_add (&unix_main, &template);
+
+ //Re-open for next connect
+ if ((sockfd = socket (AF_UNIX, SOCK_STREAM, 0)) < 0) {
+ clib_warning("Critical: Could not open unix socket");
+ return 0;
+ }
+ }
+ else
+ {
+ vui->sock_errno = errno;
+ }
+ }
+ else
+ {
+ /* check if socket is alive */
+ int error = 0;
+ socklen_t len = sizeof (error);
+ int fd = UNIX_GET_FD(vui->unix_file_index);
+ int retval =
+ getsockopt (fd, SOL_SOCKET, SO_ERROR, &error, &len);
+
+ if (retval)
+ {
+ DBG_SOCK ("getsockopt returned %d", retval);
+ vhost_user_if_disconnect (vui);
+ }
+ }
+ }
+ });
+ /* *INDENT-ON* */
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (vhost_user_process_node,static) = {
+ .function = vhost_user_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "vhost-user-process",
+};
+/* *INDENT-ON* */
+
+/**
+ * Disable and reset the interface structure.
+ * It can then either be initialized again, or removed from the pool of
+ * used interfaces.
+ */
+static void
+vhost_user_term_if (vhost_user_intf_t * vui)
+{
+ // Delete configured thread pinning
+ vec_reset_length (vui->workers);
+ // disconnect interface sockets
+ vhost_user_if_disconnect (vui);
+ vhost_user_update_iface_state (vui);
+
+ if (vui->unix_server_index != ~0)
+ {
+ //Close server socket
+ unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
+ vui->unix_server_index);
+ unix_file_del (&unix_main, uf);
+ vui->unix_server_index = ~0;
+ }
+}
+
+int
+vhost_user_delete_if (vnet_main_t * vnm, vlib_main_t * vm, u32 sw_if_index)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ int rv = 0;
+ vnet_hw_interface_t *hwif;
+
+ if (!(hwif = vnet_get_sup_hw_interface (vnm, sw_if_index)) ||
+ hwif->dev_class_index != vhost_user_dev_class.index)
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+
+ DBG_SOCK ("Deleting vhost-user interface %s (instance %d)",
+ hwif->name, hwif->dev_instance);
+
+ vui = pool_elt_at_index (vum->vhost_user_interfaces, hwif->dev_instance);
+
+ // Disable and reset interface
+ vhost_user_term_if (vui);
+
+ // Back to pool
+ pool_put (vum->vhost_user_interfaces, vui);
+
+ // Reset renumbered iface
+ if (hwif->dev_instance <
+ vec_len (vum->show_dev_instance_by_real_dev_instance))
+ vum->show_dev_instance_by_real_dev_instance[hwif->dev_instance] = ~0;
+
+ // Delete ethernet interface
+ ethernet_delete_interface (vnm, vui->hw_if_index);
+ return rv;
+}
+
+/**
+ * Open server unix socket on specified sock_filename.
+ */
+static int
+vhost_user_init_server_sock (const char *sock_filename, int *sock_fd)
+{
+ int rv = 0;
+ struct sockaddr_un un = { };
+ int fd;
+ /* create listening socket */
+ if ((fd = socket (AF_UNIX, SOCK_STREAM, 0)) < 0)
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+
+ un.sun_family = AF_UNIX;
+ strncpy ((char *) un.sun_path, (char *) sock_filename,
+ sizeof (un.sun_path) - 1);
+
+ /* remove if exists */
+ unlink ((char *) sock_filename);
+
+ if (bind (fd, (struct sockaddr *) &un, sizeof (un)) == -1)
+ {
+ rv = VNET_API_ERROR_SYSCALL_ERROR_2;
+ goto error;
+ }
+
+ if (listen (fd, 1) == -1)
+ {
+ rv = VNET_API_ERROR_SYSCALL_ERROR_3;
+ goto error;
+ }
+
+ *sock_fd = fd;
+ return 0;
+
+error:
+ close (fd);
+ return rv;
+}
+
+/**
+ * Create ethernet interface for vhost user interface.
+ */
+static void
+vhost_user_create_ethernet (vnet_main_t * vnm, vlib_main_t * vm,
+ vhost_user_intf_t * vui, u8 * hwaddress)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ u8 hwaddr[6];
+ clib_error_t *error;
+
+ /* create hw and sw interface */
+ if (hwaddress)
+ {
+ clib_memcpy (hwaddr, hwaddress, 6);
+ }
+ else
+ {
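+      /* generate a locally administered unicast MAC: 02:fe:xx:xx:xx:xx */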
+ random_u32 (&vum->random);
+ clib_memcpy (hwaddr + 2, &vum->random, sizeof (vum->random));
+ hwaddr[0] = 2;
+ hwaddr[1] = 0xfe;
+ }
+
+ error = ethernet_register_interface
+ (vnm,
+ vhost_user_dev_class.index,
+ vui - vum->vhost_user_interfaces /* device instance */ ,
+ hwaddr /* ethernet address */ ,
+ &vui->hw_if_index, 0 /* flag change */ );
+
+ if (error)
+ clib_error_report (error);
+
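+  /* default both RX and TX to a 9000-byte (jumbo frame) L3 MTU */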
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, vui->hw_if_index);
+ hi->max_l3_packet_bytes[VLIB_RX] = hi->max_l3_packet_bytes[VLIB_TX] = 9000;
+}
+
+/*
+ * Initialize vui with specified attributes
+ */
+static void
+vhost_user_vui_init (vnet_main_t * vnm,
+ vhost_user_intf_t * vui,
+ int server_sock_fd,
+ const char *sock_filename,
+ u64 feature_mask, u32 * sw_if_index)
+{
+ vnet_sw_interface_t *sw;
+ sw = vnet_get_hw_sw_interface (vnm, vui->hw_if_index);
+ int q;
+
+ if (server_sock_fd != -1)
+ {
+ unix_file_t template = { 0 };
+ template.read_function = vhost_user_socksvr_accept_ready;
+ template.file_descriptor = server_sock_fd;
+      template.private_data = vui - vhost_user_main.vhost_user_interfaces;	//interface pool index
+ vui->unix_server_index = unix_file_add (&unix_main, &template);
+ }
+ else
+ {
+ vui->unix_server_index = ~0;
+ }
+
+ vui->sw_if_index = sw->sw_if_index;
+ strncpy (vui->sock_filename, sock_filename,
+ ARRAY_LEN (vui->sock_filename) - 1);
+ vui->sock_errno = 0;
+ vui->is_up = 0;
+ vui->feature_mask = feature_mask;
+ vui->unix_file_index = ~0;
+ vui->log_base_addr = 0;
+
+ for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ vhost_user_vring_init (vui, q);
+
+ vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
+
+ if (sw_if_index)
+ *sw_if_index = vui->sw_if_index;
+
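+  /* allocate one cache-line aligned spinlock per vring so threads
+   * sharing a TX queue do not false-share the lock */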
+ for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ {
+ vui->vring_locks[q] = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+ CLIB_CACHE_LINE_BYTES);
+ memset ((void *) vui->vring_locks[q], 0, CLIB_CACHE_LINE_BYTES);
+ }
+
+ vec_validate (vui->per_cpu_tx_qid,
+ vlib_get_thread_main ()->n_vlib_mains - 1);
+ vhost_user_tx_thread_placement (vui);
+}
+
+int
+vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
+ const char *sock_filename,
+ u8 is_server,
+ u32 * sw_if_index,
+ u64 feature_mask,
+ u8 renumber, u32 custom_dev_instance, u8 * hwaddr)
+{
+ vhost_user_intf_t *vui = NULL;
+ u32 sw_if_idx = ~0;
+ int rv = 0;
+ int server_sock_fd = -1;
+
+ if (is_server)
+ {
+ if ((rv =
+ vhost_user_init_server_sock (sock_filename, &server_sock_fd)) != 0)
+ {
+ return rv;
+ }
+ }
+
+ pool_get (vhost_user_main.vhost_user_interfaces, vui);
+
+ vhost_user_create_ethernet (vnm, vm, vui, hwaddr);
+ vhost_user_vui_init (vnm, vui, server_sock_fd, sock_filename,
+ feature_mask, &sw_if_idx);
+
+ if (renumber)
+ vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
+
+ if (sw_if_index)
+ *sw_if_index = sw_if_idx;
+
+ // Process node must connect
+ vlib_process_signal_event (vm, vhost_user_process_node.index, 0, 0);
+ return rv;
+}
+
+int
+vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
+ const char *sock_filename,
+ u8 is_server,
+ u32 sw_if_index,
+ u64 feature_mask, u8 renumber, u32 custom_dev_instance)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui = NULL;
+ u32 sw_if_idx = ~0;
+ int server_sock_fd = -1;
+ int rv = 0;
+ vnet_hw_interface_t *hwif;
+
+ if (!(hwif = vnet_get_sup_hw_interface (vnm, sw_if_index)) ||
+ hwif->dev_class_index != vhost_user_dev_class.index)
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+
+ vui = vec_elt_at_index (vum->vhost_user_interfaces, hwif->dev_instance);
+
+ // First try to open server socket
+ if (is_server)
+ if ((rv = vhost_user_init_server_sock (sock_filename,
+ &server_sock_fd)) != 0)
+ return rv;
+
+ vhost_user_term_if (vui);
+ vhost_user_vui_init (vnm, vui, server_sock_fd,
+ sock_filename, feature_mask, &sw_if_idx);
+
+ if (renumber)
+ vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
+
+ // Process node must connect
+ vlib_process_signal_event (vm, vhost_user_process_node.index, 0, 0);
+ return rv;
+}
+
+clib_error_t *
+vhost_user_connect_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *sock_filename = NULL;
+ u32 sw_if_index;
+ u8 is_server = 0;
+ u64 feature_mask = (u64) ~ (0ULL);
+ u8 renumber = 0;
+ u32 custom_dev_instance = ~0;
+ u8 hwaddr[6];
+ u8 *hw = NULL;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "socket %s", &sock_filename))
+ ;
+ else if (unformat (line_input, "server"))
+ is_server = 1;
+ else if (unformat (line_input, "feature-mask 0x%llx", &feature_mask))
+ ;
+ else
+ if (unformat
+ (line_input, "hwaddr %U", unformat_ethernet_address, hwaddr))
+ hw = hwaddr;
+ else if (unformat (line_input, "renumber %d", &custom_dev_instance))
+ {
+ renumber = 1;
+ }
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ vnet_main_t *vnm = vnet_get_main ();
+
+ int rv;
+ if ((rv = vhost_user_create_if (vnm, vm, (char *) sock_filename,
+ is_server, &sw_if_index, feature_mask,
+ renumber, custom_dev_instance, hw)))
+ {
+ vec_free (sock_filename);
+ return clib_error_return (0, "vhost_user_create_if returned %d", rv);
+ }
+
+ vec_free (sock_filename);
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (),
+ sw_if_index);
+ return 0;
+}
+
+clib_error_t *
+vhost_user_delete_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 sw_if_index = ~0;
+ vnet_main_t *vnm = vnet_get_main ();
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "sw_if_index %d", &sw_if_index))
+ ;
+ else if (unformat
+ (line_input, "%U", unformat_vnet_sw_interface, vnm,
+ &sw_if_index))
+ {
+ vnet_hw_interface_t *hwif =
+ vnet_get_sup_hw_interface (vnm, sw_if_index);
+ if (hwif == NULL ||
+ vhost_user_dev_class.index != hwif->dev_class_index)
+ return clib_error_return (0, "Not a vhost interface");
+ }
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+ vhost_user_delete_if (vnm, vm, sw_if_index);
+ return 0;
+}
+
+int
+vhost_user_dump_ifs (vnet_main_t * vnm, vlib_main_t * vm,
+ vhost_user_intf_details_t ** out_vuids)
+{
+ int rv = 0;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ vhost_user_intf_details_t *r_vuids = NULL;
+ vhost_user_intf_details_t *vuid = NULL;
+ u32 *hw_if_indices = 0;
+ vnet_hw_interface_t *hi;
+ u8 *s = NULL;
+ int i;
+
+ if (!out_vuids)
+ return -1;
+
+ pool_foreach (vui, vum->vhost_user_interfaces,
+ vec_add1 (hw_if_indices, vui->hw_if_index);
+ );
+
+ for (i = 0; i < vec_len (hw_if_indices); i++)
+ {
+ hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
+ vui = pool_elt_at_index (vum->vhost_user_interfaces, hi->dev_instance);
+
+ vec_add2 (r_vuids, vuid, 1);
+ vuid->sw_if_index = vui->sw_if_index;
+ vuid->virtio_net_hdr_sz = vui->virtio_net_hdr_sz;
+ vuid->features = vui->features;
+ vuid->num_regions = vui->nregions;
+ vuid->sock_errno = vui->sock_errno;
+ strncpy ((char *) vuid->sock_filename, (char *) vui->sock_filename,
+ ARRAY_LEN (vuid->sock_filename) - 1);
+
+ s = format (s, "%v%c", hi->name, 0);
+
+ strncpy ((char *) vuid->if_name, (char *) s,
+ ARRAY_LEN (vuid->if_name) - 1);
+ _vec_len (s) = 0;
+ }
+
+ vec_free (s);
+ vec_free (hw_if_indices);
+
+ *out_vuids = r_vuids;
+
+ return rv;
+}
+
+clib_error_t *
+show_vhost_user_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *error = 0;
+ vnet_main_t *vnm = vnet_get_main ();
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ u32 hw_if_index, *hw_if_indices = 0;
+ vnet_hw_interface_t *hi;
+ vhost_cpu_t *vhc;
+ vhost_iface_and_queue_t *vhiq;
+ u32 ci;
+
+ int i, j, q;
+ int show_descr = 0;
+ struct feat_struct
+ {
+ u8 bit;
+ char *str;
+ };
+ struct feat_struct *feat_entry;
+
+ static struct feat_struct feat_array[] = {
+#define _(s,b) { .str = #s, .bit = b, },
+ foreach_virtio_net_feature
+#undef _
+ {.str = NULL}
+ };
+
+#define foreach_protocol_feature \
+ _(VHOST_USER_PROTOCOL_F_MQ) \
+ _(VHOST_USER_PROTOCOL_F_LOG_SHMFD)
+
+ static struct feat_struct proto_feat_array[] = {
+#define _(s) { .str = #s, .bit = s},
+ foreach_protocol_feature
+#undef _
+ {.str = NULL}
+ };
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (input, "%U", unformat_vnet_hw_interface, vnm, &hw_if_index))
+ {
+ vec_add1 (hw_if_indices, hw_if_index);
+ }
+ else if (unformat (input, "descriptors") || unformat (input, "desc"))
+ show_descr = 1;
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+ if (vec_len (hw_if_indices) == 0)
+ {
+ pool_foreach (vui, vum->vhost_user_interfaces,
+ vec_add1 (hw_if_indices, vui->hw_if_index);
+ );
+ }
+ vlib_cli_output (vm, "Virtio vhost-user interfaces");
+ vlib_cli_output (vm, "Global:\n coalesce frames %d time %e",
+ vum->coalesce_frames, vum->coalesce_time);
+
+ for (i = 0; i < vec_len (hw_if_indices); i++)
+ {
+ hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
+ vui = pool_elt_at_index (vum->vhost_user_interfaces, hi->dev_instance);
+ vlib_cli_output (vm, "Interface: %s (ifindex %d)",
+ hi->name, hw_if_indices[i]);
+
+ vlib_cli_output (vm, "virtio_net_hdr_sz %d\n"
+ " features mask (0x%llx): \n"
+ " features (0x%llx): \n",
+ vui->virtio_net_hdr_sz, vui->feature_mask,
+ vui->features);
+
+ feat_entry = (struct feat_struct *) &feat_array;
+ while (feat_entry->str)
+ {
+ if (vui->features & (1ULL << feat_entry->bit))
+ vlib_cli_output (vm, " %s (%d)", feat_entry->str,
+ feat_entry->bit);
+ feat_entry++;
+ }
+
+ vlib_cli_output (vm, " protocol features (0x%llx)",
+ vui->protocol_features);
+ feat_entry = (struct feat_struct *) &proto_feat_array;
+ while (feat_entry->str)
+ {
+ if (vui->protocol_features & (1ULL << feat_entry->bit))
+ vlib_cli_output (vm, " %s (%d)", feat_entry->str,
+ feat_entry->bit);
+ feat_entry++;
+ }
+
+ vlib_cli_output (vm, "\n");
+
+ vlib_cli_output (vm, " socket filename %s type %s errno \"%s\"\n\n",
+ vui->sock_filename,
+ (vui->unix_server_index != ~0) ? "server" : "client",
+ strerror (vui->sock_errno));
+
+ vlib_cli_output (vm, " rx placement: ");
+ vec_foreach (vhc, vum->cpus)
+ {
+ vec_foreach (vhiq, vhc->rx_queues)
+ {
+ if (vhiq->vhost_iface_index == vui - vum->vhost_user_interfaces)
+ vlib_cli_output (vm, " thread %d on vring %d\n",
+ vhc - vum->cpus, VHOST_VRING_IDX_TX (vhiq->qid));
+ }
+ }
+
+ vlib_cli_output (vm, " tx placement: %s\n",
+ vui->use_tx_spinlock ? "spin-lock" : "lock-free");
+
+ vec_foreach_index (ci, vui->per_cpu_tx_qid)
+ {
+ vlib_cli_output (vm, " thread %d on vring %d\n", ci,
+ VHOST_VRING_IDX_RX (vui->per_cpu_tx_qid[ci]));
+ }
+
+ vlib_cli_output (vm, "\n");
+
+ vlib_cli_output (vm, " Memory regions (total %d)\n", vui->nregions);
+
+ if (vui->nregions)
+ {
+ vlib_cli_output (vm,
+ " region fd guest_phys_addr memory_size userspace_addr mmap_offset mmap_addr\n");
+ vlib_cli_output (vm,
+ " ====== ===== ================== ================== ================== ================== ==================\n");
+ }
+ for (j = 0; j < vui->nregions; j++)
+ {
+ vlib_cli_output (vm,
+ " %d %-5d 0x%016lx 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n",
+ j, vui->region_mmap_fd[j],
+ vui->regions[j].guest_phys_addr,
+ vui->regions[j].memory_size,
+ vui->regions[j].userspace_addr,
+ vui->regions[j].mmap_offset,
+ pointer_to_uword (vui->region_mmap_addr[j]));
+ }
+ for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ {
+ if (!vui->vrings[q].started)
+ continue;
+
+ vlib_cli_output (vm, "\n Virtqueue %d (%s%s)\n", q,
+ (q & 1) ? "RX" : "TX",
+ vui->vrings[q].enabled ? "" : " disabled");
+
+ vlib_cli_output (vm,
+ " qsz %d last_avail_idx %d last_used_idx %d\n",
+ vui->vrings[q].qsz, vui->vrings[q].last_avail_idx,
+ vui->vrings[q].last_used_idx);
+
+ if (vui->vrings[q].avail && vui->vrings[q].used)
+ vlib_cli_output (vm,
+ " avail.flags %x avail.idx %d used.flags %x used.idx %d\n",
+ vui->vrings[q].avail->flags,
+ vui->vrings[q].avail->idx,
+ vui->vrings[q].used->flags,
+ vui->vrings[q].used->idx);
+
+ int kickfd = UNIX_GET_FD (vui->vrings[q].kickfd_idx);
+ int callfd = UNIX_GET_FD (vui->vrings[q].callfd_idx);
+ vlib_cli_output (vm, " kickfd %d callfd %d errfd %d\n",
+ kickfd, callfd, vui->vrings[q].errfd);
+
+ if (show_descr)
+ {
+ vlib_cli_output (vm, "\n descriptor table:\n");
+ vlib_cli_output (vm,
+ " id addr len flags next user_addr\n");
+ vlib_cli_output (vm,
+ " ===== ================== ===== ====== ===== ==================\n");
+ for (j = 0; j < vui->vrings[q].qsz; j++)
+ {
+ u32 mem_hint = 0;
+ vlib_cli_output (vm,
+ " %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n",
+ j, vui->vrings[q].desc[j].addr,
+ vui->vrings[q].desc[j].len,
+ vui->vrings[q].desc[j].flags,
+ vui->vrings[q].desc[j].next,
+ pointer_to_uword (map_guest_mem
+ (vui,
+ vui->vrings[q].desc[j].
+ addr, &mem_hint)));
+ }
+ }
+ }
+ vlib_cli_output (vm, "\n");
+ }
+done:
+ vec_free (hw_if_indices);
+ return error;
+}
+
+/*
+ * CLI functions
+ */
+
+/*?
+ * Create a vHost User interface. Once created, a new virtual interface
+ * will exist with the name '<em>VirtualEthernet0/0/x</em>', where '<em>x</em>'
+ * is the next free index.
+ *
+ * There are several parameters associated with a vHost interface:
+ *
+ * - <b>socket <socket-filename></b> - Name of the Linux socket used by QEMU/VM and
+ * VPP to manage the vHost interface. If the socket does not already exist, VPP
+ * will create it.
+ *
+ * - <b>server</b> - Optional flag to indicate that VPP should be the server for the
+ * Linux socket. If not provided, VPP will be the client.
+ *
+ * - <b>feature-mask <hex></b> - Optional virtio/vhost feature set negotiated at
+ * startup. By default, all supported features will be advertised. Otherwise,
+ * provide the set of features desired.
+ * - 0x000008000 (15) - VIRTIO_NET_F_MRG_RXBUF
+ * - 0x000020000 (17) - VIRTIO_NET_F_CTRL_VQ
+ * - 0x000200000 (21) - VIRTIO_NET_F_GUEST_ANNOUNCE
+ * - 0x000400000 (22) - VIRTIO_NET_F_MQ
+ * - 0x004000000 (26) - VHOST_F_LOG_ALL
+ * - 0x008000000 (27) - VIRTIO_F_ANY_LAYOUT
+ * - 0x010000000 (28) - VIRTIO_F_INDIRECT_DESC
+ * - 0x040000000 (30) - VHOST_USER_F_PROTOCOL_FEATURES
+ * - 0x100000000 (32) - VIRTIO_F_VERSION_1
+ *
+ * - <b>hwaddr <mac-addr></b> - Optional ethernet address, can be in either
+ * X:X:X:X:X:X unix or X.X.X cisco format.
+ *
+ * - <b>renumber <dev_instance></b> - Optional parameter which allows the instance
+ * in the name to be specified. If the instance already exists, the name will
+ * be used anyway and multiple instances will have the same name. Use with caution.
+ *
+ * @cliexpar
+ * Example of how to create a vhost interface with VPP as the client and all features enabled:
+ * @cliexstart{create vhost-user socket /tmp/vhost1.sock}
+ * VirtualEthernet0/0/0
+ * @cliexend
+ * Example of how to create a vhost interface with VPP as the server and with just
+ * multiple queues enabled:
+ * @cliexstart{create vhost-user socket /tmp/vhost2.sock server feature-mask 0x40400000}
+ * VirtualEthernet0/0/1
+ * @cliexend
+ * Once the vHost interface is created, enable the interface using:
+ * @cliexcmd{set interface state VirtualEthernet0/0/0 up}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vhost_user_connect_command, static) = {
+ .path = "create vhost-user",
+ .short_help = "create vhost-user socket <socket-filename> [server] [feature-mask <hex>] [hwaddr <mac-addr>] [renumber <dev_instance>]",
+ .function = vhost_user_connect_command_fn,
+};
+/* *INDENT-ON* */
+
+/*?
+ * Delete a vHost User interface using the interface name or the
+ * software interface index. Use the '<em>show interfaces</em>'
+ * command to determine the software interface index. On deletion,
+ * the Linux socket will not be deleted.
+ *
+ * @cliexpar
+ * Example of how to delete a vhost interface by name:
+ * @cliexcmd{delete vhost-user VirtualEthernet0/0/1}
+ * Example of how to delete a vhost interface by software interface index:
+ * @cliexcmd{delete vhost-user sw_if_index 1}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vhost_user_delete_command, static) = {
+ .path = "delete vhost-user",
+ .short_help = "delete vhost-user {<interface> | sw_if_index <sw_idx>}",
+ .function = vhost_user_delete_command_fn,
+};
+
+/*?
+ * Display the attributes of a single vHost User interface (provide interface
+ * name), multiple vHost User interfaces (provide a list of interface names separated
+ * by spaces) or all vHost User interfaces (omit an interface name to display all
+ * vHost interfaces).
+ *
+ * @cliexpar
+ * @parblock
+ * Example of how to display a vhost interface:
+ * @cliexstart{show vhost-user VirtualEthernet0/0/0}
+ * Virtio vhost-user interfaces
+ * Global:
+ * coalesce frames 32 time 1e-3
+ * Interface: VirtualEthernet0/0/0 (ifindex 1)
+ * virtio_net_hdr_sz 12
+ * features mask (0xffffffffffffffff):
+ * features (0x50408000):
+ * VIRTIO_NET_F_MRG_RXBUF (15)
+ * VIRTIO_NET_F_MQ (22)
+ * VIRTIO_F_INDIRECT_DESC (28)
+ * VHOST_USER_F_PROTOCOL_FEATURES (30)
+ * protocol features (0x3)
+ * VHOST_USER_PROTOCOL_F_MQ (0)
+ * VHOST_USER_PROTOCOL_F_LOG_SHMFD (1)
+ *
+ * socket filename /tmp/vhost1.sock type client errno "Success"
+ *
+ * rx placement:
+ * thread 1 on vring 1
+ * thread 1 on vring 5
+ * thread 2 on vring 3
+ * thread 2 on vring 7
+ * tx placement: spin-lock
+ * thread 0 on vring 0
+ * thread 1 on vring 2
+ * thread 2 on vring 0
+ *
+ * Memory regions (total 2)
+ * region fd guest_phys_addr memory_size userspace_addr mmap_offset mmap_addr
+ * ====== ===== ================== ================== ================== ================== ==================
+ * 0 60 0x0000000000000000 0x00000000000a0000 0x00002aaaaac00000 0x0000000000000000 0x00002aab2b400000
+ * 1 61 0x00000000000c0000 0x000000003ff40000 0x00002aaaaacc0000 0x00000000000c0000 0x00002aababcc0000
+ *
+ * Virtqueue 0 (TX)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 128 used.flags 1 used.idx 0
+ * kickfd 62 callfd 64 errfd -1
+ *
+ * Virtqueue 1 (RX)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
+ * kickfd 65 callfd 66 errfd -1
+ *
+ * Virtqueue 2 (TX)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 128 used.flags 1 used.idx 0
+ * kickfd 63 callfd 70 errfd -1
+ *
+ * Virtqueue 3 (RX)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
+ * kickfd 72 callfd 74 errfd -1
+ *
+ * Virtqueue 4 (TX disabled)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
+ * kickfd 76 callfd 78 errfd -1
+ *
+ * Virtqueue 5 (RX disabled)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
+ * kickfd 80 callfd 82 errfd -1
+ *
+ * Virtqueue 6 (TX disabled)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
+ * kickfd 84 callfd 86 errfd -1
+ *
+ * Virtqueue 7 (RX disabled)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
+ * kickfd 88 callfd 90 errfd -1
+ *
+ * @cliexend
+ *
+ * The optional '<em>descriptors</em>' parameter will display the same output as
+ * the previous example but will include the descriptor table for each queue.
+ * The output is truncated below:
+ * @cliexstart{show vhost-user VirtualEthernet0/0/0 descriptors}
+ * Virtio vhost-user interfaces
+ * Global:
+ * coalesce frames 32 time 1e-3
+ * Interface: VirtualEthernet0/0/0 (ifindex 1)
+ * virtio_net_hdr_sz 12
+ * features mask (0xffffffffffffffff):
+ * features (0x50408000):
+ * VIRTIO_NET_F_MRG_RXBUF (15)
+ * VIRTIO_NET_F_MQ (22)
+ * :
+ * Virtqueue 0 (TX)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 128 used.flags 1 used.idx 0
+ * kickfd 62 callfd 64 errfd -1
+ *
+ * descriptor table:
+ * id addr len flags next user_addr
+ * ===== ================== ===== ====== ===== ==================
+ * 0 0x0000000010b6e974 2060 0x0002 1 0x00002aabbc76e974
+ * 1 0x0000000010b6e034 2060 0x0002 2 0x00002aabbc76e034
+ * 2 0x0000000010b6d6f4 2060 0x0002 3 0x00002aabbc76d6f4
+ * 3 0x0000000010b6cdb4 2060 0x0002 4 0x00002aabbc76cdb4
+ * 4 0x0000000010b6c474 2060 0x0002 5 0x00002aabbc76c474
+ * 5 0x0000000010b6bb34 2060 0x0002 6 0x00002aabbc76bb34
+ * 6 0x0000000010b6b1f4 2060 0x0002 7 0x00002aabbc76b1f4
+ * 7 0x0000000010b6a8b4 2060 0x0002 8 0x00002aabbc76a8b4
+ * 8 0x0000000010b69f74 2060 0x0002 9 0x00002aabbc769f74
+ * 9 0x0000000010b69634 2060 0x0002 10 0x00002aabbc769634
+ * 10 0x0000000010b68cf4 2060 0x0002 11 0x00002aabbc768cf4
+ * :
+ * 249 0x0000000000000000 0 0x0000 250 0x00002aab2b400000
+ * 250 0x0000000000000000 0 0x0000 251 0x00002aab2b400000
+ * 251 0x0000000000000000 0 0x0000 252 0x00002aab2b400000
+ * 252 0x0000000000000000 0 0x0000 253 0x00002aab2b400000
+ * 253 0x0000000000000000 0 0x0000 254 0x00002aab2b400000
+ * 254 0x0000000000000000 0 0x0000 255 0x00002aab2b400000
+ * 255 0x0000000000000000 0 0x0000 32768 0x00002aab2b400000
+ *
+ * Virtqueue 1 (RX)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * :
+ * @cliexend
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_vhost_user_command, static) = {
+ .path = "show vhost-user",
+ .short_help = "show vhost-user [<interface> [<interface> [..]]] [descriptors]",
+ .function = show_vhost_user_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+vhost_user_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "coalesce-frames %d", &vum->coalesce_frames))
+ ;
+ else if (unformat (input, "coalesce-time %f", &vum->coalesce_time))
+ ;
+ else if (unformat (input, "dont-dump-memory"))
+ vum->dont_dump_vhost_user_memory = 1;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ return 0;
+}
+
+/* vhost-user { ... } configuration. */
+VLIB_CONFIG_FUNCTION (vhost_user_config, "vhost-user");
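+/* An illustrative startup.conf stanza accepted by the parser above
+ * (values are examples, not defaults):
+ *
+ *   vhost-user {
+ *     coalesce-frames 64
+ *     coalesce-time 1e-3
+ *     dont-dump-memory
+ *   }
+ */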
+
+void
+vhost_user_unmap_all (void)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+
+ if (vum->dont_dump_vhost_user_memory)
+ {
+ pool_foreach (vui, vum->vhost_user_interfaces,
+ unmap_all_mem_regions (vui);
+ );
+ }
+}
+
+static clib_error_t *
+vhost_thread_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 worker_thread_index;
+ u32 sw_if_index;
+ u8 del = 0;
+ int rv;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ if (!unformat
+ (line_input, "%U %d", unformat_vnet_sw_interface, vnet_get_main (),
+ &sw_if_index, &worker_thread_index))
+ {
+ unformat_free (line_input);
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ if (unformat (line_input, "del"))
+ del = 1;
+
+ if ((rv =
+ vhost_user_thread_placement (sw_if_index, worker_thread_index, del)))
+ return clib_error_return (0, "vhost_user_thread_placement returned %d",
+ rv);
+ return 0;
+}
+
+
+/*?
+ * This command is used to move the RX processing for the given
+ * interface to the provided thread. If the '<em>del</em>' option is used,
+ * the forced thread assignment is removed and the thread is reassigned
+ * automatically. Use '<em>show vhost-user <interface></em>'
+ * to see the thread assignment.
+ *
+ * @cliexpar
+ * Example of how to move the RX processing for a given interface to a given thread:
+ * @cliexcmd{vhost thread VirtualEthernet0/0/0 1}
+ * Example of how to remove the forced thread assignment for a given interface:
+ * @cliexcmd{vhost thread VirtualEthernet0/0/0 1 del}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vhost_user_thread_command, static) = {
+ .path = "vhost thread",
+ .short_help = "vhost thread <iface> <worker-index> [del]",
+ .function = vhost_thread_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/virtio/vhost-user.h b/src/vnet/devices/virtio/vhost-user.h
new file mode 100644
index 00000000000..3083b614016
--- /dev/null
+++ b/src/vnet/devices/virtio/vhost-user.h
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __VIRTIO_VHOST_USER_H__
+#define __VIRTIO_VHOST_USER_H__
+/* vhost-user data structures */
+
+#define VHOST_MEMORY_MAX_NREGIONS 8
+#define VHOST_USER_MSG_HDR_SZ 12
+#define VHOST_VRING_MAX_SIZE 32768
+#define VHOST_VRING_MAX_N 16 //8TX + 8RX
+#define VHOST_VRING_IDX_RX(qid) (2*qid)
+#define VHOST_VRING_IDX_TX(qid) (2*qid + 1)
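+/* e.g. queue pair 0 uses vring 0 (RX) and vring 1 (TX); queue pair 1
+   uses vrings 2 and 3, and so on. */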
+
+#define VIRTQ_DESC_F_NEXT 1
+#define VIRTQ_DESC_F_INDIRECT 4
+#define VHOST_USER_REPLY_MASK (0x1 << 2)
+
+#define VHOST_USER_PROTOCOL_F_MQ 0
+#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
+#define VHOST_VRING_F_LOG 0
+
+#define VHOST_USER_F_PROTOCOL_FEATURES 30
+#define VHOST_USER_PROTOCOL_FEATURES ((1ULL << VHOST_USER_PROTOCOL_F_MQ) | \
+ (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD))
+
+/* If multiqueue is provided by the host, then we support it. */
+#define VIRTIO_NET_CTRL_MQ 4
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
+
+#define VRING_USED_F_NO_NOTIFY 1
+
+#define foreach_virtio_net_feature \
+ _ (VIRTIO_NET_F_MRG_RXBUF, 15) \
+ _ (VIRTIO_NET_F_CTRL_VQ, 17) \
+ _ (VIRTIO_NET_F_GUEST_ANNOUNCE, 21) \
+ _ (VIRTIO_NET_F_MQ, 22) \
+ _ (VHOST_F_LOG_ALL, 26) \
+ _ (VIRTIO_F_ANY_LAYOUT, 27) \
+ _ (VIRTIO_F_INDIRECT_DESC, 28) \
+ _ (VHOST_USER_F_PROTOCOL_FEATURES, 30) \
+ _ (VIRTIO_F_VERSION_1, 32)
+
+
+typedef enum
+{
+#define _(f,n) FEAT_##f = (n),
+ foreach_virtio_net_feature
+#undef _
+} virtio_net_feature_t;
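+/* Bit positions are the virtio feature bit numbers; e.g. the feature
+   word 0x50408000 shown by 'show vhost-user' is bits 15, 22, 28 and 30
+   set (MRG_RXBUF, MQ, INDIRECT_DESC and PROTOCOL_FEATURES). */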
+
+int vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
+ const char *sock_filename, u8 is_server,
+ u32 * sw_if_index, u64 feature_mask,
+ u8 renumber, u32 custom_dev_instance, u8 * hwaddr);
+int vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
+ const char *sock_filename, u8 is_server,
+ u32 sw_if_index, u64 feature_mask,
+ u8 renumber, u32 custom_dev_instance);
+int vhost_user_delete_if (vnet_main_t * vnm, vlib_main_t * vm,
+ u32 sw_if_index);
+
+/* *INDENT-OFF* */
+typedef struct vhost_user_memory_region
+{
+ u64 guest_phys_addr;
+ u64 memory_size;
+ u64 userspace_addr;
+ u64 mmap_offset;
+} __attribute ((packed)) vhost_user_memory_region_t;
+
+typedef struct vhost_user_memory
+{
+ u32 nregions;
+ u32 padding;
+ vhost_user_memory_region_t regions[VHOST_MEMORY_MAX_NREGIONS];
+} __attribute ((packed)) vhost_user_memory_t;
+
+typedef struct
+{
+ u32 index, num;
+} __attribute ((packed)) vhost_vring_state_t;
+
+typedef struct
+{
+ u32 index, flags;
+ u64 desc_user_addr, used_user_addr, avail_user_addr, log_guest_addr;
+} __attribute ((packed)) vhost_vring_addr_t;
+
+typedef struct vhost_user_log
+{
+ u64 size;
+ u64 offset;
+} __attribute ((packed)) vhost_user_log_t;
+
+typedef enum vhost_user_req
+{
+ VHOST_USER_NONE = 0,
+ VHOST_USER_GET_FEATURES = 1,
+ VHOST_USER_SET_FEATURES = 2,
+ VHOST_USER_SET_OWNER = 3,
+ VHOST_USER_RESET_OWNER = 4,
+ VHOST_USER_SET_MEM_TABLE = 5,
+ VHOST_USER_SET_LOG_BASE = 6,
+ VHOST_USER_SET_LOG_FD = 7,
+ VHOST_USER_SET_VRING_NUM = 8,
+ VHOST_USER_SET_VRING_ADDR = 9,
+ VHOST_USER_SET_VRING_BASE = 10,
+ VHOST_USER_GET_VRING_BASE = 11,
+ VHOST_USER_SET_VRING_KICK = 12,
+ VHOST_USER_SET_VRING_CALL = 13,
+ VHOST_USER_SET_VRING_ERR = 14,
+ VHOST_USER_GET_PROTOCOL_FEATURES = 15,
+ VHOST_USER_SET_PROTOCOL_FEATURES = 16,
+ VHOST_USER_GET_QUEUE_NUM = 17,
+ VHOST_USER_SET_VRING_ENABLE = 18,
+ VHOST_USER_MAX
+} vhost_user_req_t;
+
+// vring_desc I/O buffer descriptor
+typedef struct
+{
+ uint64_t addr; // packet data buffer address
+ uint32_t len; // packet data buffer size
+ uint16_t flags; // (see below)
+ uint16_t next; // optional index next descriptor in chain
+} __attribute ((packed)) vring_desc_t;
+
+typedef struct
+{
+ uint16_t flags;
+ volatile uint16_t idx;
+ uint16_t ring[VHOST_VRING_MAX_SIZE];
+} __attribute ((packed)) vring_avail_t;
+
+typedef struct
+{
+ uint16_t flags;
+ uint16_t idx;
+ struct /* vring_used_elem */
+ {
+ uint32_t id;
+ uint32_t len;
+ } ring[VHOST_VRING_MAX_SIZE];
+} __attribute ((packed)) vring_used_t;
+
+typedef struct
+{
+ u8 flags;
+ u8 gso_type;
+ u16 hdr_len;
+ u16 gso_size;
+ u16 csum_start;
+ u16 csum_offset;
+} __attribute ((packed)) virtio_net_hdr_t;
+
+typedef struct {
+ virtio_net_hdr_t hdr;
+ u16 num_buffers;
+} __attribute ((packed)) virtio_net_hdr_mrg_rxbuf_t;
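+/* The packed virtio_net_hdr_t is 10 bytes; with num_buffers appended the
+   merged-rxbuf header is the 12-byte virtio_net_hdr_sz reported by
+   'show vhost-user'. */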
+
+typedef struct vhost_user_msg {
+ vhost_user_req_t request;
+ u32 flags;
+ u32 size;
+ union
+ {
+ u64 u64;
+ vhost_vring_state_t state;
+ vhost_vring_addr_t addr;
+ vhost_user_memory_t memory;
+ vhost_user_log_t log;
+ };
+} __attribute ((packed)) vhost_user_msg_t;
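+/* request (a 4-byte enum with typical ABIs), flags and size make up the
+   12-byte wire header counted by VHOST_USER_MSG_HDR_SZ above. */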
+/* *INDENT-ON* */
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u16 qsz;
+ u16 last_avail_idx;
+ u16 last_used_idx;
+ u16 n_since_last_int;
+ vring_desc_t *desc;
+ vring_avail_t *avail;
+ vring_used_t *used;
+ f64 int_deadline;
+ u8 started;
+ u8 enabled;
+ u8 log_used;
+ //Put non-runtime in a different cache line
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
+ int errfd;
+ u32 callfd_idx;
+ u32 kickfd_idx;
+ u64 log_guest_addr;
+} vhost_user_vring_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u32 is_up;
+ u32 admin_up;
+ u32 unix_server_index;
+ u32 unix_file_index;
+ char sock_filename[256];
+ int sock_errno;
+ u32 hw_if_index, sw_if_index;
+
+ //Feature negotiation
+ u64 features;
+ u64 feature_mask;
+ u64 protocol_features;
+
+ //Memory region information
+ u32 nregions;
+ vhost_user_memory_region_t regions[VHOST_MEMORY_MAX_NREGIONS];
+ void *region_mmap_addr[VHOST_MEMORY_MAX_NREGIONS];
+ u64 region_guest_addr_lo[VHOST_MEMORY_MAX_NREGIONS];
+ u64 region_guest_addr_hi[VHOST_MEMORY_MAX_NREGIONS];
+ u32 region_mmap_fd[VHOST_MEMORY_MAX_NREGIONS];
+
+ //Virtual rings
+ vhost_user_vring_t vrings[VHOST_VRING_MAX_N];
+ volatile u32 *vring_locks[VHOST_VRING_MAX_N];
+
+ int virtio_net_hdr_sz;
+ int is_any_layout;
+
+ void *log_base_addr;
+ u64 log_size;
+
+ /* Whether to use spinlock or per_cpu_tx_qid assignment */
+ u8 use_tx_spinlock;
+ u16 *per_cpu_tx_qid;
+
+ /* Vector of workers for this interface */
+ u32 *workers;
+} vhost_user_intf_t;
+
+typedef struct
+{
+ u16 vhost_iface_index;
+ u16 qid;
+} vhost_iface_and_queue_t;
+
+typedef struct
+{
+ uword dst;
+ uword src;
+ u32 len;
+} vhost_copy_t;
+
+typedef struct
+{
+ u16 qid; /** The interface queue index (Not the virtio vring idx) */
+ u16 device_index; /** The device index */
+ u32 virtio_ring_flags; /** Runtime queue flags **/
+ u16 first_desc_len; /** Length of the first data descriptor **/
+ virtio_net_hdr_mrg_rxbuf_t hdr; /** Virtio header **/
+} vhost_trace_t;
+
+
+#define VHOST_USER_RX_BUFFERS_N (2 * VLIB_FRAME_SIZE + 2)
+#define VHOST_USER_COPY_ARRAY_N (4 * VLIB_FRAME_SIZE)
+
+typedef struct
+{
+ vhost_iface_and_queue_t *rx_queues;
+ u32 rx_buffers_len;
+ u32 rx_buffers[VHOST_USER_RX_BUFFERS_N];
+
+ virtio_net_hdr_mrg_rxbuf_t tx_headers[VLIB_FRAME_SIZE];
+ vhost_copy_t copy[VHOST_USER_COPY_ARRAY_N];
+
+ /* This is here so it doesn't end-up
+ * using stack or registers. */
+ vhost_trace_t *current_trace;
+} vhost_cpu_t;
+
+typedef struct
+{
+ u32 mtu_bytes;
+ vhost_user_intf_t *vhost_user_interfaces;
+ u32 *show_dev_instance_by_real_dev_instance;
+ u32 coalesce_frames;
+ f64 coalesce_time;
+ int dont_dump_vhost_user_memory;
+
+ /** first cpu index */
+ u32 input_cpu_first_index;
+
+ /** total cpu count */
+ u32 input_cpu_count;
+
+ /** Per-CPU data for vhost-user */
+ vhost_cpu_t *cpus;
+
+ /** Pseudo random iterator */
+ u32 random;
+} vhost_user_main_t;
+
+typedef struct
+{
+ u8 if_name[64];
+ u32 sw_if_index;
+ u32 virtio_net_hdr_sz;
+ u64 features;
+ u8 is_server;
+ u8 sock_filename[256];
+ u32 num_regions;
+ int sock_errno;
+} vhost_user_intf_details_t;
+
+int vhost_user_dump_ifs (vnet_main_t * vnm, vlib_main_t * vm,
+ vhost_user_intf_details_t ** out_vuids);
+
+// CLI commands to be used from dpdk
+clib_error_t *vhost_user_connect_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd);
+clib_error_t *vhost_user_delete_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd);
+clib_error_t *show_vhost_user_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/virtio/vhost_user.api b/src/vnet/devices/virtio/vhost_user.api
new file mode 100644
index 00000000000..21e42298361
--- /dev/null
+++ b/src/vnet/devices/virtio/vhost_user.api
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \brief vhost-user interface create request
+ @param client_index - opaque cookie to identify the sender
+ @param is_server - our side is socket server
+ @param sock_filename - unix socket filename, used to speak with frontend
+ @param renumber - renumber the interface using custom_dev_instance
+ @param custom_dev_instance - custom device instance, used if renumber is set
+ @param use_custom_mac - enable or disable the use of the provided hardware address
+ @param mac_address - hardware address to use if 'use_custom_mac' is set
+ @param tag - opaque tag to be stored with the new interface
+*/
+define create_vhost_user_if
+{
+ u32 client_index;
+ u32 context;
+ u8 is_server;
+ u8 sock_filename[256];
+ u8 renumber;
+ u32 custom_dev_instance;
+ u8 use_custom_mac;
+ u8 mac_address[6];
+ u8 tag[64];
+};
+
+/** \brief vhost-user interface create response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param sw_if_index - interface the operation is applied to
+*/
+define create_vhost_user_if_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief vhost-user interface modify request
+ @param client_index - opaque cookie to identify the sender
+ @param sw_if_index - interface to modify
+ @param is_server - our side is socket server
+ @param sock_filename - unix socket filename, used to speak with frontend
+ @param renumber - renumber the interface using custom_dev_instance
+ @param custom_dev_instance - custom device instance, used if renumber is set
+*/
+define modify_vhost_user_if
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 is_server;
+ u8 sock_filename[256];
+ u8 renumber;
+ u32 custom_dev_instance;
+};
+
+/** \brief vhost-user interface modify response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define modify_vhost_user_if_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief vhost-user interface delete request
+ @param client_index - opaque cookie to identify the sender
+ @param sw_if_index - interface to delete
+*/
+define delete_vhost_user_if
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+/** \brief vhost-user interface delete response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define delete_vhost_user_if_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief vhost-user interface details structure
+ @param sw_if_index - index of the interface
+ @param interface_name - name of interface
+ @param virtio_net_hdr_sz - net header size
+ @param features - interface features
+ @param is_server - vhost-user server socket
+ @param sock_filename - socket filename
+ @param num_regions - number of used memory regions
+ @param sock_errno - socket errno
+*/
+define sw_interface_vhost_user_details
+{
+ u32 context;
+ u32 sw_if_index;
+ u8 interface_name[64];
+ u32 virtio_net_hdr_sz;
+ u64 features;
+ u8 is_server;
+ u8 sock_filename[256];
+ u32 num_regions;
+ i32 sock_errno;
+};
+
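+/** \brief vhost-user interface dump request
+ @param client_index - opaque cookie to identify the sender
+*/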
+define sw_interface_vhost_user_dump
+{
+ u32 client_index;
+ u32 context;
+};
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/virtio/vhost_user_api.c b/src/vnet/devices/virtio/vhost_user_api.c
new file mode 100644
index 00000000000..dd517c26f55
--- /dev/null
+++ b/src/vnet/devices/virtio/vhost_user_api.c
@@ -0,0 +1,262 @@
+/*
+ *------------------------------------------------------------------
+ * vhost-user_api.c - vhost-user api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/devices/virtio/vhost-user.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define endian-swap functions */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(CREATE_VHOST_USER_IF, create_vhost_user_if) \
+_(MODIFY_VHOST_USER_IF, modify_vhost_user_if) \
+_(DELETE_VHOST_USER_IF, delete_vhost_user_if) \
+_(SW_INTERFACE_VHOST_USER_DUMP, sw_interface_vhost_user_dump) \
+_(SW_INTERFACE_VHOST_USER_DETAILS, sw_interface_vhost_user_details)
+
+/*
+ * WARNING: replicated pending api refactor completion
+ */
+static void
+send_sw_interface_flags_deleted (vpe_api_main_t * am,
+ unix_shared_memory_queue_t * q,
+ u32 sw_if_index)
+{
+ vl_api_sw_interface_set_flags_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SET_FLAGS);
+ mp->sw_if_index = ntohl (sw_if_index);
+
+ mp->admin_up_down = 0;
+ mp->link_up_down = 0;
+ mp->deleted = 1;
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_create_vhost_user_if_t_handler (vl_api_create_vhost_user_if_t * mp)
+{
+ int rv = 0;
+ vl_api_create_vhost_user_if_reply_t *rmp;
+ u32 sw_if_index = (u32) ~ 0;
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vlib_get_main ();
+
+ rv = vhost_user_create_if (vnm, vm, (char *) mp->sock_filename,
+ mp->is_server, &sw_if_index, (u64) ~ 0,
+ mp->renumber, ntohl (mp->custom_dev_instance),
+ (mp->use_custom_mac) ? mp->mac_address : NULL);
+
+ /* Remember an interface tag for the new interface */
+ if (rv == 0)
+ {
+ /* If a tag was supplied... */
+ if (mp->tag[0])
+ {
+ /* Make sure it's a proper C-string */
+ mp->tag[ARRAY_LEN (mp->tag) - 1] = 0;
+ u8 *tag = format (0, "%s%c", mp->tag, 0);
+ vnet_set_sw_interface_tag (vnm, tag, sw_if_index);
+ }
+ }
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_CREATE_VHOST_USER_IF_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_modify_vhost_user_if_t_handler (vl_api_modify_vhost_user_if_t * mp)
+{
+ int rv = 0;
+ vl_api_modify_vhost_user_if_reply_t *rmp;
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vlib_get_main ();
+
+ rv = vhost_user_modify_if (vnm, vm, (char *) mp->sock_filename,
+ mp->is_server, sw_if_index, (u64) ~ 0,
+ mp->renumber, ntohl (mp->custom_dev_instance));
+
+ REPLY_MACRO (VL_API_MODIFY_VHOST_USER_IF_REPLY);
+}
+
+static void
+vl_api_delete_vhost_user_if_t_handler (vl_api_delete_vhost_user_if_t * mp)
+{
+ int rv = 0;
+ vl_api_delete_vhost_user_if_reply_t *rmp;
+ vpe_api_main_t *vam = &vpe_api_main;
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vlib_get_main ();
+
+ rv = vhost_user_delete_if (vnm, vm, sw_if_index);
+
+ REPLY_MACRO (VL_API_DELETE_VHOST_USER_IF_REPLY);
+ if (!rv)
+ {
+ unix_shared_memory_queue_t *q =
+ vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ vnet_clear_sw_interface_tag (vnm, sw_if_index);
+ send_sw_interface_flags_deleted (vam, q, sw_if_index);
+ }
+}
+
+static void
+ vl_api_sw_interface_vhost_user_details_t_handler
+ (vl_api_sw_interface_vhost_user_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void
+send_sw_interface_vhost_user_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t * q,
+ vhost_user_intf_details_t * vui,
+ u32 context)
+{
+ vl_api_sw_interface_vhost_user_details_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_VHOST_USER_DETAILS);
+ mp->sw_if_index = ntohl (vui->sw_if_index);
+ mp->virtio_net_hdr_sz = ntohl (vui->virtio_net_hdr_sz);
+ mp->features = clib_net_to_host_u64 (vui->features);
+ mp->is_server = vui->is_server;
+ mp->num_regions = ntohl (vui->num_regions);
+ mp->sock_errno = ntohl (vui->sock_errno);
+ mp->context = context;
+
+ strncpy ((char *) mp->sock_filename,
+ (char *) vui->sock_filename, ARRAY_LEN (mp->sock_filename) - 1);
+ strncpy ((char *) mp->interface_name,
+ (char *) vui->if_name, ARRAY_LEN (mp->interface_name) - 1);
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+ vl_api_sw_interface_vhost_user_dump_t_handler
+ (vl_api_sw_interface_vhost_user_dump_t * mp)
+{
+ int rv = 0;
+ vpe_api_main_t *am = &vpe_api_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vlib_get_main ();
+ vhost_user_intf_details_t *ifaces = NULL;
+ vhost_user_intf_details_t *vuid = NULL;
+ unix_shared_memory_queue_t *q;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ rv = vhost_user_dump_ifs (vnm, vm, &ifaces);
+ if (rv)
+ return;
+
+ vec_foreach (vuid, ifaces)
+ {
+ send_sw_interface_vhost_user_details (am, q, vuid, mp->context);
+ }
+ vec_free (ifaces);
+}
+
+/*
+ * vhost-user_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_vhost_user;
+#undef _
+}
+
+static clib_error_t *
+vhost_user_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (vhost_user_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/dhcp/client.c b/src/vnet/dhcp/client.c
new file mode 100644
index 00000000000..c352e3109ee
--- /dev/null
+++ b/src/vnet/dhcp/client.c
@@ -0,0 +1,1031 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vlib/vlib.h>
+#include <vnet/dhcp/proxy.h>
+#include <vnet/fib/fib_table.h>
+
+dhcp_client_main_t dhcp_client_main;
+static u8 * format_dhcp_client_state (u8 * s, va_list * va);
+static vlib_node_registration_t dhcp_client_process_node;
+
+static void
+dhcp_client_acquire_address (dhcp_client_main_t * dcm, dhcp_client_t * c)
+{
+ /*
+ * Install any/all info gleaned from dhcp, right here
+ */
+ ip4_add_del_interface_address (dcm->vlib_main, c->sw_if_index,
+ (void *) &c->leased_address,
+ c->subnet_mask_width, 0 /*is_del*/);
+}
+
+static void
+dhcp_client_release_address (dhcp_client_main_t * dcm, dhcp_client_t * c)
+{
+ /*
+ * Remove any/all info gleaned from dhcp, right here. Caller(s)
+ * have not wiped out the info yet.
+ */
+
+ ip4_add_del_interface_address (dcm->vlib_main, c->sw_if_index,
+ (void *) &c->leased_address,
+ c->subnet_mask_width, 1 /*is_del*/);
+}
+
+static void
+set_l2_rewrite (dhcp_client_main_t * dcm, dhcp_client_t * c)
+{
+ /* Acquire the L2 rewrite string for the indicated sw_if_index */
+ c->l2_rewrite = vnet_build_rewrite_for_sw_interface(
+ dcm->vnet_main,
+ c->sw_if_index,
+ VNET_LINK_IP4,
+ 0 /* broadcast */);
+}
+
+/*
+ * dhcp_client_for_us - server-to-client callback.
+ * Called from proxy_node.c:dhcp_proxy_to_client_input().
+ * This function first decides that the packet in question is
+ * actually for the dhcp client code in case we're also acting as
+ * a dhcp proxy. Ay caramba, what a folly!
+ */
+int dhcp_client_for_us (u32 bi, vlib_buffer_t * b,
+ ip4_header_t * ip,
+ udp_header_t * udp,
+ dhcp_header_t * dhcp)
+{
+ dhcp_client_main_t * dcm = &dhcp_client_main;
+ vlib_main_t * vm = dcm->vlib_main;
+ dhcp_client_t * c;
+ uword * p;
+ f64 now = vlib_time_now (dcm->vlib_main);
+ u8 dhcp_message_type = 0;
+ dhcp_option_t * o;
+
+ /*
+ * Doing dhcp client on this interface?
+ * Presumably we will always receive dhcp clnt for-us pkts on
+ * the interface that's asking for an address.
+ */
+ p = hash_get (dcm->client_by_sw_if_index,
+ vnet_buffer(b)->sw_if_index [VLIB_RX]);
+ if (p == 0)
+ return 0; /* no */
+
+ c = pool_elt_at_index (dcm->clients, p[0]);
+
+ /* Mixing dhcp relay and dhcp proxy? DGMS... */
+ if (c->state == DHCP_BOUND && c->retry_count == 0)
+ return 0;
+
+ /* parse through the packet, learn what we can */
+ if (dhcp->your_ip_address.as_u32)
+ c->leased_address.as_u32 = dhcp->your_ip_address.as_u32;
+
+ o = (dhcp_option_t *) dhcp->options;
+
+ while (o->option != 0xFF /* end of options */ &&
+ (u8 *) o < (b->data + b->current_data + b->current_length))
+ {
+ switch (o->option)
+ {
+ case 53: /* dhcp message type */
+ dhcp_message_type = o->data[0];
+ break;
+
+ case 51: /* lease time */
+ {
+ u32 lease_time_in_seconds =
+ clib_host_to_net_u32 (o->data_as_u32[0]);
+ c->lease_expires = now + (f64) lease_time_in_seconds;
+ c->lease_lifetime = lease_time_in_seconds;
+ /* Set a sensible default, in case we don't get opt 58 */
+ c->lease_renewal_interval = lease_time_in_seconds / 2;
+ }
+ break;
+
+ case 58: /* lease renew time in seconds */
+ {
+ u32 lease_renew_time_in_seconds =
+ clib_host_to_net_u32 (o->data_as_u32[0]);
+ c->lease_renewal_interval = lease_renew_time_in_seconds;
+ }
+ break;
+
+ case 54: /* dhcp server address */
+ c->dhcp_server.as_u32 = o->data_as_u32[0];
+ break;
+
+ case 1: /* subnet mask */
+ {
+ u32 subnet_mask =
+ clib_host_to_net_u32 (o->data_as_u32[0]);
+ c->subnet_mask_width = count_set_bits (subnet_mask);
+ }
+ break;
+ case 3: /* router address */
+ {
+ u32 router_address = o->data_as_u32[0];
+ c->router_address.as_u32 = router_address;
+ }
+ break;
+
+ case 12: /* hostname */
+ {
+ /* Replace the existing hostname if necessary */
+ vec_free (c->hostname);
+ vec_validate (c->hostname, o->length - 1);
+ clib_memcpy (c->hostname, o->data, o->length);
+ }
+ break;
+
+ /* $$$$ Your message in this space, parse more options */
+ default:
+ break;
+ }
+
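+      /* skip the 2-byte (option, length) header plus the option payload */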
+ o = (dhcp_option_t *) (((uword) o) + (o->length + 2));
+ }
+
+ switch (c->state)
+ {
+ case DHCP_DISCOVER:
+ if (dhcp_message_type != DHCP_PACKET_OFFER)
+ {
+ clib_warning ("sw_if_index %d state %U message type %d",
+ c->sw_if_index, format_dhcp_client_state,
+ c->state, dhcp_message_type);
+ c->next_transmit = now + 5.0;
+ break;
+ }
+ /* Received an offer, go send a request */
+ c->state = DHCP_REQUEST;
+ c->retry_count = 0;
+ c->next_transmit = 0; /* send right now... */
+ /* Poke the client process, which will send the request */
+ vlib_process_signal_event (vm, dhcp_client_process_node.index,
+ EVENT_DHCP_CLIENT_WAKEUP, c - dcm->clients);
+ break;
+
+ case DHCP_BOUND:
+ case DHCP_REQUEST:
+ if (dhcp_message_type != DHCP_PACKET_ACK)
+ {
+ clib_warning ("sw_if_index %d state %U message type %d",
+ c->sw_if_index, format_dhcp_client_state,
+ c->state, dhcp_message_type);
+ c->next_transmit = now + 5.0;
+ break;
+ }
+ /* OK, we own the address (etc), add to the routing table(s) */
+ if (c->state == DHCP_REQUEST)
+ {
+ void (*fp)(u32, u32, u8 *, u8, u8 *, u8 *, u8 *) = c->event_callback;
+
+ dhcp_client_acquire_address (dcm, c);
+
+ /*
+ * Configure default IP route:
+ */
+ if (c->router_address.as_u32)
+ {
+ fib_prefix_t all_0s =
+ {
+ .fp_len = 0,
+ .fp_addr.ip4.as_u32 = 0x0,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ };
+ ip46_address_t nh =
+ {
+ .ip4 = c->router_address,
+ };
+
+ fib_table_entry_path_add (fib_table_get_index_for_sw_if_index(
+ FIB_PROTOCOL_IP4,
+ c->sw_if_index),
+ &all_0s,
+ FIB_SOURCE_DHCP,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh,
+ c->sw_if_index,
+ ~0,
+ 1,
+ NULL, // no label stack
+ FIB_ROUTE_PATH_FLAG_NONE);
+ }
+
+ /*
+ * Call the user's event callback to report DHCP information
+ */
+ if (fp)
+ (*fp) (c->client_index, /* client index */
+ c->pid,
+ c->hostname,
+ 0, /* is_ipv6 */
+ (u8 *)&c->leased_address, /* host IP address */
+ (u8 *)&c->router_address, /* router IP address */
+ (u8 *)(c->l2_rewrite + 6));/* host MAC address */
+ }
+
+ c->state = DHCP_BOUND;
+ c->retry_count = 0;
+ c->next_transmit = now + (f64) c->lease_renewal_interval;
+ c->lease_expires = now + (f64) c->lease_lifetime;
+ break;
+
+ default:
+ clib_warning ("client %d bogus state %d",
+ c - dcm->clients, c->state);
+ break;
+ }
+
+ /* drop the pkt, return 1 */
+ vlib_buffer_free (vm, &bi, 1);
+ return 1;
+}
+
+static void
+send_dhcp_pkt (dhcp_client_main_t * dcm, dhcp_client_t * c,
+ dhcp_packet_type_t type, int is_broadcast)
+{
+ vlib_main_t * vm = dcm->vlib_main;
+ vnet_main_t * vnm = dcm->vnet_main;
+ vnet_hw_interface_t * hw = vnet_get_sup_hw_interface (vnm, c->sw_if_index);
+ vnet_sw_interface_t * sup_sw
+ = vnet_get_sup_sw_interface (vnm, c->sw_if_index);
+ vnet_sw_interface_t * sw = vnet_get_sw_interface (vnm, c->sw_if_index);
+ vlib_buffer_t * b;
+ u32 bi;
+ ip4_header_t * ip;
+ udp_header_t * udp;
+ dhcp_header_t * dhcp;
+ u32 * to_next;
+ vlib_frame_t * f;
+ dhcp_option_t * o;
+ u16 udp_length, ip_length;
+
+ /* Interface(s) down? */
+ if ((hw->flags & VNET_HW_INTERFACE_FLAG_LINK_UP) == 0)
+ return;
+ if ((sup_sw->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) == 0)
+ return;
+ if ((sw->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) == 0)
+ return;
+
+ if (vlib_buffer_alloc (vm, &bi, 1) != 1) {
+ clib_warning ("buffer allocation failure");
+ c->next_transmit = 0;
+ return;
+ }
+
+ /* Build a dhcpv4 pkt from whole cloth */
+ b = vlib_get_buffer (vm, bi);
+
+ ASSERT (b->current_data == 0);
+
+ vnet_buffer(b)->sw_if_index[VLIB_RX] = c->sw_if_index;
+ if (is_broadcast)
+ {
+ f = vlib_get_frame_to_node (vm, hw->output_node_index);
+ vnet_buffer(b)->sw_if_index[VLIB_TX] = c->sw_if_index;
+ clib_memcpy (b->data, c->l2_rewrite, vec_len(c->l2_rewrite));
+ ip = (void *)
+ (((u8 *)vlib_buffer_get_current (b)) + vec_len (c->l2_rewrite));
+ }
+ else
+ {
+ f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
+ vnet_buffer(b)->sw_if_index[VLIB_TX] = ~0; /* use interface VRF */
+ ip = vlib_buffer_get_current (b);
+ }
+
+ /* Enqueue the packet right now */
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi;
+ f->n_vectors = 1;
+
+ if (is_broadcast)
+ vlib_put_frame_to_node (vm, hw->output_node_index, f);
+ else
+ vlib_put_frame_to_node (vm, ip4_lookup_node.index, f);
+
+ udp = (udp_header_t *)(ip+1);
+ dhcp = (dhcp_header_t *)(udp+1);
+
+ /* $$$ optimize, maybe */
+ memset (ip, 0, sizeof (*ip) + sizeof (*udp) + sizeof (*dhcp));
+
+ ip->ip_version_and_header_length = 0x45;
+ ip->ttl = 128;
+ ip->protocol = IP_PROTOCOL_UDP;
+
+ if (is_broadcast)
+ {
+ /* src = 0.0.0.0, dst = 255.255.255.255 */
+ ip->dst_address.as_u32 = ~0;
+ }
+ else
+ {
+ /* Renewing an active lease, plain old ip4 src/dst */
+ ip->src_address.as_u32 = c->leased_address.as_u32;
+ ip->dst_address.as_u32 = c->dhcp_server.as_u32;
+ }
+
+ udp->src_port = clib_host_to_net_u16 (UDP_DST_PORT_dhcp_to_client);
+ udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_dhcp_to_server);
+
+ /* Send the interface MAC address */
+ clib_memcpy (dhcp->client_hardware_address, c->l2_rewrite + 6, 6);
+
+ /* Lease renewal, set up client_ip_address */
+ if (is_broadcast == 0)
+ dhcp->client_ip_address.as_u32 = c->leased_address.as_u32;
+
+ dhcp->opcode = 1; /* request, all we send */
+ dhcp->hardware_type = 1; /* ethernet */
+ dhcp->hardware_address_length = 6;
+ dhcp->transaction_identifier = c->transaction_id;
+ dhcp->flags = clib_host_to_net_u16(is_broadcast ? DHCP_FLAG_BROADCAST : 0);
+ dhcp->magic_cookie.as_u32 = DHCP_MAGIC;
+
+ o = (dhcp_option_t * )dhcp->options;
+
+ /* Send option 53, the DHCP message type */
+ o->option = 53;
+ o->length = 1;
+ o->data[0] = type;
+ o = (dhcp_option_t *) (((uword) o) + (o->length + 2));
+
+ /* Send option 57, max msg length */
+ if (0 /* not needed, apparently */)
+ {
+ o->option = 57;
+ o->length = 2;
+ {
+ u16 *o2 = (u16 *) o->data;
+ *o2 = clib_host_to_net_u16 (1152);
+ o = (dhcp_option_t *) (((uword) o) + (o->length + 2));
+ }
+ }
+
+ /*
+ * If server ip address is available with non-zero value,
+ * option 54 (DHCP Server Identifier) is sent.
+ */
+ if (c->dhcp_server.as_u32)
+ {
+ o->option = 54;
+ o->length = 4;
+ clib_memcpy (o->data, &c->dhcp_server.as_u32, 4);
+ o = (dhcp_option_t *) (((uword) o) + (o->length + 2));
+ }
+
+ /* send option 50, requested IP address */
+ if (c->leased_address.as_u32)
+ {
+ o->option = 50;
+ o->length = 4;
+ clib_memcpy (o->data, &c->leased_address.as_u32, 4);
+ o = (dhcp_option_t *) (((uword) o) + (o->length + 2));
+ }
+
+ /* send option 12, host name */
+ if (vec_len (c->hostname))
+ {
+ o->option = 12;
+ o->length = vec_len (c->hostname);
+ clib_memcpy (o->data, c->hostname, vec_len (c->hostname));
+ o = (dhcp_option_t *) (((uword) o) + (o->length + 2));
+ }
+
+ /* $$ maybe send the client s/w version if anyone cares */
+
+ /*
+ * send option 55, parameter request list
+ * The current list (see below) matches the Linux dhcp client's list.
+ * Any specific dhcp server config and/or dhcp server may or may
+ * not yield specific options.
+ */
+ o->option = 55;
+ o->length = vec_len (c->option_55_data);
+ clib_memcpy (o->data, c->option_55_data, vec_len(c->option_55_data));
+ o = (dhcp_option_t *) (((uword) o) + (o->length + 2));
+
+ /* End of list */
+ o->option = 0xff;
+ o->length = 0;
+ o++;
+
+ b->current_length = ((u8 *)o) - b->data;
+
+ /* fix ip length, checksum and udp length */
+ ip_length = vlib_buffer_length_in_chain (vm, b);
+ if (is_broadcast)
+ ip_length -= vec_len (c->l2_rewrite);
+
+ ip->length = clib_host_to_net_u16(ip_length);
+ ip->checksum = ip4_header_checksum(ip);
+
+ udp_length = ip_length - (sizeof (*ip));
+ udp->length = clib_host_to_net_u16 (udp_length);
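+
+  /* udp->checksum is left 0 (from the earlier memset); IPv4 treats a
+     zero UDP checksum as "no checksum", which is legal here. */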
+}
+
+static int
+dhcp_discover_state (dhcp_client_main_t * dcm, dhcp_client_t * c, f64 now)
+{
+ /*
+ * State machine "DISCOVER" state. Send a dhcp discover packet,
+ * eventually back off the retry rate.
+ */
+ send_dhcp_pkt (dcm, c, DHCP_PACKET_DISCOVER, 1 /* is_broadcast */);
+
+ c->retry_count++;
+ if (c->retry_count > 10)
+ c->next_transmit = now + 5.0;
+ else
+ c->next_transmit = now + 1.0;
+ return 0;
+}
+
+static int
+dhcp_request_state (dhcp_client_main_t * dcm, dhcp_client_t * c, f64 now)
+{
+ /*
+ * State machine "REQUEST" state. Send a dhcp request packet,
+ * eventually drop back to the discover state.
+ */
+ send_dhcp_pkt (dcm, c, DHCP_PACKET_REQUEST, 1 /* is_broadcast */);
+
+ c->retry_count++;
+ if (c->retry_count > 7 /* lucky you */)
+ {
+ c->state = DHCP_DISCOVER;
+ c->next_transmit = now;
+ c->retry_count = 0;
+ return 1;
+ }
+ c->next_transmit = now + 1.0;
+ return 0;
+}
+
+static int
+dhcp_bound_state (dhcp_client_main_t * dcm, dhcp_client_t * c, f64 now)
+{
+ /*
+ * State machine "BOUND" state. Send a dhcp request packet,
+ * eventually, when the lease expires, forget the dhcp data
+ * and go back to the stone age.
+ */
+ send_dhcp_pkt (dcm, c, DHCP_PACKET_REQUEST, 0 /* is_broadcast */);
+
+ c->retry_count++;
+ if (c->retry_count > 10)
+ c->next_transmit = now + 5.0;
+ else
+ c->next_transmit = now + 1.0;
+
+ if (now > c->lease_expires)
+ {
+ if (c->router_address.as_u32)
+ {
+ fib_prefix_t all_0s =
+ {
+ .fp_len = 0,
+ .fp_addr.ip4.as_u32 = 0x0,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ };
+ ip46_address_t nh = {
+ .ip4 = c->router_address,
+ };
+
+ fib_table_entry_path_remove(fib_table_get_index_for_sw_if_index(
+ FIB_PROTOCOL_IP4,
+ c->sw_if_index),
+ &all_0s,
+ FIB_SOURCE_DHCP,
+ FIB_PROTOCOL_IP4,
+ &nh,
+ c->sw_if_index,
+ ~0,
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ }
+
+ dhcp_client_release_address (dcm, c);
+ c->state = DHCP_DISCOVER;
+ c->next_transmit = now;
+ c->retry_count = 0;
+ /* Wipe out any memory of the address we had... */
+ c->leased_address.as_u32 = 0;
+ c->subnet_mask_width = 0;
+ c->router_address.as_u32 = 0;
+ c->lease_renewal_interval = 0;
+ c->dhcp_server.as_u32 = 0;
+ return 1;
+ }
+ return 0;
+}
+
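+/* Run one client's state machine if its timer has expired. Returns the
+   process sleep interval, shortened if this client needs service sooner. */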
+static f64 dhcp_client_sm (f64 now, f64 timeout, uword pool_index)
+{
+ dhcp_client_main_t * dcm = &dhcp_client_main;
+ dhcp_client_t * c;
+
+ /* deleted, pooched, yadda yadda yadda */
+ if (pool_is_free_index (dcm->clients, pool_index))
+ return timeout;
+
+ c = pool_elt_at_index (dcm->clients, pool_index);
+
+ /* Time for us to do something with this client? */
+ if (now < c->next_transmit)
+ return timeout;
+
+ again:
+ switch (c->state)
+ {
+ case DHCP_DISCOVER: /* send a discover */
+ if (dhcp_discover_state (dcm, c, now))
+ goto again;
+ break;
+
+ case DHCP_REQUEST: /* send a request */
+ if (dhcp_request_state (dcm, c, now))
+ goto again;
+ break;
+
+ case DHCP_BOUND: /* bound, renew needed? */
+ if (dhcp_bound_state (dcm, c, now))
+ goto again;
+ break;
+
+ default:
+ clib_warning ("dhcp client %d bogus state %d",
+ c - dcm->clients, c->state);
+ break;
+ }
+
+ if (c->next_transmit < now + timeout)
+ return c->next_transmit - now;
+
+ return timeout;
+}
+
+static uword
+dhcp_client_process (vlib_main_t * vm,
+ vlib_node_runtime_t * rt,
+ vlib_frame_t * f)
+{
+ f64 timeout = 100.0;
+ f64 now;
+ uword event_type;
+ uword * event_data = 0;
+ dhcp_client_main_t * dcm = &dhcp_client_main;
+ dhcp_client_t * c;
+ int i;
+
+ while (1)
+ {
+ vlib_process_wait_for_event_or_clock (vm, timeout);
+
+ event_type = vlib_process_get_events (vm, &event_data);
+
+ now = vlib_time_now (vm);
+
+ switch (event_type)
+ {
+ case EVENT_DHCP_CLIENT_WAKEUP:
+ for (i = 0; i < vec_len (event_data); i++)
+ timeout = dhcp_client_sm (now, timeout, event_data[i]);
+ break;
+
+ case ~0:
+ pool_foreach (c, dcm->clients,
+ ({
+ timeout = dhcp_client_sm (now, timeout,
+ (uword)(c - dcm->clients));
+ }));
+ if (pool_elts (dcm->clients) == 0)
+ timeout = 100.0;
+ break;
+ }
+
+ vec_reset_length (event_data);
+ }
+
+ /* NOTREACHED */
+ return 0;
+}
+
+VLIB_REGISTER_NODE (dhcp_client_process_node,static) = {
+ .function = dhcp_client_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "dhcp-client-process",
+ .process_log2_n_stack_bytes = 16,
+};
+
+static u8 * format_dhcp_client_state (u8 * s, va_list * va)
+{
+ dhcp_client_state_t state = va_arg (*va, dhcp_client_state_t);
+ char * str = "BOGUS!";
+
+ switch (state)
+ {
+#define _(a) \
+ case a: \
+ str = #a; \
+ break;
+ foreach_dhcp_client_state;
+#undef _
+ default:
+ break;
+ }
+
+ s = format (s, "%s", str);
+ return s;
+}
+
+static u8 * format_dhcp_client (u8 * s, va_list * va)
+{
+ dhcp_client_main_t * dcm = va_arg (*va, dhcp_client_main_t *);
+ dhcp_client_t * c = va_arg (*va, dhcp_client_t *);
+ int verbose = va_arg (*va, int);
+
+ s = format (s, "[%d] %U state %U ", c - dcm->clients,
+ format_vnet_sw_if_index_name, dcm->vnet_main, c->sw_if_index,
+ format_dhcp_client_state, c->state);
+
+ if (c->leased_address.as_u32)
+ s = format (s, "addr %U/%d gw %U\n",
+ format_ip4_address, &c->leased_address,
+ c->subnet_mask_width, format_ip4_address, &c->router_address);
+ else
+ s = format (s, "no address\n");
+
+ if (verbose)
+ {
+ s = format (s, "retry count %d, next xmt %.2f",
+ c->retry_count, c->next_transmit);
+ }
+ return s;
+}
+
+static clib_error_t *
+show_dhcp_client_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ dhcp_client_main_t * dcm = &dhcp_client_main;
+ dhcp_client_t * c;
+ int verbose = 0;
+ u32 sw_if_index = ~0;
+ uword * p;
+
+ while (unformat_check_input(input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "intfc %U",
+ unformat_vnet_sw_interface, dcm->vnet_main,
+ &sw_if_index))
+ ;
+ else if (unformat (input, "verbose"))
+ verbose = 1;
+ else
+ break;
+ }
+
+ if (sw_if_index != ~0)
+ {
+ p = hash_get (dcm->client_by_sw_if_index, sw_if_index);
+ if (p == 0)
+ return clib_error_return (0, "dhcp client not configured");
+ c = pool_elt_at_index (dcm->clients, p[0]);
+ vlib_cli_output (vm, "%U", format_dhcp_client, dcm, c, verbose);
+ return 0;
+ }
+
+ pool_foreach (c, dcm->clients,
+ ({
+ vlib_cli_output (vm, "%U", format_dhcp_client, dcm, c, verbose);
+ }));
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (show_dhcp_client_command, static) = {
+ .path = "show dhcp client",
+ .short_help = "show dhcp client [intfc <intfc>] [verbose]",
+ .function = show_dhcp_client_command_fn,
+};
+
+
+int dhcp_client_add_del (dhcp_client_add_del_args_t * a)
+{
+ dhcp_client_main_t * dcm = &dhcp_client_main;
+ vlib_main_t * vm = dcm->vlib_main;
+ dhcp_client_t * c;
+ uword * p;
+ fib_prefix_t all_1s =
+ {
+ .fp_len = 32,
+ .fp_addr.ip4.as_u32 = 0xffffffff,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ };
+ fib_prefix_t all_0s =
+ {
+ .fp_len = 0,
+ .fp_addr.ip4.as_u32 = 0x0,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ };
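+
+  /* all_1s (255.255.255.255/32) is installed as a LOCAL entry so broadcast
+   * DHCP replies are delivered to us; all_0s (0.0.0.0/0) is the default
+   * route via the router address learned in option 3. */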
+
+ p = hash_get (dcm->client_by_sw_if_index, a->sw_if_index);
+
+ if ((p && a->is_add) || (!p && a->is_add == 0))
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ if (a->is_add)
+ {
+ pool_get (dcm->clients, c);
+ memset (c, 0, sizeof (*c));
+ c->state = DHCP_DISCOVER;
+ c->sw_if_index = a->sw_if_index;
+ c->client_index = a->client_index;
+ c->pid = a->pid;
+ c->event_callback = a->event_callback;
+ c->option_55_data = a->option_55_data;
+ c->hostname = a->hostname;
+ c->client_identifier = a->client_identifier;
+ do {
+ c->transaction_id = random_u32 (&dcm->seed);
+ } while (c->transaction_id == 0);
+ set_l2_rewrite (dcm, c);
+ hash_set (dcm->client_by_sw_if_index, a->sw_if_index, c - dcm->clients);
+
+ /* this add is ref counted by FIB so we can add for each itf */
+ fib_table_entry_special_add(fib_table_get_index_for_sw_if_index(
+ FIB_PROTOCOL_IP4,
+ c->sw_if_index),
+ &all_1s,
+ FIB_SOURCE_DHCP,
+ FIB_ENTRY_FLAG_LOCAL,
+ ADJ_INDEX_INVALID);
+
+ /*
+ * enable the interface to RX IPv4 packets
+ * this is also ref counted
+ */
+ ip4_sw_interface_enable_disable (c->sw_if_index, 1);
+
+ vlib_process_signal_event (vm, dhcp_client_process_node.index,
+ EVENT_DHCP_CLIENT_WAKEUP, c - dcm->clients);
+ }
+ else
+ {
+ c = pool_elt_at_index (dcm->clients, p[0]);
+
+ fib_table_entry_special_remove(fib_table_get_index_for_sw_if_index(
+ FIB_PROTOCOL_IP4,
+ c->sw_if_index),
+ &all_1s,
+ FIB_SOURCE_DHCP);
+
+ if (c->router_address.as_u32)
+ {
+ ip46_address_t nh = {
+ .ip4 = c->router_address,
+ };
+
+ fib_table_entry_path_remove(fib_table_get_index_for_sw_if_index(
+ FIB_PROTOCOL_IP4,
+ c->sw_if_index),
+ &all_0s,
+ FIB_SOURCE_DHCP,
+ FIB_PROTOCOL_IP4,
+ &nh,
+ c->sw_if_index,
+ ~0,
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ }
+ ip4_sw_interface_enable_disable (c->sw_if_index, 0);
+
+ vec_free (c->option_55_data);
+ vec_free (c->hostname);
+ vec_free (c->client_identifier);
+ vec_free (c->l2_rewrite);
+ hash_unset (dcm->client_by_sw_if_index, c->sw_if_index);
+ pool_put (dcm->clients, c);
+ }
+ return 0;
+}
+
+int
+dhcp_client_config (vlib_main_t * vm,
+ u32 sw_if_index,
+ u8 * hostname,
+ u32 is_add,
+ u32 client_index,
+ void * event_callback,
+ u32 pid)
+{
+ dhcp_client_add_del_args_t _a, *a = &_a;
+ int rv;
+
+ memset (a, 0, sizeof (*a));
+ a->is_add = is_add;
+ a->sw_if_index = sw_if_index;
+ a->client_index = client_index;
+ a->pid = pid;
+ a->event_callback = event_callback;
+ vec_validate(a->hostname, strlen((char *)hostname) - 1);
+ strncpy((char *)a->hostname, (char *)hostname, vec_len(a->hostname));
+ a->client_identifier = format (0, "vpe 1.0%c", 0);
+ /*
+ * Option 55 request list. These data precisely match
+ * the Ubuntu dhcp client. YMMV.
+ */
+
+ /* Subnet Mask */
+ vec_add1 (a->option_55_data, 1);
+ /* Broadcast address */
+ vec_add1 (a->option_55_data, 28);
+ /* time offset */
+ vec_add1 (a->option_55_data, 2);
+ /* Router */
+ vec_add1 (a->option_55_data, 3);
+ /* Domain Name */
+ vec_add1 (a->option_55_data, 15);
+ /* DNS */
+ vec_add1 (a->option_55_data, 6);
+ /* Domain search */
+ vec_add1 (a->option_55_data, 119);
+ /* Host name */
+ vec_add1 (a->option_55_data, 12);
+ /* NetBIOS name server */
+ vec_add1 (a->option_55_data, 44);
+ /* NetBIOS Scope */
+ vec_add1 (a->option_55_data, 47);
+ /* MTU */
+ vec_add1 (a->option_55_data, 26);
+ /* Classless static route */
+ vec_add1 (a->option_55_data, 121);
+ /* NTP servers */
+ vec_add1 (a->option_55_data, 42);
+
+ rv = dhcp_client_add_del (a);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_INVALID_VALUE:
+
+ vec_free (a->hostname);
+ vec_free (a->client_identifier);
+ vec_free (a->option_55_data);
+
+ if (is_add)
+ clib_warning ("dhcp client already enabled on intf_idx %d",
+ sw_if_index);
+ else
+ clib_warning ("dhcp client not enabled on on intf_idx %d",
+ sw_if_index);
+ break;
+
+ default:
+ clib_warning ("dhcp_client_add_del returned %d", rv);
+ }
+
+ return rv;
+}
+
+static clib_error_t *
+dhcp_client_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+
+ dhcp_client_main_t * dcm = &dhcp_client_main;
+ u32 sw_if_index;
+ u8 * hostname = 0;
+ u8 sw_if_index_set = 0;
+ int is_add = 1;
+ dhcp_client_add_del_args_t _a, *a = &_a;
+ int rv;
+
+ while (unformat_check_input(input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "intfc %U",
+ unformat_vnet_sw_interface, dcm->vnet_main,
+ &sw_if_index))
+ sw_if_index_set = 1;
+ else if (unformat (input, "hostname %v", &hostname))
+ ;
+ else if (unformat (input, "del"))
+ is_add = 0;
+ else
+ break;
+ }
+
+ if (sw_if_index_set == 0)
+ return clib_error_return (0, "interface not specified");
+
+ memset (a, 0, sizeof (*a));
+ a->is_add = is_add;
+ a->sw_if_index = sw_if_index;
+ a->hostname = hostname;
+ a->client_identifier = format (0, "vpe 1.0%c", 0);
+
+ /*
+ * Option 55 request list. These data precisely match
+ * the Ubuntu dhcp client. YMMV.
+ */
+
+ /* Subnet Mask */
+ vec_add1 (a->option_55_data, 1);
+ /* Broadcast address */
+ vec_add1 (a->option_55_data, 28);
+ /* time offset */
+ vec_add1 (a->option_55_data, 2);
+ /* Router */
+ vec_add1 (a->option_55_data, 3);
+ /* Domain Name */
+ vec_add1 (a->option_55_data, 15);
+ /* DNS */
+ vec_add1 (a->option_55_data, 6);
+ /* Domain search */
+ vec_add1 (a->option_55_data, 119);
+ /* Host name */
+ vec_add1 (a->option_55_data, 12);
+ /* NetBIOS name server */
+ vec_add1 (a->option_55_data, 44);
+ /* NetBIOS Scope */
+ vec_add1 (a->option_55_data, 47);
+ /* MTU */
+ vec_add1 (a->option_55_data, 26);
+ /* Classless static route */
+ vec_add1 (a->option_55_data, 121);
+ /* NTP servers */
+ vec_add1 (a->option_55_data, 42);
+
+ rv = dhcp_client_add_del (a);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_INVALID_VALUE:
+
+ vec_free (a->hostname);
+ vec_free (a->client_identifier);
+ vec_free (a->option_55_data);
+ if (is_add)
+ return clib_error_return (0, "dhcp client already enabled on %U",
+ format_vnet_sw_if_index_name,
+ dcm->vnet_main, sw_if_index);
+ else
+ return clib_error_return (0, "dhcp client not enabled on %U",
+ format_vnet_sw_if_index_name,
+ dcm->vnet_main, sw_if_index);
+ break;
+
+ default:
+ vlib_cli_output (vm, "dhcp_client_add_del returned %d", rv);
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (dhcp_client_set_command, static) = {
+ .path = "set dhcp client",
+ .short_help = "set dhcp client [del] intfc <interface> [hostname <name>]",
+ .function = dhcp_client_set_command_fn,
+};
+
+static clib_error_t *
+dhcp_client_init (vlib_main_t * vm)
+{
+ dhcp_client_main_t * dcm = &dhcp_client_main;
+
+ dcm->vlib_main = vm;
+ dcm->vnet_main = vnet_get_main();
+ dcm->seed = 0xdeaddabe;
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (dhcp_client_init);
diff --git a/src/vnet/dhcp/client.h b/src/vnet/dhcp/client.h
new file mode 100644
index 00000000000..d15e686b636
--- /dev/null
+++ b/src/vnet/dhcp/client.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * client.h: dhcp client
+ */
+
+#ifndef included_dhcp_client_h
+#define included_dhcp_client_h
+
+#define foreach_dhcp_client_state \
+_(DHCP_DISCOVER) \
+_(DHCP_REQUEST) \
+_(DHCP_BOUND)
+
+typedef enum {
+#define _(a) a,
+ foreach_dhcp_client_state
+#undef _
+} dhcp_client_state_t;
+
+typedef struct {
+ dhcp_client_state_t state;
+
+ /* the interface in question */
+ u32 sw_if_index;
+
+ /* State machine retry counter */
+ u32 retry_count;
+
+ /* Send next pkt at this time */
+ f64 next_transmit;
+ f64 lease_expires;
+
+ /* DHCP transaction ID, a random number */
+ u32 transaction_id;
+
+ /* leased address and other info learned from DHCP */
+ ip4_address_t leased_address; /* from your_ip_address field */
+ ip4_address_t dhcp_server;
+ u32 subnet_mask_width; /* option 1 */
+ ip4_address_t router_address; /* option 3 */
+ u32 lease_renewal_interval; /* option 58 */
+ u32 lease_lifetime; /* option 51 */
+
+ /* Requested data (option 55) */
+ u8 * option_55_data;
+
+ u8 * l2_rewrite;
+
+ /* hostname and software client identifiers */
+ u8 * hostname;
+ u8 * client_identifier; /* software version, e.g. vpe 1.0*/
+
+ /* Information used for event callback */
+ u32 client_index;
+ u32 pid;
+ void * event_callback;
+} dhcp_client_t;
+
+typedef struct {
+ /* DHCP client pool */
+ dhcp_client_t * clients;
+ uword * client_by_sw_if_index;
+ u32 seed;
+
+ /* convenience */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} dhcp_client_main_t;
+
+typedef struct {
+ int is_add;
+ u32 sw_if_index;
+
+ /* vectors, consumed by dhcp client code */
+ u8 * hostname;
+ u8 * client_identifier;
+
+ /* Bytes containing requested option numbers */
+ u8 * option_55_data;
+
+ /* Information used for event callback */
+ u32 client_index;
+ u32 pid;
+ void * event_callback;
+} dhcp_client_add_del_args_t;
+
+dhcp_client_main_t dhcp_client_main;
+
+#define EVENT_DHCP_CLIENT_WAKEUP 1
+
+int dhcp_client_for_us (u32 bi0,
+ vlib_buffer_t * b0,
+ ip4_header_t * ip0,
+ udp_header_t * u0,
+ dhcp_header_t * dh0);
+
+int dhcp_client_config (vlib_main_t * vm,
+ u32 sw_if_index,
+ u8 * hostname,
+ u32 is_add,
+ u32 client_index,
+ void *event_callback,
+ u32 pid);
+
+#endif /* included_dhcp_client_h */
diff --git a/src/vnet/dhcp/packet.h b/src/vnet/dhcp/packet.h
new file mode 100644
index 00000000000..267a8eafc93
--- /dev/null
+++ b/src/vnet/dhcp/packet.h
@@ -0,0 +1,61 @@
+#ifndef included_vnet_dhcp_packet_h
+#define included_vnet_dhcp_packet_h
+
+/*
+ * DHCP packet format
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/ip/ip4_packet.h>
+
+typedef struct {
+ u8 opcode; /* 1 = request, 2 = reply */
+ u8 hardware_type; /* 1 = ethernet */
+ u8 hardware_address_length;
+ u8 hops;
+ u32 transaction_identifier;
+ u16 seconds;
+ u16 flags;
+#define DHCP_FLAG_BROADCAST (1<<15)
+ ip4_address_t client_ip_address;
+ ip4_address_t your_ip_address; /* use this one */
+ ip4_address_t server_ip_address;
+ ip4_address_t gateway_ip_address; /* use option 3, not this one */
+ u8 client_hardware_address[16];
+ u8 server_name[64];
+ u8 boot_filename[128];
+ ip4_address_t magic_cookie;
+ u8 options[0];
+} dhcp_header_t;
+
+typedef struct {
+ u8 option;
+ u8 length;
+ union {
+ u8 data[0];
+ u32 data_as_u32[0];
+ };
+} __attribute__((packed)) dhcp_option_t;
+
+typedef enum {
+ DHCP_PACKET_DISCOVER=1,
+ DHCP_PACKET_OFFER,
+ DHCP_PACKET_REQUEST,
+ DHCP_PACKET_ACK=5,
+} dhcp_packet_type_t;
+
+/* charming antique: 99.130.83.99 is the dhcp magic cookie */
+#define DHCP_MAGIC (clib_host_to_net_u32(0x63825363))
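+/* (0x63 = 99, 0x82 = 130, 0x53 = 83, 0x63 = 99 -- hence the nickname) */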
+
+#endif /* included_vnet_dhcp_packet_h */
diff --git a/src/vnet/dhcp/proxy.h b/src/vnet/dhcp/proxy.h
new file mode 100644
index 00000000000..e12c0d001b5
--- /dev/null
+++ b/src/vnet/dhcp/proxy.h
@@ -0,0 +1,92 @@
+/*
+ * proxy.h: dhcp proxy
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_dhcp_proxy_h
+#define included_dhcp_proxy_h
+
+#include <vnet/vnet.h>
+#include <vnet/dhcp/packet.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ip/ip4.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ip/format.h>
+#include <vnet/ip/udp.h>
+#include <vnet/dhcp/client.h>
+
+typedef enum {
+#define dhcp_proxy_error(n,s) DHCP_PROXY_ERROR_##n,
+#include <vnet/dhcp/proxy_error.def>
+#undef dhcp_proxy_error
+ DHCP_PROXY_N_ERROR,
+} dhcp_proxy_error_t;
+
+typedef struct {
+ u32 oui;
+ u32 fib_id;
+} vss_id;
+
+typedef union {
+ u8 as_u8[8];
+ vss_id vpn_id;
+} vss_info;
+
+typedef struct {
+ ip4_address_t dhcp_server;
+ ip4_address_t dhcp_src_address;
+ u32 insert_option_82;
+ u32 server_fib_index;
+ u32 valid;
+} dhcp_server_t;
+
+typedef struct {
+ /* Pool of DHCP servers */
+ dhcp_server_t * dhcp_servers;
+
+ /* Pool of selected DHCP server. Zero is the default server */
+ u32 * dhcp_server_index_by_rx_fib_index;
+
+ /* to drop pkts in server-to-client direction */
+ u32 error_drop_node_index;
+
+ vss_info *opt82vss;
+
+ /* hash lookup specific vrf_id -> option 82 vss suboption */
+ uword * opt82vss_index_by_vrf_id;
+
+ /* convenience */
+ dhcp_client_main_t * dhcp_client_main;
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} dhcp_proxy_main_t;
+
+dhcp_proxy_main_t dhcp_proxy_main;
+
+int dhcp_proxy_set_server (ip4_address_t *addr, ip4_address_t *src_address,
+ u32 fib_id, int insert_option_82, int is_del);
+
+int dhcp_proxy_set_server_2 (ip4_address_t *addr, ip4_address_t *src_address,
+ u32 rx_fib_id,
+ u32 server_fib_id,
+ int insert_option_82, int is_del);
+
+int dhcp_proxy_set_option82_vss(u32 vrf_id,
+ u32 oui,
+ u32 fib_id,
+ int is_del);
+#endif /* included_dhcp_proxy_h */
diff --git a/src/vnet/dhcp/proxy_error.def b/src/vnet/dhcp/proxy_error.def
new file mode 100644
index 00000000000..6aa06eb5120
--- /dev/null
+++ b/src/vnet/dhcp/proxy_error.def
@@ -0,0 +1,30 @@
+/*
+ * dhcp_proxy_error.def: dhcp proxy errors
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+dhcp_proxy_error (NONE, "no error")
+dhcp_proxy_error (NO_SERVER, "no dhcp server configured")
+dhcp_proxy_error (RELAY_TO_SERVER, "DHCP packets relayed to the server")
+dhcp_proxy_error (RELAY_TO_CLIENT, "DHCP packets relayed to clients")
+dhcp_proxy_error (OPTION_82_ERROR, "DHCP failed to insert option 82")
+dhcp_proxy_error (NO_OPTION_82, "DHCP option 82 missing")
+dhcp_proxy_error (BAD_OPTION_82, "Bad DHCP option 82 value")
+dhcp_proxy_error (BAD_FIB_ID, "DHCP option 82 fib-id to fib-index map failure")
+dhcp_proxy_error (NO_INTERFACE_ADDRESS, "DHCP no interface address")
+dhcp_proxy_error (OPTION_82_VSS_NOT_PROCESSED, "DHCP VSS not processed by DHCP server")
+dhcp_proxy_error (BAD_YIADDR, "DHCP packets with bad your_ip_address fields")
+dhcp_proxy_error (BAD_SVR_FIB_OR_ADDRESS, "DHCP packets not from DHCP server or server FIB.")
+dhcp_proxy_error (PKT_TOO_BIG, "DHCP packets which are too big.")
diff --git a/src/vnet/dhcp/proxy_node.c b/src/vnet/dhcp/proxy_node.c
new file mode 100644
index 00000000000..d0d99d7e03b
--- /dev/null
+++ b/src/vnet/dhcp/proxy_node.c
@@ -0,0 +1,1114 @@
+/*
+ * proxy_node.c: dhcp proxy node processing
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/dhcp/proxy.h>
+#include <vnet/fib/ip4_fib.h>
+
+static char * dhcp_proxy_error_strings[] = {
+#define dhcp_proxy_error(n,s) s,
+#include "proxy_error.def"
+#undef dhcp_proxy_error
+};
+
+#define foreach_dhcp_proxy_to_server_input_next \
+ _ (DROP, "error-drop") \
+ _ (LOOKUP, "ip4-lookup") \
+ _ (SEND_TO_CLIENT, "dhcp-proxy-to-client")
+
+typedef enum {
+#define _(s,n) DHCP_PROXY_TO_SERVER_INPUT_NEXT_##s,
+ foreach_dhcp_proxy_to_server_input_next
+#undef _
+ DHCP_PROXY_TO_SERVER_INPUT_N_NEXT,
+} dhcp_proxy_to_server_input_next_t;
+
+typedef struct {
+ /* 0 => to server, 1 => to client */
+ int which;
+ ip4_address_t trace_ip4_address;
+ u32 error;
+ u32 sw_if_index;
+ u32 original_sw_if_index;
+} dhcp_proxy_trace_t;
+
+#define VPP_DHCP_OPTION82_SUB1_SIZE 6
+#define VPP_DHCP_OPTION82_SUB5_SIZE 6
+#define VPP_DHCP_OPTION82_VSS_SIZE 12
+#define VPP_DHCP_OPTION82_SIZE (VPP_DHCP_OPTION82_SUB1_SIZE + \
+ VPP_DHCP_OPTION82_SUB5_SIZE + \
+ VPP_DHCP_OPTION82_VSS_SIZE + 3)
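+
+/*
+ * The trailing + 3 budgets the option-82 code byte, its length byte,
+ * and the end-of-options (0xFF) byte re-written after the suboptions;
+ * the three *_SIZE terms cover the suboption TLVs themselves.
+ */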
+
+vlib_node_registration_t dhcp_proxy_to_server_node;
+vlib_node_registration_t dhcp_proxy_to_client_node;
+
+u8 * format_dhcp_proxy_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ dhcp_proxy_trace_t * t = va_arg (*args, dhcp_proxy_trace_t *);
+
+ if (t->which == 0)
+ s = format (s, "DHCP proxy: sent to server %U\n",
+ format_ip4_address, &t->trace_ip4_address);
+ else
+ s = format (s, "DHCP proxy: broadcast to client from %U\n",
+ format_ip4_address, &t->trace_ip4_address);
+
+ if (t->error != (u32)~0)
+ s = format (s, " error: %s\n", dhcp_proxy_error_strings[t->error]);
+
+ s = format (s, " original_sw_if_index: %d, sw_if_index: %d\n",
+ t->original_sw_if_index, t->sw_if_index);
+
+ return s;
+}
+
+u8 * format_dhcp_proxy_header_with_length (u8 * s, va_list * args)
+{
+ dhcp_header_t * h = va_arg (*args, dhcp_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+ u32 header_bytes;
+
+ header_bytes = sizeof (h[0]);
+ if (max_header_bytes != 0 && header_bytes > max_header_bytes)
+ return format (s, "dhcp header truncated");
+
+ s = format (s, "DHCP Proxy");
+
+ return s;
+}
+
+static uword
+dhcp_proxy_to_server_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+ dhcp_proxy_main_t * dpm = &dhcp_proxy_main;
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+ u32 pkts_to_server=0, pkts_to_client=0, pkts_no_server=0;
+ u32 pkts_no_interface_address=0;
+ u32 pkts_too_big=0;
+ ip4_main_t * im = &ip4_main;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ udp_header_t * u0;
+ dhcp_header_t * h0;
+ ip4_header_t * ip0;
+ u32 next0;
+ u32 old0, new0;
+ ip_csum_t sum0;
+ u32 error0 = (u32) ~0;
+ u32 sw_if_index = 0;
+ u32 original_sw_if_index = 0;
+ u8 *end = NULL;
+ u32 fib_index, server_index;
+ dhcp_server_t * server;
+ u32 rx_sw_if_index;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ h0 = vlib_buffer_get_current (b0);
+
+ /*
+ * udp_local hands us the DHCP header, need udp hdr,
+ * ip hdr to relay to server
+ */
+ vlib_buffer_advance (b0, -(sizeof(*u0)));
+ u0 = vlib_buffer_get_current (b0);
+
+ /* This blows. Return traffic has src_port = 67, dst_port = 67 */
+ if (u0->src_port == clib_net_to_host_u16(UDP_DST_PORT_dhcp_to_server))
+ {
+ vlib_buffer_advance (b0, sizeof(*u0));
+ next0 = DHCP_PROXY_TO_SERVER_INPUT_NEXT_SEND_TO_CLIENT;
+ error0 = 0;
+ pkts_to_client++;
+ goto do_enqueue;
+ }
+
+ rx_sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+
+ fib_index = im->fib_index_by_sw_if_index [rx_sw_if_index];
+
+ if (fib_index < vec_len(dpm->dhcp_server_index_by_rx_fib_index))
+ server_index = dpm->dhcp_server_index_by_rx_fib_index[fib_index];
+ else
+ server_index = 0;
+
+ if (PREDICT_FALSE (pool_is_free_index (dpm->dhcp_servers,
+ server_index)))
+ {
+ no_server:
+ error0 = DHCP_PROXY_ERROR_NO_SERVER;
+ next0 = DHCP_PROXY_TO_SERVER_INPUT_NEXT_DROP;
+ pkts_no_server++;
+ goto do_trace;
+ }
+
+ server = pool_elt_at_index (dpm->dhcp_servers, server_index);
+ if (server->valid == 0)
+ goto no_server;
+
+ vlib_buffer_advance (b0, -(sizeof(*ip0)));
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* disable UDP checksum */
+ u0->checksum = 0;
+ sum0 = ip0->checksum;
+ old0 = ip0->dst_address.as_u32;
+ new0 = server->dhcp_server.as_u32;
+ ip0->dst_address.as_u32 = server->dhcp_server.as_u32;
+ sum0 = ip_csum_update (sum0, old0, new0,
+ ip4_header_t /* structure */,
+ dst_address /* changed member */);
+ ip0->checksum = ip_csum_fold (sum0);
+
+ sum0 = ip0->checksum;
+ old0 = ip0->src_address.as_u32;
+ new0 = server->dhcp_src_address.as_u32;
+ ip0->src_address.as_u32 = new0;
+ sum0 = ip_csum_update (sum0, old0, new0,
+ ip4_header_t /* structure */,
+ src_address /* changed member */);
+ ip0->checksum = ip_csum_fold (sum0);
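+
+ /*
+ * ip_csum_update () applies the incremental-update rule of
+ * RFC 1624 (HC' = ~(~HC + ~m + m')): only the address words
+ * rewritten above are folded into the existing checksum, the
+ * header is never re-summed from scratch.
+ */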
+
+ /* Send to DHCP server via the configured FIB */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] =
+ server->server_fib_index;
+
+ h0->gateway_ip_address.as_u32 = server->dhcp_src_address.as_u32;
+ pkts_to_server++;
+
+ if (server->insert_option_82)
+ {
+ u32 fib_index, fib_id, opt82_fib_id=0, opt82_oui=0;
+ ip4_fib_t * fib;
+ dhcp_option_t *o = (dhcp_option_t *) h0->options;
+ u32 len = 0;
+ vlib_buffer_free_list_t *fl;
+
+ fib_index = im->fib_index_by_sw_if_index
+ [vnet_buffer(b0)->sw_if_index[VLIB_RX]];
+ fib = ip4_fib_get (fib_index);
+ fib_id = fib->table_id;
+
+ end = b0->data + b0->current_data + b0->current_length;
+ /* TLVs are not performance-friendly... */
+ while (o->option != 0xFF /* end of options */ && (u8 *)o < end)
+ o = (dhcp_option_t *) (((uword) o) + (o->length + 2));
+
+ fl = vlib_buffer_get_free_list (vm, b0->free_list_index);
+ // start writing at (dhcp_option_t *) o; some packets have padding
+ if (((u8 *)o - (u8 *)b0->data + VPP_DHCP_OPTION82_SIZE) > fl->n_data_bytes)
+ {
+ error0 = DHCP_PROXY_ERROR_PKT_TOO_BIG;
+ next0 = DHCP_PROXY_TO_SERVER_INPUT_NEXT_DROP;
+ pkts_too_big++;
+ goto do_trace;
+ }
+
+ if ((o->option == 0xFF) && ((u8 *)o < end))
+ {
+ vnet_main_t *vnm = vnet_get_main();
+ u16 old_l0, new_l0;
+ ip4_address_t _ia0, * ia0 = &_ia0;
+ uword *p_vss;
+ vss_info *vss;
+ vnet_sw_interface_t *swif;
+ sw_if_index = 0;
+ original_sw_if_index = 0;
+
+ original_sw_if_index = sw_if_index =
+ vnet_buffer(b0)->sw_if_index[VLIB_RX];
+ swif = vnet_get_sw_interface (vnm, sw_if_index);
+ if (swif->flags & VNET_SW_INTERFACE_FLAG_UNNUMBERED)
+ sw_if_index = swif->unnumbered_sw_if_index;
+
+ p_vss = hash_get (dpm->opt82vss_index_by_vrf_id,
+ fib_id);
+ if (p_vss)
+ {
+ vss = pool_elt_at_index (dpm->opt82vss, p_vss[0]);
+ opt82_oui = vss->vpn_id.oui;
+ opt82_fib_id = vss->vpn_id.fib_id;
+ }
+ /*
+ * Get the first ip4 address on the [client-side]
+ * RX interface, if not unnumbered. otherwise use
+ * the loopback interface's ip address.
+ */
+ ia0 = ip4_interface_first_address(&ip4_main, sw_if_index, 0);
+
+ if (ia0 == 0)
+ {
+ error0 = DHCP_PROXY_ERROR_NO_INTERFACE_ADDRESS;
+ next0 = DHCP_PROXY_TO_SERVER_INPUT_NEXT_DROP;
+ pkts_no_interface_address++;
+ goto do_trace;
+ }
+
+ /* Add option 82 */
+ o->option = 82; /* option 82 */
+ o->length = 12; /* 12 octets to follow */
+ o->data[0] = 1; /* suboption 1, circuit ID (=FIB id) */
+ o->data[1] = 4; /* length of suboption */
+ o->data[2] = (original_sw_if_index >> 24) & 0xFF;
+ o->data[3] = (original_sw_if_index >> 16) & 0xFF;
+ o->data[4] = (original_sw_if_index >> 8) & 0xFF;
+ o->data[5] = (original_sw_if_index >> 0) & 0xFF;
+ o->data[6] = 5; /* suboption 5 (client RX intfc address) */
+ o->data[7] = 4; /* length 4 */
+ o->data[8] = ia0->as_u8[0];
+ o->data[9] = ia0->as_u8[1];
+ o->data[10] = ia0->as_u8[2];
+ o->data[11] = ia0->as_u8[3];
+ o->data[12] = 0xFF;
+ if (opt82_oui !=0 || opt82_fib_id != 0)
+ {
+ o->data[12] = 151; /* vss suboption */
+ if (255 == opt82_fib_id) {
+ o->data[13] = 1; /* length */
+ o->data[14] = 255; /* vss option type */
+ o->data[15] = 152; /* vss control suboption */
+ o->data[16] = 0; /* length */
+ /* and a new "end-of-options" option (0xff) */
+ o->data[17] = 0xFF;
+ o->length += 5;
+ } else {
+ o->data[13] = 8; /* length */
+ o->data[14] = 1; /* vss option type */
+ o->data[15] = (opt82_oui >> 16) & 0xff;
+ o->data[16] = (opt82_oui >> 8) & 0xff;
+ o->data[17] = (opt82_oui ) & 0xff;
+ o->data[18] = (opt82_fib_id >> 24) & 0xff;
+ o->data[19] = (opt82_fib_id >> 16) & 0xff;
+ o->data[20] = (opt82_fib_id >> 8) & 0xff;
+ o->data[21] = (opt82_fib_id) & 0xff;
+ o->data[22] = 152; /* vss control suboption */
+ o->data[23] = 0; /* length */
+
+ /* and a new "end-of-options" option (0xff) */
+ o->data[24] = 0xFF;
+ o->length += 12;
+ }
+ }
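+
+ /*
+ * The appended bytes now read, in order: option-82 code and
+ * length, suboption 1 (circuit ID = RX sw_if_index), suboption
+ * 5 (RX interface address), the optional VSS suboptions
+ * 151/152, and a fresh end-of-options byte; len below counts
+ * all of them.
+ */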
+
+ len = o->length + 3;
+ b0->current_length += len;
+ /* Fix IP header length and checksum */
+ old_l0 = ip0->length;
+ new_l0 = clib_net_to_host_u16 (old_l0);
+ new_l0 += len;
+ new_l0 = clib_host_to_net_u16 (new_l0);
+ ip0->length = new_l0;
+ sum0 = ip0->checksum;
+ sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
+ length /* changed member */);
+ ip0->checksum = ip_csum_fold (sum0);
+
+ /* Fix UDP length */
+ new_l0 = clib_net_to_host_u16 (u0->length);
+ new_l0 += len;
+ u0->length = clib_host_to_net_u16 (new_l0);
+ } else {
+ vlib_node_increment_counter
+ (vm, dhcp_proxy_to_server_node.index,
+ DHCP_PROXY_ERROR_OPTION_82_ERROR, 1);
+ }
+ }
+
+ next0 = DHCP_PROXY_TO_SERVER_INPUT_NEXT_LOOKUP;
+
+ do_trace:
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ dhcp_proxy_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->which = 0; /* to server */
+ tr->error = error0;
+ tr->original_sw_if_index = original_sw_if_index;
+ tr->sw_if_index = sw_if_index;
+ if (next0 == DHCP_PROXY_TO_SERVER_INPUT_NEXT_LOOKUP)
+ tr->trace_ip4_address.as_u32 = server->dhcp_server.as_u32;
+ }
+
+ do_enqueue:
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, dhcp_proxy_to_server_node.index,
+ DHCP_PROXY_ERROR_RELAY_TO_CLIENT,
+ pkts_to_client);
+ vlib_node_increment_counter (vm, dhcp_proxy_to_server_node.index,
+ DHCP_PROXY_ERROR_RELAY_TO_SERVER,
+ pkts_to_server);
+ vlib_node_increment_counter (vm, dhcp_proxy_to_server_node.index,
+ DHCP_PROXY_ERROR_NO_SERVER,
+ pkts_no_server);
+ vlib_node_increment_counter (vm, dhcp_proxy_to_server_node.index,
+ DHCP_PROXY_ERROR_NO_INTERFACE_ADDRESS,
+ pkts_no_interface_address);
+ vlib_node_increment_counter (vm, dhcp_proxy_to_server_node.index,
+ DHCP_PROXY_ERROR_PKT_TOO_BIG,
+ pkts_too_big);
+ return from_frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (dhcp_proxy_to_server_node) = {
+ .function = dhcp_proxy_to_server_input,
+ .name = "dhcp-proxy-to-server",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_errors = DHCP_PROXY_N_ERROR,
+ .error_strings = dhcp_proxy_error_strings,
+
+ .n_next_nodes = DHCP_PROXY_TO_SERVER_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [DHCP_PROXY_TO_SERVER_INPUT_NEXT_##s] = n,
+ foreach_dhcp_proxy_to_server_input_next
+#undef _
+ },
+
+ .format_buffer = format_dhcp_proxy_header_with_length,
+ .format_trace = format_dhcp_proxy_trace,
+#if 0
+ .unformat_buffer = unformat_dhcp_proxy_header,
+#endif
+};
+
+static uword
+dhcp_proxy_to_client_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, * from;
+ ethernet_main_t *em = ethernet_get_main (vm);
+ dhcp_proxy_main_t * dpm = &dhcp_proxy_main;
+ vnet_main_t * vnm = vnet_get_main();
+ ip4_main_t * im = &ip4_main;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ udp_header_t * u0;
+ dhcp_header_t * h0;
+ ip4_header_t * ip0 = 0;
+ ip4_address_t * ia0 = 0;
+ u32 old0, new0;
+ ip_csum_t sum0;
+ ethernet_interface_t *ei0;
+ ethernet_header_t *mac0;
+ vnet_hw_interface_t *hi0;
+ vlib_frame_t *f0;
+ u32 * to_next0;
+ u32 sw_if_index = ~0;
+ vnet_sw_interface_t *si0;
+ u32 error0 = (u32)~0;
+ vnet_sw_interface_t *swif;
+ u32 server_index;
+ u32 fib_index;
+ dhcp_server_t * server;
+ u32 original_sw_if_index = (u32) ~0;
+
+ bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = vlib_buffer_get_current (b0);
+
+ /*
+ * udp_local hands us the DHCP header, need udp hdr,
+ * ip hdr to relay to client
+ */
+ vlib_buffer_advance (b0, -(sizeof(*u0)));
+ u0 = vlib_buffer_get_current (b0);
+
+ vlib_buffer_advance (b0, -(sizeof(*ip0)));
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* Consumed by dhcp client code? */
+ if (dhcp_client_for_us (bi0, b0, ip0, u0, h0))
+ continue;
+
+ if (1 /* dpm->insert_option_82 */)
+ {
+ dhcp_option_t *o = (dhcp_option_t *) h0->options;
+ dhcp_option_t *sub;
+
+ /* Parse through TLVs looking for option 82.
+ The circuit-ID is the FIB number we need
+ to track down the client-facing interface */
+
+ while (o->option != 0xFF /* end of options */ &&
+ (u8 *) o < (b0->data + b0->current_data + b0->current_length))
+ {
+ if (o->option == 82)
+ {
+ u32 vss_exist = 0;
+ u32 vss_ctrl = 0;
+ sub = (dhcp_option_t *) &o->data[0];
+ while (sub->option != 0xFF /* end of options */ &&
+ (u8 *) sub < ((u8 *) o + o->length + 2)) {
+ /* If this is one of ours, it will have
+ total length 12, circuit-id suboption type,
+ and the sw_if_index */
+ if (sub->option == 1 && sub->length == 4)
+ {
+ sw_if_index = (sub->data[0] << 24)
+ | (sub->data[1] << 16)
+ | (sub->data[2] << 8)
+ | (sub->data[3]);
+ } else if (sub->option == 151 &&
+ sub->length == 7 &&
+ sub->data[0] == 1)
+ vss_exist = 1;
+ else if (sub->option == 152 && sub->length == 0)
+ vss_ctrl = 1;
+ sub = (dhcp_option_t *)
+ (((uword) sub) + (sub->length + 2));
+ }
+ if (vss_ctrl && vss_exist)
+ vlib_node_increment_counter
+ (vm, dhcp_proxy_to_client_node.index,
+ DHCP_PROXY_ERROR_OPTION_82_VSS_NOT_PROCESSED, 1);
+
+ }
+ o = (dhcp_option_t *) (((uword) o) + (o->length + 2));
+ }
+ }
+
+ if (sw_if_index == (u32)~0)
+ {
+ error0 = DHCP_PROXY_ERROR_NO_OPTION_82;
+
+ drop_packet:
+ vlib_node_increment_counter (vm, dhcp_proxy_to_client_node.index,
+ error0, 1);
+ f0 = vlib_get_frame_to_node (vm, dpm->error_drop_node_index);
+ to_next0 = vlib_frame_vector_args (f0);
+ to_next0[0] = bi0;
+ f0->n_vectors = 1;
+ vlib_put_frame_to_node (vm, dpm->error_drop_node_index, f0);
+ goto do_trace;
+ }
+
+
+ if (sw_if_index >= vec_len (im->fib_index_by_sw_if_index))
+ {
+ error0 = DHCP_PROXY_ERROR_BAD_OPTION_82;
+ goto drop_packet;
+ }
+
+ fib_index = im->fib_index_by_sw_if_index [sw_if_index];
+
+ if (fib_index < vec_len(dpm->dhcp_server_index_by_rx_fib_index))
+ server_index = dpm->dhcp_server_index_by_rx_fib_index[fib_index];
+ else
+ server_index = 0;
+
+ if (PREDICT_FALSE (pool_is_free_index (dpm->dhcp_servers,
+ server_index)))
+ {
+ error0 = DHCP_PROXY_ERROR_BAD_OPTION_82;
+ goto drop_packet;
+ }
+
+ server = pool_elt_at_index (dpm->dhcp_servers, server_index);
+ if (server->valid == 0)
+ {
+ error0 = DHCP_PROXY_ERROR_NO_SERVER;
+ goto drop_packet;
+ }
+
+ if (ip0->src_address.as_u32 != server->dhcp_server.as_u32)
+ {
+ error0 = DHCP_PROXY_ERROR_BAD_SVR_FIB_OR_ADDRESS;
+ goto drop_packet;
+ }
+
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = sw_if_index;
+
+ swif = vnet_get_sw_interface (vnm, sw_if_index);
+ original_sw_if_index = sw_if_index;
+ if (swif->flags & VNET_SW_INTERFACE_FLAG_UNNUMBERED)
+ sw_if_index = swif->unnumbered_sw_if_index;
+
+ ia0 = ip4_interface_first_address (&ip4_main, sw_if_index, 0);
+ if (ia0 == 0)
+ {
+ error0 = DHCP_PROXY_ERROR_NO_INTERFACE_ADDRESS;
+ goto drop_packet;
+ }
+
+ u0->checksum = 0;
+ u0->dst_port = clib_net_to_host_u16 (UDP_DST_PORT_dhcp_to_client);
+ sum0 = ip0->checksum;
+ old0 = ip0->dst_address.as_u32;
+ new0 = 0xFFFFFFFF;
+ ip0->dst_address.as_u32 = new0;
+ sum0 = ip_csum_update (sum0, old0, new0,
+ ip4_header_t /* structure */,
+ dst_address /* offset of changed member */);
+ ip0->checksum = ip_csum_fold (sum0);
+
+ sum0 = ip0->checksum;
+ old0 = ip0->src_address.as_u32;
+ new0 = ia0->as_u32;
+ ip0->src_address.as_u32 = new0;
+ sum0 = ip_csum_update (sum0, old0, new0,
+ ip4_header_t /* structure */,
+ src_address /* offset of changed member */);
+ ip0->checksum = ip_csum_fold (sum0);
+
+ vlib_buffer_advance (b0, -(sizeof(ethernet_header_t)));
+ si0 = vnet_get_sw_interface (vnm, original_sw_if_index);
+ if (si0->type == VNET_SW_INTERFACE_TYPE_SUB)
+ vlib_buffer_advance (b0, -4 /* space for VLAN tag */);
+
+ mac0 = vlib_buffer_get_current (b0);
+
+ hi0 = vnet_get_sup_hw_interface (vnm, original_sw_if_index);
+ ei0 = pool_elt_at_index (em->interfaces, hi0->hw_instance);
+ clib_memcpy (mac0->src_address, ei0->address, sizeof (ei0->address));
+ memset (mac0->dst_address, 0xff, sizeof (mac0->dst_address));
+ mac0->type = (si0->type == VNET_SW_INTERFACE_TYPE_SUB) ?
+ clib_host_to_net_u16 (0x8100) : clib_host_to_net_u16 (0x0800);
+
+ if (si0->type == VNET_SW_INTERFACE_TYPE_SUB)
+ {
+ u32 * vlan_tag = (u32 *)(mac0+1);
+ u32 tmp;
+ tmp = (si0->sub.id << 16) | 0x0800;
+ *vlan_tag = clib_host_to_net_u32 (tmp);
+ }
+
+ /* $$$ This needs to be rewritten, for sure */
+ f0 = vlib_get_frame_to_node (vm, hi0->output_node_index);
+ to_next0 = vlib_frame_vector_args (f0);
+ to_next0[0] = bi0;
+ f0->n_vectors = 1;
+ vlib_put_frame_to_node (vm, hi0->output_node_index, f0);
+
+ do_trace:
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ dhcp_proxy_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->which = 1; /* to client */
+ tr->trace_ip4_address.as_u32 = ia0 ? ia0->as_u32 : 0;
+ tr->error = error0;
+ tr->original_sw_if_index = original_sw_if_index;
+ tr->sw_if_index = sw_if_index;
+ }
+ }
+ return from_frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (dhcp_proxy_to_client_node) = {
+ .function = dhcp_proxy_to_client_input,
+ .name = "dhcp-proxy-to-client",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_errors = DHCP_PROXY_N_ERROR,
+ .error_strings = dhcp_proxy_error_strings,
+ .format_buffer = format_dhcp_proxy_header_with_length,
+ .format_trace = format_dhcp_proxy_trace,
+#if 0
+ .unformat_buffer = unformat_dhcp_proxy_header,
+#endif
+};
+
+clib_error_t * dhcp_proxy_init (vlib_main_t * vm)
+{
+ dhcp_proxy_main_t * dm = &dhcp_proxy_main;
+ vlib_node_t * error_drop_node;
+ dhcp_server_t * server;
+
+ dm->vlib_main = vm;
+ dm->vnet_main = vnet_get_main();
+ error_drop_node = vlib_get_node_by_name (vm, (u8 *) "error-drop");
+ dm->error_drop_node_index = error_drop_node->index;
+
+ dm->opt82vss_index_by_vrf_id = hash_create (0, sizeof (uword));
+
+ udp_register_dst_port (vm, UDP_DST_PORT_dhcp_to_client,
+ dhcp_proxy_to_client_node.index, 1 /* is_ip4 */);
+
+ udp_register_dst_port (vm, UDP_DST_PORT_dhcp_to_server,
+ dhcp_proxy_to_server_node.index, 1 /* is_ip4 */);
+
+ /* Create the default server, don't mark it valid */
+ pool_get (dm->dhcp_servers, server);
+ memset (server, 0, sizeof (*server));
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (dhcp_proxy_init);
+
+int dhcp_proxy_set_server_2 (ip4_address_t *addr, ip4_address_t *src_address,
+ u32 rx_fib_id,
+ u32 server_fib_id,
+ int insert_option_82, int is_del)
+{
+ dhcp_proxy_main_t * dpm = &dhcp_proxy_main;
+ dhcp_server_t * server = 0;
+ u32 server_index = 0;
+ u32 rx_fib_index = 0;
+
+ if (addr->as_u32 == 0)
+ return VNET_API_ERROR_INVALID_DST_ADDRESS;
+
+ if (src_address->as_u32 == 0)
+ return VNET_API_ERROR_INVALID_SRC_ADDRESS;
+
+ rx_fib_index = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP4,
+ rx_fib_id);
+
+ if (rx_fib_id == 0)
+ {
+ server = pool_elt_at_index (dpm->dhcp_servers, 0);
+
+ if (is_del)
+ {
+ memset (server, 0, sizeof (*server));
+ return 0;
+ }
+ goto initialize_it;
+ }
+
+ if (is_del)
+ {
+ if (rx_fib_index >= vec_len(dpm->dhcp_server_index_by_rx_fib_index))
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ server_index = dpm->dhcp_server_index_by_rx_fib_index[rx_fib_index];
+ ASSERT(server_index > 0);
+
+ /* Use the default server again. */
+ dpm->dhcp_server_index_by_rx_fib_index[rx_fib_index] = 0;
+ server = pool_elt_at_index (dpm->dhcp_servers, server_index);
+ memset (server, 0, sizeof (*server));
+ pool_put (dpm->dhcp_servers, server);
+ return 0;
+ }
+
+ if (rx_fib_index < vec_len(dpm->dhcp_server_index_by_rx_fib_index))
+ {
+ server_index = dpm->dhcp_server_index_by_rx_fib_index[rx_fib_index];
+ if (server_index != 0)
+ {
+ server = pool_elt_at_index (dpm->dhcp_servers, server_index);
+ goto initialize_it;
+ }
+ }
+
+ pool_get (dpm->dhcp_servers, server);
+
+ initialize_it:
+
+ server->dhcp_server.as_u32 = addr->as_u32;
+ server->server_fib_index =
+ fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP4,
+ server_fib_id);
+ server->dhcp_src_address.as_u32 = src_address->as_u32;
+ server->insert_option_82 = insert_option_82;
+ server->valid = 1;
+ if (rx_fib_index)
+ {
+ vec_validate (dpm->dhcp_server_index_by_rx_fib_index, rx_fib_index);
+ dpm->dhcp_server_index_by_rx_fib_index[rx_fib_index] =
+ server - dpm->dhcp_servers;
+ }
+
+ return 0;
+}
+
+/* Old API, manipulates the default server (only) */
+int dhcp_proxy_set_server (ip4_address_t *addr, ip4_address_t *src_address,
+ u32 fib_id, int insert_option_82, int is_del)
+{
+ return dhcp_proxy_set_server_2 (addr, src_address, 0 /* rx_fib_id */,
+ fib_id /* server_fib_id */,
+ insert_option_82, is_del);
+}
+
+
+static clib_error_t *
+dhcp_proxy_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ ip4_address_t server_addr, src_addr;
+ u32 server_fib_id = 0, rx_fib_id = 0;
+ int is_del = 0;
+ int add_option_82 = 0;
+ int set_src = 0, set_server = 0;
+
+ while (unformat_check_input(input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "server %U",
+ unformat_ip4_address, &server_addr))
+ set_server = 1;
+ else if (unformat (input, "server-fib-id %d", &server_fib_id))
+ ;
+ else if (unformat (input, "rx-fib-id %d", &rx_fib_id))
+ ;
+ else if (unformat(input, "src-address %U",
+ unformat_ip4_address, &src_addr))
+ set_src = 1;
+ else if (unformat (input, "add-option-82")
+ || unformat (input, "insert-option-82"))
+ add_option_82 = 1;
+ else if (unformat (input, "delete") ||
+ unformat (input, "del"))
+ is_del = 1;
+ else
+ break;
+ }
+
+ if (is_del || (set_server && set_src))
+ {
+ int rv;
+
+ rv = dhcp_proxy_set_server_2 (&server_addr, &src_addr, rx_fib_id,
+ server_fib_id, add_option_82, is_del);
+ switch (rv)
+ {
+ case 0:
+ return 0;
+
+ case VNET_API_ERROR_INVALID_DST_ADDRESS:
+ return clib_error_return (0, "Invalid server address");
+
+ case VNET_API_ERROR_INVALID_SRC_ADDRESS:
+ return clib_error_return (0, "Invalid src address");
+
+ case VNET_API_ERROR_NO_SUCH_INNER_FIB:
+ return clib_error_return (0, "No such rx fib id %d", rx_fib_id);
+
+ case VNET_API_ERROR_NO_SUCH_FIB:
+ return clib_error_return (0, "No such server fib id %d",
+ server_fib_id);
+
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ return clib_error_return
+ (0, "Fib id %d: no per-fib DHCP server configured", rx_fib_id);
+
+ default:
+ return clib_error_return (0, "BUG: rv %d", rv);
+ }
+ }
+ else
+ return clib_error_return (0, "parse error`%U'",
+ format_unformat_error, input);
+}
+
+VLIB_CLI_COMMAND (dhcp_proxy_set_command, static) = {
+ .path = "set dhcp proxy",
+ .short_help = "set dhcp proxy [del] server <ip-addr> src-address <ip-addr> [add-option-82] [server-fib-id <n>] [rx-fib-id <n>]",
+ .function = dhcp_proxy_set_command_fn,
+};
+
+u8 * format_dhcp_proxy_server (u8 * s, va_list * args)
+{
+ dhcp_proxy_main_t * dm = va_arg (*args, dhcp_proxy_main_t *);
+ dhcp_server_t * server = va_arg (*args, dhcp_server_t *);
+ u32 rx_fib_index = va_arg (*args, u32);
+ ip4_fib_t * rx_fib, * server_fib;
+ u32 server_fib_id = ~0, rx_fib_id = ~0;
+
+ if (dm == 0)
+ {
+ s = format (s, "%=16s%=16s%=14s%=14s%=20s", "Server", "Src Address",
+ "Server FIB", "RX FIB", "Insert Option 82");
+ return s;
+ }
+
+ server_fib = ip4_fib_get(server->server_fib_index);
+
+ if (server_fib)
+ server_fib_id = server_fib->table_id;
+
+ rx_fib = ip4_fib_get(rx_fib_index);
+
+ if (rx_fib)
+ rx_fib_id = rx_fib->table_id;
+
+ s = format (s, "%=16U%=16U%=14u%=14u%=20s",
+ format_ip4_address, &server->dhcp_server,
+ format_ip4_address, &server->dhcp_src_address,
+ server_fib_id, rx_fib_id,
+ server->insert_option_82 ? "yes" : "no");
+ return s;
+}
+
+static clib_error_t *
+dhcp_proxy_show_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ dhcp_proxy_main_t * dpm = &dhcp_proxy_main;
+ ip4_main_t * im = &ip4_main;
+ dhcp_server_t * server;
+ u32 server_index;
+ int i;
+
+ vlib_cli_output (vm, "%U", format_dhcp_proxy_server, 0 /* header line */,
+ 0, 0);
+
+ for (i = 0; i < vec_len (im->fibs); i++)
+ {
+ if (i < vec_len(dpm->dhcp_server_index_by_rx_fib_index))
+ server_index = dpm->dhcp_server_index_by_rx_fib_index[i];
+ else
+ server_index = 0;
+ server = pool_elt_at_index (dpm->dhcp_servers, server_index);
+ if (server->valid)
+ vlib_cli_output (vm, "%U", format_dhcp_proxy_server, dpm,
+ server, i);
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (dhcp_proxy_show_command, static) = {
+ .path = "show dhcp proxy",
+ .short_help = "Display dhcp proxy server info",
+ .function = dhcp_proxy_show_command_fn,
+};
+
+
+int dhcp_proxy_set_option82_vss( u32 vrf_id,
+ u32 oui,
+ u32 fib_id,
+ int is_del)
+{
+ dhcp_proxy_main_t *dm = &dhcp_proxy_main;
+ uword *p;
+ vss_info *a;
+ u32 old_oui=0, old_fib_id=0;
+
+ p = hash_get (dm->opt82vss_index_by_vrf_id, vrf_id);
+
+ if (p)
+ {
+ a = pool_elt_at_index (dm->opt82vss, p[0]);
+ if (!a)
+ return VNET_API_ERROR_NO_SUCH_FIB;
+ old_oui = a->vpn_id.oui;
+ old_fib_id = a->vpn_id.fib_id;
+
+ if (is_del)
+ {
+ if (old_oui == oui &&
+ old_fib_id == fib_id)
+ {
+ pool_put(dm->opt82vss, a);
+ hash_unset (dm->opt82vss_index_by_vrf_id, vrf_id);
+ return 0;
+ }
+ else
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+ }
+ pool_put(dm->opt82vss, a);
+ hash_unset (dm->opt82vss_index_by_vrf_id, vrf_id);
+ } else if (is_del)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+ pool_get (dm->opt82vss, a);
+ memset (a, ~0, sizeof (a[0]));
+ a->vpn_id.oui = oui;
+ a->vpn_id.fib_id = fib_id;
+ hash_set (dm->opt82vss_index_by_vrf_id, vrf_id, a - dm->opt82vss);
+
+ return 0;
+}
+
+static clib_error_t *
+dhcp_option_82_vss_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ int is_del = 0, got_new_vpn_id=0;
+ u32 oui=0, fib_id=0, tbl_id=~0;
+
+
+ while (unformat_check_input(input) != UNFORMAT_END_OF_INPUT)
+ {
+
+ if (unformat(input, "delete") || unformat(input, "del"))
+ is_del = 1;
+ else if (unformat (input, "oui %d", &oui))
+ got_new_vpn_id = 1;
+ else if (unformat (input, "vpn-id %d", &fib_id))
+ got_new_vpn_id = 1;
+ else if (unformat (input, "table %d", &tbl_id))
+ got_new_vpn_id = 1;
+ else
+ break;
+ }
+ if (tbl_id == ~0)
+ return clib_error_return (0, "no table ID specified.");
+
+ if (is_del || got_new_vpn_id)
+ {
+ int rv;
+ rv = dhcp_proxy_set_option82_vss(tbl_id, oui, fib_id, is_del);
+ switch (rv)
+ {
+ case 0:
+ return 0;
+
+ case VNET_API_ERROR_NO_SUCH_FIB:
+ return clib_error_return (0, "option 82 vss(oui:%d, vpn-id:%d) not found in table %d",
+ oui, fib_id, tbl_id);
+
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ return clib_error_return (0, "option 82 vss for table %d not found in in pool.",
+ tbl_id);
+ default:
+ return clib_error_return (0, "BUG: rv %d", rv);
+ }
+ }
+ else
+ return clib_error_return (0, "parse error`%U'",
+ format_unformat_error, input);
+}
+
+VLIB_CLI_COMMAND (dhcp_proxy_vss_command,static) = {
+ .path = "set dhcp option-82 vss",
+ .short_help = "set dhcp option-82 vss [del] table <table id> oui <oui> vpn-id <vpn-id>",
+ .function = dhcp_option_82_vss_fn,
+};
+
+
+static clib_error_t *
+dhcp_vss_show_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+
+{
+ dhcp_proxy_main_t * dm = &dhcp_proxy_main;
+ vss_info *v;
+ u32 oui;
+ u32 fib_id;
+ u32 tbl_id;
+ uword index;
+
+ vlib_cli_output (vm, "%=9s%=11s%=12s","Table", "OUI", "VPN-ID");
+ hash_foreach (tbl_id, index, dm->opt82vss_index_by_vrf_id,
+ ({
+ v = pool_elt_at_index (dm->opt82vss, index);
+ oui = v->vpn_id.oui;
+ fib_id = v->vpn_id.fib_id;
+ vlib_cli_output (vm, "%=9d 0x%08x%=12d",
+ tbl_id, oui, fib_id);
+ }));
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (dhcp_proxy_vss_show_command, static) = {
+ .path = "show dhcp vss",
+ .short_help = "show dhcp VSS",
+ .function = dhcp_vss_show_command_fn,
+};
+
+static clib_error_t *
+dhcp_option_82_address_show_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+
+{
+ dhcp_proxy_main_t *dm = &dhcp_proxy_main;
+ vnet_main_t *vnm = vnet_get_main();
+ u32 sw_if_index0=0, sw_if_index;
+ ip4_address_t *ia0;
+ vnet_sw_interface_t *swif;
+
+ while (unformat_check_input(input) != UNFORMAT_END_OF_INPUT)
+ {
+
+ if (unformat(input, "%U",
+ unformat_vnet_sw_interface, dm->vnet_main, &sw_if_index0))
+ {
+ swif = vnet_get_sw_interface (vnm, sw_if_index0);
+ sw_if_index = (swif->flags & VNET_SW_INTERFACE_FLAG_UNNUMBERED) ?
+ swif->unnumbered_sw_if_index : sw_if_index0;
+ ia0 = ip4_interface_first_address(&ip4_main, sw_if_index, 0);
+ if (ia0)
+ {
+ vlib_cli_output (vm, "%=20s%=20s", "interface",
+ "source IP address");
+
+ vlib_cli_output (vm, "%=20U%=20U",
+ format_vnet_sw_if_index_name,
+ dm->vnet_main, sw_if_index0,
+ format_ip4_address, ia0);
+ }
+ else
+ vlib_cli_output (vm, "%=34s %=20U",
+ "No IPv4 address configured on",
+ format_vnet_sw_if_index_name,
+ dm->vnet_main, sw_if_index);
+ }
+ else
+ break;
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (dhcp_proxy_address_show_command,static) = {
+ .path = "show dhcp option-82-address interface",
+ .short_help = "show dhcp option-82-address interface <interface>",
+ .function = dhcp_option_82_address_show_command_fn,
+};
diff --git a/src/vnet/dhcpv6/packet.h b/src/vnet/dhcpv6/packet.h
new file mode 100644
index 00000000000..8634b5d8e9b
--- /dev/null
+++ b/src/vnet/dhcpv6/packet.h
@@ -0,0 +1,183 @@
+#ifndef included_vnet_dhcp_packet_h
+#define included_vnet_dhcp_packet_h
+
+/*
+ * DHCP packet format
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/ip/ip6_packet.h>
+
+// #define DHCP_VRF_NAME_MAX_LEN L3VM_MAX_NAME_STR_LEN
+// #define DHCPV6_MAX_VRF_NAME_LEN L3VM_MAX_NAME_STR_LEN
+#define DHCP_MAX_RELAY_ADDR 16
+#define PROTO_UDP 17
+#define DHCPV6_CLIENT_PORT 546
+#define DHCPV6_SERVER_PORT 547
+#define HOP_COUNT_LIMIT 32
+#define DHCPV6_CISCO_ENT_NUM 9
+
+/*
+ * DHCPv6 message types
+ */
+typedef enum dhcpv6_msg_type_{
+ DHCPV6_MSG_SOLICIT = 1,
+ DHCPV6_MSG_ADVERTISE = 2,
+ DHCPV6_MSG_REQUEST = 3,
+ DHCPV6_MSG_CONFIRM = 4,
+ DHCPV6_MSG_RENEW = 5,
+ DHCPV6_MSG_REBIND = 6,
+ DHCPV6_MSG_REPLY = 7,
+ DHCPV6_MSG_RELEASE = 8,
+ DHCPV6_MSG_DECLINE = 9,
+ DHCPV6_MSG_RECONFIGURE = 10,
+ DHCPV6_MSG_INFORMATION_REQUEST = 11,
+ DHCPV6_MSG_RELAY_FORW = 12,
+ DHCPV6_MSG_RELAY_REPL = 13,
+} dhcpv6_msg_type_t;
+
+/*
+ * DHCPv6 options types
+ */
+enum {
+ DHCPV6_OPTION_CLIENTID = 1,
+ DHCPV6_OPTION_SERVERID = 2,
+ DHCPV6_OPTION_IA_NA = 3,
+ DHCPV6_OPTION_IA_TA = 4,
+ DHCPV6_OPTION_IAADDR = 5,
+ DHCPV6_OPTION_ORO = 6,
+ DHCPV6_OPTION_PREFERENCE = 7,
+ DHCPV6_OPTION_ELAPSED_TIME = 8,
+ DHCPV6_OPTION_RELAY_MSG = 9,
+ DHCPV6_OPTION_AUTH = 11,
+ DHCPV6_OPTION_UNICAST = 12,
+ DHCPV6_OPTION_STATUS_CODE = 13,
+ DHCPV6_OPTION_RAPID_COMMIT = 14,
+ DHCPV6_OPTION_USER_CLASS = 15,
+ DHCPV6_OPTION_VENDOR_CLASS = 16,
+ DHCPV6_OPTION_VENDOR_OPTS = 17,
+ DHCPV6_OPTION_INTERFACE_ID = 18, // relay agent fills this
+ DHCPV6_OPTION_RECONF_MSG = 19,
+ DHCPV6_OPTION_RECONF_ACCEPT = 20,
+ DHCPV6_OPTION_REMOTEID = 37, // relay agent fills this
+ DHCPV6_OPTION_VSS = 68, // relay agent fills this
+ DHCPV6_OPTION_CLIENT_LINK_LAYER_ADDRESS = 79,
+ DHCPV6_OPTION_MAX
+};
+
+/*
+ * DHCPv6 status codes
+ */
+enum {
+ DHCPV6_STATUS_SUCCESS = 0,
+ DHCPV6_STATUS_UNSPEC_FAIL = 1,
+ DHCPV6_STATUS_NOADDRS_AVAIL = 2,
+ DHCPV6_STATUS_NO_BINDING = 3,
+ DHCPV6_STATUS_NOT_ONLINK = 4,
+ DHCPV6_STATUS_USE_MULTICAST = 5,
+};
+
+/*
+ * DHCPv6 DUID types
+ */
+enum {
+ DHCPV6_DUID_LLT = 1, /* DUID Based on Link-layer Address Plus Time */
+ DHCPV6_DUID_EN = 2, /* DUID Based on Enterprise Number */
+ DHCPV6_DUID_LL = 3, /* DUID Based on Link-layer Address */
+};
+
+//Structure for DHCPv6 payload from client
+typedef struct dhcpv6_hdr_ {
+ union {
+ u8 msg_type; //DHCP msg type
+ u32 xid; // transaction id
+ }u;
+ u8 data[0];
+} dhcpv6_header_t;
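+
+/*
+ * Per RFC 3315 the first octet is msg-type and the remaining three
+ * octets of the same 32-bit word carry the transaction id, hence the
+ * union overlay: u.msg_type is safe to read, but u.xid also contains
+ * the msg-type octet and would need masking before use.
+ */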
+
+
+
+typedef CLIB_PACKED (struct dhcpv6_relay_ctx_ {
+ dhcpv6_header_t *pkt;
+ u32 pkt_len;
+ u32 dhcpv6_len; // DHCPv6 payload length
+// if_ordinal iod;
+ u32 if_index;
+ u32 ctx_id;
+ char ctx_name[32+1];
+ u8 dhcp_msg_type;
+}) dhcpv6_relay_ctx_t;
+
+//Structure for DHCPv6 RELAY-FORWARD and DHCPv6 RELAY-REPLY pkts
+typedef CLIB_PACKED (struct dhcpv6_relay_hdr_ {
+ u8 msg_type;
+ u8 hop_count;
+ ip6_address_t link_addr;
+ ip6_address_t peer_addr;
+ u8 data[0];
+}) dhcpv6_relay_hdr_t;
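+
+/*
+ * This matches the fixed RELAY-FORW / RELAY-REPL header of RFC 3315
+ * section 7: msg-type, hop-count, link-address, peer-address, then
+ * relay options.
+ */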
+
+typedef enum dhcp_stats_action_type_ {
+ DHCP_STATS_ACTION_FORWARDED=1,
+ DHCP_STATS_ACTION_RECEIVED,
+ DHCP_STATS_ACTION_DROPPED
+} dhcp_stats_action_type_t;
+//Generic counters for a packet
+typedef struct dhcp_stats_counters_ {
+ u64 rx_pkts; //counter for received pkts
+ u64 tx_pkts; //counter for forwarded pkts
+ u64 drops; //counter for dropped pkts
+} dhcp_stats_counters_t;
+
+
+typedef enum dhcpv6_stats_drop_reason_ {
+ DHCPV6_RELAY_PKT_DROP_RELAYDISABLE = 1,
+ DHCPV6_RELAY_PKT_DROP_MAX_HOPS,
+ DHCPV6_RELAY_PKT_DROP_VALIDATION_FAIL,
+ DHCPV6_RELAY_PKT_DROP_UNKNOWN_OP_INTF,
+ DHCPV6_RELAY_PKT_DROP_BAD_CONTEXT,
+ DHCPV6_RELAY_PKT_DROP_OPT_INSERT_FAIL,
+ DHCPV6_RELAY_PKT_DROP_REPLY_FROM_CLIENT,
+} dhcpv6_stats_drop_reason_t;
+
+typedef CLIB_PACKED (struct {
+ u16 option;
+ u16 length;
+ u8 data[0];
+}) dhcpv6_option_t;
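+
+/* Unlike the 8-bit DHCPv4 TLVs, DHCPv6 option codes and lengths are
+ 16-bit and carried in network byte order, which is why the relay
+ code fills them in with clib_host_to_net_u16. */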
+
+typedef CLIB_PACKED (struct {
+ dhcpv6_option_t opt;
+ u32 int_idx;
+}) dhcpv6_int_id_t;
+
+typedef CLIB_PACKED (struct {
+ dhcpv6_option_t opt;
+ u8 data[8]; // data[0]:type, data[1..7]: VPN ID
+}) dhcpv6_vss_t;
+
+typedef CLIB_PACKED (struct {
+ dhcpv6_option_t opt;
+ u32 ent_num;
+ u32 rmt_id;
+}) dhcpv6_rmt_id_t;
+
+typedef CLIB_PACKED (struct {
+ dhcpv6_option_t opt;
+ u16 link_type;
+ u8 data[6]; // data[0]..data[5]: MAC address
+}) dhcpv6_client_mac_t;
+
+
+#endif /* included_vnet_dhcp_packet_h */
diff --git a/src/vnet/dhcpv6/proxy.h b/src/vnet/dhcpv6/proxy.h
new file mode 100644
index 00000000000..9e18913a970
--- /dev/null
+++ b/src/vnet/dhcpv6/proxy.h
@@ -0,0 +1,95 @@
+/*
+ * proxy.h: dhcp proxy
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_dhcpv6_proxy_h
+#define included_dhcpv6_proxy_h
+
+#include <vnet/vnet.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ip/ip4.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ip/format.h>
+#include <vnet/ip/udp.h>
+#include <vnet/dhcpv6/packet.h>
+
+typedef enum {
+#define dhcpv6_proxy_error(n,s) DHCPV6_PROXY_ERROR_##n,
+#include <vnet/dhcpv6/proxy_error.def>
+#undef dhcpv6_proxy_error
+ DHCPV6_PROXY_N_ERROR,
+} dhcpv6_proxy_error_t;
+
+typedef struct {
+ u32 oui;
+ u32 fib_id;
+} dhcpv6_vss_id;
+
+typedef union {
+ u8 as_u8[8];
+ dhcpv6_vss_id vpn_id;
+} dhcpv6_vss_info;
+
+typedef struct {
+ ip6_address_t dhcp6_server;
+ ip6_address_t dhcp6_src_address;
+ u32 insert_vss;
+ u32 server_fib6_index;
+ u32 valid;
+} dhcpv6_server_t;
+
+typedef struct {
+ /* Pool of DHCP servers */
+ dhcpv6_server_t * dhcp6_servers;
+
+ /* Pool of selected DHCP server. Zero is the default server */
+ u32 * dhcp6_server_index_by_rx_fib_index;
+
+ /* all DHCP servers address */
+ ip6_address_t all_dhcpv6_server_address;
+ ip6_address_t all_dhcpv6_server_relay_agent_address;
+
+ /* to drop pkts in server-to-client direction */
+ u32 error_drop_node_index;
+
+ dhcpv6_vss_info *vss;
+
+ /* hash lookup specific vrf_id -> VSS vector index*/
+ uword *vss_index_by_vrf_id;
+
+ /* convenience */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} dhcpv6_proxy_main_t;
+
+dhcpv6_proxy_main_t dhcpv6_proxy_main;
+
+int dhcpv6_proxy_set_server (ip6_address_t *addr, ip6_address_t *src_address,
+ u32 fib_id, int insert_vss, int is_del);
+
+int dhcpv6_proxy_set_vss(u32 tbl_id,
+ u32 oui,
+ u32 fib_id,
+ int is_del);
+
+int dhcpv6_proxy_set_server_2 (ip6_address_t *addr, ip6_address_t *src_address,
+ u32 rx_fib_id,
+ u32 server_fib_id,
+ int insert_vss, int is_del);
+
+#endif /* included_dhcpv6_proxy_h */
diff --git a/src/vnet/dhcpv6/proxy_error.def b/src/vnet/dhcpv6/proxy_error.def
new file mode 100644
index 00000000000..55fa731766c
--- /dev/null
+++ b/src/vnet/dhcpv6/proxy_error.def
@@ -0,0 +1,29 @@
+/*
+ * dhcp_proxy_error.def: dhcp proxy errors
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+dhcpv6_proxy_error (NONE, "no error")
+dhcpv6_proxy_error (NO_SERVER, "no dhcpv6 server configured")
+dhcpv6_proxy_error (RELAY_TO_SERVER, "DHCPV6 packets relayed to the server")
+dhcpv6_proxy_error (RELAY_TO_CLIENT, "DHCPV6 packets relayed to clients")
+dhcpv6_proxy_error (NO_INTERFACE_ADDRESS, "DHCPV6 no interface address")
+dhcpv6_proxy_error (WRONG_MESSAGE_TYPE, "DHCPV6 wrong message type.")
+dhcpv6_proxy_error (NO_SRC_ADDRESS, "DHCPV6 no source IPv6 address configured.")
+dhcpv6_proxy_error (NO_CIRCUIT_ID_OPTION, "DHCPv6 reply packets without circuit ID option")
+dhcpv6_proxy_error (NO_RELAY_MESSAGE_OPTION, "DHCPv6 reply packets without relay message option")
+dhcpv6_proxy_error (BAD_SVR_FIB_OR_ADDRESS, "DHCPv6 packets not from DHCPv6 server or server FIB.")
+dhcpv6_proxy_error (PKT_TOO_BIG, "DHCPv6 packets which are too big.")
+dhcpv6_proxy_error (WRONG_INTERFACE_ID_OPTION, "DHCPv6 reply to invalid interface.")
+dhcpv6_proxy_error (MAX_HOPS_EXCEEDED, "DHCPv6 packets with relay hop count exceeding the limit.")
diff --git a/src/vnet/dhcpv6/proxy_node.c b/src/vnet/dhcpv6/proxy_node.c
new file mode 100644
index 00000000000..40df35cd059
--- /dev/null
+++ b/src/vnet/dhcpv6/proxy_node.c
@@ -0,0 +1,1191 @@
+/*
+ * proxy_node.c: dhcpv6 proxy node processing
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/dhcpv6/proxy.h>
+#include <vnet/fib/ip6_fib.h>
+
+static char * dhcpv6_proxy_error_strings[] = {
+#define dhcpv6_proxy_error(n,s) s,
+#include "proxy_error.def"
+#undef dhcpv6_proxy_error
+};
+
+#define foreach_dhcpv6_proxy_to_server_input_next \
+ _ (DROP, "error-drop") \
+ _ (LOOKUP, "ip6-lookup") \
+ _ (SEND_TO_CLIENT, "dhcpv6-proxy-to-client")
+
+
+typedef enum {
+#define _(s,n) DHCPV6_PROXY_TO_SERVER_INPUT_NEXT_##s,
+ foreach_dhcpv6_proxy_to_server_input_next
+#undef _
+ DHCPV6_PROXY_TO_SERVER_INPUT_N_NEXT,
+} dhcpv6_proxy_to_server_input_next_t;
+
+typedef struct {
+ /* 0 => to server, 1 => to client */
+ int which;
+ u8 packet_data[64];
+ u32 error;
+ u32 sw_if_index;
+ u32 original_sw_if_index;
+} dhcpv6_proxy_trace_t;
+
+vlib_node_registration_t dhcpv6_proxy_to_server_node;
+vlib_node_registration_t dhcpv6_proxy_to_client_node;
+
+
+u8 * format_dhcpv6_proxy_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ dhcpv6_proxy_trace_t * t = va_arg (*args, dhcpv6_proxy_trace_t *);
+
+ if (t->which == 0)
+ s = format (s, "DHCPV6 proxy: sent to server %U",
+ format_ip6_address, &t->packet_data);
+ else
+ s = format (s, "DHCPV6 proxy: sent to client from %U",
+ format_ip6_address, &t->packet_data);
+ if (t->error != (u32)~0)
+ s = format (s, " error: %s\n", dhcpv6_proxy_error_strings[t->error]);
+
+ s = format (s, " original_sw_if_index: %d, sw_if_index: %d\n",
+ t->original_sw_if_index, t->sw_if_index);
+
+ return s;
+}
+
+u8 * format_dhcpv6_proxy_header_with_length (u8 * s, va_list * args)
+{
+ dhcpv6_header_t * h = va_arg (*args, dhcpv6_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+ u32 header_bytes;
+
+ header_bytes = sizeof (h[0]);
+ if (max_header_bytes != 0 && header_bytes > max_header_bytes)
+ return format (s, "dhcpv6 header truncated");
+
+ s = format (s, "DHCPV6 Proxy");
+
+ return s;
+}
+/* get first global or site-local interface address */
+static ip6_address_t *
+ip6_interface_first_global_or_site_address (ip6_main_t * im, u32 sw_if_index)
+{
+ ip_lookup_main_t * lm = &im->lookup_main;
+ ip_interface_address_t * ia = 0;
+ ip6_address_t * result = 0;
+
+ foreach_ip_interface_address (lm, ia, sw_if_index,
+ 1 /* honor unnumbered */,
+ ({
+ ip6_address_t * a = ip_interface_address_get_address (lm, ia);
+ if ((a->as_u8[0] & 0xe0) == 0x20 ||
+ (a->as_u8[0] & 0xfe) == 0xfc) {
+ result = a;
+ break;
+ }
+ }));
+ return result;
+}
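+
+/*
+ * The (a->as_u8[0] & 0xe0) == 0x20 test above matches 2000::/3
+ * (global unicast) and (a->as_u8[0] & 0xfe) == 0xfc matches fc00::/7
+ * (unique-local), which is what "global or site" refers to here.
+ */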
+
+/* get first interface address */
+static ip6_address_t *
+ip6_interface_first_address (ip6_main_t * im, u32 sw_if_index)
+{
+ ip_lookup_main_t * lm = &im->lookup_main;
+ ip_interface_address_t * ia = 0;
+ ip6_address_t * result = 0;
+
+ foreach_ip_interface_address (lm, ia, sw_if_index,
+ 1 /* honor unnumbered */,
+ ({
+ ip6_address_t * a = ip_interface_address_get_address (lm, ia);
+ result = a;
+ break;
+ }));
+ return result;
+}
+
+static inline void copy_ip6_address (ip6_address_t *dst, ip6_address_t *src)
+{
+
+ dst->as_u64[0] = src->as_u64[0];
+ dst->as_u64[1] = src->as_u64[1];
+}
+
+static uword
+dhcpv6_proxy_to_server_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+ dhcpv6_proxy_main_t * dpm = &dhcpv6_proxy_main;
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+ u32 pkts_to_server=0, pkts_to_client=0, pkts_no_server=0;
+ u32 pkts_no_interface_address=0, pkts_exceeding_max_hops=0;
+ u32 pkts_no_src_address=0;
+ u32 pkts_wrong_msg_type=0;
+ u32 pkts_too_big=0;
+ ip6_main_t * im = &ip6_main;
+ ip6_fib_t * fib;
+ ip6_address_t * src;
+ int bogus_length;
+ dhcpv6_server_t * server;
+ u32 rx_fib_idx = 0, server_fib_idx = 0;
+ u32 server_idx;
+ u32 fib_id1 = 0;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vnet_main_t *vnm = vnet_get_main();
+ u32 sw_if_index = 0;
+ u32 rx_sw_if_index = 0;
+ vnet_sw_interface_t *swif;
+ u32 bi0;
+ vlib_buffer_t * b0;
+ udp_header_t * u0, *u1;
+ dhcpv6_header_t * h0; // client msg hdr
+ ip6_header_t * ip0, *ip1;
+ ip6_address_t _ia0, *ia0=&_ia0;
+ u32 next0;
+ u32 error0 = (u32) ~0;
+ dhcpv6_option_t *fwd_opt;
+ dhcpv6_relay_hdr_t *r1;
+ u16 len;
+ dhcpv6_int_id_t *id1;
+ dhcpv6_vss_t *vss1;
+ dhcpv6_client_mac_t *cmac; // client mac
+ ethernet_header_t * e_h0;
+ u8 client_src_mac[6];
+ vlib_buffer_free_list_t *fl;
+
+ uword *p_vss;
+ u32 oui1=0;
+ dhcpv6_vss_info *vss;
+
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ h0 = vlib_buffer_get_current (b0);
+
+ /*
+ * udp_local hands us the DHCPV6 header.
+ */
+ u0 = (void *)h0 -(sizeof(*u0));
+ ip0 = (void *)u0 -(sizeof(*ip0));
+ e_h0 = (void *)ip0 - ethernet_buffer_header_size(b0);
+
+ clib_memcpy(client_src_mac, e_h0->src_address, 6);
+
+ switch (h0->u.msg_type) {
+ case DHCPV6_MSG_SOLICIT:
+ case DHCPV6_MSG_REQUEST:
+ case DHCPV6_MSG_CONFIRM:
+ case DHCPV6_MSG_RENEW:
+ case DHCPV6_MSG_REBIND:
+ case DHCPV6_MSG_RELEASE:
+ case DHCPV6_MSG_DECLINE:
+ case DHCPV6_MSG_INFORMATION_REQUEST:
+ case DHCPV6_MSG_RELAY_FORW:
+ /* send to server */
+ break;
+ case DHCPV6_MSG_RELAY_REPL:
+ /* send to client */
+ next0 = DHCPV6_PROXY_TO_SERVER_INPUT_NEXT_SEND_TO_CLIENT;
+ error0 = 0;
+ pkts_to_client++;
+ goto do_enqueue;
+ default:
+ /* drop the packet */
+ pkts_wrong_msg_type++;
+ error0 = DHCPV6_PROXY_ERROR_WRONG_MESSAGE_TYPE;
+ next0 = DHCPV6_PROXY_TO_SERVER_INPUT_NEXT_DROP;
+ goto do_trace;
+
+ }
+
+ /* Send to DHCPV6 server via the configured FIB */
+ rx_sw_if_index = sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+ rx_fib_idx = im->fib_index_by_sw_if_index [rx_sw_if_index];
+ server_idx = dpm->dhcp6_server_index_by_rx_fib_index[rx_fib_idx];
+
+ if (PREDICT_FALSE (pool_is_free_index (dpm->dhcp6_servers,
+ server_idx)))
+ {
+ no_server:
+ error0 = DHCPV6_PROXY_ERROR_NO_SERVER;
+ next0 = DHCPV6_PROXY_TO_SERVER_INPUT_NEXT_DROP;
+ pkts_no_server++;
+ goto do_trace;
+ }
+
+ server = pool_elt_at_index(dpm->dhcp6_servers, server_idx);
+ if (server->valid == 0)
+ goto no_server;
+
+ server_fib_idx = server->server_fib6_index;
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = server_fib_idx;
+
+
+ /* relay-option header pointer */
+ vlib_buffer_advance(b0, -(sizeof(*fwd_opt)));
+ fwd_opt = vlib_buffer_get_current(b0);
+ /* relay message header pointer */
+ vlib_buffer_advance(b0, -(sizeof(*r1)));
+ r1 = vlib_buffer_get_current(b0);
+
+ vlib_buffer_advance(b0, -(sizeof(*u1)));
+ u1 = vlib_buffer_get_current(b0);
+
+ vlib_buffer_advance(b0, -(sizeof(*ip1)));
+ ip1 = vlib_buffer_get_current(b0);
+
+ /* fill in all that rubbish... */
+ len = clib_net_to_host_u16(u0->length) - sizeof(udp_header_t);
+ copy_ip6_address(&r1->peer_addr, &ip0->src_address);
+
+ r1->msg_type = DHCPV6_MSG_RELAY_FORW;
+ fwd_opt->length = clib_host_to_net_u16(len);
+ fwd_opt->option = clib_host_to_net_u16(DHCPV6_OPTION_RELAY_MSG);
+
+ r1->hop_count++;
+ r1->hop_count = (h0->u.msg_type != DHCPV6_MSG_RELAY_FORW) ? 0 : r1->hop_count;
+
+ if (PREDICT_FALSE(r1->hop_count >= HOP_COUNT_LIMIT))
+ {
+ error0 = DHCPV6_PROXY_ERROR_MAX_HOPS_EXCEEDED;
+ next0 = DHCPV6_PROXY_TO_SERVER_INPUT_NEXT_DROP;
+ pkts_exceeding_max_hops++;
+ goto do_trace;
+ }
+
+
+ /* If relay-fwd and src address is site or global unicast address */
+ if (h0->u.msg_type == DHCPV6_MSG_RELAY_FORW &&
+ ((ip0->src_address.as_u8[0] & 0xe0) == 0x20 ||
+ (ip0->src_address.as_u8[0] & 0xfe) == 0xfc))
+ {
+ /* Set link address to zero */
+ r1->link_addr.as_u64[0] = 0;
+ r1->link_addr.as_u64[1] = 0;
+ goto link_address_set;
+ }
+
+ /* if receiving interface is unnumbered, use receiving interface
+ * IP address as link address, otherwise use the loopback interface
+ * IP address as link address.
+ */
+
+ swif = vnet_get_sw_interface (vnm, rx_sw_if_index);
+ if (swif->flags & VNET_SW_INTERFACE_FLAG_UNNUMBERED)
+ sw_if_index = swif->unnumbered_sw_if_index;
+
+ ia0 = ip6_interface_first_global_or_site_address(&ip6_main, sw_if_index);
+ if (ia0 == 0)
+ {
+ error0 = DHCPV6_PROXY_ERROR_NO_INTERFACE_ADDRESS;
+ next0 = DHCPV6_PROXY_TO_SERVER_INPUT_NEXT_DROP;
+ pkts_no_interface_address++;
+ goto do_trace;
+ }
+
+ copy_ip6_address(&r1->link_addr, ia0);
+
+ link_address_set:
+ fl = vlib_buffer_get_free_list (vm, b0->free_list_index);
+
+ if ((b0->current_length+sizeof(*id1)+sizeof(*vss1)+sizeof(*cmac))
+ > fl->n_data_bytes)
+ {
+ error0 = DHCPV6_PROXY_ERROR_PKT_TOO_BIG;
+ next0 = DHCPV6_PROXY_TO_SERVER_INPUT_NEXT_DROP;
+ pkts_too_big++;
+ goto do_trace;
+ }
+
+ id1 = (dhcpv6_int_id_t *) (((uword) ip1) + b0->current_length);
+ b0->current_length += (sizeof (*id1));
+
+
+ fib = ip6_fib_get (rx_fib_idx);
+
+ //TODO: Revisit if hash makes sense here
+ p_vss = hash_get (dpm->vss_index_by_vrf_id,
+ fib->table_id);
+ if (p_vss)
+ {
+ vss = pool_elt_at_index (dpm->vss, p_vss[0]);
+ oui1 = vss->vpn_id.oui;
+ fib_id1 = vss->vpn_id.fib_id;
+ }
+
+ id1->opt.option = clib_host_to_net_u16(DHCPV6_OPTION_INTERFACE_ID);
+ id1->opt.length = clib_host_to_net_u16(sizeof(rx_sw_if_index));
+ id1->int_idx = clib_host_to_net_u32(rx_sw_if_index);
+
+ u1->length =0;
+ if (h0->u.msg_type != DHCPV6_MSG_RELAY_FORW)
+ {
+ cmac = (dhcpv6_client_mac_t *) (((uword) ip1) + b0->current_length);
+ b0->current_length += (sizeof (*cmac));
+ cmac->opt.length =clib_host_to_net_u16(sizeof(*cmac) -
+ sizeof(cmac->opt));
+ cmac->opt.option = clib_host_to_net_u16(DHCPV6_OPTION_CLIENT_LINK_LAYER_ADDRESS);
+ cmac->link_type = clib_host_to_net_u16(1); // ethernet
+ clib_memcpy(cmac->data, client_src_mac, 6);
+ u1->length += sizeof(*cmac);
+ }
+ if (server->insert_vss !=0 ) {
+ vss1 = (dhcpv6_vss_t *) (((uword) ip1) + b0->current_length);
+ b0->current_length += (sizeof (*vss1));
+ vss1->opt.length =clib_host_to_net_u16(sizeof(*vss1) -
+ sizeof(vss1->opt));
+ vss1->opt.option = clib_host_to_net_u16(DHCPV6_OPTION_VSS);
+ vss1->data[0] = 1; // type
+ vss1->data[1] = oui1>>16 & 0xff;
+ vss1->data[2] = oui1>>8 & 0xff;
+ vss1->data[3] = oui1 & 0xff;
+ vss1->data[4] = fib_id1>>24 & 0xff;
+ vss1->data[5] = fib_id1>>16 & 0xff;
+ vss1->data[6] = fib_id1>>8 & 0xff;
+ vss1->data[7] = fib_id1 & 0xff;
+ u1->length += sizeof(*vss1);
+ }
+
+ pkts_to_server++;
+ u1->checksum = 0;
+ u1->src_port = clib_host_to_net_u16(UDP_DST_PORT_dhcpv6_to_client);
+ u1->dst_port = clib_host_to_net_u16(UDP_DST_PORT_dhcpv6_to_server);
+
+ u1->length =
+ clib_host_to_net_u16( clib_net_to_host_u16(fwd_opt->length) +
+ sizeof(*r1) + sizeof(*fwd_opt) +
+ sizeof(*u1) + sizeof(*id1) + u1->length);
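+
+ /*
+ * Once ip1 is filled in below, the relayed packet reads front to
+ * back: new IPv6 header (ip1), new UDP header (u1), relay header
+ * (r1), the relay-message option (fwd_opt) wrapping the original
+ * client message, then the appended interface-id option and, when
+ * present, the client link-layer address and VSS options counted
+ * into u1->length above.
+ */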
+
+ memset(ip1, 0, sizeof(*ip1));
+ ip1->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32 (0x60000000);
+ ip1->payload_length = u1->length;
+ ip1->protocol = PROTO_UDP;
+ ip1->hop_limit = HOP_COUNT_LIMIT;
+ src = (server->dhcp6_server.as_u64[0] || server->dhcp6_server.as_u64[1]) ?
+ &server->dhcp6_server : &dpm->all_dhcpv6_server_address;
+ copy_ip6_address(&ip1->dst_address, src);
+
+
+ ia0 = ip6_interface_first_global_or_site_address
+ (&ip6_main, vnet_buffer(b0)->sw_if_index[VLIB_RX]);
+
+ src = (server->dhcp6_src_address.as_u64[0] || server->dhcp6_src_address.as_u64[1]) ?
+ &server->dhcp6_src_address : ia0;
+ if (ia0 == 0)
+ {
+ error0 = DHCPV6_PROXY_ERROR_NO_SRC_ADDRESS;
+ next0 = DHCPV6_PROXY_TO_SERVER_INPUT_NEXT_DROP;
+ pkts_no_src_address++;
+ goto do_trace;
+ }
+
+ copy_ip6_address (&ip1->src_address, src);
+
+
+ u1->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0, ip1,
+ &bogus_length);
+ ASSERT(bogus_length == 0);
+
+ next0 = DHCPV6_PROXY_TO_SERVER_INPUT_NEXT_LOOKUP;
+
+ do_trace:
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ dhcpv6_proxy_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->which = 0; /* to server */
+ tr->error = error0;
+ tr->original_sw_if_index = rx_sw_if_index;
+ tr->sw_if_index = sw_if_index;
+ if (DHCPV6_PROXY_TO_SERVER_INPUT_NEXT_LOOKUP == next0)
+ copy_ip6_address((ip6_address_t *)&tr->packet_data[0], &server->dhcp6_server);
+ }
+
+ do_enqueue:
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, dhcpv6_proxy_to_server_node.index,
+ DHCPV6_PROXY_ERROR_RELAY_TO_CLIENT,
+ pkts_to_client);
+ vlib_node_increment_counter (vm, dhcpv6_proxy_to_server_node.index,
+ DHCPV6_PROXY_ERROR_RELAY_TO_SERVER,
+ pkts_to_server);
+ vlib_node_increment_counter (vm, dhcpv6_proxy_to_server_node.index,
+ DHCPV6_PROXY_ERROR_NO_INTERFACE_ADDRESS,
+ pkts_no_interface_address);
+ vlib_node_increment_counter (vm, dhcpv6_proxy_to_server_node.index,
+ DHCPV6_PROXY_ERROR_WRONG_MESSAGE_TYPE,
+ pkts_wrong_msg_type);
+ vlib_node_increment_counter (vm, dhcpv6_proxy_to_server_node.index,
+ DHCPV6_PROXY_ERROR_NO_SRC_ADDRESS,
+ pkts_no_src_address);
+ vlib_node_increment_counter (vm, dhcpv6_proxy_to_server_node.index,
+ DHCPV6_PROXY_ERROR_PKT_TOO_BIG,
+ pkts_too_big);
+ return from_frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (dhcpv6_proxy_to_server_node) = {
+ .function = dhcpv6_proxy_to_server_input,
+ .name = "dhcpv6-proxy-to-server",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_errors = DHCPV6_PROXY_N_ERROR,
+ .error_strings = dhcpv6_proxy_error_strings,
+
+ .n_next_nodes = DHCPV6_PROXY_TO_SERVER_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [DHCPV6_PROXY_TO_SERVER_INPUT_NEXT_##s] = n,
+ foreach_dhcpv6_proxy_to_server_input_next
+#undef _
+ },
+
+ .format_buffer = format_dhcpv6_proxy_header_with_length,
+ .format_trace = format_dhcpv6_proxy_trace,
+#if 0
+ .unformat_buffer = unformat_dhcpv6_proxy_header,
+#endif
+};
+
+static uword
+dhcpv6_proxy_to_client_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+
+ u32 n_left_from, * from;
+ ethernet_main_t *em = ethernet_get_main (vm);
+ dhcpv6_proxy_main_t * dm = &dhcpv6_proxy_main;
+ dhcpv6_server_t * server;
+ vnet_main_t * vnm = vnet_get_main();
+ int bogus_length;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ udp_header_t * u0, *u1=0;
+ dhcpv6_relay_hdr_t * h0;
+ ip6_header_t * ip1 = 0, *ip0;
+ ip6_address_t _ia0, * ia0 = &_ia0;
+ ip6_address_t client_address;
+ ethernet_interface_t *ei0;
+ ethernet_header_t *mac0;
+ vnet_hw_interface_t *hi0;
+ vlib_frame_t *f0;
+ u32 * to_next0;
+ u32 sw_if_index = ~0;
+ u32 original_sw_if_index = ~0;
+ vnet_sw_interface_t *si0;
+ u32 error0 = (u32)~0;
+ vnet_sw_interface_t *swif;
+ dhcpv6_option_t *r0 = 0, *o;
+ u16 len = 0;
+ u8 interface_opt_flag = 0;
+ u8 relay_msg_opt_flag = 0;
+ ip6_fib_t * svr_fib;
+ ip6_main_t * im = &ip6_main;
+ u32 server_fib_idx, svr_fib_id, client_fib_idx, server_idx;
+
+ bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = vlib_buffer_get_current (b0);
+
+ if (DHCPV6_MSG_RELAY_REPL != h0->msg_type)
+ {
+ error0 = DHCPV6_PROXY_ERROR_WRONG_MESSAGE_TYPE;
+
+ drop_packet:
+ vlib_node_increment_counter (vm, dhcpv6_proxy_to_client_node.index,
+ error0, 1);
+
+ f0 = vlib_get_frame_to_node (vm, dm->error_drop_node_index);
+ to_next0 = vlib_frame_vector_args (f0);
+ to_next0[0] = bi0;
+ f0->n_vectors = 1;
+ vlib_put_frame_to_node (vm, dm->error_drop_node_index, f0);
+ goto do_trace;
+ }
+      /* Drop the packet if it has exceeded the relay hop count limit */
+ if (HOP_COUNT_LIMIT < h0->hop_count)
+ {
+ error0 = DHCPV6_RELAY_PKT_DROP_MAX_HOPS;
+ goto drop_packet;
+ }
+ u0 = (void *)h0 -(sizeof(*u0));
+ ip0 = (void *)u0 -(sizeof(*ip0));
+
+ vlib_buffer_advance (b0, sizeof(*h0));
+ o = vlib_buffer_get_current (b0);
+
+      /* Parse through the TLVs looking for option 18 (DHCPV6_OPTION_INTERFACE_ID)
+         _and_ option 9 (DHCPV6_OPTION_RELAY_MSG), both of which must be present.
+         No other options are currently processed.
+         The interface-ID gives us the sw_if_index, and hence the FIB,
+         of the client-facing interface */
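+      /* For example, a minimal Interface-ID option on the wire is
+       *   option-code 18 | option-len 4 | sw_if_index (4 octets)
+       * with all fields in network byte order. */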
+
+ while ((u8 *) o < (b0->data + b0->current_data + b0->current_length))
+ {
+ if (DHCPV6_OPTION_INTERFACE_ID == clib_net_to_host_u16(o->option))
+ {
+ interface_opt_flag = 1;
+ if (clib_net_to_host_u16(o->length) == sizeof(sw_if_index))
+ sw_if_index = clib_net_to_host_u32(((dhcpv6_int_id_t*)o)->int_idx);
+ if (sw_if_index >= vec_len (im->fib_index_by_sw_if_index))
+ {
+ error0 = DHCPV6_PROXY_ERROR_WRONG_INTERFACE_ID_OPTION;
+ goto drop_packet;
+ }
+ }
+ if (DHCPV6_OPTION_RELAY_MSG == clib_net_to_host_u16(o->option))
+ {
+ relay_msg_opt_flag = 1;
+ r0 = vlib_buffer_get_current (b0);
+ }
+ if ((relay_msg_opt_flag == 1) && (interface_opt_flag == 1))
+ break;
+ vlib_buffer_advance (b0, sizeof(*o) + clib_net_to_host_u16(o->length));
+ o = (dhcpv6_option_t *) (((uword) o) + clib_net_to_host_u16(o->length) + sizeof(*o));
+ }
+
+ if ((relay_msg_opt_flag == 0) || (r0 == 0))
+ {
+ error0 = DHCPV6_PROXY_ERROR_NO_RELAY_MESSAGE_OPTION;
+ goto drop_packet;
+ }
+
+ if ((u32)~0 == sw_if_index)
+ {
+ error0 = DHCPV6_PROXY_ERROR_NO_CIRCUIT_ID_OPTION;
+ goto drop_packet;
+ }
+
+ //Advance buffer to start of encapsulated DHCPv6 message
+ vlib_buffer_advance (b0, sizeof(*r0));
+
+ client_fib_idx = im->fib_index_by_sw_if_index[sw_if_index];
+ if (client_fib_idx < vec_len(dm->dhcp6_server_index_by_rx_fib_index))
+ server_idx = dm->dhcp6_server_index_by_rx_fib_index[client_fib_idx];
+ else
+ server_idx = 0;
+
+ if (PREDICT_FALSE (pool_is_free_index (dm->dhcp6_servers, server_idx)))
+ {
+ error0 = DHCPV6_PROXY_ERROR_WRONG_INTERFACE_ID_OPTION;
+ goto drop_packet;
+ }
+
+ server = pool_elt_at_index (dm->dhcp6_servers, server_idx);
+ if (server->valid == 0)
+ {
+ error0 = DHCPV6_PROXY_ERROR_NO_SERVER;
+ goto drop_packet;
+ }
+
+
+ server_fib_idx = im->fib_index_by_sw_if_index
+ [vnet_buffer(b0)->sw_if_index[VLIB_RX]];
+ svr_fib = ip6_fib_get (server_fib_idx);
+ svr_fib_id = svr_fib->table_id;
+
+ if (svr_fib_id != server->server_fib6_index ||
+ ip0->src_address.as_u64[0] != server->dhcp6_server.as_u64[0] ||
+ ip0->src_address.as_u64[1] != server->dhcp6_server.as_u64[1])
+ {
+ //drop packet if not from server with configured address or FIB
+ error0 = DHCPV6_PROXY_ERROR_BAD_SVR_FIB_OR_ADDRESS;
+ goto drop_packet;
+ }
+
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = original_sw_if_index
+ = sw_if_index;
+
+ swif = vnet_get_sw_interface (vnm, original_sw_if_index);
+ if (swif->flags & VNET_SW_INTERFACE_FLAG_UNNUMBERED)
+ sw_if_index = swif->unnumbered_sw_if_index;
+
+
+ /*
+ * udp_local hands us the DHCPV6 header, need udp hdr,
+ * ip hdr to relay to client
+ */
+ vlib_buffer_advance (b0, -(sizeof(*u1)));
+ u1 = vlib_buffer_get_current (b0);
+
+ vlib_buffer_advance (b0, -(sizeof(*ip1)));
+ ip1 = vlib_buffer_get_current (b0);
+
+ copy_ip6_address(&client_address, &h0->peer_addr);
+
+ ia0 = ip6_interface_first_address (&ip6_main, sw_if_index);
+ if (ia0 == 0)
+ {
+ error0 = DHCPV6_PROXY_ERROR_NO_INTERFACE_ADDRESS;
+ goto drop_packet;
+ }
+
+ len = clib_net_to_host_u16(r0->length);
+ memset(ip1, 0, sizeof(*ip1));
+ copy_ip6_address(&ip1->dst_address, &client_address);
+ u1->checksum = 0;
+ u1->src_port = clib_net_to_host_u16 (UDP_DST_PORT_dhcpv6_to_server);
+ u1->dst_port = clib_net_to_host_u16 (UDP_DST_PORT_dhcpv6_to_client);
+ u1->length = clib_host_to_net_u16 (len + sizeof(udp_header_t));
+
+ ip1->ip_version_traffic_class_and_flow_label =
+ ip0->ip_version_traffic_class_and_flow_label &
+ 0x00000fff;
+ ip1->payload_length = u1->length;
+ ip1->protocol = PROTO_UDP;
+ ip1->hop_limit = HOP_COUNT_LIMIT;
+ copy_ip6_address(&ip1->src_address, ia0);
+
+ u1->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0, ip1,
+ &bogus_length);
+ ASSERT(bogus_length == 0);
+
+ vlib_buffer_advance (b0, -(sizeof(ethernet_header_t)));
+ si0 = vnet_get_sw_interface (vnm, original_sw_if_index);
+ if (si0->type == VNET_SW_INTERFACE_TYPE_SUB)
+ vlib_buffer_advance (b0, -4 /* space for VLAN tag */);
+
+ mac0 = vlib_buffer_get_current (b0);
+
+ hi0 = vnet_get_sup_hw_interface (vnm, original_sw_if_index);
+ ei0 = pool_elt_at_index (em->interfaces, hi0->hw_instance);
+ clib_memcpy (mac0->src_address, ei0->address, sizeof (ei0->address));
+ memset (&mac0->dst_address, 0xff, sizeof (mac0->dst_address));
+ mac0->type = (si0->type == VNET_SW_INTERFACE_TYPE_SUB) ?
+ clib_net_to_host_u16(0x8100) : clib_net_to_host_u16 (0x86dd);
+
+ if (si0->type == VNET_SW_INTERFACE_TYPE_SUB)
+ {
+ u32 * vlan_tag = (u32 *)(mac0+1);
+ u32 tmp;
+	  tmp = (si0->sub.id << 16) | 0x86dd; /* inner ethertype: IPv6 */
+ *vlan_tag = clib_host_to_net_u32 (tmp);
+ }
+
+ /* $$$ consider adding a dynamic next to the graph node, for performance */
+ f0 = vlib_get_frame_to_node (vm, hi0->output_node_index);
+ to_next0 = vlib_frame_vector_args (f0);
+ to_next0[0] = bi0;
+ f0->n_vectors = 1;
+ vlib_put_frame_to_node (vm, hi0->output_node_index, f0);
+
+ do_trace:
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ dhcpv6_proxy_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->which = 1; /* to client */
+ if (ia0)
+ copy_ip6_address((ip6_address_t*)tr->packet_data, ia0);
+ tr->error = error0;
+ tr->original_sw_if_index = original_sw_if_index;
+ tr->sw_if_index = sw_if_index;
+ }
+ }
+ return from_frame->n_vectors;
+
+}
+
+VLIB_REGISTER_NODE (dhcpv6_proxy_to_client_node) = {
+ .function = dhcpv6_proxy_to_client_input,
+ .name = "dhcpv6-proxy-to-client",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_errors = DHCPV6_PROXY_N_ERROR,
+ .error_strings = dhcpv6_proxy_error_strings,
+ .format_buffer = format_dhcpv6_proxy_header_with_length,
+ .format_trace = format_dhcpv6_proxy_trace,
+#if 0
+ .unformat_buffer = unformat_dhcpv6_proxy_header,
+#endif
+};
+
+clib_error_t * dhcpv6_proxy_init (vlib_main_t * vm)
+{
+ dhcpv6_proxy_main_t * dm = &dhcpv6_proxy_main;
+ vlib_node_t * error_drop_node;
+ dhcpv6_server_t * server;
+
+ dm->vlib_main = vm;
+ dm->vnet_main = vnet_get_main();
+ error_drop_node = vlib_get_node_by_name (vm, (u8 *) "error-drop");
+ dm->error_drop_node_index = error_drop_node->index;
+
+ dm->vss_index_by_vrf_id = hash_create (0, sizeof (uword));
+
+  /* RFC 3315: the All_DHCP_Servers site-scoped multicast address (ff05::1:3) */
+ dm->all_dhcpv6_server_address.as_u64[0] = clib_host_to_net_u64 (0xFF05000000000000);
+ dm->all_dhcpv6_server_address.as_u64[1] = clib_host_to_net_u64 (0x00010003);
+
+  /* RFC 3315: the All_DHCP_Relay_Agents_and_Servers link-scoped multicast address (ff02::1:2) */
+ dm->all_dhcpv6_server_relay_agent_address.as_u64[0] = clib_host_to_net_u64 (0xFF02000000000000);
+ dm->all_dhcpv6_server_relay_agent_address.as_u64[1] = clib_host_to_net_u64 (0x00010002);
+
+ udp_register_dst_port (vm, UDP_DST_PORT_dhcpv6_to_client,
+                         dhcpv6_proxy_to_client_node.index, 0 /* is_ip4 */);
+
+ udp_register_dst_port (vm, UDP_DST_PORT_dhcpv6_to_server,
+                         dhcpv6_proxy_to_server_node.index, 0 /* is_ip4 */);
+
+ /* Create the default server, don't mark it valid */
+ pool_get (dm->dhcp6_servers, server);
+ memset (server, 0, sizeof (*server));
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (dhcpv6_proxy_init);
+
+/* Old API, manipulates a single server (only) shared by all Rx VRFs */
+int dhcpv6_proxy_set_server (ip6_address_t *addr, ip6_address_t *src_address,
+ u32 fib_id, int insert_vss, int is_del)
+{
+ return dhcpv6_proxy_set_server_2 (addr, src_address,
+ 0, fib_id,
+ insert_vss, is_del);
+}
+
+int dhcpv6_proxy_set_server_2 (ip6_address_t *addr, ip6_address_t *src_address,
+ u32 rx_fib_id, u32 server_fib_id,
+ int insert_vss, int is_del)
+{
+ dhcpv6_proxy_main_t * dm = &dhcpv6_proxy_main;
+ dhcpv6_server_t * server = 0;
+ u32 server_fib_index = 0;
+ u32 rx_fib_index = 0;
+
+ rx_fib_index = ip6_fib_table_find_or_create_and_lock(rx_fib_id);
+ server_fib_index = ip6_fib_table_find_or_create_and_lock(server_fib_id);
+
+ if (is_del)
+ {
+
+ if (rx_fib_index >= vec_len(dm->dhcp6_server_index_by_rx_fib_index))
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ server_fib_index = dm->dhcp6_server_index_by_rx_fib_index[rx_fib_index];
+
+ dm->dhcp6_server_index_by_rx_fib_index[rx_fib_index] = 0;
+ server = pool_elt_at_index (dm->dhcp6_servers, server_fib_index);
+ memset (server, 0, sizeof (*server));
+ pool_put (dm->dhcp6_servers, server);
+ return 0;
+ }
+
+ if (addr->as_u64[0] == 0 &&
+ addr->as_u64[1] == 0 )
+ return VNET_API_ERROR_INVALID_DST_ADDRESS;
+
+ if (src_address->as_u64[0] == 0 &&
+ src_address->as_u64[1] == 0)
+ return VNET_API_ERROR_INVALID_SRC_ADDRESS;
+
+ if (rx_fib_id == 0)
+ {
+ server = pool_elt_at_index (dm->dhcp6_servers, 0);
+
+ goto initialize_it;
+ }
+
+ if (rx_fib_index < vec_len(dm->dhcp6_server_index_by_rx_fib_index))
+ {
+ server_fib_index = dm->dhcp6_server_index_by_rx_fib_index[rx_fib_index];
+ if (server_fib_index != 0)
+ {
+ server = pool_elt_at_index (dm->dhcp6_servers, server_fib_index);
+ goto initialize_it;
+ }
+ }
+
+ /*Allocate a new server*/
+ pool_get (dm->dhcp6_servers, server);
+
+ initialize_it:
+
+ copy_ip6_address(&server->dhcp6_server, addr);
+ copy_ip6_address(&server->dhcp6_src_address, src_address);
+ server->server_fib6_index = server_fib_index;
+ server->valid = 1;
+ server->insert_vss = insert_vss;
+
+ vec_validate (dm->dhcp6_server_index_by_rx_fib_index, rx_fib_index);
+ dm->dhcp6_server_index_by_rx_fib_index[rx_fib_index] =
+ server - dm->dhcp6_servers;
+
+ return 0;
+}
+
+static clib_error_t *
+dhcpv6_proxy_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ ip6_address_t addr, src_addr;
+ int set_server = 0, set_src_address = 0;
+ u32 rx_fib_id = 0, server_fib_id = 0;
+ int is_del = 0, add_vss = 0;
+
+ while (unformat_check_input(input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "server %U",
+ unformat_ip6_address, &addr))
+ set_server = 1;
+ else if (unformat(input, "src-address %U",
+ unformat_ip6_address, &src_addr))
+ set_src_address =1;
+ else if (unformat (input, "server-fib-id %d", &server_fib_id))
+ ;
+ else if (unformat (input, "rx-fib-id %d", &rx_fib_id))
+ ;
+ else if (unformat (input, "add-vss-option")
+ || unformat (input, "insert-option"))
+ add_vss = 1;
+ else if (unformat (input, "delete") ||
+ unformat (input, "del"))
+ is_del = 1;
+ else
+ break;
+ }
+
+ if (is_del || (set_server && set_src_address))
+ {
+ int rv;
+
+ rv = dhcpv6_proxy_set_server_2 (&addr, &src_addr, rx_fib_id,
+ server_fib_id, add_vss, is_del);
+
+ //TODO: Complete the errors
+ switch (rv)
+ {
+ case 0:
+ return 0;
+
+ case -1:
+ return clib_error_return (0, "FIB id %d does not exist", server_fib_id);
+
+ default:
+ return clib_error_return (0, "BUG: rv %d", rv);
+ }
+ }
+ else
+    return clib_error_return (0, "parse error `%U'",
+ format_unformat_error, input);
+}
+
+VLIB_CLI_COMMAND (dhcpv6_proxy_set_command, static) = {
+ .path = "set dhcpv6 proxy",
+ .short_help = "set dhcpv6 proxy [del] server <ipv6-addr> src-address <ipv6-addr> "
+ "[add-vss-option] [server-fib-id <fib-id>] [rx-fib-id <fib-id>] ",
+ .function = dhcpv6_proxy_set_command_fn,
+};
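+
+/* An illustrative invocation (addresses and FIB ids are examples only):
+ *
+ *   set dhcpv6 proxy server 2001:db8::1 src-address 2001:db8::2
+ *       rx-fib-id 0 server-fib-id 0 add-vss-option
+ */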
+
+u8 * format_dhcpv6_proxy_server (u8 * s, va_list * args)
+{
+ dhcpv6_proxy_main_t * dm = va_arg (*args, dhcpv6_proxy_main_t *);
+ dhcpv6_server_t * server = va_arg (*args, dhcpv6_server_t *);
+ u32 rx_fib_index = va_arg (*args, u32);
+ ip6_fib_t * rx_fib, * server_fib;
+ u32 server_fib_id = (u32)~0, rx_fib_id = ~0;
+
+ if (dm == 0)
+ {
+ s = format (s, "%=40s%=40s%=14s%=14s%=20s", "Server Address", "Source Address",
+ "Server FIB", "RX FIB", "Insert VSS Option");
+ return s;
+ }
+
+ server_fib = ip6_fib_get(server->server_fib6_index);
+ if (server_fib)
+ server_fib_id= server_fib->table_id;
+
+ rx_fib= ip6_fib_get(rx_fib_index);
+
+ if (rx_fib)
+ rx_fib_id = rx_fib->table_id;
+
+ s = format (s, "%=40U%=40U%=14u%=14u%=20s",
+ format_ip6_address, &server->dhcp6_server,
+ format_ip6_address, &server->dhcp6_src_address,
+ server_fib_id, rx_fib_id,
+ server->insert_vss ? "yes" : "no");
+ return s;
+}
+
+static clib_error_t *
+dhcpv6_proxy_show_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ dhcpv6_proxy_main_t * dm = &dhcpv6_proxy_main;
+ ip6_main_t * im = &ip6_main;
+ int i;
+ u32 server_index;
+ dhcpv6_server_t * server;
+
+ vlib_cli_output (vm, "%U", format_dhcpv6_proxy_server, 0 /* header line */,
+ 0, 0);
+ for (i = 0; i < vec_len (im->fibs); i++)
+ {
+ if (i < vec_len(dm->dhcp6_server_index_by_rx_fib_index))
+ server_index = dm->dhcp6_server_index_by_rx_fib_index[i];
+ else
+ server_index = 0;
+ server = pool_elt_at_index (dm->dhcp6_servers, server_index);
+ if (server->valid)
+ vlib_cli_output (vm, "%U", format_dhcpv6_proxy_server, dm,
+ server, i);
+ }
+ return 0;
+}
+
+VLIB_CLI_COMMAND (dhcpv6_proxy_show_command, static) = {
+ .path = "show dhcpv6 proxy",
+ .short_help = "Display dhcpv6 proxy info",
+ .function = dhcpv6_proxy_show_command_fn,
+};
+
+int dhcpv6_proxy_set_vss(u32 tbl_id,
+ u32 oui,
+ u32 fib_id,
+ int is_del)
+{
+ dhcpv6_proxy_main_t *dm = &dhcpv6_proxy_main;
+ u32 old_oui, old_fib_id;
+ uword *p;
+ dhcpv6_vss_info *v;
+
+ p = hash_get (dm->vss_index_by_vrf_id, tbl_id);
+
+ if (p) {
+ v = pool_elt_at_index (dm->vss, p[0]);
+ if (!v)
+ return VNET_API_ERROR_NO_SUCH_FIB;
+
+ old_oui = v->vpn_id.oui;
+ old_fib_id = v->vpn_id.fib_id;
+
+ if (is_del)
+ {
+ if (old_oui == oui &&
+ old_fib_id == fib_id )
+ {
+ pool_put(dm->vss, v);
+ hash_unset (dm->vss_index_by_vrf_id, tbl_id);
+ return 0;
+ }
+ else
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+ }
+
+ pool_put(dm->vss, v);
+ hash_unset (dm->vss_index_by_vrf_id, tbl_id);
+ } else if (is_del)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ pool_get (dm->vss, v);
+ memset (v, ~0, sizeof (*v));
+ v->vpn_id.fib_id = fib_id;
+ v->vpn_id.oui = oui;
+ hash_set (dm->vss_index_by_vrf_id, tbl_id, v - dm->vss);
+
+ return 0;
+}
+
+
+static clib_error_t *
+dhcpv6_vss_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ int is_del = 0, got_new_vss=0;
+ u32 oui=0;
+ u32 fib_id=0, tbl_id=~0;
+
+ while (unformat_check_input(input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "oui %d", &oui))
+ got_new_vss = 1;
+ else if (unformat (input, "vpn-id %d", &fib_id))
+ got_new_vss = 1;
+ else if (unformat (input, "table %d", &tbl_id))
+ got_new_vss = 1;
+ else if (unformat(input, "delete") || unformat(input, "del"))
+ is_del = 1;
+ else
+ break;
+ }
+
+ if (tbl_id ==~0)
+ return clib_error_return (0, "no table ID specified.");
+
+ if (is_del || got_new_vss)
+ {
+ int rv;
+
+ rv = dhcpv6_proxy_set_vss(tbl_id, oui, fib_id, is_del);
+ switch (rv)
+ {
+ case 0:
+ return 0;
+
+ case VNET_API_ERROR_NO_SUCH_FIB:
+ return clib_error_return (0, "vss info (oui:%d, vpn-id:%d) not found in table %d.",
+ oui, fib_id, tbl_id);
+
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ return clib_error_return (0, "vss for table %d not found in pool.",
+ tbl_id);
+
+ default:
+ return clib_error_return (0, "BUG: rv %d", rv);
+ }
+ }
+ else
+    return clib_error_return (0, "parse error `%U'",
+ format_unformat_error, input);
+
+}
+
+VLIB_CLI_COMMAND (dhcpv6_proxy_vss_command, static) = {
+ .path = "set dhcpv6 vss",
+  .short_help = "set dhcpv6 vss table <table-id> oui <oui> vpn-id <vpn-id>",
+ .function = dhcpv6_vss_command_fn,
+};
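+
+/* An illustrative invocation (values are examples only): associate IPv6
+ * table 0 with a type-1 VSS tuple of OUI 9 and VPN id 13:
+ *
+ *   set dhcpv6 vss table 0 oui 9 vpn-id 13
+ */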
+
+static clib_error_t *
+dhcpv6_vss_show_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+
+{
+ dhcpv6_proxy_main_t * dm = &dhcpv6_proxy_main;
+ dhcpv6_vss_info *v;
+ u32 oui;
+ u32 fib_id;
+ u32 tbl_id;
+ uword index;
+
+ vlib_cli_output (vm, "%=6s%=6s%=12s","Table", "OUI", "VPN ID");
+ hash_foreach (tbl_id, index, dm->vss_index_by_vrf_id,
+ ({
+ v = pool_elt_at_index (dm->vss, index);
+ oui = v->vpn_id.oui;
+ fib_id = v->vpn_id.fib_id;
+ vlib_cli_output (vm, "%=6d%=6d%=12d",
+ tbl_id, oui, fib_id);
+ }));
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (dhcpv6_proxy_vss_show_command, static) = {
+ .path = "show dhcpv6 vss",
+ .short_help = "show dhcpv6 VSS",
+ .function = dhcpv6_vss_show_command_fn,
+};
+
+static clib_error_t *
+dhcpv6_link_address_show_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+
+{
+ dhcpv6_proxy_main_t *dm = &dhcpv6_proxy_main;
+ vnet_main_t *vnm = vnet_get_main();
+ u32 sw_if_index0=0, sw_if_index;
+ ip6_address_t *ia0;
+ vnet_sw_interface_t *swif;
+
+ while (unformat_check_input(input) != UNFORMAT_END_OF_INPUT)
+ {
+
+ if (unformat(input, "%U",
+ unformat_vnet_sw_interface, dm->vnet_main, &sw_if_index0))
+ {
+ swif = vnet_get_sw_interface (vnm, sw_if_index0);
+ sw_if_index = (swif->flags & VNET_SW_INTERFACE_FLAG_UNNUMBERED) ?
+ swif->unnumbered_sw_if_index : sw_if_index0;
+ ia0 = ip6_interface_first_address(&ip6_main, sw_if_index);
+ if (ia0)
+ {
+ vlib_cli_output (vm, "%=20s%=48s", "interface", "link-address");
+
+ vlib_cli_output (vm, "%=20U%=48U",
+ format_vnet_sw_if_index_name, dm->vnet_main, sw_if_index0,
+ format_ip6_address, ia0);
+ } else
+ vlib_cli_output (vm, "%=34s%=20U", "No IPv6 address configured on",
+ format_vnet_sw_if_index_name, dm->vnet_main, sw_if_index);
+ } else
+ break;
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (dhcpv6_proxy_address_show_command, static) = {
+ .path = "show dhcpv6 link-address interface",
+ .short_help = "show dhcpv6 link-address interface <interface>",
+ .function = dhcpv6_link_address_show_command_fn,
+};
diff --git a/src/vnet/dpo/classify_dpo.c b/src/vnet/dpo/classify_dpo.c
new file mode 100644
index 00000000000..9e7886c9edd
--- /dev/null
+++ b/src/vnet/dpo/classify_dpo.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip.h>
+#include <vnet/dpo/classify_dpo.h>
+#include <vnet/mpls/mpls.h>
+
+/*
+ * pool of all classify DPOs
+ */
+classify_dpo_t *classify_dpo_pool;
+
+static classify_dpo_t *
+classify_dpo_alloc (void)
+{
+ classify_dpo_t *cd;
+
+ pool_get_aligned(classify_dpo_pool, cd, CLIB_CACHE_LINE_BYTES);
+ memset(cd, 0, sizeof(*cd));
+
+ return (cd);
+}
+
+static index_t
+classify_dpo_get_index (classify_dpo_t *cd)
+{
+ return (cd - classify_dpo_pool);
+}
+
+index_t
+classify_dpo_create (dpo_proto_t proto,
+ u32 classify_table_index)
+{
+ classify_dpo_t *cd;
+
+ cd = classify_dpo_alloc();
+ cd->cd_proto = proto;
+ cd->cd_table_index = classify_table_index;
+
+ return (classify_dpo_get_index(cd));
+}
+
+u8*
+format_classify_dpo (u8 *s, va_list *args)
+{
+ index_t index = va_arg (*args, index_t);
+ CLIB_UNUSED(u32 indent) = va_arg (*args, u32);
+ classify_dpo_t *cd;
+
+ cd = classify_dpo_get(index);
+
+ return (format(s, "%U-classify:[%d]:table:%d",
+ format_dpo_proto, cd->cd_proto,
+ index, cd->cd_table_index));
+}
+
+static void
+classify_dpo_lock (dpo_id_t *dpo)
+{
+ classify_dpo_t *cd;
+
+ cd = classify_dpo_get(dpo->dpoi_index);
+
+ cd->cd_locks++;
+}
+
+static void
+classify_dpo_unlock (dpo_id_t *dpo)
+{
+ classify_dpo_t *cd;
+
+ cd = classify_dpo_get(dpo->dpoi_index);
+
+ cd->cd_locks--;
+
+ if (0 == cd->cd_locks)
+ {
+ pool_put(classify_dpo_pool, cd);
+ }
+}
+
+static void
+classify_dpo_mem_show (void)
+{
+ fib_show_memory_usage("Classify",
+ pool_elts(classify_dpo_pool),
+ pool_len(classify_dpo_pool),
+ sizeof(classify_dpo_t));
+}
+
+const static dpo_vft_t cd_vft = {
+ .dv_lock = classify_dpo_lock,
+ .dv_unlock = classify_dpo_unlock,
+ .dv_format = format_classify_dpo,
+ .dv_mem_show = classify_dpo_mem_show,
+};
+
+const static char* const classify_ip4_nodes[] =
+{
+ "ip4-classify",
+ NULL,
+};
+const static char* const classify_ip6_nodes[] =
+{
+ "ip6-classify",
+ NULL,
+};
+const static char* const * const classify_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = classify_ip4_nodes,
+ [DPO_PROTO_IP6] = classify_ip6_nodes,
+ [DPO_PROTO_MPLS] = NULL,
+};
+
+void
+classify_dpo_module_init (void)
+{
+ dpo_register(DPO_CLASSIFY, &cd_vft, classify_nodes);
+}
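+
+/* A minimal usage sketch (my_table_index is hypothetical): create a
+ * classify DPO over an existing classify table and bind a dpo_id_t to it
+ * so it can be stacked into the forwarding graph:
+ *
+ *   dpo_id_t dpo = DPO_INVALID;
+ *   index_t cdi = classify_dpo_create (DPO_PROTO_IP4, my_table_index);
+ *   dpo_set (&dpo, DPO_CLASSIFY, DPO_PROTO_IP4, cdi);
+ *   ...
+ *   dpo_reset (&dpo);
+ */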
diff --git a/src/vnet/dpo/classify_dpo.h b/src/vnet/dpo/classify_dpo.h
new file mode 100644
index 00000000000..48f4b2bf8a5
--- /dev/null
+++ b/src/vnet/dpo/classify_dpo.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CLASSIFY_DPO_H__
+#define __CLASSIFY_DPO_H__
+
+#include <vnet/vnet.h>
+#include <vnet/mpls/packet.h>
+#include <vnet/dpo/dpo.h>
+
+/**
+ * A representation of a classify table lookup in the data-path
+ */
+typedef struct classify_dpo_t
+{
+ dpo_proto_t cd_proto;
+
+ u32 cd_table_index;
+
+ /**
+     * Number of locks/users of the DPO
+ */
+ u16 cd_locks;
+} classify_dpo_t;
+
+extern index_t classify_dpo_create(dpo_proto_t proto,
+ u32 classify_table_index);
+
+extern u8* format_classify_dpo(u8 *s, va_list *args);
+
+/*
+ * Encapsulation violation for fast data-path access
+ */
+extern classify_dpo_t *classify_dpo_pool;
+
+static inline classify_dpo_t *
+classify_dpo_get (index_t index)
+{
+ return (pool_elt_at_index(classify_dpo_pool, index));
+}
+
+extern void classify_dpo_module_init(void);
+
+#endif
diff --git a/src/vnet/dpo/dpo.c b/src/vnet/dpo/dpo.c
new file mode 100644
index 00000000000..688d2892412
--- /dev/null
+++ b/src/vnet/dpo/dpo.c
@@ -0,0 +1,500 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief
+ * A Data-Path Object is an object that represents actions that are
+ * applied to packets as they are switched through VPP.
+ *
+ * The DPO is a base class that is specialised by other objects to provide
+ * concrete actions.
+ *
+ * The VLIB graph nodes are a graph of types; the DPO graph is a graph of instances.
+ */
+
+#include <vnet/dpo/dpo.h>
+#include <vnet/ip/lookup.h>
+#include <vnet/ip/format.h>
+#include <vnet/adj/adj.h>
+
+#include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/mpls_label_dpo.h>
+#include <vnet/dpo/lookup_dpo.h>
+#include <vnet/dpo/drop_dpo.h>
+#include <vnet/dpo/receive_dpo.h>
+#include <vnet/dpo/punt_dpo.h>
+#include <vnet/dpo/classify_dpo.h>
+#include <vnet/dpo/ip_null_dpo.h>
+
+/**
+ * Array of char* names for the DPO types and protos
+ */
+static const char* dpo_type_names[] = DPO_TYPES;
+static const char* dpo_proto_names[] = DPO_PROTOS;
+
+/**
+ * @brief Vector of virtual function tables for the DPO types
+ *
+ * This is a vector so we can dynamically register new DPO types in plugins.
+ */
+static dpo_vft_t *dpo_vfts;
+
+/**
+ * @brief vector of graph node names associated with each DPO type and protocol.
+ *
+ * dpo_nodes[child_type][child_proto][node_X] = node_name;
+ * i.e.
+ * dpo_node[DPO_LOAD_BALANCE][DPO_PROTO_IP4][0] = "ip4-lookup"
+ * dpo_node[DPO_LOAD_BALANCE][DPO_PROTO_IP4][1] = "ip4-load-balance"
+ *
+ * This is a vector so we can dynamically register new DPO types in plugins.
+ */
+static const char* const * const ** dpo_nodes;
+
+/**
+ * @brief Vector of edge indices from parent DPO nodes to child
+ *
+ * dpo_edges[child_type][child_proto][parent_type][parent_proto] = edge_index
+ *
+ * This array is derived at init time from the dpo_nodes above. Note that
+ * the third dimension in dpo_nodes is lost, hence, the edge index from each
+ * node MUST be the same.
+ * Including both the child and parent protocol is required to support the
+ * case where it changes as the graph is traversed, most notably when an
+ * MPLS label is popped.
+ *
+ * Note that this array is child type specific, not child instance specific.
+ */
+static u32 ****dpo_edges;
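+
+/* For example (an illustrative sketch): when an MPLS pop exposes an IPv4
+ * payload, a child whose nodes operate on MPLS may stack on an ip4 parent,
+ * so the arc consulted is
+ *
+ *   dpo_edges[child_type][DPO_PROTO_MPLS][DPO_LOAD_BALANCE][DPO_PROTO_IP4]
+ */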
+
+/**
+ * @brief The DPO type value that can be assigend to the next dynamic
+ * type registration.
+ */
+static dpo_type_t dpo_dynamic = DPO_LAST;
+
+dpo_proto_t
+vnet_link_to_dpo_proto (vnet_link_t linkt)
+{
+ switch (linkt)
+ {
+ case VNET_LINK_IP6:
+ return (DPO_PROTO_IP6);
+ case VNET_LINK_IP4:
+ return (DPO_PROTO_IP4);
+ case VNET_LINK_MPLS:
+ return (DPO_PROTO_MPLS);
+ case VNET_LINK_ETHERNET:
+ return (DPO_PROTO_ETHERNET);
+ case VNET_LINK_ARP:
+ break;
+ }
+ ASSERT(0);
+ return (0);
+}
+
+u8 *
+format_dpo_type (u8 * s, va_list * args)
+{
+ dpo_type_t type = va_arg (*args, int);
+
+ s = format(s, "%s", dpo_type_names[type]);
+
+ return (s);
+}
+
+u8 *
+format_dpo_id (u8 * s, va_list * args)
+{
+ dpo_id_t *dpo = va_arg (*args, dpo_id_t*);
+ u32 indent = va_arg (*args, u32);
+
+ s = format(s, "[@%d]: ", dpo->dpoi_next_node);
+
+ if (NULL != dpo_vfts[dpo->dpoi_type].dv_format)
+ {
+ return (format(s, "%U",
+ dpo_vfts[dpo->dpoi_type].dv_format,
+ dpo->dpoi_index,
+ indent));
+ }
+
+ switch (dpo->dpoi_type)
+ {
+ case DPO_FIRST:
+ s = format(s, "unset");
+ break;
+ default:
+ s = format(s, "unknown");
+ break;
+ }
+ return (s);
+}
+
+u8 *
+format_dpo_proto (u8 * s, va_list * args)
+{
+ dpo_proto_t proto = va_arg (*args, int);
+
+ return (format(s, "%s", dpo_proto_names[proto]));
+}
+
+void
+dpo_set (dpo_id_t *dpo,
+ dpo_type_t type,
+ dpo_proto_t proto,
+ index_t index)
+{
+ dpo_id_t tmp = *dpo;
+
+ dpo->dpoi_type = type;
+    dpo->dpoi_proto = proto;
+ dpo->dpoi_index = index;
+
+ if (DPO_ADJACENCY == type)
+ {
+ /*
+ * set the adj subtype
+ */
+ ip_adjacency_t *adj;
+
+ adj = adj_get(index);
+
+ switch (adj->lookup_next_index)
+ {
+ case IP_LOOKUP_NEXT_ARP:
+ dpo->dpoi_type = DPO_ADJACENCY_INCOMPLETE;
+ break;
+ case IP_LOOKUP_NEXT_MIDCHAIN:
+ dpo->dpoi_type = DPO_ADJACENCY_MIDCHAIN;
+ break;
+ default:
+ break;
+ }
+ }
+ dpo_lock(dpo);
+ dpo_unlock(&tmp);
+}
+
+void
+dpo_reset (dpo_id_t *dpo)
+{
+ dpo_id_t tmp = DPO_INVALID;
+
+ /*
+ * use the atomic copy operation.
+ */
+ dpo_copy(dpo, &tmp);
+}
+
+/**
+ * @brief
+ * Compare two Data-path objects
+ *
+ * Like memcmp: returns 0 if matching, !0 otherwise.
+ */
+int
+dpo_cmp (const dpo_id_t *dpo1,
+ const dpo_id_t *dpo2)
+{
+ int res;
+
+ res = dpo1->dpoi_type - dpo2->dpoi_type;
+
+ if (0 != res) return (res);
+
+ return (dpo1->dpoi_index - dpo2->dpoi_index);
+}
+
+void
+dpo_copy (dpo_id_t *dst,
+ const dpo_id_t *src)
+{
+ dpo_id_t tmp = *dst;
+
+ /*
+     * the destination is written in a single u64 write - hence atomic w.r.t.
+     * any packets in flight.
+ */
+ *((u64*)dst) = *(u64*)src;
+
+ dpo_lock(dst);
+ dpo_unlock(&tmp);
+}
+
+int
+dpo_is_adj (const dpo_id_t *dpo)
+{
+ return ((dpo->dpoi_type == DPO_ADJACENCY) ||
+ (dpo->dpoi_type == DPO_ADJACENCY_INCOMPLETE) ||
+ (dpo->dpoi_type == DPO_ADJACENCY_MIDCHAIN) ||
+ (dpo->dpoi_type == DPO_ADJACENCY_GLEAN));
+}
+
+void
+dpo_register (dpo_type_t type,
+ const dpo_vft_t *vft,
+ const char * const * const * nodes)
+{
+ vec_validate(dpo_vfts, type);
+ dpo_vfts[type] = *vft;
+
+ vec_validate(dpo_nodes, type);
+ dpo_nodes[type] = nodes;
+}
+
+dpo_type_t
+dpo_register_new_type (const dpo_vft_t *vft,
+ const char * const * const * nodes)
+{
+ dpo_type_t type = dpo_dynamic++;
+
+ dpo_register(type, vft, nodes);
+
+ return (type);
+}
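+
+/* A sketch of how a plugin might register its own type (all names here are
+ * hypothetical):
+ *
+ *   const static char* const my_ip4_nodes[] = { "my-dpo-ip4", NULL };
+ *   const static char* const * const my_nodes[DPO_PROTO_NUM] = {
+ *       [DPO_PROTO_IP4] = my_ip4_nodes,
+ *   };
+ *   const static dpo_vft_t my_vft = {
+ *       .dv_lock = my_dpo_lock,
+ *       .dv_unlock = my_dpo_unlock,
+ *       .dv_format = format_my_dpo,
+ *   };
+ *   my_dpo_type = dpo_register_new_type (&my_vft, my_nodes);
+ */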
+
+void
+dpo_lock (dpo_id_t *dpo)
+{
+ if (!dpo_id_is_valid(dpo))
+ return;
+
+ dpo_vfts[dpo->dpoi_type].dv_lock(dpo);
+}
+
+void
+dpo_unlock (dpo_id_t *dpo)
+{
+ if (!dpo_id_is_valid(dpo))
+ return;
+
+ dpo_vfts[dpo->dpoi_type].dv_unlock(dpo);
+}
+
+
+static u32
+dpo_get_next_node (dpo_type_t child_type,
+ dpo_proto_t child_proto,
+ const dpo_id_t *parent_dpo)
+{
+ dpo_proto_t parent_proto;
+ dpo_type_t parent_type;
+
+ parent_type = parent_dpo->dpoi_type;
+ parent_proto = parent_dpo->dpoi_proto;
+
+ vec_validate(dpo_edges, child_type);
+ vec_validate(dpo_edges[child_type], child_proto);
+ vec_validate(dpo_edges[child_type][child_proto], parent_type);
+ vec_validate_init_empty(
+ dpo_edges[child_type][child_proto][parent_type],
+ parent_proto, ~0);
+
+ /*
+     * if the edge index has not yet been created for this node-to-node transition
+ */
+ if (~0 == dpo_edges[child_type][child_proto][parent_type][parent_proto])
+ {
+ vlib_node_t *parent_node, *child_node;
+ vlib_main_t *vm;
+        u32 edge, pp, cc;
+
+ vm = vlib_get_main();
+
+ ASSERT(NULL != dpo_nodes[child_type]);
+ ASSERT(NULL != dpo_nodes[child_type][child_proto]);
+ ASSERT(NULL != dpo_nodes[parent_type]);
+ ASSERT(NULL != dpo_nodes[parent_type][parent_proto]);
+
+ cc = 0;
+
+ /*
+         * create a graph arc from each of the child's registered node types
+         * to each of the parent's.
+ */
+ while (NULL != dpo_nodes[child_type][child_proto][cc])
+ {
+ child_node =
+ vlib_get_node_by_name(vm,
+ (u8*) dpo_nodes[child_type][child_proto][cc]);
+
+ pp = 0;
+
+ while (NULL != dpo_nodes[parent_type][parent_proto][pp])
+ {
+ parent_node =
+ vlib_get_node_by_name(vm,
+ (u8*) dpo_nodes[parent_type][parent_proto][pp]);
+
+ edge = vlib_node_add_next(vm,
+ child_node->index,
+ parent_node->index);
+
+ if (~0 == dpo_edges[child_type][child_proto][parent_type][parent_proto])
+ {
+ dpo_edges[child_type][child_proto][parent_type][parent_proto] = edge;
+ }
+ else
+ {
+ ASSERT(dpo_edges[child_type][child_proto][parent_type][parent_proto] == edge);
+ }
+ pp++;
+ }
+ cc++;
+ }
+ }
+
+ return (dpo_edges[child_type][child_proto][parent_type][parent_proto]);
+}
+
+/**
+ * @brief Stack one DPO object on another, and thus establish a child-parent
+ * relationship. The VLIB graph arc used is taken from the parent and child types
+ * passed.
+ */
+static void
+dpo_stack_i (u32 edge,
+ dpo_id_t *dpo,
+ const dpo_id_t *parent)
+{
+ /*
+     * in order to get an atomic update of the child we create a temporary
+     * from a copy of the parent, add the next_node, then copy to the child
+ */
+ dpo_id_t tmp = DPO_INVALID;
+ dpo_copy(&tmp, parent);
+
+ /*
+     * get the edge index for the child to parent VLIB graph transition
+ */
+ tmp.dpoi_next_node = edge;
+
+ /*
+ * this update is atomic.
+ */
+ dpo_copy(dpo, &tmp);
+
+ dpo_reset(&tmp);
+}
+
+/**
+ * @brief Stack one DPO object on another, and thus establish a child-parent
+ * relationship. The VLIB graph arc used is taken from the parent and child types
+ * passed.
+ */
+void
+dpo_stack (dpo_type_t child_type,
+ dpo_proto_t child_proto,
+ dpo_id_t *dpo,
+ const dpo_id_t *parent)
+{
+ dpo_stack_i(dpo_get_next_node(child_type, child_proto, parent), dpo, parent);
+}
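+
+/* For instance (a sketch; child_dpo and parent_dpo are assumed to be valid
+ * dpo_id_t's), a load-balance child stacks on its parent with:
+ *
+ *   dpo_stack (DPO_LOAD_BALANCE, DPO_PROTO_IP4, &child_dpo, &parent_dpo);
+ *
+ * after which packets leaving the "ip4-load-balance" node follow the
+ * newly-created arc to the parent's node.
+ */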
+
+/**
+ * @brief Stack one DPO object on another, and thus establish a child-parent
+ * relationship. A new VLIB graph arc is created from the child node passed
+ * to the nodes registered by the parent. The VLIB infra will ensure this arc
+ * is added only once.
+ */
+void
+dpo_stack_from_node (u32 child_node_index,
+ dpo_id_t *dpo,
+ const dpo_id_t *parent)
+{
+ dpo_proto_t parent_proto;
+ vlib_node_t *parent_node;
+ dpo_type_t parent_type;
+ vlib_main_t *vm;
+ u32 edge;
+
+ parent_type = parent->dpoi_type;
+ parent_proto = parent->dpoi_proto;
+
+ vm = vlib_get_main();
+
+ ASSERT(NULL != dpo_nodes[parent_type]);
+ ASSERT(NULL != dpo_nodes[parent_type][parent_proto]);
+
+ parent_node =
+ vlib_get_node_by_name(vm, (u8*) dpo_nodes[parent_type][parent_proto][0]);
+
+ edge = vlib_node_add_next(vm,
+ child_node_index,
+ parent_node->index);
+
+ dpo_stack_i(edge, dpo, parent);
+}
+
+static clib_error_t *
+dpo_module_init (vlib_main_t * vm)
+{
+ drop_dpo_module_init();
+ punt_dpo_module_init();
+ receive_dpo_module_init();
+ load_balance_module_init();
+ mpls_label_dpo_module_init();
+ classify_dpo_module_init();
+ lookup_dpo_module_init();
+ ip_null_dpo_module_init();
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION(dpo_module_init);
+
+static clib_error_t *
+dpo_memory_show (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ dpo_vft_t *vft;
+
+ vlib_cli_output (vm, "DPO memory");
+ vlib_cli_output (vm, "%=30s %=5s %=8s/%=9s totals",
+ "Name","Size", "in-use", "allocated");
+
+ vec_foreach(vft, dpo_vfts)
+ {
+ if (NULL != vft->dv_mem_show)
+ vft->dv_mem_show();
+ }
+
+ return (NULL);
+}
+
+/* *INDENT-OFF* */
+/*?
+ * The '<em>sh dpo memory </em>' command displays the memory usage for each
+ * data-plane object type.
+ *
+ * @cliexpar
+ * @cliexstart{show dpo memory}
+ * DPO memory
+ * Name Size in-use /allocated totals
+ * load-balance 64 12 / 12 768/768
+ * Adjacency 256 1 / 1 256/256
+ * Receive 24 5 / 5 120/120
+ * Lookup 12 0 / 0 0/0
+ * Classify 12 0 / 0 0/0
+ * MPLS label 24 0 / 0 0/0
+ * @cliexend
+?*/
+VLIB_CLI_COMMAND (show_fib_memory, static) = {
+ .path = "show dpo memory",
+ .function = dpo_memory_show,
+ .short_help = "show dpo memory",
+};
+/* *INDENT-ON* */
diff --git a/src/vnet/dpo/dpo.h b/src/vnet/dpo/dpo.h
new file mode 100644
index 00000000000..1efcbc8834b
--- /dev/null
+++ b/src/vnet/dpo/dpo.h
@@ -0,0 +1,381 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief
+ * A Data-Path Object is an object that represents actions that are
+ * applied to packets as they are switched through VPP's data-path.
+ *
+ * The DPO can be considered to be a base class that is specialised
+ * by other objects to provide concrete actions
+ *
+ * The VLIB graph nodes are graph of DPO types, the DPO graph is a graph of
+ * instances.
+ */
+
+#ifndef __DPO_H__
+#define __DPO_H__
+
+#include <vnet/vnet.h>
+
+/**
+ * @brief An index for adjacencies.
+ * Alas 'C' is not typesafe enough to b0rk when a u32 is used instead of
+ * an index_t. However, for us humans, we can glean much more intent
+ * from the declaration
+ *    foo bar(index_t t);
+ * than we can from
+ * foo bar(u32 t);
+ */
+typedef u32 index_t;
+
+/**
+ * @brief Invalid index - used when no index is known
+ * blazoned capitals INVALID speak volumes where ~0 does not.
+ */
+#define INDEX_INVALID ((index_t)(~0))
+
+/**
+ * @brief Data path protocol.
+ * Actions performed on packets in the data-plane can be described and represented
+ * by protocol independent objects, i.e. ADJACENCY, but the specific actions
+ * required during ADJACENCY processing can be protocol dependent. For example,
+ * the adjacency rewrite node performs an ip4 checksum calculation, ip6 and MPLS
+ * do not, all 3 perform a TTL decrement. The VLIB graph nodes are thus protocol
+ * dependent, and thus each graph edge/arc is too.
+ * When programming a DPO's next node arc from child to parent it is thus required
+ * to know the parent's data-path protocol so the correct arc index can be used.
+ */
+typedef enum dpo_proto_t_
+{
+#if CLIB_DEBUG > 0
+ DPO_PROTO_IP4 = 1,
+#else
+ DPO_PROTO_IP4 = 0,
+#endif
+ DPO_PROTO_IP6,
+ DPO_PROTO_ETHERNET,
+ DPO_PROTO_MPLS,
+} __attribute__((packed)) dpo_proto_t;
+
+#define DPO_PROTO_NUM ((dpo_proto_t)(DPO_PROTO_MPLS+1))
+#define DPO_PROTO_NONE ((dpo_proto_t)(DPO_PROTO_NUM+1))
+
+#define DPO_PROTOS { \
+ [DPO_PROTO_IP4] = "ip4", \
+ [DPO_PROTO_IP6] = "ip6", \
+ [DPO_PROTO_ETHERNET] = "ethernet", \
+ [DPO_PROTO_MPLS] = "mpls", \
+}
+
+#define FOR_EACH_DPO_PROTO(_proto) \
+ for (_proto = DPO_PROTO_IP4; \
+ _proto <= DPO_PROTO_MPLS; \
+ _proto++)
+
+/**
+ * @brief Common types of data-path objects
+ * New types can be dynamically added using dpo_register_new_type()
+ */
+typedef enum dpo_type_t_ {
+ /**
+     * A non-zero value first so we can spot uninitialisation errors
+ */
+ DPO_FIRST,
+ DPO_DROP,
+ DPO_IP_NULL,
+ DPO_PUNT,
+ /**
+ * @brief load-balancing over a choice of [un]equal cost paths
+ */
+ DPO_LOAD_BALANCE,
+ DPO_ADJACENCY,
+ DPO_ADJACENCY_INCOMPLETE,
+ DPO_ADJACENCY_MIDCHAIN,
+ DPO_ADJACENCY_GLEAN,
+ DPO_RECEIVE,
+ DPO_LOOKUP,
+ DPO_LISP_CP,
+ DPO_CLASSIFY,
+ DPO_MPLS_LABEL,
+ DPO_LAST,
+} __attribute__((packed)) dpo_type_t;
+
+#define DPO_TYPE_NUM DPO_LAST
+
+#define DPO_TYPES { \
+ [DPO_FIRST] = "dpo-invalid", \
+ [DPO_DROP] = "dpo-drop", \
+ [DPO_IP_NULL] = "dpo-ip-null", \
+ [DPO_PUNT] = "dpo-punt", \
+ [DPO_ADJACENCY] = "dpo-adjacency", \
+ [DPO_ADJACENCY_INCOMPLETE] = "dpo-adjacency-incomplete", \
+    [DPO_ADJACENCY_MIDCHAIN] = "dpo-adjacency-midchain", \
+ [DPO_ADJACENCY_GLEAN] = "dpo-glean", \
+ [DPO_RECEIVE] = "dpo-receive", \
+ [DPO_LOOKUP] = "dpo-lookup", \
+ [DPO_LOAD_BALANCE] = "dpo-load-balance", \
+ [DPO_LISP_CP] = "dpo-lisp-cp", \
+ [DPO_CLASSIFY] = "dpo-classify", \
+ [DPO_MPLS_LABEL] = "dpo-mpls-label" \
+}
+
+/**
+ * @brief The identity of a DPO is a combination of its type and its
+ * instance number/index of objects of that type
+ */
+typedef struct dpo_id_t_ {
+ /**
+ * the type
+ */
+ dpo_type_t dpoi_type;
+ /**
+ * the data-path protocol of the type.
+ */
+ dpo_proto_t dpoi_proto;
+ /**
+ * The next VLIB node to follow.
+ */
+ u16 dpoi_next_node;
+ /**
+ * the index of objects of that type
+ */
+ index_t dpoi_index;
+} __attribute__ ((aligned(sizeof(u64)))) dpo_id_t;
+
+STATIC_ASSERT(sizeof(dpo_id_t) <= sizeof(u64),
+ "DPO ID is greater than sizeof u64 "
+ "atomic updates need to be revisited");
+
+/**
+ * @brief An initialiser for DPOs declared on the stack.
+ * The next node is set to 0, since VLIB graph nodes should use next-index 0 for drop.
+ */
+#define DPO_INVALID \
+{ \
+ .dpoi_type = DPO_FIRST, \
+ .dpoi_proto = DPO_PROTO_NONE, \
+ .dpoi_index = INDEX_INVALID, \
+ .dpoi_next_node = 0, \
+}
+
+/**
+ * @brief Return true if the DPO object is valid, i.e. has been initialised.
+ */
+static inline int
+dpo_id_is_valid (const dpo_id_t *dpoi)
+{
+ return (dpoi->dpoi_type != DPO_FIRST &&
+ dpoi->dpoi_index != INDEX_INVALID);
+}
+
+extern dpo_proto_t vnet_link_to_dpo_proto(vnet_link_t linkt);
+
+/**
+ * @brief
+ * Take a reference counting lock on the DPO
+ */
+extern void dpo_lock(dpo_id_t *dpo);
+
+/**
+ * @brief
+ * Release a reference counting lock on the DPO
+ */
+extern void dpo_unlock(dpo_id_t *dpo);
+
+/**
+ * @brief Set/create a DPO ID
+ * The DPO will be locked.
+ *
+ * @param dpo
+ * The DPO object to configure
+ *
+ * @param type
+ * The dpo_type_t of the DPO
+ *
+ * @param proto
+ * The dpo_proto_t of the DPO
+ *
+ * @param index
+ * The type specific index of the DPO
+ */
+extern void dpo_set(dpo_id_t *dpo,
+ dpo_type_t type,
+ dpo_proto_t proto,
+ index_t index);
+
+/**
+ * @brief reset a DPO ID
+ * The DPO will be unlocked.
+ *
+ * @param dpo
+ * The DPO object to reset
+ */
+extern void dpo_reset(dpo_id_t *dpo);
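+
+/**
+ * A typical lifecycle, sketched (my_index is hypothetical). dpo_set takes
+ * the lock, dpo_reset releases it:
+ *
+ *   dpo_id_t dpo = DPO_INVALID;
+ *   dpo_set (&dpo, DPO_RECEIVE, DPO_PROTO_IP4, my_index);
+ *   ...
+ *   dpo_reset (&dpo);
+ */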
+
+/**
+ * @brief compare two DPOs for equality
+ */
+extern int dpo_cmp(const dpo_id_t *dpo1,
+ const dpo_id_t *dpo2);
+
+/**
+ * @brief
+ * atomic copy a data-plane object.
+ * This is safe to use when the dst DPO is currently switching packets
+ */
+extern void dpo_copy(dpo_id_t *dst,
+ const dpo_id_t *src);
+
+/**
+ * @brief Return TRUE if the DPO is any type of adjacency
+ */
+extern int dpo_is_adj(const dpo_id_t *dpo);
+
+/**
+ * @brief Format a dpo_id_t object
+ */
+extern u8 *format_dpo_id(u8 * s, va_list * args);
+
+/**
+ * @brief Format a DPO type
+ */
+extern u8 *format_dpo_type(u8 * s, va_list * args);
+
+/**
+ * @brief format a DPO protocol
+ */
+extern u8 *format_dpo_proto(u8 * s, va_list * args);
+
+/**
+ * @brief
+ * Set and stack a DPO.
+ * The DPO passed is set to the parent DPO and the necessary
+ * VLIB graph arcs are created. The child_type and child_proto
+ * are used to get the VLIB nodes from which the arcs are added.
+ *
+ * @param child_type
+ * Child DPO type.
+ *
+ * @param child_proto
+ * Child DPO proto
+ *
+ * @param dpo
+ *  This is the DPO to stack and set.
+ *
+ * @param parent_dpo
+ * The parent DPO to stack onto.
+ */
+extern void dpo_stack(dpo_type_t child_type,
+ dpo_proto_t child_proto,
+ dpo_id_t *dpo,
+ const dpo_id_t *parent_dpo);
+
+/**
+ * @brief
+ * Set and stack a DPO.
+ * The DPO passed is set to the parent DPO and the necessary
+ * VLIB graph arcs are created, from the child_node passed.
+ *
+ * @param child_node
+ *  The VLIB graph node index to create an arc from to the parent
+ *
+ * @param dpo
+ *  This is the DPO to stack and set.
+ *
+ * @param parent_dpo
+ * The parent DPO to stack onto.
+ */
+extern void dpo_stack_from_node(u32 child_node,
+ dpo_id_t *dpo,
+ const dpo_id_t *parent);
+
+/**
+ * @brief A lock function registered for a DPO type
+ */
+typedef void (*dpo_lock_fn_t)(dpo_id_t *dpo);
+
+/**
+ * @brief An unlock function registered for a DPO type
+ */
+typedef void (*dpo_unlock_fn_t)(dpo_id_t *dpo);
+
+/**
+ * @brief A memory usage show command
+ */
+typedef void (*dpo_mem_show_t)(void);
+
+/**
+ * @brief A virtual function table registered for a DPO type
+ */
+typedef struct dpo_vft_t_
+{
+ /**
+ * A reference counting lock function
+ */
+ dpo_lock_fn_t dv_lock;
+ /**
+ * A reference counting unlock function
+ */
+ dpo_lock_fn_t dv_unlock;
+ /**
+ * A format function
+ */
+ format_function_t *dv_format;
+ /**
+ * A show memory usage function
+ */
+ dpo_mem_show_t dv_mem_show;
+} dpo_vft_t;
+
+
+/**
+ * @brief For a given DPO type Register:
+ * - a virtual function table
+ * - a NULL terminated array of graph nodes from which that object type
+ * will originate packets, i.e. the nodes in which the object type will be
+ *    the parent DPO in the DP graph. The nodes are per-data-path protocol
+ * (see above).
+ *
+ * @param type
+ * The type being registered.
+ *
+ * @param vft
+ * The virtual function table to register for the type.
+ *
+ * @param nodes
+ * The string description of the per-protocol VLIB graph nodes.
+ */
+extern void dpo_register(dpo_type_t type,
+ const dpo_vft_t *vft,
+ const char * const * const * nodes);
+
+/**
+ * @brief Create and register a new DPO type.
+ *
+ * This can be used by plugins to create new DPO types that are not listed
+ * in dpo_type_t enum
+ *
+ * @param vft
+ * The virtual function table to register for the type.
+ *
+ * @param nodes
+ * The string description of the per-protocol VLIB graph nodes.
+ *
+ * @return The new dpo_type_t
+ */
+extern dpo_type_t dpo_register_new_type(const dpo_vft_t *vft,
+ const char * const * const * nodes);
+
+#endif
diff --git a/src/vnet/dpo/drop_dpo.c b/src/vnet/dpo/drop_dpo.c
new file mode 100644
index 00000000000..5118d2a45b7
--- /dev/null
+++ b/src/vnet/dpo/drop_dpo.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief
+ * The data-path object representing dropping the packet
+ */
+
+#include <vnet/dpo/dpo.h>
+
+static dpo_id_t drop_dpos[DPO_PROTO_NUM];
+
+const dpo_id_t *
+drop_dpo_get (dpo_proto_t proto)
+{
+ dpo_set(&drop_dpos[proto], DPO_DROP, proto, proto);
+
+ return (&drop_dpos[proto]);
+}
+
+int
+dpo_is_drop (const dpo_id_t *dpo)
+{
+ return (dpo->dpoi_type == DPO_DROP);
+}
+
+static void
+drop_dpo_lock (dpo_id_t *dpo)
+{
+ /*
+     * not maintaining a lock count on the drop;
+     * it's more trouble than it's worth.
+     * There always needs to be one around, so there's no point in managing its lifetime
+ */
+}
+static void
+drop_dpo_unlock (dpo_id_t *dpo)
+{
+}
+
+static u8*
+format_drop_dpo (u8 *s, va_list *ap)
+{
+ CLIB_UNUSED(index_t index) = va_arg(*ap, index_t);
+ CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
+
+ return (format(s, "dpo-drop %U", format_dpo_proto, index));
+}
+
+const static dpo_vft_t drop_vft = {
+ .dv_lock = drop_dpo_lock,
+ .dv_unlock = drop_dpo_unlock,
+ .dv_format = format_drop_dpo,
+};
+
+/**
+ * @brief The per-protocol VLIB graph nodes that are assigned to a drop
+ * object.
+ *
+ * this means that these graph nodes are ones from which a drop is the
+ * parent object in the DPO-graph.
+ */
+const static char* const drop_ip4_nodes[] =
+{
+ "ip4-drop",
+ NULL,
+};
+const static char* const drop_ip6_nodes[] =
+{
+ "ip6-drop",
+ NULL,
+};
+const static char* const drop_mpls_nodes[] =
+{
+ "mpls-drop",
+ NULL,
+};
+const static char* const drop_ethernet_nodes[] =
+{
+ "error-drop",
+ NULL,
+};
+const static char* const * const drop_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = drop_ip4_nodes,
+ [DPO_PROTO_IP6] = drop_ip6_nodes,
+ [DPO_PROTO_MPLS] = drop_mpls_nodes,
+ [DPO_PROTO_ETHERNET] = drop_ethernet_nodes,
+};
+
+void
+drop_dpo_module_init (void)
+{
+ dpo_register(DPO_DROP, &drop_vft, drop_nodes);
+}
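+
+/* A usage sketch: to black-hole traffic, point a route's DPO at the
+ * per-protocol drop (route_dpo is assumed to be the route's dpo_id_t):
+ *
+ *   dpo_copy (&route_dpo, drop_dpo_get (DPO_PROTO_IP4));
+ */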
diff --git a/src/vnet/dpo/drop_dpo.h b/src/vnet/dpo/drop_dpo.h
new file mode 100644
index 00000000000..436df36c84e
--- /dev/null
+++ b/src/vnet/dpo/drop_dpo.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief The Drop DPO will drop all packets, no questions asked. It is valid
+ * for any packet protocol.
+ */
+
+#ifndef __DROP_DPO_H__
+#define __DROP_DPO_H__
+
+#include <vnet/dpo/dpo.h>
+
+extern int dpo_is_drop(const dpo_id_t *dpo);
+
+extern const dpo_id_t *drop_dpo_get(dpo_proto_t proto);
+
+extern void drop_dpo_module_init(void);
+
+#endif
diff --git a/src/vnet/dpo/ip_null_dpo.c b/src/vnet/dpo/ip_null_dpo.c
new file mode 100644
index 00000000000..22682e4eee4
--- /dev/null
+++ b/src/vnet/dpo/ip_null_dpo.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief
+ * The data-path object representing dropping the packet
+ */
+
+#include <vnet/dpo/ip_null_dpo.h>
+#include <vnet/ip/ip.h>
+
+/**
+ * @brief A representation of the IP_NULL DPO
+ */
+typedef struct ip_null_dpo_t_
+{
+ /**
+ * @brief The action to take on a packet
+ */
+ ip_null_dpo_action_t ind_action;
+ /**
+ * @brief The next VLIB node
+ */
+ u32 ind_next_index;
+ /**
+ * rate limits
+ */
+} ip_null_dpo_t;
+
+/**
+ * @brief the IP_NULL dpos are shared by all routes, hence they are global.
+ * As the name implies this is only for IP, hence the 2 (v4 and v6).
+ */
+static ip_null_dpo_t ip_null_dpos[2 * IP_NULL_DPO_ACTION_NUM] = {
+ [0] = {
+ /* proto ip4, no action */
+ .ind_action = IP_NULL_ACTION_NONE,
+ },
+ [1] = {
+ /* proto ip4, action send unreach */
+ .ind_action = IP_NULL_ACTION_SEND_ICMP_UNREACH,
+ },
+ [2] = {
+	/* proto ip4, action send prohibit */
+ .ind_action = IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
+ },
+ [3] = {
+ /* proto ip6, no action */
+ .ind_action = IP_NULL_ACTION_NONE,
+ },
+ [4] = {
+ /* proto ip6, action send unreach */
+ .ind_action = IP_NULL_ACTION_SEND_ICMP_UNREACH,
+ },
+ [5] = {
+	/* proto ip6, action send prohibit */
+ .ind_action = IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
+ },
+};
+
+/**
+ * @brief Action strings
+ */
+const char *ip_null_action_strings[] = IP_NULL_ACTIONS;
+
+void
+ip_null_dpo_add_and_lock (dpo_proto_t proto,
+ ip_null_dpo_action_t action,
+ dpo_id_t *dpo)
+{
+ int i;
+
+ ASSERT((proto == DPO_PROTO_IP4) ||
+ (proto == DPO_PROTO_IP6));
+ ASSERT(action < IP_NULL_DPO_ACTION_NUM);
+
+ i = (proto == DPO_PROTO_IP4 ? 0 : 1);
+
+ dpo_set(dpo, DPO_IP_NULL, proto, (i*IP_NULL_DPO_ACTION_NUM) + action);
+}
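+
+/* The index encodes both protocol and action, mirroring the static array
+ * above: ip4 uses indices 0..2 and ip6 uses 3..5, with the action as the
+ * offset within each block. A sketch, assuming the enum values follow the
+ * array order above:
+ *
+ *   dpo_id_t dpo = DPO_INVALID;
+ *   ip_null_dpo_add_and_lock (DPO_PROTO_IP6, IP_NULL_ACTION_SEND_ICMP_UNREACH,
+ *                             &dpo);
+ *
+ * leaving dpo.dpoi_index == 4, i.e. ip_null_dpos[4].
+ */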
+
+always_inline const ip_null_dpo_t*
+ip_null_dpo_get (index_t indi)
+{
+ return (&ip_null_dpos[indi]);
+}
+
+static void
+ip_null_dpo_lock (dpo_id_t *dpo)
+{
+ /*
+ * not maintaining a lock count on the ip_null, they are const global and
+ * never die.
+ */
+}
+static void
+ip_null_dpo_unlock (dpo_id_t *dpo)
+{
+}
+
+static u8*
+format_ip_null_dpo (u8 *s, va_list *ap)
+{
+ index_t index = va_arg(*ap, index_t);
+ CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
+ const ip_null_dpo_t *ind;
+ dpo_proto_t proto;
+
+ ind = ip_null_dpo_get(index);
+ proto = (index < IP_NULL_DPO_ACTION_NUM ? DPO_PROTO_IP4 : DPO_PROTO_IP6);
+
+ return (format(s, "%U-null action:%s",
+ format_dpo_proto, proto,
+ ip_null_action_strings[ind->ind_action]));
+}
+
+const static dpo_vft_t ip_null_vft = {
+ .dv_lock = ip_null_dpo_lock,
+ .dv_unlock = ip_null_dpo_unlock,
+ .dv_format = format_ip_null_dpo,
+};
+
+/**
+ * @brief The per-protocol VLIB graph nodes that are assigned to a ip_null
+ * object.
+ *
+ * this means that these graph nodes are ones from which a ip_null is the
+ * parent object in the DPO-graph.
+ */
+const static char* const ip4_null_nodes[] =
+{
+ "ip4-null",
+ NULL,
+};
+const static char* const ip6_null_nodes[] =
+{
+ "ip6-null",
+ NULL,
+};
+
+const static char* const * const ip_null_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = ip4_null_nodes,
+ [DPO_PROTO_IP6] = ip6_null_nodes,
+};
+
+typedef struct ip_null_dpo_trace_t_
+{
+ index_t ind_index;
+} ip_null_dpo_trace_t;
+
+/**
+ * @brief Exit nodes from a IP_NULL
+ */
+typedef enum ip_null_next_t_
+{
+ IP_NULL_NEXT_DROP,
+ IP_NULL_NEXT_ICMP,
+ IP_NULL_NEXT_NUM,
+} ip_null_next_t;
+
+always_inline uword
+ip_null_dpo_switch (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ u8 is_ip4)
+{
+ u32 n_left_from, next_index, *from, *to_next;
+ static f64 time_last_seed_change = -1e100;
+ static u32 hash_seeds[3];
+ static uword hash_bitmap[256 / BITS (uword)];
+ f64 time_now;
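+
+  /* Rate-limit state: a 256-bit "seen" bitmap keyed by a hash of the
+   * packet's addresses. The seeds are regenerated and the bitmap cleared
+   * every 100ms below, so at most one ICMP response per address-hash is
+   * generated per interval; a hash collision may suppress a response,
+   * which is acceptable for a null route. */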
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ time_now = vlib_time_now (vm);
+ if (time_now - time_last_seed_change > 1e-1)
+ {
+ uword i;
+ u32 * r = clib_random_buffer_get_data (&vm->random_buffer,
+ sizeof (hash_seeds));
+ for (i = 0; i < ARRAY_LEN (hash_seeds); i++)
+ hash_seeds[i] = r[i];
+
+      /* Mark all hash keys as not-seen before. */
+ for (i = 0; i < ARRAY_LEN (hash_bitmap); i++)
+ hash_bitmap[i] = 0;
+
+ time_last_seed_change = time_now;
+ }
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 a0, b0, c0, m0, drop0;
+ vlib_buffer_t *p0;
+ u32 bi0, indi0, next0;
+ const ip_null_dpo_t *ind0;
+ uword bm0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, bi0);
+
+	  /* fetch the IP_NULL DPO the packet was routed to */
+ indi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+ ind0 = ip_null_dpo_get(indi0);
+ next0 = IP_NULL_NEXT_DROP;
+
+ /*
+ * rate limit - don't DoS the sender.
+ */
+ a0 = hash_seeds[0];
+ b0 = hash_seeds[1];
+ c0 = hash_seeds[2];
+
+ if (is_ip4)
+ {
+ ip4_header_t *ip0 = vlib_buffer_get_current (p0);
+
+ a0 ^= ip0->dst_address.data_u32;
+ b0 ^= ip0->src_address.data_u32;
+
+ hash_v3_finalize32 (a0, b0, c0);
+ }
+ else
+ {
+ ip6_header_t *ip0 = vlib_buffer_get_current (p0);
+
+ a0 ^= ip0->dst_address.as_u32[0];
+ b0 ^= ip0->src_address.as_u32[0];
+ c0 ^= ip0->src_address.as_u32[1];
+
+ hash_v3_mix32 (a0, b0, c0);
+
+ a0 ^= ip0->dst_address.as_u32[1];
+ b0 ^= ip0->src_address.as_u32[2];
+ c0 ^= ip0->src_address.as_u32[3];
+
+ hash_v3_finalize32 (a0, b0, c0);
+ }
+
+ /* select the word and the bit position in the rate-limit bitmap;
+ * the bit position must be taken from c0 before the word divide */
+ c0 &= BITS (hash_bitmap) - 1;
+ m0 = (uword) 1 << (c0 % BITS (uword));
+ c0 = c0 / BITS (uword);
+
+ bm0 = hash_bitmap[c0];
+ drop0 = (bm0 & m0) != 0;
+
+ /* Mark it as seen. */
+ hash_bitmap[c0] = bm0 | m0;
+
+ if (PREDICT_FALSE(!drop0))
+ {
+ if (is_ip4)
+ {
+ /*
+ * There's a trade-off here: this conditional statement
+ * versus a graph node per-condition. Given that the expected
+ * number of packets to reach a null route is 0,
+ * we favour the run-time cost over the graph complexity.
+ */
+ if (IP_NULL_ACTION_SEND_ICMP_UNREACH == ind0->ind_action)
+ {
+ next0 = IP_NULL_NEXT_ICMP;
+ icmp4_error_set_vnet_buffer(
+ p0,
+ ICMP4_destination_unreachable,
+ ICMP4_destination_unreachable_destination_unreachable_host,
+ 0);
+ }
+ else if (IP_NULL_ACTION_SEND_ICMP_PROHIBIT == ind0->ind_action)
+ {
+ next0 = IP_NULL_NEXT_ICMP;
+ icmp4_error_set_vnet_buffer(
+ p0,
+ ICMP4_destination_unreachable,
+ ICMP4_destination_unreachable_host_administratively_prohibited,
+ 0);
+ }
+ }
+ else
+ {
+ if (IP_NULL_ACTION_SEND_ICMP_UNREACH == ind0->ind_action)
+ {
+ next0 = IP_NULL_NEXT_ICMP;
+ icmp6_error_set_vnet_buffer(
+ p0,
+ ICMP6_destination_unreachable,
+ ICMP6_destination_unreachable_no_route_to_destination,
+ 0);
+ }
+ else if (IP_NULL_ACTION_SEND_ICMP_PROHIBIT == ind0->ind_action)
+ {
+ next0 = IP_NULL_NEXT_ICMP;
+ icmp6_error_set_vnet_buffer(
+ p0,
+ ICMP6_destination_unreachable,
+ ICMP6_destination_unreachable_destination_administratively_prohibited,
+ 0);
+ }
+ }
+ }
+
+ if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ip_null_dpo_trace_t *tr = vlib_add_trace (vm, node, p0,
+ sizeof (*tr));
+ tr->ind_index = indi0;
+ }
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+static u8 *
+format_ip_null_dpo_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ip_null_dpo_trace_t *t = va_arg (*args, ip_null_dpo_trace_t *);
+
+ s = format (s, "%U", format_ip_null_dpo, t->ind_index, 0);
+ return s;
+}
+
+static uword
+ip4_null_dpo_switch (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (ip_null_dpo_switch(vm, node, frame, 1));
+}
+
+/**
+ * @brief
+ */
+VLIB_REGISTER_NODE (ip4_null_dpo_node) = {
+ .function = ip4_null_dpo_switch,
+ .name = "ip4-null",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_ip_null_dpo_trace,
+ .n_next_nodes = IP_NULL_NEXT_NUM,
+ .next_nodes = {
+ [IP_NULL_NEXT_DROP] = "ip4-drop",
+ [IP_NULL_NEXT_ICMP] = "ip4-icmp-error",
+ },
+};
+
+static uword
+ip6_null_dpo_switch (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (ip_null_dpo_switch(vm, node, frame, 0));
+}
+
+/**
+ * @brief
+ */
+VLIB_REGISTER_NODE (ip6_null_dpo_node) = {
+ .function = ip6_null_dpo_switch,
+ .name = "ip6-null",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_ip_null_dpo_trace,
+ .n_next_nodes = IP_NULL_NEXT_NUM,
+ .next_nodes = {
+ [IP_NULL_NEXT_DROP] = "ip6-drop",
+ [IP_NULL_NEXT_ICMP] = "ip6-icmp-error",
+ },
+};
+
+void
+ip_null_dpo_module_init (void)
+{
+ dpo_register(DPO_IP_NULL, &ip_null_vft, ip_null_nodes);
+}
diff --git a/src/vnet/dpo/ip_null_dpo.h b/src/vnet/dpo/ip_null_dpo.h
new file mode 100644
index 00000000000..002a2a7016d
--- /dev/null
+++ b/src/vnet/dpo/ip_null_dpo.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief
+ * The IP NULL DPO represents the rubbish bin for IP traffic. Without specifying an
+ * action (i.e. send ICMP type X to the sender) it is equivalent to using a drop DPO.
+ * However, in contrast to the drop DPO, any route that resolves via a NULL is
+ * considered 'resolved' by FIB, i.e. an IP NULL is used when the control plane
+ * is explicitly expressing the desire to drop packets. Drop DPOs are used
+ * internally by FIB when resolution is not possible.
+ *
+ * Any replies to sender are rate limited.
+ */
+
+#ifndef __IP_NULL_DPO_H__
+#define __IP_NULL_DPO_H__
+
+#include <vnet/dpo/dpo.h>
+
+/**
+ * @brief Actions to take when a packet encounters the NULL DPO
+ */
+typedef enum ip_null_dpo_action_t_
+{
+ IP_NULL_ACTION_NONE,
+ IP_NULL_ACTION_SEND_ICMP_UNREACH,
+ IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
+} ip_null_dpo_action_t;
+
+#define IP_NULL_ACTIONS { \
+ [IP_NULL_ACTION_NONE] = "discard", \
+ [IP_NULL_ACTION_SEND_ICMP_UNREACH] = "send-unreachable", \
+ [IP_NULL_ACTION_SEND_ICMP_PROHIBIT] = "send-prohibited", \
+}
+
+#define IP_NULL_DPO_ACTION_NUM (IP_NULL_ACTION_SEND_ICMP_PROHIBIT+1)
+
+extern void ip_null_dpo_add_and_lock (dpo_proto_t proto,
+ ip_null_dpo_action_t action,
+ dpo_id_t *dpo);
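+
+/**
+ * Usage sketch (illustrative only; 'tmp' is a local example variable,
+ * not part of this API): resolve a blackhole route via an IP4 null
+ * DPO that answers with ICMP unreachables:
+ *
+ *    dpo_id_t tmp = DPO_INVALID;
+ *    ip_null_dpo_add_and_lock(DPO_PROTO_IP4,
+ *                             IP_NULL_ACTION_SEND_ICMP_UNREACH,
+ *                             &tmp);
+ *    ... stack the route's resolving DPO on tmp, then dpo_reset(&tmp) ...
+ */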
+
+extern void ip_null_dpo_module_init(void);
+
+#endif
diff --git a/src/vnet/dpo/load_balance.c b/src/vnet/dpo/load_balance.c
new file mode 100644
index 00000000000..a244776ffb8
--- /dev/null
+++ b/src/vnet/dpo/load_balance.c
@@ -0,0 +1,993 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/lookup.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/load_balance_map.h>
+#include <vnet/dpo/drop_dpo.h>
+#include <vppinfra/math.h> /* for fabs */
+#include <vnet/adj/adj.h>
+#include <vnet/adj/adj_internal.h>
+#include <vnet/fib/fib_urpf_list.h>
+
+/*
+ * distribution error tolerance for load-balancing: the average per-bucket
+ * error accepted when normalising path weights
+ */
+const f64 multipath_next_hop_error_tolerance = 0.1;
+
+#undef LB_DEBUG
+
+#ifdef LB_DEBUG
+#define LB_DBG(_lb, _fmt, _args...) \
+{ \
+ u8* _tmp =NULL; \
+ clib_warning("lb:[%s]:" _fmt, \
+ load_balance_format(load_balance_get_index((_lb)), \
+ 0, _tmp), \
+ ##_args); \
+ vec_free(_tmp); \
+}
+#else
+#define LB_DBG(_p, _fmt, _args...)
+#endif
+
+
+/**
+ * Pool of all load-balance DPOs. It's not static so the DP can have fast access
+ */
+load_balance_t *load_balance_pool;
+
+/**
+ * The one instance of load-balance main
+ */
+load_balance_main_t load_balance_main;
+
+f64
+load_balance_get_multipath_tolerance (void)
+{
+ return (multipath_next_hop_error_tolerance);
+}
+
+static inline index_t
+load_balance_get_index (const load_balance_t *lb)
+{
+ return (lb - load_balance_pool);
+}
+
+static inline dpo_id_t*
+load_balance_get_buckets (load_balance_t *lb)
+{
+ if (LB_HAS_INLINE_BUCKETS(lb))
+ {
+ return (lb->lb_buckets_inline);
+ }
+ else
+ {
+ return (lb->lb_buckets);
+ }
+}
+
+static load_balance_t *
+load_balance_alloc_i (void)
+{
+ load_balance_t *lb;
+
+ pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
+ memset(lb, 0, sizeof(*lb));
+
+ lb->lb_map = INDEX_INVALID;
+ lb->lb_urpf = INDEX_INVALID;
+ vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
+ load_balance_get_index(lb));
+ vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
+ load_balance_get_index(lb));
+ vlib_zero_combined_counter(&(load_balance_main.lbm_to_counters),
+ load_balance_get_index(lb));
+ vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
+ load_balance_get_index(lb));
+
+ return (lb);
+}
+
+static u8*
+load_balance_format (index_t lbi,
+ load_balance_format_flags_t flags,
+ u32 indent,
+ u8 *s)
+{
+ vlib_counter_t to, via;
+ load_balance_t *lb;
+ dpo_id_t *buckets;
+ u32 i;
+
+ lb = load_balance_get(lbi);
+ vlib_get_combined_counter(&(load_balance_main.lbm_to_counters), lbi, &to);
+ vlib_get_combined_counter(&(load_balance_main.lbm_via_counters), lbi, &via);
+ buckets = load_balance_get_buckets(lb);
+
+ s = format(s, "%U: ", format_dpo_type, DPO_LOAD_BALANCE);
+ s = format(s, "[index:%d buckets:%d ", lbi, lb->lb_n_buckets);
+ s = format(s, "uRPF:%d ", lb->lb_urpf);
+ s = format(s, "to:[%Ld:%Ld]", to.packets, to.bytes);
+ if (0 != via.packets)
+ {
+ s = format(s, " via:[%Ld:%Ld]",
+ via.packets, via.bytes);
+ }
+ s = format(s, "]");
+
+ if (INDEX_INVALID != lb->lb_map)
+ {
+ s = format(s, "\n%U%U",
+ format_white_space, indent+4,
+ format_load_balance_map, lb->lb_map, indent+4);
+ }
+ for (i = 0; i < lb->lb_n_buckets; i++)
+ {
+ s = format(s, "\n%U[%d] %U",
+ format_white_space, indent+2,
+ i,
+ format_dpo_id,
+ &buckets[i], indent+6);
+ }
+ return (s);
+}
+
+u8*
+format_load_balance (u8 * s, va_list * args)
+{
+ index_t lbi = va_arg(*args, index_t);
+ load_balance_format_flags_t flags = va_arg(*args, load_balance_format_flags_t);
+
+ return (load_balance_format(lbi, flags, 0, s));
+}
+
+static u8*
+format_load_balance_dpo (u8 * s, va_list * args)
+{
+ index_t lbi = va_arg(*args, index_t);
+ u32 indent = va_arg(*args, u32);
+
+ return (load_balance_format(lbi, LOAD_BALANCE_FORMAT_DETAIL, indent, s));
+}
+
+
+static load_balance_t *
+load_balance_create_i (u32 num_buckets,
+ dpo_proto_t lb_proto,
+ flow_hash_config_t fhc)
+{
+ load_balance_t *lb;
+
+ lb = load_balance_alloc_i();
+ lb->lb_hash_config = fhc;
+ lb->lb_n_buckets = num_buckets;
+ lb->lb_n_buckets_minus_1 = num_buckets-1;
+ lb->lb_proto = lb_proto;
+
+ if (!LB_HAS_INLINE_BUCKETS(lb))
+ {
+ vec_validate_aligned(lb->lb_buckets,
+ lb->lb_n_buckets - 1,
+ CLIB_CACHE_LINE_BYTES);
+ }
+
+ LB_DBG(lb, "create");
+
+ return (lb);
+}
+
+index_t
+load_balance_create (u32 n_buckets,
+ dpo_proto_t lb_proto,
+ flow_hash_config_t fhc)
+{
+ return (load_balance_get_index(load_balance_create_i(n_buckets, lb_proto, fhc)));
+}
+
+static inline void
+load_balance_set_bucket_i (load_balance_t *lb,
+ u32 bucket,
+ dpo_id_t *buckets,
+ const dpo_id_t *next)
+{
+ dpo_stack(DPO_LOAD_BALANCE, lb->lb_proto, &buckets[bucket], next);
+}
+
+void
+load_balance_set_bucket (index_t lbi,
+ u32 bucket,
+ const dpo_id_t *next)
+{
+ load_balance_t *lb;
+ dpo_id_t *buckets;
+
+ lb = load_balance_get(lbi);
+ buckets = load_balance_get_buckets(lb);
+
+ ASSERT(bucket < lb->lb_n_buckets);
+
+ load_balance_set_bucket_i(lb, bucket, buckets, next);
+}
+
+int
+load_balance_is_drop (const dpo_id_t *dpo)
+{
+ load_balance_t *lb;
+
+ if (DPO_LOAD_BALANCE != dpo->dpoi_type)
+ return (0);
+
+ lb = load_balance_get(dpo->dpoi_index);
+
+ if (1 == lb->lb_n_buckets)
+ {
+ return (dpo_is_drop(load_balance_get_bucket_i(lb, 0)));
+ }
+ return (0);
+}
+
+void
+load_balance_set_urpf (index_t lbi,
+ index_t urpf)
+{
+ load_balance_t *lb;
+ index_t old;
+
+ lb = load_balance_get(lbi);
+
+ /*
+ * packets in flight may see this change, but the write is atomic,
+ * so they use either the old or the new uRPF list.
+ */
+ old = lb->lb_urpf;
+ lb->lb_urpf = urpf;
+
+ fib_urpf_list_unlock(old);
+ fib_urpf_list_lock(urpf);
+}
+
+index_t
+load_balance_get_urpf (index_t lbi)
+{
+ load_balance_t *lb;
+
+ lb = load_balance_get(lbi);
+
+ return (lb->lb_urpf);
+}
+
+const dpo_id_t *
+load_balance_get_bucket (index_t lbi,
+ u32 bucket)
+{
+ load_balance_t *lb;
+
+ lb = load_balance_get(lbi);
+
+ return (load_balance_get_bucket_i(lb, bucket));
+}
+
+static int
+next_hop_sort_by_weight (load_balance_path_t * n1,
+ load_balance_path_t * n2)
+{
+ return ((int) n1->path_weight - (int) n2->path_weight);
+}
+
+/* Given next hop vector is over-written with normalized one with sorted weights and
+ with weights corresponding to the number of adjacencies for each next hop.
+ Returns number of adjacencies in block. */
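+/* Worked example (with the default tolerance of 0.1): raw weights {3, 1}
+   cannot be represented in a block of 2 adjacencies, since the low-weight
+   path would round to zero buckets; the block size is therefore doubled
+   to 4 and the normalised weights become {3, 1}: three buckets for the
+   first path, one for the second. */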
+u32
+ip_multipath_normalize_next_hops (load_balance_path_t * raw_next_hops,
+ load_balance_path_t ** normalized_next_hops,
+ u32 *sum_weight_in,
+ f64 multipath_next_hop_error_tolerance)
+{
+ load_balance_path_t * nhs;
+ uword n_nhs, n_adj, n_adj_left, i, sum_weight;
+ f64 norm, error;
+
+ n_nhs = vec_len (raw_next_hops);
+ ASSERT (n_nhs > 0);
+ if (n_nhs == 0)
+ return 0;
+
+ /* Allocate enough space for 2 copies; we'll use second copy to save original weights. */
+ nhs = *normalized_next_hops;
+ vec_validate (nhs, 2*n_nhs - 1);
+
+ /* Fast path: 1 next hop in block. */
+ n_adj = n_nhs;
+ if (n_nhs == 1)
+ {
+ nhs[0] = raw_next_hops[0];
+ nhs[0].path_weight = 1;
+ _vec_len (nhs) = 1;
+ sum_weight = 1;
+ goto done;
+ }
+
+ else if (n_nhs == 2)
+ {
+ int cmp = next_hop_sort_by_weight (&raw_next_hops[0], &raw_next_hops[1]) < 0;
+
+ /* Fast sort. */
+ nhs[0] = raw_next_hops[cmp];
+ nhs[1] = raw_next_hops[cmp ^ 1];
+
+ /* Fast path: equal cost multipath with 2 next hops. */
+ if (nhs[0].path_weight == nhs[1].path_weight)
+ {
+ nhs[0].path_weight = nhs[1].path_weight = 1;
+ _vec_len (nhs) = 2;
+ sum_weight = 2;
+ goto done;
+ }
+ }
+ else
+ {
+ clib_memcpy (nhs, raw_next_hops, n_nhs * sizeof (raw_next_hops[0]));
+ qsort (nhs, n_nhs, sizeof (nhs[0]), (void *) next_hop_sort_by_weight);
+ }
+
+ /* Find total weight to normalize weights. */
+ sum_weight = 0;
+ for (i = 0; i < n_nhs; i++)
+ sum_weight += nhs[i].path_weight;
+
+ /* In the unlikely case that all weights are given as 0, set them all to 1. */
+ if (sum_weight == 0)
+ {
+ for (i = 0; i < n_nhs; i++)
+ nhs[i].path_weight = 1;
+ sum_weight = n_nhs;
+ }
+
+ /* Save copies of all next hop weights to avoid being overwritten in loop below. */
+ for (i = 0; i < n_nhs; i++)
+ nhs[n_nhs + i].path_weight = nhs[i].path_weight;
+
+ /* Try larger and larger power of 2 sized adjacency blocks until we
+ find one where traffic flows to within the given tolerance of the
+ specified weights. */
+ for (n_adj = max_pow2 (n_nhs); ; n_adj *= 2)
+ {
+ error = 0;
+
+ norm = n_adj / ((f64) sum_weight);
+ n_adj_left = n_adj;
+ for (i = 0; i < n_nhs; i++)
+ {
+ f64 nf = nhs[n_nhs + i].path_weight * norm; /* use saved weights */
+ word n = flt_round_nearest (nf);
+
+ n = n > n_adj_left ? n_adj_left : n;
+ n_adj_left -= n;
+ error += fabs (nf - n);
+ nhs[i].path_weight = n;
+
+ if (0 == nhs[i].path_weight)
+ {
+ /*
+ * The rounded allocation for this path came out as zero, which
+ * happens when the weight skew is high (norm is small). Without
+ * this correction the path with a low weight would have no
+ * representation in the load-balance - we don't want that.
+ * Forcing the maximum error makes the loop try a larger block,
+ * so a high weight skew costs more buckets: pays ya money,
+ * takes ya choice.
+ */
+ error = n_adj;
+ break;
+ }
+ }
+
+ nhs[0].path_weight += n_adj_left;
+
+ /* Average error per adjacency within the caller's tolerance for this
+ size adjacency block? */
+ if (error <= multipath_next_hop_error_tolerance*n_adj)
+ {
+ /* Truncate any next hops with zero weight. */
+ _vec_len (nhs) = i;
+ break;
+ }
+ }
+
+done:
+ /* Save vector for next call. */
+ *normalized_next_hops = nhs;
+ *sum_weight_in = sum_weight;
+ return n_adj;
+}
+
+static load_balance_path_t *
+load_balance_multipath_next_hop_fixup (load_balance_path_t *nhs,
+ dpo_proto_t drop_proto)
+{
+ if (0 == vec_len(nhs))
+ {
+ load_balance_path_t *nh;
+
+ /*
+ * we need at least one bucket for the load-balance, so use the drop
+ */
+ vec_add2(nhs, nh, 1);
+
+ nh->path_weight = 1;
+ dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));
+ }
+
+ return (nhs);
+}
+
+/*
+ * Fill in adjacencies in block based on corresponding
+ * next hop adjacencies.
+ */
+static void
+load_balance_fill_buckets (load_balance_t *lb,
+ load_balance_path_t *nhs,
+ dpo_id_t *buckets,
+ u32 n_buckets)
+{
+ load_balance_path_t * nh;
+ u16 ii, bucket;
+
+ bucket = 0;
+
+ /*
+ * the next-hops have normalised weights. that means their sum is the number
+ * of buckets we need to fill.
+ */
+ vec_foreach (nh, nhs)
+ {
+ for (ii = 0; ii < nh->path_weight; ii++)
+ {
+ ASSERT(bucket < n_buckets);
+ load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
+ }
+ }
+}
+
+static inline void
+load_balance_set_n_buckets (load_balance_t *lb,
+ u32 n_buckets)
+{
+ lb->lb_n_buckets = n_buckets;
+ lb->lb_n_buckets_minus_1 = n_buckets-1;
+}
+
+void
+load_balance_multipath_update (const dpo_id_t *dpo,
+ load_balance_path_t * raw_next_hops,
+ load_balance_flags_t flags)
+{
+ u32 sum_of_weights, n_buckets, ii;
+ load_balance_path_t * nh, * nhs;
+ index_t lbmi, old_lbmi;
+ load_balance_t *lb;
+ dpo_id_t *tmp_dpo;
+
+ nhs = NULL;
+
+ ASSERT(DPO_LOAD_BALANCE == dpo->dpoi_type);
+ lb = load_balance_get(dpo->dpoi_index);
+ raw_next_hops =
+ load_balance_multipath_next_hop_fixup(raw_next_hops,
+ lb->lb_proto);
+ n_buckets =
+ ip_multipath_normalize_next_hops(raw_next_hops,
+ &nhs,
+ &sum_of_weights,
+ multipath_next_hop_error_tolerance);
+
+ ASSERT (n_buckets >= vec_len (raw_next_hops));
+
+ /*
+ * Save the old load-balance map used, and get a new one if required.
+ */
+ old_lbmi = lb->lb_map;
+ if (flags & LOAD_BALANCE_FLAG_USES_MAP)
+ {
+ lbmi = load_balance_map_add_or_lock(n_buckets, sum_of_weights, nhs);
+ }
+ else
+ {
+ lbmi = INDEX_INVALID;
+ }
+
+ if (0 == lb->lb_n_buckets)
+ {
+ /*
+ * first time initialisation. no packets inflight, so we can write
+ * at leisure.
+ */
+ load_balance_set_n_buckets(lb, n_buckets);
+
+ if (!LB_HAS_INLINE_BUCKETS(lb))
+ vec_validate_aligned(lb->lb_buckets,
+ lb->lb_n_buckets - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ load_balance_fill_buckets(lb, nhs,
+ load_balance_get_buckets(lb),
+ n_buckets);
+ lb->lb_map = lbmi;
+ }
+ else
+ {
+ /*
+ * This is a modification of an existing load-balance.
+ * We need to ensure that packets in flight see a consistent state: the
+ * number of buckets the LB reports (read from lb_n_buckets_minus_1)
+ * must never exceed the number it actually has. So if the number of
+ * buckets is increasing, we must update the bucket array first, then
+ * the reported number; vice-versa if the number of buckets goes down.
+ */
+ if (n_buckets == lb->lb_n_buckets)
+ {
+ /*
+ * no change in the number of buckets. we can simply fill what
+ * is new over what is old.
+ */
+ load_balance_fill_buckets(lb, nhs,
+ load_balance_get_buckets(lb),
+ n_buckets);
+ lb->lb_map = lbmi;
+ }
+ else if (n_buckets > lb->lb_n_buckets)
+ {
+ /*
+ * we have more buckets. the old load-balance map (if there is one)
+ * will remain valid, i.e. mapping to indices within range, so we
+ * update it last.
+ */
+ if (n_buckets > LB_NUM_INLINE_BUCKETS &&
+ lb->lb_n_buckets <= LB_NUM_INLINE_BUCKETS)
+ {
+ /*
+ * the new increased number of buckets is crossing the threshold
+ * from the inline storage to out-of-line. Alloc the out-of-line
+ * buckets first, then fix up the number, then reset the inlines.
+ */
+ ASSERT(NULL == lb->lb_buckets);
+ vec_validate_aligned(lb->lb_buckets,
+ n_buckets - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ load_balance_fill_buckets(lb, nhs,
+ lb->lb_buckets,
+ n_buckets);
+ CLIB_MEMORY_BARRIER();
+ load_balance_set_n_buckets(lb, n_buckets);
+
+ CLIB_MEMORY_BARRIER();
+
+ for (ii = 0; ii < LB_NUM_INLINE_BUCKETS; ii++)
+ {
+ dpo_reset(&lb->lb_buckets_inline[ii]);
+ }
+ }
+ else
+ {
+ if (n_buckets <= LB_NUM_INLINE_BUCKETS)
+ {
+ /*
+ * we are not crossing the threshold and it's still inline buckets.
+ * we can write the new over the old.
+ */
+ load_balance_fill_buckets(lb, nhs,
+ load_balance_get_buckets(lb),
+ n_buckets);
+ CLIB_MEMORY_BARRIER();
+ load_balance_set_n_buckets(lb, n_buckets);
+ }
+ else
+ {
+ /*
+ * we are not crossing the threshold. We need a new bucket array to
+ * hold the increased number of choices.
+ */
+ dpo_id_t *new_buckets, *old_buckets, *tmp_dpo;
+
+ new_buckets = NULL;
+ old_buckets = load_balance_get_buckets(lb);
+
+ vec_validate_aligned(new_buckets,
+ n_buckets - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ load_balance_fill_buckets(lb, nhs, new_buckets, n_buckets);
+ CLIB_MEMORY_BARRIER();
+ lb->lb_buckets = new_buckets;
+ CLIB_MEMORY_BARRIER();
+ load_balance_set_n_buckets(lb, n_buckets);
+
+ vec_foreach(tmp_dpo, old_buckets)
+ {
+ dpo_reset(tmp_dpo);
+ }
+ vec_free(old_buckets);
+ }
+ }
+
+ /*
+ * buckets fixed. ready for the MAP update.
+ */
+ lb->lb_map = lbmi;
+ }
+ else
+ {
+ /*
+ * bucket size shrinkage.
+ * Any map we have will be based on the old
+ * larger number of buckets, so will be translating to indices
+ * out of range. So the new MAP must be installed first.
+ */
+ lb->lb_map = lbmi;
+ CLIB_MEMORY_BARRIER();
+
+
+ if (n_buckets <= LB_NUM_INLINE_BUCKETS &&
+ lb->lb_n_buckets > LB_NUM_INLINE_BUCKETS)
+ {
+ /*
+ * the new decreased number of buckets is crossing the threshold
+ * from out-line storage to inline:
+ * 1 - Fill the inline buckets,
+ * 2 - fixup the number (and this point the inline buckets are
+ * used).
+ * 3 - free the outline buckets
+ */
+ load_balance_fill_buckets(lb, nhs,
+ lb->lb_buckets_inline,
+ n_buckets);
+ CLIB_MEMORY_BARRIER();
+ load_balance_set_n_buckets(lb, n_buckets);
+ CLIB_MEMORY_BARRIER();
+
+ vec_foreach(tmp_dpo, lb->lb_buckets)
+ {
+ dpo_reset(tmp_dpo);
+ }
+ vec_free(lb->lb_buckets);
+ }
+ else
+ {
+ /*
+ * not crossing the threshold.
+ * 1 - update the number to the smaller size
+ * 2 - write the new buckets
+ * 3 - reset those no longer used.
+ */
+ dpo_id_t *buckets;
+ u32 old_n_buckets;
+
+ old_n_buckets = lb->lb_n_buckets;
+ buckets = load_balance_get_buckets(lb);
+
+ load_balance_set_n_buckets(lb, n_buckets);
+ CLIB_MEMORY_BARRIER();
+
+ load_balance_fill_buckets(lb, nhs,
+ buckets,
+ n_buckets);
+
+ for (ii = n_buckets; ii < old_n_buckets; ii++)
+ {
+ dpo_reset(&buckets[ii]);
+ }
+ }
+ }
+ }
+
+ vec_foreach (nh, nhs)
+ {
+ dpo_reset(&nh->path_dpo);
+ }
+ vec_free(nhs);
+
+ load_balance_map_unlock(old_lbmi);
+}
+
+static void
+load_balance_lock (dpo_id_t *dpo)
+{
+ load_balance_t *lb;
+
+ lb = load_balance_get(dpo->dpoi_index);
+
+ lb->lb_locks++;
+}
+
+static void
+load_balance_destroy (load_balance_t *lb)
+{
+ dpo_id_t *buckets;
+ int i;
+
+ buckets = load_balance_get_buckets(lb);
+
+ for (i = 0; i < lb->lb_n_buckets; i++)
+ {
+ dpo_reset(&buckets[i]);
+ }
+
+ LB_DBG(lb, "destroy");
+ if (!LB_HAS_INLINE_BUCKETS(lb))
+ {
+ vec_free(lb->lb_buckets);
+ }
+
+ fib_urpf_list_unlock(lb->lb_urpf);
+ load_balance_map_unlock(lb->lb_map);
+
+ pool_put(load_balance_pool, lb);
+}
+
+static void
+load_balance_unlock (dpo_id_t *dpo)
+{
+ load_balance_t *lb;
+
+ lb = load_balance_get(dpo->dpoi_index);
+
+ lb->lb_locks--;
+
+ if (0 == lb->lb_locks)
+ {
+ load_balance_destroy(lb);
+ }
+}
+
+static void
+load_balance_mem_show (void)
+{
+ fib_show_memory_usage("load-balance",
+ pool_elts(load_balance_pool),
+ pool_len(load_balance_pool),
+ sizeof(load_balance_t));
+ load_balance_map_show_mem();
+}
+
+const static dpo_vft_t lb_vft = {
+ .dv_lock = load_balance_lock,
+ .dv_unlock = load_balance_unlock,
+ .dv_format = format_load_balance_dpo,
+ .dv_mem_show = load_balance_mem_show,
+};
+
+/**
+ * @brief The per-protocol VLIB graph nodes that are assigned to a load-balance
+ * object.
+ *
+ * this means that these graph nodes are ones from which a load-balance is the
+ * parent object in the DPO-graph.
+ *
+ * We do not list all the load-balance nodes, such as the *-lookup nodes. Instead
+ * we are relying on the correct use of the .sibling_of field when setting
+ * up these sibling nodes.
+ */
+const static char* const load_balance_ip4_nodes[] =
+{
+ "ip4-load-balance",
+ NULL,
+};
+const static char* const load_balance_ip6_nodes[] =
+{
+ "ip6-load-balance",
+ NULL,
+};
+const static char* const load_balance_mpls_nodes[] =
+{
+ "mpls-load-balance",
+ NULL,
+};
+const static char* const load_balance_l2_nodes[] =
+{
+ "l2-load-balance",
+ NULL,
+};
+const static char* const * const load_balance_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = load_balance_ip4_nodes,
+ [DPO_PROTO_IP6] = load_balance_ip6_nodes,
+ [DPO_PROTO_MPLS] = load_balance_mpls_nodes,
+ [DPO_PROTO_ETHERNET] = load_balance_l2_nodes,
+};
+
+void
+load_balance_module_init (void)
+{
+ dpo_register(DPO_LOAD_BALANCE, &lb_vft, load_balance_nodes);
+
+ load_balance_map_module_init();
+}
+
+static clib_error_t *
+load_balance_show (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ index_t lbi = INDEX_INVALID;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%d", &lbi))
+ ;
+ else
+ break;
+ }
+
+ if (INDEX_INVALID != lbi)
+ {
+ vlib_cli_output (vm, "%U", format_load_balance, lbi,
+ LOAD_BALANCE_FORMAT_DETAIL);
+ }
+ else
+ {
+ load_balance_t *lb;
+
+ pool_foreach(lb, load_balance_pool,
+ ({
+ vlib_cli_output (vm, "%U", format_load_balance,
+ load_balance_get_index(lb),
+ LOAD_BALANCE_FORMAT_NONE);
+ }));
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (load_balance_show_command, static) = {
+ .path = "show load-balance",
+ .short_help = "show load-balance [<index>]",
+ .function = load_balance_show,
+};
+
+
+always_inline u32
+ip_flow_hash (void *data)
+{
+ ip4_header_t *iph = (ip4_header_t *) data;
+
+ if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
+ return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
+ else
+ return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
+}
+
+always_inline u64
+mac_to_u64 (u8 * m)
+{
+ return (*((u64 *) m) & 0xffffffffffff);
+}
+
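+/**
+ * @brief Compute a flow hash for an L2 frame: use the IP flow hash when
+ * the payload is IPv4/IPv6, otherwise mix the ethertype with the source
+ * and destination MACs.
+ */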
+always_inline u32
+l2_flow_hash (vlib_buffer_t * b0)
+{
+ ethernet_header_t *eh;
+ u64 a, b, c;
+ uword is_ip, eh_size;
+ u16 eh_type;
+
+ eh = vlib_buffer_get_current (b0);
+ eh_type = clib_net_to_host_u16 (eh->type);
+ eh_size = ethernet_buffer_header_size (b0);
+
+ is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);
+
+ /* since we have 2 cache lines, use them */
+ if (is_ip)
+ a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
+ else
+ a = eh->type;
+
+ b = mac_to_u64 ((u8 *) eh->dst_address);
+ c = mac_to_u64 ((u8 *) eh->src_address);
+ hash_mix64 (a, b, c);
+
+ return (u32) c;
+}
+
+typedef struct load_balance_trace_t_
+{
+ index_t lb_index;
+} load_balance_trace_t;
+
+static uword
+l2_load_balance (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, next_index, *from, *to_next;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *b0;
+ u32 bi0, lbi0, next0;
+ const dpo_id_t *dpo0;
+ const load_balance_t *lb0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /* get the LB index stashed in the TX adjacency; the flow hash over
+ * the L2 header below selects the bucket */
+ lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+ lb0 = load_balance_get(lbi0);
+
+ vnet_buffer(b0)->ip.flow_hash = l2_flow_hash(b0);
+
+ dpo0 = load_balance_get_bucket_i(lb0,
+ vnet_buffer(b0)->ip.flow_hash &
+ (lb0->lb_n_buckets_minus_1));
+
+ next0 = dpo0->dpoi_next_node;
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
+ sizeof (*tr));
+ tr->lb_index = lbi0;
+ }
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+static u8 *
+format_load_balance_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);
+
+ s = format (s, "L2-load-balance: index %d", t->lb_index);
+ return s;
+}
+
+/**
+ * @brief
+ */
+VLIB_REGISTER_NODE (l2_load_balance_node) = {
+ .function = l2_load_balance,
+ .name = "l2-load-balance",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_load_balance_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ },
+};
diff --git a/src/vnet/dpo/load_balance.h b/src/vnet/dpo/load_balance.h
new file mode 100644
index 00000000000..dc6485e688a
--- /dev/null
+++ b/src/vnet/dpo/load_balance.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * \brief
+ * The load-balance object represents an ECMP choice. The buckets of a load
+ * balance object point to the sub-graph after the choice is made.
+ * The load-balance object is also the object type returned from a FIB table lookup.
+ * As such it needs to represent the case where there is only one choice. It may
+ * seem like overkill to use a load-balance object in this case, but the reason
+ * is performance. If the load-balance object were not the result of the FIB
+ * lookup, then some other object would be, and in the case where there is ECMP
+ * this other object would need a load-balance as a parent, hence adding
+ * an unnecessary indirection.
+ *
+ * It is also the object in the DP that represents a via-fib-entry in a recursive
+ * route.
+ *
+ */
+
+#ifndef __LOAD_BALANCE_H__
+#define __LOAD_BALANCE_H__
+
+#include <vlib/vlib.h>
+#include <vnet/ip/lookup.h>
+#include <vnet/dpo/dpo.h>
+#include <vnet/fib/fib_types.h>
+
+/**
+ * Load-balance main
+ */
+typedef struct load_balance_main_t_
+{
+ vlib_combined_counter_main_t lbm_to_counters;
+ vlib_combined_counter_main_t lbm_via_counters;
+} load_balance_main_t;
+
+extern load_balance_main_t load_balance_main;
+
+/**
+ * The number of buckets that a load-balance object can have and still
+ * fit in one cache-line
+ */
+#define LB_NUM_INLINE_BUCKETS 4
+
+/**
+ * @brief One path from an [EU]CMP set that the client wants to add to a
+ * load-balance object
+ */
+typedef struct load_balance_path_t_ {
+ /**
+ * ID of the Data-path object.
+ */
+ dpo_id_t path_dpo;
+
+ /**
+ * The index of the FIB path
+ */
+ fib_node_index_t path_index;
+
+ /**
+ * weight for the path.
+ */
+ u32 path_weight;
+} load_balance_path_t;
+
+/**
+ * The FIB DPO provides:
+ * - load-balancing over the next DPOs in the chain/graph
+ * - per-route counters
+ */
+typedef struct load_balance_t_ {
+ /**
+ * number of buckets in the load-balance. always a power of 2.
+ */
+ u16 lb_n_buckets;
+ /**
+ * number of buckets in the load-balance - 1. used in the switch path
+ * as part of the hash calculation.
+ */
+ u16 lb_n_buckets_minus_1;
+
+ /**
+ * The protocol of packets that traverse this LB.
+ * needed in combination with the flow hash config to determine how to hash.
+ * u8.
+ */
+ dpo_proto_t lb_proto;
+
+ /**
+ * The number of locks, which is approximately the number of users,
+ * of this load-balance.
+ * Load-balance objects of via-entries are heavily shared by recursives,
+ * so the lock count is a u32.
+ */
+ u32 lb_locks;
+
+ /**
+ * index of the load-balance map, INVALID if this LB does not use one
+ */
+ index_t lb_map;
+
+ /**
+ * This is the index of the uRPF list for this LB
+ */
+ index_t lb_urpf;
+
+ /**
+ * the hash config to use when selecting a bucket. this is a u16
+ */
+ flow_hash_config_t lb_hash_config;
+
+ /**
+ * Vector of buckets containing the next DPOs, sized as lb_n_buckets
+ */
+ dpo_id_t *lb_buckets;
+
+ /**
+ * The rest of the cache line is used for buckets. In the common case
+ * where there are fewer than 4 buckets, the buckets are
+ * on the same cacheline and we save ourselves a pointer dereference in
+ * the data-path.
+ */
+ dpo_id_t lb_buckets_inline[LB_NUM_INLINE_BUCKETS];
+} load_balance_t;
+
+STATIC_ASSERT(sizeof(load_balance_t) <= CLIB_CACHE_LINE_BYTES,
+ "A load_balance object size exceeds one cachline");
+
+/**
+ * Flags controlling load-balance formatting/display
+ */
+typedef enum load_balance_format_flags_t_ {
+ LOAD_BALANCE_FORMAT_NONE,
+ LOAD_BALANCE_FORMAT_DETAIL = (1 << 0),
+} load_balance_format_flags_t;
+
+/**
+ * Flags controlling load-balance creation and modification
+ */
+typedef enum load_balance_flags_t_ {
+ LOAD_BALANCE_FLAG_NONE = 0,
+ LOAD_BALANCE_FLAG_USES_MAP = (1 << 0),
+} load_balance_flags_t;
+
+extern index_t load_balance_create(u32 num_buckets,
+ dpo_proto_t lb_proto,
+ flow_hash_config_t fhc);
+extern void load_balance_multipath_update(
+ const dpo_id_t *dpo,
+ load_balance_path_t * raw_next_hops,
+ load_balance_flags_t flags);
+
+extern void load_balance_set_bucket(index_t lbi,
+ u32 bucket,
+ const dpo_id_t *next);
+extern void load_balance_set_urpf(index_t lbi,
+ index_t urpf);
+extern index_t load_balance_get_urpf(index_t lbi);
+
+extern u8* format_load_balance(u8 * s, va_list * args);
+
+extern const dpo_id_t *load_balance_get_bucket(index_t lbi,
+ u32 bucket);
+extern int load_balance_is_drop(const dpo_id_t *dpo);
+
+extern f64 load_balance_get_multipath_tolerance(void);
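+
+/**
+ * Usage sketch (illustrative; dpo_a and dpo_b stand for existing DPOs
+ * and are not part of this API): create a 2-bucket IP4 load-balance
+ * and populate both buckets:
+ *
+ *    index_t lbi = load_balance_create(2, DPO_PROTO_IP4,
+ *                                      IP_FLOW_HASH_DEFAULT);
+ *    load_balance_set_bucket(lbi, 0, &dpo_a);
+ *    load_balance_set_bucket(lbi, 1, &dpo_b);
+ */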
+
+/**
+ * The encapsulation breakages are for fast DP access
+ */
+extern load_balance_t *load_balance_pool;
+static inline load_balance_t*
+load_balance_get (index_t lbi)
+{
+ return (pool_elt_at_index(load_balance_pool, lbi));
+}
+
+#define LB_HAS_INLINE_BUCKETS(_lb) \
+ ((_lb)->lb_n_buckets <= LB_NUM_INLINE_BUCKETS)
+
+static inline const dpo_id_t *
+load_balance_get_bucket_i (const load_balance_t *lb,
+ u32 bucket)
+{
+ ASSERT(bucket < lb->lb_n_buckets);
+
+ if (PREDICT_TRUE(LB_HAS_INLINE_BUCKETS(lb)))
+ {
+ return (&lb->lb_buckets_inline[bucket]);
+ }
+ else
+ {
+ return (&lb->lb_buckets[bucket]);
+ }
+}
+
+extern void load_balance_module_init(void);
+
+#endif
diff --git a/src/vnet/dpo/load_balance_map.c b/src/vnet/dpo/load_balance_map.c
new file mode 100644
index 00000000000..70ce1bf7c39
--- /dev/null
+++ b/src/vnet/dpo/load_balance_map.c
@@ -0,0 +1,575 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief
+ */
+#include <vnet/fib/fib_path.h>
+#include <vnet/fib/fib_node_list.h>
+#include <vnet/dpo/load_balance_map.h>
+#include <vnet/dpo/load_balance.h>
+
+/**
+ * A hash-table of load-balance maps by path index.
+ * this provides the fast lookup of the LB map when a path goes down
+ */
+static uword *lb_maps_by_path_index;
+
+/**
+ * A hash-table of load-balance maps by set of paths.
+ * This provides the LB map sharing.
+ * LB maps do not necessarily use all the paths in the list, since
+ * the entry that is requesting the map, may not have an out-going
+ * label for each of the paths.
+ */
+static uword *load_balance_map_db;
+
+typedef enum load_balance_map_path_flags_t_
+{
+ LOAD_BALANCE_MAP_PATH_UP = (1 << 0),
+ LOAD_BALANCE_MAP_PATH_USABLE = (1 << 1),
+} __attribute__ ((packed)) load_balance_map_path_flags_t;
+
+typedef struct load_balance_map_path_t_ {
+ /**
+ * Index of the path
+ */
+ fib_node_index_t lbmp_index;
+
+ /**
+ * Sibling Index in the list of all maps with this path index
+ */
+ fib_node_index_t lbmp_sibling;
+
+ /**
+ * the normalised weight of the path
+ */
+ u32 lbmp_weight;
+
+ /**
+ * The sate of the path
+ */
+ load_balance_map_path_flags_t lbmp_flags;
+} load_balance_map_path_t;
+
+/**
+ * The global pool of LB maps
+ */
+load_balance_map_t *load_balance_map_pool;
+
+/*
+ * Debug macro
+ */
+#ifdef FIB_DEBUG
+#define LOAD_BALANCE_MAP_DBG(_pl, _fmt, _args...) \
+ { \
+ clib_warning("lbm: FIXME" _fmt, \
+ ##_args); \
+ }
+#else
+#define LOAD_BALANCE_MAP_DBG(_pl, _fmt, _args...)
+#endif
+
+static index_t
+load_balance_map_get_index (load_balance_map_t *lbm)
+{
+ return (lbm - load_balance_map_pool);
+}
+
+u8*
+format_load_balance_map (u8 *s, va_list ap)
+{
+ index_t lbmi = va_arg(ap, index_t);
+ u32 indent = va_arg(ap, u32);
+ load_balance_map_t *lbm;
+ u32 n_buckets, ii;
+
+ lbm = load_balance_map_get(lbmi);
+ n_buckets = vec_len(lbm->lbm_buckets);
+
+ s = format(s, "load-balance-map: index:%d buckets:%d", lbmi, n_buckets);
+ s = format(s, "\n%U index:", format_white_space, indent+2);
+ for (ii = 0; ii < n_buckets; ii++)
+ {
+ s = format(s, "%5d", ii);
+ }
+ s = format(s, "\n%U map:", format_white_space, indent+2);
+ for (ii = 0; ii < n_buckets; ii++)
+ {
+ s = format(s, "%5d", lbm->lbm_buckets[ii]);
+ }
+
+ return (s);
+}
+
+
+static uword
+load_balance_map_hash (load_balance_map_t *lbm)
+{
+ u32 old_lbm_hash, new_lbm_hash, hash;
+ load_balance_map_path_t *lb_path;
+
+ new_lbm_hash = old_lbm_hash = vec_len(lbm->lbm_paths);
+
+ vec_foreach (lb_path, lbm->lbm_paths)
+ {
+ hash = lb_path->lbmp_index;
+ hash_mix32(hash, old_lbm_hash, new_lbm_hash);
+ }
+
+ return (new_lbm_hash);
+}
+
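+/*
+ * DB keys are either a pointer to a map not yet in the DB (used when
+ * searching) or the encoded pool index of one that is. Pointers are
+ * word aligned, so their LSB is 0; indices are encoded as 2*index + 1
+ * so the LSB distinguishes the two cases.
+ */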
+always_inline uword
+load_balance_map_db_hash_key_from_index (uword index)
+{
+ return 1 + 2*index;
+}
+
+always_inline uword
+load_balance_map_db_hash_key_is_index (uword key)
+{
+ return key & 1;
+}
+
+always_inline uword
+load_balance_map_db_hash_key_2_index (uword key)
+{
+ ASSERT (load_balance_map_db_hash_key_is_index (key));
+ return key / 2;
+}
+
+static load_balance_map_t*
+load_balance_map_db_get_from_hash_key (uword key)
+{
+ load_balance_map_t *lbm;
+
+ if (load_balance_map_db_hash_key_is_index (key))
+ {
+ index_t lbm_index;
+
+ lbm_index = load_balance_map_db_hash_key_2_index(key);
+ lbm = load_balance_map_get(lbm_index);
+ }
+ else
+ {
+ lbm = uword_to_pointer (key, load_balance_map_t *);
+ }
+
+ return (lbm);
+}
+
+static uword
+load_balance_map_db_hash_key_sum (hash_t * h,
+ uword key)
+{
+ load_balance_map_t *lbm;
+
+ lbm = load_balance_map_db_get_from_hash_key(key);
+
+ return (load_balance_map_hash(lbm));
+}
+
+static uword
+load_balance_map_db_hash_key_equal (hash_t * h,
+ uword key1,
+ uword key2)
+{
+ load_balance_map_t *lbm1, *lbm2;
+
+ lbm1 = load_balance_map_db_get_from_hash_key(key1);
+ lbm2 = load_balance_map_db_get_from_hash_key(key2);
+
+ return (load_balance_map_hash(lbm1) ==
+ load_balance_map_hash(lbm2));
+}
+
+static index_t
+load_balance_map_db_find (load_balance_map_t *lbm)
+{
+ uword *p;
+
+ p = hash_get(load_balance_map_db, lbm);
+
+ if (NULL != p)
+ {
+ return p[0];
+ }
+
+ return (FIB_NODE_INDEX_INVALID);
+}
+
+static void
+load_balance_map_db_insert (load_balance_map_t *lbm)
+{
+ load_balance_map_path_t *lbmp;
+ fib_node_list_t list;
+ uword *p;
+
+ ASSERT(FIB_NODE_INDEX_INVALID == load_balance_map_db_find(lbm));
+
+ /*
+ * insert into the DB based on the set of paths.
+ */
+ hash_set (load_balance_map_db,
+ load_balance_map_db_hash_key_from_index(
+ load_balance_map_get_index(lbm)),
+ load_balance_map_get_index(lbm));
+
+ /*
+ * insert into each per-path list.
+ */
+ vec_foreach(lbmp, lbm->lbm_paths)
+ {
+ p = hash_get(lb_maps_by_path_index, lbmp->lbmp_index);
+
+ if (NULL == p)
+ {
+ list = fib_node_list_create();
+ hash_set(lb_maps_by_path_index, lbmp->lbmp_index, list);
+ }
+ else
+ {
+ list = p[0];
+ }
+
+ lbmp->lbmp_sibling =
+ fib_node_list_push_front(list,
+ 0, FIB_NODE_TYPE_FIRST,
+ load_balance_map_get_index(lbm));
+ }
+
+ LOAD_BALANCE_MAP_DBG(lbm, "DB-inserted");
+}
+
+static void
+load_balance_map_db_remove (load_balance_map_t *lbm)
+{
+ load_balance_map_path_t *lbmp;
+ uword *p;
+
+ ASSERT(FIB_NODE_INDEX_INVALID != load_balance_map_db_find(lbm));
+
+ hash_unset(load_balance_map_db,
+ load_balance_map_db_hash_key_from_index(
+ load_balance_map_get_index(lbm)));
+
+ /*
+ * remove from each per-path list.
+ */
+ vec_foreach(lbmp, lbm->lbm_paths)
+ {
+ p = hash_get(lb_maps_by_path_index, lbmp->lbmp_index);
+
+ ASSERT(NULL != p);
+
+ fib_node_list_remove(p[0], lbmp->lbmp_sibling);
+ }
+
+ LOAD_BALANCE_MAP_DBG(lbm, "DB-removed");
+}
+
+/**
+ * @brief from the paths that are usable, fill the Map.
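+ *
+ * Worked example: with 3 equal-weight paths and 3 buckets, if path 1
+ * goes down the map becomes {0, 0, 2}; traffic hashed to bucket 1 is
+ * redirected to the still-usable path 0, while flows on buckets 0 and
+ * 2 are undisturbed.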
+ */
+static void
+load_balance_map_fill (load_balance_map_t *lbm)
+{
+ load_balance_map_path_t *lbmp;
+ u32 n_buckets, bucket, ii, jj;
+ u16 *tmp_buckets;
+
+ tmp_buckets = NULL;
+ n_buckets = vec_len(lbm->lbm_buckets);
+
+ /*
+ * run through the set of paths once, and build a vector of the
+ * indices that are usable. We do this in a scratch space, since we
+ * need to refer to it multiple times as we build the real buckets.
+ */
+ vec_validate(tmp_buckets, n_buckets-1);
+
+ bucket = jj = 0;
+ vec_foreach (lbmp, lbm->lbm_paths)
+ {
+ if (fib_path_is_resolved(lbmp->lbmp_index))
+ {
+ for (ii = 0; ii < lbmp->lbmp_weight; ii++)
+ {
+ tmp_buckets[jj++] = bucket++;
+ }
+ }
+ else
+ {
+ bucket += lbmp->lbmp_weight;
+ }
+ }
+ _vec_len(tmp_buckets) = jj;
+
+ /*
+ * If the number of temporaries written is as many as we need, implying
+ * all paths were up, then we can simply copy the scratch area over the
+ * actual buckets' memory
+ */
+ if (jj == n_buckets)
+ {
+ memcpy(lbm->lbm_buckets,
+ tmp_buckets,
+ sizeof(lbm->lbm_buckets[0]) * n_buckets);
+ }
+ else
+ {
+ /*
+ * one or more paths are down.
+ */
+ if (0 == vec_len(tmp_buckets))
+ {
+ /*
+ * if the scratch area is empty, then no paths are usable.
+ * they will all drop. so use them all, lest we account drops
+ * against only one.
+ */
+ for (bucket = 0; bucket < n_buckets; bucket++)
+ {
+ lbm->lbm_buckets[bucket] = bucket;
+ }
+ }
+ else
+ {
+ bucket = jj = 0;
+ vec_foreach (lbmp, lbm->lbm_paths)
+ {
+ if (fib_path_is_resolved(lbmp->lbmp_index))
+ {
+ for (ii = 0; ii < lbmp->lbmp_weight; ii++)
+ {
+ lbm->lbm_buckets[bucket] = bucket;
+ bucket++;
+ }
+ }
+ else
+ {
+ /*
+ * path is unusable
+ * cycle through the scratch space selecting an index.
+ * this means we load balance, in the intended ratio,
+ * over the paths that are still usable.
+ */
+ for (ii = 0; ii < lbmp->lbmp_weight; ii++)
+ {
+ lbm->lbm_buckets[bucket] = tmp_buckets[jj];
+ jj = (jj + 1) % vec_len(tmp_buckets);
+ bucket++;
+ }
+ }
+ }
+ }
+ }
+
+ vec_free(tmp_buckets);
+}
+
+static load_balance_map_t*
+load_balance_map_alloc (const load_balance_path_t *paths)
+{
+ load_balance_map_t *lbm;
+ u32 ii;
+
+ pool_get_aligned(load_balance_map_pool, lbm, CLIB_CACHE_LINE_BYTES);
+ memset(lbm, 0, sizeof(*lbm));
+
+ vec_validate(lbm->lbm_paths, vec_len(paths)-1);
+
+ vec_foreach_index(ii, paths)
+ {
+ lbm->lbm_paths[ii].lbmp_index = paths[ii].path_index;
+ lbm->lbm_paths[ii].lbmp_weight = paths[ii].path_weight;
+ }
+
+ return (lbm);
+}
+
+static load_balance_map_t *
+load_balance_map_init (load_balance_map_t *lbm,
+ u32 n_buckets,
+ u32 sum_of_weights)
+{
+ lbm->lbm_sum_of_norm_weights = sum_of_weights;
+ vec_validate(lbm->lbm_buckets, n_buckets-1);
+
+ load_balance_map_db_insert(lbm);
+
+ load_balance_map_fill(lbm);
+
+ return (lbm);
+}
+
+index_t
+load_balance_map_add_or_lock (u32 n_buckets,
+ u32 sum_of_weights,
+ const load_balance_path_t *paths)
+{
+ load_balance_map_t *tmp, *lbm;
+ index_t lbmi;
+
+ tmp = load_balance_map_alloc(paths);
+
+ lbmi = load_balance_map_db_find(tmp);
+
+ if (INDEX_INVALID == lbmi)
+ {
+ lbm = load_balance_map_init(tmp, n_buckets, sum_of_weights);
+ }
+ else
+ {
+ lbm = load_balance_map_get(lbmi);
+
+ /*
+ * an equivalent map already exists in the DB; return the temporary
+ * map built for the lookup to the pool, lest it leak.
+ */
+ vec_free(tmp->lbm_paths);
+ pool_put(load_balance_map_pool, tmp);
+ }
+
+ lbm->lbm_locks++;
+
+ return (load_balance_map_get_index(lbm));
+}
+
+void
+load_balance_map_lock (index_t lbmi)
+{
+ load_balance_map_t *lbm;
+
+ lbm = load_balance_map_get(lbmi);
+
+ lbm->lbm_locks++;
+}
+
+void
+load_balance_map_unlock (index_t lbmi)
+{
+ load_balance_map_t *lbm;
+
+ if (INDEX_INVALID == lbmi)
+ {
+ return;
+ }
+
+ lbm = load_balance_map_get(lbmi);
+
+ lbm->lbm_locks--;
+
+ if (0 == lbm->lbm_locks)
+ {
+ load_balance_map_db_remove(lbm);
+ vec_free(lbm->lbm_paths);
+ vec_free(lbm->lbm_buckets);
+ pool_put(load_balance_map_pool, lbm);
+ }
+}
+
+static int
+load_balance_map_path_state_change_walk (fib_node_ptr_t *fptr,
+ void *ctx)
+{
+ load_balance_map_t *lbm;
+
+ lbm = load_balance_map_get(fptr->fnp_index);
+
+ load_balance_map_fill(lbm);
+
+ return (!0);
+}
+
+/**
+ * @brief the state of a path has changed (it has no doubt gone down).
+ * This is the trigger to perform a PIC edge cutover and update the maps
+ * to exclude this path.
+ */
+void
+load_balance_map_path_state_change (fib_node_index_t path_index)
+{
+ uword *p;
+
+ /*
+ * re-stripe the buckets for each affected MAP
+ */
+ p = hash_get(lb_maps_by_path_index, path_index);
+
+ if (NULL == p)
+ return;
+
+ fib_node_list_walk(p[0], load_balance_map_path_state_change_walk, NULL);
+}
+
+/**
+ * @brief Initialise the load-balance map module: create the map DB and
+ * the per-path-index DB
+ */
+void
+load_balance_map_module_init (void)
+{
+ load_balance_map_db =
+ hash_create2 (/* elts */ 0,
+ /* user */ 0,
+ /* value_bytes */ sizeof (index_t),
+ load_balance_map_db_hash_key_sum,
+ load_balance_map_db_hash_key_equal,
+ /* format pair/arg */
+ 0, 0);
+
+ lb_maps_by_path_index = hash_create(0, sizeof(fib_node_list_t));
+}
+
+void
+load_balance_map_show_mem (void)
+{
+ fib_show_memory_usage("Load-Balance Map",
+ pool_elts(load_balance_map_pool),
+ pool_len(load_balance_map_pool),
+ sizeof(load_balance_map_t));
+}
+
+static clib_error_t *
+load_balance_map_show (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ index_t lbmi = INDEX_INVALID;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%d", &lbmi))
+ ;
+ else
+ break;
+ }
+
+ if (INDEX_INVALID != lbmi)
+ {
+ vlib_cli_output (vm, "%U", format_load_balance_map, lbmi, 0);
+ }
+ else
+ {
+ load_balance_map_t *lbm;
+
+ pool_foreach(lbm, load_balance_map_pool,
+ ({
+ vlib_cli_output (vm, "%U", format_load_balance_map,
+ load_balance_map_get_index(lbm), 0);
+ }));
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (load_balance_map_show_command, static) = {
+ .path = "show load-balance-map",
+ .short_help = "show load-balance-map [<index>]",
+ .function = load_balance_map_show,
+};
diff --git a/src/vnet/dpo/load_balance_map.h b/src/vnet/dpo/load_balance_map.h
new file mode 100644
index 00000000000..454bf4b3763
--- /dev/null
+++ b/src/vnet/dpo/load_balance_map.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief
+ */
+
+#ifndef __LOAD_BALANCE_MAP_H__
+#define __LOAD_BALANCE_MAP_H__
+
+#include <vlib/vlib.h>
+#include <vnet/fib/fib_types.h>
+#include <vnet/dpo/load_balance.h>
+
+struct load_balance_map_path_t_;
+
+/**
+ */
+typedef struct load_balance_map_t_ {
+ /**
+ * The buckets of the map that provide the index to index translation.
+ * In the first cacheline.
+ */
+ u16 *lbm_buckets;
+
+ /**
+ * the vector of paths this MAP represents
+ */
+ struct load_balance_map_path_t_ *lbm_paths;
+
+ /**
+ * the sum of the normalised weights. cache for convenience
+ */
+ u32 lbm_sum_of_norm_weights;
+
+ /**
+ * Number of locks. Maps are shared by a large number of recursive fib_entry_ts
+ */
+ u32 lbm_locks;
+} load_balance_map_t;
+
+extern index_t load_balance_map_add_or_lock(u32 n_buckets,
+ u32 sum_of_weights,
+ const load_balance_path_t *norm_paths);
+
+extern void load_balance_map_lock(index_t lmbi);
+extern void load_balance_map_unlock(index_t lbmi);
+
+extern void load_balance_map_path_state_change(fib_node_index_t path_index);
+
+extern u8* format_load_balance_map(u8 *s, va_list ap);
+extern void load_balance_map_show_mem(void);
+
+/**
+ * The encapsulation breakages are for fast DP access
+ */
+extern load_balance_map_t *load_balance_map_pool;
+
+static inline load_balance_map_t*
+load_balance_map_get (index_t lbmi)
+{
+ return (pool_elt_at_index(load_balance_map_pool, lbmi));
+}
+
+
+extern void load_balance_map_module_init(void);
+
+#endif
diff --git a/src/vnet/dpo/lookup_dpo.c b/src/vnet/dpo/lookup_dpo.c
new file mode 100644
index 00000000000..96fedd27ce9
--- /dev/null
+++ b/src/vnet/dpo/lookup_dpo.c
@@ -0,0 +1,1185 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip.h>
+#include <vnet/dpo/lookup_dpo.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/mpls/mpls.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/fib/mpls_fib.h>
+
+static const char *const lookup_input_names[] = LOOKUP_INPUTS;
+
+/**
+ * @brief Enumeration of the lookup subtypes
+ */
+typedef enum lookup_sub_type_t_
+{
+ LOOKUP_SUB_TYPE_SRC,
+ LOOKUP_SUB_TYPE_DST,
+ LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE,
+} lookup_sub_type_t;
+#define LOOKUP_SUB_TYPE_NUM (LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE+1)
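+
+/*
+ * each sub-type is registered as its own DPO type so that it can be
+ * bound to its own set of graph nodes, giving each lookup variant its
+ * own data-path behaviour.
+ */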
+
+#define FOR_EACH_LOOKUP_SUB_TYPE(_st) \
+ for (_st = LOOKUP_SUB_TYPE_SRC; _st < LOOKUP_SUB_TYPE_NUM; _st++)
+
+/**
+ * @brief pool of all lookup DPOs
+ */
+lookup_dpo_t *lookup_dpo_pool;
+
+/**
+ * @brief An array of registered DPO type values for the sub-types
+ */
+static dpo_type_t lookup_dpo_sub_types[LOOKUP_SUB_TYPE_NUM];
+
+static lookup_dpo_t *
+lookup_dpo_alloc (void)
+{
+ lookup_dpo_t *lkd;
+
+ pool_get_aligned(lookup_dpo_pool, lkd, CLIB_CACHE_LINE_BYTES);
+
+ return (lkd);
+}
+
+static index_t
+lookup_dpo_get_index (lookup_dpo_t *lkd)
+{
+ return (lkd - lookup_dpo_pool);
+}
+
+static void
+lookup_dpo_add_or_lock_i (fib_node_index_t fib_index,
+ dpo_proto_t proto,
+ lookup_input_t input,
+ lookup_table_t table_config,
+ dpo_id_t *dpo)
+{
+ lookup_dpo_t *lkd;
+ dpo_type_t type;
+
+ lkd = lookup_dpo_alloc();
+ lkd->lkd_fib_index = fib_index;
+ lkd->lkd_proto = proto;
+ lkd->lkd_input = input;
+ lkd->lkd_table = table_config;
+
+ /*
+ * use the input type to select the lookup sub-type
+ */
+ type = 0;
+
+ switch (input)
+ {
+ case LOOKUP_INPUT_SRC_ADDR:
+ type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_SRC];
+ break;
+ case LOOKUP_INPUT_DST_ADDR:
+ switch (table_config)
+ {
+ case LOOKUP_TABLE_FROM_INPUT_INTERFACE:
+ type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE];
+ break;
+ case LOOKUP_TABLE_FROM_CONFIG:
+ type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST];
+ break;
+ }
+ }
+
+ if (0 == type)
+ {
+ dpo_reset(dpo);
+ }
+ else
+ {
+ dpo_set(dpo, type, proto, lookup_dpo_get_index(lkd));
+ }
+}
+
+void
+lookup_dpo_add_or_lock_w_fib_index (fib_node_index_t fib_index,
+ dpo_proto_t proto,
+ lookup_input_t input,
+ lookup_table_t table_config,
+ dpo_id_t *dpo)
+{
+ if (LOOKUP_TABLE_FROM_CONFIG == table_config)
+ {
+ fib_table_lock(fib_index, dpo_proto_to_fib(proto));
+ }
+ lookup_dpo_add_or_lock_i(fib_index, proto, input, table_config, dpo);
+}
+
+void
+lookup_dpo_add_or_lock_w_table_id (u32 table_id,
+ dpo_proto_t proto,
+ lookup_input_t input,
+ lookup_table_t table_config,
+ dpo_id_t *dpo)
+{
+ fib_node_index_t fib_index = FIB_NODE_INDEX_INVALID;
+
+ if (LOOKUP_TABLE_FROM_CONFIG == table_config)
+ {
+ fib_index =
+ fib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
+ table_id);
+ }
+
+ ASSERT(FIB_NODE_INDEX_INVALID != fib_index);
+ lookup_dpo_add_or_lock_i(fib_index, proto, input, table_config, dpo);
+}
+
+u8*
+format_lookup_dpo (u8 *s, va_list *args)
+{
+ index_t index = va_arg (*args, index_t);
+ lookup_dpo_t *lkd;
+
+ lkd = lookup_dpo_get(index);
+
+ if (LOOKUP_TABLE_FROM_INPUT_INTERFACE == lkd->lkd_table)
+ {
+ s = format(s, "%s lookup in interface's %U table",
+ lookup_input_names[lkd->lkd_input],
+ format_dpo_proto, lkd->lkd_proto);
+ }
+ else
+ {
+ s = format(s, "%s lookup in %U",
+ lookup_input_names[lkd->lkd_input],
+ format_fib_table_name, lkd->lkd_fib_index,
+ dpo_proto_to_fib(lkd->lkd_proto));
+ }
+ return (s);
+}
+
+static void
+lookup_dpo_lock (dpo_id_t *dpo)
+{
+ lookup_dpo_t *lkd;
+
+ lkd = lookup_dpo_get(dpo->dpoi_index);
+
+ lkd->lkd_locks++;
+}
+
+static void
+lookup_dpo_unlock (dpo_id_t *dpo)
+{
+ lookup_dpo_t *lkd;
+
+ lkd = lookup_dpo_get(dpo->dpoi_index);
+
+ lkd->lkd_locks--;
+
+ if (0 == lkd->lkd_locks)
+ {
+ if (LOOKUP_TABLE_FROM_CONFIG == lkd->lkd_table)
+ {
+ fib_table_unlock(lkd->lkd_fib_index,
+ dpo_proto_to_fib(lkd->lkd_proto));
+ }
+ pool_put(lookup_dpo_pool, lkd);
+ }
+}
+
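+/**
+ * @brief A single ip4 mtrie lookup: one step per byte of the address.
+ * This mirrors the destination lookup performed by ip4-lookup, but is
+ * keyed on whichever address the caller passes (source or destination).
+ */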
+always_inline void
+ip4_src_fib_lookup_one (u32 src_fib_index0,
+ const ip4_address_t * addr0,
+ u32 * src_adj_index0)
+{
+ ip4_fib_mtrie_leaf_t leaf0;
+ ip4_fib_mtrie_t * mtrie0;
+
+ mtrie0 = &ip4_fib_get (src_fib_index0)->mtrie;
+
+ leaf0 = IP4_FIB_MTRIE_LEAF_ROOT;
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 0);
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 1);
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 2);
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 3);
+
+ /* Handle default route. */
+ leaf0 = (leaf0 == IP4_FIB_MTRIE_LEAF_EMPTY ? mtrie0->default_leaf : leaf0);
+ src_adj_index0[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+}
+
+always_inline void
+ip4_src_fib_lookup_two (u32 src_fib_index0,
+ u32 src_fib_index1,
+ const ip4_address_t * addr0,
+ const ip4_address_t * addr1,
+ u32 * src_adj_index0,
+ u32 * src_adj_index1)
+{
+ ip4_fib_mtrie_leaf_t leaf0, leaf1;
+ ip4_fib_mtrie_t * mtrie0, * mtrie1;
+
+ mtrie0 = &ip4_fib_get (src_fib_index0)->mtrie;
+ mtrie1 = &ip4_fib_get (src_fib_index1)->mtrie;
+
+ leaf0 = leaf1 = IP4_FIB_MTRIE_LEAF_ROOT;
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 0);
+ leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 0);
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 1);
+ leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 1);
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 2);
+ leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 2);
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 3);
+ leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 3);
+
+ /* Handle default route. */
+ leaf0 = (leaf0 == IP4_FIB_MTRIE_LEAF_EMPTY ? mtrie0->default_leaf : leaf0);
+ leaf1 = (leaf1 == IP4_FIB_MTRIE_LEAF_EMPTY ? mtrie1->default_leaf : leaf1);
+ src_adj_index0[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+ src_adj_index1[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
+}
+
+/**
+ * @brief Lookup trace data
+ */
+typedef struct lookup_trace_t_
+{
+ union {
+ ip46_address_t addr;
+ mpls_unicast_header_t hdr;
+ };
+ fib_node_index_t fib_index;
+ index_t lbi;
+} lookup_trace_t;
+
+
+always_inline uword
+lookup_dpo_ip4_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame,
+ int input_src_addr,
+ int table_from_interface)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+ u32 cpu_index = os_get_cpu_number();
+ vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
+ flow_hash_config_t flow_hash_config0;
+ const ip4_address_t *input_addr0;
+ const load_balance_t *lb0;
+ const lookup_dpo_t * lkd0;
+ const ip4_header_t * ip0;
+ const dpo_id_t *dpo0;
+ vlib_buffer_t * b0;
+ u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1;
+ flow_hash_config_t flow_hash_config1;
+ const ip4_address_t *input_addr1;
+ const load_balance_t *lb1;
+ const lookup_dpo_t * lkd1;
+ const ip4_header_t * ip1;
+ const dpo_id_t *dpo1;
+ vlib_buffer_t * b1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ bi1 = from[1];
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (b0);
+ b1 = vlib_get_buffer (vm, bi1);
+ ip1 = vlib_buffer_get_current (b1);
+
+ /* dst lookup was done by ip4 lookup */
+ lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
+ lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
+ lkd0 = lookup_dpo_get(lkdi0);
+ lkd1 = lookup_dpo_get(lkdi1);
+
+ /*
+ * choose between a lookup using the fib index in the DPO
+ * or getting the FIB index from the interface.
+ */
+ if (table_from_interface)
+ {
+ fib_index0 =
+ ip4_fib_table_get_index_for_sw_if_index(
+ vnet_buffer(b0)->sw_if_index[VLIB_RX]);
+ fib_index1 =
+ ip4_fib_table_get_index_for_sw_if_index(
+ vnet_buffer(b1)->sw_if_index[VLIB_RX]);
+ }
+ else
+ {
+ fib_index0 = lkd0->lkd_fib_index;
+ fib_index1 = lkd1->lkd_fib_index;
+ }
+
+ /*
+ * choose between a source or destination address lookup in the table
+ */
+ if (input_src_addr)
+ {
+ input_addr0 = &ip0->src_address;
+ input_addr1 = &ip1->src_address;
+ }
+ else
+ {
+ input_addr0 = &ip0->dst_address;
+ input_addr1 = &ip1->dst_address;
+ }
+
+ /* do lookup */
+ ip4_src_fib_lookup_two (fib_index0, fib_index1,
+ input_addr0, input_addr1,
+ &lbi0, &lbi1);
+ lb0 = load_balance_get(lbi0);
+ lb1 = load_balance_get(lbi1);
+
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
+ vnet_buffer(b1)->sw_if_index[VLIB_TX] = fib_index1;
+
+ /* Use flow hash to compute multipath adjacency. */
+ hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
+ hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
+
+ if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+ {
+ flow_hash_config0 = lb0->lb_hash_config;
+ hash_c0 = vnet_buffer (b0)->ip.flow_hash =
+ ip4_compute_flow_hash (ip0, flow_hash_config0);
+ }
+
+ if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
+ {
+ flow_hash_config1 = lb1->lb_hash_config;
+ hash_c1 = vnet_buffer (b1)->ip.flow_hash =
+ ip4_compute_flow_hash (ip1, flow_hash_config1);
+ }
+
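+          /*
+           * the bucket count is a power of two (the mask below relies on
+           * this), so masking with lb_n_buckets_minus_1 selects a bucket
+           * without a modulo.
+           */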
+ dpo0 = load_balance_get_bucket_i(lb0,
+ (hash_c0 &
+ (lb0->lb_n_buckets_minus_1)));
+ dpo1 = load_balance_get_bucket_i(lb1,
+ (hash_c1 &
+ (lb1->lb_n_buckets_minus_1)));
+
+ next0 = dpo0->dpoi_next_node;
+ next1 = dpo1->dpoi_next_node;
+ vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+ vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi0, 1,
+ vlib_buffer_length_in_chain (vm, b0));
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi1, 1,
+ vlib_buffer_length_in_chain (vm, b1));
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ lookup_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->fib_index = fib_index0;
+ tr->lbi = lbi0;
+ tr->addr.ip4 = *input_addr0;
+ }
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ lookup_trace_t *tr = vlib_add_trace (vm, node,
+ b1, sizeof (*tr));
+ tr->fib_index = fib_index1;
+ tr->lbi = lbi1;
+ tr->addr.ip4 = *input_addr1;
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
+ flow_hash_config_t flow_hash_config0;
+ const ip4_address_t *input_addr;
+ const load_balance_t *lb0;
+ const lookup_dpo_t * lkd0;
+ const ip4_header_t * ip0;
+ const dpo_id_t *dpo0;
+ vlib_buffer_t * b0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* dst lookup was done by ip4 lookup */
+ lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
+ lkd0 = lookup_dpo_get(lkdi0);
+
+ /*
+ * choose between a lookup using the fib index in the DPO
+ * or getting the FIB index from the interface.
+ */
+ if (table_from_interface)
+ {
+ fib_index0 =
+ ip4_fib_table_get_index_for_sw_if_index(
+ vnet_buffer(b0)->sw_if_index[VLIB_RX]);
+ }
+ else
+ {
+ fib_index0 = lkd0->lkd_fib_index;
+ }
+
+ /*
+ * choose between a source or destination address lookup in the table
+ */
+ if (input_src_addr)
+ {
+ input_addr = &ip0->src_address;
+ }
+ else
+ {
+ input_addr = &ip0->dst_address;
+ }
+
+ /* do lookup */
+ ip4_src_fib_lookup_one (fib_index0, input_addr, &lbi0);
+ lb0 = load_balance_get(lbi0);
+
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
+
+ /* Use flow hash to compute multipath adjacency. */
+ hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
+
+ if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+ {
+ flow_hash_config0 = lb0->lb_hash_config;
+ hash_c0 = vnet_buffer (b0)->ip.flow_hash =
+ ip4_compute_flow_hash (ip0, flow_hash_config0);
+ }
+
+ dpo0 = load_balance_get_bucket_i(lb0,
+ (hash_c0 &
+ (lb0->lb_n_buckets_minus_1)));
+
+ next0 = dpo0->dpoi_next_node;
+ vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi0, 1,
+ vlib_buffer_length_in_chain (vm, b0));
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ lookup_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->fib_index = fib_index0;
+ tr->lbi = lbi0;
+ tr->addr.ip4 = *input_addr;
+ }
+
+ vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return from_frame->n_vectors;
+}
+
+static u8 *
+format_lookup_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
+ uword indent = format_get_indent (s);
+ s = format (s, "%U fib-index:%d addr:%U load-balance:%d",
+ format_white_space, indent,
+ t->fib_index,
+ format_ip46_address, &t->addr, IP46_TYPE_ANY,
+ t->lbi);
+ return s;
+}
+
+always_inline uword
+lookup_ip4_dst (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return (lookup_dpo_ip4_inline(vm, node, from_frame, 0, 0));
+}
+
+VLIB_REGISTER_NODE (lookup_ip4_dst_node) = {
+ .function = lookup_ip4_dst,
+ .name = "lookup-ip4-dst",
+ .vector_size = sizeof (u32),
+ .sibling_of = "ip4-lookup",
+ .format_trace = format_lookup_trace,
+};
+VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_dst_node, lookup_ip4_dst)
+
+always_inline uword
+lookup_ip4_dst_itf (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return (lookup_dpo_ip4_inline(vm, node, from_frame, 0, 1));
+}
+
+VLIB_REGISTER_NODE (lookup_ip4_dst_itf_node) = {
+ .function = lookup_ip4_dst_itf,
+ .name = "lookup-ip4-dst-itf",
+ .vector_size = sizeof (u32),
+ .sibling_of = "ip4-lookup",
+ .format_trace = format_lookup_trace,
+};
+VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_dst_itf_node, lookup_ip4_dst_itf)
+
+always_inline uword
+lookup_ip4_src (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return (lookup_dpo_ip4_inline(vm, node, from_frame, 1, 0));
+}
+
+VLIB_REGISTER_NODE (lookup_ip4_src_node) = {
+ .function = lookup_ip4_src,
+ .name = "lookup-ip4-src",
+ .vector_size = sizeof (u32),
+ .format_trace = format_lookup_trace,
+ .sibling_of = "ip4-lookup",
+};
+VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_src_node, lookup_ip4_src)
+
+always_inline uword
+lookup_dpo_ip6_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame,
+ int input_src_addr,
+ int table_from_interface)
+{
+ vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
+ u32 n_left_from, next_index, * from, * to_next;
+ u32 cpu_index = os_get_cpu_number();
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
+ flow_hash_config_t flow_hash_config0;
+ const ip6_address_t *input_addr0;
+ const load_balance_t *lb0;
+ const lookup_dpo_t * lkd0;
+ const ip6_header_t * ip0;
+ const dpo_id_t *dpo0;
+ vlib_buffer_t * b0;
+ u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1;
+ flow_hash_config_t flow_hash_config1;
+ const ip6_address_t *input_addr1;
+ const load_balance_t *lb1;
+ const lookup_dpo_t * lkd1;
+ const ip6_header_t * ip1;
+ const dpo_id_t *dpo1;
+ vlib_buffer_t * b1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ bi1 = from[1];
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (b0);
+ b1 = vlib_get_buffer (vm, bi1);
+ ip1 = vlib_buffer_get_current (b1);
+
+ /* dst lookup was done by ip6 lookup */
+ lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
+ lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
+ lkd0 = lookup_dpo_get(lkdi0);
+ lkd1 = lookup_dpo_get(lkdi1);
+
+ /*
+ * choose between a lookup using the fib index in the DPO
+ * or getting the FIB index from the interface.
+ */
+ if (table_from_interface)
+ {
+ fib_index0 =
+ ip6_fib_table_get_index_for_sw_if_index(
+ vnet_buffer(b0)->sw_if_index[VLIB_RX]);
+ fib_index1 =
+ ip6_fib_table_get_index_for_sw_if_index(
+ vnet_buffer(b1)->sw_if_index[VLIB_RX]);
+ }
+ else
+ {
+ fib_index0 = lkd0->lkd_fib_index;
+ fib_index1 = lkd1->lkd_fib_index;
+ }
+
+ /*
+ * choose between a source or destination address lookup in the table
+ */
+ if (input_src_addr)
+ {
+ input_addr0 = &ip0->src_address;
+ input_addr1 = &ip1->src_address;
+ }
+ else
+ {
+ input_addr0 = &ip0->dst_address;
+ input_addr1 = &ip1->dst_address;
+ }
+
+            /* do lookup */
+ lbi0 = ip6_fib_table_fwding_lookup(&ip6_main,
+ fib_index0,
+ input_addr0);
+ lbi1 = ip6_fib_table_fwding_lookup(&ip6_main,
+ fib_index1,
+ input_addr1);
+ lb0 = load_balance_get(lbi0);
+ lb1 = load_balance_get(lbi1);
+
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
+ vnet_buffer(b1)->sw_if_index[VLIB_TX] = fib_index1;
+
+ /* Use flow hash to compute multipath adjacency. */
+ hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
+ hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
+
+ if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+ {
+ flow_hash_config0 = lb0->lb_hash_config;
+ hash_c0 = vnet_buffer (b0)->ip.flow_hash =
+ ip6_compute_flow_hash (ip0, flow_hash_config0);
+ }
+
+ if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
+ {
+ flow_hash_config1 = lb1->lb_hash_config;
+ hash_c1 = vnet_buffer (b1)->ip.flow_hash =
+ ip6_compute_flow_hash (ip1, flow_hash_config1);
+ }
+
+ dpo0 = load_balance_get_bucket_i(lb0,
+ (hash_c0 &
+ (lb0->lb_n_buckets_minus_1)));
+ dpo1 = load_balance_get_bucket_i(lb1,
+ (hash_c1 &
+ (lb1->lb_n_buckets_minus_1)));
+
+ next0 = dpo0->dpoi_next_node;
+ next1 = dpo1->dpoi_next_node;
+ vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+ vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi0, 1,
+ vlib_buffer_length_in_chain (vm, b0));
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi1, 1,
+ vlib_buffer_length_in_chain (vm, b1));
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ lookup_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->fib_index = fib_index0;
+ tr->lbi = lbi0;
+ tr->addr.ip6 = *input_addr0;
+ }
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ lookup_trace_t *tr = vlib_add_trace (vm, node,
+ b1, sizeof (*tr));
+ tr->fib_index = fib_index1;
+ tr->lbi = lbi1;
+ tr->addr.ip6 = *input_addr1;
+ }
+ vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1,
+ next0, next1);
+ }
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
+ flow_hash_config_t flow_hash_config0;
+ const ip6_address_t *input_addr0;
+ const load_balance_t *lb0;
+ const lookup_dpo_t * lkd0;
+ const ip6_header_t * ip0;
+ const dpo_id_t *dpo0;
+ vlib_buffer_t * b0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* dst lookup was done by ip6 lookup */
+ lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
+ lkd0 = lookup_dpo_get(lkdi0);
+
+ /*
+ * choose between a lookup using the fib index in the DPO
+ * or getting the FIB index from the interface.
+ */
+ if (table_from_interface)
+ {
+ fib_index0 =
+ ip6_fib_table_get_index_for_sw_if_index(
+ vnet_buffer(b0)->sw_if_index[VLIB_RX]);
+ }
+ else
+ {
+ fib_index0 = lkd0->lkd_fib_index;
+ }
+
+ /*
+ * choose between a source or destination address lookup in the table
+ */
+ if (input_src_addr)
+ {
+ input_addr0 = &ip0->src_address;
+ }
+ else
+ {
+ input_addr0 = &ip0->dst_address;
+ }
+
+            /* do lookup */
+ lbi0 = ip6_fib_table_fwding_lookup(&ip6_main,
+ fib_index0,
+ input_addr0);
+ lb0 = load_balance_get(lbi0);
+
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
+
+ /* Use flow hash to compute multipath adjacency. */
+ hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
+
+ if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+ {
+ flow_hash_config0 = lb0->lb_hash_config;
+ hash_c0 = vnet_buffer (b0)->ip.flow_hash =
+ ip6_compute_flow_hash (ip0, flow_hash_config0);
+ }
+
+ dpo0 = load_balance_get_bucket_i(lb0,
+ (hash_c0 &
+ (lb0->lb_n_buckets_minus_1)));
+
+ next0 = dpo0->dpoi_next_node;
+ vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi0, 1,
+ vlib_buffer_length_in_chain (vm, b0));
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ lookup_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->fib_index = fib_index0;
+ tr->lbi = lbi0;
+ tr->addr.ip6 = *input_addr0;
+ }
+ vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return from_frame->n_vectors;
+}
+
+always_inline uword
+lookup_ip6_dst (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+    return (lookup_dpo_ip6_inline(vm, node, from_frame, 0 /* use dst */, 0));
+}
+
+VLIB_REGISTER_NODE (lookup_ip6_dst_node) = {
+ .function = lookup_ip6_dst,
+ .name = "lookup-ip6-dst",
+ .vector_size = sizeof (u32),
+ .format_trace = format_lookup_trace,
+ .sibling_of = "ip6-lookup",
+};
+VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_dst_node, lookup_ip6_dst)
+
+always_inline uword
+lookup_ip6_dst_itf (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+    return (lookup_dpo_ip6_inline(vm, node, from_frame, 0 /* use dst */, 1));
+}
+
+VLIB_REGISTER_NODE (lookup_ip6_dst_itf_node) = {
+ .function = lookup_ip6_dst_itf,
+ .name = "lookup-ip6-dst-itf",
+ .vector_size = sizeof (u32),
+ .format_trace = format_lookup_trace,
+ .sibling_of = "ip6-lookup",
+};
+VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_dst_itf_node, lookup_ip6_dst_itf)
+
+always_inline uword
+lookup_ip6_src (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+    return (lookup_dpo_ip6_inline(vm, node, from_frame, 1 /* use src */, 0));
+}
+
+VLIB_REGISTER_NODE (lookup_ip6_src_node) = {
+ .function = lookup_ip6_src,
+ .name = "lookup-ip6-src",
+ .vector_size = sizeof (u32),
+ .format_trace = format_lookup_trace,
+ .sibling_of = "ip6-lookup",
+};
+VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_src_node, lookup_ip6_src)
+
+always_inline uword
+lookup_dpo_mpls_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame,
+ int table_from_interface)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+ u32 cpu_index = os_get_cpu_number();
+ vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+
+        /*
+         * TODO: add a dual-buffer loop (n_left_from >= 4 && n_left_to_next >= 2)
+         * here, as in the IP paths above; only the single-buffer loop is
+         * implemented for now.
+         */
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, lkdi0, lbi0, fib_index0, next0;
+ const mpls_unicast_header_t * hdr0;
+ const load_balance_t *lb0;
+ const lookup_dpo_t * lkd0;
+ const dpo_id_t *dpo0;
+ vlib_buffer_t * b0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ hdr0 = vlib_buffer_get_current (b0);
+
+ /* dst lookup was done by mpls lookup */
+ lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
+ lkd0 = lookup_dpo_get(lkdi0);
+
+ /*
+ * choose between a lookup using the fib index in the DPO
+ * or getting the FIB index from the interface.
+ */
+ if (table_from_interface)
+ {
+ fib_index0 =
+ mpls_fib_table_get_index_for_sw_if_index(
+ vnet_buffer(b0)->sw_if_index[VLIB_RX]);
+ }
+ else
+ {
+ fib_index0 = lkd0->lkd_fib_index;
+ }
+
+ /* do lookup */
+ lbi0 = mpls_fib_table_forwarding_lookup (fib_index0, hdr0);
+ lb0 = load_balance_get(lbi0);
+ dpo0 = load_balance_get_bucket_i(lb0, 0);
+
+ next0 = dpo0->dpoi_next_node;
+ vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi0, 1,
+ vlib_buffer_length_in_chain (vm, b0));
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ lookup_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->fib_index = fib_index0;
+ tr->lbi = lbi0;
+ tr->hdr = *hdr0;
+ }
+
+ vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return from_frame->n_vectors;
+}
+
+static u8 *
+format_lookup_mpls_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
+ uword indent = format_get_indent (s);
+ mpls_unicast_header_t hdr;
+
+ hdr.label_exp_s_ttl = clib_net_to_host_u32(t->hdr.label_exp_s_ttl);
+
+ s = format (s, "%U fib-index:%d hdr:%U load-balance:%d",
+ format_white_space, indent,
+ t->fib_index,
+ format_mpls_header, hdr,
+ t->lbi);
+ return s;
+}
+
+always_inline uword
+lookup_mpls_dst (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return (lookup_dpo_mpls_inline(vm, node, from_frame, 0));
+}
+
+VLIB_REGISTER_NODE (lookup_mpls_dst_node) = {
+ .function = lookup_mpls_dst,
+ .name = "lookup-mpls-dst",
+ .vector_size = sizeof (u32),
+ .sibling_of = "mpls-lookup",
+ .format_trace = format_lookup_mpls_trace,
+ .n_next_nodes = 0,
+};
+VLIB_NODE_FUNCTION_MULTIARCH (lookup_mpls_dst_node, lookup_mpls_dst)
+
+always_inline uword
+lookup_mpls_dst_itf (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return (lookup_dpo_mpls_inline(vm, node, from_frame, 1));
+}
+
+VLIB_REGISTER_NODE (lookup_mpls_dst_itf_node) = {
+ .function = lookup_mpls_dst_itf,
+ .name = "lookup-mpls-dst-itf",
+ .vector_size = sizeof (u32),
+ .sibling_of = "mpls-lookup",
+ .format_trace = format_lookup_mpls_trace,
+ .n_next_nodes = 0,
+};
+VLIB_NODE_FUNCTION_MULTIARCH (lookup_mpls_dst_itf_node, lookup_mpls_dst_itf)
+
+static void
+lookup_dpo_mem_show (void)
+{
+ fib_show_memory_usage("Lookup",
+ pool_elts(lookup_dpo_pool),
+ pool_len(lookup_dpo_pool),
+ sizeof(lookup_dpo_t));
+}
+
+const static dpo_vft_t lkd_vft = {
+ .dv_lock = lookup_dpo_lock,
+ .dv_unlock = lookup_dpo_unlock,
+ .dv_format = format_lookup_dpo,
+};
+const static dpo_vft_t lkd_vft_w_mem_show = {
+ .dv_lock = lookup_dpo_lock,
+ .dv_unlock = lookup_dpo_unlock,
+ .dv_format = format_lookup_dpo,
+ .dv_mem_show = lookup_dpo_mem_show,
+};
+
+const static char* const lookup_src_ip4_nodes[] =
+{
+ "lookup-ip4-src",
+ NULL,
+};
+const static char* const lookup_src_ip6_nodes[] =
+{
+ "lookup-ip6-src",
+ NULL,
+};
+const static char* const * const lookup_src_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = lookup_src_ip4_nodes,
+ [DPO_PROTO_IP6] = lookup_src_ip6_nodes,
+ [DPO_PROTO_MPLS] = NULL,
+};
+
+const static char* const lookup_dst_ip4_nodes[] =
+{
+ "lookup-ip4-dst",
+ NULL,
+};
+const static char* const lookup_dst_ip6_nodes[] =
+{
+ "lookup-ip6-dst",
+ NULL,
+};
+const static char* const lookup_dst_mpls_nodes[] =
+{
+ "lookup-mpls-dst",
+ NULL,
+};
+const static char* const * const lookup_dst_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = lookup_dst_ip4_nodes,
+ [DPO_PROTO_IP6] = lookup_dst_ip6_nodes,
+ [DPO_PROTO_MPLS] = lookup_dst_mpls_nodes,
+};
+
+const static char* const lookup_dst_from_interface_ip4_nodes[] =
+{
+ "lookup-ip4-dst-itf",
+ NULL,
+};
+const static char* const lookup_dst_from_interface_ip6_nodes[] =
+{
+ "lookup-ip6-dst-itf",
+ NULL,
+};
+const static char* const lookup_dst_from_interface_mpls_nodes[] =
+{
+ "lookup-mpls-dst-itf",
+ NULL,
+};
+const static char* const * const lookup_dst_from_interface_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = lookup_dst_from_interface_ip4_nodes,
+ [DPO_PROTO_IP6] = lookup_dst_from_interface_ip6_nodes,
+ [DPO_PROTO_MPLS] = lookup_dst_from_interface_mpls_nodes,
+};
+
+
+void
+lookup_dpo_module_init (void)
+{
+ dpo_register(DPO_LOOKUP, &lkd_vft_w_mem_show, NULL);
+
+ /*
+     * There are various sorts of lookup: src or dst address, v4/v6, etc.
+     * There isn't an object type for each (there is only the lookup_dpo_t),
+     * but, for performance reasons, there is a data-plane function, and
+     * hence a VLIB node, for each. VLIB graph node construction is based
+     * on DPO types, so we create sub-types.
+ */
+ lookup_dpo_sub_types[LOOKUP_SUB_TYPE_SRC] =
+ dpo_register_new_type(&lkd_vft, lookup_src_nodes);
+ lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST] =
+ dpo_register_new_type(&lkd_vft, lookup_dst_nodes);
+ lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE] =
+ dpo_register_new_type(&lkd_vft, lookup_dst_from_interface_nodes);
+}
diff --git a/src/vnet/dpo/lookup_dpo.h b/src/vnet/dpo/lookup_dpo.h
new file mode 100644
index 00000000000..ff283388868
--- /dev/null
+++ b/src/vnet/dpo/lookup_dpo.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LOOKUP_DPO_H__
+#define __LOOKUP_DPO_H__
+
+#include <vnet/vnet.h>
+#include <vnet/fib/fib_types.h>
+#include <vnet/dpo/dpo.h>
+
+/**
+ * Switch to use the packet's source or destination address for lookup
+ */
+typedef enum lookup_input_t_ {
+ LOOKUP_INPUT_SRC_ADDR,
+ LOOKUP_INPUT_DST_ADDR,
+} __attribute__ ((packed)) lookup_input_t;
+
+#define LOOKUP_INPUTS { \
+ [LOOKUP_INPUT_SRC_ADDR] = "src-address", \
+ [LOOKUP_INPUT_DST_ADDR] = "dst-address", \
+}
+
+/**
+ * Switch to lookup in the table configured on the DPO, or in the table
+ * bound to the packet's input interface
+ */
+typedef enum lookup_table_t_ {
+ LOOKUP_TABLE_FROM_INPUT_INTERFACE,
+ LOOKUP_TABLE_FROM_CONFIG,
+} __attribute__ ((packed)) lookup_table_t;
+
+#define LOOKUP_TABLES { \
+    [LOOKUP_TABLE_FROM_INPUT_INTERFACE] = "table-input-interface", \
+    [LOOKUP_TABLE_FROM_CONFIG] = "table-configured", \
+}
+
+/**
+ * A DPO that performs an additional lookup in a FIB table, keyed by
+ * either the packet's source or destination address
+ */
+typedef struct lookup_dpo_t
+{
+ /**
+ * The FIB, or interface from which to get a FIB, in which to perform
+ * the next lookup;
+ */
+ fib_node_index_t lkd_fib_index;
+
+ /**
+ * The protocol of the FIB for the lookup, and hence
+ * the protocol of the packet
+ */
+ dpo_proto_t lkd_proto;
+
+ /**
+ * Switch to use src or dst address
+ */
+ lookup_input_t lkd_input;
+
+ /**
+ * Switch to use the table index passed, or the table of the input interface
+ */
+ lookup_table_t lkd_table;
+
+ /**
+ * Number of locks
+ */
+ u16 lkd_locks;
+} lookup_dpo_t;
+
+extern void lookup_dpo_add_or_lock_w_fib_index(fib_node_index_t fib_index,
+ dpo_proto_t proto,
+ lookup_input_t input,
+ lookup_table_t table,
+ dpo_id_t *dpo);
+extern void lookup_dpo_add_or_lock_w_table_id(u32 table_id,
+ dpo_proto_t proto,
+ lookup_input_t input,
+ lookup_table_t table,
+ dpo_id_t *dpo);
+
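+/*
+ * Example usage (an illustrative sketch, not part of this patch's
+ * contract): create a lookup DPO that re-looks-up the destination
+ * address in FIB index 5. The DPO_INVALID initialiser is assumed to
+ * come from dpo.h.
+ *
+ *   dpo_id_t dpo = DPO_INVALID;
+ *
+ *   lookup_dpo_add_or_lock_w_fib_index(5, DPO_PROTO_IP4,
+ *                                      LOOKUP_INPUT_DST_ADDR,
+ *                                      LOOKUP_TABLE_FROM_CONFIG,
+ *                                      &dpo);
+ *   ...
+ *   dpo_reset(&dpo);
+ */
+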
+extern u8* format_lookup_dpo(u8 *s, va_list *args);
+
+/*
+ * Encapsulation violation for fast data-path access
+ */
+extern lookup_dpo_t *lookup_dpo_pool;
+
+static inline lookup_dpo_t *
+lookup_dpo_get (index_t index)
+{
+ return (pool_elt_at_index(lookup_dpo_pool, index));
+}
+
+extern void lookup_dpo_module_init(void);
+
+#endif
diff --git a/src/vnet/dpo/mpls_label_dpo.c b/src/vnet/dpo/mpls_label_dpo.c
new file mode 100644
index 00000000000..bbdc9666503
--- /dev/null
+++ b/src/vnet/dpo/mpls_label_dpo.c
@@ -0,0 +1,570 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip.h>
+#include <vnet/dpo/mpls_label_dpo.h>
+#include <vnet/mpls/mpls.h>
+
+/*
+ * pool of all MPLS Label DPOs
+ */
+mpls_label_dpo_t *mpls_label_dpo_pool;
+
+static mpls_label_dpo_t *
+mpls_label_dpo_alloc (void)
+{
+ mpls_label_dpo_t *mld;
+
+ pool_get_aligned(mpls_label_dpo_pool, mld, CLIB_CACHE_LINE_BYTES);
+ memset(mld, 0, sizeof(*mld));
+
+ dpo_reset(&mld->mld_dpo);
+
+ return (mld);
+}
+
+static index_t
+mpls_label_dpo_get_index (mpls_label_dpo_t *mld)
+{
+ return (mld - mpls_label_dpo_pool);
+}
+
+index_t
+mpls_label_dpo_create (mpls_label_t *label_stack,
+ mpls_eos_bit_t eos,
+ u8 ttl,
+ u8 exp,
+ dpo_proto_t payload_proto,
+ const dpo_id_t *dpo)
+{
+ mpls_label_dpo_t *mld;
+ u32 ii;
+
+ mld = mpls_label_dpo_alloc();
+ mld->mld_n_labels = vec_len(label_stack);
+ mld->mld_n_hdr_bytes = mld->mld_n_labels * sizeof(mld->mld_hdr[0]);
+ mld->mld_payload_proto = payload_proto;
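+    /*
+     * note: mld_hdr has room for 8 labels (see the static assert in the
+     * header); callers are assumed to pass a non-empty stack of at most
+     * that many labels.
+     */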
+
+ /*
+     * construct a label rewrite header for each value passed.
+     * store the header in network byte order since we will paint it
+     * onto a packet in the data-plane
+ */
+
+ for (ii = 0; ii < mld->mld_n_labels-1; ii++)
+ {
+ vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl, label_stack[ii]);
+ vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl, 255);
+ vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl, 0);
+ vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, MPLS_NON_EOS);
+ mld->mld_hdr[ii].label_exp_s_ttl =
+ clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl);
+ }
+
+ /*
+ * the inner most label
+ */
+ ii = mld->mld_n_labels-1;
+
+ vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl, label_stack[ii]);
+ vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl, ttl);
+ vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl, exp);
+ vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, eos);
+ mld->mld_hdr[ii].label_exp_s_ttl =
+ clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl);
+
+ /*
+     * stack this label object on its parent.
+ */
+ dpo_stack(DPO_MPLS_LABEL,
+ mld->mld_payload_proto,
+ &mld->mld_dpo,
+ dpo);
+
+ return (mpls_label_dpo_get_index(mld));
+}
+
+u8*
+format_mpls_label_dpo (u8 *s, va_list *args)
+{
+ index_t index = va_arg (*args, index_t);
+ u32 indent = va_arg (*args, u32);
+ mpls_unicast_header_t hdr;
+ mpls_label_dpo_t *mld;
+ u32 ii;
+
+ mld = mpls_label_dpo_get(index);
+
+ s = format(s, "mpls-label:[%d]:", index);
+
+ for (ii = 0; ii < mld->mld_n_labels; ii++)
+ {
+ hdr.label_exp_s_ttl =
+ clib_net_to_host_u32(mld->mld_hdr[ii].label_exp_s_ttl);
+ s = format(s, "%U", format_mpls_header, hdr);
+ }
+
+ s = format(s, "\n%U", format_white_space, indent);
+ s = format(s, "%U", format_dpo_id, &mld->mld_dpo, indent+2);
+
+ return (s);
+}
+
+static void
+mpls_label_dpo_lock (dpo_id_t *dpo)
+{
+ mpls_label_dpo_t *mld;
+
+ mld = mpls_label_dpo_get(dpo->dpoi_index);
+
+ mld->mld_locks++;
+}
+
+static void
+mpls_label_dpo_unlock (dpo_id_t *dpo)
+{
+ mpls_label_dpo_t *mld;
+
+ mld = mpls_label_dpo_get(dpo->dpoi_index);
+
+ mld->mld_locks--;
+
+ if (0 == mld->mld_locks)
+ {
+ dpo_reset(&mld->mld_dpo);
+ pool_put(mpls_label_dpo_pool, mld);
+ }
+}
+
+/**
+ * @brief A struct to hold tracing information for the MPLS label imposition
+ * node.
+ */
+typedef struct mpls_label_imposition_trace_t_
+{
+ /**
+ * The MPLS header imposed
+ */
+ mpls_unicast_header_t hdr;
+} mpls_label_imposition_trace_t;
+
+always_inline uword
+mpls_label_imposition_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame,
+ u8 payload_is_ip4,
+ u8 payload_is_ip6)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ mpls_unicast_header_t *hdr0, *hdr1;
+ mpls_label_dpo_t *mld0, *mld1;
+ u32 bi0, mldi0, bi1, mldi1;
+ vlib_buffer_t * b0, *b1;
+ u32 next0, next1;
+ u8 ttl0, ttl1;
+
+ bi0 = to_next[0] = from[0];
+ bi1 = to_next[1] = from[1];
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, STORE);
+ vlib_prefetch_buffer_header (p3, STORE);
+
+ CLIB_PREFETCH (p2->data, sizeof (hdr0[0]), STORE);
+ CLIB_PREFETCH (p3->data, sizeof (hdr0[0]), STORE);
+ }
+
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+          /* the preceding lookup stored the label DPO index in the TX adjacency */
+ mldi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
+ mldi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
+ mld0 = mpls_label_dpo_get(mldi0);
+ mld1 = mpls_label_dpo_get(mldi1);
+
+ if (payload_is_ip4)
+ {
+ /*
+ * decrement the TTL on ingress to the LSP
+ */
+ ip4_header_t * ip0 = vlib_buffer_get_current(b0);
+ ip4_header_t * ip1 = vlib_buffer_get_current(b1);
+ u32 checksum0;
+ u32 checksum1;
+
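+              /*
+               * incremental checksum update (cf. RFC 1141): decrementing
+               * the TTL reduces the header sum by 0x0100, so the one's
+               * complement checksum grows by 0x0100 (network order);
+               * add it and fold the end-around carry.
+               */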
+ checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
+ checksum1 = ip1->checksum + clib_host_to_net_u16 (0x0100);
+
+ checksum0 += checksum0 >= 0xffff;
+ checksum1 += checksum1 >= 0xffff;
+
+ ip0->checksum = checksum0;
+ ip1->checksum = checksum1;
+
+ ip0->ttl -= 1;
+ ip1->ttl -= 1;
+
+ ttl1 = ip1->ttl;
+ ttl0 = ip0->ttl;
+ }
+ else if (payload_is_ip6)
+ {
+ /*
+ * decrement the TTL on ingress to the LSP
+ */
+ ip6_header_t * ip0 = vlib_buffer_get_current(b0);
+ ip6_header_t * ip1 = vlib_buffer_get_current(b1);
+
+
+ ip0->hop_limit -= 1;
+ ip1->hop_limit -= 1;
+
+ ttl0 = ip0->hop_limit;
+ ttl1 = ip1->hop_limit;
+ }
+ else
+ {
+ /*
+ * else, the packet to be encapped is an MPLS packet
+ */
+ if (PREDICT_TRUE(vnet_buffer(b0)->mpls.first))
+ {
+ /*
+                   * The first label to be imposed on the packet: this is a
+                   * label swap, in which case the lookup node stashed the
+                   * TTL and EXP bits in the packet
+ */
+ ASSERT(0 != vnet_buffer (b0)->mpls.ttl);
+
+ ttl0 = vnet_buffer(b0)->mpls.ttl - 1;
+ }
+ else
+ {
+ /*
+                   * not the first label, implying we are recursing down a
+                   * chain of output labels.
+                   * Each layer is considered a new LSP, hence the TTL is reset.
+ */
+ ttl0 = 255;
+ }
+ if (PREDICT_TRUE(vnet_buffer(b1)->mpls.first))
+ {
+                  ASSERT(0 != vnet_buffer (b1)->mpls.ttl);
+ ttl1 = vnet_buffer(b1)->mpls.ttl - 1;
+ }
+ else
+ {
+ ttl1 = 255;
+ }
+ }
+ vnet_buffer(b0)->mpls.first = 0;
+ vnet_buffer(b1)->mpls.first = 0;
+
+ /* Paint the MPLS header */
+ vlib_buffer_advance(b0, -(mld0->mld_n_hdr_bytes));
+ vlib_buffer_advance(b1, -(mld1->mld_n_hdr_bytes));
+
+ hdr0 = vlib_buffer_get_current(b0);
+ hdr1 = vlib_buffer_get_current(b1);
+
+ clib_memcpy(hdr0, mld0->mld_hdr, mld0->mld_n_hdr_bytes);
+ clib_memcpy(hdr1, mld1->mld_hdr, mld1->mld_n_hdr_bytes);
+
+ /* fixup the TTL for the inner most label */
+ hdr0 = hdr0 + (mld0->mld_n_labels - 1);
+ hdr1 = hdr1 + (mld1->mld_n_labels - 1);
+ ((char*)hdr0)[3] = ttl0;
+ ((char*)hdr1)[3] = ttl1;
+
+ next0 = mld0->mld_dpo.dpoi_next_node;
+ next1 = mld1->mld_dpo.dpoi_next_node;
+ vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mld0->mld_dpo.dpoi_index;
+ vnet_buffer(b1)->ip.adj_index[VLIB_TX] = mld1->mld_dpo.dpoi_index;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_label_imposition_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->hdr = *hdr0;
+ }
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_label_imposition_trace_t *tr =
+ vlib_add_trace (vm, node, b1, sizeof (*tr));
+ tr->hdr = *hdr1;
+ }
+
+ vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
+ n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ mpls_unicast_header_t *hdr0;
+ mpls_label_dpo_t *mld0;
+ vlib_buffer_t * b0;
+ u32 bi0, mldi0;
+ u32 next0;
+ u8 ttl;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+          /* the preceding lookup stored the label DPO index in the TX adjacency */
+ mldi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
+ mld0 = mpls_label_dpo_get(mldi0);
+
+ if (payload_is_ip4)
+ {
+ /*
+ * decrement the TTL on ingress to the LSP
+ */
+ ip4_header_t * ip0 = vlib_buffer_get_current(b0);
+ u32 checksum0;
+
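+              /* incremental checksum update for the TTL decrement,
+               * as in the dual-buffer loop above */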
+ checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
+ checksum0 += checksum0 >= 0xffff;
+
+ ip0->checksum = checksum0;
+ ip0->ttl -= 1;
+ ttl = ip0->ttl;
+ }
+ else if (payload_is_ip6)
+ {
+ /*
+ * decrement the TTL on ingress to the LSP
+ */
+ ip6_header_t * ip0 = vlib_buffer_get_current(b0);
+
+ ip0->hop_limit -= 1;
+ ttl = ip0->hop_limit;
+ }
+ else
+ {
+ /*
+ * else, the packet to be encapped is an MPLS packet
+ */
+ if (vnet_buffer(b0)->mpls.first)
+ {
+ /*
+                   * The first label to be imposed on the packet: this is a
+                   * label swap, in which case the lookup node stashed the
+                   * TTL and EXP bits in the packet
+ */
+ ASSERT(0 != vnet_buffer (b0)->mpls.ttl);
+
+ ttl = vnet_buffer(b0)->mpls.ttl - 1;
+ }
+ else
+ {
+ /*
+                   * not the first label, implying we are recursing down a
+                   * chain of output labels.
+                   * Each layer is considered a new LSP, hence the TTL is reset.
+ */
+ ttl = 255;
+ }
+ }
+ vnet_buffer(b0)->mpls.first = 0;
+
+ /* Paint the MPLS header */
+ vlib_buffer_advance(b0, -(mld0->mld_n_hdr_bytes));
+ hdr0 = vlib_buffer_get_current(b0);
+ clib_memcpy(hdr0, mld0->mld_hdr, mld0->mld_n_hdr_bytes);
+
+ /* fixup the TTL for the inner most label */
+ hdr0 = hdr0 + (mld0->mld_n_labels - 1);
+ ((char*)hdr0)[3] = ttl;
+
+ next0 = mld0->mld_dpo.dpoi_next_node;
+ vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mld0->mld_dpo.dpoi_index;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_label_imposition_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->hdr = *hdr0;
+ }
+
+ vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return from_frame->n_vectors;
+}
+
+static u8 *
+format_mpls_label_imposition_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ mpls_label_imposition_trace_t * t;
+ mpls_unicast_header_t hdr;
+ uword indent;
+
+ t = va_arg (*args, mpls_label_imposition_trace_t *);
+ indent = format_get_indent (s);
+ hdr.label_exp_s_ttl = clib_net_to_host_u32(t->hdr.label_exp_s_ttl);
+
+ s = format (s, "%Umpls-header:%U",
+ format_white_space, indent,
+ format_mpls_header, hdr);
+ return (s);
+}
+
+static uword
+mpls_label_imposition (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_imposition_inline(vm, node, frame, 0, 0));
+}
+
+VLIB_REGISTER_NODE (mpls_label_imposition_node) = {
+ .function = mpls_label_imposition,
+ .name = "mpls-label-imposition",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_mpls_label_imposition_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ }
+};
+VLIB_NODE_FUNCTION_MULTIARCH (mpls_label_imposition_node,
+ mpls_label_imposition)
+
+static uword
+ip4_mpls_label_imposition (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_imposition_inline(vm, node, frame, 1, 0));
+}
+
+VLIB_REGISTER_NODE (ip4_mpls_label_imposition_node) = {
+ .function = ip4_mpls_label_imposition,
+ .name = "ip4-mpls-label-imposition",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_mpls_label_imposition_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ }
+};
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_mpls_label_imposition_node,
+ ip4_mpls_label_imposition)
+
+static uword
+ip6_mpls_label_imposition (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_imposition_inline(vm, node, frame, 0, 1));
+}
+
+VLIB_REGISTER_NODE (ip6_mpls_label_imposition_node) = {
+ .function = ip6_mpls_label_imposition,
+ .name = "ip6-mpls-label-imposition",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_mpls_label_imposition_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ }
+};
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_imposition_node,
+ ip6_mpls_label_imposition)
+
+static void
+mpls_label_dpo_mem_show (void)
+{
+ fib_show_memory_usage("MPLS label",
+ pool_elts(mpls_label_dpo_pool),
+ pool_len(mpls_label_dpo_pool),
+ sizeof(mpls_label_dpo_t));
+}
+
+const static dpo_vft_t mld_vft = {
+ .dv_lock = mpls_label_dpo_lock,
+ .dv_unlock = mpls_label_dpo_unlock,
+ .dv_format = format_mpls_label_dpo,
+ .dv_mem_show = mpls_label_dpo_mem_show,
+};
+
+const static char* const mpls_label_imp_ip4_nodes[] =
+{
+ "ip4-mpls-label-imposition",
+ NULL,
+};
+const static char* const mpls_label_imp_ip6_nodes[] =
+{
+ "ip6-mpls-label-imposition",
+ NULL,
+};
+const static char* const mpls_label_imp_mpls_nodes[] =
+{
+ "mpls-label-imposition",
+ NULL,
+};
+const static char* const * const mpls_label_imp_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = mpls_label_imp_ip4_nodes,
+ [DPO_PROTO_IP6] = mpls_label_imp_ip6_nodes,
+ [DPO_PROTO_MPLS] = mpls_label_imp_mpls_nodes,
+};
+
+
+void
+mpls_label_dpo_module_init (void)
+{
+ dpo_register(DPO_MPLS_LABEL, &mld_vft, mpls_label_imp_nodes);
+}
diff --git a/src/vnet/dpo/mpls_label_dpo.h b/src/vnet/dpo/mpls_label_dpo.h
new file mode 100644
index 00000000000..89bcb093b04
--- /dev/null
+++ b/src/vnet/dpo/mpls_label_dpo.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MPLS_LABEL_DPO_H__
+#define __MPLS_LABEL_DPO_H__
+
+#include <vnet/vnet.h>
+#include <vnet/mpls/packet.h>
+#include <vnet/dpo/dpo.h>
+
+/**
+ * A representation of an MPLS label for imposition in the data-path
+ */
+typedef struct mpls_label_dpo_t
+{
+ /**
+ * The MPLS label header to impose. Outer most label first.
+ */
+ mpls_unicast_header_t mld_hdr[8];
+
+ /**
+ * Next DPO in the graph
+ */
+ dpo_id_t mld_dpo;
+
+ /**
+ * The protocol of the payload/packets that are being encapped
+ */
+ dpo_proto_t mld_payload_proto;
+
+ /**
+ * Size of the label stack
+ */
+ u16 mld_n_labels;
+
+ /**
+ * Cached amount of header bytes to paint
+ */
+ u16 mld_n_hdr_bytes;
+
+ /**
+ * Number of locks/users of the label
+ */
+ u16 mld_locks;
+} mpls_label_dpo_t;
+
+/**
+ * @brief Assert that the MPLS label object is less than a cache line in size.
+ * Should this get any bigger then we will need to reconsider how many labels
+ * can be pushed in one object.
+ */
+_Static_assert((sizeof(mpls_label_dpo_t) <= CLIB_CACHE_LINE_BYTES),
+ "MPLS label DPO is larger than one cache line.");
+
+/**
+ * @brief Create an MPLS label object
+ *
+ * @param label_stack The stack of labels to impose, outer-most label first
+ * @param eos The inner-most label's EOS bit
+ * @param ttl The inner-most label's TTL value
+ * @param exp The inner-most label's EXP value
+ * @param payload_proto The protocol of the payload packets that will
+ *                      be imposed with this label header.
+ * @param dpo The parent of the created MPLS label object
+ */
+extern index_t mpls_label_dpo_create(mpls_label_t *label_stack,
+ mpls_eos_bit_t eos,
+ u8 ttl,
+ u8 exp,
+ dpo_proto_t payload_proto,
+ const dpo_id_t *dpo);
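+
+/*
+ * Example usage (an illustrative sketch; `parent` stands in for
+ * whatever DPO the labelled packets should forward onto next):
+ *
+ *   mpls_label_t *stack = NULL;
+ *   index_t mldi;
+ *
+ *   vec_add1(stack, 100);    // outer-most label
+ *   vec_add1(stack, 200);    // inner-most label; gets the EOS/TTL/EXP
+ *   mldi = mpls_label_dpo_create(stack, MPLS_EOS, 64, 0,
+ *                                DPO_PROTO_IP4, &parent);
+ *   vec_free(stack);
+ */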
+
+extern u8* format_mpls_label_dpo(u8 *s, va_list *args);
+
+
+/*
+ * Encapsulation violation for fast data-path access
+ */
+extern mpls_label_dpo_t *mpls_label_dpo_pool;
+
+static inline mpls_label_dpo_t *
+mpls_label_dpo_get (index_t index)
+{
+ return (pool_elt_at_index(mpls_label_dpo_pool, index));
+}
+
+extern void mpls_label_dpo_module_init(void);
+
+#endif
diff --git a/src/vnet/dpo/punt_dpo.c b/src/vnet/dpo/punt_dpo.c
new file mode 100644
index 00000000000..d1661dcc8e0
--- /dev/null
+++ b/src/vnet/dpo/punt_dpo.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief
+ * The data-path object representing punting the packet to the control-plane
+ */
+
+#include <vnet/dpo/dpo.h>
+
+static dpo_id_t punt_dpos[DPO_PROTO_NUM];
+
+const dpo_id_t *
+punt_dpo_get (dpo_proto_t proto)
+{
+ dpo_set(&punt_dpos[proto], DPO_PUNT, proto, 1);
+
+ return (&punt_dpos[proto]);
+}
+
+int
+dpo_is_punt (const dpo_id_t *dpo)
+{
+ return (dpo->dpoi_type == DPO_PUNT);
+}
+
+static void
+punt_dpo_lock (dpo_id_t *dpo)
+{
+ /*
+     * not maintaining a lock count on the punt DPO: it is more
+     * trouble than it's worth.
+     * There always needs to be one around, so there is no point in
+     * managing its lifetime.
+ */
+}
+static void
+punt_dpo_unlock (dpo_id_t *dpo)
+{
+}
+
+static u8*
+format_punt_dpo (u8 *s, va_list *ap)
+{
+ CLIB_UNUSED(index_t index) = va_arg(*ap, index_t);
+ CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
+
+ return (format(s, "dpo-punt"));
+}
+
+const static dpo_vft_t punt_vft = {
+ .dv_lock = punt_dpo_lock,
+ .dv_unlock = punt_dpo_unlock,
+ .dv_format = format_punt_dpo,
+};
+
+/**
+ * @brief The per-protocol VLIB graph nodes that are assigned to a punt
+ * object.
+ *
+ * i.e. these are the graph nodes to which packets are sent when a punt
+ * object is their parent in the DPO graph.
+ */
+const static char* const punt_ip4_nodes[] =
+{
+ "ip4-punt",
+ NULL,
+};
+const static char* const punt_ip6_nodes[] =
+{
+ "ip6-punt",
+ NULL,
+};
+const static char* const punt_mpls_nodes[] =
+{
+ "mpls-punt",
+ NULL,
+};
+const static char* const * const punt_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = punt_ip4_nodes,
+ [DPO_PROTO_IP6] = punt_ip6_nodes,
+ [DPO_PROTO_MPLS] = punt_mpls_nodes,
+};
+
+void
+punt_dpo_module_init (void)
+{
+ dpo_register(DPO_PUNT, &punt_vft, punt_nodes);
+}
diff --git a/src/vnet/dpo/punt_dpo.h b/src/vnet/dpo/punt_dpo.h
new file mode 100644
index 00000000000..370547c1596
--- /dev/null
+++ b/src/vnet/dpo/punt_dpo.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief A DPO to punt packets to the Control-plane
+ */
+
+#ifndef __PUNT_DPO_H__
+#define __PUNT_DPO_H__
+
+#include <vnet/dpo/dpo.h>
+
+extern int dpo_is_punt(const dpo_id_t *dpo);
+
+extern const dpo_id_t *punt_dpo_get(dpo_proto_t proto);
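+
+/*
+ * Example (sketch): the punt DPO is a shared singleton per protocol,
+ * so callers fetch it rather than create one:
+ *
+ *   const dpo_id_t *punt = punt_dpo_get(DPO_PROTO_IP4);
+ *   ASSERT(dpo_is_punt(punt));
+ */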
+
+extern void punt_dpo_module_init(void);
+
+#endif
diff --git a/src/vnet/dpo/receive_dpo.c b/src/vnet/dpo/receive_dpo.c
new file mode 100644
index 00000000000..2b2571c6c83
--- /dev/null
+++ b/src/vnet/dpo/receive_dpo.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief
+ * The data-path object representing receiving the packet, i.e. it is destined for us
+ */
+#include <vlib/vlib.h>
+#include <vnet/ip/ip.h>
+#include <vnet/dpo/receive_dpo.h>
+
+/**
+ * @brief pool of all receive DPOs
+ */
+receive_dpo_t *receive_dpo_pool;
+
+static receive_dpo_t *
+receive_dpo_alloc (void)
+{
+ receive_dpo_t *rd;
+
+ pool_get_aligned(receive_dpo_pool, rd, CLIB_CACHE_LINE_BYTES);
+ memset(rd, 0, sizeof(*rd));
+
+ return (rd);
+}
+
+static receive_dpo_t *
+receive_dpo_get_from_dpo (const dpo_id_t *dpo)
+{
+ ASSERT(DPO_RECEIVE == dpo->dpoi_type);
+
+ return (receive_dpo_get(dpo->dpoi_index));
+}
+
+
+/*
+ * receive_dpo_add_or_lock
+ *
+ * The next_hop address here is used for source address selection in the DP.
+ * The local adjacency is added for an interface's receive prefix; the
+ * next-hop passed here is the local address on that same interface.
+ */
+void
+receive_dpo_add_or_lock (dpo_proto_t proto,
+ u32 sw_if_index,
+ const ip46_address_t *nh_addr,
+ dpo_id_t *dpo)
+{
+ receive_dpo_t *rd;
+
+ rd = receive_dpo_alloc();
+
+ rd->rd_sw_if_index = sw_if_index;
+ if (NULL != nh_addr)
+ {
+ rd->rd_addr = *nh_addr;
+ }
+
+ dpo_set(dpo, DPO_RECEIVE, proto, (rd - receive_dpo_pool));
+}
+
+static void
+receive_dpo_lock (dpo_id_t *dpo)
+{
+ receive_dpo_t *rd;
+
+ rd = receive_dpo_get_from_dpo(dpo);
+ rd->rd_locks++;
+}
+
+static void
+receive_dpo_unlock (dpo_id_t *dpo)
+{
+ receive_dpo_t *rd;
+
+ rd = receive_dpo_get_from_dpo(dpo);
+ rd->rd_locks--;
+
+ if (0 == rd->rd_locks)
+ {
+ pool_put(receive_dpo_pool, rd);
+ }
+}
+
+static u8*
+format_receive_dpo (u8 *s, va_list *ap)
+{
+ CLIB_UNUSED(index_t index) = va_arg(*ap, index_t);
+ CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
+ vnet_main_t * vnm = vnet_get_main();
+ receive_dpo_t *rd;
+
+ rd = receive_dpo_get(index);
+
+ if (~0 != rd->rd_sw_if_index)
+ {
+ return (format(s, "dpo-receive: %U on %U",
+ format_ip46_address, &rd->rd_addr, IP46_TYPE_ANY,
+ format_vnet_sw_interface_name, vnm,
+ vnet_get_sw_interface(vnm, rd->rd_sw_if_index)));
+ }
+ else
+ {
+ return (format(s, "dpo-receive"));
+ }
+}
+
+static void
+receive_dpo_mem_show (void)
+{
+ fib_show_memory_usage("Receive",
+ pool_elts(receive_dpo_pool),
+ pool_len(receive_dpo_pool),
+ sizeof(receive_dpo_t));
+}
+
+const static dpo_vft_t receive_vft = {
+ .dv_lock = receive_dpo_lock,
+ .dv_unlock = receive_dpo_unlock,
+ .dv_format = format_receive_dpo,
+ .dv_mem_show = receive_dpo_mem_show,
+};
+
+/**
+ * @brief The per-protocol VLIB graph nodes that are assigned to a receive
+ * object.
+ *
+ * i.e. these are the graph nodes to which packets are sent when a receive
+ * object is their parent in the DPO graph.
+ */
+const static char* const receive_ip4_nodes[] =
+{
+ "ip4-local",
+ NULL,
+};
+const static char* const receive_ip6_nodes[] =
+{
+ "ip6-local",
+ NULL,
+};
+
+const static char* const * const receive_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = receive_ip4_nodes,
+ [DPO_PROTO_IP6] = receive_ip6_nodes,
+ [DPO_PROTO_MPLS] = NULL,
+};
+
+void
+receive_dpo_module_init (void)
+{
+ dpo_register(DPO_RECEIVE, &receive_vft, receive_nodes);
+}
diff --git a/src/vnet/dpo/receive_dpo.h b/src/vnet/dpo/receive_dpo.h
new file mode 100644
index 00000000000..2420fd7843c
--- /dev/null
+++ b/src/vnet/dpo/receive_dpo.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief
+ * The data-path object representing receiving the packet, i.e. it is destined for us
+ */
+
+#ifndef __RECEIVE_DPO_H__
+#define __RECEIVE_DPO_H__
+
+#include <vnet/dpo/dpo.h>
+#include <vnet/ip/ip6.h>
+
+typedef struct receive_dpo_t_
+{
+ /**
+ * The Software interface index on which traffic is received
+ */
+ u32 rd_sw_if_index;
+
+ /**
+   * The address on the receive interface. Packets are destined to this address.
+ */
+ ip46_address_t rd_addr;
+
+ /**
+   * Number of locks.
+ */
+ u16 rd_locks;
+} receive_dpo_t;
+
+extern void receive_dpo_add_or_lock (dpo_proto_t proto,
+ u32 sw_if_index,
+ const ip46_address_t *nh_addr,
+ dpo_id_t *dpo);
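+
+/*
+ * Example (sketch, with hypothetical values): a receive DPO for local
+ * address 10.0.0.1 on sw_if_index 1; nh_addr feeds source-address
+ * selection in the data-path. DPO_INVALID is assumed from dpo.h.
+ *
+ *   ip46_address_t local = {
+ *       .ip4.as_u32 = clib_host_to_net_u32(0x0a000001),
+ *   };
+ *   dpo_id_t dpo = DPO_INVALID;
+ *
+ *   receive_dpo_add_or_lock(DPO_PROTO_IP4, 1, &local, &dpo);
+ */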
+
+extern void receive_dpo_module_init(void);
+
+/*
+ * Encapsulation violation for fast data-path access
+ */
+extern receive_dpo_t *receive_dpo_pool;
+
+static inline receive_dpo_t *
+receive_dpo_get (index_t index)
+{
+ return (pool_elt_at_index(receive_dpo_pool, index));
+}
+
+#endif
diff --git a/src/vnet/ethernet/arp.c b/src/vnet/ethernet/arp.c
new file mode 100644
index 00000000000..b388e778ac8
--- /dev/null
+++ b/src/vnet/ethernet/arp.c
@@ -0,0 +1,2355 @@
+/*
+ * ethernet/arp.c: IP v4 ARP node
+ *
+ * Copyright (c) 2010 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip.h>
+#include <vnet/ip/ip6.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ethernet/arp_packet.h>
+#include <vnet/l2/l2_input.h>
+#include <vppinfra/mhash.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/adj/adj_nbr.h>
+#include <vnet/mpls/mpls.h>
+
+/**
+ * @file
+ * @brief IPv4 ARP.
+ *
+ * This file contains code to manage the IPv4 ARP tables (IP Address
+ * to MAC Address lookup).
+ */
+
+
+void vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length);
+
+/**
+ * @brief Per-interface ARP configuration and state
+ */
+typedef struct ethernet_arp_interface_t_
+{
+ /**
+ * Hash table of ARP entries.
+ * Since this hash table is per-interface, the key is only the IPv4 address.
+ */
+ uword *arp_entries;
+} ethernet_arp_interface_t;
+
+typedef struct
+{
+ u32 lo_addr;
+ u32 hi_addr;
+ u32 fib_index;
+} ethernet_proxy_arp_t;
+
+typedef struct
+{
+ u32 next_index;
+ uword node_index;
+ uword type_opaque;
+ uword data;
+ /* Used for arp event notification only */
+ void *data_callback;
+ u32 pid;
+} pending_resolution_t;
+
+typedef struct
+{
+ /* Hash tables mapping name to opcode. */
+ uword *opcode_by_name;
+
+ /* lite beer "glean" adjacency handling */
+ uword *pending_resolutions_by_address;
+ pending_resolution_t *pending_resolutions;
+
+ /* Mac address change notification */
+ uword *mac_changes_by_address;
+ pending_resolution_t *mac_changes;
+
+ ethernet_arp_ip4_entry_t *ip4_entry_pool;
+
+ /* ARP attack mitigation */
+ u32 arp_delete_rotor;
+ u32 limit_arp_cache_size;
+
+ /** Per interface state */
+ ethernet_arp_interface_t *ethernet_arp_by_sw_if_index;
+
+ /* Proxy arp vector */
+ ethernet_proxy_arp_t *proxy_arps;
+} ethernet_arp_main_t;
+
+static ethernet_arp_main_t ethernet_arp_main;
+
+typedef struct
+{
+ u32 sw_if_index;
+ ethernet_arp_ip4_over_ethernet_address_t a;
+ int is_static;
+ int flags;
+#define ETHERNET_ARP_ARGS_REMOVE (1<<0)
+#define ETHERNET_ARP_ARGS_FLUSH (1<<1)
+#define ETHERNET_ARP_ARGS_POPULATE (1<<2)
+} vnet_arp_set_ip4_over_ethernet_rpc_args_t;
+
+static void
+set_ip4_over_ethernet_rpc_callback (vnet_arp_set_ip4_over_ethernet_rpc_args_t
+ * a);
+
+static u8 *
+format_ethernet_arp_hardware_type (u8 * s, va_list * va)
+{
+ ethernet_arp_hardware_type_t h = va_arg (*va, ethernet_arp_hardware_type_t);
+ char *t = 0;
+ switch (h)
+ {
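+      /* expand the hardware-type table into case arms: each (n,f)
+         pair becomes "case n: t = #f; break;" */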
+#define _(n,f) case n: t = #f; break;
+ foreach_ethernet_arp_hardware_type;
+#undef _
+
+ default:
+ return format (s, "unknown 0x%x", h);
+ }
+
+ return format (s, "%s", t);
+}
+
+static u8 *
+format_ethernet_arp_opcode (u8 * s, va_list * va)
+{
+ ethernet_arp_opcode_t o = va_arg (*va, ethernet_arp_opcode_t);
+ char *t = 0;
+ switch (o)
+ {
+#define _(f) case ETHERNET_ARP_OPCODE_##f: t = #f; break;
+ foreach_ethernet_arp_opcode;
+#undef _
+
+ default:
+ return format (s, "unknown 0x%x", o);
+ }
+
+ return format (s, "%s", t);
+}
+
+static uword
+unformat_ethernet_arp_opcode_host_byte_order (unformat_input_t * input,
+ va_list * args)
+{
+ int *result = va_arg (*args, int *);
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ int x, i;
+
+ /* Numeric opcode. */
+ if (unformat (input, "0x%x", &x) || unformat (input, "%d", &x))
+ {
+ if (x >= (1 << 16))
+ return 0;
+ *result = x;
+ return 1;
+ }
+
+ /* Named type. */
+ if (unformat_user (input, unformat_vlib_number_by_name,
+ am->opcode_by_name, &i))
+ {
+ *result = i;
+ return 1;
+ }
+
+ return 0;
+}
+
+static uword
+unformat_ethernet_arp_opcode_net_byte_order (unformat_input_t * input,
+ va_list * args)
+{
+ int *result = va_arg (*args, int *);
+ if (!unformat_user
+ (input, unformat_ethernet_arp_opcode_host_byte_order, result))
+ return 0;
+
+ *result = clib_host_to_net_u16 ((u16) * result);
+ return 1;
+}
+
+static u8 *
+format_ethernet_arp_header (u8 * s, va_list * va)
+{
+ ethernet_arp_header_t *a = va_arg (*va, ethernet_arp_header_t *);
+ u32 max_header_bytes = va_arg (*va, u32);
+ uword indent;
+ u16 l2_type, l3_type;
+
+ if (max_header_bytes != 0 && sizeof (a[0]) > max_header_bytes)
+ return format (s, "ARP header truncated");
+
+ l2_type = clib_net_to_host_u16 (a->l2_type);
+ l3_type = clib_net_to_host_u16 (a->l3_type);
+
+ indent = format_get_indent (s);
+
+ s = format (s, "%U, type %U/%U, address size %d/%d",
+ format_ethernet_arp_opcode, clib_net_to_host_u16 (a->opcode),
+ format_ethernet_arp_hardware_type, l2_type,
+ format_ethernet_type, l3_type,
+ a->n_l2_address_bytes, a->n_l3_address_bytes);
+
+ if (l2_type == ETHERNET_ARP_HARDWARE_TYPE_ethernet
+ && l3_type == ETHERNET_TYPE_IP4)
+ {
+ s = format (s, "\n%U%U/%U -> %U/%U",
+ format_white_space, indent,
+ format_ethernet_address, a->ip4_over_ethernet[0].ethernet,
+ format_ip4_address, &a->ip4_over_ethernet[0].ip4,
+ format_ethernet_address, a->ip4_over_ethernet[1].ethernet,
+ format_ip4_address, &a->ip4_over_ethernet[1].ip4);
+ }
+ else
+ {
+ uword n2 = a->n_l2_address_bytes;
+ uword n3 = a->n_l3_address_bytes;
+ s = format (s, "\n%U%U/%U -> %U/%U",
+ format_white_space, indent,
+ format_hex_bytes, a->data + 0 * n2 + 0 * n3, n2,
+ format_hex_bytes, a->data + 1 * n2 + 0 * n3, n3,
+ format_hex_bytes, a->data + 1 * n2 + 1 * n3, n2,
+ format_hex_bytes, a->data + 2 * n2 + 1 * n3, n3);
+ }
+
+ return s;
+}
+
+u8 *
+format_ethernet_arp_ip4_entry (u8 * s, va_list * va)
+{
+ vnet_main_t *vnm = va_arg (*va, vnet_main_t *);
+ ethernet_arp_ip4_entry_t *e = va_arg (*va, ethernet_arp_ip4_entry_t *);
+ vnet_sw_interface_t *si;
+ u8 *flags = 0;
+
+ if (!e)
+ return format (s, "%=12s%=16s%=6s%=20s%=24s", "Time", "IP4",
+ "Flags", "Ethernet", "Interface");
+
+ si = vnet_get_sw_interface (vnm, e->sw_if_index);
+
+ if (e->flags & ETHERNET_ARP_IP4_ENTRY_FLAG_STATIC)
+ flags = format (flags, "S");
+
+ if (e->flags & ETHERNET_ARP_IP4_ENTRY_FLAG_DYNAMIC)
+ flags = format (flags, "D");
+
+ s = format (s, "%=12U%=16U%=6s%=20U%=24U",
+ format_vlib_cpu_time, vnm->vlib_main, e->cpu_time_last_updated,
+ format_ip4_address, &e->ip4_address,
+ flags ? (char *) flags : "",
+ format_ethernet_address, e->ethernet_address,
+ format_vnet_sw_interface_name, vnm, si);
+
+ vec_free (flags);
+ return s;
+}
+
+typedef struct
+{
+ u8 packet_data[64];
+} ethernet_arp_input_trace_t;
+
+static u8 *
+format_ethernet_arp_input_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ ethernet_arp_input_trace_t *t = va_arg (*va, ethernet_arp_input_trace_t *);
+
+ s = format (s, "%U",
+ format_ethernet_arp_header,
+ t->packet_data, sizeof (t->packet_data));
+
+ return s;
+}
+
+static u8 *
+format_arp_term_input_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ ethernet_arp_input_trace_t *t = va_arg (*va, ethernet_arp_input_trace_t *);
+
+ /* arp-term trace data saved is either arp or ip6/icmp6 packet:
+ - for arp, the 1st 16-bit field is hw type of value of 0x0001.
+ - for ip6, the first nibble has value of 6. */
+ s = format (s, "%U", t->packet_data[0] == 0 ?
+ format_ethernet_arp_header : format_ip6_header,
+ t->packet_data, sizeof (t->packet_data));
+
+ return s;
+}
+
+static void
+arp_nbr_probe (ip_adjacency_t * adj)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip4_main_t *im = &ip4_main;
+ ip_interface_address_t *ia;
+ ethernet_arp_header_t *h;
+ vnet_hw_interface_t *hi;
+ vnet_sw_interface_t *si;
+ ip4_address_t *src;
+ vlib_buffer_t *b;
+ vlib_main_t *vm;
+ u32 bi = 0;
+
+ vm = vlib_get_main ();
+
+ si = vnet_get_sw_interface (vnm, adj->rewrite_header.sw_if_index);
+
+ if (!(si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP))
+ {
+ return;
+ }
+
+ src =
+ ip4_interface_address_matching_destination (im,
+ &adj->sub_type.nbr.next_hop.
+ ip4,
+ adj->rewrite_header.
+ sw_if_index, &ia);
+ if (!src)
+ {
+ return;
+ }
+
+ h =
+ vlib_packet_template_get_packet (vm, &im->ip4_arp_request_packet_template,
+ &bi);
+
+ hi = vnet_get_sup_hw_interface (vnm, adj->rewrite_header.sw_if_index);
+
+ clib_memcpy (h->ip4_over_ethernet[0].ethernet,
+ hi->hw_address, sizeof (h->ip4_over_ethernet[0].ethernet));
+
+ h->ip4_over_ethernet[0].ip4 = src[0];
+ h->ip4_over_ethernet[1].ip4 = adj->sub_type.nbr.next_hop.ip4;
+
+ b = vlib_get_buffer (vm, bi);
+ vnet_buffer (b)->sw_if_index[VLIB_RX] =
+ vnet_buffer (b)->sw_if_index[VLIB_TX] = adj->rewrite_header.sw_if_index;
+
+ /* Add encapsulation string for software interface (e.g. ethernet header). */
+ vnet_rewrite_one_header (adj[0], h, sizeof (ethernet_header_t));
+ vlib_buffer_advance (b, -adj->rewrite_header.data_bytes);
+
+ {
+ vlib_frame_t *f = vlib_get_frame_to_node (vm, hi->output_node_index);
+ u32 *to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi;
+ f->n_vectors = 1;
+ vlib_put_frame_to_node (vm, hi->output_node_index, f);
+ }
+}
+
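+/**
+ * @brief Mark a neighbour adjacency complete: install an ethernet
+ * rewrite string built from the ARP entry's MAC address.
+ */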
+static void
+arp_mk_complete (adj_index_t ai, ethernet_arp_ip4_entry_t * e)
+{
+ adj_nbr_update_rewrite
+ (ai, ADJ_NBR_REWRITE_FLAG_COMPLETE,
+ ethernet_build_rewrite (vnet_get_main (),
+ e->sw_if_index,
+ adj_get_link_type (ai), e->ethernet_address));
+}
+
+static void
+arp_mk_incomplete (adj_index_t ai)
+{
+ ip_adjacency_t *adj = adj_get (ai);
+
+ adj_nbr_update_rewrite
+ (ai,
+ ADJ_NBR_REWRITE_FLAG_INCOMPLETE,
+ ethernet_build_rewrite (vnet_get_main (),
+ adj->rewrite_header.sw_if_index,
+ VNET_LINK_ARP,
+ VNET_REWRITE_FOR_SW_INTERFACE_ADDRESS_BROADCAST));
+}
+
+static ethernet_arp_ip4_entry_t *
+arp_entry_find (ethernet_arp_interface_t * eai, const ip4_address_t * addr)
+{
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ ethernet_arp_ip4_entry_t *e = NULL;
+ uword *p;
+
+ if (NULL != eai->arp_entries)
+ {
+ p = hash_get (eai->arp_entries, addr->as_u32);
+ if (!p)
+ return (NULL);
+
+ e = pool_elt_at_index (am->ip4_entry_pool, p[0]);
+ }
+
+ return (e);
+}
+
+static adj_walk_rc_t
+arp_mk_complete_walk (adj_index_t ai, void *ctx)
+{
+ ethernet_arp_ip4_entry_t *e = ctx;
+
+ arp_mk_complete (ai, e);
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+static adj_walk_rc_t
+arp_mk_incomplete_walk (adj_index_t ai, void *ctx)
+{
+ arp_mk_incomplete (ai);
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
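+/**
+ * @brief Hook invoked when a neighbour adjacency on sw_if_index needs
+ * (re)stacking. If an ARP entry exists for the next-hop, the adjacency
+ * and its siblings are completed immediately; otherwise an incomplete
+ * (broadcast) rewrite is installed and one speculative ARP request is
+ * sent.
+ */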
+void
+arp_update_adjacency (vnet_main_t * vnm, u32 sw_if_index, u32 ai)
+{
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ ethernet_arp_interface_t *arp_int;
+ ethernet_arp_ip4_entry_t *e;
+ ip_adjacency_t *adj;
+
+ adj = adj_get (ai);
+
+ vec_validate (am->ethernet_arp_by_sw_if_index, sw_if_index);
+ arp_int = &am->ethernet_arp_by_sw_if_index[sw_if_index];
+ e = arp_entry_find (arp_int, &adj->sub_type.nbr.next_hop.ip4);
+
+ if (NULL != e)
+ {
+ adj_nbr_walk_nh4 (sw_if_index,
+ &e->ip4_address, arp_mk_complete_walk, e);
+ }
+ else
+ {
+      /*
+       * No matching ARP entry. Construct the rewrite required to
+       * send an ARP packet, and install it in the adjacency until
+       * the peer is resolved.
+       */
+ adj_nbr_update_rewrite (ai,
+ ADJ_NBR_REWRITE_FLAG_INCOMPLETE,
+ ethernet_build_rewrite (vnm,
+ sw_if_index,
+ VNET_LINK_ARP,
+ VNET_REWRITE_FOR_SW_INTERFACE_ADDRESS_BROADCAST));
+
+      /*
+       * Since the FIB has added this adj for a route, it will likely
+       * want to forward traffic soon, so send a single speculative ARP
+       * request. Re-sending periodically would also be reasonable, but
+       * that is more code than the modest reward currently justifies.
+       */
+ arp_nbr_probe (adj);
+ }
+}
+
+int
+vnet_arp_set_ip4_over_ethernet_internal (vnet_main_t * vnm,
+ vnet_arp_set_ip4_over_ethernet_rpc_args_t
+ * args)
+{
+ ethernet_arp_ip4_entry_t *e = 0;
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ ethernet_arp_ip4_over_ethernet_address_t *a = &args->a;
+ vlib_main_t *vm = vlib_get_main ();
+ int make_new_arp_cache_entry = 1;
+ uword *p;
+ pending_resolution_t *pr, *mc;
+ ethernet_arp_interface_t *arp_int;
+ int is_static = args->is_static;
+ u32 sw_if_index = args->sw_if_index;
+
+ vec_validate (am->ethernet_arp_by_sw_if_index, sw_if_index);
+
+ arp_int = &am->ethernet_arp_by_sw_if_index[sw_if_index];
+
+ if (NULL != arp_int->arp_entries)
+ {
+ p = hash_get (arp_int->arp_entries, a->ip4.as_u32);
+ if (p)
+ {
+ e = pool_elt_at_index (am->ip4_entry_pool, p[0]);
+
+	  /* Refuse to overwrite a static ARP entry. */
+ if (!is_static && (e->flags & ETHERNET_ARP_IP4_ENTRY_FLAG_STATIC))
+ return -2;
+ make_new_arp_cache_entry = 0;
+ }
+ }
+
+ if (make_new_arp_cache_entry)
+ {
+ fib_prefix_t pfx = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4 = a->ip4,
+ }
+ ,
+ };
+ u32 fib_index;
+
+ pool_get (am->ip4_entry_pool, e);
+
+ if (NULL == arp_int->arp_entries)
+ {
+ arp_int->arp_entries = hash_create (0, sizeof (u32));
+ }
+
+ hash_set (arp_int->arp_entries, a->ip4.as_u32, e - am->ip4_entry_pool);
+
+ e->sw_if_index = sw_if_index;
+ e->ip4_address = a->ip4;
+ clib_memcpy (e->ethernet_address,
+ a->ethernet, sizeof (e->ethernet_address));
+
+ fib_index = ip4_fib_table_get_index_for_sw_if_index (e->sw_if_index);
+ e->fib_entry_index =
+ fib_table_entry_update_one_path (fib_index,
+ &pfx,
+ FIB_SOURCE_ADJ,
+ FIB_ENTRY_FLAG_ATTACHED,
+ FIB_PROTOCOL_IP4,
+ &pfx.fp_addr,
+ e->sw_if_index,
+ ~0,
+ 1, NULL, FIB_ROUTE_PATH_FLAG_NONE);
+ }
+ else
+ {
+ /*
+ * prevent a DoS attack from the data-plane that
+ * spams us with no-op updates to the MAC address
+ */
+ if (0 == memcmp (e->ethernet_address,
+ a->ethernet, sizeof (e->ethernet_address)))
+ return -1;
+
+ /* Update time stamp and ethernet address. */
+ clib_memcpy (e->ethernet_address, a->ethernet,
+ sizeof (e->ethernet_address));
+ }
+
+ e->cpu_time_last_updated = clib_cpu_time_now ();
+ if (is_static)
+ e->flags |= ETHERNET_ARP_IP4_ENTRY_FLAG_STATIC;
+ else
+ e->flags |= ETHERNET_ARP_IP4_ENTRY_FLAG_DYNAMIC;
+
+ adj_nbr_walk_nh4 (sw_if_index, &e->ip4_address, arp_mk_complete_walk, e);
+
+ /* Customer(s) waiting for this address to be resolved? */
+ p = hash_get (am->pending_resolutions_by_address, a->ip4.as_u32);
+ if (p)
+ {
+ u32 next_index;
+ next_index = p[0];
+
+ while (next_index != (u32) ~ 0)
+ {
+ pr = pool_elt_at_index (am->pending_resolutions, next_index);
+ vlib_process_signal_event (vm, pr->node_index,
+ pr->type_opaque, pr->data);
+ next_index = pr->next_index;
+ pool_put (am->pending_resolutions, pr);
+ }
+
+ hash_unset (am->pending_resolutions_by_address, a->ip4.as_u32);
+ }
+
+ /* Customer(s) requesting ARP event for this address? */
+ p = hash_get (am->mac_changes_by_address, a->ip4.as_u32);
+ if (p)
+ {
+ u32 next_index;
+ next_index = p[0];
+
+ while (next_index != (u32) ~ 0)
+ {
+ int (*fp) (u32, u8 *, u32, u32);
+ int rv = 1;
+ mc = pool_elt_at_index (am->mac_changes, next_index);
+ fp = mc->data_callback;
+
+ /* Call the user's data callback, return 1 to suppress dup events */
+ if (fp)
+ rv = (*fp) (mc->data, a->ethernet, sw_if_index, 0);
+
+ /*
+ * Signal the resolver process, as long as the user
+ * says they want to be notified
+ */
+ if (rv == 0)
+ vlib_process_signal_event (vm, mc->node_index,
+ mc->type_opaque, mc->data);
+ next_index = mc->next_index;
+ }
+ }
+
+ return 0;
+}
+
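+/**
+ * @brief Register a one-shot notification for when an address resolves.
+ * Once the ARP entry is installed, the registered process node is sent
+ * a vlib process event and the registration is deleted. A minimal usage
+ * sketch, modelled on the debug CLI in this file (type_opaque 1 and
+ * data 0 are illustrative values):
+ *
+ * @code
+ *   uword event_type, *event_data = 0;
+ *   vnet_register_ip4_arp_resolution_event
+ *     (vnm, &addr.ip4, vlib_current_process (vm), 1, 0);
+ *   vnet_arp_set_ip4_over_ethernet (vnm, sw_if_index, &addr, 0);
+ *   vlib_process_wait_for_event (vm);
+ *   event_type = vlib_process_get_events (vm, &event_data);
+ * @endcode
+ */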
+void
+vnet_register_ip4_arp_resolution_event (vnet_main_t * vnm,
+ void *address_arg,
+ uword node_index,
+ uword type_opaque, uword data)
+{
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ ip4_address_t *address = address_arg;
+ uword *p;
+ pending_resolution_t *pr;
+
+ pool_get (am->pending_resolutions, pr);
+
+ pr->next_index = ~0;
+ pr->node_index = node_index;
+ pr->type_opaque = type_opaque;
+ pr->data = data;
+ pr->data_callback = 0;
+
+ p = hash_get (am->pending_resolutions_by_address, address->as_u32);
+ if (p)
+ {
+ /* Insert new resolution at the head of the list */
+ pr->next_index = p[0];
+ hash_unset (am->pending_resolutions_by_address, address->as_u32);
+ }
+
+ hash_set (am->pending_resolutions_by_address, address->as_u32,
+ pr - am->pending_resolutions);
+}
+
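+/**
+ * @brief Add or delete a MAC-change notification registration.
+ * On the notification path the data_callback, if set, is invoked from
+ * the data plane with the learned MAC; a non-zero return suppresses the
+ * process event. A sketch of a conforming callback (the name is
+ * hypothetical):
+ *
+ * @code
+ *   static int
+ *   arp_change_data_callback (u32 data, u8 * new_mac,
+ *                             u32 sw_if_index, u32 ip4_as_u32)
+ *   {
+ *     return 1;  // suppress duplicate event notifications
+ *   }
+ * @endcode
+ */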
+int
+vnet_add_del_ip4_arp_change_event (vnet_main_t * vnm,
+ void *data_callback,
+ u32 pid,
+ void *address_arg,
+ uword node_index,
+ uword type_opaque, uword data, int is_add)
+{
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ ip4_address_t *address = address_arg;
+ uword *p;
+ pending_resolution_t *mc;
+ void (*fp) (u32, u8 *) = data_callback;
+
+ if (is_add)
+ {
+ pool_get (am->mac_changes, mc);
+
+ mc->next_index = ~0;
+ mc->node_index = node_index;
+ mc->type_opaque = type_opaque;
+ mc->data = data;
+ mc->data_callback = data_callback;
+ mc->pid = pid;
+
+ p = hash_get (am->mac_changes_by_address, address->as_u32);
+ if (p)
+ {
+ /* Insert new resolution at the head of the list */
+ mc->next_index = p[0];
+ hash_unset (am->mac_changes_by_address, address->as_u32);
+ }
+
+ hash_set (am->mac_changes_by_address, address->as_u32,
+ mc - am->mac_changes);
+ return 0;
+ }
+ else
+ {
+ u32 index;
+ pending_resolution_t *mc_last = 0;
+
+ p = hash_get (am->mac_changes_by_address, address->as_u32);
+ if (p == 0)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ index = p[0];
+
+ while (index != (u32) ~ 0)
+ {
+ mc = pool_elt_at_index (am->mac_changes, index);
+ if (mc->node_index == node_index &&
+ mc->type_opaque == type_opaque && mc->pid == pid)
+ {
+ /* Clients may need to clean up pool entries, too */
+ if (fp)
+ (*fp) (mc->data, 0 /* no new mac addrs */ );
+ if (index == p[0])
+ {
+ hash_unset (am->mac_changes_by_address, address->as_u32);
+ if (mc->next_index != ~0)
+ hash_set (am->mac_changes_by_address, address->as_u32,
+ mc->next_index);
+ pool_put (am->mac_changes, mc);
+ return 0;
+ }
+ else
+ {
+ ASSERT (mc_last);
+ mc_last->next_index = mc->next_index;
+ pool_put (am->mac_changes, mc);
+ return 0;
+ }
+ }
+ mc_last = mc;
+ index = mc->next_index;
+ }
+
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+ }
+}
+
+/* Either we drop the packet or we send a reply to the sender. */
+typedef enum
+{
+ ARP_INPUT_NEXT_DROP,
+ ARP_INPUT_NEXT_REPLY_TX,
+ ARP_INPUT_N_NEXT,
+} arp_input_next_t;
+
+#define foreach_ethernet_arp_error \
+ _ (replies_sent, "ARP replies sent") \
+ _ (l2_type_not_ethernet, "L2 type not ethernet") \
+ _ (l3_type_not_ip4, "L3 type not IP4") \
+ _ (l3_src_address_not_local, "IP4 source address not local to subnet") \
+ _ (l3_dst_address_not_local, "IP4 destination address not local to subnet") \
+ _ (l3_src_address_is_local, "IP4 source address matches local interface") \
+ _ (l3_src_address_learned, "ARP request IP4 source address learned") \
+ _ (replies_received, "ARP replies received") \
+ _ (opcode_not_request, "ARP opcode not request") \
+ _ (proxy_arp_replies_sent, "Proxy ARP replies sent") \
+ _ (l2_address_mismatch, "ARP hw addr does not match L2 frame src addr") \
+ _ (missing_interface_address, "ARP missing interface address") \
+ _ (gratuitous_arp, "ARP probe or announcement dropped") \
+ _ (interface_no_table, "Interface is not mapped to an IP table") \
+
+typedef enum
+{
+#define _(sym,string) ETHERNET_ARP_ERROR_##sym,
+ foreach_ethernet_arp_error
+#undef _
+ ETHERNET_ARP_N_ERROR,
+} ethernet_arp_input_error_t;
+
+
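+/**
+ * @brief ARP cache size limiting: evict one entry, chosen by a rotor
+ * that remembers the last pool index visited, so successive calls
+ * sweep the whole pool instead of repeatedly evicting the same victim.
+ */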
+static void
+unset_random_arp_entry (void)
+{
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ ethernet_arp_ip4_entry_t *e;
+ vnet_main_t *vnm = vnet_get_main ();
+ ethernet_arp_ip4_over_ethernet_address_t delme;
+ u32 index;
+
+ index = pool_next_index (am->ip4_entry_pool, am->arp_delete_rotor);
+ am->arp_delete_rotor = index;
+
+ /* Try again from elt 0, could happen if an intfc goes down */
+ if (index == ~0)
+ {
+ index = pool_next_index (am->ip4_entry_pool, am->arp_delete_rotor);
+ am->arp_delete_rotor = index;
+ }
+
+ /* Nothing left in the pool */
+ if (index == ~0)
+ return;
+
+ e = pool_elt_at_index (am->ip4_entry_pool, index);
+
+ clib_memcpy (&delme.ethernet, e->ethernet_address, 6);
+ delme.ip4.as_u32 = e->ip4_address.as_u32;
+
+ vnet_arp_unset_ip4_over_ethernet (vnm, e->sw_if_index, &delme);
+}
+
+static int
+arp_unnumbered (vlib_buffer_t * p0,
+ u32 pi0, ethernet_header_t * eth0, u32 sw_if_index)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *vim = &vnm->interface_main;
+ vnet_sw_interface_t *si;
+ vnet_hw_interface_t *hi;
+ u32 unnum_src_sw_if_index;
+ u32 *broadcast_swifs = 0;
+ u32 *buffers = 0;
+ u32 n_alloc = 0;
+ vlib_buffer_t *b0;
+ int i;
+ u8 dst_mac_address[6];
+ i16 header_size;
+ ethernet_arp_header_t *arp0;
+
+ /* Save the dst mac address */
+ clib_memcpy (dst_mac_address, eth0->dst_address, sizeof (dst_mac_address));
+
+ /* Figure out which sw_if_index supplied the address */
+ unnum_src_sw_if_index = sw_if_index;
+
+ /* Track down all users of the unnumbered source */
+ /* *INDENT-OFF* */
+ pool_foreach (si, vim->sw_interfaces,
+ ({
+ if (si->flags & VNET_SW_INTERFACE_FLAG_UNNUMBERED &&
+ (si->unnumbered_sw_if_index == unnum_src_sw_if_index))
+ {
+ vec_add1 (broadcast_swifs, si->sw_if_index);
+ }
+ }));
+ /* *INDENT-ON* */
+
+  /* If there are no interfaces unnumbered to this interface,
+     we are done here. */
+ if (0 == vec_len (broadcast_swifs))
+ return 0;
+
+ /* Allocate buffering if we need it */
+ if (vec_len (broadcast_swifs) > 1)
+ {
+ vec_validate (buffers, vec_len (broadcast_swifs) - 2);
+ n_alloc = vlib_buffer_alloc (vm, buffers, vec_len (buffers));
+ _vec_len (buffers) = n_alloc;
+ for (i = 0; i < n_alloc; i++)
+ {
+ b0 = vlib_get_buffer (vm, buffers[i]);
+
+	  /* Copy the (partially built) ARP packet */
+ clib_memcpy (b0->data, p0->data,
+ p0->current_length + p0->current_data);
+ b0->current_data = p0->current_data;
+ b0->current_length = p0->current_length;
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] =
+ vnet_buffer (p0)->sw_if_index[VLIB_RX];
+ }
+ }
+
+ vec_insert (buffers, 1, 0);
+ buffers[0] = pi0;
+
+ for (i = 0; i < vec_len (buffers); i++)
+ {
+ b0 = vlib_get_buffer (vm, buffers[i]);
+ arp0 = vlib_buffer_get_current (b0);
+
+ hi = vnet_get_sup_hw_interface (vnm, broadcast_swifs[i]);
+ si = vnet_get_sw_interface (vnm, broadcast_swifs[i]);
+
+ /* For decoration, most likely */
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = hi->sw_if_index;
+
+ /* Fix ARP pkt src address */
+ clib_memcpy (arp0->ip4_over_ethernet[0].ethernet, hi->hw_address, 6);
+
+ /* Build L2 encaps for this swif */
+ header_size = sizeof (ethernet_header_t);
+ if (si->sub.eth.flags.one_tag)
+ header_size += 4;
+ else if (si->sub.eth.flags.two_tags)
+ header_size += 8;
+
+ vlib_buffer_advance (b0, -header_size);
+ eth0 = vlib_buffer_get_current (b0);
+
+ if (si->sub.eth.flags.one_tag)
+ {
+ ethernet_vlan_header_t *outer = (void *) (eth0 + 1);
+
+ eth0->type = si->sub.eth.flags.dot1ad ?
+ clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD) :
+ clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
+ outer->priority_cfi_and_id =
+ clib_host_to_net_u16 (si->sub.eth.outer_vlan_id);
+ outer->type = clib_host_to_net_u16 (ETHERNET_TYPE_ARP);
+
+ }
+ else if (si->sub.eth.flags.two_tags)
+ {
+ ethernet_vlan_header_t *outer = (void *) (eth0 + 1);
+ ethernet_vlan_header_t *inner = (void *) (outer + 1);
+
+ eth0->type = si->sub.eth.flags.dot1ad ?
+ clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD) :
+ clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
+ outer->priority_cfi_and_id =
+ clib_host_to_net_u16 (si->sub.eth.outer_vlan_id);
+ outer->type = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
+ inner->priority_cfi_and_id =
+ clib_host_to_net_u16 (si->sub.eth.inner_vlan_id);
+ inner->type = clib_host_to_net_u16 (ETHERNET_TYPE_ARP);
+
+ }
+ else
+ {
+ eth0->type = clib_host_to_net_u16 (ETHERNET_TYPE_ARP);
+ }
+
+ /* Restore the original dst address, set src address */
+ clib_memcpy (eth0->dst_address, dst_mac_address,
+ sizeof (eth0->dst_address));
+ clib_memcpy (eth0->src_address, hi->hw_address,
+ sizeof (eth0->src_address));
+
+ /* Transmit replicas */
+ if (i > 0)
+ {
+ vlib_frame_t *f =
+ vlib_get_frame_to_node (vm, hi->output_node_index);
+ u32 *to_next = vlib_frame_vector_args (f);
+ to_next[0] = buffers[i];
+ f->n_vectors = 1;
+ vlib_put_frame_to_node (vm, hi->output_node_index, f);
+ }
+ }
+
+  /* The regular path outputs the original packet. */
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = broadcast_swifs[0];
+
+ vec_free (broadcast_swifs);
+ vec_free (buffers);
+
+ return !0;
+}
+
+static uword
+arp_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ ip4_main_t *im4 = &ip4_main;
+ u32 n_left_from, next_index, *from, *to_next;
+ u32 n_replies_sent = 0, n_proxy_arp_replies_sent = 0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
+ /* stride */ 1,
+ sizeof (ethernet_arp_input_trace_t));
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ vnet_hw_interface_t *hw_if0;
+ ethernet_arp_header_t *arp0;
+ ethernet_header_t *eth0;
+ ip_adjacency_t *adj0;
+ ip4_address_t *if_addr0, proxy_src;
+ u32 pi0, error0, next0, sw_if_index0, conn_sw_if_index0, fib_index0;
+ u8 is_request0, dst_is_local0, is_unnum0;
+ ethernet_proxy_arp_t *pa;
+ fib_node_index_t dst_fei, src_fei;
+ fib_prefix_t pfx0;
+ fib_entry_flag_t src_flags, dst_flags;
+
+ pi0 = from[0];
+ to_next[0] = pi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+ pa = 0;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ arp0 = vlib_buffer_get_current (p0);
+
+ is_request0 = arp0->opcode
+ == clib_host_to_net_u16 (ETHERNET_ARP_OPCODE_request);
+
+ error0 = ETHERNET_ARP_ERROR_replies_sent;
+
+ error0 =
+ (arp0->l2_type !=
+ clib_net_to_host_u16 (ETHERNET_ARP_HARDWARE_TYPE_ethernet) ?
+ ETHERNET_ARP_ERROR_l2_type_not_ethernet : error0);
+ error0 =
+ (arp0->l3_type !=
+ clib_net_to_host_u16 (ETHERNET_TYPE_IP4) ?
+ ETHERNET_ARP_ERROR_l3_type_not_ip4 : error0);
+
+ sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
+
+ if (error0)
+ goto drop2;
+
+ /* Check that IP address is local and matches incoming interface. */
+ fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
+ if (~0 == fib_index0)
+ {
+ error0 = ETHERNET_ARP_ERROR_interface_no_table;
+ goto drop2;
+
+ }
+ dst_fei = ip4_fib_table_lookup (ip4_fib_get (fib_index0),
+ &arp0->ip4_over_ethernet[1].ip4,
+ 32);
+ dst_flags = fib_entry_get_flags_for_source (dst_fei,
+ FIB_SOURCE_INTERFACE);
+
+ conn_sw_if_index0 =
+ fib_entry_get_resolving_interface_for_source (dst_fei,
+ FIB_SOURCE_INTERFACE);
+
+ if (!(FIB_ENTRY_FLAG_CONNECTED & dst_flags))
+ {
+ error0 = ETHERNET_ARP_ERROR_l3_dst_address_not_local;
+ goto drop1;
+ }
+
+ /* Honor unnumbered interface, if any */
+ is_unnum0 = sw_if_index0 != conn_sw_if_index0;
+
+ /* Source must also be local to subnet of matching interface address. */
+ src_fei = ip4_fib_table_lookup (ip4_fib_get (fib_index0),
+ &arp0->ip4_over_ethernet[0].ip4,
+ 32);
+ src_flags = fib_entry_get_flags (src_fei);
+
+ if (!((FIB_ENTRY_FLAG_ATTACHED & src_flags) ||
+ (FIB_ENTRY_FLAG_CONNECTED & src_flags)) ||
+ sw_if_index0 != fib_entry_get_resolving_interface (src_fei))
+ {
+ error0 = ETHERNET_ARP_ERROR_l3_src_address_not_local;
+ goto drop2;
+ }
+
+ /* Reject requests/replies with our local interface address. */
+ if (FIB_ENTRY_FLAG_LOCAL & src_flags)
+ {
+ error0 = ETHERNET_ARP_ERROR_l3_src_address_is_local;
+ goto drop2;
+ }
+
+ dst_is_local0 = (FIB_ENTRY_FLAG_LOCAL & dst_flags);
+ fib_entry_get_prefix (dst_fei, &pfx0);
+ if_addr0 = &pfx0.fp_addr.ip4;
+
+ /* Fill in ethernet header. */
+ eth0 = ethernet_buffer_get_header (p0);
+
+ /* Trash ARP packets whose ARP-level source addresses do not
+ match their L2-frame-level source addresses */
+ if (memcmp (eth0->src_address, arp0->ip4_over_ethernet[0].ethernet,
+ sizeof (eth0->src_address)))
+ {
+ error0 = ETHERNET_ARP_ERROR_l2_address_mismatch;
+ goto drop2;
+ }
+
+ /* Learn or update sender's mapping only for requests or unicasts
+ that don't match local interface address. */
+ if (ethernet_address_cast (eth0->dst_address) ==
+ ETHERNET_ADDRESS_UNICAST || is_request0)
+ {
+ if (am->limit_arp_cache_size &&
+ pool_elts (am->ip4_entry_pool) >= am->limit_arp_cache_size)
+ unset_random_arp_entry ();
+
+ vnet_arp_set_ip4_over_ethernet (vnm, sw_if_index0,
+ &arp0->ip4_over_ethernet[0],
+ 0 /* is_static */ );
+ error0 = ETHERNET_ARP_ERROR_l3_src_address_learned;
+ }
+
+	  /* Only send a reply to requests that target a local interface address. */
+ if (!(is_request0 && dst_is_local0))
+ {
+ error0 =
+ (arp0->opcode ==
+ clib_host_to_net_u16 (ETHERNET_ARP_OPCODE_reply) ?
+ ETHERNET_ARP_ERROR_replies_received : error0);
+ goto drop1;
+ }
+
+	  /* Send a reply back through the input interface. */
+	send_reply:
+	  vnet_buffer (p0)->sw_if_index[VLIB_TX] = sw_if_index0;
+	  hw_if0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
+	  next0 = ARP_INPUT_NEXT_REPLY_TX;
+
+ arp0->opcode = clib_host_to_net_u16 (ETHERNET_ARP_OPCODE_reply);
+
+ arp0->ip4_over_ethernet[1] = arp0->ip4_over_ethernet[0];
+
+ clib_memcpy (arp0->ip4_over_ethernet[0].ethernet,
+ hw_if0->hw_address, 6);
+ clib_mem_unaligned (&arp0->ip4_over_ethernet[0].ip4.data_u32, u32) =
+ if_addr0->data_u32;
+
+ /* Hardware must be ethernet-like. */
+ ASSERT (vec_len (hw_if0->hw_address) == 6);
+
+ clib_memcpy (eth0->dst_address, eth0->src_address, 6);
+ clib_memcpy (eth0->src_address, hw_if0->hw_address, 6);
+
+	  /* Figure out how much to rewind current data from the adjacency:
+	     get the adj from the destination's covering connected prefix. */
+ if (NULL == pa)
+ {
+ adj0 =
+ adj_get (fib_entry_get_adj_for_source
+ (ip4_fib_table_lookup
+ (ip4_fib_get (fib_index0),
+ &arp0->ip4_over_ethernet[1].ip4, 31),
+ FIB_SOURCE_INTERFACE));
+ if (adj0->lookup_next_index != IP_LOOKUP_NEXT_GLEAN)
+ {
+ error0 = ETHERNET_ARP_ERROR_missing_interface_address;
+ goto drop2;
+ }
+ if (is_unnum0)
+ {
+ if (!arp_unnumbered (p0, pi0, eth0, conn_sw_if_index0))
+ goto drop2;
+ }
+ else
+ vlib_buffer_advance (p0, -adj0->rewrite_header.data_bytes);
+ }
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, pi0, next0);
+
+ n_replies_sent += 1;
+ continue;
+
+ drop1:
+ if (0 == arp0->ip4_over_ethernet[0].ip4.as_u32 ||
+ (arp0->ip4_over_ethernet[0].ip4.as_u32 ==
+ arp0->ip4_over_ethernet[1].ip4.as_u32))
+ {
+ error0 = ETHERNET_ARP_ERROR_gratuitous_arp;
+ goto drop2;
+ }
+ /* See if proxy arp is configured for the address */
+ if (is_request0)
+ {
+ vnet_sw_interface_t *si;
+ u32 this_addr = clib_net_to_host_u32
+ (arp0->ip4_over_ethernet[1].ip4.as_u32);
+ u32 fib_index0;
+
+ si = vnet_get_sw_interface (vnm, sw_if_index0);
+
+ if (!(si->flags & VNET_SW_INTERFACE_FLAG_PROXY_ARP))
+ goto drop2;
+
+ fib_index0 = vec_elt (im4->fib_index_by_sw_if_index,
+ sw_if_index0);
+
+ vec_foreach (pa, am->proxy_arps)
+ {
+ u32 lo_addr = clib_net_to_host_u32 (pa->lo_addr);
+ u32 hi_addr = clib_net_to_host_u32 (pa->hi_addr);
+
+ /* an ARP request hit in the proxy-arp table? */
+ if ((this_addr >= lo_addr && this_addr <= hi_addr) &&
+ (fib_index0 == pa->fib_index))
+ {
+ eth0 = ethernet_buffer_get_header (p0);
+ proxy_src.as_u32 =
+ arp0->ip4_over_ethernet[1].ip4.data_u32;
+
+		  /*
+		   * Rewind the buffer to the ethernet header so the
+		   * reply path above needs no special handling for
+		   * the proxy case.
+		   */
+ if_addr0 = &proxy_src;
+ is_unnum0 = 0;
+ i32 ethernet_start =
+ vnet_buffer (p0)->ethernet.start_of_ethernet_header;
+ i32 rewind = p0->current_data - ethernet_start;
+ vlib_buffer_advance (p0, -rewind);
+ n_proxy_arp_replies_sent++;
+ goto send_reply;
+ }
+ }
+ }
+
+ drop2:
+
+ next0 = ARP_INPUT_NEXT_DROP;
+ p0->error = node->errors[error0];
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, pi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_error_count (vm, node->node_index,
+ ETHERNET_ARP_ERROR_replies_sent,
+ n_replies_sent - n_proxy_arp_replies_sent);
+
+ vlib_error_count (vm, node->node_index,
+ ETHERNET_ARP_ERROR_proxy_arp_replies_sent,
+ n_proxy_arp_replies_sent);
+ return frame->n_vectors;
+}
+
+static char *ethernet_arp_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ethernet_arp_error
+#undef _
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (arp_input_node, static) =
+{
+ .function = arp_input,
+ .name = "arp-input",
+ .vector_size = sizeof (u32),
+ .n_errors = ETHERNET_ARP_N_ERROR,
+ .error_strings = ethernet_arp_error_strings,
+ .n_next_nodes = ARP_INPUT_N_NEXT,
+ .next_nodes = {
+ [ARP_INPUT_NEXT_DROP] = "error-drop",
+ [ARP_INPUT_NEXT_REPLY_TX] = "interface-output",
+ },
+ .format_buffer = format_ethernet_arp_header,
+ .format_trace = format_ethernet_arp_input_trace,
+};
+/* *INDENT-ON* */
+
+static int
+ip4_arp_entry_sort (void *a1, void *a2)
+{
+ ethernet_arp_ip4_entry_t *e1 = a1;
+ ethernet_arp_ip4_entry_t *e2 = a2;
+
+ int cmp;
+ vnet_main_t *vnm = vnet_get_main ();
+
+ cmp = vnet_sw_interface_compare (vnm, e1->sw_if_index, e2->sw_if_index);
+ if (!cmp)
+ cmp = ip4_address_compare (&e1->ip4_address, &e2->ip4_address);
+ return cmp;
+}
+
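+/**
+ * @brief Return a sorted vector of ARP entries, optionally filtered by
+ * interface (pass ~0 for all interfaces). The caller owns the vector.
+ * A minimal sketch of the expected usage, mirroring the CLI below:
+ *
+ * @code
+ *   ethernet_arp_ip4_entry_t *e, *es;
+ *   es = ip4_neighbor_entries (sw_if_index);
+ *   vec_foreach (e, es)
+ *     vlib_cli_output (vm, "%U", format_ethernet_arp_ip4_entry, vnm, e);
+ *   vec_free (es);
+ * @endcode
+ */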
+ethernet_arp_ip4_entry_t *
+ip4_neighbor_entries (u32 sw_if_index)
+{
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ ethernet_arp_ip4_entry_t *n, *ns = 0;
+
+ /* *INDENT-OFF* */
+ pool_foreach (n, am->ip4_entry_pool, ({
+ if (sw_if_index != ~0 && n->sw_if_index != sw_if_index)
+ continue;
+ vec_add1 (ns, n[0]);
+ }));
+ /* *INDENT-ON* */
+
+ if (ns)
+ vec_sort_with_function (ns, ip4_arp_entry_sort);
+ return ns;
+}
+
+static clib_error_t *
+show_ip4_arp (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+  /* The saved arp-term trace data is either an ARP or an IP6/ICMP6 packet:
+     - for ARP, the first 16-bit field is the hw type, with value 0x0001;
+     - for IP6, the first nibble has the value 6. */
+ u32 sw_if_index;
+
+ /* Filter entries by interface if given. */
+ sw_if_index = ~0;
+ (void) unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index);
+
+ es = ip4_neighbor_entries (sw_if_index);
+ if (es)
+ {
+ vlib_cli_output (vm, "%U", format_ethernet_arp_ip4_entry, vnm, 0);
+ vec_foreach (e, es)
+ {
+ vlib_cli_output (vm, "%U", format_ethernet_arp_ip4_entry, vnm, e);
+ }
+ vec_free (es);
+ }
+
+ if (vec_len (am->proxy_arps))
+ {
+ vlib_cli_output (vm, "Proxy arps enabled for:");
+ vec_foreach (pa, am->proxy_arps)
+ {
+ vlib_cli_output (vm, "Fib_index %d %U - %U ",
+ pa->fib_index,
+ format_ip4_address, &pa->lo_addr,
+ format_ip4_address, &pa->hi_addr);
+ }
+ }
+
+ return error;
+}
+
+/*?
+ * Display all the IPv4 ARP entries.
+ *
+ * @cliexpar
+ * Example of how to display the IPv4 ARP table:
+ * @cliexstart{show ip arp}
+ *    Time          IP4            Flags Ethernet            Interface
+ *    346.3028      6.1.1.3              de:ad:be:ef:ba:be   GigabitEthernet2/0/0
+ *    3077.4271     6.1.1.4        S     de:ad:be:ef:ff:ff   GigabitEthernet2/0/0
+ *    2998.6409     6.2.2.3              de:ad:be:ef:00:01   GigabitEthernet2/0/0
+ * Proxy arps enabled for:
+ * Fib_index 0 6.0.0.1 - 6.0.0.11
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_ip4_arp_command, static) = {
+ .path = "show ip arp",
+ .function = show_ip4_arp,
+ .short_help = "show ip arp",
+};
+/* *INDENT-ON* */
+
+typedef struct
+{
+ pg_edit_t l2_type, l3_type;
+ pg_edit_t n_l2_address_bytes, n_l3_address_bytes;
+ pg_edit_t opcode;
+ struct
+ {
+ pg_edit_t ethernet;
+ pg_edit_t ip4;
+ } ip4_over_ethernet[2];
+} pg_ethernet_arp_header_t;
+
+static inline void
+pg_ethernet_arp_header_init (pg_ethernet_arp_header_t * p)
+{
+  /* Initialize fields that are not bit fields in the ARP header. */
+#define _(f) pg_edit_init (&p->f, ethernet_arp_header_t, f);
+ _(l2_type);
+ _(l3_type);
+ _(n_l2_address_bytes);
+ _(n_l3_address_bytes);
+ _(opcode);
+ _(ip4_over_ethernet[0].ethernet);
+ _(ip4_over_ethernet[0].ip4);
+ _(ip4_over_ethernet[1].ethernet);
+ _(ip4_over_ethernet[1].ip4);
+#undef _
+}
+
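+/**
+ * @brief Packet-generator edit function for ARP headers.
+ * Going by the unformat spec below, a stream specification would look
+ * roughly like this (all values illustrative):
+ *
+ * @code
+ *   request: 00:01:02:03:04:05/1.2.3.4 -> 00:00:00:00:00:00/5.6.7.8
+ * @endcode
+ */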
+uword
+unformat_pg_arp_header (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t *s = va_arg (*args, pg_stream_t *);
+ pg_ethernet_arp_header_t *p;
+ u32 group_index;
+
+ p = pg_create_edit_group (s, sizeof (p[0]), sizeof (ethernet_arp_header_t),
+ &group_index);
+ pg_ethernet_arp_header_init (p);
+
+ /* Defaults. */
+ pg_edit_set_fixed (&p->l2_type, ETHERNET_ARP_HARDWARE_TYPE_ethernet);
+ pg_edit_set_fixed (&p->l3_type, ETHERNET_TYPE_IP4);
+ pg_edit_set_fixed (&p->n_l2_address_bytes, 6);
+ pg_edit_set_fixed (&p->n_l3_address_bytes, 4);
+
+ if (!unformat (input, "%U: %U/%U -> %U/%U",
+ unformat_pg_edit,
+ unformat_ethernet_arp_opcode_net_byte_order, &p->opcode,
+ unformat_pg_edit,
+ unformat_ethernet_address, &p->ip4_over_ethernet[0].ethernet,
+ unformat_pg_edit,
+ unformat_ip4_address, &p->ip4_over_ethernet[0].ip4,
+ unformat_pg_edit,
+ unformat_ethernet_address, &p->ip4_over_ethernet[1].ethernet,
+ unformat_pg_edit,
+ unformat_ip4_address, &p->ip4_over_ethernet[1].ip4))
+ {
+ /* Free up any edits we may have added. */
+ pg_free_edit_group (s);
+ return 0;
+ }
+ return 1;
+}
+
+clib_error_t *
+ip4_set_arp_limit (u32 arp_limit)
+{
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+
+ am->limit_arp_cache_size = arp_limit;
+ return 0;
+}
+
+/**
+ * @brief Control Plane hook to remove an ARP entry
+ */
+int
+vnet_arp_unset_ip4_over_ethernet (vnet_main_t * vnm,
+ u32 sw_if_index, void *a_arg)
+{
+ ethernet_arp_ip4_over_ethernet_address_t *a = a_arg;
+ vnet_arp_set_ip4_over_ethernet_rpc_args_t args;
+
+ args.sw_if_index = sw_if_index;
+ args.flags = ETHERNET_ARP_ARGS_REMOVE;
+ clib_memcpy (&args.a, a, sizeof (*a));
+
+ vl_api_rpc_call_main_thread (set_ip4_over_ethernet_rpc_callback,
+ (u8 *) & args, sizeof (args));
+ return 0;
+}
+
+/**
+ * @brief Internally generated event to flush the ARP cache on an
+ * interface state change event.
+ * A flush will remove dynamic ARP entries, and for statics remove the MAC
+ * address from the corresponding adjacencies.
+ */
+static int
+vnet_arp_flush_ip4_over_ethernet (vnet_main_t * vnm,
+ u32 sw_if_index, void *a_arg)
+{
+ ethernet_arp_ip4_over_ethernet_address_t *a = a_arg;
+ vnet_arp_set_ip4_over_ethernet_rpc_args_t args;
+
+ args.sw_if_index = sw_if_index;
+ args.flags = ETHERNET_ARP_ARGS_FLUSH;
+ clib_memcpy (&args.a, a, sizeof (*a));
+
+ vl_api_rpc_call_main_thread (set_ip4_over_ethernet_rpc_callback,
+ (u8 *) & args, sizeof (args));
+ return 0;
+}
+
+/**
+ * @brief Internally generated event to populate the ARP cache on an
+ * interface state change event.
+ * For static entries this will re-source the adjacencies.
+ *
+ * @param sw_if_index The interface on which the ARP entries are acted upon
+ */
+static int
+vnet_arp_populate_ip4_over_ethernet (vnet_main_t * vnm,
+ u32 sw_if_index, void *a_arg)
+{
+ ethernet_arp_ip4_over_ethernet_address_t *a = a_arg;
+ vnet_arp_set_ip4_over_ethernet_rpc_args_t args;
+
+ args.sw_if_index = sw_if_index;
+ args.flags = ETHERNET_ARP_ARGS_POPULATE;
+ clib_memcpy (&args.a, a, sizeof (*a));
+
+ vl_api_rpc_call_main_thread (set_ip4_over_ethernet_rpc_callback,
+ (u8 *) & args, sizeof (args));
+ return 0;
+}
+
+/*
+ * arp_add_del_interface_address
+ *
+ * callback when an interface address is added or deleted
+ */
+static void
+arp_add_del_interface_address (ip4_main_t * im,
+ uword opaque,
+ u32 sw_if_index,
+ ip4_address_t * address,
+ u32 address_length,
+ u32 if_address_index, u32 is_del)
+{
+ /*
+ * Flush the ARP cache of all entries covered by the address
+ * that is being removed.
+ */
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ ethernet_arp_ip4_entry_t *e;
+
+ if (vec_len (am->ethernet_arp_by_sw_if_index) <= sw_if_index)
+ return;
+
+ if (is_del)
+ {
+ ethernet_arp_interface_t *eai;
+ u32 i, *to_delete = 0;
+ hash_pair_t *pair;
+
+ eai = &am->ethernet_arp_by_sw_if_index[sw_if_index];
+
+ /* *INDENT-OFF* */
+ hash_foreach_pair (pair, eai->arp_entries,
+ ({
+ e = pool_elt_at_index(am->ip4_entry_pool,
+ pair->value[0]);
+ if (ip4_destination_matches_route (im, &e->ip4_address,
+ address, address_length))
+ {
+ vec_add1 (to_delete, e - am->ip4_entry_pool);
+ }
+ }));
+ /* *INDENT-ON* */
+
+ for (i = 0; i < vec_len (to_delete); i++)
+ {
+ ethernet_arp_ip4_over_ethernet_address_t delme;
+ e = pool_elt_at_index (am->ip4_entry_pool, to_delete[i]);
+
+ clib_memcpy (&delme.ethernet, e->ethernet_address, 6);
+ delme.ip4.as_u32 = e->ip4_address.as_u32;
+
+ vnet_arp_flush_ip4_over_ethernet (vnet_get_main (),
+ e->sw_if_index, &delme);
+ }
+
+ vec_free (to_delete);
+ }
+}
+
+static clib_error_t *
+ethernet_arp_init (vlib_main_t * vm)
+{
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ ip4_main_t *im = &ip4_main;
+ clib_error_t *error;
+ pg_node_t *pn;
+
+ if ((error = vlib_call_init_function (vm, ethernet_init)))
+ return error;
+
+ ethernet_register_input_type (vm, ETHERNET_TYPE_ARP, arp_input_node.index);
+
+ pn = pg_get_node (arp_input_node.index);
+ pn->unformat_edit = unformat_pg_arp_header;
+
+ am->opcode_by_name = hash_create_string (0, sizeof (uword));
+#define _(o) hash_set_mem (am->opcode_by_name, #o, ETHERNET_ARP_OPCODE_##o);
+ foreach_ethernet_arp_opcode;
+#undef _
+
+ /* $$$ configurable */
+ am->limit_arp_cache_size = 50000;
+
+ am->pending_resolutions_by_address = hash_create (0, sizeof (uword));
+ am->mac_changes_by_address = hash_create (0, sizeof (uword));
+
+ /* don't trace ARP error packets */
+ {
+ vlib_node_runtime_t *rt =
+ vlib_node_get_runtime (vm, arp_input_node.index);
+
+#define _(a,b) \
+ vnet_pcap_drop_trace_filter_add_del \
+ (rt->errors[ETHERNET_ARP_ERROR_##a], \
+ 1 /* is_add */);
+ foreach_ethernet_arp_error
+#undef _
+ }
+
+ ip4_add_del_interface_address_callback_t cb;
+ cb.function = arp_add_del_interface_address;
+ cb.function_opaque = 0;
+ vec_add1 (im->add_del_interface_address_callbacks, cb);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ethernet_arp_init);
+
+static void
+arp_entry_free (ethernet_arp_interface_t * eai, ethernet_arp_ip4_entry_t * e)
+{
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+
+ fib_table_entry_delete_index (e->fib_entry_index, FIB_SOURCE_ADJ);
+ hash_unset (eai->arp_entries, e->ip4_address.as_u32);
+ pool_put (am->ip4_entry_pool, e);
+}
+
+static inline int
+vnet_arp_unset_ip4_over_ethernet_internal (vnet_main_t * vnm,
+ vnet_arp_set_ip4_over_ethernet_rpc_args_t
+ * args)
+{
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ ethernet_arp_ip4_entry_t *e;
+ ethernet_arp_interface_t *eai;
+
+ eai = &am->ethernet_arp_by_sw_if_index[args->sw_if_index];
+
+ e = arp_entry_find (eai, &args->a.ip4);
+
+  if (NULL != e)
+    {
+      /* Walk the adjacencies before freeing the entry: arp_entry_free
+         returns e to the pool, after which e must not be dereferenced. */
+      adj_nbr_walk_nh4 (e->sw_if_index,
+			&e->ip4_address, arp_mk_incomplete_walk, NULL);
+
+      arp_entry_free (eai, e);
+    }
+
+ return 0;
+}
+
+static int
+vnet_arp_flush_ip4_over_ethernet_internal (vnet_main_t * vnm,
+ vnet_arp_set_ip4_over_ethernet_rpc_args_t
+ * args)
+{
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ ethernet_arp_ip4_entry_t *e;
+ ethernet_arp_interface_t *eai;
+
+ eai = &am->ethernet_arp_by_sw_if_index[args->sw_if_index];
+
+ e = arp_entry_find (eai, &args->a.ip4);
+
+ if (NULL != e)
+ {
+ adj_nbr_walk_nh4 (e->sw_if_index,
+ &e->ip4_address, arp_mk_incomplete_walk, e);
+
+ /*
+       * The difference between flush and unset is that an unset
+       * means delete for both static and dynamic entries, while a
+       * flush deletes only dynamic ones. Flushing is what the DP
+       * does in response to interface events; unset is only done
+       * by the control plane.
+ */
+ if (e->flags & ETHERNET_ARP_IP4_ENTRY_FLAG_DYNAMIC)
+ {
+ arp_entry_free (eai, e);
+ }
+ }
+ return (0);
+}
+
+static int
+vnet_arp_populate_ip4_over_ethernet_internal (vnet_main_t * vnm,
+ vnet_arp_set_ip4_over_ethernet_rpc_args_t
+ * args)
+{
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ ethernet_arp_ip4_entry_t *e;
+ ethernet_arp_interface_t *eai;
+
+ eai = &am->ethernet_arp_by_sw_if_index[args->sw_if_index];
+
+ e = arp_entry_find (eai, &args->a.ip4);
+
+ if (NULL != e)
+ {
+ adj_nbr_walk_nh4 (e->sw_if_index,
+ &e->ip4_address, arp_mk_complete_walk, e);
+ }
+ return (0);
+}
+
+static void
+set_ip4_over_ethernet_rpc_callback (vnet_arp_set_ip4_over_ethernet_rpc_args_t
+ * a)
+{
+ vnet_main_t *vm = vnet_get_main ();
+ ASSERT (os_get_cpu_number () == 0);
+
+ if (a->flags & ETHERNET_ARP_ARGS_REMOVE)
+ vnet_arp_unset_ip4_over_ethernet_internal (vm, a);
+ else if (a->flags & ETHERNET_ARP_ARGS_FLUSH)
+ vnet_arp_flush_ip4_over_ethernet_internal (vm, a);
+ else if (a->flags & ETHERNET_ARP_ARGS_POPULATE)
+ vnet_arp_populate_ip4_over_ethernet_internal (vm, a);
+ else
+ vnet_arp_set_ip4_over_ethernet_internal (vm, a);
+}
+
+/**
+ * @brief Invoked when the interface's admin state changes
+ */
+static clib_error_t *
+ethernet_arp_sw_interface_up_down (vnet_main_t * vnm,
+ u32 sw_if_index, u32 flags)
+{
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ ethernet_arp_ip4_entry_t *e;
+ u32 i, *to_delete = 0;
+
+ /* *INDENT-OFF* */
+ pool_foreach (e, am->ip4_entry_pool,
+ ({
+ if (e->sw_if_index == sw_if_index)
+ vec_add1 (to_delete,
+ e - am->ip4_entry_pool);
+ }));
+ /* *INDENT-ON* */
+
+ for (i = 0; i < vec_len (to_delete); i++)
+ {
+ ethernet_arp_ip4_over_ethernet_address_t delme;
+ e = pool_elt_at_index (am->ip4_entry_pool, to_delete[i]);
+
+ clib_memcpy (&delme.ethernet, e->ethernet_address, 6);
+ delme.ip4.as_u32 = e->ip4_address.as_u32;
+
+ if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ {
+ vnet_arp_populate_ip4_over_ethernet (vnm, e->sw_if_index, &delme);
+ }
+ else
+ {
+ vnet_arp_flush_ip4_over_ethernet (vnm, e->sw_if_index, &delme);
+ }
+
+ }
+ vec_free (to_delete);
+
+ return 0;
+}
+
+VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ethernet_arp_sw_interface_up_down);
+
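+/**
+ * @brief Increment both addresses byte-wise with carry, so the debug
+ * CLI's 'count N' option generates N consecutive (IP, MAC) pairs;
+ * e.g. 1.2.3.255 increments to 1.2.4.0.
+ */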
+static void
+increment_ip4_and_mac_address (ethernet_arp_ip4_over_ethernet_address_t * a)
+{
+ u8 old;
+ int i;
+
+ for (i = 3; i >= 0; i--)
+ {
+ old = a->ip4.as_u8[i];
+ a->ip4.as_u8[i] += 1;
+ if (old < a->ip4.as_u8[i])
+ break;
+ }
+
+ for (i = 5; i >= 0; i--)
+ {
+ old = a->ethernet[i];
+ a->ethernet[i] += 1;
+ if (old < a->ethernet[i])
+ break;
+ }
+}
+
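+/**
+ * @brief Control-plane hook to add or update an ARP entry; the work is
+ * RPC'd to the main thread. A minimal sketch (addresses illustrative):
+ *
+ * @code
+ *   ethernet_arp_ip4_over_ethernet_address_t a;
+ *   u8 mac[6] = { 0xde, 0xad, 0xbe, 0xef, 0xba, 0xbe };
+ *   a.ip4.as_u8[0] = 6; a.ip4.as_u8[1] = 0;
+ *   a.ip4.as_u8[2] = 0; a.ip4.as_u8[3] = 3;
+ *   clib_memcpy (a.ethernet, mac, 6);
+ *   vnet_arp_set_ip4_over_ethernet (vnm, sw_if_index, &a, 1);  // static
+ * @endcode
+ */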
+int
+vnet_arp_set_ip4_over_ethernet (vnet_main_t * vnm,
+ u32 sw_if_index, void *a_arg, int is_static)
+{
+ ethernet_arp_ip4_over_ethernet_address_t *a = a_arg;
+ vnet_arp_set_ip4_over_ethernet_rpc_args_t args;
+
+ args.sw_if_index = sw_if_index;
+ args.is_static = is_static;
+ args.flags = 0;
+ clib_memcpy (&args.a, a, sizeof (*a));
+
+ vl_api_rpc_call_main_thread (set_ip4_over_ethernet_rpc_callback,
+ (u8 *) & args, sizeof (args));
+ return 0;
+}
+
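+/**
+ * @brief Add or delete a proxy-ARP address range in a FIB. The range
+ * bounds are kept in network byte order. A sketch (range illustrative):
+ *
+ * @code
+ *   ip4_address_t lo, hi;
+ *   lo.as_u32 = clib_host_to_net_u32 (0x06000001);  // 6.0.0.1
+ *   hi.as_u32 = clib_host_to_net_u32 (0x0600000b);  // 6.0.0.11
+ *   vnet_proxy_arp_add_del (&lo, &hi, 0, 0);  // fib_index 0, add
+ * @endcode
+ */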
+int
+vnet_proxy_arp_add_del (ip4_address_t * lo_addr,
+ ip4_address_t * hi_addr, u32 fib_index, int is_del)
+{
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ ethernet_proxy_arp_t *pa;
+ u32 found_at_index = ~0;
+
+ vec_foreach (pa, am->proxy_arps)
+ {
+ if (pa->lo_addr == lo_addr->as_u32
+ && pa->hi_addr == hi_addr->as_u32 && pa->fib_index == fib_index)
+ {
+ found_at_index = pa - am->proxy_arps;
+ break;
+ }
+ }
+
+ if (found_at_index != ~0)
+ {
+ /* Delete, otherwise it's already in the table */
+ if (is_del)
+ vec_delete (am->proxy_arps, 1, found_at_index);
+ return 0;
+ }
+ /* delete, no such entry */
+ if (is_del)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ /* add, not in table */
+ vec_add2 (am->proxy_arps, pa, 1);
+ pa->lo_addr = lo_addr->as_u32;
+ pa->hi_addr = hi_addr->as_u32;
+ pa->fib_index = fib_index;
+ return 0;
+}
+
+/*
+ * Remove any proxy ARP entries associated with the
+ * specified FIB.
+ */
+int
+vnet_proxy_arp_fib_reset (u32 fib_id)
+{
+ ip4_main_t *im = &ip4_main;
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ ethernet_proxy_arp_t *pa;
+ u32 *entries_to_delete = 0;
+ u32 fib_index;
+ uword *p;
+ int i;
+
+ p = hash_get (im->fib_index_by_table_id, fib_id);
+ if (!p)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+ fib_index = p[0];
+
+ vec_foreach (pa, am->proxy_arps)
+ {
+ if (pa->fib_index == fib_index)
+ {
+ vec_add1 (entries_to_delete, pa - am->proxy_arps);
+ }
+ }
+
+ for (i = 0; i < vec_len (entries_to_delete); i++)
+ {
+ vec_delete (am->proxy_arps, 1, entries_to_delete[i]);
+ }
+
+ vec_free (entries_to_delete);
+
+ return 0;
+}
+
+static clib_error_t *
+ip_arp_add_del_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 sw_if_index;
+ ethernet_arp_ip4_over_ethernet_address_t lo_addr, hi_addr, addr;
+ int addr_valid = 0;
+ int is_del = 0;
+ int count = 1;
+ u32 fib_index = 0;
+ u32 fib_id;
+ int is_static = 0;
+ int is_proxy = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ /* set ip arp TenGigE1/1/0/1 1.2.3.4 aa:bb:... or aabb.ccdd... */
+ if (unformat (input, "%U %U %U",
+ unformat_vnet_sw_interface, vnm, &sw_if_index,
+ unformat_ip4_address, &addr.ip4,
+ unformat_ethernet_address, &addr.ethernet))
+ addr_valid = 1;
+
+ else if (unformat (input, "delete") || unformat (input, "del"))
+ is_del = 1;
+
+ else if (unformat (input, "static"))
+ is_static = 1;
+
+ else if (unformat (input, "count %d", &count))
+ ;
+
+ else if (unformat (input, "fib-id %d", &fib_id))
+ {
+ ip4_main_t *im = &ip4_main;
+ uword *p = hash_get (im->fib_index_by_table_id, fib_id);
+ if (!p)
+ return clib_error_return (0, "fib ID %d doesn't exist\n", fib_id);
+ fib_index = p[0];
+ }
+
+ else if (unformat (input, "proxy %U - %U",
+ unformat_ip4_address, &lo_addr.ip4,
+ unformat_ip4_address, &hi_addr.ip4))
+ is_proxy = 1;
+ else
+ break;
+ }
+
+ if (is_proxy)
+ {
+ (void) vnet_proxy_arp_add_del (&lo_addr.ip4, &hi_addr.ip4,
+ fib_index, is_del);
+ return 0;
+ }
+
+ if (addr_valid)
+ {
+ int i;
+
+ for (i = 0; i < count; i++)
+ {
+ if (is_del == 0)
+ {
+ uword event_type, *event_data = 0;
+
+ /* Park the debug CLI until the arp entry is installed */
+ vnet_register_ip4_arp_resolution_event
+ (vnm, &addr.ip4, vlib_current_process (vm),
+ 1 /* type */ , 0 /* data */ );
+
+ vnet_arp_set_ip4_over_ethernet
+ (vnm, sw_if_index, &addr, is_static);
+
+ vlib_process_wait_for_event (vm);
+ event_type = vlib_process_get_events (vm, &event_data);
+ vec_reset_length (event_data);
+ if (event_type != 1)
+ clib_warning ("event type %d unexpected", event_type);
+ }
+ else
+ vnet_arp_unset_ip4_over_ethernet (vnm, sw_if_index, &addr);
+
+ increment_ip4_and_mac_address (&addr);
+ }
+ }
+ else
+ {
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+/*?
+ * Add or delete IPv4 ARP cache entries.
+ *
+ * @note 'set ip arp' options (e.g. delete, static, 'fib-id <id>',
+ * 'count <number>', 'interface ip4_addr mac_addr') can be added in
+ * any order and combination.
+ *
+ * @cliexpar
+ * @parblock
+ * Add or delete IPv4 ARP cache entries as follows. MAC Address can be in
+ * either aa:bb:cc:dd:ee:ff format or aabb.ccdd.eeff format.
+ * @cliexcmd{set ip arp GigabitEthernet2/0/0 6.0.0.3 dead.beef.babe}
+ * @cliexcmd{set ip arp delete GigabitEthernet2/0/0 6.0.0.3 de:ad:be:ef:ba:be}
+ *
+ * To add or delete an IPv4 ARP cache entry to or from a specific fib
+ * table:
+ * @cliexcmd{set ip arp fib-id 1 GigabitEthernet2/0/0 6.0.0.3 dead.beef.babe}
+ * @cliexcmd{set ip arp fib-id 1 delete GigabitEthernet2/0/0 6.0.0.3 dead.beef.babe}
+ *
+ * Add or delete IPv4 static ARP cache entries as follows:
+ * @cliexcmd{set ip arp static GigabitEthernet2/0/0 6.0.0.3 dead.beef.babe}
+ * @cliexcmd{set ip arp static delete GigabitEthernet2/0/0 6.0.0.3 dead.beef.babe}
+ *
+ * For testing / debugging purposes, the 'set ip arp' command can add or
+ * delete multiple entries. Supply the 'count N' parameter:
+ * @cliexcmd{set ip arp count 10 GigabitEthernet2/0/0 6.0.0.3 dead.beef.babe}
+ * @endparblock
+ ?*/
+VLIB_CLI_COMMAND (ip_arp_add_del_command, static) = {
+ .path = "set ip arp",
+ .short_help =
+ "set ip arp [del] <intfc> <ip-address> <mac-address> [static] [count <count>] [fib-id <fib-id>] [proxy <lo-addr> - <hi-addr>]",
+ .function = ip_arp_add_del_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_int_proxy_arp_command_fn (vlib_main_t * vm,
+ unformat_input_t *
+ input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 sw_if_index;
+ vnet_sw_interface_t *si;
+ int enable = 0;
+ int intfc_set = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%U", unformat_vnet_sw_interface,
+ vnm, &sw_if_index))
+ intfc_set = 1;
+ else if (unformat (input, "enable") || unformat (input, "on"))
+ enable = 1;
+ else if (unformat (input, "disable") || unformat (input, "off"))
+ enable = 0;
+ else
+ break;
+ }
+
+ if (intfc_set == 0)
+ return clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, input);
+
+ si = vnet_get_sw_interface (vnm, sw_if_index);
+ ASSERT (si);
+ if (enable)
+ si->flags |= VNET_SW_INTERFACE_FLAG_PROXY_ARP;
+ else
+ si->flags &= ~VNET_SW_INTERFACE_FLAG_PROXY_ARP;
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+/*?
+ * Enable proxy-arp on an interface. The vpp stack will answer ARP
+ * requests for the indicated address range. Multiple proxy-arp
+ * ranges may be provisioned.
+ *
+ * @note Proxy ARP as a technology is infamous for blackholing traffic.
+ * Also, the underlying implementation has not been performance-tuned.
+ * Avoid creating an unnecessarily large set of ranges.
+ *
+ * @cliexpar
+ * To enable proxy arp on a range of addresses, use:
+ * @cliexcmd{set ip arp proxy 6.0.0.1 - 6.0.0.11}
+ * Append 'del' to delete a range of proxy ARP addresses:
+ * @cliexcmd{set ip arp proxy 6.0.0.1 - 6.0.0.11 del}
+ * You must then specifically enable proxy arp on individual interfaces:
+ * @cliexcmd{set interface proxy-arp GigabitEthernet0/8/0 enable}
+ * To disable proxy arp on an individual interface:
+ * @cliexcmd{set interface proxy-arp GigabitEthernet0/8/0 disable}
+ ?*/
+VLIB_CLI_COMMAND (set_int_proxy_enable_command, static) = {
+ .path = "set interface proxy-arp",
+ .short_help =
+ "set interface proxy-arp <intfc> [enable|disable]",
+ .function = set_int_proxy_arp_command_fn,
+};
+/* *INDENT-ON* */
+
+
+/*
+ * ARP/ND termination in an L2 bridge domain, based on the per-BD
+ * IP4/IP6-to-MAC hash tables mac_by_ip4 and mac_by_ip6.
+ */
+typedef enum
+{
+ ARP_TERM_NEXT_L2_OUTPUT,
+ ARP_TERM_NEXT_DROP,
+ ARP_TERM_N_NEXT,
+} arp_term_next_t;
+
+u32 arp_term_next_node_index[32];
+
+static uword
+arp_term_l2bd (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ l2input_main_t *l2im = &l2input_main;
+ u32 n_left_from, next_index, *from, *to_next;
+ u32 n_replies_sent = 0;
+ u16 last_bd_index = ~0;
+ l2_bridge_domain_t *last_bd_config = 0;
+ l2_input_config_t *cfg0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ ethernet_header_t *eth0;
+ ethernet_arp_header_t *arp0;
+ ip6_header_t *iph0;
+ u8 *l3h0;
+ u32 pi0, error0, next0, sw_if_index0;
+ u16 ethertype0;
+ u16 bd_index0;
+ u32 ip0;
+ u8 *macp0;
+
+ pi0 = from[0];
+ to_next[0] = pi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ eth0 = vlib_buffer_get_current (p0);
+ l3h0 = (u8 *) eth0 + vnet_buffer (p0)->l2.l2_len;
+ ethertype0 = clib_net_to_host_u16 (*(u16 *) (l3h0 - 2));
+ arp0 = (ethernet_arp_header_t *) l3h0;
+
+ if (PREDICT_FALSE ((ethertype0 != ETHERNET_TYPE_ARP) ||
+ (arp0->opcode !=
+ clib_host_to_net_u16
+ (ETHERNET_ARP_OPCODE_request))))
+ goto check_ip6_nd;
+
+ /* Must be ARP request packet here */
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (p0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ u8 *t0 = vlib_add_trace (vm, node, p0,
+ sizeof (ethernet_arp_input_trace_t));
+ clib_memcpy (t0, l3h0, sizeof (ethernet_arp_input_trace_t));
+ }
+
+ error0 = ETHERNET_ARP_ERROR_replies_sent;
+ error0 =
+ (arp0->l2_type !=
+ clib_net_to_host_u16 (ETHERNET_ARP_HARDWARE_TYPE_ethernet)
+ ? ETHERNET_ARP_ERROR_l2_type_not_ethernet : error0);
+ error0 =
+ (arp0->l3_type !=
+ clib_net_to_host_u16 (ETHERNET_TYPE_IP4) ?
+ ETHERNET_ARP_ERROR_l3_type_not_ip4 : error0);
+
+ sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
+
+ if (error0)
+ goto drop;
+
+ /* Trash ARP packets whose ARP-level source addresses do not
+ match their L2-frame-level source addresses */
+ if (PREDICT_FALSE
+ (memcmp
+ (eth0->src_address, arp0->ip4_over_ethernet[0].ethernet,
+ sizeof (eth0->src_address))))
+ {
+ error0 = ETHERNET_ARP_ERROR_l2_address_mismatch;
+ goto drop;
+ }
+
+	  /* Check if anyone wants ARP request events for L2 BDs */
+ {
+ pending_resolution_t *mc;
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ uword *p = hash_get (am->mac_changes_by_address, 0);
+ if (p && (vnet_buffer (p0)->l2.shg == 0))
+	      { // Only SHG 0 interfaces, which are more likely local
+ u32 next_index = p[0];
+ while (next_index != (u32) ~ 0)
+ {
+ int (*fp) (u32, u8 *, u32, u32);
+ int rv = 1;
+ mc = pool_elt_at_index (am->mac_changes, next_index);
+ fp = mc->data_callback;
+ /* Call the callback, return 1 to suppress dup events */
+ if (fp)
+ rv = (*fp) (mc->data,
+ arp0->ip4_over_ethernet[0].ethernet,
+ sw_if_index0,
+ arp0->ip4_over_ethernet[0].ip4.as_u32);
+ /* Signal the resolver process */
+ if (rv == 0)
+ vlib_process_signal_event (vm, mc->node_index,
+ mc->type_opaque, mc->data);
+ next_index = mc->next_index;
+ }
+ }
+ }
+
+ /* lookup BD mac_by_ip4 hash table for MAC entry */
+ ip0 = arp0->ip4_over_ethernet[1].ip4.as_u32;
+ bd_index0 = vnet_buffer (p0)->l2.bd_index;
+ if (PREDICT_FALSE ((bd_index0 != last_bd_index)
+ || (last_bd_index == (u16) ~ 0)))
+ {
+ last_bd_index = bd_index0;
+ last_bd_config = vec_elt_at_index (l2im->bd_configs, bd_index0);
+ }
+ macp0 = (u8 *) hash_get (last_bd_config->mac_by_ip4, ip0);
+
+ if (PREDICT_FALSE (!macp0))
+ goto next_l2_feature; /* MAC not found */
+
+ /* MAC found, send ARP reply -
+ Convert ARP request packet to ARP reply */
+ arp0->opcode = clib_host_to_net_u16 (ETHERNET_ARP_OPCODE_reply);
+ arp0->ip4_over_ethernet[1] = arp0->ip4_over_ethernet[0];
+ arp0->ip4_over_ethernet[0].ip4.as_u32 = ip0;
+ clib_memcpy (arp0->ip4_over_ethernet[0].ethernet, macp0, 6);
+ clib_memcpy (eth0->dst_address, eth0->src_address, 6);
+ clib_memcpy (eth0->src_address, macp0, 6);
+ n_replies_sent += 1;
+
+ output_response:
+	  /* For a BVI, the l2-fwd node must be used to send the ARP reply,
+	     as the l2-output node cannot output a packet to a BVI properly */
+ cfg0 = vec_elt_at_index (l2im->configs, sw_if_index0);
+ if (PREDICT_FALSE (cfg0->bvi))
+ {
+ vnet_buffer (p0)->l2.feature_bitmap |= L2INPUT_FEAT_FWD;
+ vnet_buffer (p0)->sw_if_index[VLIB_RX] = 0;
+ goto next_l2_feature;
+ }
+
+ /* Send ARP/ND reply back out input interface through l2-output */
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = sw_if_index0;
+ next0 = ARP_TERM_NEXT_L2_OUTPUT;
+	  /* Note that output to a VXLAN tunnel will fail due to SHG, which
+	     is probably desirable since ARP termination is not intended
+	     for ARP requests from other hosts. If output to a VXLAN tunnel
+	     is required, however, just clear the SHG in the packet:
+	     vnet_buffer(p0)->l2.shg = 0; */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, pi0,
+ next0);
+ continue;
+
+ check_ip6_nd:
+ /* IP6 ND event notification or solicitation handling to generate
+ local response instead of flooding */
+ iph0 = (ip6_header_t *) l3h0;
+ if (PREDICT_FALSE (ethertype0 == ETHERNET_TYPE_IP6 &&
+ iph0->protocol == IP_PROTOCOL_ICMP6 &&
+ !ip6_address_is_unspecified
+ (&iph0->src_address)))
+ {
+ sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
+ if (vnet_ip6_nd_term
+ (vm, node, p0, eth0, iph0, sw_if_index0,
+ vnet_buffer (p0)->l2.bd_index, vnet_buffer (p0)->l2.shg))
+ goto output_response;
+ }
+
+ next_l2_feature:
+ {
+ u32 feature_bitmap0 =
+ vnet_buffer (p0)->l2.feature_bitmap & ~L2INPUT_FEAT_ARP_TERM;
+ vnet_buffer (p0)->l2.feature_bitmap = feature_bitmap0;
+ next0 =
+ feat_bitmap_get_next_node_index (arp_term_next_node_index,
+ feature_bitmap0);
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, next0);
+ continue;
+ }
+
+ drop:
+ if (0 == arp0->ip4_over_ethernet[0].ip4.as_u32 ||
+ (arp0->ip4_over_ethernet[0].ip4.as_u32 ==
+ arp0->ip4_over_ethernet[1].ip4.as_u32))
+ {
+ error0 = ETHERNET_ARP_ERROR_gratuitous_arp;
+ }
+ next0 = ARP_TERM_NEXT_DROP;
+ p0->error = node->errors[error0];
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, pi0,
+ next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_error_count (vm, node->node_index,
+ ETHERNET_ARP_ERROR_replies_sent, n_replies_sent);
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (arp_term_l2bd_node, static) = {
+ .function = arp_term_l2bd,
+ .name = "arp-term-l2bd",
+ .vector_size = sizeof (u32),
+ .n_errors = ETHERNET_ARP_N_ERROR,
+ .error_strings = ethernet_arp_error_strings,
+ .n_next_nodes = ARP_TERM_N_NEXT,
+ .next_nodes = {
+ [ARP_TERM_NEXT_L2_OUTPUT] = "l2-output",
+ [ARP_TERM_NEXT_DROP] = "error-drop",
+ },
+ .format_buffer = format_ethernet_arp_header,
+ .format_trace = format_arp_term_input_trace,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+arp_term_init (vlib_main_t * vm)
+{
+ // Initialize the feature next-node indexes
+ feat_bitmap_init_next_nodes (vm,
+ arp_term_l2bd_node.index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ arp_term_next_node_index);
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (arp_term_init);
+
+void
+change_arp_mac (u32 sw_if_index, ethernet_arp_ip4_entry_t * e)
+{
+ if (e->sw_if_index == sw_if_index)
+ {
+ adj_nbr_walk_nh4 (e->sw_if_index,
+ &e->ip4_address, arp_mk_complete_walk, e);
+ }
+}
+
+void
+ethernet_arp_change_mac (u32 sw_if_index)
+{
+ ethernet_arp_main_t *am = &ethernet_arp_main;
+ ethernet_arp_ip4_entry_t *e;
+
+ /* *INDENT-OFF* */
+ pool_foreach (e, am->ip4_entry_pool,
+ ({
+ change_arp_mac (sw_if_index, e);
+ }));
+ /* *INDENT-ON* */
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ethernet/arp_packet.h b/src/vnet/ethernet/arp_packet.h
new file mode 100644
index 00000000000..e762ffa4018
--- /dev/null
+++ b/src/vnet/ethernet/arp_packet.h
@@ -0,0 +1,173 @@
+/*
+ * ethernet/arp_packet.h: IP v4 ARP packet format
+ *
+ * Copyright (c) 2010 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_ethernet_arp_packet_h
+#define included_ethernet_arp_packet_h
+
+#define foreach_ethernet_arp_hardware_type \
+ _ (0, reserved) \
+ _ (1, ethernet) \
+ _ (2, experimental_ethernet) \
+ _ (3, ax_25) \
+ _ (4, proteon_pronet_token_ring) \
+ _ (5, chaos) \
+ _ (6, ieee_802) \
+ _ (7, arcnet) \
+ _ (8, hyperchannel) \
+ _ (9, lanstar) \
+ _ (10, autonet) \
+ _ (11, localtalk) \
+ _ (12, localnet) \
+ _ (13, ultra_link) \
+ _ (14, smds) \
+ _ (15, frame_relay) \
+ _ (16, atm) \
+ _ (17, hdlc) \
+ _ (18, fibre_channel) \
+ _ (19, atm19) \
+ _ (20, serial_line) \
+ _ (21, atm21) \
+ _ (22, mil_std_188_220) \
+ _ (23, metricom) \
+ _ (24, ieee_1394) \
+ _ (25, mapos) \
+ _ (26, twinaxial) \
+ _ (27, eui_64) \
+ _ (28, hiparp) \
+ _ (29, iso_7816_3) \
+ _ (30, arpsec) \
+ _ (31, ipsec_tunnel) \
+ _ (32, infiniband) \
+ _ (33, cai) \
+ _ (34, wiegand) \
+ _ (35, pure_ip) \
+ _ (36, hw_exp1) \
+ _ (256, hw_exp2)
+
+#define foreach_ethernet_arp_opcode \
+ _ (reserved) \
+ _ (request) \
+ _ (reply) \
+ _ (reverse_request) \
+ _ (reverse_reply) \
+ _ (drarp_request) \
+ _ (drarp_reply) \
+ _ (drarp_error) \
+ _ (inarp_request) \
+ _ (inarp_reply) \
+ _ (arp_nak) \
+ _ (mars_request) \
+ _ (mars_multi) \
+ _ (mars_mserv) \
+ _ (mars_join) \
+ _ (mars_leave) \
+ _ (mars_nak) \
+ _ (mars_unserv) \
+ _ (mars_sjoin) \
+ _ (mars_sleave) \
+ _ (mars_grouplist_request) \
+ _ (mars_grouplist_reply) \
+ _ (mars_redirect_map) \
+ _ (mapos_unarp) \
+ _ (exp1) \
+ _ (exp2)
+
+typedef enum
+{
+#define _(n,f) ETHERNET_ARP_HARDWARE_TYPE_##f = (n),
+ foreach_ethernet_arp_hardware_type
+#undef _
+} ethernet_arp_hardware_type_t;
+
+typedef enum
+{
+#define _(f) ETHERNET_ARP_OPCODE_##f,
+ foreach_ethernet_arp_opcode
+#undef _
+ ETHERNET_ARP_N_OPCODE,
+} ethernet_arp_opcode_t;
+
+typedef enum
+{
+ IP4_ARP_NEXT_DROP,
+ IP4_ARP_N_NEXT,
+} ip4_arp_next_t;
+
+typedef enum
+{
+ IP4_ARP_ERROR_DROP,
+ IP4_ARP_ERROR_REQUEST_SENT,
+ IP4_ARP_ERROR_NON_ARP_ADJ,
+ IP4_ARP_ERROR_REPLICATE_DROP,
+ IP4_ARP_ERROR_REPLICATE_FAIL,
+ IP4_ARP_ERROR_NO_SOURCE_ADDRESS,
+} ip4_arp_error_t;
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u8 ethernet[6];
+ ip4_address_t ip4;
+}) ethernet_arp_ip4_over_ethernet_address_t;
+/* *INDENT-ON* */
+
+typedef struct
+{
+ u16 l2_type;
+ u16 l3_type;
+ u8 n_l2_address_bytes;
+ u8 n_l3_address_bytes;
+ u16 opcode;
+ union
+ {
+ ethernet_arp_ip4_over_ethernet_address_t ip4_over_ethernet[2];
+
+ /* Others... */
+ u8 data[0];
+ };
+} ethernet_arp_header_t;
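+
+/* By convention ip4_over_ethernet[0] holds the sender hardware/IP pair
+   and ip4_over_ethernet[1] the target pair, matching the on-wire ARP
+   payload layout. */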
+
+typedef struct
+{
+ u32 sw_if_index;
+ ip4_address_t ip4_address;
+
+ u8 ethernet_address[6];
+
+ u16 flags;
+#define ETHERNET_ARP_IP4_ENTRY_FLAG_STATIC (1 << 0)
+#define ETHERNET_ARP_IP4_ENTRY_FLAG_DYNAMIC (1 << 1)
+
+ u64 cpu_time_last_updated;
+
+ /**
+ * The index of the adj-fib entry created
+ */
+ fib_node_index_t fib_entry_index;
+} ethernet_arp_ip4_entry_t;
+
+ethernet_arp_ip4_entry_t *ip4_neighbor_entries (u32 sw_if_index);
+u8 *format_ethernet_arp_ip4_entry (u8 * s, va_list * va);
+
+#endif /* included_ethernet_arp_packet_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ethernet/dir.dox b/src/vnet/ethernet/dir.dox
new file mode 100644
index 00000000000..a55a73c014b
--- /dev/null
+++ b/src/vnet/ethernet/dir.dox
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+@dir
+@brief Ethernet ARP and Loopback Code.
+
+This directory contains the source code for ARP and Loopback Interfaces.
+
+*/
+/*? %%clicmd:group_label ARP and Loopback CLI %% ?*/
diff --git a/src/vnet/ethernet/error.def b/src/vnet/ethernet/error.def
new file mode 100644
index 00000000000..36679c0ce1c
--- /dev/null
+++ b/src/vnet/ethernet/error.def
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ethernet_error.def: ethernet errors
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+ethernet_error (NONE, PUNT, "no error")
+ethernet_error (BAD_LLC_LENGTH, DROP, "llc length > packet length")
+ethernet_error (UNKNOWN_TYPE, PUNT, "unknown ethernet type")
+ethernet_error (UNKNOWN_VLAN, DROP, "unknown vlan")
+ethernet_error (L3_MAC_MISMATCH, DROP, "l3 mac mismatch")
+ethernet_error (DOWN, DROP, "subinterface down")
+
diff --git a/src/vnet/ethernet/ethernet.h b/src/vnet/ethernet/ethernet.h
new file mode 100644
index 00000000000..f88b0cf3c87
--- /dev/null
+++ b/src/vnet/ethernet/ethernet.h
@@ -0,0 +1,561 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ethernet.h: types/functions for ethernet.
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_ethernet_h
+#define included_ethernet_h
+
+#include <vnet/vnet.h>
+#include <vnet/ethernet/packet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/feature/feature.h>
+
+always_inline u64
+ethernet_mac_address_u64 (u8 * a)
+{
+ return (((u64) a[0] << (u64) (5 * 8))
+ | ((u64) a[1] << (u64) (4 * 8))
+ | ((u64) a[2] << (u64) (3 * 8))
+ | ((u64) a[3] << (u64) (2 * 8))
+ | ((u64) a[4] << (u64) (1 * 8)) | ((u64) a[5] << (u64) (0 * 8)));
+}
+
+static inline int
+ethernet_mac_address_is_multicast_u64 (u64 a)
+{
+ return (a & (1ULL << (5 * 8))) != 0;
+}
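+
+/* Usage sketch (illustrative, not part of the original patch): the
+   multicast test probes bit 40 of the packed value, i.e. the I/G bit
+   (least significant bit of the first octet) of the MAC address. */
+always_inline int
+example_mac_is_multicast (u8 * mac)
+{
+  /* e.g. 01:00:5e:00:00:01 (IPv4 multicast) returns non-zero */
+  return ethernet_mac_address_is_multicast_u64 (ethernet_mac_address_u64
+						 (mac));
+}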
+
+static_always_inline int
+ethernet_frame_is_tagged (u16 type)
+{
+#if __SSE4_2__
+ const __m128i ethertype_mask = _mm_set_epi16 (ETHERNET_TYPE_VLAN,
+ ETHERNET_TYPE_DOT1AD,
+ ETHERNET_TYPE_VLAN_9100,
+ ETHERNET_TYPE_VLAN_9200,
+ /* duplicate last one to
+ fill register */
+ ETHERNET_TYPE_VLAN_9200,
+ ETHERNET_TYPE_VLAN_9200,
+ ETHERNET_TYPE_VLAN_9200,
+ ETHERNET_TYPE_VLAN_9200);
+
+ __m128i r = _mm_set1_epi16 (type);
+ r = _mm_cmpeq_epi16 (ethertype_mask, r);
+ return !_mm_test_all_zeros (r, r);
+#else
+ if ((type == ETHERNET_TYPE_VLAN) ||
+ (type == ETHERNET_TYPE_DOT1AD) ||
+ (type == ETHERNET_TYPE_VLAN_9100) || (type == ETHERNET_TYPE_VLAN_9200))
+ return 1;
+#endif
+ return 0;
+}
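+
+/* Usage sketch (illustrative, not part of the original patch): the
+   ethertype argument must be in host byte order. */
+static_always_inline int
+example_buffer_is_tagged (vlib_buffer_t * b)
+{
+  ethernet_header_t *e = vlib_buffer_get_current (b);
+  return ethernet_frame_is_tagged (clib_net_to_host_u16 (e->type));
+}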
+
+/* Max. sized ethernet/vlan header for parsing. */
+typedef struct
+{
+ ethernet_header_t ethernet;
+
+ /* Allow up to 2 stacked vlan headers. */
+ ethernet_vlan_header_t vlan[2];
+} ethernet_max_header_t;
+
+struct vnet_hw_interface_t;
+/* Ethernet flag change callback. */
+typedef u32 (ethernet_flag_change_function_t)
+ (vnet_main_t * vnm, struct vnet_hw_interface_t * hi, u32 flags);
+
+#define ETHERNET_MIN_PACKET_BYTES 64
+#define ETHERNET_MAX_PACKET_BYTES 9216
+
+/* Ethernet interface instance. */
+typedef struct ethernet_interface
+{
+
+ /* Accept all packets (promiscuous mode). */
+#define ETHERNET_INTERFACE_FLAG_ACCEPT_ALL (1 << 0)
+#define ETHERNET_INTERFACE_FLAG_CONFIG_PROMISC(flags) \
+ (((flags) & ~ETHERNET_INTERFACE_FLAG_ACCEPT_ALL) == 0)
+
+ /* Change MTU on interface from hw interface structure */
+#define ETHERNET_INTERFACE_FLAG_MTU (1 << 1)
+#define ETHERNET_INTERFACE_FLAG_CONFIG_MTU(flags) \
+ ((flags) & ETHERNET_INTERFACE_FLAG_MTU)
+
+ /* Callback, e.g. to turn on/off promiscuous mode */
+ ethernet_flag_change_function_t *flag_change;
+
+ u32 driver_instance;
+
+ /* Ethernet (MAC) address for this interface. */
+ u8 address[6];
+} ethernet_interface_t;
+
+extern vnet_hw_interface_class_t ethernet_hw_interface_class;
+
+typedef struct
+{
+ /* Name (a c string). */
+ char *name;
+
+ /* Ethernet type in host byte order. */
+ ethernet_type_t type;
+
+ /* Node which handles this type. */
+ u32 node_index;
+
+ /* Next index for this type. */
+ u32 next_index;
+} ethernet_type_info_t;
+
+typedef enum
+{
+#define ethernet_error(n,c,s) ETHERNET_ERROR_##n,
+#include <vnet/ethernet/error.def>
+#undef ethernet_error
+ ETHERNET_N_ERROR,
+} ethernet_error_t;
+
+
+// Structs used when parsing packet to find sw_if_index
+
+typedef struct
+{
+ u32 sw_if_index;
+ u32 flags;
+  // exact-match flags: set if this config matches a packet
+  // carrying 0/1/2/3 tags respectively
+#define SUBINT_CONFIG_MATCH_0_TAG (1<<0)
+#define SUBINT_CONFIG_MATCH_1_TAG (1<<1)
+#define SUBINT_CONFIG_MATCH_2_TAG (1<<2)
+#define SUBINT_CONFIG_MATCH_3_TAG (1<<3)
+  // config entry is-valid flag
+#define SUBINT_CONFIG_VALID (1<<4)
+  // L2 (vs L3) forwarding mode
+#define SUBINT_CONFIG_L2 (1<<5)
+
+} subint_config_t;
+
+always_inline u32
+eth_create_valid_subint_match_flags (u32 num_tags)
+{
+ return SUBINT_CONFIG_VALID | (1 << num_tags);
+}
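+
+/* For example, eth_create_valid_subint_match_flags (1) yields
+   SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_1_TAG, the exact pattern a
+   single-tagged packet must find set in a matching subint config. */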
+
+
+typedef struct
+{
+ subint_config_t untagged_subint;
+ subint_config_t default_subint;
+ u16 dot1q_vlans; // pool id for vlan table
+ u16 dot1ad_vlans; // pool id for vlan table
+} main_intf_t;
+
+typedef struct
+{
+ subint_config_t single_tag_subint;
+ subint_config_t inner_any_subint;
+ u32 qinqs; // pool id for qinq table
+} vlan_intf_t;
+
+typedef struct
+{
+ vlan_intf_t vlans[ETHERNET_N_VLAN];
+} vlan_table_t;
+
+typedef struct
+{
+ subint_config_t subint;
+} qinq_intf_t;
+
+typedef struct
+{
+ qinq_intf_t vlans[ETHERNET_N_VLAN];
+} qinq_table_t;
+
+// Structure mapping to a next index based on ethertype.
+// Common ethertypes are stored explicitly, others are
+// stored in a sparse table.
+typedef struct
+{
+ /* Sparse vector mapping ethernet type in network byte order
+ to next index. */
+ u16 *input_next_by_type;
+ u32 *sparse_index_by_input_next_index;
+
+ /* cached next indexes for common ethertypes */
+ u32 input_next_ip4;
+ u32 input_next_ip6;
+ u32 input_next_mpls;
+} next_by_ethertype_t;
+
+typedef struct
+{
+ vlib_main_t *vlib_main;
+
+ /* next node index for the L3 input node of each ethertype */
+ next_by_ethertype_t l3_next;
+
+ /* next node index for L2 interfaces */
+ u32 l2_next;
+
+ /* flag and next node index for L3 redirect */
+ u32 redirect_l3;
+ u32 redirect_l3_next;
+
+ /* Pool of ethernet interface instances. */
+ ethernet_interface_t *interfaces;
+
+ ethernet_type_info_t *type_infos;
+
+ /* Hash tables mapping name/type to type info index. */
+ uword *type_info_by_name, *type_info_by_type;
+
+ // The root of the vlan parsing tables. A vector with one element
+ // for each main interface, indexed by hw_if_index.
+ main_intf_t *main_intfs;
+
+ // Pool of vlan tables
+ vlan_table_t *vlan_pool;
+
+  // Pool of qinq tables
+ qinq_table_t *qinq_pool;
+
+ /* Set to one to use AB.CD.EF instead of A:B:C:D:E:F as ethernet format. */
+ int format_ethernet_address_16bit;
+
+ /* debug: make sure we don't wipe out an ethernet registration by mistake */
+ u8 next_by_ethertype_register_called;
+
+ /* Feature arc index */
+ u8 output_feature_arc_index;
+} ethernet_main_t;
+
+extern ethernet_main_t ethernet_main;
+
+always_inline ethernet_type_info_t *
+ethernet_get_type_info (ethernet_main_t * em, ethernet_type_t type)
+{
+ uword *p = hash_get (em->type_info_by_type, type);
+ return p ? vec_elt_at_index (em->type_infos, p[0]) : 0;
+}
+
+ethernet_interface_t *ethernet_get_interface (ethernet_main_t * em,
+ u32 hw_if_index);
+
+clib_error_t *ethernet_register_interface (vnet_main_t * vnm,
+ u32 dev_class_index,
+ u32 dev_instance,
+ u8 * address,
+ u32 * hw_if_index_return,
+ ethernet_flag_change_function_t
+ flag_change);
+
+void ethernet_delete_interface (vnet_main_t * vnm, u32 hw_if_index);
+
+/* Register given node index to take input for given ethernet type. */
+void
+ethernet_register_input_type (vlib_main_t * vm,
+ ethernet_type_t type, u32 node_index);
+
+/* Register given node index to take input for packet from L2 interfaces. */
+void ethernet_register_l2_input (vlib_main_t * vm, u32 node_index);
+
+/* Register given node index to take redirected L3 traffic, and enable L3 redirect */
+void ethernet_register_l3_redirect (vlib_main_t * vm, u32 node_index);
+
+/* Formats ethernet address X:X:X:X:X:X */
+u8 *format_ethernet_address (u8 * s, va_list * args);
+u8 *format_ethernet_type (u8 * s, va_list * args);
+u8 *format_ethernet_vlan_tci (u8 * s, va_list * va);
+u8 *format_ethernet_header (u8 * s, va_list * args);
+u8 *format_ethernet_header_with_length (u8 * s, va_list * args);
+
+/* Parse ethernet address in either X:X:X:X:X:X unix or X.X.X cisco format. */
+uword unformat_ethernet_address (unformat_input_t * input, va_list * args);
+
+/* Parse ethernet type as 0xXXXX or type name from ethernet/types.def.
+ In either host or network byte order. */
+uword
+unformat_ethernet_type_host_byte_order (unformat_input_t * input,
+ va_list * args);
+uword
+unformat_ethernet_type_net_byte_order (unformat_input_t * input,
+ va_list * args);
+
+/* Parse ethernet header. */
+uword unformat_ethernet_header (unformat_input_t * input, va_list * args);
+
+/* Parse ethernet interface name; return hw_if_index. */
+uword unformat_ethernet_interface (unformat_input_t * input, va_list * args);
+
+uword unformat_pg_ethernet_header (unformat_input_t * input, va_list * args);
+
+always_inline void
+ethernet_setup_node (vlib_main_t * vm, u32 node_index)
+{
+ vlib_node_t *n = vlib_get_node (vm, node_index);
+ pg_node_t *pn = pg_get_node (node_index);
+
+ n->format_buffer = format_ethernet_header_with_length;
+ n->unformat_buffer = unformat_ethernet_header;
+ pn->unformat_edit = unformat_pg_ethernet_header;
+}
+
+always_inline ethernet_header_t *
+ethernet_buffer_get_header (vlib_buffer_t * b)
+{
+ return (void *)
+ (b->data + vnet_buffer (b)->ethernet.start_of_ethernet_header);
+}
+
+/** Returns the number of VLAN headers in the current Ethernet frame in the
+ * buffer. Returns 0, 1, 2 for the known header count. The value 3 indicates
+ * the number of headers is not known.
+ */
+#define ethernet_buffer_get_vlan_count(b) ( \
+ ((b)->flags & ETH_BUFFER_VLAN_BITS) >> LOG2_ETH_BUFFER_VLAN_1_DEEP \
+)
+
+/** Sets the number of VLAN headers in the current Ethernet frame in the
+ * buffer. Values 0, 1, 2 indicate the header count. The value 3 indicates
+ * the number of headers is not known.
+ */
+#define ethernet_buffer_set_vlan_count(b, v) ( \
+ (b)->flags = ((b)->flags & ~ETH_BUFFER_VLAN_BITS) | \
+ (((v) << LOG2_ETH_BUFFER_VLAN_1_DEEP) & ETH_BUFFER_VLAN_BITS) \
+)
+
+/** Adjusts the vlan count by the delta in 'v' */
+#define ethernet_buffer_adjust_vlan_count(b, v) ( \
+ ethernet_buffer_set_vlan_count(b, \
+ (word)ethernet_buffer_get_vlan_count(b) + (word)(v)) \
+)
+
+/** Adjusts the vlan count by the header size byte delta in 'v' */
+#define ethernet_buffer_adjust_vlan_count_by_bytes(b, v) ( \
+ (b)->flags = ((b)->flags & ~ETH_BUFFER_VLAN_BITS) | (( \
+ ((b)->flags & ETH_BUFFER_VLAN_BITS) + \
+ ((v) << (LOG2_ETH_BUFFER_VLAN_1_DEEP - 2)) \
+ ) & ETH_BUFFER_VLAN_BITS) \
+)
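+
+/* The "- 2" above reflects that each VLAN header is 4 (1 << 2) bytes
+   long, so a byte delta of +/-4 moves the 2-bit VLAN count by one. */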
+
+/**
+ * Determine the size of the Ethernet headers of the current frame in
+ * the buffer. This uses the VLAN depth flags that are set by
+ * ethernet-input. Because these flags are stored in the vlib_buffer_t
+ * "flags" field this count is valid regardless of the node so long as it's
+ * checked downstream of ethernet-input; That is, the value is not stored in
+ * the opaque space.
+ */
+#define ethernet_buffer_header_size(b) ( \
+ ethernet_buffer_get_vlan_count((b)) * sizeof(ethernet_vlan_header_t) + \
+ sizeof(ethernet_header_t) \
+)
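+
+/* Example (illustrative): with one VLAN tag recorded by ethernet-input,
+   ethernet_buffer_header_size (b) = 14 + 1 * 4 = 18 bytes. */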
+
+ethernet_main_t *ethernet_get_main (vlib_main_t * vm);
+u32 ethernet_set_flags (vnet_main_t * vnm, u32 hw_if_index, u32 flags);
+void ethernet_sw_interface_set_l2_mode (vnet_main_t * vnm, u32 sw_if_index,
+ u32 l2);
+void ethernet_sw_interface_set_l2_mode_noport (vnet_main_t * vnm,
+ u32 sw_if_index, u32 l2);
+void ethernet_set_rx_redirect (vnet_main_t * vnm, vnet_hw_interface_t * hi,
+ u32 enable);
+
+int
+vnet_arp_set_ip4_over_ethernet (vnet_main_t * vnm,
+ u32 sw_if_index, void *a_arg, int is_static);
+
+int
+vnet_arp_unset_ip4_over_ethernet (vnet_main_t * vnm,
+ u32 sw_if_index, void *a_arg);
+
+int vnet_proxy_arp_fib_reset (u32 fib_id);
+
+clib_error_t *next_by_ethertype_init (next_by_ethertype_t * l3_next);
+clib_error_t *next_by_ethertype_register (next_by_ethertype_t * l3_next,
+ u32 ethertype, u32 next_index);
+
+int vnet_create_loopback_interface (u32 * sw_if_indexp, u8 * mac_address);
+int vnet_delete_loopback_interface (u32 sw_if_index);
+int vnet_delete_sub_interface (u32 sw_if_index);
+
+// Perform ethernet subinterface classification table lookups given
+// the port's sw_if_index and fields extracted from the ethernet header.
+// The resulting tables are used by identify_subint().
+always_inline void
+eth_vlan_table_lookups (ethernet_main_t * em,
+ vnet_main_t * vnm,
+ u32 port_sw_if_index0,
+ u16 first_ethertype,
+ u16 outer_id,
+ u16 inner_id,
+ vnet_hw_interface_t ** hi,
+ main_intf_t ** main_intf,
+ vlan_intf_t ** vlan_intf, qinq_intf_t ** qinq_intf)
+{
+ vlan_table_t *vlan_table;
+ qinq_table_t *qinq_table;
+ u32 vlan_table_id;
+
+ // Read the main, vlan, and qinq interface table entries
+ // TODO: Consider if/how to prefetch tables. Also consider
+ // single-entry cache to skip table lookups and identify_subint()
+ // processing.
+ *hi = vnet_get_sup_hw_interface (vnm, port_sw_if_index0);
+ *main_intf = vec_elt_at_index (em->main_intfs, (*hi)->hw_if_index);
+
+ // Always read the vlan and qinq tables, even if there are not that
+ // many tags on the packet. This makes the lookups and comparisons
+ // easier (and less branchy).
+ vlan_table_id = (first_ethertype == ETHERNET_TYPE_DOT1AD) ?
+ (*main_intf)->dot1ad_vlans : (*main_intf)->dot1q_vlans;
+ vlan_table = vec_elt_at_index (em->vlan_pool, vlan_table_id);
+ *vlan_intf = &vlan_table->vlans[outer_id];
+
+ qinq_table = vec_elt_at_index (em->qinq_pool, (*vlan_intf)->qinqs);
+ *qinq_intf = &qinq_table->vlans[inner_id];
+}
+
+
+// Determine the subinterface for this packet, given the result of the
+// vlan table lookups and vlan header parsing. Check the most specific
+// matches first.
+// Returns 1 if a matching subinterface was found, otherwise returns 0.
+always_inline u32
+eth_identify_subint (vnet_hw_interface_t * hi,
+ vlib_buffer_t * b0,
+ u32 match_flags,
+ main_intf_t * main_intf,
+ vlan_intf_t * vlan_intf,
+ qinq_intf_t * qinq_intf,
+ u32 * new_sw_if_index, u8 * error0, u32 * is_l2)
+{
+ subint_config_t *subint;
+
+ // Each comparison is checking both the valid flag and the number of tags
+ // (incorporating exact-match/non-exact-match).
+
+ // check for specific double tag
+ subint = &qinq_intf->subint;
+ if ((subint->flags & match_flags) == match_flags)
+ goto matched;
+
+ // check for specific outer and 'any' inner
+ subint = &vlan_intf->inner_any_subint;
+ if ((subint->flags & match_flags) == match_flags)
+ goto matched;
+
+ // check for specific single tag
+ subint = &vlan_intf->single_tag_subint;
+ if ((subint->flags & match_flags) == match_flags)
+ goto matched;
+
+ // check for untagged interface
+ subint = &main_intf->untagged_subint;
+ if ((subint->flags & match_flags) == match_flags)
+ goto matched;
+
+ // check for default interface
+ subint = &main_intf->default_subint;
+ if ((subint->flags & match_flags) == match_flags)
+ goto matched;
+
+ // No matching subinterface
+ *new_sw_if_index = ~0;
+ *error0 = ETHERNET_ERROR_UNKNOWN_VLAN;
+ *is_l2 = 0;
+ return 0;
+
+matched:
+ *new_sw_if_index = subint->sw_if_index;
+ *is_l2 = subint->flags & SUBINT_CONFIG_L2;
+ return 1;
+}
+
+// Compare two ethernet macs. Return 1 if they are the same, 0 if different
+always_inline u32
+eth_mac_equal (u8 * mac1, u8 * mac2)
+{
+ return (*((u32 *) (mac1 + 0)) == *((u32 *) (mac2 + 0)) &&
+ *((u32 *) (mac1 + 2)) == *((u32 *) (mac2 + 2)));
+}
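+
+/* The two overlapping 32-bit loads cover bytes 0-3 and 2-5 of each MAC,
+   so all six bytes are compared without a separate 16-bit access. */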
+
+
+always_inline ethernet_main_t *
+vnet_get_ethernet_main (void)
+{
+ return &ethernet_main;
+}
+
+void vnet_register_ip4_arp_resolution_event (vnet_main_t * vnm,
+ void *address_arg,
+ uword node_index,
+ uword type_opaque, uword data);
+
+
+int vnet_add_del_ip4_arp_change_event (vnet_main_t * vnm,
+ void *data_callback,
+ u32 pid,
+ void *address_arg,
+ uword node_index,
+ uword type_opaque,
+ uword data, int is_add);
+
+void ethernet_arp_change_mac (u32 sw_if_index);
+void ethernet_ndp_change_mac (u32 sw_if_index);
+
+void arp_update_adjacency (vnet_main_t * vnm, u32 sw_if_index, u32 ai);
+
+void ethernet_update_adjacency (vnet_main_t * vnm, u32 sw_if_index, u32 ai);
+u8 *ethernet_build_rewrite (vnet_main_t * vnm,
+ u32 sw_if_index,
+ vnet_link_t link_type, const void *dst_address);
+
+extern vlib_node_registration_t ethernet_input_node;
+
+#endif /* included_ethernet_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ethernet/format.c b/src/vnet/ethernet/format.c
new file mode 100644
index 00000000000..4edef5adbeb
--- /dev/null
+++ b/src/vnet/ethernet/format.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ethernet_format.c: ethernet formatting/parsing.
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/ethernet/ethernet.h>
+
+u8 *
+format_ethernet_address (u8 * s, va_list * args)
+{
+ ethernet_main_t *em = &ethernet_main;
+ u8 *a = va_arg (*args, u8 *);
+
+ if (em->format_ethernet_address_16bit)
+ return format (s, "%02x%02x.%02x%02x.%02x%02x",
+ a[0], a[1], a[2], a[3], a[4], a[5]);
+ else
+ return format (s, "%02x:%02x:%02x:%02x:%02x:%02x",
+ a[0], a[1], a[2], a[3], a[4], a[5]);
+}
+
+u8 *
+format_ethernet_type (u8 * s, va_list * args)
+{
+ ethernet_type_t type = va_arg (*args, u32);
+ ethernet_main_t *em = &ethernet_main;
+ ethernet_type_info_t *t = ethernet_get_type_info (em, type);
+
+ if (t)
+ s = format (s, "%s", t->name);
+ else
+ s = format (s, "0x%04x", type);
+
+ return s;
+}
+
+u8 *
+format_ethernet_vlan_tci (u8 * s, va_list * va)
+{
+ u32 vlan_tci = va_arg (*va, u32);
+
+ u32 vid = (vlan_tci & 0xfff);
+ u32 cfi = (vlan_tci >> 12) & 1;
+ u32 pri = (vlan_tci >> 13);
+
+ s = format (s, "%d", vid);
+ if (pri != 0)
+ s = format (s, " priority %d", pri);
+ if (cfi != 0)
+ s = format (s, " cfi");
+
+ return s;
+}
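+
+/* Example (illustrative): vlan_tci 0xA00A formats as "10 priority 5"
+   (vid 10, pri 5, cfi clear). */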
+
+u8 *
+format_ethernet_pbb (u8 * s, va_list * va)
+{
+ u32 b_tag = va_arg (*va, u32);
+ u32 i_tag = va_arg (*va, u32);
+ u32 vid = (b_tag & 0xfff);
+ u32 bdei = (b_tag >> 12) & 1;
+ u32 bpcp = (b_tag >> 13);
+ u32 sid = (i_tag & 0xffffff);
+ u8 ires = (i_tag >> 24) & 3;
+ u8 iuca = (i_tag >> 27) & 1;
+ u8 idei = (i_tag >> 28) & 1;
+ u8 ipcp = (i_tag >> 29);
+
+  s =
+    format (s, "B_tag %04X (vid %d, dei %d, pcp %d), ", b_tag, vid, bdei,
+	    bpcp);
+  s =
+    format (s, "I_tag %08X (sid %d, res %d, uca %d, dei %d, pcp %d)",
+	    i_tag, sid, ires, iuca, idei, ipcp);
+
+ return s;
+}
+
+u8 *
+format_ethernet_header_with_length (u8 * s, va_list * args)
+{
+ ethernet_pbb_header_packed_t *ph =
+ va_arg (*args, ethernet_pbb_header_packed_t *);
+ ethernet_max_header_t *m = (ethernet_max_header_t *) ph;
+ u32 max_header_bytes = va_arg (*args, u32);
+ ethernet_main_t *em = &ethernet_main;
+ ethernet_header_t *e = &m->ethernet;
+ ethernet_vlan_header_t *v;
+ ethernet_type_t type = clib_net_to_host_u16 (e->type);
+ ethernet_type_t vlan_type[ARRAY_LEN (m->vlan)];
+ u32 n_vlan = 0, i, header_bytes;
+ uword indent;
+
+ while ((type == ETHERNET_TYPE_VLAN || type == ETHERNET_TYPE_DOT1AD
+ || type == ETHERNET_TYPE_DOT1AH) && n_vlan < ARRAY_LEN (m->vlan))
+ {
+ vlan_type[n_vlan] = type;
+ if (type != ETHERNET_TYPE_DOT1AH)
+ {
+ v = m->vlan + n_vlan;
+ type = clib_net_to_host_u16 (v->type);
+ }
+ n_vlan++;
+ }
+
+ header_bytes = sizeof (e[0]) + n_vlan * sizeof (v[0]);
+ if (max_header_bytes != 0 && header_bytes > max_header_bytes)
+ return format (s, "ethernet header truncated");
+
+ indent = format_get_indent (s);
+
+ s = format (s, "%U: %U -> %U",
+ format_ethernet_type, type,
+ format_ethernet_address, e->src_address,
+ format_ethernet_address, e->dst_address);
+
+ if (type != ETHERNET_TYPE_DOT1AH)
+ {
+ for (i = 0; i < n_vlan; i++)
+ {
+ u32 v = clib_net_to_host_u16 (m->vlan[i].priority_cfi_and_id);
+	  if (vlan_type[i] == ETHERNET_TYPE_VLAN)
+ s = format (s, " 802.1q vlan %U", format_ethernet_vlan_tci, v);
+ else
+ s = format (s, " 802.1ad vlan %U", format_ethernet_vlan_tci, v);
+ }
+
+ if (max_header_bytes != 0 && header_bytes < max_header_bytes)
+ {
+ ethernet_type_info_t *ti;
+ vlib_node_t *node = 0;
+
+ ti = ethernet_get_type_info (em, type);
+ if (ti && ti->node_index != ~0)
+ node = vlib_get_node (em->vlib_main, ti->node_index);
+ if (node && node->format_buffer)
+ s = format (s, "\n%U%U",
+ format_white_space, indent,
+ node->format_buffer, (void *) m + header_bytes,
+ max_header_bytes - header_bytes);
+ }
+ }
+ else
+ {
+ s = format (s, "\n%UPBB header : %U", format_white_space, indent,
+ format_ethernet_pbb,
+ clib_net_to_host_u16 (ph->priority_dei_id),
+ clib_net_to_host_u32 (ph->priority_dei_uca_res_sid));
+ }
+
+ return s;
+}
+
+u8 *
+format_ethernet_header (u8 * s, va_list * args)
+{
+ ethernet_max_header_t *m = va_arg (*args, ethernet_max_header_t *);
+ return format (s, "%U", format_ethernet_header_with_length, m, 0);
+}
+
+/* Parse X:X:X:X:X:X unix style ethernet address. */
+static uword
+unformat_ethernet_address_unix (unformat_input_t * input, va_list * args)
+{
+ u8 *result = va_arg (*args, u8 *);
+ u32 i, a[6];
+
+ if (!unformat (input, "%_%x:%x:%x:%x:%x:%x%_",
+ &a[0], &a[1], &a[2], &a[3], &a[4], &a[5]))
+ return 0;
+
+ /* Check range. */
+ for (i = 0; i < ARRAY_LEN (a); i++)
+ if (a[i] >= (1 << 8))
+ return 0;
+
+ for (i = 0; i < ARRAY_LEN (a); i++)
+ result[i] = a[i];
+
+ return 1;
+}
+
+/* Parse X.X.X cisco style ethernet address. */
+static uword
+unformat_ethernet_address_cisco (unformat_input_t * input, va_list * args)
+{
+ u8 *result = va_arg (*args, u8 *);
+ u32 i, a[3];
+
+ if (!unformat (input, "%_%x.%x.%x%_", &a[0], &a[1], &a[2]))
+ return 0;
+
+ /* Check range. */
+ for (i = 0; i < ARRAY_LEN (a); i++)
+ if (a[i] >= (1 << 16))
+ return 0;
+
+ result[0] = (a[0] >> 8) & 0xff;
+ result[1] = (a[0] >> 0) & 0xff;
+ result[2] = (a[1] >> 8) & 0xff;
+ result[3] = (a[1] >> 0) & 0xff;
+ result[4] = (a[2] >> 8) & 0xff;
+ result[5] = (a[2] >> 0) & 0xff;
+
+ return 1;
+}
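+
+/* Example (illustrative): "0123.4567.89ab" yields the same address as
+   the unix-style "01:23:45:67:89:ab". */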
+
+/* Parse ethernet address; accept either unix or cisco style addresses. */
+uword
+unformat_ethernet_address (unformat_input_t * input, va_list * args)
+{
+ u8 *result = va_arg (*args, u8 *);
+ return (unformat_user (input, unformat_ethernet_address_unix, result)
+ || unformat_user (input, unformat_ethernet_address_cisco, result));
+}
+
+/* Returns ethernet type as an int in host byte order. */
+uword
+unformat_ethernet_type_host_byte_order (unformat_input_t * input,
+ va_list * args)
+{
+ u16 *result = va_arg (*args, u16 *);
+ ethernet_main_t *em = &ethernet_main;
+ int type, i;
+
+ /* Numeric type. */
+ if (unformat (input, "0x%x", &type) || unformat (input, "%d", &type))
+ {
+ if (type >= (1 << 16))
+ return 0;
+ *result = type;
+ return 1;
+ }
+
+ /* Named type. */
+ if (unformat_user (input, unformat_vlib_number_by_name,
+ em->type_info_by_name, &i))
+ {
+ ethernet_type_info_t *ti = vec_elt_at_index (em->type_infos, i);
+ *result = ti->type;
+ return 1;
+ }
+
+ return 0;
+}
+
+uword
+unformat_ethernet_type_net_byte_order (unformat_input_t * input,
+ va_list * args)
+{
+ u16 *result = va_arg (*args, u16 *);
+ if (!unformat_user (input, unformat_ethernet_type_host_byte_order, result))
+ return 0;
+
+ *result = clib_host_to_net_u16 ((u16) * result);
+ return 1;
+}
+
+uword
+unformat_ethernet_header (unformat_input_t * input, va_list * args)
+{
+ u8 **result = va_arg (*args, u8 **);
+ ethernet_max_header_t _m, *m = &_m;
+ ethernet_header_t *e = &m->ethernet;
+ u16 type;
+ u32 n_vlan;
+
+ if (!unformat (input, "%U: %U -> %U",
+ unformat_ethernet_type_host_byte_order, &type,
+ unformat_ethernet_address, &e->src_address,
+ unformat_ethernet_address, &e->dst_address))
+ return 0;
+
+ n_vlan = 0;
+ while (unformat (input, "vlan"))
+ {
+ u32 id, priority;
+
+ if (!unformat_user (input, unformat_vlib_number, &id)
+ || id >= ETHERNET_N_VLAN)
+ return 0;
+
+ if (unformat (input, "priority %d", &priority))
+ {
+ if (priority >= 8)
+ return 0;
+ id |= priority << 13;
+ }
+
+ if (unformat (input, "cfi"))
+ id |= 1 << 12;
+
+ /* Too many vlans given. */
+ if (n_vlan >= ARRAY_LEN (m->vlan))
+ return 0;
+
+ m->vlan[n_vlan].priority_cfi_and_id = clib_host_to_net_u16 (id);
+ n_vlan++;
+ }
+
+ if (n_vlan == 0)
+ e->type = clib_host_to_net_u16 (type);
+ else
+ {
+ int i;
+
+ e->type = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
+ for (i = 0; i < n_vlan - 1; i++)
+ m->vlan[i].type = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
+ m->vlan[n_vlan - 1].type = clib_host_to_net_u16 (type);
+ }
+
+ /* Add header to result. */
+ {
+ void *p;
+ u32 n_bytes = sizeof (e[0]) + n_vlan * sizeof (m->vlan[0]);
+
+ vec_add2 (*result, p, n_bytes);
+ clib_memcpy (p, m, n_bytes);
+ }
+
+ return 1;
+}
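+
+/* Example (illustrative): the input
+     "0x800: 00:01:02:03:04:05 -> 06:07:08:09:0a:0b vlan 100 priority 3"
+   yields an 18-byte header: outer ethertype 0x8100 (VLAN), one VLAN
+   header carrying id 100 / priority 3, and inner type 0x0800 (IP4). */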
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ethernet/init.c b/src/vnet/ethernet/init.c
new file mode 100644
index 00000000000..2d20adc9610
--- /dev/null
+++ b/src/vnet/ethernet/init.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ethernet_init.c: ethernet initialization
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/ip.h> // for feature registration
+
+/* Global main structure. */
+ethernet_main_t ethernet_main;
+
+static void
+add_type (ethernet_main_t * em, ethernet_type_t type, char *type_name)
+{
+ ethernet_type_info_t *ti;
+ u32 i;
+
+ vec_add2 (em->type_infos, ti, 1);
+ i = ti - em->type_infos;
+
+ ti->name = type_name;
+ ti->type = type;
+ ti->next_index = ti->node_index = ~0;
+
+ hash_set (em->type_info_by_type, type, i);
+ hash_set_mem (em->type_info_by_name, ti->name, i);
+}
+
+/* Built-in ip4 tx feature path definition */
+/* *INDENT-OFF* */
+VNET_FEATURE_ARC_INIT (ethernet_output, static) =
+{
+ .arc_name = "ethernet-output",
+ .start_nodes = VNET_FEATURES ("adj-l2-midchain"),
+ .arc_index_ptr = &ethernet_main.output_feature_arc_index,
+};
+
+VNET_FEATURE_INIT (ethernet_tx_drop, static) =
+{
+ .arc_name = "ethernet-output",
+ .node_name = "error-drop",
+ .runs_before = 0, /* not before any other features */
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+ethernet_init (vlib_main_t * vm)
+{
+ ethernet_main_t *em = &ethernet_main;
+ clib_error_t *error;
+
+ /*
+ * Set up the L2 path now, or we'll wipe out the L2 ARP
+ * registration set up by ethernet_arp_init.
+ */
+ if ((error = vlib_call_init_function (vm, l2_init)))
+ return error;
+
+ em->vlib_main = vm;
+
+ em->type_info_by_name = hash_create_string (0, sizeof (uword));
+ em->type_info_by_type = hash_create (0, sizeof (uword));
+
+#define ethernet_type(n,s) add_type (em, ETHERNET_TYPE_##s, #s);
+#include "types.def"
+#undef ethernet_type
+
+ if ((error = vlib_call_init_function (vm, llc_init)))
+ return error;
+ if ((error = vlib_call_init_function (vm, ethernet_input_init)))
+ return error;
+ if ((error = vlib_call_init_function (vm, vnet_feature_init)))
+ return error;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ethernet_init);
+
+ethernet_main_t *
+ethernet_get_main (vlib_main_t * vm)
+{
+ vlib_call_init_function (vm, ethernet_init);
+ return &ethernet_main;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ethernet/interface.c b/src/vnet/ethernet/interface.c
new file mode 100644
index 00000000000..1c1f4353983
--- /dev/null
+++ b/src/vnet/ethernet/interface.c
@@ -0,0 +1,730 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ethernet_interface.c: ethernet interfaces
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/adj/adj.h>
+
+/**
+ * @file
+ * @brief Loopback Interfaces.
+ *
+ * This file contains code to manage loopback interfaces.
+ */
+
+/**
+ * @brief build a rewrite string to use for sending packets of type 'link_type'
+ * to 'dst_address'
+ */
+u8 *
+ethernet_build_rewrite (vnet_main_t * vnm,
+ u32 sw_if_index,
+ vnet_link_t link_type, const void *dst_address)
+{
+ vnet_sw_interface_t *sub_sw = vnet_get_sw_interface (vnm, sw_if_index);
+ vnet_sw_interface_t *sup_sw = vnet_get_sup_sw_interface (vnm, sw_if_index);
+ vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ ethernet_main_t *em = &ethernet_main;
+ ethernet_interface_t *ei;
+ ethernet_header_t *h;
+ ethernet_type_t type;
+ uword n_bytes = sizeof (h[0]);
+ u8 *rewrite = NULL;
+
+ if (sub_sw != sup_sw)
+ {
+ if (sub_sw->sub.eth.flags.one_tag)
+ {
+ n_bytes += sizeof (ethernet_vlan_header_t);
+ }
+ else if (sub_sw->sub.eth.flags.two_tags)
+ {
+ n_bytes += 2 * (sizeof (ethernet_vlan_header_t));
+ }
+ // Check for encaps that are not supported for L3 interfaces
+ if (!(sub_sw->sub.eth.flags.exact_match) ||
+ (sub_sw->sub.eth.flags.default_sub) ||
+ (sub_sw->sub.eth.flags.outer_vlan_id_any) ||
+ (sub_sw->sub.eth.flags.inner_vlan_id_any))
+ {
+ return 0;
+ }
+ }
+
+ switch (link_type)
+ {
+#define _(a,b) case VNET_LINK_##a: type = ETHERNET_TYPE_##b; break
+ _(IP4, IP4);
+ _(IP6, IP6);
+ _(MPLS, MPLS_UNICAST);
+ _(ARP, ARP);
+#undef _
+ default:
+ return NULL;
+ }
+
+ vec_validate (rewrite, n_bytes - 1);
+ h = (ethernet_header_t *) rewrite;
+ ei = pool_elt_at_index (em->interfaces, hw->hw_instance);
+ clib_memcpy (h->src_address, ei->address, sizeof (h->src_address));
+ if (dst_address)
+ clib_memcpy (h->dst_address, dst_address, sizeof (h->dst_address));
+ else
+ memset (h->dst_address, ~0, sizeof (h->dst_address)); /* broadcast */
+
+ if (sub_sw->sub.eth.flags.one_tag)
+ {
+ ethernet_vlan_header_t *outer = (void *) (h + 1);
+
+ h->type = sub_sw->sub.eth.flags.dot1ad ?
+ clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD) :
+ clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
+ outer->priority_cfi_and_id =
+ clib_host_to_net_u16 (sub_sw->sub.eth.outer_vlan_id);
+ outer->type = clib_host_to_net_u16 (type);
+
+ }
+ else if (sub_sw->sub.eth.flags.two_tags)
+ {
+ ethernet_vlan_header_t *outer = (void *) (h + 1);
+ ethernet_vlan_header_t *inner = (void *) (outer + 1);
+
+ h->type = sub_sw->sub.eth.flags.dot1ad ?
+ clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD) :
+ clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
+ outer->priority_cfi_and_id =
+ clib_host_to_net_u16 (sub_sw->sub.eth.outer_vlan_id);
+ outer->type = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
+ inner->priority_cfi_and_id =
+ clib_host_to_net_u16 (sub_sw->sub.eth.inner_vlan_id);
+ inner->type = clib_host_to_net_u16 (type);
+
+ }
+ else
+ {
+ h->type = clib_host_to_net_u16 (type);
+ }
+
+ return (rewrite);
+}
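+
+/* Example (illustrative): on an untagged interface with VNET_LINK_IP4,
+   the rewrite is the plain 14-byte header <dst-mac><src-mac><0x0800>;
+   each configured VLAN tag adds one 4-byte ethernet_vlan_header_t. */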
+
+void
+ethernet_update_adjacency (vnet_main_t * vnm, u32 sw_if_index, u32 ai)
+{
+ ip_adjacency_t *adj;
+
+ adj = adj_get (ai);
+
+ if (FIB_PROTOCOL_IP4 == adj->ia_nh_proto)
+ {
+ arp_update_adjacency (vnm, sw_if_index, ai);
+ }
+ else if (FIB_PROTOCOL_IP6 == adj->ia_nh_proto)
+ {
+ ip6_ethernet_update_adjacency (vnm, sw_if_index, ai);
+ }
+ else
+ {
+ ASSERT (0);
+ }
+}
+
+static clib_error_t *
+ethernet_mac_change (vnet_hw_interface_t * hi, char *mac_address)
+{
+ ethernet_interface_t *ei;
+ ethernet_main_t *em;
+
+ em = &ethernet_main;
+ ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
+
+ vec_validate (hi->hw_address,
+ STRUCT_SIZE_OF (ethernet_header_t, src_address) - 1);
+ clib_memcpy (hi->hw_address, mac_address, vec_len (hi->hw_address));
+
+ clib_memcpy (ei->address, (u8 *) mac_address, sizeof (ei->address));
+ ethernet_arp_change_mac (hi->sw_if_index);
+ ethernet_ndp_change_mac (hi->sw_if_index);
+
+ return (NULL);
+}
+
+/* *INDENT-OFF* */
+VNET_HW_INTERFACE_CLASS (ethernet_hw_interface_class) = {
+ .name = "Ethernet",
+ .format_address = format_ethernet_address,
+ .format_header = format_ethernet_header_with_length,
+ .unformat_hw_address = unformat_ethernet_address,
+ .unformat_header = unformat_ethernet_header,
+ .build_rewrite = ethernet_build_rewrite,
+ .update_adjacency = ethernet_update_adjacency,
+ .mac_addr_change_function = ethernet_mac_change,
+};
+/* *INDENT-ON* */
+
+uword
+unformat_ethernet_interface (unformat_input_t * input, va_list * args)
+{
+ vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+ u32 *result = va_arg (*args, u32 *);
+ u32 hw_if_index;
+ ethernet_main_t *em = &ethernet_main;
+ ethernet_interface_t *eif;
+
+ if (!unformat_user (input, unformat_vnet_hw_interface, vnm, &hw_if_index))
+ return 0;
+
+ eif = ethernet_get_interface (em, hw_if_index);
+ if (eif)
+ {
+ *result = hw_if_index;
+ return 1;
+ }
+ return 0;
+}
+
+clib_error_t *
+ethernet_register_interface (vnet_main_t * vnm,
+ u32 dev_class_index,
+ u32 dev_instance,
+ u8 * address,
+ u32 * hw_if_index_return,
+ ethernet_flag_change_function_t flag_change)
+{
+ ethernet_main_t *em = &ethernet_main;
+ ethernet_interface_t *ei;
+ vnet_hw_interface_t *hi;
+ clib_error_t *error = 0;
+ u32 hw_if_index;
+
+ pool_get (em->interfaces, ei);
+ ei->flag_change = flag_change;
+
+ hw_if_index = vnet_register_interface
+ (vnm,
+ dev_class_index, dev_instance,
+ ethernet_hw_interface_class.index, ei - em->interfaces);
+ *hw_if_index_return = hw_if_index;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ ethernet_setup_node (vnm->vlib_main, hi->output_node_index);
+
+ hi->min_packet_bytes = hi->min_supported_packet_bytes =
+ ETHERNET_MIN_PACKET_BYTES;
+ hi->max_packet_bytes = hi->max_supported_packet_bytes =
+ ETHERNET_MAX_PACKET_BYTES;
+ hi->per_packet_overhead_bytes =
+ /* preamble */ 8 + /* inter frame gap */ 12;
+
+  /* Default the L3 MTU to 9000 bytes to allow jumbo frames. */
+ hi->max_l3_packet_bytes[VLIB_RX] = hi->max_l3_packet_bytes[VLIB_TX] = 9000;
+
+ clib_memcpy (ei->address, address, sizeof (ei->address));
+ vec_free (hi->hw_address);
+ vec_add (hi->hw_address, address, sizeof (ei->address));
+
+ if (error)
+ {
+ pool_put (em->interfaces, ei);
+ return error;
+ }
+ return error;
+}
+
+void
+ethernet_delete_interface (vnet_main_t * vnm, u32 hw_if_index)
+{
+ ethernet_main_t *em = &ethernet_main;
+ ethernet_interface_t *ei;
+ vnet_hw_interface_t *hi;
+ main_intf_t *main_intf;
+ vlan_table_t *vlan_table;
+ u32 idx;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
+
+ /* Delete vlan mapping table for dot1q and dot1ad. */
+ main_intf = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
+ if (main_intf->dot1q_vlans)
+ {
+ vlan_table = vec_elt_at_index (em->vlan_pool, main_intf->dot1q_vlans);
+ for (idx = 0; idx < ETHERNET_N_VLAN; idx++)
+ {
+ if (vlan_table->vlans[idx].qinqs)
+ {
+ pool_put_index (em->qinq_pool, vlan_table->vlans[idx].qinqs);
+ }
+ }
+ pool_put_index (em->vlan_pool, main_intf->dot1q_vlans);
+ }
+ if (main_intf->dot1ad_vlans)
+ {
+ vlan_table = vec_elt_at_index (em->vlan_pool, main_intf->dot1ad_vlans);
+ for (idx = 0; idx < ETHERNET_N_VLAN; idx++)
+ {
+ if (vlan_table->vlans[idx].qinqs)
+ {
+ pool_put_index (em->qinq_pool, vlan_table->vlans[idx].qinqs);
+ }
+ }
+ pool_put_index (em->vlan_pool, main_intf->dot1ad_vlans);
+ }
+
+ vnet_delete_hw_interface (vnm, hw_if_index);
+ pool_put (em->interfaces, ei);
+}
+
+u32
+ethernet_set_flags (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+{
+ ethernet_main_t *em = &ethernet_main;
+ vnet_hw_interface_t *hi;
+ ethernet_interface_t *ei;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ ASSERT (hi->hw_class_index == ethernet_hw_interface_class.index);
+
+ ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
+ if (ei->flag_change)
+ return ei->flag_change (vnm, hi, flags);
+ return (u32) ~ 0;
+}
+
+/* Echo packets back to ethernet/l2-input. */
+static uword
+simulated_ethernet_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, n_left_to_next, n_copy, *from, *to_next;
+ u32 next_index = VNET_SIMULATED_ETHERNET_TX_NEXT_ETHERNET_INPUT;
+ u32 i, next_node_index, bvi_flag, sw_if_index;
+ u32 n_pkts = 0, n_bytes = 0;
+ u32 cpu_index = vm->cpu_index;
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vlib_node_main_t *nm = &vm->node_main;
+ vlib_node_t *loop_node;
+ vlib_buffer_t *b;
+
+  // Check the tx node's next index: it is ethernet-input when the
+  // loopback is created, but is changed to l2-input if the loopback
+  // is configured as the BVI of a BD (Bridge Domain).
+ loop_node = vec_elt (nm->nodes, node->node_index);
+ next_node_index = loop_node->next_nodes[next_index];
+ bvi_flag = (next_node_index == l2input_node.index) ? 1 : 0;
+
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_args (frame);
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ n_copy = clib_min (n_left_from, n_left_to_next);
+
+ clib_memcpy (to_next, from, n_copy * sizeof (from[0]));
+ n_left_to_next -= n_copy;
+ n_left_from -= n_copy;
+ i = 0;
+ b = vlib_get_buffer (vm, from[i]);
+ sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
+ while (1)
+ {
+	  // Set up RX and TX indices as if received from a real driver.
+	  // For the BVI case, set the TX index to the L2INPUT_BVI marker
+	  // and update l2_len in the packet as required by the l2
+	  // forwarding path.
+ vnet_buffer (b)->sw_if_index[VLIB_RX] = sw_if_index;
+ if (bvi_flag)
+ {
+ vnet_update_l2_len (b);
+ vnet_buffer (b)->sw_if_index[VLIB_TX] = L2INPUT_BVI;
+ }
+ else
+ vnet_buffer (b)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+ i++;
+ n_pkts++;
+ n_bytes += vlib_buffer_length_in_chain (vm, b);
+
+ if (i < n_copy)
+ b = vlib_get_buffer (vm, from[i]);
+ else
+ break;
+ }
+ from += n_copy;
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+
+ /* increment TX interface stat */
+ vlib_increment_combined_counter (im->combined_sw_if_counters +
+ VNET_INTERFACE_COUNTER_TX, cpu_index,
+ sw_if_index, n_pkts, n_bytes);
+ }
+
+  return frame->n_vectors;
+}
+
+static u8 *
+format_simulated_ethernet_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, "loop%d", dev_instance);
+}
+
+static clib_error_t *
+simulated_ethernet_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
+ u32 flags)
+{
+ u32 hw_flags = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ?
+ VNET_HW_INTERFACE_FLAG_LINK_UP : 0;
+ vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (ethernet_simulated_device_class) = {
+ .name = "Loopback",
+ .format_device_name = format_simulated_ethernet_name,
+ .tx_function = simulated_ethernet_interface_tx,
+ .admin_up_down_function = simulated_ethernet_admin_up_down,
+};
+/* *INDENT-ON* */
+
+int
+vnet_create_loopback_interface (u32 * sw_if_indexp, u8 * mac_address)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vlib_get_main ();
+ clib_error_t *error;
+ static u32 instance;
+ u8 address[6];
+ u32 hw_if_index;
+ vnet_hw_interface_t *hw_if;
+ u32 slot;
+ int rv = 0;
+
+ ASSERT (sw_if_indexp);
+
+ *sw_if_indexp = (u32) ~ 0;
+
+ memset (address, 0, sizeof (address));
+
+ /*
+   * If the configured mac_address is all-zero, allocate the default
+   * MAC address de:ad:00:00:00:<instance>. Otherwise, program the
+   * user-supplied MAC address on the loopback interface.
+ */
+ if (memcmp (address, mac_address, sizeof (address)))
+ clib_memcpy (address, mac_address, sizeof (address));
+ else
+ {
+ address[0] = 0xde;
+ address[1] = 0xad;
+ address[5] = instance;
+ }
+
+ error = ethernet_register_interface
+ (vnm,
+ ethernet_simulated_device_class.index, instance++, address, &hw_if_index,
+ /* flag change */ 0);
+
+ if (error)
+ {
+ rv = VNET_API_ERROR_INVALID_REGISTRATION;
+ clib_error_report (error);
+ return rv;
+ }
+
+ hw_if = vnet_get_hw_interface (vnm, hw_if_index);
+ slot = vlib_node_add_named_next_with_slot
+ (vm, hw_if->tx_node_index,
+ "ethernet-input", VNET_SIMULATED_ETHERNET_TX_NEXT_ETHERNET_INPUT);
+ ASSERT (slot == VNET_SIMULATED_ETHERNET_TX_NEXT_ETHERNET_INPUT);
+
+ {
+ vnet_sw_interface_t *si = vnet_get_hw_sw_interface (vnm, hw_if_index);
+ *sw_if_indexp = si->sw_if_index;
+ }
+
+ return 0;
+}
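+
+/* Usage sketch (illustrative, not part of the original patch):
+
+     u32 sw_if_index;
+     u8 mac[6] = { 0 };	// all-zero: allocate de:ad:00:00:00:<n>
+     int rv = vnet_create_loopback_interface (&sw_if_index, mac);
+
+   On success (rv == 0) sw_if_index names the new loopback. */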
+
+static clib_error_t *
+create_simulated_ethernet_interfaces (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ int rv;
+ u32 sw_if_index;
+ u8 mac_address[6];
+
+ memset (mac_address, 0, sizeof (mac_address));
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "mac %U", unformat_ethernet_address, mac_address))
+ ;
+ else
+ break;
+ }
+
+ rv = vnet_create_loopback_interface (&sw_if_index, mac_address);
+
+ if (rv)
+ return clib_error_return (0, "vnet_create_loopback_interface failed");
+
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (),
+ sw_if_index);
+ return 0;
+}
+
+/*?
+ * Create a loopback interface. Optionally, a MAC Address can be
+ * provided. If not provided, de:ad:00:00:00:<loopId> will be used.
+ *
+ * @cliexpar
+ * The following two command syntaxes are equivalent:
+ * @cliexcmd{loopback create-interface [mac <mac-addr>]}
+ * @cliexcmd{create loopback interface [mac <mac-addr>]}
+ * Example of how to create a loopback interface:
+ * @cliexcmd{loopback create-interface}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (create_simulated_ethernet_interface_command, static) = {
+ .path = "loopback create-interface",
+ .short_help = "loopback create-interface [mac <mac-addr>]",
+ .function = create_simulated_ethernet_interfaces,
+};
+/* *INDENT-ON* */
+
+/*?
+ * Create a loopback interface. Optionally, a MAC Address can be
+ * provided. If not provided, de:ad:00:00:00:<loopId> will be used.
+ *
+ * @cliexpar
+ * The following two command syntaxes are equivalent:
+ * @cliexcmd{loopback create-interface [mac <mac-addr>]}
+ * @cliexcmd{create loopback interface [mac <mac-addr>]}
+ * Example of how to create a loopback interface:
+ * @cliexcmd{create loopback interface}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (create_loopback_interface_command, static) = {
+ .path = "create loopback interface",
+ .short_help = "create loopback interface [mac <mac-addr>]",
+ .function = create_simulated_ethernet_interfaces,
+};
+/* *INDENT-ON* */
+
+ethernet_interface_t *
+ethernet_get_interface (ethernet_main_t * em, u32 hw_if_index)
+{
+ vnet_hw_interface_t *i =
+ vnet_get_hw_interface (vnet_get_main (), hw_if_index);
+  return (i->hw_class_index == ethernet_hw_interface_class.index
+	  ? pool_elt_at_index (em->interfaces, i->hw_instance) : 0);
+}
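+
+/*
+ * Usage sketch (caller-side, illustrative): since ethernet_get_interface()
+ * returns 0 for non-ethernet hardware, it doubles as an "is this ethernet?"
+ * check:
+ *
+ *   ethernet_interface_t *eif =
+ *     ethernet_get_interface (&ethernet_main, hw_if_index);
+ *   if (eif == 0)
+ *     return;                   /* not an ethernet interface */
+ */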
+
+int
+vnet_delete_loopback_interface (u32 sw_if_index)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_sw_interface_t *si;
+
+ if (pool_is_free_index (vnm->interface_main.sw_interfaces, sw_if_index))
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+
+ si = vnet_get_sw_interface (vnm, sw_if_index);
+ ethernet_delete_interface (vnm, si->hw_if_index);
+
+ return 0;
+}
+
+int
+vnet_delete_sub_interface (u32 sw_if_index)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ int rv = 0;
+
+ if (pool_is_free_index (vnm->interface_main.sw_interfaces, sw_if_index))
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_sw_interface_t *si = vnet_get_sw_interface (vnm, sw_if_index);
+
+ if (si->type == VNET_SW_INTERFACE_TYPE_SUB)
+ {
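+      /* Key layout: upper 32 bits = parent sw_if_index, lower 32 = sub id */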
+ u64 sup_and_sub_key =
+ ((u64) (si->sup_sw_if_index) << 32) | (u64) si->sub.id;
+
+ hash_unset_mem (im->sw_if_index_by_sup_and_sub, &sup_and_sub_key);
+ vnet_delete_sw_interface (vnm, sw_if_index);
+ }
+ else
+ {
+ rv = VNET_API_ERROR_INVALID_SUB_SW_IF_INDEX;
+ }
+ return rv;
+}
+
+static clib_error_t *
+delete_simulated_ethernet_interfaces (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ int rv;
+ u32 sw_if_index = ~0;
+ vnet_main_t *vnm = vnet_get_main ();
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "intfc %U",
+ unformat_vnet_sw_interface, vnm, &sw_if_index))
+ ;
+ else
+ break;
+ }
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "interface not specified");
+
+ rv = vnet_delete_loopback_interface (sw_if_index);
+
+ if (rv)
+ return clib_error_return (0, "vnet_delete_loopback_interface failed");
+
+ return 0;
+}
+
+static clib_error_t *
+delete_sub_interface (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ int rv = 0;
+ u32 sw_if_index = ~0;
+ vnet_main_t *vnm = vnet_get_main ();
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (input, "%U", unformat_vnet_sw_interface, vnm, &sw_if_index))
+ ;
+ else
+ break;
+ }
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "interface doesn't exist");
+
+ if (pool_is_free_index (vnm->interface_main.sw_interfaces, sw_if_index))
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ else
+ rv = vnet_delete_sub_interface (sw_if_index);
+ if (rv)
+    return clib_error_return (0, "vnet_delete_sub_interface failed");
+ return 0;
+}
+
+/*?
+ * Delete a loopback interface.
+ *
+ * @cliexpar
+ * The following two command syntaxes are equivalent:
+ * @cliexcmd{loopback delete-interface intfc <interface>}
+ * @cliexcmd{delete loopback interface intfc <interface>}
+ * Example of how to delete a loopback interface:
+ * @cliexcmd{loopback delete-interface intfc loop0}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (delete_simulated_ethernet_interface_command, static) = {
+ .path = "loopback delete-interface",
+ .short_help = "loopback delete-interface intfc <interface>",
+ .function = delete_simulated_ethernet_interfaces,
+};
+/* *INDENT-ON* */
+
+/*?
+ * Delete a loopback interface.
+ *
+ * @cliexpar
+ * The following two command syntaxes are equivalent:
+ * @cliexcmd{loopback delete-interface intfc <interface>}
+ * @cliexcmd{delete loopback interface intfc <interface>}
+ * Example of how to delete a loopback interface:
+ * @cliexcmd{delete loopback interface intfc loop0}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (delete_loopback_interface_command, static) = {
+ .path = "delete loopback interface",
+ .short_help = "delete loopback interface intfc <interface>",
+ .function = delete_simulated_ethernet_interfaces,
+};
+/* *INDENT-ON* */
+
+/*?
+ * Delete a sub-interface.
+ *
+ * @cliexpar
+ * Example of how to delete a sub-interface:
+ * @cliexcmd{delete sub-interface GigabitEthernet0/8/0.200}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (delete_sub_interface_command, static) = {
+ .path = "delete sub-interface",
+ .short_help = "delete sub-interface <interface>",
+ .function = delete_sub_interface,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ethernet/mac_swap.c b/src/vnet/ethernet/mac_swap.c
new file mode 100644
index 00000000000..c0fec12e61e
--- /dev/null
+++ b/src/vnet/ethernet/mac_swap.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vppinfra/error.h>
+#include <vnet/devices/pci/ige.h>
+#include <vnet/devices/pci/ixge.h>
+#include <vnet/devices/pci/ixgev.h>
+
+typedef struct
+{
+ u32 cached_next_index;
+ u32 cached_sw_if_index;
+
+ /* Hash table to map sw_if_index to next node index */
+ uword *next_node_index_by_sw_if_index;
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} mac_swap_main_t;
+
+typedef struct
+{
+ u8 src[6];
+ u8 dst[6];
+ u32 sw_if_index;
+ u32 next_index;
+} swap_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_swap_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ swap_trace_t *t = va_arg (*args, swap_trace_t *);
+
+ s = format (s, "SWAP: dst now %U src now %U sw_if_index %d next_index %d",
+ format_ethernet_address, t->dst,
+ format_ethernet_address, t->src, t->sw_if_index, t->next_index);
+ return s;
+}
+
+#define foreach_hw_driver_next \
+ _(IP4) \
+ _(IP6) \
+ _(ETHERNET)
+
+mac_swap_main_t mac_swap_main;
+
+static vlib_node_registration_t mac_swap_node;
+
+#define foreach_mac_swap_error \
+_(SWAPS, "mac addresses swapped")
+
+typedef enum
+{
+#define _(sym,str) MAC_SWAP_ERROR_##sym,
+ foreach_mac_swap_error
+#undef _
+ MAC_SWAP_N_ERROR,
+} mac_swap_error_t;
+
+static char *mac_swap_error_strings[] = {
+#define _(sym,string) string,
+ foreach_mac_swap_error
+#undef _
+};
+
+/*
+ * To drop a pkt and increment one of the previous counters:
+ *
+ * set b0->error = error_node->errors[MAC_SWAP_ERROR_SWAPS];
+ * set next0 to a disposition index bound to "error-drop".
+ *
+ * To manually increment the specific counter MAC_SWAP_ERROR_SWAPS:
+ *
+ * vlib_node_t *n = vlib_get_node (vm, mac_swap_node.index);
+ * u32 node_counter_base_index = n->error_heap_index;
+ * vlib_error_main_t * em = &vm->error_main;
+ * em->counters[node_counter_base_index + MAC_SWAP_ERROR_SWAPS] += 1;
+ */
+
+typedef enum
+{
+ MAC_SWAP_NEXT_DROP,
+ MAC_SWAP_N_NEXT,
+} mac_swap_next_t;
+
+static uword
+mac_swap_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ mac_swap_next_t next_index;
+ mac_swap_main_t *msm = &mac_swap_main;
+ vlib_node_t *n = vlib_get_node (vm, mac_swap_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t *em = &vm->error_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1;
+ uword *p0, *p1;
+ u64 tmp0a, tmp0b;
+ u64 tmp1a, tmp1b;
+ ethernet_header_t *h0, *h1;
+
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ next0 = msm->cached_next_index;
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ next1 = msm->cached_next_index;
+
+ if (PREDICT_FALSE (msm->cached_sw_if_index != sw_if_index0))
+ {
+ p0 =
+ hash_get (msm->next_node_index_by_sw_if_index, sw_if_index0);
+ if (p0 == 0)
+ {
+ vnet_hw_interface_t *hw0;
+
+ hw0 = vnet_get_sup_hw_interface (msm->vnet_main,
+ sw_if_index0);
+
+ next0 = vlib_node_add_next (msm->vlib_main,
+ mac_swap_node.index,
+ hw0->output_node_index);
+ hash_set (msm->next_node_index_by_sw_if_index,
+ sw_if_index0, next0);
+ }
+ else
+ next0 = p0[0];
+ msm->cached_sw_if_index = sw_if_index0;
+ msm->cached_next_index = next0;
+ next1 = next0;
+ }
+ if (PREDICT_FALSE (msm->cached_sw_if_index != sw_if_index1))
+ {
+ p1 =
+ hash_get (msm->next_node_index_by_sw_if_index, sw_if_index1);
+ if (p1 == 0)
+ {
+ vnet_hw_interface_t *hw1;
+
+ hw1 = vnet_get_sup_hw_interface (msm->vnet_main,
+ sw_if_index1);
+
+ next1 = vlib_node_add_next (msm->vlib_main,
+ mac_swap_node.index,
+ hw1->output_node_index);
+ hash_set (msm->next_node_index_by_sw_if_index,
+ sw_if_index1, next1);
+ }
+ else
+ next1 = p1[0];
+ msm->cached_sw_if_index = sw_if_index1;
+ msm->cached_next_index = next1;
+ }
+
+ em->counters[node_counter_base_index + MAC_SWAP_ERROR_SWAPS] += 2;
+
+ /* reset buffer so we always point at the MAC hdr */
+ vlib_buffer_reset (b0);
+ vlib_buffer_reset (b1);
+ h0 = vlib_buffer_get_current (b0);
+ h1 = vlib_buffer_get_current (b1);
+
+ /* Swap 2 x src and dst mac addresses using 8-byte load/stores */
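+	  /*
+	   * Layout note: each u64 load deliberately overlaps the next field;
+	   * the dst_address load also picks up src[0..1], and the src_address
+	   * load also picks up the 2-byte ethertype. Writing "b" into dst
+	   * swaps the MACs; patching the low 16 bits of "a" with those of
+	   * "b" below restores the ethertype in the src slot.
+	   */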
+ tmp0a = clib_net_to_host_u64 (((u64 *) (h0->dst_address))[0]);
+ tmp1a = clib_net_to_host_u64 (((u64 *) (h1->dst_address))[0]);
+ tmp0b = clib_net_to_host_u64 (((u64 *) (h0->src_address))[0]);
+ tmp1b = clib_net_to_host_u64 (((u64 *) (h1->src_address))[0]);
+ ((u64 *) (h0->dst_address))[0] = clib_host_to_net_u64 (tmp0b);
+ ((u64 *) (h1->dst_address))[0] = clib_host_to_net_u64 (tmp1b);
+ /* Move the ethertype from "b" to "a" */
+ tmp0a &= ~(0xFFFF);
+ tmp1a &= ~(0xFFFF);
+ tmp0a |= tmp0b & 0xFFFF;
+ ((u64 *) (h0->src_address))[0] = clib_host_to_net_u64 (tmp0a);
+ tmp1a |= tmp1b & 0xFFFF;
+ ((u64 *) (h1->src_address))[0] = clib_host_to_net_u64 (tmp1a);
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ swap_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ swap_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ clib_memcpy (t->src, h1->src_address, 6);
+ clib_memcpy (t->dst, h1->dst_address, 6);
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ }
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u32 sw_if_index0;
+ uword *p0;
+ u64 tmp0a, tmp0b;
+ ethernet_header_t *h0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ next0 = msm->cached_next_index;
+
+ if (PREDICT_FALSE (msm->cached_sw_if_index != sw_if_index0))
+ {
+ p0 =
+ hash_get (msm->next_node_index_by_sw_if_index, sw_if_index0);
+ if (p0 == 0)
+ {
+ vnet_hw_interface_t *hw0;
+
+ hw0 = vnet_get_sup_hw_interface (msm->vnet_main,
+ sw_if_index0);
+
+ next0 = vlib_node_add_next (msm->vlib_main,
+ mac_swap_node.index,
+ hw0->output_node_index);
+ hash_set (msm->next_node_index_by_sw_if_index,
+ sw_if_index0, next0);
+ }
+ else
+ next0 = p0[0];
+ msm->cached_sw_if_index = sw_if_index0;
+ msm->cached_next_index = next0;
+ }
+
+ em->counters[node_counter_base_index + MAC_SWAP_ERROR_SWAPS] += 1;
+
+ /* reset buffer so we always point at the MAC hdr */
+ vlib_buffer_reset (b0);
+ h0 = vlib_buffer_get_current (b0);
+
+ /* Exchange src and dst, preserve the ethertype */
+ tmp0a = clib_net_to_host_u64 (((u64 *) (h0->dst_address))[0]);
+ tmp0b = clib_net_to_host_u64 (((u64 *) (h0->src_address))[0]);
+ ((u64 *) (h0->dst_address))[0] = clib_host_to_net_u64 (tmp0b);
+ tmp0a &= ~(0xFFFF);
+ tmp0a |= tmp0b & 0xFFFF;
+ ((u64 *) (h0->src_address))[0] = clib_host_to_net_u64 (tmp0a);
+
+ /* ship it */
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ swap_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (mac_swap_node,static) = {
+ .function = mac_swap_node_fn,
+ .name = "mac-swap",
+ .vector_size = sizeof (u32),
+ .format_trace = format_swap_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(mac_swap_error_strings),
+ .error_strings = mac_swap_error_strings,
+
+ .n_next_nodes = MAC_SWAP_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [MAC_SWAP_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+mac_swap_init (vlib_main_t * vm)
+{
+ mac_swap_main_t *msm = &mac_swap_main;
+
+ msm->next_node_index_by_sw_if_index = hash_create (0, sizeof (uword));
+ msm->cached_next_index = (u32) ~ 0;
+ msm->cached_sw_if_index = (u32) ~ 0;
+ msm->vlib_main = vm;
+ msm->vnet_main = vnet_get_main ();
+
+ /* Driver RX nodes send pkts here... */
+#define _(a) ixge_set_next_node (IXGE_RX_NEXT_##a##_INPUT, "mac-swap");
+ foreach_hw_driver_next
+#undef _
+#define _(a) ixgev_set_next_node (IXGEV_RX_NEXT_##a##_INPUT, "mac-swap");
+ foreach_hw_driver_next
+#undef _
+#define _(a) ige_set_next_node (IGE_RX_NEXT_##a##_INPUT, "mac-swap");
+ foreach_hw_driver_next
+#undef _
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (mac_swap_init);
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ethernet/node.c b/src/vnet/ethernet/node.c
new file mode 100755
index 00000000000..6d57da31708
--- /dev/null
+++ b/src/vnet/ethernet/node.c
@@ -0,0 +1,1368 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ethernet_node.c: ethernet packet processing
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vppinfra/sparse_vec.h>
+#include <vnet/l2/l2_bvi.h>
+
+
+#define foreach_ethernet_input_next \
+ _ (PUNT, "error-punt") \
+ _ (DROP, "error-drop") \
+ _ (LLC, "llc-input")
+
+typedef enum
+{
+#define _(s,n) ETHERNET_INPUT_NEXT_##s,
+ foreach_ethernet_input_next
+#undef _
+ ETHERNET_INPUT_N_NEXT,
+} ethernet_input_next_t;
+
+typedef struct
+{
+ u8 packet_data[32];
+} ethernet_input_trace_t;
+
+static u8 *
+format_ethernet_input_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ ethernet_input_trace_t *t = va_arg (*va, ethernet_input_trace_t *);
+
+ s = format (s, "%U", format_ethernet_header, t->packet_data);
+
+ return s;
+}
+
+vlib_node_registration_t ethernet_input_node;
+
+typedef enum
+{
+ ETHERNET_INPUT_VARIANT_ETHERNET,
+ ETHERNET_INPUT_VARIANT_ETHERNET_TYPE,
+ ETHERNET_INPUT_VARIANT_NOT_L2,
+} ethernet_input_variant_t;
+
+
+// Parse the ethernet header to extract vlan tags and innermost ethertype
+static_always_inline void
+parse_header (ethernet_input_variant_t variant,
+ vlib_buffer_t * b0,
+ u16 * type,
+ u16 * orig_type,
+ u16 * outer_id, u16 * inner_id, u32 * match_flags)
+{
+ u8 vlan_count;
+
+ if (variant == ETHERNET_INPUT_VARIANT_ETHERNET
+ || variant == ETHERNET_INPUT_VARIANT_NOT_L2)
+ {
+ ethernet_header_t *e0;
+
+ e0 = (void *) (b0->data + b0->current_data);
+
+ vnet_buffer (b0)->ethernet.start_of_ethernet_header = b0->current_data;
+
+ vlib_buffer_advance (b0, sizeof (e0[0]));
+
+ *type = clib_net_to_host_u16 (e0->type);
+ }
+ else if (variant == ETHERNET_INPUT_VARIANT_ETHERNET_TYPE)
+ {
+ // here when prior node was LLC/SNAP processing
+ u16 *e0;
+
+ e0 = (void *) (b0->data + b0->current_data);
+
+ vlib_buffer_advance (b0, sizeof (e0[0]));
+
+ *type = clib_net_to_host_u16 (e0[0]);
+ }
+
+ // save for distinguishing between dot1q and dot1ad later
+ *orig_type = *type;
+
+ // default the tags to 0 (used if there is no corresponding tag)
+ *outer_id = 0;
+ *inner_id = 0;
+
+ *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_0_TAG;
+ vlan_count = 0;
+
+ // check for vlan encaps
+ if (ethernet_frame_is_tagged (*type))
+ {
+ ethernet_vlan_header_t *h0;
+ u16 tag;
+
+ *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_1_TAG;
+
+ h0 = (void *) (b0->data + b0->current_data);
+
+ tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);
+
+ *outer_id = tag & 0xfff;
+
+ *type = clib_net_to_host_u16 (h0->type);
+
+ vlib_buffer_advance (b0, sizeof (h0[0]));
+ vlan_count = 1;
+
+ if (*type == ETHERNET_TYPE_VLAN)
+ {
+ // Double tagged packet
+ *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_2_TAG;
+
+ h0 = (void *) (b0->data + b0->current_data);
+
+ tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);
+
+ *inner_id = tag & 0xfff;
+
+ *type = clib_net_to_host_u16 (h0->type);
+
+ vlib_buffer_advance (b0, sizeof (h0[0]));
+ vlan_count = 2;
+
+ if (*type == ETHERNET_TYPE_VLAN)
+ {
+ // More than double tagged packet
+ *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_3_TAG;
+	      vlan_count = 3;	// "unknown" number, i.e. 3 or more
+ }
+ }
+ }
+ ethernet_buffer_set_vlan_count (b0, vlan_count);
+}
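+
+// Worked example (illustrative): a dot1ad frame laid out as
+//   dst | src | 0x88a8 | outer vlan 100 | 0x8100 | inner vlan 200 | 0x0800 | ...
+// parses to orig_type = 0x88a8, outer_id = 100, inner_id = 200,
+// type = 0x0800 (the innermost ethertype), vlan_count = 2, and
+// match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_2_TAG.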
+
+// Determine the subinterface for this packet, given the result of the
+// vlan table lookups and vlan header parsing. Check the most specific
+// matches first.
+static_always_inline void
+identify_subint (vnet_hw_interface_t * hi,
+ vlib_buffer_t * b0,
+ u32 match_flags,
+ main_intf_t * main_intf,
+ vlan_intf_t * vlan_intf,
+ qinq_intf_t * qinq_intf,
+ u32 * new_sw_if_index, u8 * error0, u32 * is_l2)
+{
+ u32 matched;
+
+ matched = eth_identify_subint (hi, b0, match_flags,
+ main_intf, vlan_intf, qinq_intf,
+ new_sw_if_index, error0, is_l2);
+
+ if (matched)
+ {
+
+      // Perform the L3 my-mac filter
+      // A unicast packet arriving on an L3 interface must have a dmac
+      // matching the interface mac. This matters in promiscuous mode,
+      // else we would forward packets we aren't supposed to.
+ if (!(*is_l2))
+ {
+ ethernet_header_t *e0;
+ e0 =
+ (void *) (b0->data +
+ vnet_buffer (b0)->ethernet.start_of_ethernet_header);
+
+ if (!(ethernet_address_cast (e0->dst_address)))
+ {
+ if (!eth_mac_equal ((u8 *) e0, hi->hw_address))
+ {
+ *error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
+ }
+ }
+ }
+
+ // Check for down subinterface
+ *error0 = (*new_sw_if_index) != ~0 ? (*error0) : ETHERNET_ERROR_DOWN;
+ }
+}
+
+static_always_inline void
+determine_next_node (ethernet_main_t * em,
+ ethernet_input_variant_t variant,
+ u32 is_l20,
+ u32 type0, vlib_buffer_t * b0, u8 * error0, u8 * next0)
+{
+ if (PREDICT_FALSE (*error0 != ETHERNET_ERROR_NONE))
+ {
+ // some error occurred
+ *next0 = ETHERNET_INPUT_NEXT_DROP;
+ }
+ else if (is_l20)
+ {
+ *next0 = em->l2_next;
+ // record the L2 len and reset the buffer so the L2 header is preserved
+ u32 eth_start = vnet_buffer (b0)->ethernet.start_of_ethernet_header;
+ vnet_buffer (b0)->l2.l2_len = b0->current_data - eth_start;
+ vlib_buffer_advance (b0, -ethernet_buffer_header_size (b0));
+
+ // check for common IP/MPLS ethertypes
+ }
+ else if (type0 == ETHERNET_TYPE_IP4)
+ {
+ *next0 = em->l3_next.input_next_ip4;
+ }
+ else if (type0 == ETHERNET_TYPE_IP6)
+ {
+ *next0 = em->l3_next.input_next_ip6;
+ }
+ else if (type0 == ETHERNET_TYPE_MPLS_UNICAST)
+ {
+ *next0 = em->l3_next.input_next_mpls;
+
+ }
+ else if (em->redirect_l3)
+ {
+ // L3 Redirect is on, the cached common next nodes will be
+ // pointing to the redirect node, catch the uncommon types here
+ *next0 = em->redirect_l3_next;
+ }
+ else
+ {
+ // uncommon ethertype, check table
+ u32 i0;
+ i0 = sparse_vec_index (em->l3_next.input_next_by_type, type0);
+ *next0 = vec_elt (em->l3_next.input_next_by_type, i0);
+ *error0 =
+ i0 ==
+ SPARSE_VEC_INVALID_INDEX ? ETHERNET_ERROR_UNKNOWN_TYPE : *error0;
+
+      // The table is not populated with LLC values, so check that now.
+      // If the variant is not variant_ethernet we already came from LLC
+      // processing; don't go back there, drop instead by keeping the
+      // drop/bad table result.
+ if ((type0 < 0x600) && (variant == ETHERNET_INPUT_VARIANT_ETHERNET))
+ {
+ *next0 = ETHERNET_INPUT_NEXT_LLC;
+ }
+ }
+}
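+
+// Dispatch sketch: an ARP frame (type 0x0806) misses the cached
+// IP4/IP6/MPLS checks above and resolves through the sparse vector; if no
+// node has registered that ethertype, the lookup yields
+// SPARSE_VEC_INVALID_INDEX and the frame is flagged
+// ETHERNET_ERROR_UNKNOWN_TYPE.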
+
+static_always_inline uword
+ethernet_input_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame,
+ ethernet_input_variant_t variant)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ethernet_main_t *em = &ethernet_main;
+ vlib_node_runtime_t *error_node;
+ u32 n_left_from, next_index, *from, *to_next;
+ u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
+ u32 cpu_index = os_get_cpu_number ();
+ u32 cached_sw_if_index = ~0;
+ u32 cached_is_l2 = 0; /* shut up gcc */
+
+ if (variant != ETHERNET_INPUT_VARIANT_ETHERNET)
+ error_node = vlib_node_get_runtime (vm, ethernet_input_node.index);
+ else
+ error_node = node;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node,
+ from,
+ n_left_from,
+ sizeof (from[0]),
+ sizeof (ethernet_input_trace_t));
+
+ next_index = node->cached_next_index;
+ stats_sw_if_index = node->runtime_data[0];
+ stats_n_packets = stats_n_bytes = 0;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u8 next0, next1, error0, error1;
+ u16 type0, orig_type0, type1, orig_type1;
+ u16 outer_id0, inner_id0, outer_id1, inner_id1;
+ u32 match_flags0, match_flags1;
+ u32 old_sw_if_index0, new_sw_if_index0, len0, old_sw_if_index1,
+ new_sw_if_index1, len1;
+ vnet_hw_interface_t *hi0, *hi1;
+ main_intf_t *main_intf0, *main_intf1;
+ vlan_intf_t *vlan_intf0, *vlan_intf1;
+ qinq_intf_t *qinq_intf0, *qinq_intf1;
+ u32 is_l20, is_l21;
+ ethernet_header_t *e0, *e1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *b2, *b3;
+
+ b2 = vlib_get_buffer (vm, from[2]);
+ b3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (b2, STORE);
+ vlib_prefetch_buffer_header (b3, STORE);
+
+ CLIB_PREFETCH (b2->data, sizeof (ethernet_header_t), LOAD);
+ CLIB_PREFETCH (b3->data, sizeof (ethernet_header_t), LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ error0 = error1 = ETHERNET_ERROR_NONE;
+ e0 = vlib_buffer_get_current (b0);
+ type0 = clib_net_to_host_u16 (e0->type);
+ e1 = vlib_buffer_get_current (b1);
+ type1 = clib_net_to_host_u16 (e1->type);
+
+ /* Speed-path for the untagged case */
+ if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
+ && !ethernet_frame_is_tagged (type0)
+ && !ethernet_frame_is_tagged (type1)))
+ {
+ main_intf_t *intf0;
+ subint_config_t *subint0;
+ u32 sw_if_index0, sw_if_index1;
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ is_l20 = cached_is_l2;
+
+ /* This is probably wholly unnecessary */
+ if (PREDICT_FALSE (sw_if_index0 != sw_if_index1))
+ goto slowpath;
+
+ if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
+ {
+ cached_sw_if_index = sw_if_index0;
+ hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
+ intf0 = vec_elt_at_index (em->main_intfs, hi0->hw_if_index);
+ subint0 = &intf0->untagged_subint;
+ cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
+ }
+
+ vnet_buffer (b0)->ethernet.start_of_ethernet_header =
+ b0->current_data;
+ vnet_buffer (b1)->ethernet.start_of_ethernet_header =
+ b1->current_data;
+
+ if (PREDICT_TRUE (is_l20 != 0))
+ {
+ next0 = em->l2_next;
+ vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
+ next1 = em->l2_next;
+ vnet_buffer (b1)->l2.l2_len = sizeof (ethernet_header_t);
+ }
+ else
+ {
+ determine_next_node (em, variant, 0, type0, b0,
+ &error0, &next0);
+ vlib_buffer_advance (b0, sizeof (ethernet_header_t));
+ determine_next_node (em, variant, 0, type1, b1,
+ &error1, &next1);
+ vlib_buffer_advance (b1, sizeof (ethernet_header_t));
+ }
+ goto ship_it01;
+ }
+
+ /* Slow-path for the tagged case */
+ slowpath:
+ parse_header (variant,
+ b0,
+ &type0,
+ &orig_type0, &outer_id0, &inner_id0, &match_flags0);
+
+ parse_header (variant,
+ b1,
+ &type1,
+ &orig_type1, &outer_id1, &inner_id1, &match_flags1);
+
+ old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ old_sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+ eth_vlan_table_lookups (em,
+ vnm,
+ old_sw_if_index0,
+ orig_type0,
+ outer_id0,
+ inner_id0,
+ &hi0,
+ &main_intf0, &vlan_intf0, &qinq_intf0);
+
+ eth_vlan_table_lookups (em,
+ vnm,
+ old_sw_if_index1,
+ orig_type1,
+ outer_id1,
+ inner_id1,
+ &hi1,
+ &main_intf1, &vlan_intf1, &qinq_intf1);
+
+ identify_subint (hi0,
+ b0,
+ match_flags0,
+ main_intf0,
+ vlan_intf0,
+ qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
+
+ identify_subint (hi1,
+ b1,
+ match_flags1,
+ main_intf1,
+ vlan_intf1,
+ qinq_intf1, &new_sw_if_index1, &error1, &is_l21);
+
+ // Save RX sw_if_index for later nodes
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] =
+ error0 !=
+ ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
+ vnet_buffer (b1)->sw_if_index[VLIB_RX] =
+ error1 !=
+ ETHERNET_ERROR_NONE ? old_sw_if_index1 : new_sw_if_index1;
+
+ // Check if there is a stat to take (valid and non-main sw_if_index for pkt 0 or pkt 1)
+ if (((new_sw_if_index0 != ~0)
+ && (new_sw_if_index0 != old_sw_if_index0))
+ || ((new_sw_if_index1 != ~0)
+ && (new_sw_if_index1 != old_sw_if_index1)))
+ {
+
+ len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
+ - vnet_buffer (b0)->ethernet.start_of_ethernet_header;
+ len1 = vlib_buffer_length_in_chain (vm, b1) + b1->current_data
+ - vnet_buffer (b1)->ethernet.start_of_ethernet_header;
+
+ stats_n_packets += 2;
+ stats_n_bytes += len0 + len1;
+
+ if (PREDICT_FALSE
+ (!(new_sw_if_index0 == stats_sw_if_index
+ && new_sw_if_index1 == stats_sw_if_index)))
+ {
+ stats_n_packets -= 2;
+ stats_n_bytes -= len0 + len1;
+
+ if (new_sw_if_index0 != old_sw_if_index0
+ && new_sw_if_index0 != ~0)
+ vlib_increment_combined_counter (vnm->
+ interface_main.combined_sw_if_counters
+ +
+ VNET_INTERFACE_COUNTER_RX,
+ cpu_index,
+ new_sw_if_index0, 1,
+ len0);
+ if (new_sw_if_index1 != old_sw_if_index1
+ && new_sw_if_index1 != ~0)
+ vlib_increment_combined_counter (vnm->
+ interface_main.combined_sw_if_counters
+ +
+ VNET_INTERFACE_COUNTER_RX,
+ cpu_index,
+ new_sw_if_index1, 1,
+ len1);
+
+ if (new_sw_if_index0 == new_sw_if_index1)
+ {
+ if (stats_n_packets > 0)
+ {
+ vlib_increment_combined_counter
+ (vnm->interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ cpu_index,
+ stats_sw_if_index,
+ stats_n_packets, stats_n_bytes);
+ stats_n_packets = stats_n_bytes = 0;
+ }
+ stats_sw_if_index = new_sw_if_index0;
+ }
+ }
+ }
+
+ if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
+ is_l20 = is_l21 = 0;
+
+ determine_next_node (em, variant, is_l20, type0, b0, &error0,
+ &next0);
+ determine_next_node (em, variant, is_l21, type1, b1, &error1,
+ &next1);
+
+ b0->error = error_node->errors[error0];
+ b1->error = error_node->errors[error1];
+
+ ship_it01:
+ // verify speculative enqueue
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, next0,
+ next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u8 error0, next0;
+ u16 type0, orig_type0;
+ u16 outer_id0, inner_id0;
+ u32 match_flags0;
+ u32 old_sw_if_index0, new_sw_if_index0, len0;
+ vnet_hw_interface_t *hi0;
+ main_intf_t *main_intf0;
+ vlan_intf_t *vlan_intf0;
+ qinq_intf_t *qinq_intf0;
+ ethernet_header_t *e0;
+ u32 is_l20;
+
+ // Prefetch next iteration
+ if (n_left_from > 1)
+ {
+ vlib_buffer_t *p2;
+
+ p2 = vlib_get_buffer (vm, from[1]);
+ vlib_prefetch_buffer_header (p2, STORE);
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ error0 = ETHERNET_ERROR_NONE;
+ e0 = vlib_buffer_get_current (b0);
+ type0 = clib_net_to_host_u16 (e0->type);
+
+ /* Speed-path for the untagged case */
+ if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
+ && !ethernet_frame_is_tagged (type0)))
+ {
+ main_intf_t *intf0;
+ subint_config_t *subint0;
+ u32 sw_if_index0;
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ is_l20 = cached_is_l2;
+
+ if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
+ {
+ cached_sw_if_index = sw_if_index0;
+ hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
+ intf0 = vec_elt_at_index (em->main_intfs, hi0->hw_if_index);
+ subint0 = &intf0->untagged_subint;
+ cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
+ }
+
+ vnet_buffer (b0)->ethernet.start_of_ethernet_header =
+ b0->current_data;
+
+ if (PREDICT_TRUE (is_l20 != 0))
+ {
+ next0 = em->l2_next;
+ vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
+ }
+ else
+ {
+ determine_next_node (em, variant, 0, type0, b0,
+ &error0, &next0);
+ vlib_buffer_advance (b0, sizeof (ethernet_header_t));
+ }
+ goto ship_it0;
+ }
+
+ /* Slow-path for the tagged case */
+ parse_header (variant,
+ b0,
+ &type0,
+ &orig_type0, &outer_id0, &inner_id0, &match_flags0);
+
+ old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ eth_vlan_table_lookups (em,
+ vnm,
+ old_sw_if_index0,
+ orig_type0,
+ outer_id0,
+ inner_id0,
+ &hi0,
+ &main_intf0, &vlan_intf0, &qinq_intf0);
+
+ identify_subint (hi0,
+ b0,
+ match_flags0,
+ main_intf0,
+ vlan_intf0,
+ qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
+
+ // Save RX sw_if_index for later nodes
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] =
+ error0 !=
+ ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
+
+	  // Increment subinterface stats.
+	  // Note that interface-level counters have already been incremented
+	  // prior to calling this function. Thus only subinterface counters
+	  // are incremented here.
+	  //
+	  // Interface-level counters include packets received on the main
+	  // interface and all subinterfaces. Subinterface-level counters
+	  // include only those packets received on that subinterface.
+	  // Increment stats only if the subint is valid and not the main intf.
+ if ((new_sw_if_index0 != ~0)
+ && (new_sw_if_index0 != old_sw_if_index0))
+ {
+
+ len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
+ - vnet_buffer (b0)->ethernet.start_of_ethernet_header;
+
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
+
+ // Batch stat increments from the same subinterface so counters
+ // don't need to be incremented for every packet.
+ if (PREDICT_FALSE (new_sw_if_index0 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len0;
+
+ if (new_sw_if_index0 != ~0)
+ vlib_increment_combined_counter
+ (vnm->interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ cpu_index, new_sw_if_index0, 1, len0);
+ if (stats_n_packets > 0)
+ {
+ vlib_increment_combined_counter
+ (vnm->interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ cpu_index,
+ stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ stats_n_packets = stats_n_bytes = 0;
+ }
+ stats_sw_if_index = new_sw_if_index0;
+ }
+ }
+
+ if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
+ is_l20 = 0;
+
+ determine_next_node (em, variant, is_l20, type0, b0, &error0,
+ &next0);
+
+ b0->error = error_node->errors[error0];
+
+ // verify speculative enqueue
+ ship_it0:
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ // Increment any remaining batched stats
+ if (stats_n_packets > 0)
+ {
+ vlib_increment_combined_counter
+ (vnm->interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ node->runtime_data[0] = stats_sw_if_index;
+ }
+
+ return from_frame->n_vectors;
+}
+
+static uword
+ethernet_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ return ethernet_input_inline (vm, node, from_frame,
+ ETHERNET_INPUT_VARIANT_ETHERNET);
+}
+
+static uword
+ethernet_input_type (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ return ethernet_input_inline (vm, node, from_frame,
+ ETHERNET_INPUT_VARIANT_ETHERNET_TYPE);
+}
+
+static uword
+ethernet_input_not_l2 (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ return ethernet_input_inline (vm, node, from_frame,
+ ETHERNET_INPUT_VARIANT_NOT_L2);
+}
+
+
+// Return the subinterface config struct for the given sw_if_index
+// Also return via parameter the appropriate match flags for the
+// configured number of tags.
+// On error (unsupported or not ethernet) return 0.
+static subint_config_t *
+ethernet_sw_interface_get_config (vnet_main_t * vnm,
+ u32 sw_if_index,
+ u32 * flags, u32 * unsupported)
+{
+ ethernet_main_t *em = &ethernet_main;
+ vnet_hw_interface_t *hi;
+ vnet_sw_interface_t *si;
+ main_intf_t *main_intf;
+ vlan_table_t *vlan_table;
+ qinq_table_t *qinq_table;
+ subint_config_t *subint = 0;
+
+ hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+
+ if (!hi || (hi->hw_class_index != ethernet_hw_interface_class.index))
+ {
+ *unsupported = 0;
+ goto done; // non-ethernet interface
+ }
+
+ // ensure there's an entry for the main intf (shouldn't really be necessary)
+ vec_validate (em->main_intfs, hi->hw_if_index);
+ main_intf = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
+
+ // Locate the subint for the given ethernet config
+ si = vnet_get_sw_interface (vnm, sw_if_index);
+
+ if (si->sub.eth.flags.default_sub)
+ {
+ subint = &main_intf->default_subint;
+ *flags = SUBINT_CONFIG_MATCH_0_TAG |
+ SUBINT_CONFIG_MATCH_1_TAG |
+ SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
+ }
+ else if ((si->sub.eth.flags.no_tags) || (si->sub.eth.raw_flags == 0))
+ {
+ // if no flags are set then this is a main interface
+ // so treat as untagged
+ subint = &main_intf->untagged_subint;
+ *flags = SUBINT_CONFIG_MATCH_0_TAG;
+ }
+ else
+ {
+ // one or two tags
+ // first get the vlan table
+ if (si->sub.eth.flags.dot1ad)
+ {
+ if (main_intf->dot1ad_vlans == 0)
+ {
+ // Allocate a vlan table from the pool
+ pool_get (em->vlan_pool, vlan_table);
+ main_intf->dot1ad_vlans = vlan_table - em->vlan_pool;
+ }
+ else
+ {
+ // Get ptr to existing vlan table
+ vlan_table =
+ vec_elt_at_index (em->vlan_pool, main_intf->dot1ad_vlans);
+ }
+ }
+ else
+ { // dot1q
+ if (main_intf->dot1q_vlans == 0)
+ {
+ // Allocate a vlan table from the pool
+ pool_get (em->vlan_pool, vlan_table);
+ main_intf->dot1q_vlans = vlan_table - em->vlan_pool;
+ }
+ else
+ {
+ // Get ptr to existing vlan table
+ vlan_table =
+ vec_elt_at_index (em->vlan_pool, main_intf->dot1q_vlans);
+ }
+ }
+
+ if (si->sub.eth.flags.one_tag)
+ {
+ *flags = si->sub.eth.flags.exact_match ?
+ SUBINT_CONFIG_MATCH_1_TAG :
+ (SUBINT_CONFIG_MATCH_1_TAG |
+ SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
+
+ if (si->sub.eth.flags.outer_vlan_id_any)
+ {
+ // not implemented yet
+ *unsupported = 1;
+ goto done;
+ }
+ else
+ {
+ // a single vlan, a common case
+ subint =
+ &vlan_table->vlans[si->sub.eth.
+ outer_vlan_id].single_tag_subint;
+ }
+
+ }
+ else
+ {
+ // Two tags
+ *flags = si->sub.eth.flags.exact_match ?
+ SUBINT_CONFIG_MATCH_2_TAG :
+ (SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
+
+ if (si->sub.eth.flags.outer_vlan_id_any
+ && si->sub.eth.flags.inner_vlan_id_any)
+ {
+ // not implemented yet
+ *unsupported = 1;
+ goto done;
+ }
+
+ if (si->sub.eth.flags.inner_vlan_id_any)
+ {
+ // a specific outer and "any" inner
+ // don't need a qinq table for this
+ subint =
+ &vlan_table->vlans[si->sub.eth.
+ outer_vlan_id].inner_any_subint;
+ if (si->sub.eth.flags.exact_match)
+ {
+ *flags = SUBINT_CONFIG_MATCH_2_TAG;
+ }
+ else
+ {
+ *flags = SUBINT_CONFIG_MATCH_2_TAG |
+ SUBINT_CONFIG_MATCH_3_TAG;
+ }
+ }
+ else
+ {
+	      // a specific outer + specific inner vlan id, a common case
+
+ // get the qinq table
+ if (vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs == 0)
+ {
+ // Allocate a qinq table from the pool
+ pool_get (em->qinq_pool, qinq_table);
+ vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs =
+ qinq_table - em->qinq_pool;
+ }
+ else
+ {
+ // Get ptr to existing qinq table
+ qinq_table =
+ vec_elt_at_index (em->qinq_pool,
+ vlan_table->vlans[si->sub.
+ eth.outer_vlan_id].
+ qinqs);
+ }
+ subint = &qinq_table->vlans[si->sub.eth.inner_vlan_id].subint;
+ }
+ }
+ }
+
+done:
+ return subint;
+}
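+
+// Example (illustrative): a dot1q subinterface with outer vlan 100 and
+// exact_match set resolves to
+//   subint = &vlan_table->vlans[100].single_tag_subint
+// with *flags = SUBINT_CONFIG_MATCH_1_TAG, where vlan_table is the main
+// interface's dot1q table, allocated from em->vlan_pool on first use.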
+
+clib_error_t *
+ethernet_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
+{
+ subint_config_t *subint;
+ u32 dummy_flags;
+ u32 dummy_unsup;
+ clib_error_t *error = 0;
+
+ // Find the config for this subinterface
+ subint =
+ ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
+ &dummy_unsup);
+
+ if (subint == 0)
+ {
+ // not implemented yet or not ethernet
+ goto done;
+ }
+
+ subint->sw_if_index =
+ ((flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? sw_if_index : ~0);
+
+done:
+ return error;
+}
+
+VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ethernet_sw_interface_up_down);
+
+
+// Set the L2/L3 mode for the subinterface
+void
+ethernet_sw_interface_set_l2_mode (vnet_main_t * vnm, u32 sw_if_index, u32 l2)
+{
+ subint_config_t *subint;
+ u32 dummy_flags;
+ u32 dummy_unsup;
+ int is_port;
+ vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
+
+ is_port = !(sw->type == VNET_SW_INTERFACE_TYPE_SUB);
+
+ // Find the config for this subinterface
+ subint =
+ ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
+ &dummy_unsup);
+
+ if (subint == 0)
+ {
+ // unimplemented or not ethernet
+ goto done;
+ }
+
+ // Double check that the config we found is for our interface (or the interface is down)
+  ASSERT ((subint->sw_if_index == sw_if_index)
+	  || (subint->sw_if_index == ~0));
+
+ if (l2)
+ {
+ subint->flags |= SUBINT_CONFIG_L2;
+ if (is_port)
+ subint->flags |=
+ SUBINT_CONFIG_MATCH_0_TAG | SUBINT_CONFIG_MATCH_1_TAG
+ | SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
+ }
+ else
+ {
+ subint->flags &= ~SUBINT_CONFIG_L2;
+ if (is_port)
+ subint->flags &=
+ ~(SUBINT_CONFIG_MATCH_1_TAG | SUBINT_CONFIG_MATCH_2_TAG
+ | SUBINT_CONFIG_MATCH_3_TAG);
+ }
+
+done:
+ return;
+}
+
+/*
+ * Set the L2/L3 mode for the subinterface regardless of port
+ */
+void
+ethernet_sw_interface_set_l2_mode_noport (vnet_main_t * vnm,
+ u32 sw_if_index, u32 l2)
+{
+ subint_config_t *subint;
+ u32 dummy_flags;
+ u32 dummy_unsup;
+
+ /* Find the config for this subinterface */
+ subint =
+ ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
+ &dummy_unsup);
+
+ if (subint == 0)
+ {
+ /* unimplemented or not ethernet */
+ goto done;
+ }
+
+ /*
+ * Double check that the config we found is for our interface (or the
+ * interface is down)
+ */
+  ASSERT ((subint->sw_if_index == sw_if_index)
+	  || (subint->sw_if_index == ~0));
+
+ if (l2)
+ {
+ subint->flags |= SUBINT_CONFIG_L2;
+ }
+ else
+ {
+ subint->flags &= ~SUBINT_CONFIG_L2;
+ }
+
+done:
+ return;
+}
+
+static clib_error_t *
+ethernet_sw_interface_add_del (vnet_main_t * vnm,
+ u32 sw_if_index, u32 is_create)
+{
+ clib_error_t *error = 0;
+ subint_config_t *subint;
+ u32 match_flags;
+ u32 unsupported = 0;
+
+ // Find the config for this subinterface
+ subint =
+ ethernet_sw_interface_get_config (vnm, sw_if_index, &match_flags,
+ &unsupported);
+
+ if (subint == 0)
+ {
+ // not implemented yet or not ethernet
+ if (unsupported)
+ {
+ // this is the NYI case
+ error = clib_error_return (0, "not implemented yet");
+ }
+ goto done;
+ }
+
+ if (!is_create)
+ {
+ subint->flags = 0;
+ return error;
+ }
+
+ // Initialize the subint
+ if (subint->flags & SUBINT_CONFIG_VALID)
+ {
+ // Error vlan already in use
+ error = clib_error_return (0, "vlan is already in use");
+ }
+ else
+ {
+      // Note that config is L3 by default
+ subint->flags = SUBINT_CONFIG_VALID | match_flags;
+ subint->sw_if_index = ~0; // because interfaces are initially down
+ }
+
+done:
+ return error;
+}
+
+VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ethernet_sw_interface_add_del);
+
+static char *ethernet_error_strings[] = {
+#define ethernet_error(n,c,s) s,
+#include "error.def"
+#undef ethernet_error
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ethernet_input_node) = {
+ .function = ethernet_input,
+ .name = "ethernet-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .n_errors = ETHERNET_N_ERROR,
+ .error_strings = ethernet_error_strings,
+ .n_next_nodes = ETHERNET_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
+ foreach_ethernet_input_next
+#undef _
+ },
+ .format_buffer = format_ethernet_header_with_length,
+ .format_trace = format_ethernet_input_trace,
+ .unformat_buffer = unformat_ethernet_header,
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_NODE_FUNCTION_MULTIARCH (ethernet_input_node, ethernet_input)
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ethernet_input_type_node, static) = {
+ .function = ethernet_input_type,
+ .name = "ethernet-input-type",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .n_next_nodes = ETHERNET_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
+ foreach_ethernet_input_next
+#undef _
+ },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_NODE_FUNCTION_MULTIARCH (ethernet_input_type_node, ethernet_input_type)
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ethernet_input_not_l2_node, static) = {
+ .function = ethernet_input_not_l2,
+ .name = "ethernet-input-not-l2",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .n_next_nodes = ETHERNET_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
+ foreach_ethernet_input_next
+#undef _
+ },
+};
+/* *INDENT-ON* */
+
+
+/* *INDENT-OFF* */
+VLIB_NODE_FUNCTION_MULTIARCH (ethernet_input_not_l2_node,
+ ethernet_input_not_l2)
+/* *INDENT-ON* */
+
+
+void
+ethernet_set_rx_redirect (vnet_main_t * vnm,
+ vnet_hw_interface_t * hi, u32 enable)
+{
+  // Ensure all packets go to ethernet-input (i.e. untagged ipv4 packets
+  // don't go directly to ip4-input)
+ vnet_hw_interface_rx_redirect_to_node
+ (vnm, hi->hw_if_index, enable ? ethernet_input_node.index : ~0);
+}
+
+
+/*
+ * Initialization and registration for the next_by_ethernet structure
+ */
+
+clib_error_t *
+next_by_ethertype_init (next_by_ethertype_t * l3_next)
+{
+ l3_next->input_next_by_type = sparse_vec_new
+ ( /* elt bytes */ sizeof (l3_next->input_next_by_type[0]),
+ /* bits in index */ BITS (((ethernet_header_t *) 0)->type));
+
+ vec_validate (l3_next->sparse_index_by_input_next_index,
+ ETHERNET_INPUT_NEXT_DROP);
+ vec_validate (l3_next->sparse_index_by_input_next_index,
+ ETHERNET_INPUT_NEXT_PUNT);
+ l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_DROP] =
+ SPARSE_VEC_INVALID_INDEX;
+ l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_PUNT] =
+ SPARSE_VEC_INVALID_INDEX;
+
+ /*
+ * Make sure we don't wipe out an ethernet registration by mistake
+ * Can happen if init function ordering constraints are missing.
+ */
+ if (CLIB_DEBUG > 0)
+ {
+ ethernet_main_t *em = &ethernet_main;
+ ASSERT (em->next_by_ethertype_register_called == 0);
+ }
+
+ return 0;
+}
+
+// Add an ethertype -> next index mapping to the structure
+clib_error_t *
+next_by_ethertype_register (next_by_ethertype_t * l3_next,
+ u32 ethertype, u32 next_index)
+{
+ u32 i;
+ u16 *n;
+ ethernet_main_t *em = &ethernet_main;
+
+ if (CLIB_DEBUG > 0)
+ {
+ ethernet_main_t *em = &ethernet_main;
+ em->next_by_ethertype_register_called = 1;
+ }
+
+ /* Setup ethernet type -> next index sparse vector mapping. */
+ n = sparse_vec_validate (l3_next->input_next_by_type, ethertype);
+ n[0] = next_index;
+
+ /* Rebuild next index -> sparse index inverse mapping when sparse vector
+ is updated. */
+ vec_validate (l3_next->sparse_index_by_input_next_index, next_index);
+ for (i = 1; i < vec_len (l3_next->input_next_by_type); i++)
+ l3_next->
+ sparse_index_by_input_next_index[l3_next->input_next_by_type[i]] = i;
+
+  // do not allow the cached next indices to be updated if L3
+ // redirect is enabled, as it will have overwritten them
+ if (!em->redirect_l3)
+ {
+ // Cache common ethertypes directly
+ if (ethertype == ETHERNET_TYPE_IP4)
+ {
+ l3_next->input_next_ip4 = next_index;
+ }
+ else if (ethertype == ETHERNET_TYPE_IP6)
+ {
+ l3_next->input_next_ip6 = next_index;
+ }
+ else if (ethertype == ETHERNET_TYPE_MPLS_UNICAST)
+ {
+ l3_next->input_next_mpls = next_index;
+ }
+ }
+ return 0;
+}
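+
+/*
+ * Usage sketch (next_index value illustrative):
+ *
+ *   next_by_ethertype_register (&em->l3_next, ETHERNET_TYPE_IP4, next_index);
+ *
+ * caches next_index in l3_next->input_next_ip4 so determine_next_node()
+ * can dispatch IP4 frames without a sparse-vector lookup; when L3 redirect
+ * is enabled, the cached entries are left pointing at the redirect node,
+ * as guarded above.
+ */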
+
+
+static clib_error_t *
+ethernet_input_init (vlib_main_t * vm)
+{
+ ethernet_main_t *em = &ethernet_main;
+ __attribute__ ((unused)) vlan_table_t *invalid_vlan_table;
+ __attribute__ ((unused)) qinq_table_t *invalid_qinq_table;
+
+ ethernet_setup_node (vm, ethernet_input_node.index);
+ ethernet_setup_node (vm, ethernet_input_type_node.index);
+ ethernet_setup_node (vm, ethernet_input_not_l2_node.index);
+
+ next_by_ethertype_init (&em->l3_next);
+
+ // Initialize pools and vector for vlan parsing
+ vec_validate (em->main_intfs, 10); // 10 main interfaces
+ pool_alloc (em->vlan_pool, 10);
+ pool_alloc (em->qinq_pool, 1);
+
+ // The first vlan pool will always be reserved for an invalid table
+ pool_get (em->vlan_pool, invalid_vlan_table); // first id = 0
+ // The first qinq pool will always be reserved for an invalid table
+ pool_get (em->qinq_pool, invalid_qinq_table); // first id = 0
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ethernet_input_init);
+
+void
+ethernet_register_input_type (vlib_main_t * vm,
+ ethernet_type_t type, u32 node_index)
+{
+ ethernet_main_t *em = &ethernet_main;
+ ethernet_type_info_t *ti;
+ u32 i;
+
+ {
+ clib_error_t *error = vlib_call_init_function (vm, ethernet_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ ti = ethernet_get_type_info (em, type);
+ ti->node_index = node_index;
+ ti->next_index = vlib_node_add_next (vm,
+ ethernet_input_node.index, node_index);
+ i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
+ ASSERT (i == ti->next_index);
+
+ i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
+ ASSERT (i == ti->next_index);
+
+ // Add the L3 node for this ethertype to the next nodes structure
+ next_by_ethertype_register (&em->l3_next, type, ti->next_index);
+
+ // Call the registration functions for other nodes that want a mapping
+ l2bvi_register_input_type (vm, type, node_index);
+}
+
+void
+ethernet_register_l2_input (vlib_main_t * vm, u32 node_index)
+{
+ ethernet_main_t *em = &ethernet_main;
+ u32 i;
+
+ em->l2_next =
+ vlib_node_add_next (vm, ethernet_input_node.index, node_index);
+
+ /*
+ * Even if we never use these arcs, we have to align the next indices...
+ */
+ i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
+
+ ASSERT (i == em->l2_next);
+
+ i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
+ ASSERT (i == em->l2_next);
+}
+
+// Register a next node for L3 redirect, and enable L3 redirect
+void
+ethernet_register_l3_redirect (vlib_main_t * vm, u32 node_index)
+{
+ ethernet_main_t *em = &ethernet_main;
+ u32 i;
+
+ em->redirect_l3 = 1;
+ em->redirect_l3_next = vlib_node_add_next (vm,
+ ethernet_input_node.index,
+ node_index);
+ /*
+ * Change the cached next nodes to the redirect node
+ */
+ em->l3_next.input_next_ip4 = em->redirect_l3_next;
+ em->l3_next.input_next_ip6 = em->redirect_l3_next;
+ em->l3_next.input_next_mpls = em->redirect_l3_next;
+
+ /*
+ * Even if we never use these arcs, we have to align the next indices...
+ */
+ i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
+
+ ASSERT (i == em->redirect_l3_next);
+
+ i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
+
+ ASSERT (i == em->redirect_l3_next);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ethernet/packet.h b/src/vnet/ethernet/packet.h
new file mode 100644
index 00000000000..964cf638101
--- /dev/null
+++ b/src/vnet/ethernet/packet.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ethernet/packet.h: ethernet packet format.
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_ethernet_packet_h
+#define included_ethernet_packet_h
+
+typedef enum
+{
+#define ethernet_type(n,s) ETHERNET_TYPE_##s = n,
+#include <vnet/ethernet/types.def>
+#undef ethernet_type
+} ethernet_type_t;
+
+typedef struct
+{
+ /* Source/destination address. */
+ u8 dst_address[6];
+ u8 src_address[6];
+
+ /* Ethernet type. */
+ u16 type;
+} ethernet_header_t;
+
+#define ETHERNET_ADDRESS_UNICAST 0
+#define ETHERNET_ADDRESS_MULTICAST 1
+
+/* I/G bit: individual (unicast)/group (broadcast/multicast). */
+always_inline uword
+ethernet_address_cast (u8 * a)
+{
+ return (a[0] >> 0) & 1;
+}
+
+always_inline uword
+ethernet_address_is_locally_administered (u8 * a)
+{
+ return (a[0] >> 1) & 1;
+}
+
+always_inline void
+ethernet_address_set_locally_administered (u8 * a)
+{
+ a[0] |= 1 << 1;
+}
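+
+/* Example: for address 02:00:00:00:00:01 the I/G bit (bit 0 of octet 0)
+   is clear and the U/L bit (bit 1) is set, so ethernet_address_cast()
+   returns 0 (unicast) while
+   ethernet_address_is_locally_administered() returns 1. */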
+
+/* For VLAN ethernet type. */
+typedef struct
+{
+ /* 3 bit priority, 1 bit CFI and 12 bit vlan id. */
+ u16 priority_cfi_and_id;
+
+#define ETHERNET_N_VLAN (1 << 12)
+
+ /* Inner ethernet type. */
+ u16 type;
+} ethernet_vlan_header_t;
+
+
+/* VLAN with ethertype first and vlan id second */
+typedef struct
+{
+ /* vlan type */
+ u16 type;
+
+ /* 3 bit priority, 1 bit CFI and 12 bit vlan id. */
+ u16 priority_cfi_and_id;
+} ethernet_vlan_header_tv_t;
+
+/* PBB header with B-TAG - backbone VLAN indicator and I-TAG - service encapsulation */
+typedef struct
+{
+ /* Backbone source/destination address. */
+ u8 b_dst_address[6];
+ u8 b_src_address[6];
+
+ /* B-tag */
+ u16 b_type;
+ /* 3 bit priority, 1 bit DEI and 12 bit vlan id */
+ u16 priority_dei_id;
+
+ /* I-tag */
+ u16 i_type;
+ /* 3 bit priority, 1 bit DEI, 1 bit UCA, 3 bit RES and 24 bit I_SID (service identifier) */
+ u32 priority_dei_uca_res_sid;
+
+#define ETHERNET_N_PBB (1 << 24)
+} ethernet_pbb_header_t;
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct
+{
+ /* Backbone source/destination address. */
+ u8 b_dst_address[6];
+ u8 b_src_address[6];
+
+ /* B-tag */
+ u16 b_type;
+ /* 3 bit priority, 1 bit DEI and 12 bit vlan id */
+ u16 priority_dei_id;
+
+ /* I-tag */
+ u16 i_type;
+ /* 3 bit priority, 1 bit DEI, 1 bit UCA, 3 bit RES and 24 bit I_SID (service identifier) */
+ u32 priority_dei_uca_res_sid;
+}) ethernet_pbb_header_packed_t;
+/* *INDENT-ON* */
+
+#endif /* included_ethernet_packet_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ethernet/pg.c b/src/vnet/ethernet/pg.c
new file mode 100644
index 00000000000..67ccfcf5fbe
--- /dev/null
+++ b/src/vnet/ethernet/pg.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ethernet_pg.c: packet generator ethernet interface
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+
+typedef struct
+{
+ pg_edit_t type;
+ pg_edit_t src_address;
+ pg_edit_t dst_address;
+} pg_ethernet_header_t;
+
+static inline void
+pg_ethernet_header_init (pg_ethernet_header_t * e)
+{
+ pg_edit_init (&e->type, ethernet_header_t, type);
+ pg_edit_init (&e->src_address, ethernet_header_t, src_address);
+ pg_edit_init (&e->dst_address, ethernet_header_t, dst_address);
+}
+
+typedef struct
+{
+ pg_edit_t type;
+ pg_edit_t id;
+ pg_edit_t cfi;
+ pg_edit_t priority;
+} pg_ethernet_vlan_header_t;
+
+static inline void
+pg_ethernet_vlan_header_init (pg_ethernet_vlan_header_t * v, int vlan_index)
+{
+ ASSERT (vlan_index < ARRAY_LEN (((ethernet_max_header_t *) 0)->vlan));
+ pg_edit_init (&v->type, ethernet_max_header_t, vlan[vlan_index].type);
+
+ pg_edit_init_bitfield (&v->id, ethernet_max_header_t,
+ vlan[vlan_index].priority_cfi_and_id, 0, 12);
+ pg_edit_init_bitfield (&v->cfi, ethernet_max_header_t,
+ vlan[vlan_index].priority_cfi_and_id, 12, 1);
+ pg_edit_init_bitfield (&v->priority, ethernet_max_header_t,
+ vlan[vlan_index].priority_cfi_and_id, 13, 3);
+}
+
+uword
+unformat_pg_ethernet_header (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t *s = va_arg (*args, pg_stream_t *);
+ pg_ethernet_header_t *e;
+ pg_ethernet_vlan_header_t *v;
+ pg_edit_t *ether_type_edit;
+ u32 n_vlan, error, group_index;
+
+ e = pg_create_edit_group (s, sizeof (e[0]), sizeof (ethernet_header_t),
+ &group_index);
+ pg_ethernet_header_init (e);
+ error = 1;
+
+ if (!unformat (input, "%U: %U -> %U",
+ unformat_pg_edit,
+ unformat_ethernet_type_net_byte_order, &e->type,
+ unformat_pg_edit,
+ unformat_ethernet_address, &e->src_address,
+ unformat_pg_edit,
+ unformat_ethernet_address, &e->dst_address))
+ goto done;
+
+ n_vlan = 0;
+ while (unformat (input, "vlan"))
+ {
+ v = pg_add_edits (s, sizeof (v[0]), sizeof (ethernet_vlan_header_t),
+ group_index);
+ pg_ethernet_vlan_header_init (v, n_vlan);
+
+ if (!unformat_user (input, unformat_pg_edit,
+ unformat_pg_number, &v->id))
+ goto done;
+
+ if (!unformat (input, "priority %U", unformat_pg_edit,
+ unformat_pg_number, &v->priority))
+ pg_edit_set_fixed (&v->priority, 0);
+
+ if (!unformat (input, "cfi %U", unformat_pg_edit,
+ unformat_pg_number, &v->cfi))
+ pg_edit_set_fixed (&v->cfi, 0);
+
+ /* Too many vlans given. */
+ if (n_vlan >= 2)
+ goto done;
+
+ n_vlan++;
+ }
+
+ /* Address of e may have changed due to vlan edits being added */
+ e = pg_get_edit_group (s, group_index);
+ v = (void *) (e + 1);
+
+ /* Correct types for vlan packets. */
+ ether_type_edit = &e->type;
+ if (n_vlan > 0)
+ {
+ int i;
+
+ ether_type_edit = &v[n_vlan - 1].type;
+ pg_edit_copy_type_and_values (ether_type_edit, &e->type);
+ pg_edit_set_fixed (&e->type, ETHERNET_TYPE_VLAN);
+
+ for (i = 0; i < n_vlan - 1; i++)
+ pg_edit_set_fixed (&v[i].type, ETHERNET_TYPE_VLAN);
+ }
+
+ {
+ ethernet_main_t *em = &ethernet_main;
+ ethernet_type_info_t *ti = 0;
+ pg_node_t *pg_node = 0;
+
+ if (ether_type_edit->type == PG_EDIT_FIXED)
+ {
+ u16 t = *(u16 *) ether_type_edit->values[PG_EDIT_LO];
+ ti = ethernet_get_type_info (em, clib_net_to_host_u16 (t));
+ if (ti && ti->node_index != ~0)
+ pg_node = pg_get_node (ti->node_index);
+ }
+
+ if (pg_node && pg_node->unformat_edit
+ && unformat_user (input, pg_node->unformat_edit, s))
+ ;
+ else if (!unformat_user (input, unformat_pg_payload, s))
+ goto done;
+ }
+
+ error = 0;
+
+done:
+ if (error)
+ pg_free_edit_group (s);
+ return error == 0;
+}
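+
+/*
+ * Editorial note: illustrative inputs this parser accepts (values are
+ * arbitrary, not from the original source):
+ *
+ * IP4: 0050.56b7.0001 -> 0050.56b7.0002
+ * IP4: 0050.56b7.0001 -> 0050.56b7.0002 vlan 100 priority 3
+ *
+ * i.e. "<type>: <src> -> <dst>" followed by up to two "vlan <id>"
+ * clauses, each with optional "priority <n>" and "cfi <n>" edits,
+ * then an inner-header or payload specification.
+ */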
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ethernet/types.def b/src/vnet/ethernet/types.def
new file mode 100644
index 00000000000..643f3152a85
--- /dev/null
+++ b/src/vnet/ethernet/types.def
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* Emacs editing mode -*-C-*- Ethernet types. */
+
+/*
+ * ethernet types
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* Types < 0x600 (1536) are LLC packet lengths. */
+ethernet_type (0x600, LLC_LENGTH)
+
+ethernet_type (0x600, XNS_IDP)
+ethernet_type (0x800, IP4)
+ethernet_type (0x806, ARP)
+ethernet_type (0x0BAD, VINES_IP)
+ethernet_type (0x0BAE, VINES_LOOPBACK)
+ethernet_type (0x0BAF, VINES_ECHO)
+ethernet_type (0x1984, TRAIN)
+ethernet_type (0x2000, CDP)
+ethernet_type (0x2001, CGMP)
+ethernet_type (0x2007, SRP_CONTROL)
+ethernet_type (0x2452, CENTRINO_PROMISC)
+ethernet_type (0x6000, DECNET)
+ethernet_type (0x6001, DECNET_DUMP_LOAD)
+ethernet_type (0x6002, DECNET_REMOTE_CONSOLE)
+ethernet_type (0x6003, DECNET_ROUTE)
+ethernet_type (0x6004, DEC_LAT)
+ethernet_type (0x6005, DEC_DIAGNOSTIC)
+ethernet_type (0x6006, DEC_CUSTOMER)
+ethernet_type (0x6007, DEC_SCA)
+ethernet_type (0x6558, TRANSPARENT_BRIDGING)
+ethernet_type (0x6559, RAW_FRAME_RELAY)
+ethernet_type (0x8035, REVERSE_ARP)
+ethernet_type (0x8038, DEC_LAN_BRIDGE)
+ethernet_type (0x803D, DEC_ETHERNET_ENCRYPTION)
+ethernet_type (0x803F, DEC_LAN_TRAFFIC_MONITOR)
+ethernet_type (0x8041, DEC_LAST)
+ethernet_type (0x809B, APPLETALK)
+ethernet_type (0x80D5, IBM_SNA)
+ethernet_type (0x80F3, APPLETALK_AARP)
+ethernet_type (0x80FF, WELLFLEET_COMPRESSION)
+ethernet_type (0x8100, VLAN)
+ethernet_type (0x8137, IPX)
+ethernet_type (0x814C, SNMP)
+ethernet_type (0x81FD, CABLETRON_ISMP)
+ethernet_type (0x81FF, CABLETRON_ISMP_TBFLOOD)
+ethernet_type (0x86DD, IP6)
+ethernet_type (0x86DF, ATOMIC)
+ethernet_type (0x876B, TCP_IP_COMPRESSION)
+ethernet_type (0x876C, IP_AUTONOMOUS_SYSTEMS)
+ethernet_type (0x876D, SECURE_DATA)
+ethernet_type (0x8808, MAC_CONTROL)
+ethernet_type (0x8809, SLOW_PROTOCOLS)
+ethernet_type (0x880B, PPP)
+ethernet_type (0x8847, MPLS_UNICAST)
+ethernet_type (0x8848, MPLS_MULTICAST)
+ethernet_type (0x8863, PPPOE_DISCOVERY)
+ethernet_type (0x8864, PPPOE_SESSION)
+ethernet_type (0x886D, INTEL_ANS)
+ethernet_type (0x886F, MICROSOFT_NLB_HEARTBEAT)
+ethernet_type (0x8881, CDMA_2000)
+ethernet_type (0x888e, 802_1X_AUTHENTICATION)
+ethernet_type (0x8892, PROFINET)
+ethernet_type (0x889a, HYPERSCSI)
+ethernet_type (0x88a2, AOE)
+ethernet_type (0x88a8, DOT1AD)
+ethernet_type (0x88AE, BRDWALK)
+ethernet_type (0x88B7, 802_OUI_EXTENDED)
+ethernet_type (0x88c7, 802_11I_PRE_AUTHENTICATION)
+ethernet_type (0x88cc, 802_1_LLDP)
+ethernet_type (0x88e7, DOT1AH)
+ethernet_type (0x894f, VPATH_3)
+ethernet_type (0x9000, LOOPBACK)
+ethernet_type (0x9021, RTNET_MAC)
+ethernet_type (0x9022, RTNET_CONFIG)
+ethernet_type (0x9100, VLAN_9100)
+ethernet_type (0x9200, VLAN_9200)
+ethernet_type (0x9999, PGLAN)
+ethernet_type (0xFEFE, SRP_ISIS)
+ethernet_type (0xFFFF, RESERVED)
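+
+/* Editorial note: this file is an X-macro list; each consumer defines
+ ethernet_type() before including it. A minimal sketch of a consumer,
+ consistent with the ethernet_type_t enum built in
+ <vnet/ethernet/packet.h>:
+
+ #define ethernet_type(n, f) ETHERNET_TYPE_##f = n,
+ typedef enum
+ {
+ #include <vnet/ethernet/types.def>
+ } ethernet_type_t;
+ #undef ethernet_type
+*/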
diff --git a/src/vnet/feature/feature.c b/src/vnet/feature/feature.c
new file mode 100644
index 00000000000..032fe784ace
--- /dev/null
+++ b/src/vnet/feature/feature.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/feature/feature.h>
+
+vnet_feature_main_t feature_main;
+
+static clib_error_t *
+vnet_feature_init (vlib_main_t * vm)
+{
+ vnet_feature_main_t *fm = &feature_main;
+ vnet_feature_registration_t *freg;
+ vnet_feature_arc_registration_t *areg;
+ u32 arc_index = 0;
+
+ fm->arc_index_by_name = hash_create_string (0, sizeof (uword));
+ areg = fm->next_arc;
+
+ /* process feature arc registrations */
+ while (areg)
+ {
+ char *s;
+ int i = 0;
+ areg->feature_arc_index = arc_index;
+ if (areg->arc_index_ptr)
+ *areg->arc_index_ptr = arc_index;
+ hash_set_mem (fm->arc_index_by_name, areg->arc_name,
+ pointer_to_uword (areg));
+
+ /* process start nodes */
+ while ((s = areg->start_nodes[i]))
+ {
+ i++;
+ }
+ areg->n_start_nodes = i;
+
+ /* next */
+ areg = areg->next;
+ arc_index++;
+ }
+
+ vec_validate (fm->next_feature_by_arc, arc_index - 1);
+ vec_validate (fm->feature_nodes, arc_index - 1);
+ vec_validate (fm->feature_config_mains, arc_index - 1);
+ vec_validate (fm->next_feature_by_name, arc_index - 1);
+ vec_validate (fm->sw_if_index_has_features, arc_index - 1);
+ vec_validate (fm->feature_count_by_sw_if_index, arc_index - 1);
+
+ freg = fm->next_feature;
+ while (freg)
+ {
+ vnet_feature_registration_t *next;
+ uword *p = hash_get_mem (fm->arc_index_by_name, freg->arc_name);
+ if (p == 0)
+ return clib_error_return (0, "Unknown feature arc '%s'",
+ freg->arc_name);
+
+ areg = uword_to_pointer (p[0], vnet_feature_arc_registration_t *);
+ arc_index = areg->feature_arc_index;
+
+ next = freg->next;
+ freg->next = fm->next_feature_by_arc[arc_index];
+ fm->next_feature_by_arc[arc_index] = freg;
+
+ /* next */
+ freg = next;
+ }
+
+ areg = fm->next_arc;
+ while (areg)
+ {
+ clib_error_t *error;
+ vnet_feature_config_main_t *cm;
+ vnet_config_main_t *vcm;
+
+ arc_index = areg->feature_arc_index;
+ cm = &fm->feature_config_mains[arc_index];
+ vcm = &cm->config_main;
+ if ((error = vnet_feature_arc_init (vm, vcm,
+ areg->start_nodes,
+ areg->n_start_nodes,
+ fm->next_feature_by_arc[arc_index],
+ &fm->feature_nodes[arc_index])))
+ {
+ return error;
+ }
+
+ fm->next_feature_by_name[arc_index] =
+ hash_create_string (0, sizeof (uword));
+ freg = fm->next_feature_by_arc[arc_index];
+
+ while (freg)
+ {
+ hash_set_mem (fm->next_feature_by_name[arc_index],
+ freg->node_name, pointer_to_uword (freg));
+ freg = freg->next;
+ }
+
+ cm->end_feature_index =
+ vnet_get_feature_index (arc_index, areg->end_node);
+
+ /* next */
+ areg = areg->next;
+ arc_index++;
+ }
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (vnet_feature_init);
+
+u8
+vnet_get_feature_arc_index (const char *s)
+{
+ vnet_feature_main_t *fm = &feature_main;
+ vnet_feature_arc_registration_t *reg;
+ uword *p;
+
+ p = hash_get_mem (fm->arc_index_by_name, s);
+ if (p == 0)
+ return ~0;
+
+ reg = uword_to_pointer (p[0], vnet_feature_arc_registration_t *);
+ return reg->feature_arc_index;
+}
+
+vnet_feature_registration_t *
+vnet_get_feature_reg (const char *arc_name, const char *node_name)
+{
+ u8 arc_index;
+
+ arc_index = vnet_get_feature_arc_index (arc_name);
+ if (arc_index == (u8) ~ 0)
+ return 0;
+
+ vnet_feature_main_t *fm = &feature_main;
+ vnet_feature_registration_t *reg;
+ uword *p;
+
+ p = hash_get_mem (fm->next_feature_by_name[arc_index], node_name);
+ if (p == 0)
+ return 0;
+
+ reg = uword_to_pointer (p[0], vnet_feature_registration_t *);
+ return reg;
+}
+
+u32
+vnet_get_feature_index (u8 arc, const char *s)
+{
+ vnet_feature_main_t *fm = &feature_main;
+ vnet_feature_registration_t *reg;
+ uword *p;
+
+ if (s == 0)
+ return ~0;
+
+ p = hash_get_mem (fm->next_feature_by_name[arc], s);
+ if (p == 0)
+ return ~0;
+
+ reg = uword_to_pointer (p[0], vnet_feature_registration_t *);
+ return reg->feature_index;
+}
+
+int
+vnet_feature_enable_disable_with_index (u8 arc_index, u32 feature_index,
+ u32 sw_if_index, int enable_disable,
+ void *feature_config,
+ u32 n_feature_config_bytes)
+{
+ vnet_feature_main_t *fm = &feature_main;
+ vnet_feature_config_main_t *cm;
+ i16 feature_count;
+ int is_first_or_last;
+ u32 ci;
+
+ if (arc_index == (u8) ~ 0)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ if (feature_index == ~0)
+ return VNET_API_ERROR_INVALID_VALUE_2;
+
+ cm = &fm->feature_config_mains[arc_index];
+ vec_validate_init_empty (cm->config_index_by_sw_if_index, sw_if_index, ~0);
+ ci = cm->config_index_by_sw_if_index[sw_if_index];
+
+ vec_validate (fm->feature_count_by_sw_if_index[arc_index], sw_if_index);
+ feature_count = fm->feature_count_by_sw_if_index[arc_index][sw_if_index];
+
+ if (!enable_disable && feature_count < 1)
+ return 0;
+
+ ci = (enable_disable
+ ? vnet_config_add_feature
+ : vnet_config_del_feature)
+ (vlib_get_main (), &cm->config_main, ci, feature_index, feature_config,
+ n_feature_config_bytes);
+ cm->config_index_by_sw_if_index[sw_if_index] = ci;
+
+ /* update feature count */
+ enable_disable = (enable_disable > 0);
+ feature_count += enable_disable ? 1 : -1;
+ is_first_or_last = (feature_count == enable_disable);
+ ASSERT (feature_count >= 0);
+
+ if (is_first_or_last && cm->end_feature_index != ~0)
+ {
+ /* register end node */
+ ci = (enable_disable
+ ? vnet_config_add_feature
+ : vnet_config_del_feature)
+ (vlib_get_main (), &cm->config_main, ci, cm->end_feature_index, 0, 0);
+ cm->config_index_by_sw_if_index[sw_if_index] = ci;
+ }
+
+ fm->sw_if_index_has_features[arc_index] =
+ clib_bitmap_set (fm->sw_if_index_has_features[arc_index], sw_if_index,
+ (feature_count > 0));
+
+ fm->feature_count_by_sw_if_index[arc_index][sw_if_index] = feature_count;
+ return 0;
+}
+
+int
+vnet_feature_enable_disable (const char *arc_name, const char *node_name,
+ u32 sw_if_index, int enable_disable,
+ void *feature_config, u32 n_feature_config_bytes)
+{
+ u32 feature_index;
+ u8 arc_index;
+
+ arc_index = vnet_get_feature_arc_index (arc_name);
+
+ if (arc_index == (u8) ~ 0)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ feature_index = vnet_get_feature_index (arc_index, node_name);
+
+ return vnet_feature_enable_disable_with_index (arc_index, feature_index,
+ sw_if_index, enable_disable,
+ feature_config,
+ n_feature_config_bytes);
+}
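+
+/* Editorial sketch: a typical call to the function above (feature and
+ arc names are hypothetical; the two trailing arguments pass no
+ per-feature config data):
+
+ int rv;
+ rv = vnet_feature_enable_disable ("ip4-unicast", "my-feature",
+ sw_if_index, 1, 0, 0);
+ if (rv)
+ clib_warning ("enable failed: %d", rv);
+
+ Passing 0 as the fourth argument disables the feature instead. */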
+
+
+/** Display the set of available driver features.
+ Useful for verifying that expected features are present
+*/
+
+static clib_error_t *
+show_features_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_feature_main_t *fm = &feature_main;
+ vnet_feature_arc_registration_t *areg;
+ vnet_feature_registration_t *freg;
+
+ vlib_cli_output (vm, "Available feature paths");
+
+ areg = fm->next_arc;
+ while (areg)
+ {
+ vlib_cli_output (vm, "%s:", areg->arc_name);
+ freg = fm->next_feature_by_arc[areg->feature_arc_index];
+ while (freg)
+ {
+ vlib_cli_output (vm, " %s\n", freg->node_name);
+ freg = freg->next;
+ }
+
+
+ /* next */
+ areg = areg->next;
+ }
+
+ return 0;
+}
+
+/*?
+ * Display the set of available driver features
+ *
+ * @cliexpar
+ * Example:
+ * @cliexcmd{show features}
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_features_command, static) = {
+ .path = "show features",
+ .short_help = "show features",
+ .function = show_features_command_fn,
+};
+/* *INDENT-ON* */
+
+/** Display the set of driver features configured on a specific interface
+ * Called by "show interface" handler
+ */
+
+void
+vnet_interface_features_show (vlib_main_t * vm, u32 sw_if_index)
+{
+ vnet_feature_main_t *fm = &feature_main;
+ u32 node_index, current_config_index;
+ u16 feature_arc;
+ vnet_feature_config_main_t *cm = fm->feature_config_mains;
+ vnet_feature_arc_registration_t *areg;
+ vnet_config_main_t *vcm;
+ vnet_config_t *cfg;
+ u32 cfg_index;
+ vnet_config_feature_t *feat;
+ vlib_node_t *n;
+ int i;
+
+ vlib_cli_output (vm, "Driver feature paths configured on %U...",
+ format_vnet_sw_if_index_name,
+ vnet_get_main (), sw_if_index);
+
+ areg = fm->next_arc;
+ while (areg)
+ {
+ feature_arc = areg->feature_arc_index;
+ vcm = &(cm[feature_arc].config_main);
+
+ vlib_cli_output (vm, "\n%s:", areg->arc_name);
+ areg = areg->next;
+
+ if (NULL == cm[feature_arc].config_index_by_sw_if_index ||
+ vec_len (cm[feature_arc].config_index_by_sw_if_index) <=
+ sw_if_index)
+ {
+ vlib_cli_output (vm, " none configured");
+ continue;
+ }
+
+ current_config_index =
+ vec_elt (cm[feature_arc].config_index_by_sw_if_index, sw_if_index);
+
+ if (current_config_index == ~0)
+ {
+ vlib_cli_output (vm, " none configured");
+ continue;
+ }
+
+ ASSERT (current_config_index
+ < vec_len (vcm->config_pool_index_by_user_index));
+
+ cfg_index = vcm->config_pool_index_by_user_index[current_config_index];
+ cfg = pool_elt_at_index (vcm->config_pool, cfg_index);
+
+ for (i = 0; i < vec_len (cfg->features); i++)
+ {
+ feat = cfg->features + i;
+ node_index = feat->node_index;
+ n = vlib_get_node (vm, node_index);
+ vlib_cli_output (vm, " %v", n->name);
+ }
+ }
+}
+
+static clib_error_t *
+set_interface_features_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ unformat_input_t _line_input, *line_input = &_line_input;
+ clib_error_t *error = 0;
+
+ u8 *arc_name = 0;
+ u8 *feature_name = 0;
+ u32 sw_if_index = ~0;
+ u8 enable = 1;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ goto done;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (line_input, "%U %v", unformat_vnet_sw_interface, vnm, &sw_if_index,
+ &feature_name))
+ ;
+ else if (unformat (line_input, "arc %v", &arc_name))
+ ;
+ else if (unformat (line_input, "disable"))
+ enable = 0;
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+
+ if (sw_if_index == ~0)
+ {
+ error = clib_error_return (0, "Interface not specified...");
+ goto done;
+ }
+
+ vec_add1 (arc_name, 0);
+ vec_add1 (feature_name, 0);
+
+ vnet_feature_registration_t *reg;
+ reg =
+ vnet_get_feature_reg ((const char *) arc_name,
+ (const char *) feature_name);
+ if (reg == 0)
+ {
+ error = clib_error_return (0, "Unknown feature...");
+ goto done;
+ }
+ if (reg->enable_disable_cb)
+ error = reg->enable_disable_cb (sw_if_index, enable);
+ if (!error)
+ vnet_feature_enable_disable ((const char *) arc_name,
+ (const char *) feature_name, sw_if_index,
+ enable, 0, 0);
+
+done:
+ vec_free (feature_name);
+ vec_free (arc_name);
+ return error;
+}
+
+/*?
+ * Set a feature on a given interface
+ *
+ * @cliexpar
+ * Example:
+ * @cliexcmd{set interface feature GigabitEthernet2/0/0 ip4_flow_classify arc ip4_unicast}
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_feature_command, static) = {
+ .path = "set interface feature",
+ .short_help = "set interface feature <intfc> <feature_name> arc <arc_name>",
+ .function = set_interface_features_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/feature/feature.h b/src/vnet/feature/feature.h
new file mode 100644
index 00000000000..b27aaf17804
--- /dev/null
+++ b/src/vnet/feature/feature.h
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_features_h
+#define included_features_h
+
+#include <vnet/vnet.h>
+#include <vnet/api_errno.h>
+
+/** feature registration object */
+typedef struct _vnet_feature_arc_registration
+{
+ /** next registration in list of all registrations*/
+ struct _vnet_feature_arc_registration *next;
+ /** Feature Arc name */
+ char *arc_name;
+ /** Start nodes */
+ char **start_nodes;
+ int n_start_nodes;
+ /** End node */
+ char *end_node;
+ /* Feature arc index, assigned by init function */
+ u8 feature_arc_index;
+ u8 *arc_index_ptr;
+} vnet_feature_arc_registration_t;
+
+/* Enable feature callback. */
+typedef clib_error_t *(vnet_feature_enable_disable_function_t)
+ (u32 sw_if_index, int enable_disable);
+
+/** feature registration object */
+typedef struct _vnet_feature_registration
+{
+ /** next registration in list of all registrations*/
+ struct _vnet_feature_registration *next;
+ /** Feature arc name */
+ char *arc_name;
+ /** Graph node name */
+ char *node_name;
+ /** Pointer to this feature index, filled in by vnet_feature_arc_init */
+ u32 *feature_index_ptr;
+ u32 feature_index;
+ /** Constraints of the form "this feature runs before X" */
+ char **runs_before;
+ /** Constraints of the form "this feature runs after Y" */
+ char **runs_after;
+
+ /** Function to enable/disable feature **/
+ vnet_feature_enable_disable_function_t *enable_disable_cb;
+} vnet_feature_registration_t;
+
+typedef struct vnet_feature_config_main_t_
+{
+ vnet_config_main_t config_main;
+ u32 *config_index_by_sw_if_index;
+ u32 end_feature_index;
+} vnet_feature_config_main_t;
+
+typedef struct
+{
+ /** feature arc configuration list */
+ vnet_feature_arc_registration_t *next_arc;
+ uword **arc_index_by_name;
+
+ /** feature path configuration lists */
+ vnet_feature_registration_t *next_feature;
+ vnet_feature_registration_t **next_feature_by_arc;
+ uword **next_feature_by_name;
+
+ /** feature config main objects */
+ vnet_feature_config_main_t *feature_config_mains;
+
+ /** Save partial order results for show command */
+ char ***feature_nodes;
+
+ /** bitmap of interfaces which have driver rx features configured */
+ uword **sw_if_index_has_features;
+
+ /** feature reference counts by interface */
+ i16 **feature_count_by_sw_if_index;
+
+ /** Feature arc index for device-input */
+ u8 device_input_feature_arc_index;
+
+ /** convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} vnet_feature_main_t;
+
+extern vnet_feature_main_t feature_main;
+
+#define VNET_FEATURE_ARC_INIT(x,...) \
+ __VA_ARGS__ vnet_feature_arc_registration_t vnet_feat_arc_##x;\
+static void __vnet_add_feature_arc_registration_##x (void) \
+ __attribute__((__constructor__)) ; \
+static void __vnet_add_feature_arc_registration_##x (void) \
+{ \
+ vnet_feature_main_t * fm = &feature_main; \
+ vnet_feat_arc_##x.next = fm->next_arc; \
+ fm->next_arc = & vnet_feat_arc_##x; \
+} \
+__VA_ARGS__ vnet_feature_arc_registration_t vnet_feat_arc_##x
+
+#define VNET_FEATURE_INIT(x,...) \
+ __VA_ARGS__ vnet_feature_registration_t vnet_feat_##x; \
+static void __vnet_add_feature_registration_##x (void) \
+ __attribute__((__constructor__)) ; \
+static void __vnet_add_feature_registration_##x (void) \
+{ \
+ vnet_feature_main_t * fm = &feature_main; \
+ vnet_feat_##x.next = fm->next_feature; \
+ fm->next_feature = & vnet_feat_##x; \
+} \
+__VA_ARGS__ vnet_feature_registration_t vnet_feat_##x
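+
+/* Editorial sketch: how the two macros above are typically paired
+ (arc and node names are hypothetical; runs_before takes a
+ null-terminated list built with VNET_FEATURES, defined below):
+
+ VNET_FEATURE_ARC_INIT (my_arc, static) =
+ {
+ .arc_name = "my-arc",
+ .start_nodes = VNET_FEATURES ("my-arc-start"),
+ };
+
+ VNET_FEATURE_INIT (my_feat, static) =
+ {
+ .arc_name = "my-arc",
+ .node_name = "my-feature-node",
+ .runs_before = VNET_FEATURES ("another-feature-node"),
+ };
+*/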
+
+void
+vnet_config_update_feature_count (vnet_feature_main_t * fm, u8 arc,
+ u32 sw_if_index, int is_add);
+
+u32 vnet_get_feature_index (u8 arc, const char *s);
+u8 vnet_get_feature_arc_index (const char *s);
+vnet_feature_registration_t *vnet_get_feature_reg (const char *arc_name,
+ const char *node_name);
+
+
+int
+vnet_feature_enable_disable_with_index (u8 arc_index, u32 feature_index,
+ u32 sw_if_index, int enable_disable,
+ void *feature_config,
+ u32 n_feature_config_bytes);
+
+int
+vnet_feature_enable_disable (const char *arc_name, const char *node_name,
+ u32 sw_if_index, int enable_disable,
+ void *feature_config,
+ u32 n_feature_config_bytes);
+
+static inline vnet_feature_config_main_t *
+vnet_get_feature_arc_config_main (u8 arc_index)
+{
+ vnet_feature_main_t *fm = &feature_main;
+
+ if (arc_index == (u8) ~ 0)
+ return 0;
+
+ return &fm->feature_config_mains[arc_index];
+}
+
+static_always_inline vnet_feature_config_main_t *
+vnet_feature_get_config_main (u16 arc)
+{
+ vnet_feature_main_t *fm = &feature_main;
+ return &fm->feature_config_mains[arc];
+}
+
+static_always_inline int
+vnet_have_features (u8 arc, u32 sw_if_index)
+{
+ vnet_feature_main_t *fm = &feature_main;
+ return clib_bitmap_get (fm->sw_if_index_has_features[arc], sw_if_index);
+}
+
+static_always_inline u32
+vnet_get_feature_config_index (u8 arc, u32 sw_if_index)
+{
+ vnet_feature_main_t *fm = &feature_main;
+ vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc];
+ return vec_elt (cm->config_index_by_sw_if_index, sw_if_index);
+}
+
+static_always_inline void *
+vnet_feature_arc_start_with_data (u8 arc, u32 sw_if_index, u32 * next,
+ vlib_buffer_t * b, u32 n_data_bytes)
+{
+ vnet_feature_main_t *fm = &feature_main;
+ vnet_feature_config_main_t *cm;
+ cm = &fm->feature_config_mains[arc];
+
+ if (PREDICT_FALSE (vnet_have_features (arc, sw_if_index)))
+ {
+ b->feature_arc_index = arc;
+ b->current_config_index =
+ vec_elt (cm->config_index_by_sw_if_index, sw_if_index);
+ return vnet_get_config_data (&cm->config_main, &b->current_config_index,
+ next, n_data_bytes);
+ }
+ return 0;
+}
+
+static_always_inline void
+vnet_feature_arc_start (u8 arc, u32 sw_if_index, u32 * next0,
+ vlib_buffer_t * b0)
+{
+ vnet_feature_arc_start_with_data (arc, sw_if_index, next0, b0, 0);
+}
+
+static_always_inline void *
+vnet_feature_next_with_data (u32 sw_if_index, u32 * next0,
+ vlib_buffer_t * b0, u32 n_data_bytes)
+{
+ vnet_feature_main_t *fm = &feature_main;
+ u8 arc = b0->feature_arc_index;
+ vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc];
+
+ return vnet_get_config_data (&cm->config_main,
+ &b0->current_config_index, next0,
+ n_data_bytes);
+}
+
+static_always_inline void
+vnet_feature_next (u32 sw_if_index, u32 * next0, vlib_buffer_t * b0)
+{
+ vnet_feature_next_with_data (sw_if_index, next0, b0, 0);
+}
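+
+/* Editorial sketch: typical per-packet use of vnet_feature_next ()
+ inside a feature node (names are illustrative):
+
+ u32 next0 = 0;
+ u32 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ vnet_feature_next (sw_if_index0, &next0, b0);
+
+ b0 is then enqueued to next0 unless the feature drops or redirects
+ the packet. */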
+
+static_always_inline void
+vnet_feature_start_device_input_x1 (u32 sw_if_index, u32 * next0,
+ vlib_buffer_t * b0, u16 buffer_advanced0)
+{
+ vnet_feature_main_t *fm = &feature_main;
+ vnet_feature_config_main_t *cm;
+ u8 feature_arc_index = fm->device_input_feature_arc_index;
+ cm = &fm->feature_config_mains[feature_arc_index];
+
+ if (PREDICT_FALSE
+ (clib_bitmap_get
+ (fm->sw_if_index_has_features[feature_arc_index], sw_if_index)))
+ {
+ /*
+ * Save next0 so that the last feature in the chain
+ * can skip ethernet-input if indicated...
+ */
+ vnet_buffer (b0)->device_input_feat.saved_next_index = *next0;
+ vnet_buffer (b0)->device_input_feat.buffer_advance = buffer_advanced0;
+ vlib_buffer_advance (b0, -buffer_advanced0);
+
+ b0->feature_arc_index = feature_arc_index;
+ b0->current_config_index =
+ vec_elt (cm->config_index_by_sw_if_index, sw_if_index);
+ vnet_get_config_data (&cm->config_main, &b0->current_config_index,
+ next0, /* # bytes of config data */ 0);
+ }
+}
+
+static_always_inline void
+vnet_feature_start_device_input_x2 (u32 sw_if_index,
+ u32 * next0,
+ u32 * next1,
+ vlib_buffer_t * b0,
+ vlib_buffer_t * b1,
+ u16 buffer_advanced0,
+ u16 buffer_advanced1)
+{
+ vnet_feature_main_t *fm = &feature_main;
+ vnet_feature_config_main_t *cm;
+ u8 feature_arc_index = fm->device_input_feature_arc_index;
+ cm = &fm->feature_config_mains[feature_arc_index];
+
+ if (PREDICT_FALSE
+ (clib_bitmap_get
+ (fm->sw_if_index_has_features[feature_arc_index], sw_if_index)))
+ {
+ /*
+ * Save next0 so that the last feature in the chain
+ * can skip ethernet-input if indicated...
+ */
+ vnet_buffer (b0)->device_input_feat.saved_next_index = *next0;
+ vnet_buffer (b1)->device_input_feat.saved_next_index = *next1;
+ vnet_buffer (b0)->device_input_feat.buffer_advance = buffer_advanced0;
+ vnet_buffer (b1)->device_input_feat.buffer_advance = buffer_advanced1;
+ vlib_buffer_advance (b0, -buffer_advanced0);
+ vlib_buffer_advance (b1, -buffer_advanced1);
+
+ b0->feature_arc_index = feature_arc_index;
+ b1->feature_arc_index = feature_arc_index;
+ b0->current_config_index =
+ vec_elt (cm->config_index_by_sw_if_index, sw_if_index);
+ b1->current_config_index = b0->current_config_index;
+ vnet_get_config_data (&cm->config_main, &b0->current_config_index,
+ next0, /* # bytes of config data */ 0);
+ vnet_get_config_data (&cm->config_main, &b1->current_config_index,
+ next1, /* # bytes of config data */ 0);
+ }
+}
+
+static_always_inline void
+vnet_feature_start_device_input_x4 (u32 sw_if_index,
+ u32 * next0,
+ u32 * next1,
+ u32 * next2,
+ u32 * next3,
+ vlib_buffer_t * b0,
+ vlib_buffer_t * b1,
+ vlib_buffer_t * b2,
+ vlib_buffer_t * b3,
+ u16 buffer_advanced0,
+ u16 buffer_advanced1,
+ u16 buffer_advanced2,
+ u16 buffer_advanced3)
+{
+ vnet_feature_main_t *fm = &feature_main;
+ vnet_feature_config_main_t *cm;
+ u8 feature_arc_index = fm->device_input_feature_arc_index;
+ cm = &fm->feature_config_mains[feature_arc_index];
+
+ if (PREDICT_FALSE
+ (clib_bitmap_get
+ (fm->sw_if_index_has_features[feature_arc_index], sw_if_index)))
+ {
+ /*
+ * Save next0 so that the last feature in the chain
+ * can skip ethernet-input if indicated...
+ */
+ vnet_buffer (b0)->device_input_feat.saved_next_index = *next0;
+ vnet_buffer (b1)->device_input_feat.saved_next_index = *next1;
+ vnet_buffer (b2)->device_input_feat.saved_next_index = *next2;
+ vnet_buffer (b3)->device_input_feat.saved_next_index = *next3;
+
+ vnet_buffer (b0)->device_input_feat.buffer_advance = buffer_advanced0;
+ vnet_buffer (b1)->device_input_feat.buffer_advance = buffer_advanced1;
+ vnet_buffer (b2)->device_input_feat.buffer_advance = buffer_advanced2;
+ vnet_buffer (b3)->device_input_feat.buffer_advance = buffer_advanced3;
+
+ vlib_buffer_advance (b0, -buffer_advanced0);
+ vlib_buffer_advance (b1, -buffer_advanced1);
+ vlib_buffer_advance (b2, -buffer_advanced2);
+ vlib_buffer_advance (b3, -buffer_advanced3);
+
+ b0->feature_arc_index = feature_arc_index;
+ b1->feature_arc_index = feature_arc_index;
+ b2->feature_arc_index = feature_arc_index;
+ b3->feature_arc_index = feature_arc_index;
+
+ b0->current_config_index =
+ vec_elt (cm->config_index_by_sw_if_index, sw_if_index);
+ b1->current_config_index = b0->current_config_index;
+ b2->current_config_index = b0->current_config_index;
+ b3->current_config_index = b0->current_config_index;
+
+ vnet_get_config_data (&cm->config_main, &b0->current_config_index,
+ next0, /* # bytes of config data */ 0);
+ vnet_get_config_data (&cm->config_main, &b1->current_config_index,
+ next1, /* # bytes of config data */ 0);
+ vnet_get_config_data (&cm->config_main, &b2->current_config_index,
+ next2, /* # bytes of config data */ 0);
+ vnet_get_config_data (&cm->config_main, &b3->current_config_index,
+ next3, /* # bytes of config data */ 0);
+ }
+}
+
+#define VNET_FEATURES(...) (char*[]) { __VA_ARGS__, 0}
+
+clib_error_t *vnet_feature_arc_init (vlib_main_t * vm,
+ vnet_config_main_t * vcm,
+ char **feature_start_nodes,
+ int num_feature_start_nodes,
+ vnet_feature_registration_t *
+ first_reg, char ***feature_nodes);
+
+void vnet_interface_features_show (vlib_main_t * vm, u32 sw_if_index);
+
+#endif /* included_features_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/feature/registration.c b/src/vnet/feature/registration.c
new file mode 100644
index 00000000000..1deeeef904c
--- /dev/null
+++ b/src/vnet/feature/registration.c
@@ -0,0 +1,301 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/mpls/mpls.h>
+
+/**
+ * @file
+ * @brief Feature Subgraph Ordering.
+
+ Dynamically compute feature subgraph ordering by performing a
+ topological sort across a set of "feature A before feature B" and
+ "feature C after feature B" constraints.
+
+ Use the topological sort result to set up vnet_config_main_t's for
+ use at runtime.
+
+ Feature subgraph arcs are simple enough. They start at specific
+ fixed nodes, and end at specific fixed nodes. In between, a
+ per-interface current feature configuration dictates which
+ additional nodes each packet visits. Each so-called feature node
+ can [of course] drop any specific packet.
+
+ See ip4_forward.c, ip6_forward.c in this directory to see the
+ current rx-unicast, rx-multicast, and tx feature subgraph arc
+ definitions.
+
+ Let's say that we wish to add a new feature to the ip4 unicast
+ feature subgraph arc, which needs to run before @c ip4-lookup. In
+ either base code or a plugin,
+ <CODE><PRE>
+ \#include <vnet/feature/feature.h>
+ </PRE></CODE>
+
+ and add the new feature as shown:
+
+ <CODE><PRE>
+ VNET_FEATURE_INIT (my_ip4_unicast_feature, static) =
+ {
+ .arc_name = "ip4-unicast",
+ .node_name = "my-ip4-unicast-feature",
+ .runs_before = VNET_FEATURES ("ip4-lookup"),
+ };
+ </PRE></CODE>
+
+ Here's the standard coding pattern to enable / disable
+ @c my-ip4-unicast-feature on an interface:
+
+ <CODE><PRE>
+
+ sw_if_index = <interface-handle>
+ vnet_feature_enable_disable ("ip4-unicast", "my-ip4-unicast-feature",
+ sw_if_index, 1 );
+ </PRE></CODE>
+
+ Here's how to obtain the correct next node index in packet
+ processing code, aka in the implementation of @c my-ip4-unicast-feature:
+
+ <CODE><PRE>
+ vnet_feature_next (sw_if_index0, &next0, b0);
+
+ </PRE></CODE>
+
+ Nodes are free to drop or otherwise redirect packets. Packets
+ which "pass" should be enqueued via the next0 arc computed by
+ vnet_feature_next.
+*/
+
+
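+/* Split "a,b" in place at the first comma: *a receives the first
+ string, *b the second. Returns 0 on success, 1 if no comma found. */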
+static int
+comma_split (u8 * s, u8 ** a, u8 ** b)
+{
+ *a = s;
+
+ while (*s && *s != ',')
+ s++;
+
+ if (*s == ',')
+ *s = 0;
+ else
+ return 1;
+
+ *b = (u8 *) (s + 1);
+ return 0;
+}
+
+/**
+ * @brief Initialize a feature graph arc
+ * @param vm vlib main structure pointer
+ * @param vcm vnet config main structure pointer
+ * @param feature_start_nodes names of start-nodes which use this
+ * feature graph arc
+ * @param num_feature_start_nodes number of start-nodes
+ * @param first_reg first element in
+ * [an __attribute__((constructor)) function built, or
+ * otherwise created] singly-linked list of feature registrations
+ * @param [out] in_feature_nodes returned vector of
+ * topologically-sorted feature node names, for use in
+ * show commands
+ * @returns 0 on success, otherwise an error message. Errors
+ * are fatal since they invariably involve mistyped node-names, or
+ * genuinely missing node-names
+ */
+clib_error_t *
+vnet_feature_arc_init (vlib_main_t * vm,
+ vnet_config_main_t * vcm,
+ char **feature_start_nodes,
+ int num_feature_start_nodes,
+ vnet_feature_registration_t * first_reg,
+ char ***in_feature_nodes)
+{
+ uword *index_by_name;
+ uword *reg_by_index;
+ u8 **node_names = 0;
+ u8 *node_name;
+ char **these_constraints;
+ char *this_constraint_c;
+ u8 **constraints = 0;
+ u8 *constraint_tuple;
+ u8 *this_constraint;
+ u8 **orig, **closure;
+ uword *p;
+ int i, j, k;
+ u8 *a_name, *b_name;
+ int a_index, b_index;
+ int n_features;
+ u32 *result = 0;
+ vnet_feature_registration_t *this_reg = 0;
+ char **feature_nodes = 0;
+ hash_pair_t *hp;
+ u8 **keys_to_delete = 0;
+
+ index_by_name = hash_create_string (0, sizeof (uword));
+ reg_by_index = hash_create (0, sizeof (uword));
+
+ this_reg = first_reg;
+
+ /* pass 1, collect feature node names, construct a before b pairs */
+ while (this_reg)
+ {
+ node_name = format (0, "%s%c", this_reg->node_name, 0);
+ hash_set (reg_by_index, vec_len (node_names), (uword) this_reg);
+
+ hash_set_mem (index_by_name, node_name, vec_len (node_names));
+
+ vec_add1 (node_names, node_name);
+
+ these_constraints = this_reg->runs_before;
+ while (these_constraints && these_constraints[0])
+ {
+ this_constraint_c = these_constraints[0];
+
+ constraint_tuple = format (0, "%s,%s%c", node_name,
+ this_constraint_c, 0);
+ vec_add1 (constraints, constraint_tuple);
+ these_constraints++;
+ }
+
+ these_constraints = this_reg->runs_after;
+ while (these_constraints && these_constraints[0])
+ {
+ this_constraint_c = these_constraints[0];
+
+ constraint_tuple = format (0, "%s,%s%c",
+ this_constraint_c, node_name, 0);
+ vec_add1 (constraints, constraint_tuple);
+ these_constraints++;
+ }
+
+ this_reg = this_reg->next;
+ }
+
+ n_features = vec_len (node_names);
+ orig = clib_ptclosure_alloc (n_features);
+
+ for (i = 0; i < vec_len (constraints); i++)
+ {
+ this_constraint = constraints[i];
+
+ if (comma_split (this_constraint, &a_name, &b_name))
+ return clib_error_return (0, "comma_split failed!");
+
+ p = hash_get_mem (index_by_name, a_name);
+ /*
+ * Note: the next two errors mean that something is
+ * b0rked. As in: if you code "A depends on B," and you forget
+ * to define a FEATURE_INIT macro for B, you lose.
+ * Nonexistent graph nodes are tolerated.
+ */
+ if (p == 0)
+ return clib_error_return (0, "feature node '%s' not found", a_name);
+ a_index = p[0];
+
+ p = hash_get_mem (index_by_name, b_name);
+ if (p == 0)
+ return clib_error_return (0, "feature node '%s' not found", b_name);
+ b_index = p[0];
+
+ /* add a before b to the original set of constraints */
+ orig[a_index][b_index] = 1;
+ vec_free (this_constraint);
+ }
+
+ /* Compute the positive transitive closure of the original constraints */
+ closure = clib_ptclosure (orig);
+
+ /* Compute a partial order across feature nodes, if one exists. */
+again:
+ for (i = 0; i < n_features; i++)
+ {
+ for (j = 0; j < n_features; j++)
+ {
+ if (closure[i][j])
+ goto item_constrained;
+ }
+ /* Item i can be output */
+ vec_add1 (result, i);
+ {
+ for (k = 0; k < n_features; k++)
+ closure[k][i] = 0;
+ /*
+ * Add a "Magic" a before a constraint.
+ * This means we'll never output it again
+ */
+ closure[i][i] = 1;
+ goto again;
+ }
+ item_constrained:
+ ;
+ }
+
+ /* see if we got a partial order... */
+ if (vec_len (result) != n_features)
+ return clib_error_return (0, "%d feature_init_cast no partial order!");
+
+ /*
+ * We win.
+ * Bind the index variables, and output the feature node name vector
+ * using the partial order we just computed. Result is in stack
+ * order, because the entry with the fewest constraints (e.g. none)
+ * is output first, etc.
+ */
+
+ for (i = n_features - 1; i >= 0; i--)
+ {
+ p = hash_get (reg_by_index, result[i]);
+ ASSERT (p != 0);
+ this_reg = (vnet_feature_registration_t *) p[0];
+ if (this_reg->feature_index_ptr)
+ *this_reg->feature_index_ptr = n_features - (i + 1);
+ this_reg->feature_index = n_features - (i + 1);
+ vec_add1 (feature_nodes, this_reg->node_name);
+ }
+
+ /* Set up the config infrastructure */
+ vnet_config_init (vm, vcm,
+ feature_start_nodes,
+ num_feature_start_nodes,
+ feature_nodes, vec_len (feature_nodes));
+
+ /* Save a copy for show command */
+ *in_feature_nodes = feature_nodes;
+
+ /* Finally, clean up all the shit we allocated */
+ /* *INDENT-OFF* */
+ hash_foreach_pair (hp, index_by_name,
+ ({
+ vec_add1 (keys_to_delete, (u8 *)hp->key);
+ }));
+ /* *INDENT-ON* */
+ hash_free (index_by_name);
+ for (i = 0; i < vec_len (keys_to_delete); i++)
+ vec_free (keys_to_delete[i]);
+ vec_free (keys_to_delete);
+ hash_free (reg_by_index);
+ vec_free (result);
+ clib_ptclosure_free (orig);
+ clib_ptclosure_free (closure);
+ return 0;
+}
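+
+/* Editorial example: given registrations A (runs_before B) and B
+ (runs_before C), pass 1 yields the tuples "A,B" and "B,C"; the
+ transitive closure marks A<B, B<C and A<C; the output loop then
+ emits C, B, A in stack order, so the final feature indices are
+ A=0, B=1, C=2 and feature_nodes = [A, B, C]. */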
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/fib/fib.c b/src/vnet/fib/fib.c
new file mode 100644
index 00000000000..413f93e893c
--- /dev/null
+++ b/src/vnet/fib/fib.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/fib/fib_entry_src.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_path.h>
+#include <vnet/fib/fib_walk.h>
+#include <vnet/fib/fib_path_list.h>
+
+static clib_error_t *
+fib_module_init (vlib_main_t * vm)
+{
+ clib_error_t * error;
+
+ if ((error = vlib_call_init_function (vm, dpo_module_init)))
+ return (error);
+ if ((error = vlib_call_init_function (vm, adj_module_init)))
+ return (error);
+
+ fib_entry_module_init();
+ fib_entry_src_module_init();
+ fib_path_module_init();
+ fib_path_list_module_init();
+ fib_walk_module_init();
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (fib_module_init);
diff --git a/src/vnet/fib/fib.h b/src/vnet/fib/fib.h
new file mode 100644
index 00000000000..7cf1d136935
--- /dev/null
+++ b/src/vnet/fib/fib.h
@@ -0,0 +1,652 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * \brief
+ * An IP v4/6 independent FIB.
+ *
+ * The main functions provided by the FIB are as follows;
+ *
+ * - source priorities
+ *
+ * A route can be added to the FIB by more than one entity or source. Sources
+ * include, but are not limited to, API, CLI, LISP, MAP, etc (for the full list
+ * see fib_entry.h). Each source provides the forwarding information (FI) that
+ * it has determined is required for that route. Since each source determines
+ * the FI using different best path and loop prevention algorithms, it is not
+ * correct for the FI of multiple sources to be combined. Instead the FIB must
+ * choose to use the FI from only one source. This choice is based on a static
+ * priority assignment. For example;
+ * If a prefix is added as a result of interface configuration:
+ * set interface address 192.168.1.1/24 GigE0
+ * and then it is also added from the CLI
+ * ip route 192.168.1.1/32 via 2.2.2.2/32
+ * then the 'interface' source will prevail, and the route will remain as
+ * 'local'.
+ * The requirement of the FIB is to always install the FI from the winning
+ * source, and also to maintain the FI added by losing sources so that it can
+ * be installed should the winning source be withdrawn.
+ *
+ * - adj-fib maintenance
+ *
+ * When ARP or ND discover a neighbour on a link, an adjacency forms for the
+ * address of that neighbour. It is also required to insert an entry for that
+ * neighbour into the FIB table corresponding to the VRF for the link. This
+ * entry is often referred to as an adj-fib. Adj-fibs
+ * have a dedicated source; 'ADJ'.
+ * The priority of the ADJ source is lower than most. This is so the following
+ * config;
+ * set interface address 192.168.1.1/32 GigE0
+ * ip arp 192.168.1.2 GigE0 dead.dead.dead
+ * ip route add 192.168.1.2 via 10.10.10.10 GigE1
+ * will forward traffic for 192.168.1.2 via GigE1. That is the route added
+ * by the control plane is favoured over the adjacency discovered by ARP.
+ * The control plane, with its associated authentication, is considered the
+ * authoritative source.
+ * To counter the nefarious addition of adj-fibs through the injection of
+ * bogus adjacencies, the FIB is also required to ensure that only adj-fibs whose
+ * less specific covering prefix is connected are installed in forwarding. This
+ * requires the use of 'cover tracking', where a route maintains a dependency
+ * relationship with the route that is its less specific cover. When this cover
+ * changes (i.e. there is a new covering route) or the forwarding information
+ * of the cover changes, then the covered route is notified.
+ *
+ * Overlapping sub-nets are not supported, so no adj-fib has multiple paths.
+ * The control plane is expected to remove a prefix configured for an interface
+ * before the interface changes VRF.
+ * So while the following config is accepted:
+ * set interface address 192.168.1.1/32 GigE0
+ * ip arp 192.168.1.2 GigE0 dead.dead.dead
+ * set interface ip table GigE0 2
+ * it does not result in the desired behaviour.
+ *
+ * - attached export.
+ *
+ * Further to adj-fib maintenance above consider the following config:
+ * set interface address 192.168.1.1/24 GigE0
+ * ip route add table 2 192.168.1.0/24 GigE0
+ * Traffic destined for 192.168.1.2 in table 2 will generate an ARP request
+ * on GigE0. However, since GigE0 is in table 0, all adj-fibs will be added in
+ * FIB 0. Hence all hosts in the sub-net are unreachable from table 2. To resolve
+ * this, all adj-fib and local prefixes are exported (i.e. copied) from the
+ * 'export' table 0, to the 'import' table 2. There can be many import tables
+ * for a single export table.
+ *
+ * - recursive route resolution
+ *
+ * A recursive route is of the form:
+ * 1.1.1.1/32 via 10.10.10.10
+ * i.e. a route for which no egress interface is provided. In order to forward
+ * traffic to 1.1.1.1/32 the FIB must therefore first determine how to forward
+ * traffic to 10.10.10.10/32. This is recursive resolution.
+ * Recursive resolution, just like normal resolution, proceeds via a longest
+ * prefix match for the 'via-address' 10.10.10.10. Note it is only possible
+ * to add routes via an address (i.e. a /32 or /128) not via a shorter mask
+ * prefix. There is no use case for the latter.
+ * Since recursive resolution proceeds via a longest prefix match, the entry
+ * in the FIB that will resolve the recursive route, termed the via-entry, may
+ * change as other routes are added to the FIB. Consider the recursive
+ * route shown above, and this non-recursive route:
+ * 10.10.10.0/24 via 192.168.16.1 GigE0
+ * The entry for 10.10.10.0/24 is thus the resolving via-entry. If this entry is
+ * modified, to say;
+ * 10.10.10.0/24 via 192.16.1.3 GigE0
+ * Then packets for 1.1.1.1/32 must also be sent to the new next-hop.
+ * Now consider the addition of;
+ * 10.10.10.0/28 via 192.168.16.2 GigE0
+ * The more specific /28 is a better longest prefix match and thus becomes the
+ * via-entry. Removal of the /28 means the resolution will revert to the /24.
+ * The tracking to the changes in recursive resolution is the requirement of
+ * the FIB. When the forwarding information of the via-entry changes a back-walk
+ * is used to update dependent recursive routes. When new routes are added to
+ * the table the cover tracking feature provides the necessary notifications to
+ * the via-entry routes.
+ * The adjacency constructed for 1.1.1.1/32 will be a recursive adjacency
+ * whose next adjacency will be contributed from the via-entry. Maintaining
+ * the validity of this recursive adjacency is a requirement of the FIB.
+ *
+ * - recursive loop avoidance
+ *
+ * Consider this set of routes:
+ * 1.1.1.1/32 via 2.2.2.2
+ * 2.2.2.2/32 via 3.3.3.3
+ * 3.3.3.3/32 via 1.1.1.1
+ * this is termed a recursion loop - all of the routes in the loop are
+ * unresolved in so far as they do not have a resolving adjacency, but each
+ * is resolved because the via-entry is known. It is important here to note
+ * the distinction between the control-plane objects and the data-plane objects
+ * (more details in the implementation section). The control plane objects must
+ * allow the loop to form (i.e. the graph becomes cyclic), however, the
+ * data-plane absolutely must not allow the loop to form, otherwise the packet
+ * would loop indefinitely and never egress the device - meltdown would follow.
+ * The control plane must allow the loop to form, because when the loop breaks,
+ * all members of the loop need to be updated. Forming the loop allows the
+ * dependencies to be correctly setup to allow this to happen.
+ * There is no limit to the depth of recursion supported by VPP so:
+ * 9.9.9.100/32 via 9.9.9.99
+ * 9.9.9.99/32 via 9.9.9.98
+ * 9.9.9.98/32 via 9.9.9.97
+ * ... turtles, turtles, turtles ...
+ * 9.9.9.1/32 via 10.10.10.10 Gig0
+ * is supported for as many layers of turtles as desired; however, when
+ * back-walking a graph (in this case from 9.9.9.1/32 up toward 9.9.9.100/32)
+ * a FIB needs to differentiate the case where the recursion is deep versus
+ * the case where the recursion is looped. A simple method, employed by VPP FIB,
+ * is to limit the number of steps. VPP FIB limit is 16. Typical BGP scenarios
+ * in the wild do not exceed 3 (BGP Inter-AS option C).
+ *
+ * - Fast Convergence
+ *
+ * After a network topology change, the 'convergence' time is the time taken
+ * for the router to complete a transition to forward traffic using the new
+ * topology. The convergence time is therefore a summation of the time to;
+ * - detect the failure.
+ * - calculate the new 'best path' information
+ * - download the new best paths to the data-plane.
+ * - install those best paths in data-plane forwarding.
+ * The last two points are of relevance to the VPP architecture. The download
+ * API is binary and batched; details are not discussed here. There is no HW
+ * component to programme; installation time is bounded by the memory
+ * allocation and table lookup and insert access times.
+ *
+ * 'Fast' convergence refers to a set of technologies that a FIB can employ to
+ * completely or partially restore forwarding whilst the convergence actions
+ * listed above are ongoing. Fast convergence technologies are further
+ * sub-divided into Prefix Independent Convergence (PIC) and Loop Free
+ * Alternate path Fast re-route (LFA-FRR or sometimes called IP-FRR) which
+ * affect recursive and non-recursive routes respectively.
+ *
+ * LFA-FRR
+ *
+ * Consider the network topology below:
+ *
+ * C
+ * / \
+ * X -- A --- B - Y
+ * | |
+ * D F
+ * \ /
+ * E
+ *
+ * all links are equal cost, traffic is passing from X to Y. the best path is
+ * X-A-B-Y. There are two alternative paths, one via C and one via E. An
+ * alternate path is considered to be loop free if no other router on that path
+ * would forward the traffic back to the sender. Consider router C, its best
+ * path to Y is via B, so if A were to send traffic destined to Y to C, then C
+ * would forward that traffic to B - this is a loop-free alternate path. In
+ * contrast consider router D. D's shortest path to Y is via A, so if A were to
+ * send traffic destined to Y via D, then D would send it back to A; this is
+ * not a loop-free alternate path. There are several points of note;
+ * - we are considering the pre-failure routing topology
+ * - any equal-cost multi-path between A and B is also a LFA path.
+ * - in order for A to calculate LFA paths it must be aware of the best-path
+ * to Y from the perspective of D. These calculations are thus limited to
+ * routing protocols that have a full view of the network topology, i.e.
+ * link-state DB protocols like OSPF or an SDN controller. LFA protected
+ * prefixes are thus non-recursive.
+ *
+ * LFA is specified as a 1 to 1 redundancy; a primary path has only one LFA
+ * (a.k.a. backup) path. To my knowledge this limitation is one of complexity
+ * in the calculation of, and capacity planning with, 1:n redundancy.
+ *
+ * In the event that the link A-B fails, the alternate path via C can be used.
+ * In order to provide 'fast' failover in the event of a failure, the control
+ * plane will download both the primary and the backup path to the FIB. It is
+ * then a requirement of the FIB to perform the failover (a.k.a cutover) from
+ * the primary to the backup path as quickly as possible, and particularly
+ * without any other control-plane intervention. The expectation is cutover is
+ * less than 50 milli-seconds - a value allegedly from the VOIP QoS. Note that
+ * cutover time still includes the fault detection time, which in a virtualised
+ * environment could be the dominant factor. Failure detection can be either a
+ * link down, which will affect multiple paths on a multi-access interface, or
+ * via a specific path heartbeat (i.e. BFD).
+ * At this time VPP does not support LFA, that is it does not support the
+ * installation of a primary and backup path[s] for a route. However, it does
+ * support ECMP, and VPP FIB is designed to quickly remove failed paths from
+ * the ECMP set, however, it does not insert shared objects specific to the
+ * protected resource into the forwarding object graph, since this would incur
+ * a forwarding/performance cost. Failover time is thus route number dependent.
+ * Details are provided in the implementation section below.
+ *
+ * PIC
+ *
+ * PIC refers to the concept that the convergence time should be independent of
+ * the number of prefixes/routes that are affected by the failure. PIC is
+ * therefore most appropriate when considering networks with large number of
+ * prefixes, i.e. BGP networks and thus recursive prefixes. There are several
+ * flavours of PIC covering different locations of protection and failure
+ * scenarios. An outline is given below, see the literature for more details:
+ *
+ * Y/16 - CE1 -- PE1---\
+ * | \ P1---\
+ * | \ PE3 -- CE3 - X/16
+ * | - P2---/
+ * Y/16 - CE2 -- PE2---/
+ *
+ * CE = customer edge, PE = provider edge. external-BGP runs between customer
+ * and provider, internal-BGP runs between provider and provider.
+ *
+ * 1) iBGP PIC-core: consider traffic from CE1 to X/16 via CE3. On PE1 there
+ * are routes;
+ * X/16 (and hundreds of thousands of others like it)
+ * via PE3
+ * and
+ * PE3/32 (its loopback address)
+ * via 10.0.0.1 Link0 (this is P1)
+ * via 10.1.1.1 Link1 (this is P2)
+ * the failure is the loss of link0 or link1
+ * As in all PIC scenarios, in order to provide prefix independent convergence
+ * it must be that the route for X/16 (and all other routes via PE3) do not
+ * need to be updated in the FIB. The FIB therefore needs to update a single
+ * object that is shared by all routes - once this shared object is updated,
+ * then all routes using it will be instantly updated to use the new forwarding
+ * information. In this case the shared object is the resolving route via PE3.
+ * Once the route via PE3 is updated via IGP (OSPF) convergence, then all
+ * recursive routes that resolve through it are also updated. VPP FIB
+ * implements this scenario via a recursive-adjacency. The X/16 and its sibling
+ * routes share a recursive-adjacency that links to/points at/stacks on the
+ * normal adjacency contributed by the route for PE3. Once this shared
+ * recursive adj is re-linked then all routes are switched to using the new
+ * forwarding information. This is shown below;
+ *
+ * pre-failure;
+ * X/16 --> R-ADJ-1 --> ADJ-1-PE3 (multi-path via P1 and P2)
+ *
+ * post-failure:
+ * X/16 --> R-ADJ-1 --> ADJ-2-PE3 (single path via P1)
+ *
+ * Note that R-ADJ-1 (the recursive adj) remains in the forwarding graph,
+ * therefore X/16 (and all its siblings) is not updated.
+ * X/16 and its siblings share the recursive adj since they share the same
+ * path-list. It is the path-list object that contributes the recursive-adj
+ * (see next section for more details).
+ *
+ *
+ * 2) iBGP PIC-edge; Traffic from CE3 to Y/16. On PE3 there are routes;
+ * Y/16 (and hundreds of thousands of others like it)
+ * via PE1
+ * via PE2
+ * and
+ * PE1/32 (PE1's loopback address)
+ * via 10.0.2.2 Link0 (this is P1)
+ * PE2/32 (PE2's loopback address)
+ * via 10.0.3.3 Link1 (this is P2)
+ *
+ * The failure is the loss of reachability to PE2. This could be either the
+ * loss of the link P2-PE2 or the loss of the node PE2. It is detected either
+ * by the withdrawal of PE2's loopback route or by some form of failure
+ * detection (e.g. BFD).
+ * VPP FIB again provides PIC via the use of the shared recursive-adj. Y/16 and
+ * its siblings will again share a path-list for the list {PE1,PE2}, this
+ * path-list will contribute a multi-path-recursive-adj, i.e. a multi-path-adj
+ * with each choice therein being another adj;
+ *
+ * Y/16 -> RM-ADJ --> ADJ1 (for PE1)
+ * --> ADJ2 (for PE2)
+ *
+ * When the route for PE2 is withdrawn, the multi-path-recursive-adjacency
+ * is updated to be;
+ *
+ * Y/16 --> RM-ADJ --> ADJ1 (for PE1)
+ * --> ADJ1 (for PE1)
+ *
+ * That is, both choices in the ECMP set are the same and thus all traffic is
+ * forwarded to PE1. Eventually the control plane will download a route update
+ * for Y/16 to be via PE1 only. At that time the situation will be:
+ *
+ * Y/16 -> R-ADJ --> ADJ1 (for PE1)
+ *
+ * In the scenario above we assumed that PE1 and PE2 are ECMP for Y/16. iBGP
+ * PIC-edge is also specified for the case where one PE is primary and the
+ * other backup - VPP FIB does not support that case at this time.
+ *
+ * 3) eBGP PIC-edge; Traffic from CE3 to Y/16. On PE1 there are routes;
+ * Y/16 (and hundreds of thousands of others like it)
+ * via CE1 (primary)
+ * via PE2 (backup)
+ * and
+ * CE1 (this is an adj-fib)
+ * via 11.0.0.1 Link0 (this is CE1) << this is an adj-fib
+ * PE2 (PE2's loopback address)
+ * via 10.0.5.5 Link1 (this is link PE1-PE2)
+ * The failure is the loss of link0 to CE1. The failure can be detected by FIB
+ * either as a link down event or by the control plane withdrawing the connected
+ * prefix on link0 (say 11.0.0.0/30). The latter works because the resolving
+ * entry is an adj-fib, so removing the connected will withdraw the adj-fib, and
+ * hence the recursive path becomes unresolved. The former is faster,
+ * particularly in the case of Inter-AS option A where there are many VLAN
+ * sub-interfaces on the PE-CE link, one for each VRF, and so the control plane
+ * must remove the connected prefix for each sub-interface to trigger PIC in
+ * each VRF. Note though that total PIC cutover time will depend on VRF scale
+ * with either trigger.
+ * Primary and backup paths in this eBGP PIC-edge scenario are calculated by
+ * BGP. Each peer is configured to always advertise its best external path to
+ * its iBGP peers. Backup paths therefore send traffic from the PE back into the
+ * core to an alternate PE. A PE may have multiple external paths, i.e. multiple
+ * directly connected CEs, and it may also have multiple backup PEs; however,
+ * there is no correlation between the two. So, unlike LFA-FRR, the redundancy
+ * model is N-M; N primary paths are backed-up by M backup paths - only when
+ * all primary paths fail is the cutover performed onto the M backup paths.
+ * Note that
+ * PE2 must be suitably configured to forward traffic on its external path that
+ * was received from PE1. VPP FIB does not support external-internal-BGP (eiBGP)
+ * load-balancing.
+ *
+ * As with LFA-FRR the use of primary and backup paths is not currently
+ * supported, however, the use of a recursive-multi-path-adj, and a suitably
+ * constrained hashing algorithm to choose from the primary or backup path sets,
+ * would again provide the necessary shared object and hence the prefix scale
+ * independent cutover.
+ *
+ * Astute readers will recognise that the BGP PIC scenarios above assume a
+ * BGP-free core.
+ *
+ * Fast convergence implementation options come in two flavours:
+ * 1) Insert switches into the data-path. The switch represents the protected
+ * resource. If the switch is 'on' the primary path is taken, otherwise
+ * the backup path is taken. Testing the switch in the data-path comes with
+ * an associated performance cost. A given packet may encounter more than
+ * one protected resource as it is forwarded. This approach minimises
+ * cutover times as packets will be forwarded on the backup path as soon
+ * as the protected resource is detected to be down and the single switch
+ * is tripped. However, it comes at a performance cost, which increases
+ * with each shared resource a packet encounters in the data-path.
+ * This approach is thus best suited to LFA-FRR where the protected routes
+ * are non-recursive (i.e. encounter few shared resources) and the
+ * expectation on cutover times is more stringent (<50msecs).
+ * 2) Update shared objects. Identify objects in the data-path, that are
+ * required to be present whether or not fast convergence is required (i.e.
+ * adjacencies) that can be shared by multiple routes. Create a dependency
+ * between these objects at the protected resource. When the protected
+ * resource fails, each of the shared objects is updated in a way that all
+ * users of it see a consistent change. This approach incurs no performance
+ * penalty as the data-path structure is unchanged, however, the cutover
+ * times are longer as more work is required when the resource fails. This
+ * scheme is thus more appropriate to recursive prefixes (where the packet
+ * will encounter multiple protected resources) and to fast-convergence
+ * technologies where the cutover times are less stringent (i.e. PIC).
+ *
+ * Implementation:
+ * ---------------
+ *
+ * Due to the requirements outlined above, not all routes known to FIB
+ * (e.g. adj-fibs) are installed in forwarding. However, should circumstances
+ * change, those routes will need to be added. This adds the requirement that
+ * a FIB maintains two tables per-VRF, per-AF (where a 'table' is indexed by
+ * prefix); the forwarding and non-forwarding tables.
+ *
+ * For DP speed in VPP we want the lookup in the forwarding table to directly
+ * result in the ADJ. Hence the two tables: one contains all the routes (a
+ * lookup therein yields a fib_entry_t); the other contains only the forwarding
+ * routes (a lookup therein yields an ip_adjacency_t). The latter is used by the
+ * DP.
+ * This trades memory for forwarding performance. A good trade-off in VPP's
+ * expected operating environments.
+ *
+ * Note these tables are keyed only by the prefix (and since there are two
+ * per-VRF, implicitly by the VRF too). The key for an adjacency is the
+ * tuple: {next-hop address (and its AF), interface, link/ether-type}.
+ * Consider this curious, but allowed, config;
+ *
+ * set int ip addr 10.0.0.1/24 Gig0
+ * set ip arp Gig0 10.0.0.2 dead.dead.dead
+ * # a host in that sub-net is routed via a better next hop (say it avoids a
+ * # big L2 domain)
+ * ip route add 10.0.0.2 Gig1 192.168.1.1
+ * # this recursive should go via Gig1
+ * ip route add 1.1.1.1/32 via 10.0.0.2
+ * # this non-recursive should go via Gig0
+ * ip route add 2.2.2.2/32 via Gig0 10.0.0.2
+ *
+ * For the last route, the lookup for the path (via {Gig0, 10.0.0.2}) in the
+ * prefix table would not yield the correct result. To fix this we need a
+ * separate table for the adjacencies.
+ *
+ * - FIB data structures;
+ *
+ * fib_entry_t:
+ * - a representation of a route.
+ * - has a prefix.
+ * - it maintains an array of path-lists that have been contributed by the
+ * different sources
+ * - installs in the forwarding table the adjacency contributed by the best
+ * source's path-list.
+ *
+ * fib_path_list_t:
+ * - a list of paths
+ * - path-lists may be shared between FIB entries. The path-lists are thus
+ * kept in a DB. The key is the combined description of the paths. We share
+ * path-lists when it will aid convergence to do so. Adding path-lists to
+ * this DB that are never shared, or are shared only by prefixes that are
+ * not subject to PIC, will increase the size of the DB unnecessarily and
+ * may lead to increased search times due to hash collisions.
+ * - the path-list contributes the appropriate adj for the entry in the
+ * forwarding table. The adj can be 'normal', multi-path or recursive,
+ * depending on the number of paths and their types.
+ * - since path-lists are shared there is only one instance of the multi-path
+ * adj that they [may] create. As such multi-path adjacencies do not need a
+ * separate DB.
+ * The path-list with recursive paths and the recursive adjacency that it
+ * contributes forms the backbone of the fast convergence architecture (as
+ * described previously).
+ *
+ * fib_path_t:
+ * - a description of how to forward the traffic (i.e. via {Gig1, K}).
+ * - the path describes the intent on how to forward. This differs from how
+ * the path resolves. I.e. it might not be resolved at all (e.g. if the
+ * interface is deleted or down).
+ * - paths have different types, most notably recursive or non-recursive.
+ * - a fib_path_t will contribute the appropriate adjacency object. It is from
+ * these contributions that the DP graph/chain for the route is built.
+ * - if the path is recursive and a recursion loop is detected, then the path
+ * will contribute the special DROP adjacency. This way, whilst the control
+ * plane graph is looped, the data-plane graph does not.
+ *
+ * We build a graph of these objects;
+ *
+ * fib_entry_t -> fib_path_list_t -> fib_path_t -> ...
+ *
+ * for recursive paths:
+ *
+ * fib_path_t -> fib_entry_t -> ....
+ *
+ * for non-recursive paths:
+ *
+ * fib_path_t -> ip_adjacency_t -> interface
+ *
+ * These objects, which constitute the 'control plane' part of the FIB, are
+ * used to represent the resolution of a route. As a whole this is referred to
+ * as the
+ * control plane graph. There is a separate DP graph to represent the forwarding
+ * of a packet. In the DP graph each object represents an action that is applied
+ * to a packet as it traverses the graph. For example, a lookup of an IP address
+ * in the forwarding table could result in the following graph:
+ *
+ * recursive-adj --> multi-path-adj --> interface_A
+ * --> interface_B
+ *
+ * A packet traversing this FIB DP graph would thus also traverse a VPP node
+ * graph of:
+ *
+ * ipX_recursive --> ipX_rewrite --> interface_A_tx --> etc
+ *
+ * The taxonomy of objects in a FIB graph is as follows, consider;
+ *
+ * A -->
+ * B --> D
+ * C -->
+ *
+ * Where A,B and C are (for example) routes that resolve through D.
+ * parent: D is the parent of A, B, and C.
+ * children: A, B, and C are children of D.
+ * sibling: A, B and C are siblings of one another.
+ *
+ * All shared objects in the FIB are reference counted. Users of these objects
+ * are thus expected to use the add_lock/unlock semantics (as one would
+ * normally use malloc/free).
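+ *
+ * For example, a minimal sketch using the entry lock API that appears
+ * elsewhere in the FIB code:
+ *
+ *    fib_entry_lock(fib_entry_index);    // take a reference on the entry
+ *    ... use the entry ...
+ *    fib_entry_unlock(fib_entry_index);  // drop it; the last unlock frees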
+ *
+ * WALKS
+ *
+ * It is necessary to walk/traverse the graph forwards (entry to interface) to
+ * perform a collapse or build a recursive adj and backwards (interface
+ * to entry) to perform updates, i.e. when interface state changes or when
+ * recursive route resolution updates occur.
+ * A forward walk follows simply by navigating an object's parent pointer to
+ * access its parent object. For objects with multiple parents (e.g. a
+ * path-list), each parent is walked in turn.
+ * To support back-walks direct dependencies are maintained between objects,
+ * i.e. in the relationship, {A, B, C} --> D, then object D will maintain a list
+ * of 'pointers' to its children {A, B, C}. Bare C-language pointers are not
+ * allowed, so a pointer is described in terms of an object type (i.e. entry,
+ * path-list, etc) and index - this allows the object to be retrieved from the
+ * appropriate pool. Such a list is maintained to achieve fast convergence
+ * at scale. When there are millions of recursive prefixes, it is very
+ * inefficient to
+ * blindly walk the tables looking for entries that were affected by a given
+ * topology change. The lowest hanging fruit when optimising is to remove
+ * actions that are not required, so all back-walks only traverse objects that
+ * are directly affected by the change.
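+ *
+ * As a sketch, such a type+index 'pointer' can be modelled as (the field
+ * names here are illustrative):
+ *
+ *    typedef struct fib_node_ptr_t_ {
+ *        fib_node_type_t  fnp_type;   // which object pool to search
+ *        fib_node_index_t fnp_index;  // the object's index in that pool
+ *    } fib_node_ptr_t;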
+ *
+ * PIC Core and fast-reroute rely on FIB reacting quickly to an interface
+ * state change to update the multi-path-adjacencies that use this interface.
+ * An example graph is shown below:
+ *
+ * E_a -->
+ * E_b --> PL_2 --> P_a --> Interface_A
+ * ... --> P_c -\
+ * E_k --> \
+ * Interface_K
+ * /
+ * E_l --> /
+ * E_m --> PL_1 --> P_d -/
+ * ... --> P_f --> Interface_F
+ * E_z -->
+ *
+ * E = fib_entry_t
+ * PL = fib_path_list_t
+ * P = fib_path_t
+ * The subscripts are arbitrary and serve only to distinguish object instances.
+ * This CP graph results in the following DP graph:
+ *
+ * M-ADJ-2 --> Interface_A
+ * \
+ * -> Interface_K
+ * /
+ * M-ADJ-1 --> Interface_F
+ *
+ * M-ADJ = multi-path-adjacency.
+ *
+ * When interface K goes down a back-walk is started over its dependants in the
+ * control plane graph. This back-walk will reach PL_1 and PL_2 and result in
+ * the calculation of new adjacencies that have interface K removed. The walk
+ * will continue to the entry objects and thus the forwarding table is updated
+ * for each prefix with the new adjacency. The DP graph then becomes:
+ *
+ * ADJ-3 --> Interface_A
+ *
+ * ADJ-4 --> Interface_F
+ *
+ * The eBGP PIC scenarios described above relied on the update of a path-list's
+ * recursive-adjacency to provide the shared point of cutover. This is shown
+ * below;
+ *
+ * E_a -->
+ * E_b --> PL_2 --> P_a --> E_44 --> PL_a --> P_b --> Interface_A
+ * ... --> P_c -\
+ * E_k --> \
+ * \
+ * E_1 --> PL_k -> P_k --> Interface_K
+ * /
+ * E_l --> /
+ * E_m --> PL_1 --> P_d -/
+ * ... --> P_f --> E_55 --> PL_e --> P_e --> Interface_E
+ * E_z -->
+ *
+ * The failure scenario is the removal of entry E_1 and thus the paths P_c and
+ * P_d become unresolved. To achieve PIC the two shared recursive path-lists,
+ * PL_1 and PL_2, must be updated to remove E_1 from the recursive-multi-path-
+ * adjacencies that they contribute, before any entry E_a to E_z is updated.
+ * This means that as the update propagates backwards (right to left) in the
+ * graph it must do so breadth first not depth first. Note this approach leads
+ * to convergence times that are dependent on the number of path-lists, and so
+ * on the number of combinations of egress PEs - this is desirable as this
+ * scale is considerably lower than the number of prefixes.
+ *
+ * Consider another section of the graph, similar to the one shown above,
+ * where there is another prefix E_2 in a similar position to E_1 that also
+ * has many dependent children. It is reasonable to expect that a particular
+ * network failure may simultaneously render E_1 and E_2 unreachable.
+ * This means that the update to withdraw E_2 is downloaded immediately after
+ * the update to withdraw E_1. It is a requirement on the FIB to not spend large
+ * amounts of time in a back-walk whilst processing the update for E_1, i.e. the
+ * back-walk must not reach as far as E_a and its siblings. Therefore, after the
+ * back-walk has traversed one generation (breadth first) to update all the
+ * path-lists it should be suspended/backgrounded and further updates allowed
+ * to be handled. Once the update queue is empty, the suspended walks can be
+ * resumed. Note that in the case that multiple updates affect the same entry
+ * (say E_1) this will trigger multiple similar walks; these are merged,
+ * so each child is updated only once.
+ * In the presence of more layers of recursion PIC is still a desirable
+ * feature. Consider an extension to the diagram above, where more recursive
+ * routes (E_100 -> E_199) are added as children of E_a:
+ *
+ * E_100 -->
+ * E_101 --> PL_3 --> P_j-\
+ * ... \
+ * E_199 --> E_a -->
+ * E_b --> PL_2 --> P_a --> E_44 --> ...etc..
+ * ... --> P_c -\
+ * E_k \
+ * E_1 --> ...etc..
+ * /
+ * E_l --> /
+ * E_m --> PL_1 --> P_d -/
+ * ... --> P_e --> E_55 --> ...etc..
+ * E_z -->
+ *
+ * To achieve PIC for the routes E_100->E_199, PL_3 needs to be updated before
+ * E_b -> E_z; a breadth first traversal at each level would not achieve this.
+ * Instead the walk must proceed intelligently. Children on PL_2 are sorted so
+ * those Entry objects that themselves have children appear first in the list,
+ * those without later. When an entry object is walked that has children, a
+ * walk of its children is pushed to the front of the background queue. The
+ * background queue is a priority queue. As the breadth first traversal proceeds
+ * across the dependent entry objects E_a to E_k, when the first entry that does
+ * not have children is reached (E_b), the walk is suspended and placed at the
+ * back of the queue. Following this prioritisation method, shared path-list
+ * updates are performed before all non-resolving entry objects are updated.
+ * The CPU/core/thread that handles the updates is the same thread that handles
+ * the back-walks. Handling updates has a higher priority than making walk
+ * progress, so a walk is required to be interruptible/suspendable when new
+ * updates are available.
+ * !!! TODO - this section describes how walks should be not how they are !!!
+ *
+ * In the diagram above E_100 is an IP route, however, VPP has no restrictions
+ * on the type of object that can be a dependent of a FIB entry. Children of
+ * a FIB entry can be (and are) GRE & VXLAN tunnel endpoints, L2VPN LSPs, etc.
+ * By including all object types into the graph and extending the back-walk, we
+ * can thus deliver fast convergence to technologies that overlay on an IP
+ * network.
+ *
+ * If, having read all the above carefully, you are still thinking; 'I don't
+ * need all this %&$* I have a route only I know about and I just need to jam
+ * it in',
+ * then fib_table_entry_special_add() is your only friend.
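+ *
+ * A minimal sketch of that shortcut (the source and flag values here are
+ * illustrative; see fib_table.h for the exact signature):
+ *
+ *    fib_prefix_t pfx = {
+ *        .fp_proto = FIB_PROTOCOL_IP4,
+ *        .fp_len = 32,
+ *        .fp_addr.ip4.as_u32 = clib_host_to_net_u32(0x01010101),
+ *    };
+ *    fib_table_entry_special_add(fib_index, &pfx, FIB_SOURCE_SPECIAL,
+ *                                FIB_ENTRY_FLAG_EXCLUSIVE, ...);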
+ */
+
+#ifndef __FIB_H__
+#define __FIB_H__
+
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/ip6_fib.h>
+
+#endif
diff --git a/src/vnet/fib/fib_api.h b/src/vnet/fib/fib_api.h
new file mode 100644
index 00000000000..f82753170db
--- /dev/null
+++ b/src/vnet/fib/fib_api.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FIB_API_H__
+#define __FIB_API_H__
+
+
+int
+add_del_route_check (fib_protocol_t table_proto,
+ u32 table_id,
+ u32 next_hop_sw_if_index,
+ fib_protocol_t next_hop_table_proto,
+ u32 next_hop_table_id,
+ u8 create_missing_tables,
+ u32 * fib_index, u32 * next_hop_fib_index);
+
+int
+add_del_route_t_handler (u8 is_multipath,
+ u8 is_add,
+ u8 is_drop,
+ u8 is_unreach,
+ u8 is_prohibit,
+ u8 is_local,
+ u8 is_classify,
+ u32 classify_table_index,
+ u8 is_resolve_host,
+ u8 is_resolve_attached,
+ u32 fib_index,
+ const fib_prefix_t * prefix,
+ u8 next_hop_proto_is_ip4,
+ const ip46_address_t * next_hop,
+ u32 next_hop_sw_if_index,
+ u8 next_hop_fib_index,
+ u32 next_hop_weight,
+ mpls_label_t next_hop_via_label,
+ mpls_label_t * next_hop_out_label_stack);
+
+void
+copy_fib_next_hop (fib_route_path_encode_t * api_rpath,
+ void * fp_arg);
+
+#endif /* __FIB_API_H__ */
diff --git a/src/vnet/fib/fib_attached_export.c b/src/vnet/fib/fib_attached_export.c
new file mode 100644
index 00000000000..c389ea43feb
--- /dev/null
+++ b/src/vnet/fib/fib_attached_export.c
@@ -0,0 +1,572 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_table.h>
+
+#include <vnet/fib/fib_attached_export.h>
+#include <vnet/fib/fib_entry_cover.h>
+#include <vnet/fib/fib_entry_src.h>
+#include <vnet/fib/fib_entry_delegate.h>
+
+/**
+ * A description of the need to import routes from the export table
+ */
+typedef struct fib_ae_import_t_
+{
+ /**
+ * The entry in the export table that this importer
+ * is importing covered prefixes from
+ */
+ fib_node_index_t faei_export_entry;
+
+ /**
+ * The attached entry in the import table
+ */
+ fib_node_index_t faei_import_entry;
+ /**
+ * the sibling index on the cover
+ */
+ u32 faei_export_sibling;
+
+ /**
+ * The index of the exporter tracker. Not set if the
+ * export entry is not valid for export
+ */
+ fib_node_index_t faei_exporter;
+
+ /**
+ * A vector/list of imported entry indices
+ */
+ fib_node_index_t *faei_importeds;
+
+ /**
+ * The FIB index and prefix we are tracking
+ */
+ fib_node_index_t faei_export_fib;
+ fib_prefix_t faei_prefix;
+
+ /**
+ * The FIB index we are importing into
+ */
+ fib_node_index_t faei_import_fib;
+} fib_ae_import_t;
+
+/**
+ * A description of the need to export routes to one or more export tables
+ */
+typedef struct fib_ae_export_t_ {
+ /**
+ * The vector/list of import tracker indices
+ */
+ fib_node_index_t *faee_importers;
+
+ /**
+ * The connected entry this export is acting on behalf of
+ */
+ fib_node_index_t faee_ei;
+
+ /**
+ * Reference counting locks
+ */
+ u32 faee_locks;
+} fib_ae_export_t;
+
+/*
+ * memory pools for the importers and exporters
+ */
+static fib_ae_import_t *fib_ae_import_pool;
+static fib_ae_export_t *fib_ae_export_pool;
+
+static fib_ae_export_t *
+fib_entry_ae_add_or_lock (fib_node_index_t connected)
+{
+ fib_entry_delegate_t *fed;
+ fib_ae_export_t *export;
+ fib_entry_t *entry;
+
+ entry = fib_entry_get(connected);
+ fed = fib_entry_delegate_get(entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_EXPORT);
+
+ if (NULL == fed)
+ {
+ fed = fib_entry_delegate_find_or_add(entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_EXPORT);
+ pool_get(fib_ae_export_pool, export);
+ memset(export, 0, sizeof(*export));
+
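+ /* record the pool index of the new export tracker in the delegate */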
+ fed->fd_index = (export - fib_ae_export_pool);
+ export->faee_ei = connected;
+ }
+ else
+ {
+ export = pool_elt_at_index(fib_ae_export_pool, fed->fd_index);
+ }
+
+ export->faee_locks++;
+
+ return (export);
+}
+
+static void
+fib_entry_import_remove (fib_ae_import_t *import,
+ fib_node_index_t entry_index)
+{
+ fib_prefix_t prefix;
+ u32 index;
+
+ /*
+ * find the index in the vector of the entry we are removing
+ */
+ index = vec_search(import->faei_importeds, entry_index);
+
+ if (index < vec_len(import->faei_importeds))
+ {
+ /*
+ * this is an entry that was previously imported
+ */
+ fib_entry_get_prefix(entry_index, &prefix);
+
+ fib_table_entry_special_remove(import->faei_import_fib,
+ &prefix,
+ FIB_SOURCE_AE);
+
+ fib_entry_unlock(entry_index);
+ vec_del1(import->faei_importeds, index);
+ }
+}
+
+static void
+fib_entry_import_add (fib_ae_import_t *import,
+ fib_node_index_t entry_index)
+{
+ fib_node_index_t *existing;
+ fib_prefix_t prefix;
+
+ /*
+ * ensure we only add the exported entry once, since
+ * sourcing prefixes in the table is reference counted
+ */
+ vec_foreach(existing, import->faei_importeds)
+ {
+ if (*existing == entry_index)
+ {
+ return;
+ }
+ }
+
+ /*
+ * this is the first time this export entry has been imported
+ * Add it to the import FIB and to the list of importeds
+ */
+ fib_entry_get_prefix(entry_index, &prefix);
+
+ /*
+ * don't import entries that have the same prefix as the import entry
+ */
+ if (0 != fib_prefix_cmp(&prefix,
+ &import->faei_prefix))
+ {
+ const dpo_id_t *dpo;
+
+ dpo = fib_entry_contribute_ip_forwarding(entry_index);
+
+ if (dpo_id_is_valid(dpo))
+ {
+ fib_table_entry_special_dpo_add(import->faei_import_fib,
+ &prefix,
+ FIB_SOURCE_AE,
+ (fib_entry_get_flags(entry_index) |
+ FIB_ENTRY_FLAG_EXCLUSIVE),
+ load_balance_get_bucket(dpo->dpoi_index, 0));
+
+ fib_entry_lock(entry_index);
+ vec_add1(import->faei_importeds, entry_index);
+ }
+ /*
+ * else
+ * the entry currently has no valid forwarding. when it
+ * does it will export itself
+ */
+ }
+}
+
+/**
+ * Call back when walking a connected prefix's covered prefixes for import
+ */
+static int
+fib_entry_covered_walk_import (fib_entry_t *cover,
+ fib_node_index_t covered,
+ void *ctx)
+{
+ fib_ae_import_t *import = ctx;
+
+ fib_entry_import_add(import, covered);
+
+ return (0);
+}
+
+/*
+ * fib_ae_export_import_add
+ *
+ * Add an importer to a connected entry
+ */
+static void
+fib_ae_export_import_add (fib_ae_export_t *export,
+ fib_ae_import_t *import)
+{
+ fib_entry_t *entry;
+
+ import->faei_exporter = (export - fib_ae_export_pool);
+ entry = fib_entry_get(export->faee_ei);
+
+ fib_entry_cover_walk(entry,
+ fib_entry_covered_walk_import,
+ import);
+}
+
+void
+fib_attached_export_import (fib_entry_t *fib_entry,
+ fib_node_index_t export_fib)
+{
+ fib_entry_delegate_t *fed;
+ fib_ae_import_t *import;
+
+ pool_get(fib_ae_import_pool, import);
+
+ import->faei_import_fib = fib_entry->fe_fib_index;
+ import->faei_export_fib = export_fib;
+ import->faei_prefix = fib_entry->fe_prefix;
+ import->faei_import_entry = fib_entry_get_index(fib_entry);
+ import->faei_export_sibling = ~0;
+
+ /*
+ * do an exact match in the export table
+ */
+ import->faei_export_entry =
+ fib_table_lookup_exact_match(import->faei_export_fib,
+ &import->faei_prefix);
+
+ if (FIB_NODE_INDEX_INVALID == import->faei_export_entry)
+ {
+ /*
+ * no exact matching entry in the export table. can't be good.
+ * track the next best thing
+ */
+ import->faei_export_entry =
+ fib_table_lookup(import->faei_export_fib,
+ &import->faei_prefix);
+ import->faei_exporter = FIB_NODE_INDEX_INVALID;
+ }
+ else
+ {
+ /*
+ * found the entry in the export table. import the
+ * prefixes that it covers.
+ * only if the prefix found in the export FIB really is
+ * attached do we want to import its covered prefixes
+ */
+ if (FIB_ENTRY_FLAG_ATTACHED &
+ fib_entry_get_flags_i(fib_entry_get(import->faei_export_entry)))
+ {
+ fib_ae_export_t *export;
+
+ export = fib_entry_ae_add_or_lock(import->faei_export_entry);
+ vec_add1(export->faee_importers, (import - fib_ae_import_pool));
+ fib_ae_export_import_add(export, import);
+ }
+ }
+
+ /*
+ * track the entry in the export table so we can update appropriately
+ * when it changes
+ */
+ import->faei_export_sibling =
+ fib_entry_cover_track(fib_entry_get(import->faei_export_entry),
+ fib_entry_get_index(fib_entry));
+
+ fed = fib_entry_delegate_find_or_add(fib_entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_IMPORT);
+ fed->fd_index = (import - fib_ae_import_pool);
+}
+
+/**
+ * \brief All the imported entries need to be purged
+ */
+void
+fib_attached_export_purge (fib_entry_t *fib_entry)
+{
+ fib_entry_delegate_t *fed;
+
+ fed = fib_entry_delegate_get(fib_entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_IMPORT);
+
+ if (NULL != fed)
+ {
+ fib_node_index_t *import_index;
+ fib_entry_t *export_entry;
+ fib_ae_import_t *import;
+ fib_ae_export_t *export;
+
+ import = pool_elt_at_index(fib_ae_import_pool, fed->fd_index);
+
+ /*
+ * remove each imported entry
+ */
+ vec_foreach(import_index, import->faei_importeds)
+ {
+ fib_prefix_t prefix;
+
+ fib_entry_get_prefix(*import_index, &prefix);
+
+ fib_table_entry_delete(import->faei_import_fib,
+ &prefix,
+ FIB_SOURCE_AE);
+ fib_entry_unlock(*import_index);
+ }
+ vec_free(import->faei_importeds);
+
+ /*
+ * stop tracking the export entry
+ */
+ if (~0 != import->faei_export_sibling)
+ {
+ fib_entry_cover_untrack(fib_entry_get(import->faei_export_entry),
+ import->faei_export_sibling);
+ }
+ import->faei_export_sibling = ~0;
+
+ /*
+ * remove this import tracker from the export's list,
+ * if it is attached to one. It won't be in the case that the tracked
+ * export entry is not an attached exact match.
+ */
+ if (FIB_NODE_INDEX_INVALID != import->faei_exporter)
+ {
+ fib_entry_delegate_t *fed;
+
+ export_entry = fib_entry_get(import->faei_export_entry);
+
+ fed = fib_entry_delegate_get(export_entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_EXPORT);
+ ASSERT(NULL != fed);
+
+ export = pool_elt_at_index(fib_ae_export_pool, fed->fd_index);
+
+ u32 index = vec_search(export->faee_importers,
+ (import - fib_ae_import_pool));
+
+ ASSERT(index < vec_len(export->faee_importers));
+ vec_del1(export->faee_importers, index);
+
+ /*
+ * free the exporter if there are no longer importers
+ */
+ if (0 == --export->faee_locks)
+ {
+ pool_put(fib_ae_export_pool, export);
+ fib_entry_delegate_remove(export_entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_EXPORT);
+ }
+ }
+
+ /*
+ * free the import tracker
+ */
+ pool_put(fib_ae_import_pool, import);
+ fib_entry_delegate_remove(fib_entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_IMPORT);
+ }
+}
+
+void
+fib_attached_export_covered_added (fib_entry_t *cover,
+ fib_node_index_t covered)
+{
+ fib_entry_delegate_t *fed;
+
+ fed = fib_entry_delegate_get(cover,
+ FIB_ENTRY_DELEGATE_ATTACHED_EXPORT);
+
+ if (NULL != fed)
+ {
+ /*
+ * the covering prefix is exporting to other tables
+ */
+ fib_node_index_t *import_index;
+ fib_ae_import_t *import;
+ fib_ae_export_t *export;
+
+ export = pool_elt_at_index(fib_ae_export_pool, fed->fd_index);
+
+ /*
+ * export the covered entry to each of the importers
+ */
+ vec_foreach(import_index, export->faee_importers)
+ {
+ import = pool_elt_at_index(fib_ae_import_pool, *import_index);
+
+ fib_entry_import_add(import, covered);
+ }
+ }
+}
+
+void
+fib_attached_export_covered_removed (fib_entry_t *cover,
+ fib_node_index_t covered)
+{
+ fib_entry_delegate_t *fed;
+
+ fed = fib_entry_delegate_get(cover,
+ FIB_ENTRY_DELEGATE_ATTACHED_EXPORT);
+
+ if (NULL != fed)
+ {
+ /*
+ * the covering prefix is exporting to other tables
+ */
+ fib_node_index_t *import_index;
+ fib_ae_import_t *import;
+ fib_ae_export_t *export;
+
+ export = pool_elt_at_index(fib_ae_export_pool, fed->fd_index);
+
+ /*
+ * remove the covered entry from each of the importers
+ */
+ vec_foreach(import_index, export->faee_importers)
+ {
+ import = pool_elt_at_index(fib_ae_import_pool, *import_index);
+
+ fib_entry_import_remove(import, covered);
+ }
+ }
+}
+
+static void
+fib_attached_export_cover_modified_i (fib_entry_t *fib_entry)
+{
+ fib_entry_delegate_t *fed;
+
+ fed = fib_entry_delegate_get(fib_entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_IMPORT);
+
+ if (NULL != fed)
+ {
+ fib_ae_import_t *import;
+ u32 export_fib;
+
+ /*
+ * save the temporaries we need from the existing import
+ * since it will be toast after the purge.
+ */
+ import = pool_elt_at_index(fib_ae_import_pool, fed->fd_index);
+ export_fib = import->faei_export_fib;
+
+ /*
+ * keep it simple. purge anything that was previously imported.
+ * then re-evaluate the need to import.
+ */
+ fib_attached_export_purge(fib_entry);
+ fib_attached_export_import(fib_entry, export_fib);
+ }
+}
+
+/**
+ * \brief If this entry is tracking a cover (in another table)
+ * then that cover has changed. re-evaluate import.
+ */
+void
+fib_attached_export_cover_change (fib_entry_t *fib_entry)
+{
+ fib_attached_export_cover_modified_i(fib_entry);
+}
+
+/**
+ * \brief If this entry is tracking a cover (in another table)
+ * then that cover has been updated. re-evaluate import.
+ */
+void
+fib_attached_export_cover_update (fib_entry_t *fib_entry)
+{
+ fib_attached_export_cover_modified_i(fib_entry);
+}
+
+u8*
+fib_ae_import_format (fib_entry_t *fib_entry,
+ u8* s)
+{
+ fib_entry_delegate_t *fed;
+
+ fed = fib_entry_delegate_get(fib_entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_IMPORT);
+
+ if (NULL != fed)
+ {
+ fib_node_index_t *index;
+ fib_ae_import_t *import;
+
+ import = pool_elt_at_index(fib_ae_import_pool, fed->fd_index);
+
+ s = format(s, "\n Attached-Import:%d:[", (import - fib_ae_import_pool));
+ s = format(s, "export-prefix:%U ", format_fib_prefix, &import->faei_prefix);
+ s = format(s, "export-entry:%d ", import->faei_export_entry);
+ s = format(s, "export-sibling:%d ", import->faei_export_sibling);
+ s = format(s, "exporter:%d ", import->faei_exporter);
+ s = format(s, "export-fib:%d ", import->faei_export_fib);
+
+ s = format(s, "import-entry:%d ", import->faei_import_entry);
+ s = format(s, "import-fib:%d ", import->faei_import_fib);
+
+ s = format(s, "importeds:[");
+ vec_foreach(index, import->faei_importeds)
+ {
+ s = format(s, "%d, ", *index);
+ }
+ s = format(s, "]]");
+ }
+
+ return (s);
+}
+
+u8*
+fib_ae_export_format (fib_entry_t *fib_entry,
+ u8* s)
+{
+ fib_entry_delegate_t *fed;
+
+ fed = fib_entry_delegate_get(fib_entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_EXPORT);
+
+ if (NULL != fed)
+ {
+ fib_node_index_t *index;
+ fib_ae_export_t *export;
+
+ export = pool_elt_at_index(fib_ae_export_pool, fed->fd_index);
+
+ s = format(s, "\n Attached-Export:%d:[", (export - fib_ae_export_pool));
+ s = format(s, "export-entry:%d ", export->faee_ei);
+
+ s = format(s, "importers:[");
+ vec_foreach(index, export->faee_importers)
+ {
+ s = format(s, "%d, ", *index);
+ }
+ s = format(s, "]]");
+ }
+ return (s);
+}
diff --git a/src/vnet/fib/fib_attached_export.h b/src/vnet/fib/fib_attached_export.h
new file mode 100644
index 00000000000..fa28a6e13b8
--- /dev/null
+++ b/src/vnet/fib/fib_attached_export.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * FIB attached export
+ *
+ * what's it all about?
+ * say one does this:
+ * set int ip table Gig0 2
+ * set int ip addr Gig0 10.0.0.1/24
+ * Gig0 is in table 2 with a connected address.
+ * Now we add a route matching said connected in a different table:
+ * ip route add table 3 10.0.0.0/24 via Gig0
+ * How do we expect traffic in table 3 to be forwarded? Clearly out of
+ * Gig0. It's an attached route, hence we are saying that we can ARP for
+ * hosts in the attached subnet. And we can. But any ARP entries we learn
+ * will be received on Gig0, and since Gig0 is in table 2, they will be
+ * installed as adj-fibs in table 2. So traffic in table 3 will never hit an
+ * adj-fib; it always hits the glean, and is thus effectively dropped.
+ * How do we fix this? Attached Export !! All more specific entries in table 2
+ * that track and are covered by the connected are automatically exported into
+ * table 3. Now table 3 also has adj-fibs (and the local) so traffic to hosts
+ * is restored.
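+ *
+ * As a sketch (addresses from the example above; the /32 host entry is
+ * hypothetical, learned via ARP on Gig0), table 3 then contains:
+ * 10.0.0.0/24 - the attached route added above
+ * 10.0.0.1/32 - the local, exported from table 2
+ * 10.0.0.2/32 - an adj-fib, exported from table 2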
+ */
+
+#ifndef __FIB_ATTACHED_EXPORT_H__
+#define __FIB_ATTACHED_EXPORT_H__
+
+#include <vnet/fib/fib_types.h>
+
+extern void fib_attached_export_import(fib_entry_t *fib_entry,
+ fib_node_index_t export_fib);
+
+extern void fib_attached_export_purge(fib_entry_t *fib_entry);
+
+extern void fib_attached_export_covered_added(fib_entry_t *cover,
+ fib_node_index_t covered);
+extern void fib_attached_export_covered_removed(fib_entry_t *cover,
+ fib_node_index_t covered);
+extern void fib_attached_export_cover_change(fib_entry_t *fib_entry);
+extern void fib_attached_export_cover_update(fib_entry_t *fib_entry);
+
+extern u8* fib_ae_import_format(fib_entry_t *fib_entry, u8*s);
+extern u8* fib_ae_export_format(fib_entry_t *fib_entry, u8*s);
+
+#endif
diff --git a/src/vnet/fib/fib_entry.c b/src/vnet/fib/fib_entry.c
new file mode 100644
index 00000000000..24b506379ac
--- /dev/null
+++ b/src/vnet/fib/fib_entry.c
@@ -0,0 +1,1503 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/ip/format.h>
+#include <vnet/ip/lookup.h>
+#include <vnet/adj/adj.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/drop_dpo.h>
+
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_walk.h>
+#include <vnet/fib/fib_entry_src.h>
+#include <vnet/fib/fib_entry_cover.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/fib_internal.h>
+#include <vnet/fib/fib_attached_export.h>
+#include <vnet/fib/fib_path_ext.h>
+
+/*
+ * Array of strings/names for the FIB sources
+ */
+static const char *fib_source_names[] = FIB_SOURCES;
+static const char *fib_attribute_names[] = FIB_ENTRY_ATTRIBUTES;
+
+/*
+ * Pool for all fib_entries
+ */
+static fib_entry_t *fib_entry_pool;
+
+fib_entry_t *
+fib_entry_get (fib_node_index_t index)
+{
+ return (pool_elt_at_index(fib_entry_pool, index));
+}
+
+static fib_node_t *
+fib_entry_get_node (fib_node_index_t index)
+{
+ return ((fib_node_t*)fib_entry_get(index));
+}
+
+fib_node_index_t
+fib_entry_get_index (const fib_entry_t * fib_entry)
+{
+ return (fib_entry - fib_entry_pool);
+}
+
+static fib_protocol_t
+fib_entry_get_proto (const fib_entry_t * fib_entry)
+{
+ return (fib_entry->fe_prefix.fp_proto);
+}
+
+fib_forward_chain_type_t
+fib_entry_get_default_chain_type (const fib_entry_t *fib_entry)
+{
+ switch (fib_entry->fe_prefix.fp_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+ case FIB_PROTOCOL_IP6:
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6);
+ case FIB_PROTOCOL_MPLS:
+ if (MPLS_EOS == fib_entry->fe_prefix.fp_eos)
+ /*
+ * If the entry being asked for is an eos-MPLS label entry,
+ * then use the payload-protocol field, that we stashed there
+ * for just this purpose
+ */
+ return (fib_forw_chain_type_from_dpo_proto(
+ fib_entry->fe_prefix.fp_payload_proto));
+ else
+ return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
+ }
+
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+}
+
+u8 *
+format_fib_entry (u8 * s, va_list * args)
+{
+ fib_forward_chain_type_t fct;
+ fib_entry_attribute_t attr;
+ fib_path_ext_t *path_ext;
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *src;
+ fib_node_index_t fei;
+ fib_source_t source;
+ u32 n_covered;
+ int level;
+
+ fei = va_arg (*args, fib_node_index_t);
+ level = va_arg (*args, int);
+ fib_entry = fib_entry_get(fei);
+
+ s = format (s, "%U", format_fib_prefix, &fib_entry->fe_prefix);
+
+ if (level >= FIB_ENTRY_FORMAT_DETAIL)
+ {
+ s = format (s, " fib:%d", fib_entry->fe_fib_index);
+ s = format (s, " index:%d", fib_entry_get_index(fib_entry));
+ s = format (s, " locks:%d", fib_entry->fe_node.fn_locks);
+
+ FOR_EACH_SRC_ADDED(fib_entry, src, source,
+ ({
+ s = format (s, "\n src:%s ",
+ fib_source_names[source]);
+ s = fib_entry_src_format(fib_entry, source, s);
+ s = format (s, " refs:%d ", src->fes_ref_count);
+ if (FIB_ENTRY_FLAG_NONE != src->fes_entry_flags) {
+ s = format(s, "flags:");
+ FOR_EACH_FIB_ATTRIBUTE(attr) {
+ if ((1<<attr) & src->fes_entry_flags) {
+ s = format (s, "%s,", fib_attribute_names[attr]);
+ }
+ }
+ }
+ s = format (s, "\n");
+ if (FIB_NODE_INDEX_INVALID != src->fes_pl)
+ {
+ s = fib_path_list_format(src->fes_pl, s);
+ }
+ if (NULL != src->fes_path_exts)
+ {
+ s = format(s, " Extensions:");
+ vec_foreach(path_ext, src->fes_path_exts)
+ {
+ s = format(s, "\n %U", format_fib_path_ext, path_ext);
+ }
+ }
+ }));
+
+ n_covered = fib_entry_cover_get_size(fib_entry);
+ if (n_covered > 0) {
+ s = format(s, "\n tracking %d covered: ", n_covered);
+ s = fib_entry_cover_list_format(fib_entry, s);
+ }
+ s = fib_ae_import_format(fib_entry, s);
+ s = fib_ae_export_format(fib_entry, s);
+
+ s = format (s, "\n forwarding: ");
+ }
+ else
+ {
+ s = format (s, "\n");
+ }
+
+ fct = fib_entry_get_default_chain_type(fib_entry);
+
+ if (!dpo_id_is_valid(&fib_entry->fe_lb))
+ {
+ s = format (s, " UNRESOLVED\n");
+ return (s);
+ }
+ else
+ {
+ s = format(s, " %U-chain\n %U",
+ format_fib_forw_chain_type, fct,
+ format_dpo_id,
+ &fib_entry->fe_lb,
+ 2);
+ s = format(s, "\n");
+
+ if (level >= FIB_ENTRY_FORMAT_DETAIL2)
+ {
+ fib_entry_delegate_type_t fdt;
+ fib_entry_delegate_t *fed;
+
+ FOR_EACH_DELEGATE_CHAIN(fib_entry, fdt, fed,
+ {
+ s = format(s, " %U-chain\n %U",
+ format_fib_forw_chain_type,
+ fib_entry_delegate_type_to_chain_type(fdt),
+ format_dpo_id, &fed->fd_dpo, 2);
+ s = format(s, "\n");
+ });
+ }
+ }
+
+ if (level >= FIB_ENTRY_FORMAT_DETAIL2)
+ {
+ s = format(s, "\nchildren:");
+ s = fib_node_children_format(fib_entry->fe_node.fn_children, s);
+ }
+
+ return (s);
+}
+
+static fib_entry_t*
+fib_entry_from_fib_node (fib_node_t *node)
+{
+#if CLIB_DEBUG > 0
+ ASSERT(FIB_NODE_TYPE_ENTRY == node->fn_type);
+#endif
+ return ((fib_entry_t*)node);
+}
+
+static void
+fib_entry_last_lock_gone (fib_node_t *node)
+{
+ fib_entry_delegate_type_t fdt;
+ fib_entry_delegate_t *fed;
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_from_fib_node(node);
+
+ FOR_EACH_DELEGATE_CHAIN(fib_entry, fdt, fed,
+ {
+ dpo_reset(&fed->fd_dpo);
+ fib_entry_delegate_remove(fib_entry, fdt);
+ });
+
+ FIB_ENTRY_DBG(fib_entry, "last-lock");
+
+ fib_node_deinit(&fib_entry->fe_node);
+ // FIXME -RR Backwalk
+
+ ASSERT(0 == vec_len(fib_entry->fe_delegates));
+ vec_free(fib_entry->fe_delegates);
+ pool_put(fib_entry_pool, fib_entry);
+}
+
+static fib_entry_src_t*
+fib_entry_get_best_src_i (const fib_entry_t *fib_entry)
+{
+ fib_entry_src_t *bsrc;
+
+ /*
+ * the enum of sources is deliberately arranged in priority order
+ */
+ if (0 == vec_len(fib_entry->fe_srcs))
+ {
+ bsrc = NULL;
+ }
+ else
+ {
+ bsrc = vec_elt_at_index(fib_entry->fe_srcs, 0);
+ }
+
+ return (bsrc);
+}
+
+static fib_source_t
+fib_entry_src_get_source (const fib_entry_src_t *esrc)
+{
+ if (NULL != esrc)
+ {
+ return (esrc->fes_src);
+ }
+ return (FIB_SOURCE_MAX);
+}
+
+static fib_entry_flag_t
+fib_entry_src_get_flags (const fib_entry_src_t *esrc)
+{
+ if (NULL != esrc)
+ {
+ return (esrc->fes_entry_flags);
+ }
+ return (FIB_ENTRY_FLAG_NONE);
+}
+
+fib_entry_flag_t
+fib_entry_get_flags (fib_node_index_t fib_entry_index)
+{
+ return (fib_entry_get_flags_i(fib_entry_get(fib_entry_index)));
+}
+
+/*
+ * fib_entry_back_walk_notify
+ *
+ * A back walk has reached this entry.
+ */
+static fib_node_back_walk_rc_t
+fib_entry_back_walk_notify (fib_node_t *node,
+ fib_node_back_walk_ctx_t *ctx)
+{
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_from_fib_node(node);
+
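+ /*
+ * these walk reasons mean the entry's forwarding may have changed,
+ * so re-activate (and hence re-install) the current best source
+ */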
+ if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason ||
+ FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason ||
+ FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason ||
+ FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason ||
+ FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason ||
+ FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
+ {
+ fib_entry_src_action_reactivate(fib_entry,
+ fib_entry_get_best_source(
+ fib_entry_get_index(fib_entry)));
+ }
+
+ /*
+ * all other walk types can be reclassified as a re-evaluate for
+ * all recursive dependents.
+ * By reclassifying we ensure that should any of these walk types meet
+ * they can be merged.
+ */
+ ctx->fnbw_reason = FIB_NODE_BW_REASON_FLAG_EVALUATE;
+
+ /*
+ * ... and nothing is forced sync from now on.
+ */
+ ctx->fnbw_flags &= ~FIB_NODE_BW_FLAG_FORCE_SYNC;
+
+ /*
+ * propagate the backwalk further if we haven't already reached the
+ * maximum depth.
+ */
+ fib_walk_sync(FIB_NODE_TYPE_ENTRY,
+ fib_entry_get_index(fib_entry),
+ ctx);
+
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+static void
+fib_entry_show_memory (void)
+{
+ u32 n_srcs = 0, n_exts = 0;
+ fib_entry_src_t *esrc;
+ fib_entry_t *entry;
+
+ fib_show_memory_usage("Entry",
+ pool_elts(fib_entry_pool),
+ pool_len(fib_entry_pool),
+ sizeof(fib_entry_t));
+
+ pool_foreach(entry, fib_entry_pool,
+ ({
+ n_srcs += vec_len(entry->fe_srcs);
+ vec_foreach(esrc, entry->fe_srcs)
+ {
+ n_exts += vec_len(esrc->fes_path_exts);
+ }
+ }));
+
+ fib_show_memory_usage("Entry Source",
+ n_srcs, n_srcs, sizeof(fib_entry_src_t));
+ fib_show_memory_usage("Entry Path-Extensions",
+ n_exts, n_exts,
+ sizeof(fib_path_ext_t));
+}
+
+/*
+ * The FIB entry's graph node virtual function table
+ */
+static const fib_node_vft_t fib_entry_vft = {
+ .fnv_get = fib_entry_get_node,
+ .fnv_last_lock = fib_entry_last_lock_gone,
+ .fnv_back_walk = fib_entry_back_walk_notify,
+ .fnv_mem_show = fib_entry_show_memory,
+};
+
+/**
+ * @brief Contribute the set of Adjacencies that this entry forwards with
+ * to build the uRPF list of its children
+ */
+void
+fib_entry_contribute_urpf (fib_node_index_t entry_index,
+ index_t urpf)
+{
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_get(entry_index);
+
+ return (fib_path_list_contribute_urpf(fib_entry->fe_parent, urpf));
+}
+
+/*
+ * fib_entry_contribute_forwarding
+ *
+ * Get and lock the forwarding information (DPO) contributed by the FIB entry.
+ */
+void
+fib_entry_contribute_forwarding (fib_node_index_t fib_entry_index,
+ fib_forward_chain_type_t fct,
+ dpo_id_t *dpo)
+{
+ fib_entry_delegate_t *fed;
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ if (fct == fib_entry_get_default_chain_type(fib_entry))
+ {
+ dpo_copy(dpo, &fib_entry->fe_lb);
+ }
+ else
+ {
+ fed = fib_entry_delegate_get(fib_entry,
+ fib_entry_chain_type_to_delegate_type(fct));
+
+ if (NULL == fed)
+ {
+ fed = fib_entry_delegate_find_or_add(
+ fib_entry,
+ fib_entry_chain_type_to_delegate_type(fct));
+ /*
+ * on-demand create eos/non-eos.
+ * There is no on-demand delete because:
+ * - memory versus complexity & reliability:
+ * leaving unrequired [n]eos LBs around wastes memory; cleaning
+ * them up on the right trigger is more code. I favour the former.
+ */
+ fib_entry_src_mk_lb(fib_entry,
+ fib_entry_get_best_src_i(fib_entry),
+ fct,
+ &fed->fd_dpo);
+ }
+
+ dpo_copy(dpo, &fed->fd_dpo);
+ }
+}
+
+const dpo_id_t *
+fib_entry_contribute_ip_forwarding (fib_node_index_t fib_entry_index)
+{
+ fib_forward_chain_type_t fct;
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+ fct = fib_entry_get_default_chain_type(fib_entry);
+
+ ASSERT((fct == FIB_FORW_CHAIN_TYPE_UNICAST_IP4 ||
+ fct == FIB_FORW_CHAIN_TYPE_UNICAST_IP6));
+
+ return (&fib_entry->fe_lb);
+}
+
+adj_index_t
+fib_entry_get_adj (fib_node_index_t fib_entry_index)
+{
+ const dpo_id_t *dpo;
+
+ dpo = fib_entry_contribute_ip_forwarding(fib_entry_index);
+ dpo = load_balance_get_bucket(dpo->dpoi_index, 0);
+
+ if (dpo_is_adj(dpo))
+ {
+ return (dpo->dpoi_index);
+ }
+ return (ADJ_INDEX_INVALID);
+}
+
+fib_node_index_t
+fib_entry_get_path_list (fib_node_index_t fib_entry_index)
+{
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ return (fib_entry->fe_parent);
+}
+
+u32
+fib_entry_child_add (fib_node_index_t fib_entry_index,
+ fib_node_type_t child_type,
+ fib_node_index_t child_index)
+{
+ return (fib_node_child_add(FIB_NODE_TYPE_ENTRY,
+ fib_entry_index,
+ child_type,
+ child_index));
+};
+
+void
+fib_entry_child_remove (fib_node_index_t fib_entry_index,
+ u32 sibling_index)
+{
+ fib_node_child_remove(FIB_NODE_TYPE_ENTRY,
+ fib_entry_index,
+ sibling_index);
+
+ if (0 == fib_node_get_n_children(FIB_NODE_TYPE_ENTRY,
+ fib_entry_index))
+ {
+ /*
+ * if there are no children left then there is no reason to keep
+ * the non-default forwarding chains. those chains are built only
+ * because the children want them.
+ */
+ fib_entry_delegate_type_t fdt;
+ fib_entry_delegate_t *fed;
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ FOR_EACH_DELEGATE_CHAIN(fib_entry, fdt, fed,
+ {
+ dpo_reset(&fed->fd_dpo);
+ fib_entry_delegate_remove(fib_entry, fdt);
+ });
+ }
+}
+
+static fib_entry_t *
+fib_entry_alloc (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_node_index_t *fib_entry_index)
+{
+ fib_entry_t *fib_entry;
+ fib_prefix_t *fep;
+
+ pool_get(fib_entry_pool, fib_entry);
+ memset(fib_entry, 0, sizeof(*fib_entry));
+
+ fib_node_init(&fib_entry->fe_node,
+ FIB_NODE_TYPE_ENTRY);
+
+ fib_entry->fe_fib_index = fib_index;
+
+ /*
+ * the one time we need to update the const prefix is when
+ * the entry is first created
+ */
+ fep = (fib_prefix_t*)&(fib_entry->fe_prefix);
+ *fep = *prefix;
+
+ if (FIB_PROTOCOL_MPLS == fib_entry->fe_prefix.fp_proto)
+ {
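+	/* an MPLS prefix is always a full label: 20 label bits plus the EOS bit */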
+ fep->fp_len = 21;
+ if (MPLS_NON_EOS == fep->fp_eos)
+ {
+ fep->fp_payload_proto = DPO_PROTO_MPLS;
+ }
+ ASSERT(DPO_PROTO_NONE != fib_entry->fe_prefix.fp_payload_proto);
+ }
+
+ dpo_reset(&fib_entry->fe_lb);
+
+ *fib_entry_index = fib_entry_get_index(fib_entry);
+
+ FIB_ENTRY_DBG(fib_entry, "alloc");
+
+ return (fib_entry);
+}
+
+static void
+fib_entry_post_flag_update_actions (fib_entry_t *fib_entry,
+ fib_source_t source,
+ fib_entry_flag_t old_flags)
+{
+ /*
+ * handle changes to attached export for import entries
+ */
+ int is_import = (FIB_ENTRY_FLAG_IMPORT & fib_entry_get_flags_i(fib_entry));
+ int was_import = (FIB_ENTRY_FLAG_IMPORT & old_flags);
+
+ if (!was_import && is_import)
+ {
+ /*
+ * transition from not exported to exported
+ */
+
+ /*
+ * there is an assumption here that the entry resolves via only
+ * one interface and that it is the cross VRF interface.
+ */
+ u32 sw_if_index = fib_path_list_get_resolving_interface(fib_entry->fe_parent);
+
+ fib_attached_export_import(fib_entry,
+ fib_table_get_index_for_sw_if_index(
+ fib_entry_get_proto(fib_entry),
+ sw_if_index));
+ }
+ else if (was_import && !is_import)
+ {
+ /*
+ * transition from exported to not exported
+ */
+ fib_attached_export_purge(fib_entry);
+ }
+ /*
+ * else
+ * no change. nothing to do.
+ */
+
+ /*
+ * handle changes to attached export for export entries
+ */
+ int is_attached = (FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags_i(fib_entry));
+ int was_attached = (FIB_ENTRY_FLAG_ATTACHED & old_flags);
+
+ if (!was_attached && is_attached)
+ {
+ /*
+ * transition to attached. time to export
+ */
+ // FIXME
+ }
+ // else FIXME
+}
+
+static void
+fib_entry_post_install_actions (fib_entry_t *fib_entry,
+ fib_source_t source,
+ fib_entry_flag_t old_flags)
+{
+ fib_entry_post_flag_update_actions(fib_entry, source, old_flags);
+ fib_entry_src_action_installed(fib_entry, source);
+}
+
+fib_node_index_t
+fib_entry_create (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const fib_route_path_t *paths)
+{
+ fib_node_index_t fib_entry_index;
+ fib_entry_t *fib_entry;
+
+ ASSERT(0 < vec_len(paths));
+
+ fib_entry = fib_entry_alloc(fib_index, prefix, &fib_entry_index);
+
+ /*
+ * since this is a new entry create, we don't need to check for winning
+ * sources - there is only one.
+ */
+ fib_entry = fib_entry_src_action_add(fib_entry, source, flags,
+ drop_dpo_get(
+ fib_proto_to_dpo(
+ fib_entry_get_proto(fib_entry))));
+ fib_entry_src_action_path_swap(fib_entry,
+ source,
+ flags,
+ paths);
+ /*
+ * handle possible realloc's by refetching the pointer
+ */
+ fib_entry = fib_entry_get(fib_entry_index);
+ fib_entry_src_action_activate(fib_entry, source);
+
+ fib_entry_post_install_actions(fib_entry, source, FIB_ENTRY_FLAG_NONE);
+
+ return (fib_entry_index);
+}
+
+fib_node_index_t
+fib_entry_create_special (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo)
+{
+ fib_node_index_t fib_entry_index;
+ fib_entry_t *fib_entry;
+
+ /*
+ * create and initialise the new entry
+ */
+ fib_entry = fib_entry_alloc(fib_index, prefix, &fib_entry_index);
+
+ /*
+ * create the path-list
+ */
+ fib_entry = fib_entry_src_action_add(fib_entry, source, flags, dpo);
+ fib_entry_src_action_activate(fib_entry, source);
+
+ fib_entry_post_install_actions(fib_entry, source, FIB_ENTRY_FLAG_NONE);
+
+ return (fib_entry_index);
+}
+
+static void
+fib_entry_post_update_actions (fib_entry_t *fib_entry,
+ fib_source_t source,
+ fib_entry_flag_t old_flags)
+{
+ /*
+ * backwalk to children to inform them of the change to forwarding.
+ */
+ fib_node_back_walk_ctx_t bw_ctx = {
+ .fnbw_reason = FIB_NODE_BW_REASON_FLAG_EVALUATE,
+ };
+
+ fib_walk_sync(FIB_NODE_TYPE_ENTRY, fib_entry_get_index(fib_entry), &bw_ctx);
+
+ /*
+ * then inform any covered prefixes
+ */
+ fib_entry_cover_update_notify(fib_entry);
+
+ fib_entry_post_install_actions(fib_entry, source, old_flags);
+}
+
+static void
+fib_entry_source_change (fib_entry_t *fib_entry,
+ fib_source_t best_source,
+ fib_source_t new_source,
+ fib_entry_flag_t old_flags)
+{
+ /*
+ * if the path list for the source passed is invalid,
+ * then we need to create a new one. else we are updating
+ * an existing one.
+ */
+ if (new_source < best_source)
+ {
+ /*
+ * we have a new winning source.
+ */
+ fib_entry_src_action_deactivate(fib_entry, best_source);
+ fib_entry_src_action_activate(fib_entry, new_source);
+ }
+ else if (new_source > best_source)
+ {
+ /*
+ * the new source loses. nothing to do here.
+ * the data from the source is saved in the path-list created
+ */
+ return;
+ }
+ else
+ {
+ /*
+ * the new source is one this entry already has.
+ * But the path-list was updated, which will contribute new forwarding,
+ * so install it.
+ */
+ fib_entry_src_action_deactivate(fib_entry, new_source);
+ fib_entry_src_action_activate(fib_entry, new_source);
+ }
+
+ fib_entry_post_update_actions(fib_entry, new_source, old_flags);
+}
+
+void
+fib_entry_special_add (fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo)
+{
+ fib_source_t best_source;
+ fib_entry_flag_t bflags;
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *bsrc;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ bsrc = fib_entry_get_best_src_i(fib_entry);
+ best_source = fib_entry_src_get_source(bsrc);
+ bflags = fib_entry_src_get_flags(bsrc);
+
+ fib_entry = fib_entry_src_action_add(fib_entry, source, flags, dpo);
+ fib_entry_source_change(fib_entry, best_source, source, bflags);
+}
+
+void
+fib_entry_special_update (fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo)
+{
+ fib_source_t best_source;
+ fib_entry_flag_t bflags;
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *bsrc;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ bsrc = fib_entry_get_best_src_i(fib_entry);
+ best_source = fib_entry_src_get_source(bsrc);
+ bflags = fib_entry_src_get_flags(bsrc);
+
+ fib_entry = fib_entry_src_action_update(fib_entry, source, flags, dpo);
+ fib_entry_source_change(fib_entry, best_source, source, bflags);
+}
+
+
+void
+fib_entry_path_add (fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const fib_route_path_t *rpath)
+{
+ fib_source_t best_source;
+ fib_entry_flag_t bflags;
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *bsrc;
+
+ ASSERT(1 == vec_len(rpath));
+
+ fib_entry = fib_entry_get(fib_entry_index);
+ ASSERT(NULL != fib_entry);
+
+ bsrc = fib_entry_get_best_src_i(fib_entry);
+ best_source = fib_entry_src_get_source(bsrc);
+ bflags = fib_entry_src_get_flags(bsrc);
+
+ fib_entry = fib_entry_src_action_path_add(fib_entry, source, flags, rpath);
+
+ /*
+ * determine how the source just added ranks against the current
+ * best: a numerically lower source value is a higher priority.
+ */
+ if (source < best_source)
+ {
+ /*
+ * we have a new winning source.
+ */
+ fib_entry_src_action_deactivate(fib_entry, best_source);
+ fib_entry_src_action_activate(fib_entry, source);
+ }
+ else if (source > best_source)
+ {
+ /*
+ * the new source loses. nothing to do here.
+ * the data from the source is saved in the path-list created
+ */
+ return;
+ }
+ else
+ {
+ /*
+ * the new source is one this entry already has.
+ * But the path-list was updated, which will contribute new forwarding,
+ * so install it.
+ */
+ fib_entry_src_action_deactivate(fib_entry, source);
+ fib_entry_src_action_activate(fib_entry, source);
+ }
+
+ fib_entry_post_update_actions(fib_entry, source, bflags);
+}
+
+/*
+ * fib_entry_path_remove
+ *
+ * remove a path from the entry.
+ * return FIB_ENTRY_SRC_FLAG_ADDED if the entry still has sources,
+ * FIB_ENTRY_SRC_FLAG_NONE otherwise.
+ */
+fib_entry_src_flag_t
+fib_entry_path_remove (fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ const fib_route_path_t *rpath)
+{
+ fib_entry_src_flag_t sflag;
+ fib_source_t best_source;
+ fib_entry_flag_t bflags;
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *bsrc;
+
+ ASSERT(1 == vec_len(rpath));
+
+ fib_entry = fib_entry_get(fib_entry_index);
+ ASSERT(NULL != fib_entry);
+
+ bsrc = fib_entry_get_best_src_i(fib_entry);
+ best_source = fib_entry_src_get_source(bsrc);
+ bflags = fib_entry_src_get_flags(bsrc);
+
+ sflag = fib_entry_src_action_path_remove(fib_entry, source, rpath);
+
+ /*
+ * determine how the source from which the path was removed ranks
+ * against the current best source.
+ */
+ if (source < best_source)
+ {
+ /*
+ * Que! removing a path from a source that is better than the
+ * one this entry is using.
+ */
+ ASSERT(0);
+ }
+ else if (source > best_source)
+ {
+ /*
+ * the source is not the best. nothing to do.
+ */
+ return (FIB_ENTRY_SRC_FLAG_ADDED);
+ }
+ else
+ {
+ /*
+ * removing a path from the path-list we were using.
+ */
+ if (!(FIB_ENTRY_SRC_FLAG_ADDED & sflag))
+ {
+ /*
+ * the last path from the source was removed.
+ * fallback to lower source
+ */
+ bsrc = fib_entry_get_best_src_i(fib_entry);
+ best_source = fib_entry_src_get_source(bsrc);
+
+ if (FIB_SOURCE_MAX == best_source)
+ {
+ /*
+ * no more sources left. this entry is toast.
+ */
+ fib_entry_src_action_uninstall(fib_entry);
+ fib_entry_post_flag_update_actions(fib_entry, source, bflags);
+
+ return (FIB_ENTRY_SRC_FLAG_NONE);
+ }
+ else
+ {
+ fib_entry_src_action_activate(fib_entry, best_source);
+ source = best_source;
+ }
+ }
+ else
+ {
+ /*
+ * re-install the new forwarding information
+ */
+ fib_entry_src_action_deactivate(fib_entry, source);
+ fib_entry_src_action_activate(fib_entry, source);
+ }
+ }
+
+ fib_entry_post_update_actions(fib_entry, source, bflags);
+
+ /*
+ * still have sources
+ */
+ return (FIB_ENTRY_SRC_FLAG_ADDED);
+}
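+
+/*
+ * A hedged usage sketch (names illustrative, not part of this patch):
+ * callers inspect the returned source flags to decide whether the
+ * entry itself can now be removed from its table.
+ */
+static void
+example_remove_path (fib_node_index_t fib_entry_index,
+                     fib_source_t source,
+                     const fib_route_path_t *rpath)
+{
+    if (FIB_ENTRY_SRC_FLAG_NONE ==
+        fib_entry_path_remove(fib_entry_index, source, rpath))
+    {
+        /* no sources remain; the owning table can delete the entry */
+    }
+}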
+
+/*
+ * fib_entry_special_remove
+ *
+ * remove a special source from the entry.
+ * return FIB_ENTRY_SRC_FLAG_ADDED if the entry still has sources,
+ * FIB_ENTRY_SRC_FLAG_NONE otherwise.
+ */
+fib_entry_src_flag_t
+fib_entry_special_remove (fib_node_index_t fib_entry_index,
+ fib_source_t source)
+{
+ fib_entry_src_flag_t sflag;
+ fib_source_t best_source;
+ fib_entry_flag_t bflags;
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *bsrc;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+ ASSERT(NULL != fib_entry);
+
+ bsrc = fib_entry_get_best_src_i(fib_entry);
+ best_source = fib_entry_src_get_source(bsrc);
+ bflags = fib_entry_src_get_flags(bsrc);
+
+ sflag = fib_entry_src_action_remove(fib_entry, source);
+
+ /*
+ * determine how the removed source ranks against the current best
+ * source.
+ */
+ if (source < best_source)
+ {
+ /*
+ * Que! removing a path from a source that is better than the
+ * one this entry is using. This can only mean it is a source
+ * this prefix does not have.
+ */
+ return (FIB_ENTRY_SRC_FLAG_ADDED);
+ }
+ else if (source > best_source)
+ {
+ /*
+ * the source is not the best. nothing to do.
+ */
+ return (FIB_ENTRY_SRC_FLAG_ADDED);
+ }
+ else
+ {
+ if (!(FIB_ENTRY_SRC_FLAG_ADDED & sflag))
+ {
+ /*
+ * the source was removed. use the next best.
+ */
+ bsrc = fib_entry_get_best_src_i(fib_entry);
+ best_source = fib_entry_src_get_source(bsrc);
+
+ if (FIB_SOURCE_MAX == best_source)
+ {
+ /*
+ * no more sources left. this entry is toast.
+ */
+ fib_entry_src_action_uninstall(fib_entry);
+ fib_entry_post_flag_update_actions(fib_entry, source, bflags);
+
+ return (FIB_ENTRY_SRC_FLAG_NONE);
+ }
+ else
+ {
+ fib_entry_src_action_activate(fib_entry, best_source);
+ source = best_source;
+ }
+ }
+ else
+ {
+ /*
+ * re-install the new forwarding information
+ */
+ fib_entry_src_action_reactivate(fib_entry, source);
+ }
+ }
+
+ fib_entry_post_update_actions(fib_entry, source, bflags);
+
+ /*
+ * still have sources
+ */
+ return (FIB_ENTRY_SRC_FLAG_ADDED);
+}
+
+/**
+ * fib_entry_delete
+ *
+ * The source is withdrawing all the paths it provided
+ */
+fib_entry_src_flag_t
+fib_entry_delete (fib_node_index_t fib_entry_index,
+ fib_source_t source)
+{
+ return (fib_entry_special_remove(fib_entry_index, source));
+}
+
+/**
+ * fib_entry_update
+ *
+ * The source has provided a new set of paths that will replace the old.
+ */
+void
+fib_entry_update (fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const fib_route_path_t *paths)
+{
+ fib_source_t best_source;
+ fib_entry_flag_t bflags;
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *bsrc;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+ ASSERT(NULL != fib_entry);
+
+ bsrc = fib_entry_get_best_src_i(fib_entry);
+ best_source = fib_entry_src_get_source(bsrc);
+ bflags = fib_entry_src_get_flags(bsrc);
+
+ fib_entry_src_action_path_swap(fib_entry,
+ source,
+ flags,
+ paths);
+ /*
+ * handle possible realloc's by refetching the pointer
+ */
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ /*
+ * determine how the updated source ranks against the current best:
+ * a numerically lower source value is a higher priority.
+ */
+ if (source < best_source)
+ {
+ /*
+ * we have a new winning source.
+ */
+ fib_entry_src_action_deactivate(fib_entry, best_source);
+ fib_entry_src_action_activate(fib_entry, source);
+ }
+ else if (source > best_source)
+ {
+ /*
+ * the new source loses. nothing to do here.
+ * the data from the source is saved in the path-list created
+ */
+ return;
+ }
+ else
+ {
+ /*
+ * the new source is one this entry already has.
+ * But the path-list was updated, which will contribute new forwarding,
+ * so install it.
+ */
+ fib_entry_src_action_deactivate(fib_entry, source);
+ fib_entry_src_action_activate(fib_entry, source);
+ }
+
+ fib_entry_post_update_actions(fib_entry, source, bflags);
+}
+
+
+/*
+ * fib_entry_cover_changed
+ *
+ * this entry is tracking its cover and that cover has changed.
+ */
+void
+fib_entry_cover_changed (fib_node_index_t fib_entry_index)
+{
+ fib_entry_src_cover_res_t res = {
+ .install = !0,
+ .bw_reason = FIB_NODE_BW_REASON_FLAG_NONE,
+ };
+ fib_source_t source, best_source;
+ fib_entry_flag_t bflags;
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *esrc;
+ u32 index;
+
+ bflags = FIB_ENTRY_FLAG_NONE;
+ best_source = FIB_SOURCE_FIRST;
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ fib_attached_export_cover_change(fib_entry);
+
+ /*
+ * propagate the notification to each of the added sources
+ */
+ index = 0;
+ FOR_EACH_SRC_ADDED(fib_entry, esrc, source,
+ ({
+ if (0 == index)
+ {
+ /*
+ * only the best source gets to set the back walk flags
+ */
+ res = fib_entry_src_action_cover_change(fib_entry, source);
+ bflags = fib_entry_src_get_flags(esrc);
+ best_source = fib_entry_src_get_source(esrc);
+ }
+ else
+ {
+ fib_entry_src_action_cover_change(fib_entry, source);
+ }
+ index++;
+ }));
+
+ if (res.install)
+ {
+ fib_entry_src_action_reactivate(fib_entry,
+ fib_entry_src_get_source(
+ fib_entry_get_best_src_i(fib_entry)));
+ fib_entry_post_install_actions(fib_entry, best_source, bflags);
+ }
+ else
+ {
+ fib_entry_src_action_uninstall(fib_entry);
+ }
+
+ if (FIB_NODE_BW_REASON_FLAG_NONE != res.bw_reason)
+ {
+ /*
+ * time for walkies fido.
+ */
+ fib_node_back_walk_ctx_t bw_ctx = {
+ .fnbw_reason = res.bw_reason,
+ };
+
+ fib_walk_sync(FIB_NODE_TYPE_ENTRY, fib_entry_index, &bw_ctx);
+ }
+}
+
+/*
+ * fib_entry_cover_updated
+ *
+ * this entry is tracking its cover and that cover has been updated
+ * (i.e. its forwarding information has changed).
+ */
+void
+fib_entry_cover_updated (fib_node_index_t fib_entry_index)
+{
+ fib_entry_src_cover_res_t res = {
+ .install = !0,
+ .bw_reason = FIB_NODE_BW_REASON_FLAG_NONE,
+ };
+ fib_source_t source, best_source;
+ fib_entry_flag_t bflags;
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *esrc;
+ u32 index;
+
+ bflags = FIB_ENTRY_FLAG_NONE;
+ best_source = FIB_SOURCE_FIRST;
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ fib_attached_export_cover_update(fib_entry);
+
+ /*
+ * propagate the notification to each of the added sources
+ */
+ index = 0;
+ FOR_EACH_SRC_ADDED(fib_entry, esrc, source,
+ ({
+ if (0 == index)
+ {
+ /*
+ * only the best source gets to set the back walk flags
+ */
+ res = fib_entry_src_action_cover_update(fib_entry, source);
+ bflags = fib_entry_src_get_flags(esrc);
+ best_source = fib_entry_src_get_source(esrc);
+ }
+ else
+ {
+ fib_entry_src_action_cover_update(fib_entry, source);
+ }
+ index++;
+ }));
+
+ if (res.install)
+ {
+ fib_entry_src_action_reactivate(fib_entry,
+ fib_entry_src_get_source(
+ fib_entry_get_best_src_i(fib_entry)));
+ fib_entry_post_install_actions(fib_entry, best_source, bflags);
+ }
+ else
+ {
+ fib_entry_src_action_uninstall(fib_entry);
+ }
+
+ if (FIB_NODE_BW_REASON_FLAG_NONE != res.bw_reason)
+ {
+ /*
+ * time for walkies fido.
+ */
+ fib_node_back_walk_ctx_t bw_ctx = {
+ .fnbw_reason = res.bw_reason,
+ };
+
+ fib_walk_sync(FIB_NODE_TYPE_ENTRY, fib_entry_index, &bw_ctx);
+ }
+}
+
+int
+fib_entry_recursive_loop_detect (fib_node_index_t entry_index,
+ fib_node_index_t **entry_indicies)
+{
+ fib_entry_t *fib_entry;
+ int was_looped, is_looped;
+
+ fib_entry = fib_entry_get(entry_index);
+
+ if (FIB_NODE_INDEX_INVALID != fib_entry->fe_parent)
+ {
+ fib_node_index_t *entries = *entry_indicies;
+
+ vec_add1(entries, entry_index);
+ was_looped = fib_path_list_is_looped(fib_entry->fe_parent);
+ is_looped = fib_path_list_recursive_loop_detect(fib_entry->fe_parent,
+ &entries);
+
+ *entry_indicies = entries;
+
+ if (!!was_looped != !!is_looped)
+ {
+ /*
+ * re-evaluate all the entry's forwarding
+ * NOTE: this is an in-place modify
+ */
+ fib_entry_delegate_type_t fdt;
+ fib_entry_delegate_t *fed;
+
+ FOR_EACH_DELEGATE_CHAIN(fib_entry, fdt, fed,
+ {
+ fib_entry_src_mk_lb(fib_entry,
+ fib_entry_get_best_src_i(fib_entry),
+ fib_entry_delegate_type_to_chain_type(fdt),
+ &fed->fd_dpo);
+ });
+ }
+ }
+ else
+ {
+ /*
+ * the entry is currently not linked to a path-list. this happens
+ * when it is this entry that is re-linking path-lists and has thus
+ * broken the loop
+ */
+ is_looped = 0;
+ }
+
+ return (is_looped);
+}
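+
+/*
+ * Caller sketch (illustrative only, not part of this patch): the
+ * accumulator of visited entry indices starts as an empty vector and
+ * is the caller's to free.
+ */
+static int
+example_loop_check (fib_node_index_t entry_index)
+{
+    fib_node_index_t *entries = NULL;
+    int looped;
+
+    looped = fib_entry_recursive_loop_detect(entry_index, &entries);
+    vec_free(entries);
+
+    return (looped);
+}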
+
+u32
+fib_entry_get_resolving_interface (fib_node_index_t entry_index)
+{
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_get(entry_index);
+
+ return (fib_path_list_get_resolving_interface(fib_entry->fe_parent));
+}
+
+fib_source_t
+fib_entry_get_best_source (fib_node_index_t entry_index)
+{
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *bsrc;
+
+ fib_entry = fib_entry_get(entry_index);
+
+ bsrc = fib_entry_get_best_src_i(fib_entry);
+ return (fib_entry_src_get_source(bsrc));
+}
+
+static int
+fib_ip4_address_compare (const ip4_address_t * a1,
+ const ip4_address_t * a2)
+{
+ /*
+ * IP addresses are unsigned ints, but the return value here needs to
+ * be signed; a simple subtraction won't cut it.
+ * If the addresses are the same, the sort order is undefined, so phooey.
+ */
+ return ((clib_net_to_host_u32(a1->data_u32) >
+ clib_net_to_host_u32(a2->data_u32) ) ?
+ 1 : -1);
+}
+
+static int
+fib_ip6_address_compare (const ip6_address_t * a1,
+ const ip6_address_t * a2)
+{
+ int i;
+ for (i = 0; i < ARRAY_LEN (a1->as_u16); i++)
+ {
+ int cmp = (clib_net_to_host_u16 (a1->as_u16[i]) -
+ clib_net_to_host_u16 (a2->as_u16[i]));
+ if (cmp != 0)
+ return cmp;
+ }
+ return 0;
+}
+
+static int
+fib_entry_cmp (fib_node_index_t fib_entry_index1,
+ fib_node_index_t fib_entry_index2)
+{
+ fib_entry_t *fib_entry1, *fib_entry2;
+ int cmp = 0;
+
+ fib_entry1 = fib_entry_get(fib_entry_index1);
+ fib_entry2 = fib_entry_get(fib_entry_index2);
+
+ switch (fib_entry1->fe_prefix.fp_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ cmp = fib_ip4_address_compare(&fib_entry1->fe_prefix.fp_addr.ip4,
+ &fib_entry2->fe_prefix.fp_addr.ip4);
+ break;
+ case FIB_PROTOCOL_IP6:
+ cmp = fib_ip6_address_compare(&fib_entry1->fe_prefix.fp_addr.ip6,
+ &fib_entry2->fe_prefix.fp_addr.ip6);
+ break;
+ case FIB_PROTOCOL_MPLS:
+ cmp = (fib_entry1->fe_prefix.fp_label - fib_entry2->fe_prefix.fp_label);
+
+ if (0 == cmp)
+ {
+ cmp = (fib_entry1->fe_prefix.fp_eos - fib_entry2->fe_prefix.fp_eos);
+ }
+ break;
+ }
+
+ if (0 == cmp)
+ {
+ cmp = (fib_entry1->fe_prefix.fp_len - fib_entry2->fe_prefix.fp_len);
+ }
+ return (cmp);
+}
+
+int
+fib_entry_cmp_for_sort (void *i1, void *i2)
+{
+ fib_node_index_t *fib_entry_index1 = i1, *fib_entry_index2 = i2;
+
+ return (fib_entry_cmp(*fib_entry_index1,
+ *fib_entry_index2));
+}
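+
+/*
+ * Usage sketch (illustrative only): this comparator is shaped for
+ * vec_sort_with_function(), e.g. to order a vector of entry indices
+ * by prefix.
+ */
+static void
+example_sort_entries (fib_node_index_t *entry_indices)
+{
+    vec_sort_with_function(entry_indices, fib_entry_cmp_for_sort);
+}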
+
+void
+fib_entry_lock (fib_node_index_t fib_entry_index)
+{
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ fib_node_lock(&fib_entry->fe_node);
+}
+
+void
+fib_entry_unlock (fib_node_index_t fib_entry_index)
+{
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ fib_node_unlock(&fib_entry->fe_node);
+}
+
+void
+fib_entry_module_init (void)
+{
+ fib_node_register_type (FIB_NODE_TYPE_ENTRY, &fib_entry_vft);
+}
+
+void
+fib_entry_encode (fib_node_index_t fib_entry_index,
+ fib_route_path_encode_t **api_rpaths)
+{
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+ fib_path_list_walk(fib_entry->fe_parent, fib_path_encode, api_rpaths);
+}
+
+void
+fib_entry_get_prefix (fib_node_index_t fib_entry_index,
+ fib_prefix_t *pfx)
+{
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+ *pfx = fib_entry->fe_prefix;
+}
+
+u32
+fib_entry_get_fib_index (fib_node_index_t fib_entry_index)
+{
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ return (fib_entry->fe_fib_index);
+}
+
+u32
+fib_entry_pool_size (void)
+{
+ return (pool_elts(fib_entry_pool));
+}
+
+static clib_error_t *
+show_fib_entry_command (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ fib_node_index_t fei;
+
+ if (unformat (input, "%d", &fei))
+ {
+ /*
+ * show one in detail
+ */
+ if (!pool_is_free_index(fib_entry_pool, fei))
+ {
+ vlib_cli_output (vm, "%d@%U",
+ fei,
+ format_fib_entry, fei,
+ FIB_ENTRY_FORMAT_DETAIL2);
+ }
+ else
+ {
+ vlib_cli_output (vm, "entry %d invalid", fei);
+ }
+ }
+ else
+ {
+ /*
+ * show all
+ */
+ vlib_cli_output (vm, "FIB Entries:");
+ pool_foreach_index(fei, fib_entry_pool,
+ ({
+ vlib_cli_output (vm, "%d@%U",
+ fei,
+ format_fib_entry, fei,
+ FIB_ENTRY_FORMAT_BRIEF);
+ }));
+ }
+
+ return (NULL);
+}
+
+VLIB_CLI_COMMAND (show_fib_entry, static) = {
+ .path = "show fib entry",
+ .function = show_fib_entry_command,
+ .short_help = "show fib entry",
+};
diff --git a/src/vnet/fib/fib_entry.h b/src/vnet/fib/fib_entry.h
new file mode 100644
index 00000000000..44a5f2e6d7f
--- /dev/null
+++ b/src/vnet/fib/fib_entry.h
@@ -0,0 +1,530 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FIB_ENTRY_H__
+#define __FIB_ENTRY_H__
+
+#include <vnet/fib/fib_node.h>
+#include <vnet/fib/fib_entry_delegate.h>
+#include <vnet/adj/adj.h>
+#include <vnet/ip/ip.h>
+#include <vnet/dpo/dpo.h>
+
+/**
+ * The different sources that can create a route.
+ * The sources are defined here in their relative priority order.
+ * The lower the value the higher the priority
+ */
+typedef enum fib_source_t_ {
+ /**
+ * Marker. Add new values after this one.
+ */
+ FIB_SOURCE_FIRST,
+ /**
+ * Special sources. These are for entries that are added to all
+ * FIBs by default, and should never be over-ridden (hence they
+ * are the highest priority)
+ */
+ FIB_SOURCE_SPECIAL = FIB_SOURCE_FIRST,
+ /**
+ * Classify. A route that links directly to a classify adj
+ */
+ FIB_SOURCE_CLASSIFY,
+ /**
+ * Route added as a result of interface configuration.
+ * This will also come from the API/CLI, but the distinction is
+ * that it is from configuration on an interface, not an 'ip route' command
+ */
+ FIB_SOURCE_INTERFACE,
+ /**
+ * A high priority source a plugin can use
+ */
+ FIB_SOURCE_PLUGIN_HI,
+ /**
+ * From the control plane API
+ */
+ FIB_SOURCE_API,
+ /**
+ * From the CLI.
+ */
+ FIB_SOURCE_CLI,
+ /**
+ * LISP
+ */
+ FIB_SOURCE_LISP,
+ /**
+ * SRv6
+ */
+ FIB_SOURCE_SR,
+ /**
+ * IPv[46] Mapping
+ */
+ FIB_SOURCE_MAP,
+ /**
+ * SIXRD
+ */
+ FIB_SOURCE_SIXRD,
+ /**
+ * DHCP
+ */
+ FIB_SOURCE_DHCP,
+ /**
+ * Adjacency source.
+ * routes created as a result of ARP/ND entries. This is lower priority
+ * than the API/CLI. This is on purpose. trust me.
+ */
+ FIB_SOURCE_ADJ,
+ /**
+ * MPLS label. The prefix has been assigned a local label. This source
+ * never provides forwarding information, instead it acts as a place-holder
+ * so the association of label to prefix can be maintained
+ */
+ FIB_SOURCE_MPLS,
+ /**
+ * Attached Export source.
+ * routes created as a result of attached export. routes thus sourced
+ * will be present in the export tables
+ */
+ FIB_SOURCE_AE,
+ /**
+ * Recursive resolution source.
+ * Used to install an entry that is the resolution target of another.
+ */
+ FIB_SOURCE_RR,
+ /**
+ * uRPF bypass/exemption.
+ * Used to install an entry that is exempt from the loose uRPF check
+ */
+ FIB_SOURCE_URPF_EXEMPT,
+ /**
+ * The default route source.
+ * The default route is always added to the FIB table (like the
+ * special sources) but we need to be able to over-ride it with
+ * 'ip route' sources when provided
+ */
+ FIB_SOURCE_DEFAULT_ROUTE,
+ /**
+ * Marker. add new entries before this one.
+ */
+ FIB_SOURCE_LAST = FIB_SOURCE_DEFAULT_ROUTE,
+} __attribute__ ((packed)) fib_source_t;
+
+STATIC_ASSERT (sizeof(fib_source_t) == 1,
+ "FIB too many sources");
+
+/**
+ * The maximum number of sources
+ */
+#define FIB_SOURCE_MAX (FIB_SOURCE_LAST+1)
+
+#define FIB_SOURCES { \
+ [FIB_SOURCE_SPECIAL] = "special", \
+ [FIB_SOURCE_INTERFACE] = "interface", \
+ [FIB_SOURCE_API] = "API", \
+ [FIB_SOURCE_CLI] = "CLI", \
+ [FIB_SOURCE_ADJ] = "adjacency", \
+ [FIB_SOURCE_MAP] = "MAP", \
+ [FIB_SOURCE_SR] = "SR", \
+ [FIB_SOURCE_SIXRD] = "SixRD", \
+ [FIB_SOURCE_LISP] = "LISP", \
+ [FIB_SOURCE_CLASSIFY] = "classify", \
+ [FIB_SOURCE_DHCP] = "DHCP", \
+ [FIB_SOURCE_RR] = "recursive-resolution", \
+ [FIB_SOURCE_AE] = "attached_export", \
+ [FIB_SOURCE_MPLS] = "mpls", \
+ [FIB_SOURCE_URPF_EXEMPT] = "urpf-exempt", \
+ [FIB_SOURCE_DEFAULT_ROUTE] = "default-route", \
+}
+
+#define FOR_EACH_FIB_SOURCE(_item) \
+ for (_item = FIB_SOURCE_FIRST; _item < FIB_SOURCE_MAX; _item++)
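+
+/*
+ * A hedged sketch (not in this patch) of the iterator and name table in
+ * use. Note FIB_SOURCES is sparse (e.g. FIB_SOURCE_PLUGIN_HI has no
+ * string), so a NULL check is required.
+ */
+static inline u8 *
+format_fib_source_example (u8 *s, fib_source_t src)
+{
+    static const char *names[FIB_SOURCE_MAX] = FIB_SOURCES;
+
+    if (src < FIB_SOURCE_MAX && NULL != names[src])
+        return (format(s, "%s", names[src]));
+    return (format(s, "unknown-%d", src));
+}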
+
+/**
+ * The attributes of a FIB entry. These are set by the entry's sources
+ * and determine the entry's forwarding behaviour.
+ */
+typedef enum fib_entry_attribute_t_ {
+ /**
+ * Marker. Add new values after this one.
+ */
+ FIB_ENTRY_ATTRIBUTE_FIRST,
+ /**
+ * Connected. The prefix is configured on an interface.
+ */
+ FIB_ENTRY_ATTRIBUTE_CONNECTED = FIB_ENTRY_ATTRIBUTE_FIRST,
+ /**
+ * Attached. The prefix is attached to an interface.
+ */
+ FIB_ENTRY_ATTRIBUTE_ATTACHED,
+ /**
+ * The route is an explicit drop.
+ */
+ FIB_ENTRY_ATTRIBUTE_DROP,
+ /**
+ * The route is exclusive. The client creating the route is
+ * providing an exclusive adjacency.
+ */
+ FIB_ENTRY_ATTRIBUTE_EXCLUSIVE,
+ /**
+ * The route is attached cross tables and thus imports covered
+ * prefixes from the other table.
+ */
+ FIB_ENTRY_ATTRIBUTE_IMPORT,
+ /**
+ * The prefix/address is local to this device
+ */
+ FIB_ENTRY_ATTRIBUTE_LOCAL,
+ /**
+ * Marker. add new entries before this one.
+ */
+ FIB_ENTRY_ATTRIBUTE_LAST = FIB_ENTRY_ATTRIBUTE_LOCAL,
+} fib_entry_attribute_t;
+
+/**
+ * The maximum number of attributes
+ */
+#define FIB_ENTRY_ATTRIBUTE_MAX (FIB_ENTRY_ATTRIBUTE_LAST+1)
+
+#define FIB_ENTRY_ATTRIBUTES { \
+ [FIB_ENTRY_ATTRIBUTE_CONNECTED] = "connected", \
+ [FIB_ENTRY_ATTRIBUTE_ATTACHED] = "attached", \
+ [FIB_ENTRY_ATTRIBUTE_IMPORT] = "import", \
+ [FIB_ENTRY_ATTRIBUTE_DROP] = "drop", \
+ [FIB_ENTRY_ATTRIBUTE_EXCLUSIVE] = "exclusive", \
+ [FIB_ENTRY_ATTRIBUTE_LOCAL] = "local", \
+}
+
+#define FOR_EACH_FIB_ATTRIBUTE(_item) \
+ for (_item = FIB_ENTRY_ATTRIBUTE_FIRST; \
+ _item < FIB_ENTRY_ATTRIBUTE_MAX; \
+ _item++)
+
+typedef enum fib_entry_flag_t_ {
+ FIB_ENTRY_FLAG_NONE = 0,
+ FIB_ENTRY_FLAG_CONNECTED = (1 << FIB_ENTRY_ATTRIBUTE_CONNECTED),
+ FIB_ENTRY_FLAG_ATTACHED = (1 << FIB_ENTRY_ATTRIBUTE_ATTACHED),
+ FIB_ENTRY_FLAG_DROP = (1 << FIB_ENTRY_ATTRIBUTE_DROP),
+ FIB_ENTRY_FLAG_EXCLUSIVE = (1 << FIB_ENTRY_ATTRIBUTE_EXCLUSIVE),
+ FIB_ENTRY_FLAG_LOCAL = (1 << FIB_ENTRY_ATTRIBUTE_LOCAL),
+ FIB_ENTRY_FLAG_IMPORT = (1 << FIB_ENTRY_ATTRIBUTE_IMPORT),
+} fib_entry_flag_t;
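+
+/*
+ * Illustrative sketch (not part of this patch): each flag is its
+ * attribute's bit, i.e. (1 << attribute), so flags compose by OR-ing.
+ */
+static inline fib_entry_flag_t
+fib_entry_attribute_to_flag_example (fib_entry_attribute_t attr)
+{
+    return ((fib_entry_flag_t)(1 << attr));
+}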
+
+/**
+ * Flags for the source data
+ */
+typedef enum fib_entry_src_attribute_t_ {
+ /**
+ * Marker. Add new values after this one.
+ */
+ FIB_ENTRY_SRC_ATTRIBUTE_FIRST,
+ /**
+ * the source has been added to the entry
+ */
+ FIB_ENTRY_SRC_ATTRIBUTE_ADDED = FIB_ENTRY_SRC_ATTRIBUTE_FIRST,
+ /**
+ * the source is active/best
+ */
+ FIB_ENTRY_SRC_ATTRIBUTE_ACTIVE,
+ /**
+ * Marker. add new entries before this one.
+ */
+ FIB_ENTRY_SRC_ATTRIBUTE_LAST = FIB_ENTRY_SRC_ATTRIBUTE_ACTIVE,
+} fib_entry_src_attribute_t;
+
+#define FIB_ENTRY_SRC_ATTRIBUTE_MAX (FIB_ENTRY_SRC_ATTRIBUTE_LAST+1)
+
+#define FIB_ENTRY_SRC_ATTRIBUTES { \
+ [FIB_ENTRY_SRC_ATTRIBUTE_ADDED] = "added", \
+ [FIB_ENTRY_SRC_ATTRIBUTE_ACTIVE] = "active", \
+}
+
+typedef enum fib_entry_src_flag_t_ {
+ FIB_ENTRY_SRC_FLAG_NONE = 0,
+ FIB_ENTRY_SRC_FLAG_ADDED = (1 << FIB_ENTRY_SRC_ATTRIBUTE_ADDED),
+ FIB_ENTRY_SRC_FLAG_ACTIVE = (1 << FIB_ENTRY_SRC_ATTRIBUTE_ACTIVE),
+} __attribute__ ((packed)) fib_entry_src_flag_t;
+
+/*
+ * Keep the size of the flags field to 2 bytes, so it
+ * can be placed next to the reference count
+ */
+STATIC_ASSERT (sizeof(fib_entry_src_flag_t) <= 2,
+ "FIB entry flags field size too big");
+
+/**
+ * Information related to the source of a FIB entry
+ */
+typedef struct fib_entry_src_t_ {
+ /**
+ * A vector of path extensions
+ */
+ struct fib_path_ext_t_ *fes_path_exts;
+
+ /**
+ * The path-list created by the source
+ */
+ fib_node_index_t fes_pl;
+ /**
+ * Which source this info block is for
+ */
+ fib_source_t fes_src;
+ /**
+ * Flags on the source
+ */
+ fib_entry_src_flag_t fes_flags;
+
+ /**
+ * 1-byte ref count. This is not the number of users of the Entry
+ * (which is itself not large, due to path-list sharing), but the number
+ * of times a given source has been added. Which is even fewer
+ */
+ u8 fes_ref_count;
+
+ /**
+ * Flags the source contributes to the entry
+ */
+ fib_entry_flag_t fes_entry_flags;
+
+ /**
+ * Source specific info
+ */
+ union {
+ struct {
+ /**
+ * the index of the FIB entry that is the covering entry
+ */
+ fib_node_index_t fesr_cover;
+ /**
+ * This source's index in the cover's list
+ */
+ u32 fesr_sibling;
+ } rr;
+ struct {
+ /**
+ * the index of the FIB entry that is the covering entry
+ */
+ fib_node_index_t fesa_cover;
+ /**
+ * This source's index in the cover's list
+ */
+ u32 fesa_sibling;
+ } adj;
+ struct {
+ /**
+ * the index of the FIB entry that is the covering entry
+ */
+ fib_node_index_t fesi_cover;
+ /**
+ * This source's index in the cover's list
+ */
+ u32 fesi_sibling;
+ } interface;
+ struct {
+ /**
+ * The MPLS local label associated with the prefix.
+ */
+ mpls_label_t fesm_label;
+
+ /**
+ * the indices of the LFIB entries created
+ */
+ fib_node_index_t fesm_lfes[2];
+ } mpls;
+ struct {
+ /**
+ * The source FIB index.
+ */
+ fib_node_index_t fesl_fib_index;
+ } lisp;
+ };
+} fib_entry_src_t;
+
+/**
+ * An entry in a FIB table.
+ *
+ * This entry represents a route added to the FIB that is stored
+ * in one of the FIB tables.
+ */
+typedef struct fib_entry_t_ {
+ /**
+ * Base class. The entry's node representation in the graph.
+ */
+ fib_node_t fe_node;
+ /**
+ * The prefix of the route. This is const just to be sure.
+ * It is the entry's key/identity and so should never change.
+ */
+ const fib_prefix_t fe_prefix;
+ /**
+ * The index of the FIB table this entry is in
+ */
+ u32 fe_fib_index;
+ /**
+ * The load-balance used for forwarding.
+ *
+ * We don't share the EOS and non-EOS LBs even in cases where we could,
+ * because:
+ * - complexity & reliability v. memory
+ * determining the conditions where sharing is possible is non-trivial.
+ * - separate LBs means we can get the EOS bit right in the MPLS label DPO
+ * and so save a few clock cycles in the DP imposition node since we can
+ * paint the header straight on without the need to check the packet
+ * type to derive the EOS bit value.
+ */
+ dpo_id_t fe_lb; // [FIB_FORW_CHAIN_MPLS_NUM];
+ /**
+ * Vector of source infos.
+ * Most entries will only have 1 source. So we optimise for memory usage,
+ * which is preferable since we have many entries.
+ */
+ fib_entry_src_t *fe_srcs;
+ /**
+ * the path-list for which this entry is a child. This is also the path-list
+ * that is contributing forwarding for this entry.
+ */
+ fib_node_index_t fe_parent;
+ /**
+ * index of this entry in the parent's child list.
+ * This is set when this entry is added as a child, but can also
+ * be changed by the parent as it manages its list.
+ */
+ u32 fe_sibling;
+
+ /**
+ * A vector of delegates.
+ */
+ fib_entry_delegate_t *fe_delegates;
+} fib_entry_t;
+
+#define FOR_EACH_FIB_ENTRY_FLAG(_item) \
+ for (_item = FIB_ENTRY_FLAG_FIRST; _item < FIB_ENTRY_FLAG_MAX; _item++)
+
+#define FIB_ENTRY_FORMAT_BRIEF (0x0)
+#define FIB_ENTRY_FORMAT_DETAIL (0x1)
+#define FIB_ENTRY_FORMAT_DETAIL2 (0x2)
+
+extern u8 *format_fib_entry (u8 * s, va_list * args);
+
+extern fib_node_index_t fib_entry_create_special(u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo);
+
+extern fib_node_index_t fib_entry_create (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const fib_route_path_t *paths);
+extern void fib_entry_update (fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const fib_route_path_t *paths);
+
+extern void fib_entry_path_add(fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const fib_route_path_t *rpath);
+extern void fib_entry_special_add(fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo);
+extern void fib_entry_special_update(fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo);
+extern fib_entry_src_flag_t fib_entry_special_remove(fib_node_index_t fib_entry_index,
+ fib_source_t source);
+
+extern fib_entry_src_flag_t fib_entry_path_remove(fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ const fib_route_path_t *rpath);
+extern fib_entry_src_flag_t fib_entry_delete(fib_node_index_t fib_entry_index,
+ fib_source_t source);
+
+extern void fib_entry_contribute_urpf(fib_node_index_t path_index,
+ index_t urpf);
+extern void fib_entry_contribute_forwarding(
+ fib_node_index_t fib_entry_index,
+ fib_forward_chain_type_t type,
+ dpo_id_t *dpo);
+extern const dpo_id_t * fib_entry_contribute_ip_forwarding(
+ fib_node_index_t fib_entry_index);
+extern adj_index_t fib_entry_get_adj_for_source(
+ fib_node_index_t fib_entry_index,
+ fib_source_t source);
+extern const int fib_entry_get_dpo_for_source (
+ fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ dpo_id_t *dpo);
+
+extern adj_index_t fib_entry_get_adj(fib_node_index_t fib_entry_index);
+
+extern int fib_entry_cmp_for_sort(void *i1, void *i2);
+
+extern void fib_entry_cover_changed(fib_node_index_t fib_entry);
+extern void fib_entry_cover_updated(fib_node_index_t fib_entry);
+extern int fib_entry_recursive_loop_detect(fib_node_index_t entry_index,
+ fib_node_index_t **entry_indicies);
+
+extern void fib_entry_lock(fib_node_index_t fib_entry_index);
+extern void fib_entry_unlock(fib_node_index_t fib_entry_index);
+
+extern u32 fib_entry_child_add(fib_node_index_t fib_entry_index,
+ fib_node_type_t type,
+ fib_node_index_t child_index);
+extern void fib_entry_child_remove(fib_node_index_t fib_entry_index,
+ u32 sibling_index);
+extern u32 fib_entry_get_resolving_interface(fib_node_index_t fib_entry_index);
+extern u32 fib_entry_get_resolving_interface_for_source(
+ fib_node_index_t fib_entry_index,
+ fib_source_t source);
+
+extern void fib_entry_encode(fib_node_index_t fib_entry_index,
+ fib_route_path_encode_t **api_rpaths);
+extern void fib_entry_get_prefix(fib_node_index_t fib_entry_index,
+ fib_prefix_t *pfx);
+extern u32 fib_entry_get_fib_index(fib_node_index_t fib_entry_index);
+extern void fib_entry_set_source_data(fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ const void *data);
+extern const void* fib_entry_get_source_data(fib_node_index_t fib_entry_index,
+ fib_source_t source);
+
+extern fib_entry_flag_t fib_entry_get_flags(fib_node_index_t fib_entry_index);
+extern fib_entry_flag_t fib_entry_get_flags_for_source(
+ fib_node_index_t fib_entry_index,
+ fib_source_t source);
+extern fib_source_t fib_entry_get_best_source(fib_node_index_t fib_entry_index);
+extern int fib_entry_is_sourced(fib_node_index_t fib_entry_index,
+ fib_source_t source);
+
+extern fib_node_index_t fib_entry_get_path_list(fib_node_index_t fib_entry_index);
+
+extern void fib_entry_module_init(void);
+
+/*
+ * unsafe... beware the raw pointer.
+ */
+extern fib_node_index_t fib_entry_get_index(const fib_entry_t * fib_entry);
+extern fib_entry_t * fib_entry_get(fib_node_index_t fib_entry_index);
+
+/*
+ * for testing purposes.
+ */
+extern u32 fib_entry_pool_size(void);
+
+#endif
diff --git a/src/vnet/fib/fib_entry_cover.c b/src/vnet/fib/fib_entry_cover.c
new file mode 100644
index 00000000000..147c5daa4fd
--- /dev/null
+++ b/src/vnet/fib/fib_entry_cover.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/fib/fib_entry_cover.h>
+#include <vnet/fib/fib_entry_src.h>
+#include <vnet/fib/fib_node_list.h>
+
+u32
+fib_entry_cover_track (fib_entry_t* cover,
+ fib_node_index_t covered)
+{
+ fib_entry_delegate_t *fed;
+
+ FIB_ENTRY_DBG(cover, "cover-track %d", covered);
+
+ ASSERT(fib_entry_get_index(cover) != covered);
+
+ fed = fib_entry_delegate_get(cover, FIB_ENTRY_DELEGATE_COVERED);
+
+ if (NULL == fed)
+ {
+ fed = fib_entry_delegate_find_or_add(cover, FIB_ENTRY_DELEGATE_COVERED);
+ fed->fd_list = fib_node_list_create();
+ }
+
+ return (fib_node_list_push_front(fed->fd_list,
+ 0, FIB_NODE_TYPE_ENTRY,
+ covered));
+}
+
+void
+fib_entry_cover_untrack (fib_entry_t* cover,
+ u32 tracked_index)
+{
+ fib_entry_delegate_t *fed;
+
+ FIB_ENTRY_DBG(cover, "cover-untrack @ %d", tracked_index);
+
+ fed = fib_entry_delegate_get(cover, FIB_ENTRY_DELEGATE_COVERED);
+
+ if (NULL == fed)
+ return;
+
+ fib_node_list_remove(fed->fd_list, tracked_index);
+
+ if (0 == fib_node_list_get_size(fed->fd_list))
+ {
+ fib_node_list_destroy(&fed->fd_list);
+ fib_entry_delegate_remove(cover, FIB_ENTRY_DELEGATE_COVERED);
+ }
+}
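+
+/*
+ * A hedged pairing sketch (illustrative only, not part of this patch):
+ * the index returned by track is the handle later passed to untrack;
+ * covered entries store it (cf. fesr_sibling et al. in fib_entry_src_t).
+ */
+static void
+example_track_untrack (fib_entry_t *cover, fib_node_index_t covered)
+{
+    u32 sibling;
+
+    sibling = fib_entry_cover_track(cover, covered);
+    /* ... later, when the covered entry no longer needs its cover ... */
+    fib_entry_cover_untrack(cover, sibling);
+}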
+
+/**
+ * Internal struct to hold user supplied parameters for the cover walk
+ */
+typedef struct fib_enty_cover_walk_ctx_t_ {
+ fib_entry_t *cover;
+ fib_entry_covered_walk_t walk;
+ void *ctx;
+} fib_enty_cover_walk_ctx_t;
+
+static int
+fib_entry_cover_walk_node_ptr (fib_node_ptr_t *depend,
+ void *args)
+{
+ fib_enty_cover_walk_ctx_t *ctx = args;
+
+ ctx->walk(ctx->cover, depend->fnp_index, ctx->ctx);
+
+ /* continue */
+ return (1);
+}
+
+void
+fib_entry_cover_walk (fib_entry_t *cover,
+ fib_entry_covered_walk_t walk,
+ void *args)
+{
+ fib_entry_delegate_t *fed;
+
+ fed = fib_entry_delegate_get(cover, FIB_ENTRY_DELEGATE_COVERED);
+
+ if (NULL == fed)
+ return;
+
+ fib_enty_cover_walk_ctx_t ctx = {
+ .cover = cover,
+ .walk = walk,
+ .ctx = args,
+ };
+
+ fib_node_list_walk(fed->fd_list,
+ fib_entry_cover_walk_node_ptr,
+ &ctx);
+}
+
+u32
+fib_entry_cover_get_size (fib_entry_t *cover)
+{
+ fib_entry_delegate_t *fed;
+
+ fed = fib_entry_delegate_get(cover, FIB_ENTRY_DELEGATE_COVERED);
+
+ if (NULL == fed)
+ return (0);
+
+ return (fib_node_list_get_size(fed->fd_list));
+}
+
+typedef struct fib_entry_cover_list_format_ctx_t_ {
+ u8 *s;
+} fib_entry_cover_list_format_ctx_t;
+
+static int
+fib_entry_covered_list_format_one (fib_entry_t *cover,
+ fib_node_index_t covered,
+ void *args)
+{
+ fib_entry_cover_list_format_ctx_t * ctx = args;
+
+ ctx->s = format(ctx->s, "%d, ", covered);
+
+ /* continue */
+ return (1);
+}
+
+u8*
+fib_entry_cover_list_format (fib_entry_t *fib_entry,
+ u8 *s)
+{
+ fib_entry_cover_list_format_ctx_t ctx = {
+ .s = s,
+ };
+
+ fib_entry_cover_walk(fib_entry,
+ fib_entry_covered_list_format_one,
+ &ctx);
+
+ return (ctx.s);
+}
+
+static int
+fib_entry_cover_change_one (fib_entry_t *cover,
+ fib_node_index_t covered,
+ void *args)
+{
+ fib_node_index_t new_cover;
+
+ /*
+ * The 3 entries involved here are:
+ * cover - the least specific. It will cover both the others
+ * new_cover - the entry just inserted below the cover
+ * covered - the entry that was tracking the cover.
+ *
+ * The checks below are to determine if new_cover is a cover for covered.
+ */
+ new_cover = pointer_to_uword(args);
+
+ if (FIB_NODE_INDEX_INVALID == new_cover)
+ {
+ /*
+ * nothing has been inserted, which implies the cover was removed.
+ * 'cover' is thus the new cover.
+ */
+ fib_entry_cover_changed(covered);
+ }
+ else if (new_cover != covered)
+ {
+ fib_prefix_t pfx_covered, pfx_new_cover;
+
+ fib_entry_get_prefix(covered, &pfx_covered);
+ fib_entry_get_prefix(new_cover, &pfx_new_cover);
+
+ if (fib_prefix_is_cover(&pfx_new_cover, &pfx_covered))
+ {
+ fib_entry_cover_changed(covered);
+ }
+ }
+ /* continue */
+ return (1);
+}
+
+void
+fib_entry_cover_change_notify (fib_node_index_t cover_index,
+ fib_node_index_t covered)
+{
+ fib_entry_t *cover;
+
+ cover = fib_entry_get(cover_index);
+
+ fib_entry_cover_walk(cover,
+ fib_entry_cover_change_one,
+ uword_to_pointer(covered, void*));
+}
+
+static int
+fib_entry_cover_update_one (fib_entry_t *cover,
+ fib_node_index_t covered,
+ void *args)
+{
+ fib_entry_cover_updated(covered);
+
+ /* continue */
+ return (1);
+}
+
+void
+fib_entry_cover_update_notify (fib_entry_t *fib_entry)
+{
+ fib_entry_cover_walk(fib_entry,
+ fib_entry_cover_update_one,
+ NULL);
+}
diff --git a/src/vnet/fib/fib_entry_cover.h b/src/vnet/fib/fib_entry_cover.h
new file mode 100644
index 00000000000..fbbbc211dc9
--- /dev/null
+++ b/src/vnet/fib/fib_entry_cover.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FIB_ENTRY_COVER_H__
+#define __FIB_ENTRY_COVER_H__
+
+#include "fib_entry.h"
+
+/**
+ * callback function used when walking the covered entries
+ */
+typedef int (*fib_entry_covered_walk_t)(fib_entry_t *cover,
+ fib_node_index_t covered,
+ void *ctx);
+
+extern u32 fib_entry_cover_track(fib_entry_t *cover,
+ fib_node_index_t covered);
+
+extern void fib_entry_cover_untrack(fib_entry_t *cover,
+ u32 tracked_index);
+
+extern void fib_entry_cover_walk(fib_entry_t *cover,
+ fib_entry_covered_walk_t walk,
+ void *ctx);
+
+extern void fib_entry_cover_change_notify(fib_node_index_t cover_index,
+ fib_node_index_t covered_index);
+extern void fib_entry_cover_update_notify(fib_entry_t *cover);
+
+extern u32 fib_entry_cover_get_size(fib_entry_t *cover);
+
+extern u8* fib_entry_cover_list_format(fib_entry_t *fib_entry,
+ u8 *s);
+
+#endif
diff --git a/src/vnet/fib/fib_entry_delegate.c b/src/vnet/fib/fib_entry_delegate.c
new file mode 100644
index 00000000000..a0d45f970b3
--- /dev/null
+++ b/src/vnet/fib/fib_entry_delegate.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/fib/fib_entry_delegate.h>
+#include <vnet/fib/fib_entry.h>
+
+static fib_entry_delegate_t *
+fib_entry_delegate_find_i (const fib_entry_t *fib_entry,
+ fib_entry_delegate_type_t type,
+ u32 *index)
+{
+ fib_entry_delegate_t *delegate;
+ int ii;
+
+ ii = 0;
+ vec_foreach(delegate, fib_entry->fe_delegates)
+ {
+ if (delegate->fd_type == type)
+ {
+ if (NULL != index)
+ *index = ii;
+
+ return (delegate);
+ }
+ else
+ {
+ ii++;
+ }
+ }
+
+ return (NULL);
+}
+
+fib_entry_delegate_t *
+fib_entry_delegate_get (const fib_entry_t *fib_entry,
+ fib_entry_delegate_type_t type)
+{
+ return (fib_entry_delegate_find_i(fib_entry, type, NULL));
+}
+
+void
+fib_entry_delegate_remove (fib_entry_t *fib_entry,
+ fib_entry_delegate_type_t type)
+{
+ fib_entry_delegate_t *fed;
+ u32 index = ~0;
+
+ fed = fib_entry_delegate_find_i(fib_entry, type, &index);
+
+ ASSERT(NULL != fed);
+
+ vec_del1(fib_entry->fe_delegates, index);
+}
+
+static int
+fib_entry_delegate_cmp_for_sort (void * v1,
+ void * v2)
+{
+ fib_entry_delegate_t *delegate1 = v1, *delegate2 = v2;
+
+ return (delegate1->fd_type - delegate2->fd_type);
+}
+
+static void
+fib_entry_delegate_init (fib_entry_t *fib_entry,
+ fib_entry_delegate_type_t type)
+
+{
+ fib_entry_delegate_t delegate = {
+ .fd_entry_index = fib_entry_get_index(fib_entry),
+ .fd_type = type,
+ };
+
+ vec_add1(fib_entry->fe_delegates, delegate);
+ vec_sort_with_function(fib_entry->fe_delegates,
+ fib_entry_delegate_cmp_for_sort);
+}
+
+fib_entry_delegate_t *
+fib_entry_delegate_find_or_add (fib_entry_t *fib_entry,
+ fib_entry_delegate_type_t fdt)
+{
+ fib_entry_delegate_t *delegate;
+
+ delegate = fib_entry_delegate_get(fib_entry, fdt);
+
+ if (NULL == delegate)
+ {
+ fib_entry_delegate_init(fib_entry, fdt);
+ }
+
+ return (fib_entry_delegate_get(fib_entry, fdt));
+}
+
+fib_entry_delegate_type_t
+fib_entry_chain_type_to_delegate_type (fib_forward_chain_type_t fct)
+{
+ switch (fct)
+ {
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
+ return (FIB_ENTRY_DELEGATE_CHAIN_UNICAST_IP4);
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
+ return (FIB_ENTRY_DELEGATE_CHAIN_UNICAST_IP6);
+ case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
+ return (FIB_ENTRY_DELEGATE_CHAIN_MPLS_EOS);
+ case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
+ return (FIB_ENTRY_DELEGATE_CHAIN_MPLS_NON_EOS);
+ case FIB_FORW_CHAIN_TYPE_ETHERNET:
+ return (FIB_ENTRY_DELEGATE_CHAIN_ETHERNET);
+ }
+ ASSERT(0);
+ return (FIB_ENTRY_DELEGATE_CHAIN_UNICAST_IP4);
+}
+
+fib_forward_chain_type_t
+fib_entry_delegate_type_to_chain_type (fib_entry_delegate_type_t fdt)
+{
+ switch (fdt)
+ {
+ case FIB_ENTRY_DELEGATE_CHAIN_UNICAST_IP4:
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+ case FIB_ENTRY_DELEGATE_CHAIN_UNICAST_IP6:
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6);
+ case FIB_ENTRY_DELEGATE_CHAIN_MPLS_EOS:
+ return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
+ case FIB_ENTRY_DELEGATE_CHAIN_MPLS_NON_EOS:
+ return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
+ case FIB_ENTRY_DELEGATE_CHAIN_ETHERNET:
+ return (FIB_FORW_CHAIN_TYPE_ETHERNET);
+ case FIB_ENTRY_DELEGATE_COVERED:
+ case FIB_ENTRY_DELEGATE_ATTACHED_IMPORT:
+ case FIB_ENTRY_DELEGATE_ATTACHED_EXPORT:
+ break;
+ }
+ ASSERT(0);
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+}
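+
+/*
+ * A hedged sanity sketch (not part of this patch): for the chain-type
+ * delegates the two conversions above are inverses.
+ */
+static inline void
+fib_entry_delegate_roundtrip_example (void)
+{
+    ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS ==
+           fib_entry_delegate_type_to_chain_type(
+               fib_entry_chain_type_to_delegate_type(
+                   FIB_FORW_CHAIN_TYPE_MPLS_EOS)));
+}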
diff --git a/src/vnet/fib/fib_entry_delegate.h b/src/vnet/fib/fib_entry_delegate.h
new file mode 100644
index 00000000000..6d3a6549f32
--- /dev/null
+++ b/src/vnet/fib/fib_entry_delegate.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FIB_ENTRY_DELEGATE_T__
+#define __FIB_ENTRY_DELEGATE_T__
+
+#include <vnet/fib/fib_node.h>
+
+/**
+ * Delegate types
+ */
+typedef enum fib_entry_delegate_type_t_ {
+ /**
+ * Forwarding chain types:
+ * for the vast majority of FIB entries only one chain is required - the
+ * one that forwards traffic matching the fib_entry_t's fib_prefix_t. For those
+ * fib_entry_t that are a resolution target for other fib_entry_t's they will also
+ * need the chain to provide forwarding for those children. We store these additional
+ * chains in delegates to save memory in the common case.
+ */
+ FIB_ENTRY_DELEGATE_CHAIN_UNICAST_IP4 = FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ FIB_ENTRY_DELEGATE_CHAIN_UNICAST_IP6 = FIB_FORW_CHAIN_TYPE_UNICAST_IP6,
+ FIB_ENTRY_DELEGATE_CHAIN_MPLS_EOS = FIB_FORW_CHAIN_TYPE_MPLS_EOS,
+ FIB_ENTRY_DELEGATE_CHAIN_MPLS_NON_EOS = FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ FIB_ENTRY_DELEGATE_CHAIN_ETHERNET = FIB_FORW_CHAIN_TYPE_ETHERNET,
+ /**
+ * Dependency list of covered entries.
+ * these are more specific entries that are interested in changes
+ * to their respective cover
+ */
+ FIB_ENTRY_DELEGATE_COVERED,
+ /**
+ * Attached import/export functionality
+ */
+ FIB_ENTRY_DELEGATE_ATTACHED_IMPORT,
+ FIB_ENTRY_DELEGATE_ATTACHED_EXPORT,
+} fib_entry_delegate_type_t;
+
+#define FOR_EACH_DELEGATE_CHAIN(_entry, _fdt, _fed, _body) \
+{ \
+ for (_fdt = FIB_ENTRY_DELEGATE_CHAIN_UNICAST_IP4; \
+ _fdt <= FIB_ENTRY_DELEGATE_CHAIN_ETHERNET; \
+ _fdt++) \
+ { \
+ _fed = fib_entry_delegate_get(_entry, _fdt); \
+ if (NULL != _fed) { \
+ _body; \
+ } \
+ } \
+}
+
+/**
+ * A Delegate is a means to implement the Delegation design pattern: the
+ * extension of an object's functionality through the composition of, and
+ * delegation to, other objects. These 'other' objects are delegates, and
+ * they are attached to other FIB objects to extend their functionality.
+ */
+typedef struct fib_entry_delegate_t_
+{
+ /**
+ * The FIB entry object to which the delegate is attached
+ */
+ fib_node_index_t fd_entry_index;
+
+ /**
+ * The delegate type
+ */
+ fib_entry_delegate_type_t fd_type;
+
+ /**
+ * A union of data for the different delegate types
+ * These delegates are stored in a sparse vector on the entry, so they
+ * must all be of the same size. We could use indirection here for all types,
+ * i.e. store an index; that's OK for large delegates, like the attached
+ * export, but for the chain delegates it's excessive
+ */
+ union
+ {
+ /**
+ * Valid for the forwarding chain delegates. The LB that is built.
+ */
+ dpo_id_t fd_dpo;
+
+ /**
+ * Valid for the attached import cases. An index of the importer/exporter
+ */
+ fib_node_index_t fd_index;
+
+ /**
+ * For cover tracking: the node list.
+ */
+ fib_node_list_t fd_list;
+ };
+} fib_entry_delegate_t;
+
+struct fib_entry_t_;
+
+extern void fib_entry_delegate_remove(struct fib_entry_t_ *fib_entry,
+ fib_entry_delegate_type_t type);
+
+extern fib_entry_delegate_t *fib_entry_delegate_find_or_add(struct fib_entry_t_ *fib_entry,
+ fib_entry_delegate_type_t fdt);
+extern fib_entry_delegate_t *fib_entry_delegate_get(const struct fib_entry_t_ *fib_entry,
+ fib_entry_delegate_type_t type);
+
+extern fib_forward_chain_type_t fib_entry_delegate_type_to_chain_type(
+ fib_entry_delegate_type_t type);
+
+extern fib_entry_delegate_type_t fib_entry_chain_type_to_delegate_type(
+ fib_forward_chain_type_t type);
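+
+/*
+ * A hedged usage sketch for the FOR_EACH_DELEGATE_CHAIN macro above
+ * (illustrative only; not part of this patch): walk every instantiated
+ * chain delegate on an entry and reset the DPO it contributes.
+ */
+static inline void
+fib_entry_delegate_reset_chains_example (struct fib_entry_t_ *fib_entry)
+{
+    fib_entry_delegate_type_t fdt;
+    fib_entry_delegate_t *fed;
+
+    FOR_EACH_DELEGATE_CHAIN(fib_entry, fdt, fed,
+    {
+        dpo_reset(&fed->fd_dpo);
+    });
+}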
+
+#endif
diff --git a/src/vnet/fib/fib_entry_src.c b/src/vnet/fib/fib_entry_src.c
new file mode 100644
index 00000000000..060fac941d2
--- /dev/null
+++ b/src/vnet/fib/fib_entry_src.c
@@ -0,0 +1,1456 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/adj/adj.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/mpls_label_dpo.h>
+#include <vnet/dpo/drop_dpo.h>
+
+#include <vnet/fib/fib_entry_src.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/fib_path_ext.h>
+#include <vnet/fib/fib_urpf_list.h>
+
+/*
+ * per-source type vft
+ */
+static fib_entry_src_vft_t fib_entry_src_vft[FIB_SOURCE_MAX];
+
+static fib_protocol_t
+fib_entry_get_proto (const fib_entry_t * fib_entry)
+{
+ return (fib_entry->fe_prefix.fp_proto);
+}
+
+void
+fib_entry_src_register (fib_source_t source,
+ const fib_entry_src_vft_t *vft)
+{
+ fib_entry_src_vft[source] = *vft;
+}
+
+static int
+fib_entry_src_cmp_for_sort (void * v1,
+ void * v2)
+{
+ fib_entry_src_t *esrc1 = v1, *esrc2 = v2;
+
+ return (esrc1->fes_src - esrc2->fes_src);
+}
+
+void
+fib_entry_src_action_init (fib_entry_t *fib_entry,
+ fib_source_t source)
+
+{
+ fib_entry_src_t esrc = {
+ .fes_pl = FIB_NODE_INDEX_INVALID,
+ .fes_flags = FIB_ENTRY_SRC_FLAG_NONE,
+ .fes_src = source,
+ };
+
+ if (NULL != fib_entry_src_vft[source].fesv_init)
+ {
+ fib_entry_src_vft[source].fesv_init(&esrc);
+ }
+
+ vec_add1(fib_entry->fe_srcs, esrc);
+ vec_sort_with_function(fib_entry->fe_srcs,
+ fib_entry_src_cmp_for_sort);
+}
+
+static fib_entry_src_t *
+fib_entry_src_find (const fib_entry_t *fib_entry,
+ fib_source_t source,
+ u32 *index)
+
+{
+ fib_entry_src_t *esrc;
+ int ii;
+
+ ii = 0;
+ vec_foreach(esrc, fib_entry->fe_srcs)
+ {
+ if (esrc->fes_src == source)
+ {
+ if (NULL != index)
+ {
+ *index = ii;
+ }
+ return (esrc);
+ }
+ else
+ {
+ ii++;
+ }
+ }
+
+ return (NULL);
+}
+
+int
+fib_entry_is_sourced (fib_node_index_t fib_entry_index,
+ fib_source_t source)
+{
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ return (NULL != fib_entry_src_find(fib_entry, source, NULL));
+}
+
+static fib_entry_src_t *
+fib_entry_src_find_or_create (fib_entry_t *fib_entry,
+ fib_source_t source,
+ u32 *index)
+{
+ fib_entry_src_t *esrc;
+
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+
+ if (NULL == esrc)
+ {
+ fib_entry_src_action_init(fib_entry, source);
+ }
+
+ return (fib_entry_src_find(fib_entry, source, NULL));
+}
+
+void
+fib_entry_src_action_deinit (fib_entry_t *fib_entry,
+ fib_source_t source)
+
+{
+ fib_entry_src_t *esrc;
+ u32 index = ~0;
+
+ esrc = fib_entry_src_find(fib_entry, source, &index);
+
+ ASSERT(NULL != esrc);
+
+ if (NULL != fib_entry_src_vft[source].fesv_deinit)
+ {
+ fib_entry_src_vft[source].fesv_deinit(esrc);
+ }
+
+ vec_free(esrc->fes_path_exts);
+ vec_del1(fib_entry->fe_srcs, index);
+}
+
+fib_entry_src_cover_res_t
+fib_entry_src_action_cover_change (fib_entry_t *fib_entry,
+ fib_source_t source)
+{
+ if (NULL != fib_entry_src_vft[source].fesv_cover_change)
+ {
+ return (fib_entry_src_vft[source].fesv_cover_change(
+ fib_entry_src_find(fib_entry, source, NULL),
+ fib_entry));
+ }
+
+ fib_entry_src_cover_res_t res = {
+ .install = !0,
+ .bw_reason = FIB_NODE_BW_REASON_FLAG_NONE,
+ };
+ return (res);
+}
+
+fib_entry_src_cover_res_t
+fib_entry_src_action_cover_update (fib_entry_t *fib_entry,
+ fib_source_t source)
+{
+ if (NULL != fib_entry_src_vft[source].fesv_cover_update)
+ {
+ return (fib_entry_src_vft[source].fesv_cover_update(
+ fib_entry_src_find(fib_entry, source, NULL),
+ fib_entry));
+ }
+
+ fib_entry_src_cover_res_t res = {
+ .install = !0,
+ .bw_reason = FIB_NODE_BW_REASON_FLAG_NONE,
+ };
+ return (res);
+}
+
+typedef struct fib_entry_src_collect_forwarding_ctx_t_
+{
+ load_balance_path_t * next_hops;
+ const fib_entry_t *fib_entry;
+ const fib_entry_src_t *esrc;
+ fib_forward_chain_type_t fct;
+ int is_recursive;
+} fib_entry_src_collect_forwarding_ctx_t;
+
+/**
+ * @brief Determine whether this FIB entry should use a load-balance MAP
+ * to support PIC edge fast convergence
+ */
+load_balance_flags_t
+fib_entry_calc_lb_flags (fib_entry_src_collect_forwarding_ctx_t *ctx)
+{
+ /**
+ * We'll use a LB map if the path-list has recursive paths.
+ * Recursive paths imply BGP, and hence scale.
+ */
+ if (ctx->is_recursive)
+ {
+ return (LOAD_BALANCE_FLAG_USES_MAP);
+ }
+ return (LOAD_BALANCE_FLAG_NONE);
+}
+
+static int
+fib_entry_src_valid_out_label (mpls_label_t label)
+{
+ return ((MPLS_LABEL_IS_REAL(label) ||
+ MPLS_IETF_IPV4_EXPLICIT_NULL_LABEL == label ||
+ MPLS_IETF_IPV6_EXPLICIT_NULL_LABEL == label ||
+ MPLS_IETF_IMPLICIT_NULL_LABEL == label));
+}
+
+/**
+ * @brief Turn the chain type requested by the client into the one they
+ * really wanted
+ */
+fib_forward_chain_type_t
+fib_entry_chain_type_fixup (const fib_entry_t *entry,
+ fib_forward_chain_type_t fct)
+{
+ ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS == fct);
+
+ /*
+ * The EOS chain is tricky since one cannot know the adjacency
+ * to link to without knowing what the packets payload protocol
+ * will be once the label is popped.
+ */
+ fib_forward_chain_type_t dfct;
+
+ dfct = fib_entry_get_default_chain_type(entry);
+
+ if (FIB_FORW_CHAIN_TYPE_MPLS_EOS == dfct)
+ {
+ /*
+ * If the entry being asked is an EOS MPLS label entry,
+ * then use the payload-protocol field that we stashed there
+ * for just this purpose
+ */
+ return (fib_forw_chain_type_from_dpo_proto(
+ entry->fe_prefix.fp_payload_proto));
+ }
+ /*
+ * else give them what this entry would be by default. i.e. if it's a v6
+ * entry, then its local label should be carrying v6 traffic.
+ * If it's a non-EOS label entry, then there are more labels and we want
+ * a non-eos chain.
+ */
+ return (dfct);
+}
+
+static int
+fib_entry_src_collect_forwarding (fib_node_index_t pl_index,
+ fib_node_index_t path_index,
+ void *arg)
+{
+ fib_entry_src_collect_forwarding_ctx_t *ctx;
+ fib_path_ext_t *path_ext;
+
+ ctx = arg;
+
+ /*
+ * if the path is not resolved, don't include it.
+ */
+ if (!fib_path_is_resolved(path_index))
+ {
+ return (!0);
+ }
+
+ if (fib_path_is_recursive(path_index))
+ {
+ ctx->is_recursive = 1;
+ }
+
+ /*
+ * get the matching path-extension for the path being visited.
+ */
+ vec_foreach(path_ext, ctx->esrc->fes_path_exts)
+ {
+ if (path_ext->fpe_path_index == path_index)
+ break;
+ }
+
+ if (NULL != path_ext &&
+ path_ext->fpe_path_index == path_index &&
+ fib_entry_src_valid_out_label(path_ext->fpe_label_stack[0]))
+ {
+ /*
+ * found a matching extension. stack it to obtain the forwarding
+ * info for this path.
+ */
+ ctx->next_hops = fib_path_ext_stack(path_ext, ctx->fib_entry, ctx->fct, ctx->next_hops);
+ }
+ else
+ {
+ load_balance_path_t *nh;
+
+ /*
+ * no extension => no out-going label for this path. that's OK
+ * in the case of an IP or EOS chain, but not for non-EOS
+ */
+ switch (ctx->fct)
+ {
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
+ /*
+ * EOS traffic with no label to stack, we need the IP Adj
+ */
+ vec_add2(ctx->next_hops, nh, 1);
+
+ nh->path_index = path_index;
+ nh->path_weight = fib_path_get_weight(path_index);
+ fib_path_contribute_forwarding(path_index, ctx->fct, &nh->path_dpo);
+
+ break;
+ case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
+ if (fib_path_is_exclusive(path_index) ||
+ fib_path_is_deag(path_index))
+ {
+ vec_add2(ctx->next_hops, nh, 1);
+
+ nh->path_index = path_index;
+ nh->path_weight = fib_path_get_weight(path_index);
+ fib_path_contribute_forwarding(path_index,
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ &nh->path_dpo);
+ }
+ break;
+ case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
+ {
+ /*
+ * no label. we need a chain based on the payload. fixup.
+ */
+ vec_add2(ctx->next_hops, nh, 1);
+
+ nh->path_index = path_index;
+ nh->path_weight = fib_path_get_weight(path_index);
+ fib_path_contribute_forwarding(path_index,
+ fib_entry_chain_type_fixup(ctx->fib_entry,
+ ctx->fct),
+ &nh->path_dpo);
+
+ break;
+ }
+ case FIB_FORW_CHAIN_TYPE_ETHERNET:
+ ASSERT(0);
+ break;
+ }
+ }
+
+ return (!0);
+}
+
+void
+fib_entry_src_mk_lb (fib_entry_t *fib_entry,
+ const fib_entry_src_t *esrc,
+ fib_forward_chain_type_t fct,
+ dpo_id_t *dpo_lb)
+{
+ dpo_proto_t lb_proto;
+
+ /*
+ * If the entry has path extensions then we construct a load-balance
+ * by stacking the extensions on the forwarding chains of the paths.
+ * Otherwise we use the load-balance of the path-list
+ */
+ fib_entry_src_collect_forwarding_ctx_t ctx = {
+ .esrc = esrc,
+ .fib_entry = fib_entry,
+ .next_hops = NULL,
+ .is_recursive = 0,
+ .fct = fct,
+ };
+
+ lb_proto = fib_proto_to_dpo(fib_entry->fe_prefix.fp_proto);
+
+ fib_path_list_walk(esrc->fes_pl,
+ fib_entry_src_collect_forwarding,
+ &ctx);
+
+ if (esrc->fes_entry_flags & FIB_ENTRY_FLAG_EXCLUSIVE)
+ {
+ /*
+ * the client provided the DPO that the entry should link to.
+ * all entries must link to a LB, so if it is an LB already
+ * then we can use it.
+ */
+ if ((1 == vec_len(ctx.next_hops)) &&
+ (DPO_LOAD_BALANCE == ctx.next_hops[0].path_dpo.dpoi_type))
+ {
+ dpo_copy(dpo_lb, &ctx.next_hops[0].path_dpo);
+ dpo_reset(&ctx.next_hops[0].path_dpo);
+ /* free the collected next-hops before the early return */
+ vec_free(ctx.next_hops);
+ return;
+ }
+ }
+
+ if (!dpo_id_is_valid(dpo_lb))
+ {
+ /*
+ * first time create
+ */
+ flow_hash_config_t fhc;
+
+ fhc = fib_table_get_flow_hash_config(fib_entry->fe_fib_index,
+ dpo_proto_to_fib(lb_proto));
+ dpo_set(dpo_lb,
+ DPO_LOAD_BALANCE,
+ lb_proto,
+ load_balance_create(0, lb_proto, fhc));
+ }
+
+ load_balance_multipath_update(dpo_lb,
+ ctx.next_hops,
+ fib_entry_calc_lb_flags(&ctx));
+ vec_free(ctx.next_hops);
+
+ /*
+ * if this entry is sourced by the uRPF-exempt source then we
+ * append the always present local0 interface (index 0) to the
+ * uRPF list so it is not empty. that way packets pass the loose check.
+ */
+ index_t ui = fib_path_list_get_urpf(esrc->fes_pl);
+
+ if (fib_entry_is_sourced(fib_entry_get_index(fib_entry),
+ FIB_SOURCE_URPF_EXEMPT) &&
+ (0 == fib_urpf_check_size(ui)))
+ {
+ /*
+ * The uRPF list we get from the path-list is shared by all
+ * other users of the list, but the uRPF exemption applies
+ * only to this prefix. So we need our own list.
+ */
+ ui = fib_urpf_list_alloc_and_lock();
+ fib_urpf_list_append(ui, 0);
+ fib_urpf_list_bake(ui);
+ load_balance_set_urpf(dpo_lb->dpoi_index, ui);
+ fib_urpf_list_unlock(ui);
+ }
+ else
+ {
+ load_balance_set_urpf(dpo_lb->dpoi_index, ui);
+ }
+}
+
+void
+fib_entry_src_action_install (fib_entry_t *fib_entry,
+ fib_source_t source)
+{
+ /*
+ * Install the forwarding chain for the given source into the forwarding
+ * tables
+ */
+ fib_forward_chain_type_t fct;
+ fib_entry_src_t *esrc;
+ int insert;
+
+ fct = fib_entry_get_default_chain_type(fib_entry);
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+
+ /*
+ * Every entry has its own load-balance object. All changes to the entry's
+ * forwarding result in an inplace modify of the load-balance. This means
+ * the load-balance object only needs to be added to the forwarding
+ * DB once, when it is created.
+ */
+ insert = !dpo_id_is_valid(&fib_entry->fe_lb);
+
+ fib_entry_src_mk_lb(fib_entry, esrc, fct, &fib_entry->fe_lb);
+
+ ASSERT(dpo_id_is_valid(&fib_entry->fe_lb));
+ FIB_ENTRY_DBG(fib_entry, "install: %d", fib_entry->fe_lb);
+
+ /*
+     * insert the load-balance into the data-plane forwarding trie
+ */
+ if (insert)
+ {
+ fib_table_fwding_dpo_update(fib_entry->fe_fib_index,
+ &fib_entry->fe_prefix,
+ &fib_entry->fe_lb);
+ }
+
+ /*
+ * if any of the other chain types are already created they will need
+ * updating too
+ */
+ fib_entry_delegate_type_t fdt;
+ fib_entry_delegate_t *fed;
+
+ FOR_EACH_DELEGATE_CHAIN(fib_entry, fdt, fed,
+ {
+ fib_entry_src_mk_lb(fib_entry, esrc,
+ fib_entry_delegate_type_to_chain_type(fdt),
+ &fed->fd_dpo);
+ });
+}
+
+void
+fib_entry_src_action_uninstall (fib_entry_t *fib_entry)
+{
+ /*
+ * uninstall the forwarding chain from the forwarding tables
+ */
+ FIB_ENTRY_DBG(fib_entry, "uninstall: %d",
+ fib_entry->fe_adj_index);
+
+ if (dpo_id_is_valid(&fib_entry->fe_lb))
+ {
+ fib_table_fwding_dpo_remove(
+ fib_entry->fe_fib_index,
+ &fib_entry->fe_prefix,
+ &fib_entry->fe_lb);
+
+ dpo_reset(&fib_entry->fe_lb);
+ }
+}
+
+static void
+fib_entry_recursive_loop_detect_i (fib_node_index_t path_list_index)
+{
+ fib_node_index_t *entries = NULL;
+
+ fib_path_list_recursive_loop_detect(path_list_index, &entries);
+
+ vec_free(entries);
+}
+
+void
+fib_entry_src_action_activate (fib_entry_t *fib_entry,
+ fib_source_t source)
+{
+ int houston_we_are_go_for_install;
+ fib_entry_src_t *esrc;
+
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+
+ ASSERT(!(esrc->fes_flags & FIB_ENTRY_SRC_FLAG_ACTIVE));
+ ASSERT(esrc->fes_flags & FIB_ENTRY_SRC_FLAG_ADDED);
+
+ esrc->fes_flags |= FIB_ENTRY_SRC_FLAG_ACTIVE;
+
+ if (NULL != fib_entry_src_vft[source].fesv_activate)
+ {
+ houston_we_are_go_for_install =
+ fib_entry_src_vft[source].fesv_activate(esrc, fib_entry);
+ }
+ else
+ {
+ /*
+	 * the source is not providing an activate function; we'll therefore
+	 * assume it has no objection to installing the entry
+ */
+ houston_we_are_go_for_install = !0;
+ }
+
+ /*
+ * link to the path-list provided by the source, and go check
+ * if that forms any loops in the graph.
+ */
+ fib_entry->fe_parent = esrc->fes_pl;
+ fib_entry->fe_sibling =
+ fib_path_list_child_add(fib_entry->fe_parent,
+ FIB_NODE_TYPE_ENTRY,
+ fib_entry_get_index(fib_entry));
+
+ fib_entry_recursive_loop_detect_i(fib_entry->fe_parent);
+
+ FIB_ENTRY_DBG(fib_entry, "activate: %d",
+ fib_entry->fe_parent);
+
+ if (0 != houston_we_are_go_for_install)
+ {
+ fib_entry_src_action_install(fib_entry, source);
+ }
+ else
+ {
+ fib_entry_src_action_uninstall(fib_entry);
+ }
+}
+
+void
+fib_entry_src_action_deactivate (fib_entry_t *fib_entry,
+ fib_source_t source)
+{
+ fib_node_index_t path_list_index;
+ fib_entry_src_t *esrc;
+
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+
+ ASSERT(esrc->fes_flags & FIB_ENTRY_SRC_FLAG_ACTIVE);
+
+ if (NULL != fib_entry_src_vft[source].fesv_deactivate)
+ {
+ fib_entry_src_vft[source].fesv_deactivate(esrc, fib_entry);
+ }
+
+ esrc->fes_flags &= ~FIB_ENTRY_SRC_FLAG_ACTIVE;
+
+ FIB_ENTRY_DBG(fib_entry, "deactivate: %d", fib_entry->fe_parent);
+
+ /*
+ * un-link from an old path-list. Check for any loops this will clear
+ */
+ path_list_index = fib_entry->fe_parent;
+ fib_entry->fe_parent = FIB_NODE_INDEX_INVALID;
+
+ fib_entry_recursive_loop_detect_i(path_list_index);
+
+ /*
+ * this will unlock the path-list, so it may be invalid thereafter.
+ */
+ fib_path_list_child_remove(path_list_index, fib_entry->fe_sibling);
+ fib_entry->fe_sibling = FIB_NODE_INDEX_INVALID;
+}
+
+static void
+fib_entry_src_action_fwd_update (const fib_entry_t *fib_entry,
+ fib_source_t source)
+{
+ fib_entry_src_t *esrc;
+
+ vec_foreach(esrc, fib_entry->fe_srcs)
+ {
+ if (NULL != fib_entry_src_vft[esrc->fes_src].fesv_fwd_update)
+ {
+ fib_entry_src_vft[esrc->fes_src].fesv_fwd_update(esrc,
+ fib_entry,
+ source);
+ }
+ }
+}
+
+void
+fib_entry_src_action_reactivate (fib_entry_t *fib_entry,
+ fib_source_t source)
+{
+ fib_node_index_t path_list_index;
+ fib_entry_src_t *esrc;
+
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+
+ ASSERT(esrc->fes_flags & FIB_ENTRY_SRC_FLAG_ACTIVE);
+
+ FIB_ENTRY_DBG(fib_entry, "reactivate: %d to %d",
+ fib_entry->fe_parent,
+ esrc->fes_pl);
+
+ if (fib_entry->fe_parent != esrc->fes_pl)
+ {
+ /*
+ * un-link from an old path-list. Check for any loops this will clear
+ */
+ path_list_index = fib_entry->fe_parent;
+ fib_entry->fe_parent = FIB_NODE_INDEX_INVALID;
+
+ /*
+ * temporary lock so it doesn't get deleted when this entry is no
+ * longer a child.
+ */
+ fib_path_list_lock(path_list_index);
+
+ /*
+ * this entry is no longer a child. after unlinking check if any loops
+ * were broken
+ */
+ fib_path_list_child_remove(path_list_index,
+ fib_entry->fe_sibling);
+
+ fib_entry_recursive_loop_detect_i(path_list_index);
+
+ /*
+ * link to the path-list provided by the source, and go check
+ * if that forms any loops in the graph.
+ */
+ fib_entry->fe_parent = esrc->fes_pl;
+ fib_entry->fe_sibling =
+ fib_path_list_child_add(fib_entry->fe_parent,
+ FIB_NODE_TYPE_ENTRY,
+ fib_entry_get_index(fib_entry));
+
+ fib_entry_recursive_loop_detect_i(fib_entry->fe_parent);
+ fib_path_list_unlock(path_list_index);
+ }
+ fib_entry_src_action_install(fib_entry, source);
+ fib_entry_src_action_fwd_update(fib_entry, source);
+}
+
+void
+fib_entry_src_action_installed (const fib_entry_t *fib_entry,
+ fib_source_t source)
+{
+ fib_entry_src_t *esrc;
+
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+
+ if (NULL != fib_entry_src_vft[source].fesv_installed)
+ {
+ fib_entry_src_vft[source].fesv_installed(esrc,
+ fib_entry);
+ }
+
+ fib_entry_src_action_fwd_update(fib_entry, source);
+}
+
+/*
+ * fib_entry_src_action_add
+ *
+ * Adding a source can result in a new fib_entry being created, which
+ * can in turn mean the pool is realloc'd and thus the entry passed as
+ * an argument is also realloc'd.
+ * @return the original entry
+ */
+fib_entry_t *
+fib_entry_src_action_add (fib_entry_t *fib_entry,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo)
+{
+ fib_node_index_t fib_entry_index;
+ fib_entry_src_t *esrc;
+
+ esrc = fib_entry_src_find_or_create(fib_entry, source, NULL);
+
+ esrc->fes_ref_count++;
+
+ if (1 != esrc->fes_ref_count)
+ {
+ /*
+ * we only want to add the source on the 0->1 transition
+ */
+ return (fib_entry);
+ }
+
+ esrc->fes_entry_flags = flags;
+
+ /*
+ * save variable so we can recover from a fib_entry realloc.
+ */
+ fib_entry_index = fib_entry_get_index(fib_entry);
+
+ if (NULL != fib_entry_src_vft[source].fesv_add)
+ {
+ fib_entry_src_vft[source].fesv_add(esrc,
+ fib_entry,
+ flags,
+ fib_entry_get_proto(fib_entry),
+ dpo);
+ }
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ esrc->fes_flags |= FIB_ENTRY_SRC_FLAG_ADDED;
+
+ fib_path_list_lock(esrc->fes_pl);
+
+ /*
+ * the source owns a lock on the entry
+ */
+ fib_entry_lock(fib_entry_get_index(fib_entry));
+
+ return (fib_entry);
+}
+
+/*
+ * fib_entry_src_action_update
+ *
+ * Adding a source can result in a new fib_entry being created, which
+ * can in turn mean the pool is realloc'd and thus the entry passed as
+ * an argument is also realloc'd.
+ * @return the original entry
+ */
+fib_entry_t *
+fib_entry_src_action_update (fib_entry_t *fib_entry,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo)
+{
+ fib_node_index_t fib_entry_index, old_path_list_index;
+ fib_entry_src_t *esrc;
+
+ esrc = fib_entry_src_find_or_create(fib_entry, source, NULL);
+
+ if (NULL == esrc)
+ return (fib_entry_src_action_add(fib_entry, source, flags, dpo));
+
+ old_path_list_index = esrc->fes_pl;
+ esrc->fes_entry_flags = flags;
+
+ /*
+ * save variable so we can recover from a fib_entry realloc.
+ */
+ fib_entry_index = fib_entry_get_index(fib_entry);
+
+ if (NULL != fib_entry_src_vft[source].fesv_add)
+ {
+ fib_entry_src_vft[source].fesv_add(esrc,
+ fib_entry,
+ flags,
+ fib_entry_get_proto(fib_entry),
+ dpo);
+ }
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ esrc->fes_flags |= FIB_ENTRY_SRC_FLAG_ADDED;
+
+ fib_path_list_lock(esrc->fes_pl);
+ fib_path_list_unlock(old_path_list_index);
+
+ return (fib_entry);
+}
+
+
+fib_entry_src_flag_t
+fib_entry_src_action_remove (fib_entry_t *fib_entry,
+ fib_source_t source)
+{
+ fib_node_index_t old_path_list;
+ fib_entry_src_flag_t sflags;
+ fib_entry_src_t *esrc;
+
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+
+ if (NULL == esrc)
+ return (FIB_ENTRY_SRC_FLAG_ACTIVE);
+
+ esrc->fes_ref_count--;
+ sflags = esrc->fes_flags;
+
+ if (0 != esrc->fes_ref_count)
+ {
+ /*
+	 * only remove the source on the 1->0 transition
+ */
+ return (sflags);
+ }
+
+ if (esrc->fes_flags & FIB_ENTRY_SRC_FLAG_ACTIVE)
+ {
+ fib_entry_src_action_deactivate(fib_entry, source);
+ }
+
+ old_path_list = esrc->fes_pl;
+
+ if (NULL != fib_entry_src_vft[source].fesv_remove)
+ {
+ fib_entry_src_vft[source].fesv_remove(esrc);
+ }
+
+ fib_path_list_unlock(old_path_list);
+ fib_entry_unlock(fib_entry_get_index(fib_entry));
+
+ sflags &= ~FIB_ENTRY_SRC_FLAG_ADDED;
+ fib_entry_src_action_deinit(fib_entry, source);
+
+ return (sflags);
+}
+
+static inline int
+fib_route_recurses_via_self (const fib_prefix_t *prefix,
+ const fib_route_path_t *rpath)
+{
+    /*
+     * - not an all-zeros next-hop
+     * - a recursive path (no interface)
+     * - the next-hop is the same as the route's own address
+     */
+ return ((!ip46_address_is_zero(&rpath->frp_addr)) &&
+ (~0 == rpath->frp_sw_if_index) &&
+ (0 == ip46_address_cmp(&rpath->frp_addr, &prefix->fp_addr)));
+}
+
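+/*
+ * for example, a route such as (hypothetical config):
+ *   ip route add 1.1.1.1/32 via 1.1.1.1
+ * recurses via itself and is caught by fib_route_recurses_via_self above.
+ */
+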
+/*
+ * fib_route_attached_cross_table
+ *
+ * Return true if the route is attached via an interface that
+ * is not in the same table as the route
+ */
+static inline int
+fib_route_attached_cross_table (const fib_entry_t *fib_entry,
+ const fib_route_path_t *rpath)
+{
+ /*
+ * - All zeros next-hop
+ * - a valid interface
+     * - the entry's FIB index is not equal to the interface's FIB index
+ */
+ if (ip46_address_is_zero(&rpath->frp_addr) &&
+ (~0 != rpath->frp_sw_if_index) &&
+ (fib_entry->fe_fib_index !=
+ fib_table_get_index_for_sw_if_index(fib_entry_get_proto(fib_entry),
+ rpath->frp_sw_if_index)))
+ {
+ return (!0);
+ }
+ return (0);
+}
+
+/*
+ * fib_path_is_attached
+ *
+ * Return true if the path is attached: an all-zeros next-hop
+ * via a valid interface
+ */
+static inline int
+fib_path_is_attached (const fib_route_path_t *rpath)
+{
+ /*
+ * - All zeros next-hop
+ * - a valid interface
+ */
+ if (ip46_address_is_zero(&rpath->frp_addr) &&
+ (~0 != rpath->frp_sw_if_index))
+ {
+ return (!0);
+ }
+ return (0);
+}
+
+fib_path_list_flags_t
+fib_entry_src_flags_2_path_list_flags (fib_entry_flag_t eflags)
+{
+ fib_path_list_flags_t plf = FIB_PATH_LIST_FLAG_NONE;
+
+ if (eflags & FIB_ENTRY_FLAG_DROP)
+ {
+ plf |= FIB_PATH_LIST_FLAG_DROP;
+ }
+ if (eflags & FIB_ENTRY_FLAG_LOCAL)
+ {
+ plf |= FIB_PATH_LIST_FLAG_LOCAL;
+ }
+ if (eflags & FIB_ENTRY_FLAG_EXCLUSIVE)
+ {
+ plf |= FIB_PATH_LIST_FLAG_EXCLUSIVE;
+ }
+
+ return (plf);
+}
+
+static void
+fib_entry_flags_update (const fib_entry_t *fib_entry,
+ const fib_route_path_t *rpath,
+ fib_path_list_flags_t *pl_flags,
+ fib_entry_src_t *esrc)
+{
+ /*
+     * don't allow the addition of a recursive looped path for a prefix
+ * via itself.
+ */
+ if (fib_route_recurses_via_self(&fib_entry->fe_prefix, rpath))
+ {
+ /*
+ * force the install of a drop path-list.
+ * we want the entry to have some path-list, mainly so
+	 * the dodgy path can be removed when the source stops playing
+ * silly buggers.
+ */
+ *pl_flags |= FIB_PATH_LIST_FLAG_DROP;
+ }
+ else
+ {
+ *pl_flags &= ~FIB_PATH_LIST_FLAG_DROP;
+ }
+
+ if ((esrc->fes_src == FIB_SOURCE_API) ||
+ (esrc->fes_src == FIB_SOURCE_CLI))
+ {
+ if (fib_path_is_attached(rpath))
+ {
+ esrc->fes_entry_flags |= FIB_ENTRY_FLAG_ATTACHED;
+ }
+ else
+ {
+ esrc->fes_entry_flags &= ~FIB_ENTRY_FLAG_ATTACHED;
+ }
+ }
+ if (fib_route_attached_cross_table(fib_entry, rpath))
+ {
+ esrc->fes_entry_flags |= FIB_ENTRY_FLAG_IMPORT;
+ }
+ else
+ {
+ esrc->fes_entry_flags &= ~FIB_ENTRY_FLAG_IMPORT;
+ }
+}
+
+/*
+ * fib_entry_src_path_ext_append
+ *
+ * append a path extension to the entry's list
+ */
+static void
+fib_entry_src_path_ext_append (fib_entry_src_t *esrc,
+ const fib_route_path_t *rpath)
+{
+ if (NULL != rpath->frp_label_stack)
+ {
+ fib_path_ext_t *path_ext;
+
+ vec_add2(esrc->fes_path_exts, path_ext, 1);
+
+ fib_path_ext_init(path_ext, esrc->fes_pl, rpath);
+ }
+}
+
+/*
+ * fib_entry_src_path_ext_insert
+ *
+ * insert, sorted, a path extension to the entry's list.
+ * It's not strictly necessary to sort the path extensions, since each
+ * extension has the path index to which it resolves. However, by being
+ * sorted the load-balance produced has a deterministic order, not an order
+ * based on the sequence of extension additions. This is a considerable benefit.
+ */
+static void
+fib_entry_src_path_ext_insert (fib_entry_src_t *esrc,
+ const fib_route_path_t *rpath)
+{
+    if (0 == vec_len(esrc->fes_path_exts))
+    {
+	fib_entry_src_path_ext_append(esrc, rpath);
+	return;
+    }
+
+ if (NULL != rpath->frp_label_stack)
+ {
+ fib_path_ext_t path_ext;
+ int i = 0;
+
+ fib_path_ext_init(&path_ext, esrc->fes_pl, rpath);
+
+ while (i < vec_len(esrc->fes_path_exts) &&
+ (fib_path_ext_cmp(&esrc->fes_path_exts[i], rpath) < 0))
+ {
+ i++;
+ }
+
+ vec_insert_elts(esrc->fes_path_exts, &path_ext, 1, i);
+ }
+}
+
+/*
+ * fib_entry_src_action_path_add
+ *
+ * Adding a path can result in a new fib_entry being created, which
+ * can in turn mean the pool is realloc'd and thus the entry passed as
+ * an argument is also realloc'd.
+ * @return the entry
+ */
+fib_entry_t*
+fib_entry_src_action_path_add (fib_entry_t *fib_entry,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const fib_route_path_t *rpath)
+{
+ fib_node_index_t old_path_list, fib_entry_index;
+ fib_path_list_flags_t pl_flags;
+ fib_path_ext_t *path_ext;
+ fib_entry_src_t *esrc;
+
+ /*
+ * save variable so we can recover from a fib_entry realloc.
+ */
+ fib_entry_index = fib_entry_get_index(fib_entry);
+
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+ if (NULL == esrc)
+ {
+ fib_entry =
+ fib_entry_src_action_add(fib_entry,
+ source,
+ flags,
+ drop_dpo_get(
+ fib_proto_to_dpo(
+ fib_entry_get_proto(fib_entry))));
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+ }
+
+ /*
+ * we are no doubt modifying a path-list. If the path-list
+ * is shared, and hence not modifiable, then the index returned
+     * will be for a different path-list. This FIB entry needs
+     * to maintain its lock appropriately.
+ */
+ old_path_list = esrc->fes_pl;
+
+ ASSERT(NULL != fib_entry_src_vft[source].fesv_path_add);
+
+ pl_flags = fib_entry_src_flags_2_path_list_flags(fib_entry_get_flags_i(fib_entry));
+ fib_entry_flags_update(fib_entry, rpath, &pl_flags, esrc);
+
+ fib_entry_src_vft[source].fesv_path_add(esrc, fib_entry, pl_flags, rpath);
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ /*
+ * re-resolve all the path-extensions with the new path-list
+ */
+ vec_foreach(path_ext, esrc->fes_path_exts)
+ {
+ fib_path_ext_resolve(path_ext, esrc->fes_pl);
+ }
+ /*
+ * if the path has a label we need to add a path extension
+ */
+ fib_entry_src_path_ext_insert(esrc, rpath);
+
+ fib_path_list_lock(esrc->fes_pl);
+ fib_path_list_unlock(old_path_list);
+
+ return (fib_entry);
+}
+
+/*
+ * fib_entry_src_action_path_swap
+ *
+ * The source is providing new paths to replace the old ones.
+ * Adding a source can result in a new fib_entry being created, which
+ * can in turn mean the pool is realloc'd and thus the entry passed as
+ * an argument is also realloc'd.
+ * @return the entry
+ */
+fib_entry_t*
+fib_entry_src_action_path_swap (fib_entry_t *fib_entry,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const fib_route_path_t *rpaths)
+{
+ fib_node_index_t old_path_list, fib_entry_index;
+ fib_path_list_flags_t pl_flags;
+ const fib_route_path_t *rpath;
+ fib_path_ext_t *path_ext;
+ fib_entry_src_t *esrc;
+
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+
+ /*
+ * save variable so we can recover from a fib_entry realloc.
+ */
+ fib_entry_index = fib_entry_get_index(fib_entry);
+
+ if (NULL == esrc)
+ {
+ fib_entry = fib_entry_src_action_add(fib_entry,
+ source,
+ flags,
+ drop_dpo_get(
+ fib_proto_to_dpo(
+ fib_entry_get_proto(fib_entry))));
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+ }
+
+ /*
+ * swapping paths may create a new path-list (or may use an existing shared)
+     * but we are certainly getting a different one. This FIB entry needs
+ * to maintain its lock appropriately.
+ */
+ old_path_list = esrc->fes_pl;
+
+ ASSERT(NULL != fib_entry_src_vft[source].fesv_path_swap);
+
+ pl_flags = fib_entry_src_flags_2_path_list_flags(flags);
+
+ vec_foreach(rpath, rpaths)
+ {
+ fib_entry_flags_update(fib_entry, rpath, &pl_flags, esrc);
+ }
+
+ fib_entry_src_vft[source].fesv_path_swap(esrc,
+ fib_entry,
+ pl_flags,
+ rpaths);
+
+ vec_foreach(path_ext, esrc->fes_path_exts)
+ {
+ vec_free(path_ext->fpe_label_stack);
+ }
+ vec_free(esrc->fes_path_exts);
+
+ vec_foreach(rpath, rpaths)
+ {
+ fib_entry_src_path_ext_append(esrc, rpath);
+ }
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ fib_path_list_lock(esrc->fes_pl);
+ fib_path_list_unlock(old_path_list);
+
+ return (fib_entry);
+}
+
+fib_entry_src_flag_t
+fib_entry_src_action_path_remove (fib_entry_t *fib_entry,
+ fib_source_t source,
+ const fib_route_path_t *rpath)
+{
+ fib_path_list_flags_t pl_flags;
+ fib_node_index_t old_path_list;
+ fib_path_ext_t *path_ext;
+ fib_entry_src_t *esrc;
+
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+
+ ASSERT(NULL != esrc);
+ ASSERT(esrc->fes_flags & FIB_ENTRY_SRC_FLAG_ADDED);
+
+ /*
+     * we are no doubt modifying a path-list. If the path-list
+     * is shared, and hence not modifiable, then the index returned
+     * will be for a different path-list. This FIB entry needs
+ * to maintain its lock appropriately.
+ */
+ old_path_list = esrc->fes_pl;
+
+ ASSERT(NULL != fib_entry_src_vft[source].fesv_path_remove);
+
+ pl_flags = fib_entry_src_flags_2_path_list_flags(fib_entry_get_flags_i(fib_entry));
+ fib_entry_flags_update(fib_entry, rpath, &pl_flags, esrc);
+
+ fib_entry_src_vft[source].fesv_path_remove(esrc, pl_flags, rpath);
+ /*
+ * find the matching path extension and remove it
+ */
+ vec_foreach(path_ext, esrc->fes_path_exts)
+ {
+ if (!fib_path_ext_cmp(path_ext, rpath))
+ {
+ /*
+ * delete the element moving the remaining elements down 1 position.
+ * this preserves the sorted order.
+ */
+ vec_free(path_ext->fpe_label_stack);
+ vec_delete(esrc->fes_path_exts, 1, (path_ext - esrc->fes_path_exts));
+ break;
+ }
+ }
+ /*
+ * re-resolve all the path-extensions with the new path-list
+ */
+ vec_foreach(path_ext, esrc->fes_path_exts)
+ {
+ fib_path_ext_resolve(path_ext, esrc->fes_pl);
+ }
+
+ /*
+ * lock the new path-list, unlock the old if it had one
+ */
+ fib_path_list_unlock(old_path_list);
+
+    if (FIB_NODE_INDEX_INVALID != esrc->fes_pl)
+    {
+	fib_path_list_lock(esrc->fes_pl);
+	return (FIB_ENTRY_SRC_FLAG_ADDED);
+    }
+ else
+ {
+ /*
+ * no more paths left from this source
+ */
+ fib_entry_src_action_remove(fib_entry, source);
+ return (FIB_ENTRY_SRC_FLAG_NONE);
+ }
+}
+
+u8*
+fib_entry_src_format (fib_entry_t *fib_entry,
+ fib_source_t source,
+ u8* s)
+{
+ fib_entry_src_t *esrc;
+
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+
+ if (NULL != fib_entry_src_vft[source].fesv_format)
+ {
+ return (fib_entry_src_vft[source].fesv_format(esrc, s));
+ }
+ return (s);
+}
+
+adj_index_t
+fib_entry_get_adj_for_source (fib_node_index_t fib_entry_index,
+ fib_source_t source)
+{
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *esrc;
+
+ if (FIB_NODE_INDEX_INVALID == fib_entry_index)
+ return (ADJ_INDEX_INVALID);
+
+ fib_entry = fib_entry_get(fib_entry_index);
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+
+ if (NULL != esrc)
+ {
+ if (FIB_NODE_INDEX_INVALID != esrc->fes_pl)
+ {
+ return (fib_path_list_get_adj(
+ esrc->fes_pl,
+ fib_entry_get_default_chain_type(fib_entry)));
+ }
+ }
+ return (ADJ_INDEX_INVALID);
+}
+
+const int
+fib_entry_get_dpo_for_source (fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ dpo_id_t *dpo)
+{
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *esrc;
+
+ if (FIB_NODE_INDEX_INVALID == fib_entry_index)
+ return (0);
+
+ fib_entry = fib_entry_get(fib_entry_index);
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+
+ if (NULL != esrc)
+ {
+ if (FIB_NODE_INDEX_INVALID != esrc->fes_pl)
+ {
+ fib_path_list_contribute_forwarding(
+ esrc->fes_pl,
+ fib_entry_get_default_chain_type(fib_entry),
+ dpo);
+
+ return (dpo_id_is_valid(dpo));
+ }
+ }
+ return (0);
+}
+
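+/*
+ * An illustrative caller sketch for the getter above (assumed usage,
+ * not code from this file; 'fei' is a hypothetical entry index):
+ *
+ *   dpo_id_t dpo = DPO_INVALID;
+ *
+ *   if (fib_entry_get_dpo_for_source(fei, FIB_SOURCE_API, &dpo))
+ *   {
+ *       // the source contributed valid forwarding; use it, then release
+ *       dpo_reset(&dpo);
+ *   }
+ */
+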
+u32
+fib_entry_get_resolving_interface_for_source (fib_node_index_t entry_index,
+ fib_source_t source)
+{
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *esrc;
+
+ fib_entry = fib_entry_get(entry_index);
+
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+
+ if (NULL != esrc)
+ {
+ if (FIB_NODE_INDEX_INVALID != esrc->fes_pl)
+ {
+ return (fib_path_list_get_resolving_interface(esrc->fes_pl));
+ }
+ }
+ return (~0);
+}
+
+fib_entry_flag_t
+fib_entry_get_flags_for_source (fib_node_index_t entry_index,
+ fib_source_t source)
+{
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *esrc;
+
+ fib_entry = fib_entry_get(entry_index);
+
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+
+ if (NULL != esrc)
+ {
+ return (esrc->fes_entry_flags);
+ }
+
+ return (FIB_ENTRY_FLAG_NONE);
+}
+
+fib_entry_flag_t
+fib_entry_get_flags_i (const fib_entry_t *fib_entry)
+{
+ fib_entry_flag_t flags;
+
+ /*
+ * the vector of sources is deliberately arranged in priority order
+ */
+ if (0 == vec_len(fib_entry->fe_srcs))
+ {
+ flags = FIB_ENTRY_FLAG_NONE;
+ }
+ else
+ {
+ fib_entry_src_t *esrc;
+
+ esrc = vec_elt_at_index(fib_entry->fe_srcs, 0);
+ flags = esrc->fes_entry_flags;
+ }
+
+ return (flags);
+}
+
+void
+fib_entry_set_source_data (fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ const void *data)
+{
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *esrc;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+
+ if (NULL != esrc &&
+ NULL != fib_entry_src_vft[source].fesv_set_data)
+ {
+ fib_entry_src_vft[source].fesv_set_data(esrc, fib_entry, data);
+ }
+}
+
+const void*
+fib_entry_get_source_data (fib_node_index_t fib_entry_index,
+ fib_source_t source)
+{
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *esrc;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+ esrc = fib_entry_src_find(fib_entry, source, NULL);
+
+ if (NULL != esrc &&
+ NULL != fib_entry_src_vft[source].fesv_get_data)
+ {
+ return (fib_entry_src_vft[source].fesv_get_data(esrc, fib_entry));
+ }
+ return (NULL);
+}
+
+void
+fib_entry_src_module_init (void)
+{
+ fib_entry_src_rr_register();
+ fib_entry_src_interface_register();
+ fib_entry_src_default_route_register();
+ fib_entry_src_special_register();
+ fib_entry_src_api_register();
+ fib_entry_src_adj_register();
+ fib_entry_src_mpls_register();
+ fib_entry_src_lisp_register();
+}
diff --git a/src/vnet/fib/fib_entry_src.h b/src/vnet/fib/fib_entry_src.h
new file mode 100644
index 00000000000..640c174db47
--- /dev/null
+++ b/src/vnet/fib/fib_entry_src.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FIB_ENTRY_SRC_H__
+#define __FIB_ENTRY_SRC_H__
+
+#include "fib_entry.h"
+#include "fib_path_list.h"
+#include "fib_internal.h"
+
+/**
+ * Debug macro
+ */
+#ifdef FIB_DEBUG
+#define FIB_ENTRY_DBG(_e, _fmt, _args...) \
+{ \
+ u8*__tmp = NULL; \
+ __tmp = format(__tmp, "e:[%d:%U", \
+ fib_entry_get_index(_e), \
+ format_ip46_address, \
+ &_e->fe_prefix.fp_addr, \
+ IP46_TYPE_ANY); \
+ __tmp = format(__tmp, "/%d]:", \
+ _e->fe_prefix.fp_len); \
+ __tmp = format(__tmp, _fmt, ##_args); \
+ clib_warning("%s", __tmp); \
+ vec_free(__tmp); \
+}
+#else
+#define FIB_ENTRY_DBG(_e, _fmt, _args...)
+#endif
+
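+/*
+ * Illustrative usage of the macro above (compiles to nothing unless
+ * FIB_DEBUG is defined):
+ *
+ *   FIB_ENTRY_DBG(fib_entry, "install: %d", fib_entry->fe_lb.dpoi_index);
+ */
+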
+/**
+ * Source initialisation Function
+ */
+typedef void (*fib_entry_src_init_t)(fib_entry_src_t *src);
+
+/**
+ * Source deinitialisation Function
+ */
+typedef void (*fib_entry_src_deinit_t)(fib_entry_src_t *src);
+
+/**
+ * Source activation. Called when the source is the new best source on the entry.
+ * Return non-zero if the entry can now install, 0 otherwise
+ */
+typedef int (*fib_entry_src_activate_t)(fib_entry_src_t *src,
+ const fib_entry_t *fib_entry);
+
+/**
+ * Source Deactivate.
+ * Called when the source is no longer best source on the entry
+ */
+typedef void (*fib_entry_src_deactivate_t)(fib_entry_src_t *src,
+ const fib_entry_t *fib_entry);
+
+/**
+ * Source Add.
+ * Called when the source is added to the entry
+ */
+typedef void (*fib_entry_src_add_t)(fib_entry_src_t *src,
+ const fib_entry_t *entry,
+ fib_entry_flag_t flags,
+ fib_protocol_t proto,
+ const dpo_id_t *dpo);
+
+/**
+ * Source Remove.
+ */
+typedef void (*fib_entry_src_remove_t)(fib_entry_src_t *src);
+
+/**
+ * Result from a cover update/change
+ */
+typedef struct fib_entry_src_cover_res_t_ {
+ u16 install;
+ fib_node_bw_reason_flag_t bw_reason;
+} fib_entry_src_cover_res_t;
+
+/**
+ * Cover changed. The source should re-evaluate its cover.
+ */
+typedef fib_entry_src_cover_res_t (*fib_entry_src_cover_change_t)(
+ fib_entry_src_t *src,
+ const fib_entry_t *fib_entry);
+
+/**
+ * Cover updated. The source's cover has been updated (i.e. its forwarding
+ * has changed); the source may need to re-evaluate.
+ */
+typedef fib_entry_src_cover_res_t (*fib_entry_src_cover_update_t)(
+ fib_entry_src_t *src,
+ const fib_entry_t *fib_entry);
+
+/**
+ * Forwarding updated. Notification that the forwarding information for the
+ * entry has been updated. This notification is sent to all sources, not just
+ * the active best.
+ */
+typedef void (*fib_entry_src_fwd_update_t)(fib_entry_src_t *src,
+ const fib_entry_t *fib_entry,
+ fib_source_t best_source);
+
+/**
+ * Installed. Notification that the source is now installed as
+ * the entry's forwarding source.
+ */
+typedef void (*fib_entry_src_installed_t)(fib_entry_src_t *src,
+ const fib_entry_t *fib_entry);
+
+/**
+ * format.
+ */
+typedef u8* (*fib_entry_src_format_t)(fib_entry_src_t *src,
+ u8* s);
+
+/**
+ * Source path add
+ * the source is adding a new path
+ */
+typedef void (*fib_entry_src_path_add_t)(fib_entry_src_t *src,
+ const fib_entry_t *fib_entry,
+ fib_path_list_flags_t pl_flags,
+ const fib_route_path_t *path);
+
+/**
+ * Source path remove
+ * the source is removing a path
+ */
+typedef void (*fib_entry_src_path_remove_t)(fib_entry_src_t *src,
+ fib_path_list_flags_t pl_flags,
+ const fib_route_path_t *path);
+
+/**
+ * Source path replace/swap
+ * the source is providing a new set of paths
+ */
+typedef void (*fib_entry_src_path_swap_t)(fib_entry_src_t *src,
+ const fib_entry_t *fib_entry,
+ fib_path_list_flags_t pl_flags,
+ const fib_route_path_t *path);
+
+/**
+ * Set source specific opaque data
+ */
+typedef void (*fib_entry_src_set_data_t)(fib_entry_src_t *src,
+ const fib_entry_t *fib_entry,
+ const void *data);
+
+/**
+ * Get source specific opaque data
+ */
+typedef const void* (*fib_entry_src_get_data_t)(fib_entry_src_t *src,
+ const fib_entry_t *fib_entry);
+
+/**
+ * Virtual function table each FIB entry source will register
+ */
+typedef struct fib_entry_src_vft_t_ {
+ fib_entry_src_init_t fesv_init;
+ fib_entry_src_deinit_t fesv_deinit;
+ fib_entry_src_activate_t fesv_activate;
+ fib_entry_src_deactivate_t fesv_deactivate;
+ fib_entry_src_add_t fesv_add;
+ fib_entry_src_remove_t fesv_remove;
+ fib_entry_src_path_swap_t fesv_path_swap;
+ fib_entry_src_path_add_t fesv_path_add;
+ fib_entry_src_path_remove_t fesv_path_remove;
+ fib_entry_src_cover_change_t fesv_cover_change;
+ fib_entry_src_cover_update_t fesv_cover_update;
+ fib_entry_src_format_t fesv_format;
+ fib_entry_src_installed_t fesv_installed;
+ fib_entry_src_fwd_update_t fesv_fwd_update;
+ fib_entry_src_get_data_t fesv_get_data;
+ fib_entry_src_set_data_t fesv_set_data;
+} fib_entry_src_vft_t;
+
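+/*
+ * A minimal registration sketch ('example_src_*' are hypothetical names;
+ * the concrete registrations live in the fib_entry_src_*.c files).
+ * Callbacks left NULL are simply not invoked:
+ *
+ *   const static fib_entry_src_vft_t example_src_vft = {
+ *       .fesv_init = example_src_init,
+ *       .fesv_path_swap = example_src_path_swap,
+ *       .fesv_remove = example_src_remove,
+ *   };
+ *
+ *   fib_entry_src_register(FIB_SOURCE_SPECIAL, &example_src_vft);
+ */
+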
+#define FOR_EACH_SRC_ADDED(_entry, _src, _source, action) \
+{ \
+ vec_foreach(_src, _entry->fe_srcs) \
+ { \
+ if (_src->fes_flags & FIB_ENTRY_SRC_FLAG_ADDED) { \
+ _source = _src->fes_src; \
+ do { \
+ action; \
+ } while(0); \
+ } \
+ } \
+}
+
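+/*
+ * A sketch of how a caller might walk the added sources using the macro
+ * above ('fib_entry' and 's' are assumed locals):
+ *
+ *   fib_entry_src_t *src;
+ *   fib_source_t source;
+ *
+ *   FOR_EACH_SRC_ADDED(fib_entry, src, source,
+ *   ({
+ *       s = fib_entry_src_format(fib_entry, source, s);
+ *   }))
+ */
+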
+extern u8* fib_entry_src_format(fib_entry_t *entry,
+ fib_source_t source,
+ u8* s);
+
+extern void fib_entry_src_register(fib_source_t source,
+ const fib_entry_src_vft_t *vft);
+
+extern void fib_entry_src_action_init(fib_entry_t *entry,
+ fib_source_t source);
+
+extern void fib_entry_src_action_deinit(fib_entry_t *fib_entry,
+ fib_source_t source);
+
+extern fib_entry_src_cover_res_t fib_entry_src_action_cover_change(
+ fib_entry_t *entry,
+ fib_source_t source);
+
+extern fib_entry_src_cover_res_t fib_entry_src_action_cover_update(
+ fib_entry_t *fib_entry,
+ fib_source_t source);
+
+extern void fib_entry_src_action_activate(fib_entry_t *fib_entry,
+ fib_source_t source);
+
+extern void fib_entry_src_action_deactivate(fib_entry_t *fib_entry,
+ fib_source_t source);
+extern void fib_entry_src_action_reactivate(fib_entry_t *fib_entry,
+ fib_source_t source);
+
+extern fib_entry_t* fib_entry_src_action_add(fib_entry_t *fib_entry,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo);
+extern fib_entry_t* fib_entry_src_action_update(fib_entry_t *fib_entry,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo);
+
+extern fib_entry_src_flag_t fib_entry_src_action_remove(fib_entry_t *fib_entry,
+ fib_source_t source);
+
+extern void fib_entry_src_action_install(fib_entry_t *fib_entry,
+ fib_source_t source);
+
+extern void fib_entry_src_action_uninstall(fib_entry_t *fib_entry);
+
+extern fib_entry_t* fib_entry_src_action_path_add(fib_entry_t *fib_entry,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const fib_route_path_t *path);
+
+extern fib_entry_t* fib_entry_src_action_path_swap(fib_entry_t *fib_entry,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const fib_route_path_t *path);
+
+extern fib_entry_src_flag_t fib_entry_src_action_path_remove(fib_entry_t *fib_entry,
+ fib_source_t source,
+ const fib_route_path_t *path);
+
+extern void fib_entry_src_action_installed(const fib_entry_t *fib_entry,
+ fib_source_t source);
+
+extern fib_forward_chain_type_t fib_entry_get_default_chain_type(
+ const fib_entry_t *fib_entry);
+extern fib_entry_flag_t fib_entry_get_flags_i(const fib_entry_t *fib_entry);
+extern fib_path_list_flags_t fib_entry_src_flags_2_path_list_flags(
+ fib_entry_flag_t eflags);
+
+extern fib_forward_chain_type_t fib_entry_chain_type_fixup(const fib_entry_t *entry,
+ fib_forward_chain_type_t fct);
+
+extern void fib_entry_src_mk_lb (fib_entry_t *fib_entry,
+ const fib_entry_src_t *esrc,
+ fib_forward_chain_type_t fct,
+ dpo_id_t *dpo_lb);
+
+
+/*
+ * Per-source registration. Declared here to save a separate .h file for each source.
+ */
+extern void fib_entry_src_default_register(void);
+extern void fib_entry_src_rr_register(void);
+extern void fib_entry_src_interface_register(void);
+extern void fib_entry_src_default_route_register(void);
+extern void fib_entry_src_special_register(void);
+extern void fib_entry_src_api_register(void);
+extern void fib_entry_src_adj_register(void);
+extern void fib_entry_src_mpls_register(void);
+extern void fib_entry_src_lisp_register(void);
+
+extern void fib_entry_src_module_init(void);
+
+#endif
diff --git a/src/vnet/fib/fib_entry_src_adj.c b/src/vnet/fib/fib_entry_src_adj.c
new file mode 100644
index 00000000000..64f82a73e07
--- /dev/null
+++ b/src/vnet/fib/fib_entry_src_adj.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fib_entry.h"
+#include "fib_entry_src.h"
+#include "fib_path_list.h"
+#include "fib_table.h"
+#include "fib_entry_cover.h"
+#include "fib_attached_export.h"
+
+/**
+ * Source initialisation Function
+ */
+static void
+fib_entry_src_adj_init (fib_entry_src_t *src)
+{
+ src->adj.fesa_cover = FIB_NODE_INDEX_INVALID;
+ src->adj.fesa_sibling = FIB_NODE_INDEX_INVALID;
+}
+
+static void
+fib_entry_src_adj_path_swap (fib_entry_src_t *src,
+ const fib_entry_t *entry,
+ fib_path_list_flags_t pl_flags,
+ const fib_route_path_t *paths)
+{
+ src->fes_pl = fib_path_list_create(pl_flags, paths);
+}
+
+static void
+fib_entry_src_adj_remove (fib_entry_src_t *src)
+{
+ src->fes_pl = FIB_NODE_INDEX_INVALID;
+}
+
+
+/*
+ * Source activate.
+ * Called when the source is the new best source on the entry
+ */
+static int
+fib_entry_src_adj_activate (fib_entry_src_t *src,
+ const fib_entry_t *fib_entry)
+{
+ fib_entry_t *cover;
+
+ /*
+ * find the covering prefix. become a dependent thereof.
+ * there should always be a cover, though it may be the default route.
+ */
+ src->adj.fesa_cover = fib_table_get_less_specific(fib_entry->fe_fib_index,
+ &fib_entry->fe_prefix);
+
+ ASSERT(FIB_NODE_INDEX_INVALID != src->adj.fesa_cover);
+ ASSERT(fib_entry_get_index(fib_entry) != src->adj.fesa_cover);
+
+ cover = fib_entry_get(src->adj.fesa_cover);
+
+ ASSERT(cover != fib_entry);
+
+ src->adj.fesa_sibling =
+ fib_entry_cover_track(cover,
+ fib_entry_get_index(fib_entry));
+
+ /*
+     * if the cover is attached then this adj source entry can install,
+     * via the adj. otherwise install a drop.
+     * This prevents ARP/ND entries learnt on interface X that do not belong
+     * on X's subnet from being added to the FIB. To allow them would let
+     * nefarious gratuitous ARP requests attract traffic to the sender.
+     *
+     * and yes, I really do mean attached and not connected.
+     * this abomination:
+ * ip route add 10.0.0.0/24 Eth0
+ * is attached. and we want adj-fibs to install on Eth0.
+ */
+ return (FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags_i(cover));
+}
+
+/*
+ * Source Deactivate.
+ * Called when the source is no longer best source on the entry
+ */
+static void
+fib_entry_src_adj_deactivate (fib_entry_src_t *src,
+ const fib_entry_t *fib_entry)
+{
+ fib_entry_t *cover;
+
+ /*
+     * remove the dependency on the covering entry
+ */
+ ASSERT(FIB_NODE_INDEX_INVALID != src->adj.fesa_cover);
+ cover = fib_entry_get(src->adj.fesa_cover);
+
+ fib_entry_cover_untrack(cover, src->adj.fesa_sibling);
+
+ /*
+ * tell the cover this entry no longer needs exporting
+ */
+ fib_attached_export_covered_removed(cover, fib_entry_get_index(fib_entry));
+
+ src->adj.fesa_cover = FIB_NODE_INDEX_INVALID;
+}
+
+static u8*
+fib_entry_src_adj_format (fib_entry_src_t *src,
+ u8* s)
+{
+ return (format(s, "cover:%d", src->adj.fesa_cover));
+}
+
+static void
+fib_entry_src_adj_installed (fib_entry_src_t *src,
+ const fib_entry_t *fib_entry)
+{
+ /*
+ * The adj source now rules! poke our cover to get exported
+ */
+ fib_entry_t *cover;
+
+ ASSERT(FIB_NODE_INDEX_INVALID != src->adj.fesa_cover);
+ cover = fib_entry_get(src->adj.fesa_cover);
+
+ fib_attached_export_covered_added(cover,
+ fib_entry_get_index(fib_entry));
+}
+
+static fib_entry_src_cover_res_t
+fib_entry_src_adj_cover_change (fib_entry_src_t *src,
+ const fib_entry_t *fib_entry)
+{
+ fib_entry_src_cover_res_t res = {
+ .install = !0,
+ .bw_reason = FIB_NODE_BW_REASON_FLAG_NONE,
+ };
+
+ fib_entry_src_adj_deactivate(src, fib_entry);
+
+ res.install = fib_entry_src_adj_activate(src, fib_entry);
+
+    if (res.install)
+    {
+ /*
+ * ADJ fib can install
+ */
+ res.bw_reason = FIB_NODE_BW_REASON_FLAG_EVALUATE;
+ }
+
+ return (res);
+}
+
+/*
+ * fib_entry_src_adj_cover_update
+ */
+static fib_entry_src_cover_res_t
+fib_entry_src_adj_cover_update (fib_entry_src_t *src,
+ const fib_entry_t *fib_entry)
+{
+ /*
+ * the cover has updated, i.e. its forwarding or flags
+     * have changed. don't deactivate/activate here, since this
+     * prefix is updated during the cover's walk.
+ */
+ fib_entry_src_cover_res_t res = {
+ .install = !0,
+ .bw_reason = FIB_NODE_BW_REASON_FLAG_NONE,
+ };
+ fib_entry_t *cover;
+
+ ASSERT(FIB_NODE_INDEX_INVALID != src->adj.fesa_cover);
+
+ cover = fib_entry_get(src->adj.fesa_cover);
+
+ res.install = (FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags_i(cover));
+
+ return (res);
+}
+
+const static fib_entry_src_vft_t adj_src_vft = {
+ .fesv_init = fib_entry_src_adj_init,
+ .fesv_path_swap = fib_entry_src_adj_path_swap,
+ .fesv_remove = fib_entry_src_adj_remove,
+ .fesv_activate = fib_entry_src_adj_activate,
+ .fesv_deactivate = fib_entry_src_adj_deactivate,
+ .fesv_format = fib_entry_src_adj_format,
+ .fesv_installed = fib_entry_src_adj_installed,
+ .fesv_cover_change = fib_entry_src_adj_cover_change,
+ .fesv_cover_update = fib_entry_src_adj_cover_update,
+};
+
+void
+fib_entry_src_adj_register (void)
+{
+ fib_entry_src_register(FIB_SOURCE_ADJ, &adj_src_vft);
+}
diff --git a/src/vnet/fib/fib_entry_src_api.c b/src/vnet/fib/fib_entry_src_api.c
new file mode 100644
index 00000000000..edc8a47bc17
--- /dev/null
+++ b/src/vnet/fib/fib_entry_src_api.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fib_entry.h"
+#include "fib_entry_src.h"
+#include "fib_path_list.h"
+
+/**
+ * Source initialisation Function
+ */
+static void
+fib_entry_src_api_init (fib_entry_src_t *src)
+{
+}
+
+/**
+ * Source deinitialisation Function
+ */
+static void
+fib_entry_src_api_deinit (fib_entry_src_t *src)
+{
+}
+
+static void
+fib_entry_src_api_path_swap (fib_entry_src_t *src,
+ const fib_entry_t *entry,
+ fib_path_list_flags_t pl_flags,
+ const fib_route_path_t *paths)
+{
+ src->fes_pl = fib_path_list_create((FIB_PATH_LIST_FLAG_SHARED | pl_flags),
+ paths);
+}
+
+static void
+fib_entry_src_api_path_add (fib_entry_src_t *src,
+ const fib_entry_t *entry,
+ fib_path_list_flags_t pl_flags,
+ const fib_route_path_t *paths)
+{
+ if (FIB_NODE_INDEX_INVALID == src->fes_pl)
+ {
+ src->fes_pl =
+ fib_path_list_create((FIB_PATH_LIST_FLAG_SHARED | pl_flags), paths);
+ }
+ else
+ {
+ src->fes_pl =
+ fib_path_list_copy_and_path_add(src->fes_pl,
+ (FIB_PATH_LIST_FLAG_SHARED | pl_flags),
+ paths);
+ }
+}
+
+static void
+fib_entry_src_api_path_remove (fib_entry_src_t *src,
+ fib_path_list_flags_t pl_flags,
+ const fib_route_path_t *paths)
+{
+ if (FIB_NODE_INDEX_INVALID != src->fes_pl)
+ {
+ src->fes_pl =
+ fib_path_list_copy_and_path_remove(src->fes_pl,
+ (FIB_PATH_LIST_FLAG_SHARED | pl_flags),
+ paths);
+ }
+}
+
+static void
+fib_entry_src_api_add (fib_entry_src_t *src,
+ const fib_entry_t *entry,
+ fib_entry_flag_t flags,
+ fib_protocol_t proto,
+ const dpo_id_t *dpo)
+{
+ if (FIB_ENTRY_FLAG_NONE != flags)
+ {
+ src->fes_pl = fib_path_list_create_special(
+ proto,
+ fib_entry_src_flags_2_path_list_flags(flags),
+ dpo);
+ }
+}
+
+static void
+fib_entry_src_api_remove (fib_entry_src_t *src)
+{
+ src->fes_pl = FIB_NODE_INDEX_INVALID;
+}
+
+const static fib_entry_src_vft_t api_src_vft = {
+ .fesv_init = fib_entry_src_api_init,
+ .fesv_deinit = fib_entry_src_api_deinit,
+ .fesv_add = fib_entry_src_api_add,
+ .fesv_remove = fib_entry_src_api_remove,
+ .fesv_path_add = fib_entry_src_api_path_add,
+ .fesv_path_swap = fib_entry_src_api_path_swap,
+ .fesv_path_remove = fib_entry_src_api_path_remove,
+};
+
+void
+fib_entry_src_api_register (void)
+{
+ fib_entry_src_register(FIB_SOURCE_PLUGIN_HI, &api_src_vft);
+ fib_entry_src_register(FIB_SOURCE_API, &api_src_vft);
+ fib_entry_src_register(FIB_SOURCE_CLI, &api_src_vft);
+ fib_entry_src_register(FIB_SOURCE_DHCP, &api_src_vft);
+}
diff --git a/src/vnet/fib/fib_entry_src_default.c b/src/vnet/fib/fib_entry_src_default.c
new file mode 100644
index 00000000000..9846cf56e64
--- /dev/null
+++ b/src/vnet/fib/fib_entry_src_default.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fib_entry.h"
+#include "fib_entry_src.h"
+#include "fib_path_list.h"
+
+/**
+ * Source initialisation Function
+ */
+static void
+fib_entry_src_default_init (fib_entry_src_t *src)
+{
+}
+
+/**
+ * Source deinitialisation Function
+ */
+static void
+fib_entry_src_default_deinit (fib_entry_src_t *src)
+{
+}
+
+static void
+fib_entry_src_cover_change (fib_entry_src_t *src)
+{
+}
+
+static void
+fib_entry_src_default_path_add (fib_entry_src_t *src,
+				const fib_entry_t *entry,
+				fib_path_list_flags_t pl_flags,
+				const fib_route_path_t *paths)
+{
+}
+
+static void
+fib_entry_src_default_path_remove (fib_entry_src_t *src,
+				   fib_path_list_flags_t pl_flags,
+				   const fib_route_path_t *paths)
+{
+}
+
+
+/*
+ * Source activate.
+ * Called when the source is the new best source on the entry
+ */
+static int
+fib_entry_src_default_activate (fib_entry_src_t *src,
+				const fib_entry_t *fib_entry)
+{
+    return (!0);
+}
+
+/*
+ * Source Deactivate.
+ * Called when the source is no longer best source on the entry
+ */
+static void
+fib_entry_src_default_deactivate (fib_entry_src_t *src,
+ const fib_entry_t *fib_entry)
+{
+}
+
+static void
+fib_entry_src_default_add (fib_entry_src_t *src,
+			   const fib_entry_t *entry,
+			   fib_entry_flag_t flags,
+			   fib_protocol_t proto,
+			   const dpo_id_t *dpo)
+{
+}
+
+static void
+fib_entry_src_default_remove (fib_entry_src_t *src)
+{
+}
+
+const static fib_entry_src_vft_t default_src_vft = {
+ .fesv_init = fib_entry_src_default_init,
+ .fesv_deinit = fib_entry_src_default_deinit,
+ .fesv_add = fib_entry_src_default_add,
+ .fesv_remove = fib_entry_src_default_remove,
+ .fesv_path_add = fib_entry_src_default_path_add,
+ .fesv_path_remove = fib_entry_src_default_path_remove,
+ .fesv_activate = fib_entry_src_default_activate,
+ .fesv_deactivate = fib_entry_src_default_deactivate,
+};
+
+void
+fib_entry_src_default_register (void)
+{
+ fib_source_t source;
+
+ FOR_EACH_FIB_SOURCE(source) {
+ fib_entry_src_register(source, &default_src_vft);
+ }
+}
diff --git a/src/vnet/fib/fib_entry_src_default_route.c b/src/vnet/fib/fib_entry_src_default_route.c
new file mode 100644
index 00000000000..9f4e7c36952
--- /dev/null
+++ b/src/vnet/fib/fib_entry_src_default_route.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fib_entry.h"
+#include "fib_entry_src.h"
+
+/**
+ * Source initialisation Function
+ */
+static void
+fib_entry_src_default_route_init (fib_entry_src_t *src)
+{
+ src->fes_flags = FIB_ENTRY_SRC_FLAG_NONE;
+}
+
+static void
+fib_entry_src_default_route_remove (fib_entry_src_t *src)
+{
+ src->fes_pl = FIB_NODE_INDEX_INVALID;
+}
+
+static void
+fib_entry_src_default_route_add (fib_entry_src_t *src,
+ const fib_entry_t *entry,
+ fib_entry_flag_t flags,
+ fib_protocol_t proto,
+ const dpo_id_t *dpo)
+{
+ src->fes_pl = fib_path_list_create_special(proto,
+ FIB_PATH_LIST_FLAG_DROP,
+ dpo);
+}
+
+const static fib_entry_src_vft_t default_route_src_vft = {
+ .fesv_init = fib_entry_src_default_route_init,
+ .fesv_add = fib_entry_src_default_route_add,
+ .fesv_remove = fib_entry_src_default_route_remove,
+};
+
+void
+fib_entry_src_default_route_register (void)
+{
+    fib_entry_src_register(FIB_SOURCE_DEFAULT_ROUTE, &default_route_src_vft);
+}
+
+
diff --git a/src/vnet/fib/fib_entry_src_interface.c b/src/vnet/fib/fib_entry_src_interface.c
new file mode 100644
index 00000000000..ca04716ed8f
--- /dev/null
+++ b/src/vnet/fib/fib_entry_src_interface.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fib_entry.h"
+#include "fib_entry_src.h"
+#include "fib_path_list.h"
+#include "fib_internal.h"
+#include "fib_table.h"
+#include "fib_entry_cover.h"
+#include "fib_attached_export.h"
+
+/**
+ * Source initialisation Function
+ */
+static void
+fib_entry_src_interface_init (fib_entry_src_t *src)
+{
+ src->interface.fesi_cover = FIB_NODE_INDEX_INVALID;
+ src->interface.fesi_sibling = FIB_NODE_INDEX_INVALID;
+}
+
+static void
+fib_entry_src_interface_path_swap (fib_entry_src_t *src,
+ const fib_entry_t *entry,
+ fib_path_list_flags_t pl_flags,
+ const fib_route_path_t *paths)
+{
+ ip_adjacency_t *adj;
+
+ src->fes_pl = fib_path_list_create(pl_flags, paths);
+
+ /*
+     * this is a hack to get the entry's prefix into the glean adjacency
+     * so that it is available for fast retrieval in the switch path.
+ */
+ if (!(FIB_ENTRY_FLAG_LOCAL & src->fes_entry_flags))
+ {
+ adj = adj_get(fib_path_list_get_adj(
+ src->fes_pl,
+ fib_entry_get_default_chain_type(entry)));
+
+ if (IP_LOOKUP_NEXT_GLEAN == adj->lookup_next_index)
+ {
+ /*
+ * the connected prefix will link to a glean on a non-p2p
+ * interface.
+ */
+ adj->sub_type.glean.receive_addr = entry->fe_prefix.fp_addr;
+ }
+ }
+}
+
+/*
+ * Source activate.
+ * Called when the source is the new best source on the entry
+ */
+static int
+fib_entry_src_interface_activate (fib_entry_src_t *src,
+ const fib_entry_t *fib_entry)
+{
+ fib_entry_t *cover;
+
+ if (FIB_ENTRY_FLAG_LOCAL & src->fes_entry_flags)
+ {
+ /*
+	 * Track the covering attached/connected prefix. This is so that
+ * during an attached export of the cover, this local prefix is
+ * also exported
+ */
+ src->interface.fesi_cover =
+ fib_table_get_less_specific(fib_entry->fe_fib_index,
+ &fib_entry->fe_prefix);
+
+ ASSERT(FIB_NODE_INDEX_INVALID != src->interface.fesi_cover);
+
+ cover = fib_entry_get(src->interface.fesi_cover);
+
+ src->interface.fesi_sibling =
+ fib_entry_cover_track(cover, fib_entry_get_index(fib_entry));
+ }
+
+ return (!0);
+}
+
+
+/*
+ * Source Deactivate.
+ * Called when the source is no longer best source on the entry
+ */
+static void
+fib_entry_src_interface_deactivate (fib_entry_src_t *src,
+ const fib_entry_t *fib_entry)
+{
+ fib_entry_t *cover;
+
+ /*
+     * remove the dependency on the covering entry
+ */
+ if (FIB_NODE_INDEX_INVALID != src->interface.fesi_cover)
+ {
+ cover = fib_entry_get(src->interface.fesi_cover);
+
+ fib_entry_cover_untrack(cover, src->interface.fesi_sibling);
+
+ src->interface.fesi_cover = FIB_NODE_INDEX_INVALID;
+ }
+}
+
+static fib_entry_src_cover_res_t
+fib_entry_src_interface_cover_change (fib_entry_src_t *src,
+ const fib_entry_t *fib_entry)
+{
+ fib_entry_src_cover_res_t res = {
+ .install = !0,
+ .bw_reason = FIB_NODE_BW_REASON_FLAG_NONE,
+ };
+
+ if (FIB_NODE_INDEX_INVALID == src->interface.fesi_cover)
+ {
+ /*
+ * not tracking the cover. surprised we got poked?
+ */
+ return (res);
+ }
+
+ /*
+ * this function is called when this entry's cover has a more specific
+     * entry inserted beneath it. That does not necessarily mean that this
+     * entry is covered by the new prefix. check that it is.
+ */
+    if (src->interface.fesi_cover != fib_table_get_less_specific(fib_entry->fe_fib_index,
+ &fib_entry->fe_prefix))
+ {
+ fib_entry_src_interface_deactivate(src, fib_entry);
+ fib_entry_src_interface_activate(src, fib_entry);
+ }
+ return (res);
+}
+
+static void
+fib_entry_src_interface_installed (fib_entry_src_t *src,
+ const fib_entry_t *fib_entry)
+{
+ /*
+ * The interface source now rules! poke our cover to get exported
+ */
+ fib_entry_t *cover;
+
+ if (FIB_NODE_INDEX_INVALID != src->interface.fesi_cover)
+ {
+ cover = fib_entry_get(src->interface.fesi_cover);
+
+ fib_attached_export_covered_added(cover,
+ fib_entry_get_index(fib_entry));
+ }
+}
+
+static u8*
+fib_entry_src_interface_format (fib_entry_src_t *src,
+ u8* s)
+{
+ return (format(s, "cover:%d", src->interface.fesi_cover));
+}
+
+const static fib_entry_src_vft_t interface_src_vft = {
+ .fesv_init = fib_entry_src_interface_init,
+ .fesv_path_swap = fib_entry_src_interface_path_swap,
+ .fesv_activate = fib_entry_src_interface_activate,
+ .fesv_deactivate = fib_entry_src_interface_deactivate,
+ .fesv_format = fib_entry_src_interface_format,
+ .fesv_installed = fib_entry_src_interface_installed,
+ .fesv_cover_change = fib_entry_src_interface_cover_change,
+ /*
+ * not concerned about updates to the cover. the cover will
+ * decide to export or not
+ */
+};
+
+void
+fib_entry_src_interface_register (void)
+{
+ fib_entry_src_register(FIB_SOURCE_INTERFACE, &interface_src_vft);
+}
diff --git a/src/vnet/fib/fib_entry_src_lisp.c b/src/vnet/fib/fib_entry_src_lisp.c
new file mode 100644
index 00000000000..7f8b91bbab6
--- /dev/null
+++ b/src/vnet/fib/fib_entry_src_lisp.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fib_entry.h"
+#include "fib_entry_src.h"
+#include "fib_path_list.h"
+
+/**
+ * Source initialisation Function
+ */
+static void
+fib_entry_src_lisp_init (fib_entry_src_t *src)
+{
+}
+
+/**
+ * Source deinitialisation Function
+ */
+static void
+fib_entry_src_lisp_deinit (fib_entry_src_t *src)
+{
+}
+
+static void
+fib_entry_src_lisp_path_swap (fib_entry_src_t *src,
+ const fib_entry_t *entry,
+ fib_path_list_flags_t pl_flags,
+ const fib_route_path_t *paths)
+{
+ src->fes_pl = fib_path_list_create((FIB_PATH_LIST_FLAG_SHARED | pl_flags),
+ paths);
+}
+
+static void
+fib_entry_src_lisp_path_add (fib_entry_src_t *src,
+ const fib_entry_t *entry,
+ fib_path_list_flags_t pl_flags,
+ const fib_route_path_t *paths)
+{
+ if (FIB_NODE_INDEX_INVALID == src->fes_pl)
+ {
+ src->fes_pl =
+ fib_path_list_create((FIB_PATH_LIST_FLAG_SHARED | pl_flags), paths);
+ }
+ else
+ {
+ src->fes_pl =
+ fib_path_list_copy_and_path_add(src->fes_pl,
+ (FIB_PATH_LIST_FLAG_SHARED | pl_flags),
+ paths);
+ }
+}
+
+static void
+fib_entry_src_lisp_path_remove (fib_entry_src_t *src,
+ fib_path_list_flags_t pl_flags,
+ const fib_route_path_t *paths)
+{
+ if (FIB_NODE_INDEX_INVALID != src->fes_pl)
+ {
+ src->fes_pl =
+ fib_path_list_copy_and_path_remove(src->fes_pl,
+ (FIB_PATH_LIST_FLAG_SHARED | pl_flags),
+ paths);
+ }
+}
+
+static void
+fib_entry_src_lisp_add (fib_entry_src_t *src,
+ const fib_entry_t *entry,
+ fib_entry_flag_t flags,
+ fib_protocol_t proto,
+ const dpo_id_t *dpo)
+{
+ if (FIB_ENTRY_FLAG_NONE != flags)
+ {
+ src->fes_pl = fib_path_list_create_special(
+ proto,
+ fib_entry_src_flags_2_path_list_flags(flags),
+ dpo);
+ }
+}
+
+static void
+fib_entry_src_lisp_remove (fib_entry_src_t *src)
+{
+ src->fes_pl = FIB_NODE_INDEX_INVALID;
+}
+
+static void
+fib_entry_src_lisp_set_data (fib_entry_src_t *src,
+ const fib_entry_t *entry,
+ const void *data)
+{
+ src->lisp.fesl_fib_index = *(u32*)data;
+}
+
+static const void*
+fib_entry_src_lisp_get_data (fib_entry_src_t *src,
+ const fib_entry_t *entry)
+{
+ return (&(src->lisp.fesl_fib_index));
+}
+
+const static fib_entry_src_vft_t lisp_src_vft = {
+ .fesv_init = fib_entry_src_lisp_init,
+ .fesv_deinit = fib_entry_src_lisp_deinit,
+ .fesv_add = fib_entry_src_lisp_add,
+ .fesv_remove = fib_entry_src_lisp_remove,
+ .fesv_path_add = fib_entry_src_lisp_path_add,
+ .fesv_path_swap = fib_entry_src_lisp_path_swap,
+ .fesv_path_remove = fib_entry_src_lisp_path_remove,
+ .fesv_set_data = fib_entry_src_lisp_set_data,
+ .fesv_get_data = fib_entry_src_lisp_get_data,
+};
+
+void
+fib_entry_src_lisp_register (void)
+{
+    fib_entry_src_register(FIB_SOURCE_LISP, &lisp_src_vft);
+}
diff --git a/src/vnet/fib/fib_entry_src_mpls.c b/src/vnet/fib/fib_entry_src_mpls.c
new file mode 100644
index 00000000000..14c7310fbf3
--- /dev/null
+++ b/src/vnet/fib/fib_entry_src_mpls.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/mpls/mpls_types.h>
+#include <vnet/dpo/drop_dpo.h>
+
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_entry_src.h>
+#include <vnet/fib/mpls_fib.h>
+
+/**
+ * Source initialisation Function
+ */
+static void
+fib_entry_src_mpls_init (fib_entry_src_t *src)
+{
+ mpls_eos_bit_t eos;
+
+ src->fes_flags = FIB_ENTRY_SRC_FLAG_NONE;
+ src->mpls.fesm_label = MPLS_LABEL_INVALID;
+
+ FOR_EACH_MPLS_EOS_BIT(eos)
+ {
+ src->mpls.fesm_lfes[eos] = FIB_NODE_INDEX_INVALID;
+ }
+}
+
+/**
+ * Source deinitialisation Function
+ */
+static void
+fib_entry_src_mpls_deinit (fib_entry_src_t *src)
+{
+}
+
+static void
+fib_entry_src_mpls_remove (fib_entry_src_t *src)
+{
+ src->fes_pl = FIB_NODE_INDEX_INVALID;
+ src->mpls.fesm_label = MPLS_LABEL_INVALID;
+}
+
+static void
+fib_entry_src_mpls_add (fib_entry_src_t *src,
+ const fib_entry_t *entry,
+ fib_entry_flag_t flags,
+ fib_protocol_t proto,
+ const dpo_id_t *dpo)
+{
+ src->fes_pl =
+ fib_path_list_create_special(proto,
+ FIB_PATH_LIST_FLAG_DROP,
+ drop_dpo_get(fib_proto_to_dpo(proto)));
+}
+
+static void
+fib_entry_src_mpls_set_data (fib_entry_src_t *src,
+ const fib_entry_t *entry,
+ const void *data)
+{
+ fib_protocol_t payload_proto;
+ fib_node_index_t fei;
+ mpls_label_t label;
+ mpls_eos_bit_t eos;
+
+ /*
+     * post MPLS table alloc, and the possible re-alloc of fib entries,
+     * the entry pointer will no longer be valid, so save its index
+ */
+ payload_proto = entry->fe_prefix.fp_proto;
+ fei = fib_entry_get_index(entry);
+ label = *(mpls_label_t*)data;
+
+ if (MPLS_LABEL_INVALID == label)
+ {
+ /*
+ * removing the local label
+ */
+ FOR_EACH_MPLS_EOS_BIT(eos)
+ {
+ fib_table_entry_delete_index(src->mpls.fesm_lfes[eos],
+ FIB_SOURCE_SPECIAL);
+ }
+ fib_table_unlock(MPLS_FIB_DEFAULT_TABLE_ID, FIB_PROTOCOL_MPLS);
+ src->mpls.fesm_label = label;
+ }
+ else
+ {
+ fib_prefix_t prefix = {
+ .fp_proto = FIB_PROTOCOL_MPLS,
+ .fp_label = label,
+ };
+ fib_node_index_t fib_index;
+ dpo_id_t dpo = DPO_INVALID;
+
+ /*
+ * adding a new local label. make sure the MPLS fib exists.
+ */
+ if (MPLS_LABEL_INVALID == src->mpls.fesm_label)
+ {
+ fib_index =
+ fib_table_find_or_create_and_lock(FIB_PROTOCOL_MPLS,
+ MPLS_FIB_DEFAULT_TABLE_ID);
+ }
+ else
+ {
+ fib_index = mpls_fib_index_from_table_id(MPLS_FIB_DEFAULT_TABLE_ID);
+
+ /*
+         * if this is a change in label, remove the old one first
+ */
+ if (src->mpls.fesm_label != label)
+ {
+ FOR_EACH_MPLS_EOS_BIT(eos)
+ {
+ ASSERT(FIB_NODE_INDEX_INVALID != src->mpls.fesm_lfes[eos]);
+ fib_table_entry_delete_index(src->mpls.fesm_lfes[eos],
+ FIB_SOURCE_SPECIAL);
+ }
+ }
+ }
+
+ src->mpls.fesm_label = label;
+
+ FOR_EACH_MPLS_EOS_BIT(eos)
+ {
+ prefix.fp_eos = eos;
+ prefix.fp_payload_proto = fib_proto_to_dpo(payload_proto);
+
+ fib_entry_contribute_forwarding(fei,
+ (eos ?
+ FIB_FORW_CHAIN_TYPE_MPLS_EOS :
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS),
+ &dpo);
+ src->mpls.fesm_lfes[eos] =
+ fib_table_entry_special_dpo_add(fib_index,
+ &prefix,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ &dpo);
+ dpo_reset(&dpo);
+ }
+ }
+}
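+
+/*
+ * Illustrative sketch, not part of this commit: binding and unbinding a
+ * local label through the set_data hook above. The entry index 'fei' is
+ * hypothetical, and dispatch via fib_entry_set_source_data() is assumed.
+ *
+ *   mpls_label_t label = 100;
+ *   fib_entry_set_source_data(fei, FIB_SOURCE_MPLS, &label);
+ *
+ *   label = MPLS_LABEL_INVALID;
+ *   fib_entry_set_source_data(fei, FIB_SOURCE_MPLS, &label); // unbind
+ */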
+
+static const void *
+fib_entry_src_mpls_get_data (fib_entry_src_t *src,
+ const fib_entry_t *entry)
+{
+ return (&(src->mpls.fesm_label));
+}
+
+static u8*
+fib_entry_src_mpls_format (fib_entry_src_t *src,
+ u8* s)
+{
+ return (format(s, "MPLS local-label:%d", src->mpls.fesm_label));
+}
+
+const static fib_entry_src_vft_t mpls_src_vft = {
+ .fesv_init = fib_entry_src_mpls_init,
+ .fesv_deinit = fib_entry_src_mpls_deinit,
+ .fesv_add = fib_entry_src_mpls_add,
+ .fesv_remove = fib_entry_src_mpls_remove,
+ .fesv_format = fib_entry_src_mpls_format,
+ .fesv_set_data = fib_entry_src_mpls_set_data,
+ .fesv_get_data = fib_entry_src_mpls_get_data,
+ /*
+ * .fesv_fwd_update = fib_entry_src_mpls_fwd_update,
+ * When the forwarding for the IP entry is updated, any MPLS chains
+ * it has created are also updated. Since the MPLS entry will have already
+     * installed that chain/load-balance, there is no need to update the entry.
+     * FIXME: later: propagate any walk to the children of the MPLS entry, for SR
+ */
+};
+
+void
+fib_entry_src_mpls_register (void)
+{
+ fib_entry_src_register(FIB_SOURCE_MPLS, &mpls_src_vft);
+}
+
+
diff --git a/src/vnet/fib/fib_entry_src_rr.c b/src/vnet/fib/fib_entry_src_rr.c
new file mode 100644
index 00000000000..ff15c54e281
--- /dev/null
+++ b/src/vnet/fib/fib_entry_src_rr.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/ip/format.h>
+#include <vnet/ip/lookup.h>
+#include <vnet/adj/adj.h>
+#include <vnet/dpo/drop_dpo.h>
+
+#include "fib_entry_src.h"
+#include "fib_entry_cover.h"
+#include "fib_entry.h"
+#include "fib_table.h"
+
+/*
+ * fib_entry_src_rr_resolve_via_connected
+ *
+ * Resolve via a connected cover.
+ */
+static void
+fib_entry_src_rr_resolve_via_connected (fib_entry_src_t *src,
+ const fib_entry_t *fib_entry,
+ const fib_entry_t *cover)
+{
+ const fib_route_path_t path = {
+ .frp_proto = fib_entry->fe_prefix.fp_proto,
+ .frp_addr = fib_entry->fe_prefix.fp_addr,
+ .frp_sw_if_index = fib_entry_get_resolving_interface(
+ fib_entry_get_index(cover)),
+ .frp_fib_index = ~0,
+ .frp_weight = 1,
+ };
+ fib_route_path_t *paths = NULL;
+ vec_add1(paths, path);
+
+ /*
+     * Since the cover is connected, the address this entry corresponds
+     * to is a peer (i.e. ARP-able) on the interface to which the cover is
+     * connected. The fact we resolve via the cover just means this RR
+     * source is the first source to use said peer. The ARP source will be
+     * along shortly to over-rule this RR source.
+ */
+ src->fes_pl = fib_path_list_create(FIB_PATH_LIST_FLAG_NONE, paths);
+ src->fes_entry_flags = fib_entry_get_flags(fib_entry_get_index(cover));
+
+ vec_free(paths);
+}
+
+
+/**
+ * Source initialisation Function
+ */
+static void
+fib_entry_src_rr_init (fib_entry_src_t *src)
+{
+ src->rr.fesr_cover = FIB_NODE_INDEX_INVALID;
+ src->rr.fesr_sibling = FIB_NODE_INDEX_INVALID;
+}
+
+/*
+ * Source activation. Called when the source is the new best source on the entry
+ */
+static int
+fib_entry_src_rr_activate (fib_entry_src_t *src,
+ const fib_entry_t *fib_entry)
+{
+ fib_entry_t *cover;
+
+ /*
+     * Find the covering prefix and become a dependent thereof.
+     * For IP there should always be a cover, though it may be the default route.
+ * For MPLS there is never a cover.
+ */
+ if (FIB_PROTOCOL_MPLS == fib_entry->fe_prefix.fp_proto)
+ {
+ src->fes_pl = fib_path_list_create_special(FIB_PROTOCOL_MPLS,
+ FIB_PATH_LIST_FLAG_DROP,
+ NULL);
+ fib_path_list_lock(src->fes_pl);
+ return (!0);
+ }
+
+ src->rr.fesr_cover = fib_table_get_less_specific(fib_entry->fe_fib_index,
+ &fib_entry->fe_prefix);
+
+ ASSERT(FIB_NODE_INDEX_INVALID != src->rr.fesr_cover);
+
+ cover = fib_entry_get(src->rr.fesr_cover);
+
+ src->rr.fesr_sibling =
+ fib_entry_cover_track(cover, fib_entry_get_index(fib_entry));
+
+ /*
+     * If the cover is attached, then install an attached-host path
+     * (like an adj-fib). Otherwise inherit the forwarding from the cover.
+ */
+ if (FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags_i(cover))
+ {
+ fib_entry_src_rr_resolve_via_connected(src, fib_entry, cover);
+ }
+ else
+ {
+ /*
+         * Use the path-list of the cover, unless it would form a loop;
+         * that is, unless the cover is via this entry.
+         * If a loop were to form it would be a 1 level loop (i.e. X via X),
+         * and there would be 2 locks on the path-list; one since it's used
+         * by the cover, and 1 from here. The first lock will go when the
+         * cover is removed, the second, and last, when the covered walk
+         * occurs during the cover's removal - this is not a place where
+         * we can handle last lock gone.
+ * In short, don't let the loop form. The usual rules of 'we must
+ * let it form so we know when it breaks' don't apply here, since
+ * the loop will break when the cover changes, and this function
+ * will be called again when that happens.
+ */
+ fib_node_index_t *entries = NULL;
+ fib_protocol_t proto;
+
+ proto = fib_entry->fe_prefix.fp_proto;
+ vec_add1(entries, fib_entry_get_index(fib_entry));
+
+ if (fib_path_list_recursive_loop_detect(cover->fe_parent,
+ &entries))
+ {
+ src->fes_pl = fib_path_list_create_special(
+ proto,
+ FIB_PATH_LIST_FLAG_DROP,
+ drop_dpo_get(fib_proto_to_dpo(proto)));
+ }
+ else
+ {
+ src->fes_pl = cover->fe_parent;
+ }
+ vec_free(entries);
+
+ }
+ fib_path_list_lock(src->fes_pl);
+
+ /*
+ * return go for install
+ */
+ return (!0);
+}
+
+/**
+ * Source Deactivate.
+ * Called when the source is no longer best source on the entry
+ */
+static void
+fib_entry_src_rr_deactivate (fib_entry_src_t *src,
+ const fib_entry_t *fib_entry)
+{
+ fib_entry_t *cover;
+
+ /*
+     * remove the dependency on the covering entry
+ */
+ if (FIB_NODE_INDEX_INVALID != src->rr.fesr_cover)
+ {
+ cover = fib_entry_get(src->rr.fesr_cover);
+ fib_entry_cover_untrack(cover, src->rr.fesr_sibling);
+ src->rr.fesr_cover = FIB_NODE_INDEX_INVALID;
+ }
+
+ fib_path_list_unlock(src->fes_pl);
+ src->fes_pl = FIB_NODE_INDEX_INVALID;
+ src->fes_entry_flags = FIB_ENTRY_FLAG_NONE;
+}
+
+static fib_entry_src_cover_res_t
+fib_entry_src_rr_cover_change (fib_entry_src_t *src,
+ const fib_entry_t *fib_entry)
+{
+ fib_entry_src_cover_res_t res = {
+ .install = !0,
+ .bw_reason = FIB_NODE_BW_REASON_FLAG_NONE,
+ };
+
+ if (FIB_NODE_INDEX_INVALID == src->rr.fesr_cover)
+ {
+ /*
+ * the source may be added, but it is not active
+ * if it is not tracking the cover.
+ */
+ return (res);
+ }
+
+ /*
+     * This function is called when this entry's cover has a more specific
+     * entry inserted beneath it. That does not necessarily mean that this
+     * entry is covered by the new prefix; check that it is.
+ */
+ if (src->rr.fesr_cover != fib_table_get_less_specific(fib_entry->fe_fib_index,
+ &fib_entry->fe_prefix))
+ {
+ fib_entry_src_rr_deactivate(src, fib_entry);
+ fib_entry_src_rr_activate(src, fib_entry);
+
+ /*
+ * dependent children need to re-resolve to the new forwarding info
+ */
+ res.bw_reason = FIB_NODE_BW_REASON_FLAG_EVALUATE;
+ }
+ return (res);
+}
+
+/*
+ * fib_entry_src_rr_cover_update
+ *
+ * This entry's cover has updated its forwarding info. This entry
+ * will need to re-inherit.
+ */
+static fib_entry_src_cover_res_t
+fib_entry_src_rr_cover_update (fib_entry_src_t *src,
+ const fib_entry_t *fib_entry)
+{
+ fib_entry_src_cover_res_t res = {
+ .install = !0,
+ .bw_reason = FIB_NODE_BW_REASON_FLAG_NONE,
+ };
+ fib_node_index_t old_path_list;
+ fib_entry_t *cover;
+
+ if (FIB_NODE_INDEX_INVALID == src->rr.fesr_cover)
+ {
+ /*
+ * the source may be added, but it is not active
+ * if it is not tracking the cover.
+ */
+ return (res);
+ }
+
+ cover = fib_entry_get(src->rr.fesr_cover);
+ old_path_list = src->fes_pl;
+
+ /*
+     * If the cover is attached, then install an attached-host path
+     * (like an adj-fib). Otherwise inherit the forwarding from the cover.
+ */
+ if (FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags_i(cover))
+ {
+ fib_entry_src_rr_resolve_via_connected(src, fib_entry, cover);
+ }
+ else
+ {
+ src->fes_pl = cover->fe_parent;
+ }
+ fib_path_list_lock(src->fes_pl);
+ fib_path_list_unlock(old_path_list);
+
+ /*
+ * dependent children need to re-resolve to the new forwarding info
+ */
+ res.bw_reason = FIB_NODE_BW_REASON_FLAG_EVALUATE;
+
+ return (res);
+}
+
+static u8*
+fib_entry_src_rr_format (fib_entry_src_t *src,
+ u8* s)
+{
+ return (format(s, "cover:%d", src->rr.fesr_cover));
+}
+
+const static fib_entry_src_vft_t rr_src_vft = {
+ .fesv_init = fib_entry_src_rr_init,
+ .fesv_activate = fib_entry_src_rr_activate,
+ .fesv_deactivate = fib_entry_src_rr_deactivate,
+ .fesv_cover_change = fib_entry_src_rr_cover_change,
+ .fesv_cover_update = fib_entry_src_rr_cover_update,
+ .fesv_format = fib_entry_src_rr_format,
+};
+
+void
+fib_entry_src_rr_register (void)
+{
+ fib_entry_src_register(FIB_SOURCE_RR, &rr_src_vft);
+ fib_entry_src_register(FIB_SOURCE_URPF_EXEMPT, &rr_src_vft);
+}
diff --git a/src/vnet/fib/fib_entry_src_special.c b/src/vnet/fib/fib_entry_src_special.c
new file mode 100644
index 00000000000..52a6134e337
--- /dev/null
+++ b/src/vnet/fib/fib_entry_src_special.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fib_entry.h"
+#include "fib_entry_src.h"
+
+/**
+ * Source initialisation Function
+ */
+static void
+fib_entry_src_special_init (fib_entry_src_t *src)
+{
+ src->fes_flags = FIB_ENTRY_SRC_FLAG_NONE;
+}
+
+/**
+ * Source deinitialisation Function
+ */
+static void
+fib_entry_src_special_deinit (fib_entry_src_t *src)
+{
+}
+
+static void
+fib_entry_src_special_remove (fib_entry_src_t *src)
+{
+ src->fes_pl = FIB_NODE_INDEX_INVALID;
+}
+
+static void
+fib_entry_src_special_add (fib_entry_src_t *src,
+ const fib_entry_t *entry,
+ fib_entry_flag_t flags,
+ fib_protocol_t proto,
+ const dpo_id_t *dpo)
+{
+ src->fes_pl =
+ fib_path_list_create_special(proto,
+ fib_entry_src_flags_2_path_list_flags(flags),
+ dpo);
+}
+
+const static fib_entry_src_vft_t special_src_vft = {
+ .fesv_init = fib_entry_src_special_init,
+ .fesv_deinit = fib_entry_src_special_deinit,
+ .fesv_add = fib_entry_src_special_add,
+ .fesv_remove = fib_entry_src_special_remove,
+};
+
+void
+fib_entry_src_special_register (void)
+{
+ fib_entry_src_register(FIB_SOURCE_SPECIAL, &special_src_vft);
+ fib_entry_src_register(FIB_SOURCE_MAP, &special_src_vft);
+ fib_entry_src_register(FIB_SOURCE_SIXRD, &special_src_vft);
+ fib_entry_src_register(FIB_SOURCE_CLASSIFY, &special_src_vft);
+ fib_entry_src_register(FIB_SOURCE_SR, &special_src_vft);
+ fib_entry_src_register(FIB_SOURCE_AE, &special_src_vft);
+}
diff --git a/src/vnet/fib/fib_internal.h b/src/vnet/fib/fib_internal.h
new file mode 100644
index 00000000000..2d980bcce0a
--- /dev/null
+++ b/src/vnet/fib/fib_internal.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FIB_INTERNAL_H__
+#define __FIB_INTERNAL_H__
+
+#include <vnet/ip/ip.h>
+#include <vnet/dpo/dpo.h>
+
+/**
+ * Big train switch; FIB debugs on or off
+ */
+#undef FIB_DEBUG
+
+extern void fib_prefix_from_mpls_label(mpls_label_t label,
+ fib_prefix_t *prf);
+
+extern int fib_route_path_cmp(const fib_route_path_t *rpath1,
+ const fib_route_path_t *rpath2);
+
+/**
+ * @brief
+ * Add or update an entry in the FIB's forwarding table.
+ * This is called from the fib_entry code. It is not meant to be used
+ * by the client/source.
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix for the entry to add/update
+ *
+ * @param dpo
+ * The data-path object to use for forwarding
+ */
+extern void fib_table_fwding_dpo_update(u32 fib_index,
+ const fib_prefix_t *prefix,
+ const dpo_id_t *dpo);
+/**
+ * @brief
+ * remove an entry in the FIB's forwarding table
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix for the entry to add/update
+ *
+ * @param dpo
+ * The data-path object to use for forwarding
+ */
+extern void fib_table_fwding_dpo_remove(u32 fib_index,
+ const fib_prefix_t *prefix,
+ const dpo_id_t *dpo);
+
+
+#endif
diff --git a/src/vnet/fib/fib_node.c b/src/vnet/fib/fib_node.c
new file mode 100644
index 00000000000..db3e22bb3b8
--- /dev/null
+++ b/src/vnet/fib/fib_node.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/fib/fib_node.h>
+#include <vnet/fib/fib_node_list.h>
+
+/*
+ * The per-type vector of virtual function tables
+ */
+static fib_node_vft_t *fn_vfts;
+
+/**
+ * The last registered new type
+ */
+static fib_node_type_t last_new_type = FIB_NODE_TYPE_LAST;
+
+/*
+ * the node type names
+ */
+static const char *fn_type_names[] = FIB_NODE_TYPES;
+
+const char*
+fib_node_type_get_name (fib_node_type_t type)
+{
+ if (type < FIB_NODE_TYPE_LAST)
+ return (fn_type_names[type]);
+ else
+ {
+ if (NULL != fn_vfts[type].fnv_format)
+ {
+ return ("fixme");
+ }
+ else
+ {
+ return ("unknown");
+ }
+ }
+}
+
+/**
+ * fib_node_register_type
+ *
+ * Register the function table for a given type
+ */
+void
+fib_node_register_type (fib_node_type_t type,
+ const fib_node_vft_t *vft)
+{
+ /*
+     * assert that only one registration is made per node type
+ */
+ if (vec_len(fn_vfts) > type)
+ ASSERT(NULL == fn_vfts[type].fnv_get);
+
+ /*
+ * Assert that we are getting each of the required functions
+ */
+ ASSERT(NULL != vft->fnv_get);
+ ASSERT(NULL != vft->fnv_last_lock);
+
+ vec_validate(fn_vfts, type);
+ fn_vfts[type] = *vft;
+}
+
+fib_node_type_t
+fib_node_register_new_type (const fib_node_vft_t *vft)
+{
+ fib_node_type_t new_type;
+
+ new_type = ++last_new_type;
+
+ fib_node_register_type(new_type, vft);
+
+ return (new_type);
+}
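+
+/*
+ * Illustrative sketch, not part of this commit: a client registers a new
+ * graph node type. fnv_get and fnv_last_lock are mandatory (asserted in
+ * fib_node_register_type()); all the 'my_*' names are hypothetical.
+ *
+ *   static fib_node_t *
+ *   my_node_get (fib_node_index_t index)
+ *   {
+ *       return (&pool_elt_at_index(my_pool, index)->node);
+ *   }
+ *
+ *   const static fib_node_vft_t my_vft = {
+ *       .fnv_get = my_node_get,
+ *       .fnv_last_lock = my_last_lock,
+ *       .fnv_back_walk = my_back_walk,
+ *   };
+ *
+ *   my_type = fib_node_register_new_type(&my_vft);
+ */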
+
+static u8*
+fib_node_format (fib_node_ptr_t *fnp, u8*s)
+{
+ return (format(s, "{%s:%d}", fn_type_names[fnp->fnp_type], fnp->fnp_index));
+}
+
+u32
+fib_node_child_add (fib_node_type_t parent_type,
+ fib_node_index_t parent_index,
+ fib_node_type_t type,
+ fib_node_index_t index)
+{
+ fib_node_t *parent;
+
+ parent = fn_vfts[parent_type].fnv_get(parent_index);
+
+ /*
+ * return the index of the sibling in the child list
+ */
+ fib_node_lock(parent);
+
+ if (FIB_NODE_INDEX_INVALID == parent->fn_children)
+ {
+ parent->fn_children = fib_node_list_create();
+ }
+
+ return (fib_node_list_push_front(parent->fn_children,
+ 0, type,
+ index));
+}
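+
+/*
+ * Illustrative sketch, not part of this commit: a child stores the
+ * returned sibling index so it can detach later. The 'obj' names are
+ * hypothetical.
+ *
+ *   obj->sibling = fib_node_child_add(FIB_NODE_TYPE_ENTRY, parent_index,
+ *                                     my_type, my_index);
+ *   ...
+ *   fib_node_child_remove(FIB_NODE_TYPE_ENTRY, parent_index, obj->sibling);
+ */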
+
+void
+fib_node_child_remove (fib_node_type_t parent_type,
+ fib_node_index_t parent_index,
+ fib_node_index_t sibling_index)
+{
+ fib_node_t *parent;
+
+ parent = fn_vfts[parent_type].fnv_get(parent_index);
+
+ fib_node_list_remove(parent->fn_children, sibling_index);
+
+ if (0 == fib_node_list_get_size(parent->fn_children))
+ {
+ fib_node_list_destroy(&parent->fn_children);
+ }
+
+ fib_node_unlock(parent);
+}
+
+u32
+fib_node_get_n_children (fib_node_type_t parent_type,
+ fib_node_index_t parent_index)
+{
+ fib_node_t *parent;
+
+ parent = fn_vfts[parent_type].fnv_get(parent_index);
+
+ return (fib_node_list_get_size(parent->fn_children));
+}
+
+
+fib_node_back_walk_rc_t
+fib_node_back_walk_one (fib_node_ptr_t *ptr,
+ fib_node_back_walk_ctx_t *ctx)
+{
+ fib_node_t *node;
+
+ node = fn_vfts[ptr->fnp_type].fnv_get(ptr->fnp_index);
+
+ return (fn_vfts[ptr->fnp_type].fnv_back_walk(node, ctx));
+}
+
+static int
+fib_node_ptr_format_one_child (fib_node_ptr_t *ptr,
+ void *arg)
+{
+ u8 **s = (u8**) arg;
+
+ *s = fib_node_format(ptr, *s);
+
+ return (1);
+}
+
+u8*
+fib_node_children_format (fib_node_list_t list,
+ u8 *s)
+{
+ fib_node_list_walk(list, fib_node_ptr_format_one_child, (void*)&s);
+
+ return (s);
+}
+
+void
+fib_node_init (fib_node_t *node,
+ fib_node_type_t type)
+{
+#if CLIB_DEBUG > 0
+ /**
+ * The node's type. make sure we are dynamic/down casting correctly
+ */
+ node->fn_type = type;
+#endif
+ node->fn_locks = 0;
+ node->fn_vft = &fn_vfts[type];
+ node->fn_children = FIB_NODE_INDEX_INVALID;
+}
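+
+/*
+ * Illustrative sketch, not part of this commit: participating objects
+ * embed a fib_node_t, conventionally as the first member so the object
+ * can be recovered from a node pointer by a cast. Names are hypothetical.
+ *
+ *   typedef struct my_obj_t_ {
+ *       fib_node_t node;
+ *       u32 payload;
+ *   } my_obj_t;
+ *
+ *   fib_node_init(&obj->node, my_type);
+ */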
+
+void
+fib_node_deinit (fib_node_t *node)
+{
+ fib_node_list_destroy(&node->fn_children);
+}
+
+void
+fib_node_lock (fib_node_t *node)
+{
+ node->fn_locks++;
+}
+
+void
+fib_node_unlock (fib_node_t *node)
+{
+ node->fn_locks--;
+
+ if (0 == node->fn_locks)
+ {
+ node->fn_vft->fnv_last_lock(node);
+ }
+}
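+
+/*
+ * Illustrative sketch, not part of this commit: locks are reference
+ * counts; the type's fnv_last_lock callback fires on the final unlock,
+ * which is where the owning pool element is typically freed.
+ *
+ *   fib_node_lock(&obj->node);    // take a reference
+ *   ...
+ *   fib_node_unlock(&obj->node);  // may invoke fnv_last_lock
+ */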
+
+void
+fib_show_memory_usage (const char *name,
+ u32 in_use_elts,
+ u32 allocd_elts,
+ size_t size_elt)
+{
+ vlib_cli_output (vlib_get_main(), "%=30s %=5d %=8d/%=9d %d/%d ",
+ name, size_elt,
+ in_use_elts, allocd_elts,
+ in_use_elts*size_elt, allocd_elts*size_elt);
+}
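+
+/*
+ * Illustrative sketch, not part of this commit: a type's registered
+ * fnv_mem_show callback reports its pool via this helper (compare
+ * fib_node_list_memory_show()). The 'my_*' names are hypothetical.
+ *
+ *   static void
+ *   my_mem_show (void)
+ *   {
+ *       fib_show_memory_usage("My-objects",
+ *                             pool_elts(my_pool),
+ *                             pool_len(my_pool),
+ *                             sizeof(my_obj_t));
+ *   }
+ */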
+
+static clib_error_t *
+fib_memory_show (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ fib_node_vft_t *vft;
+
+ vlib_cli_output (vm, "FIB memory");
+ vlib_cli_output (vm, "%=30s %=5s %=8s/%=9s totals",
+ "Name","Size", "in-use", "allocated");
+
+ vec_foreach(vft, fn_vfts)
+ {
+ if (NULL != vft->fnv_mem_show)
+ vft->fnv_mem_show();
+ }
+
+ fib_node_list_memory_show();
+
+ return (NULL);
+}
+
+/* *INDENT-OFF* */
+/*?
+ * The '<em>show fib memory</em>' command displays the memory usage for each
+ * FIB object type.
+ *
+ * @cliexpar
+ * @cliexstart{show fib memory}
+ * FIB memory
+ * Name Size in-use /allocated totals
+ * Entry 120 11 / 11 1320/1320
+ * Entry Source 32 11 / 11 352/352
+ * Entry Path-Extensions 44 0 / 0 0/0
+ * Path-list 40 11 / 11 440/440
+ * Path 88 11 / 11 968/968
+ * Node-list elements 20 11 / 11 220/220
+ * Node-list heads 8 13 / 13 104/104
+ * @cliexend
+?*/
+VLIB_CLI_COMMAND (show_fib_memory, static) = {
+ .path = "show fib memory",
+ .function = fib_memory_show,
+ .short_help = "show fib memory",
+};
+/* *INDENT-ON* */
diff --git a/src/vnet/fib/fib_node.h b/src/vnet/fib/fib_node.h
new file mode 100644
index 00000000000..3ad8ee95b64
--- /dev/null
+++ b/src/vnet/fib/fib_node.h
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FIB_NODE_H__
+#define __FIB_NODE_H__
+
+#include <vnet/fib/fib_types.h>
+
+/**
+ * The types of nodes in a FIB graph
+ */
+typedef enum fib_node_type_t_ {
+ /**
+ * Marker. New types after this one.
+ */
+ FIB_NODE_TYPE_FIRST = 0,
+ /**
+ * See the respective fib_*.h files for descriptions of these objects.
+ */
+ FIB_NODE_TYPE_WALK,
+ FIB_NODE_TYPE_ENTRY,
+ FIB_NODE_TYPE_PATH_LIST,
+ FIB_NODE_TYPE_PATH,
+ FIB_NODE_TYPE_ADJ,
+ FIB_NODE_TYPE_MPLS_ENTRY,
+ FIB_NODE_TYPE_MPLS_TUNNEL,
+ FIB_NODE_TYPE_LISP_GPE_FWD_ENTRY,
+ FIB_NODE_TYPE_LISP_ADJ,
+ FIB_NODE_TYPE_GRE_TUNNEL,
+ FIB_NODE_TYPE_VXLAN_TUNNEL,
+ /**
+     * Marker. New types before this one. Leave the test type last.
+ */
+ FIB_NODE_TYPE_TEST,
+ FIB_NODE_TYPE_LAST = FIB_NODE_TYPE_TEST,
+} fib_node_type_t;
+
+#define FIB_NODE_TYPE_MAX (FIB_NODE_TYPE_LAST + 1)
+
+#define FIB_NODE_TYPES { \
+ [FIB_NODE_TYPE_ENTRY] = "entry", \
+ [FIB_NODE_TYPE_WALK] = "walk", \
+ [FIB_NODE_TYPE_PATH_LIST] = "path-list", \
+ [FIB_NODE_TYPE_PATH] = "path", \
+ [FIB_NODE_TYPE_MPLS_ENTRY] = "mpls-entry", \
+ [FIB_NODE_TYPE_MPLS_TUNNEL] = "mpls-tunnel", \
+ [FIB_NODE_TYPE_ADJ] = "adj", \
+ [FIB_NODE_TYPE_LISP_GPE_FWD_ENTRY] = "lisp-gpe-fwd-entry", \
+ [FIB_NODE_TYPE_LISP_ADJ] = "lisp-adj", \
+ [FIB_NODE_TYPE_GRE_TUNNEL] = "gre-tunnel", \
+ [FIB_NODE_TYPE_VXLAN_TUNNEL] = "vxlan-tunnel", \
+}
+
+/**
+ * Reasons for backwalking the FIB object graph
+ */
+typedef enum fib_node_back_walk_reason_t_ {
+ /**
+ * Marker. Add new ones after.
+ */
+ FIB_NODE_BW_REASON_FIRST = 0,
+ /**
+ * Walk to re-resolve the child.
+ * Used when the parent is no longer a valid resolution target
+ */
+ FIB_NODE_BW_REASON_RESOLVE = FIB_NODE_BW_REASON_FIRST,
+ /**
+ * Walk to re-evaluate the forwarding contributed by the parent.
+ * Used when a parent's forwarding changes and the child needs to
+ * incorporate this change in its forwarding.
+ */
+ FIB_NODE_BW_REASON_EVALUATE,
+ /**
+ * A resolving interface has come up
+ */
+ FIB_NODE_BW_REASON_INTERFACE_UP,
+ /**
+ * A resolving interface has gone down
+ */
+ FIB_NODE_BW_REASON_INTERFACE_DOWN,
+ /**
+ * A resolving interface has been deleted.
+ */
+ FIB_NODE_BW_REASON_INTERFACE_DELETE,
+ /**
+ * Walk to re-collapse the multipath adjs when the rewrite of
+ * a unipath adjacency changes
+ */
+ FIB_NODE_BW_REASON_ADJ_UPDATE,
+ /**
+ * Walk to update children to inform them the adjacency is now down.
+ */
+ FIB_NODE_BW_REASON_ADJ_DOWN,
+ /**
+     * Marker. Add new reasons before this one, then update it.
+ */
+ FIB_NODE_BW_REASON_LAST = FIB_NODE_BW_REASON_ADJ_DOWN,
+} fib_node_back_walk_reason_t;
+
+#define FIB_NODE_BW_REASONS { \
+ [FIB_NODE_BW_REASON_RESOLVE] = "resolve", \
+ [FIB_NODE_BW_REASON_EVALUATE] = "evaluate", \
+ [FIB_NODE_BW_REASON_INTERFACE_UP] = "if-up", \
+ [FIB_NODE_BW_REASON_INTERFACE_DOWN] = "if-down", \
+ [FIB_NODE_BW_REASON_INTERFACE_DELETE] = "if-delete", \
+ [FIB_NODE_BW_REASON_ADJ_UPDATE] = "adj-update", \
+ [FIB_NODE_BW_REASON_ADJ_DOWN] = "adj-down", \
+}
+
+#define FOR_EACH_FIB_NODE_BW_REASON(_item) \
+ for (_item = FIB_NODE_BW_REASON_FIRST; \
+ _item <= FIB_NODE_BW_REASON_LAST; \
+ _item++)
+
+/**
+ * Flags enum constructed from the reasons
+ */
+typedef enum fib_node_bw_reason_flag_t_ {
+ FIB_NODE_BW_REASON_FLAG_NONE = 0,
+ FIB_NODE_BW_REASON_FLAG_RESOLVE = (1 << FIB_NODE_BW_REASON_RESOLVE),
+ FIB_NODE_BW_REASON_FLAG_EVALUATE = (1 << FIB_NODE_BW_REASON_EVALUATE),
+ FIB_NODE_BW_REASON_FLAG_INTERFACE_UP = (1 << FIB_NODE_BW_REASON_INTERFACE_UP),
+ FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN = (1 << FIB_NODE_BW_REASON_INTERFACE_DOWN),
+ FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE = (1 << FIB_NODE_BW_REASON_INTERFACE_DELETE),
+ FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE = (1 << FIB_NODE_BW_REASON_ADJ_UPDATE),
+ FIB_NODE_BW_REASON_FLAG_ADJ_DOWN = (1 << FIB_NODE_BW_REASON_ADJ_DOWN),
+} __attribute__ ((packed)) fib_node_bw_reason_flag_t;
+
+STATIC_ASSERT(sizeof(fib_node_bw_reason_flag_t) < 2,
+ "BW Reason enum < 2 byte. Consequences for cover_upd_res_t");
+
+/**
+ * Flags on the walk
+ */
+typedef enum fib_node_bw_flags_t_
+{
+ /**
+ * Force the walk to be synchronous
+ */
+ FIB_NODE_BW_FLAG_FORCE_SYNC = (1 << 0),
+} fib_node_bw_flags_t;
+
+/**
+ * Forward declarations
+ */
+struct fib_node_t_;
+
+/**
+ * A representation of one pointer to another node.
+ * To fully qualify a node, one must know its type and its index so it
+ * can be retrieved from the appropriate pool. Direct pointers to nodes
+ * are forbidden, since all nodes are allocated from pools, which are vectors,
+ * and thus subject to realloc at any time.
+ */
+typedef struct fib_node_ptr_t_ {
+ /**
+ * node type
+ */
+ fib_node_type_t fnp_type;
+ /**
+ * node's index
+ */
+ fib_node_index_t fnp_index;
+} fib_node_ptr_t;
+
+/**
+ * @brief A list of FIB nodes.
+ */
+typedef u32 fib_node_list_t;
+
+/**
+ * Context passed between object during a back walk.
+ */
+typedef struct fib_node_back_walk_ctx_t_ {
+ /**
+ * The reason/trigger for the backwalk
+ */
+ fib_node_bw_reason_flag_t fnbw_reason;
+
+ /**
+ * additional flags for the walk
+ */
+ fib_node_bw_flags_t fnbw_flags;
+
+ /**
+     * The number of levels the walk has already traversed.
+     * This value is maintained by the walk infra, to limit the depth of
+     * a walk so it does not run indefinitely in the presence of a loop/cycle
+     * in the graph.
+ */
+ u32 fnbw_depth;
+} fib_node_back_walk_ctx_t;
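+
+/*
+ * Illustrative sketch, not part of this commit: a back-walk handler
+ * tests the reason flags in the context to decide how to react.
+ *
+ *   if (ctx->fnbw_reason & (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN |
+ *                           FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE))
+ *   {
+ *       // the resolving interface is gone; re-resolve this node
+ *   }
+ */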
+
+/**
+ * We consider a depth of 32 to be sufficient to cover all sane
+ * network topologies. Anything more is then an indication that
+ * there is a loop/cycle in the FIB graph.
+ * Note that all object types contribute 1 to the depth.
+ */
+#define FIB_NODE_GRAPH_MAX_DEPTH ((u32)32)
+
+/**
+ * A callback function for walking a node dependency list
+ */
+typedef int (*fib_node_ptr_walk_t)(fib_node_ptr_t *depend,
+ void *ctx);
+
+/**
+ * A list of dependent nodes.
+ * This is currently implemented as a hash_table of fib_node_ptr_t
+ */
+typedef fib_node_ptr_t fib_node_ptr_list_t;
+
+/**
+ * Return code from a back walk function
+ */
+typedef enum fib_node_back_walk_rc_t_ {
+ FIB_NODE_BACK_WALK_MERGE,
+ FIB_NODE_BACK_WALK_CONTINUE,
+} fib_node_back_walk_rc_t;
+
+/**
+ * Function definition to backwalk a FIB node
+ */
+typedef fib_node_back_walk_rc_t (*fib_node_back_walk_t)(
+ struct fib_node_t_ *node,
+ fib_node_back_walk_ctx_t *ctx);
+
+/**
+ * Function definition to get a FIB node from its index
+ */
+typedef struct fib_node_t_* (*fib_node_get_t)(fib_node_index_t index);
+
+/**
+ * Function definition to inform the FIB node that its last lock has gone.
+ */
+typedef void (*fib_node_last_lock_gone_t)(struct fib_node_t_ *node);
+
+/**
+ * Function definition to display the amount of memory used by a type.
+ * Implementations should call fib_show_memory_usage()
+ */
+typedef void (*fib_node_memory_show_t)(void);
+
+/**
+ * A FIB graph nodes virtual function table
+ */
+typedef struct fib_node_vft_t_ {
+ fib_node_get_t fnv_get;
+ fib_node_last_lock_gone_t fnv_last_lock;
+ fib_node_back_walk_t fnv_back_walk;
+ format_function_t *fnv_format;
+ fib_node_memory_show_t fnv_mem_show;
+} fib_node_vft_t;
+
+/**
+ * A node in the FIB graph
+ *
+ * Objects in the FIB form a graph.
+ */
+typedef struct fib_node_t_ {
+#if CLIB_DEBUG > 0
+ /**
+ * The node's type. make sure we are dynamic/down casting correctly
+ */
+ fib_node_type_t fn_type;
+#endif
+ /**
+ * The node's VFT.
+     * We could store the type here instead, and look up the VFT using that.
+     * But I like this better.
+ */
+ const fib_node_vft_t *fn_vft;
+
+ /**
+ * Vector of nodes that depend upon/use/share this node
+ */
+ fib_node_list_t fn_children;
+
+ /**
+ * Number of dependents on this node. This number includes the number
+ * of children
+ */
+ u32 fn_locks;
+} fib_node_t;
+
+/**
+ * @brief
+ * Register the function table for a given type
+ *
+ * @param ft
+ * FIB node type
+ *
+ * @param vft
+ * virtual function table
+ */
+extern void fib_node_register_type (fib_node_type_t ft,
+ const fib_node_vft_t *vft);
+
+/**
+ * @brief
+ * Create a new FIB node type and Register the function table for it.
+ *
+ * @param vft
+ * virtual function table
+ *
+ * @return new FIB node type
+ */
+extern fib_node_type_t fib_node_register_new_type (const fib_node_vft_t *vft);
+
+/**
+ * @brief Show the memory usage for a type
+ *
+ * This should be invoked by the type in response to the infra calling
+ * its registered memory show function
+ *
+ * @param name the name of the type
+ * @param in_use_elts The number of elements in use
+ * @param allocd_elts The number of allocated pool elements
+ * @param size_elt The size of one element
+ */
+extern void fib_show_memory_usage(const char *name,
+ u32 in_use_elts,
+ u32 allocd_elts,
+ size_t size_elt);
+
+extern void fib_node_init(fib_node_t *node,
+ fib_node_type_t ft);
+extern void fib_node_deinit(fib_node_t *node);
+
+extern void fib_node_lock(fib_node_t *node);
+extern void fib_node_unlock(fib_node_t *node);
+
+extern u32 fib_node_get_n_children(fib_node_type_t parent_type,
+ fib_node_index_t parent_index);
+extern u32 fib_node_child_add(fib_node_type_t parent_type,
+ fib_node_index_t parent_index,
+ fib_node_type_t child_type,
+ fib_node_index_t child_index);
+extern void fib_node_child_remove(fib_node_type_t parent_type,
+ fib_node_index_t parent_index,
+ fib_node_index_t sibling_index);
+
+extern fib_node_back_walk_rc_t fib_node_back_walk_one(fib_node_ptr_t *ptr,
+ fib_node_back_walk_ctx_t *ctx);
+
+extern u8* fib_node_children_format(fib_node_list_t list,
+ u8 *s);
+
+extern const char* fib_node_type_get_name(fib_node_type_t type);
+
+static inline int
+fib_node_index_is_valid (fib_node_index_t ni)
+{
+ return (FIB_NODE_INDEX_INVALID != ni);
+}
+
+#endif
+
diff --git a/src/vnet/fib/fib_node_list.c b/src/vnet/fib/fib_node_list.c
new file mode 100644
index 00000000000..ceb951b466b
--- /dev/null
+++ b/src/vnet/fib/fib_node_list.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief a list of FIB nodes that is heterogeneous w.r.t. FIB node type.
+ * Since we cannot use C pointers, due to memory reallocs, the next/prev
+ * are described as key:{type,index}.
+ */
+
+#include <vnet/fib/fib_node_list.h>
+
+/**
+ * @brief An element in the list
+ */
+typedef struct fib_node_list_elt_t_
+{
+ /**
+ * The index of the list this element is in
+ */
+ fib_node_list_t fnle_list;
+
+ /**
+ * The owner of this element
+ */
+ fib_node_ptr_t fnle_owner;
+
+ /**
+ * The next element in the list
+ */
+ u32 fnle_next;
+
+ /**
+ * The previous element in the list
+ */
+ u32 fnle_prev;
+} fib_node_list_elt_t;
+
+/**
+ * @brief A list of FIB nodes
+ */
+typedef struct fib_node_list_head_t_
+{
+ /**
+ * The head element
+ */
+ u32 fnlh_head;
+
+ /**
+ * Number of elements in the list
+ */
+ u32 fnlh_n_elts;
+} fib_node_list_head_t;
+
+/**
+ * Pools of list elements and heads
+ */
+static fib_node_list_elt_t *fib_node_list_elt_pool;
+static fib_node_list_head_t *fib_node_list_head_pool;
+
+static index_t
+fib_node_list_elt_get_index (fib_node_list_elt_t *elt)
+{
+ return (elt - fib_node_list_elt_pool);
+}
+
+static fib_node_list_elt_t *
+fib_node_list_elt_get (index_t fi)
+{
+ return (pool_elt_at_index(fib_node_list_elt_pool, fi));
+}
+
+static index_t
+fib_node_list_head_get_index (fib_node_list_head_t *head)
+{
+ return (head - fib_node_list_head_pool);
+}
+static fib_node_list_head_t *
+fib_node_list_head_get (fib_node_list_t fi)
+{
+ return (pool_elt_at_index(fib_node_list_head_pool, fi));
+}
+
+static fib_node_list_elt_t *
+fib_node_list_elt_create (fib_node_list_head_t *head,
+ int id,
+ fib_node_type_t type,
+ fib_node_index_t index)
+{
+ fib_node_list_elt_t *elt;
+
+ pool_get(fib_node_list_elt_pool, elt);
+
+ elt->fnle_list = fib_node_list_head_get_index(head);
+ elt->fnle_owner.fnp_type = type;
+ elt->fnle_owner.fnp_index = index;
+
+ elt->fnle_next = FIB_NODE_INDEX_INVALID;
+ elt->fnle_prev = FIB_NODE_INDEX_INVALID;
+
+ return (elt);
+}
+
+static void
+fib_node_list_head_init (fib_node_list_head_t *head)
+{
+ head->fnlh_n_elts = 0;
+ head->fnlh_head = FIB_NODE_INDEX_INVALID;
+}
+
+/**
+ * @brief Create a new node list.
+ */
+fib_node_list_t
+fib_node_list_create (void)
+{
+ fib_node_list_head_t *head;
+
+ pool_get(fib_node_list_head_pool, head);
+
+ fib_node_list_head_init(head);
+
+ return (fib_node_list_head_get_index(head));
+}
+
+void
+fib_node_list_destroy (fib_node_list_t *list)
+{
+ fib_node_list_head_t *head;
+
+ if (FIB_NODE_INDEX_INVALID == *list)
+ return;
+
+ head = fib_node_list_head_get(*list);
+ ASSERT(0 == head->fnlh_n_elts);
+
+ pool_put(fib_node_list_head_pool, head);
+ *list = FIB_NODE_INDEX_INVALID;
+}
+
+
+/**
+ * @brief Insert an element at the front of the list.
+ */
+u32
+fib_node_list_push_front (fib_node_list_t list,
+ int owner_id,
+ fib_node_type_t type,
+ fib_node_index_t index)
+{
+ fib_node_list_elt_t *elt, *next;
+ fib_node_list_head_t *head;
+
+ head = fib_node_list_head_get(list);
+ elt = fib_node_list_elt_create(head, owner_id, type, index);
+
+ elt->fnle_prev = FIB_NODE_INDEX_INVALID;
+ elt->fnle_next = head->fnlh_head;
+
+ if (FIB_NODE_INDEX_INVALID != head->fnlh_head)
+ {
+ next = fib_node_list_elt_get(head->fnlh_head);
+ next->fnle_prev = fib_node_list_elt_get_index(elt);
+ }
+ head->fnlh_head = fib_node_list_elt_get_index(elt);
+
+ head->fnlh_n_elts++;
+
+ return (fib_node_list_elt_get_index(elt));
+}
+
+u32
+fib_node_list_push_back (fib_node_list_t list,
+ int owner_id,
+ fib_node_type_t type,
+ fib_node_index_t index)
+{
+ ASSERT(0);
+ return (FIB_NODE_INDEX_INVALID);
+}
+
+static void
+fib_node_list_extract (fib_node_list_head_t *head,
+ fib_node_list_elt_t *elt)
+{
+ fib_node_list_elt_t *next, *prev;
+
+ if (FIB_NODE_INDEX_INVALID != elt->fnle_next)
+ {
+ next = fib_node_list_elt_get(elt->fnle_next);
+ next->fnle_prev = elt->fnle_prev;
+ }
+
+ if (FIB_NODE_INDEX_INVALID != elt->fnle_prev)
+ {
+ prev = fib_node_list_elt_get(elt->fnle_prev);
+ prev->fnle_next = elt->fnle_next;
+ }
+ else
+ {
+ ASSERT (fib_node_list_elt_get_index(elt) == head->fnlh_head);
+ head->fnlh_head = elt->fnle_next;
+ }
+}
+
+static void
+fib_node_list_insert_after (fib_node_list_head_t *head,
+ fib_node_list_elt_t *prev,
+ fib_node_list_elt_t *elt)
+{
+ fib_node_list_elt_t *next;
+
+ elt->fnle_next = prev->fnle_next;
+ if (FIB_NODE_INDEX_INVALID != prev->fnle_next)
+ {
+ next = fib_node_list_elt_get(prev->fnle_next);
+ next->fnle_prev = fib_node_list_elt_get_index(elt);
+ }
+ prev->fnle_next = fib_node_list_elt_get_index(elt);
+ elt->fnle_prev = fib_node_list_elt_get_index(prev);
+}
+
+void
+fib_node_list_remove (fib_node_list_t list,
+ u32 sibling)
+{
+ fib_node_list_head_t *head;
+ fib_node_list_elt_t *elt;
+
+ head = fib_node_list_head_get(list);
+ elt = fib_node_list_elt_get(sibling);
+
+ fib_node_list_extract(head, elt);
+
+ head->fnlh_n_elts--;
+ pool_put(fib_node_list_elt_pool, elt);
+}
+
+void
+fib_node_list_elt_remove (u32 sibling)
+{
+ fib_node_list_elt_t *elt;
+
+ elt = fib_node_list_elt_get(sibling);
+
+ fib_node_list_remove(elt->fnle_list, sibling);
+}
+
+/**
+ * @brief Advance the sibling one step (toward the tail) in the list.
+ * return 0 if at the end of the list, 1 otherwise.
+ */
+int
+fib_node_list_advance (u32 sibling)
+{
+ fib_node_list_elt_t *elt, *next;
+ fib_node_list_head_t *head;
+
+ elt = fib_node_list_elt_get(sibling);
+ head = fib_node_list_head_get(elt->fnle_list);
+
+ if (FIB_NODE_INDEX_INVALID != elt->fnle_next)
+ {
+ /*
+ * not at the end of the list
+ */
+ next = fib_node_list_elt_get(elt->fnle_next);
+
+ fib_node_list_extract(head, elt);
+ fib_node_list_insert_after(head, next, elt);
+
+ return (1);
+ }
+ else
+ {
+ return (0);
+ }
+}
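+
+/*
+ * Illustrative sketch, not part of this commit: advance lets a caller
+ * step its own element toward the tail, e.g. to resume an interruptible
+ * visit of its peers from where it left off.
+ *
+ *   while (fib_node_list_advance(sibling))
+ *   {
+ *       // 'sibling' has moved one step toward the tail
+ *   }
+ */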
+
+int
+fib_node_list_elt_get_next (u32 sibling,
+ fib_node_ptr_t *ptr)
+{
+ fib_node_list_elt_t *elt, *next;
+
+ elt = fib_node_list_elt_get(sibling);
+
+ if (FIB_NODE_INDEX_INVALID != elt->fnle_next)
+ {
+ next = fib_node_list_elt_get(elt->fnle_next);
+
+ *ptr = next->fnle_owner;
+ return (1);
+ }
+ else
+ {
+ ptr->fnp_index = FIB_NODE_INDEX_INVALID;
+ return (0);
+ }
+}
+
+u32
+fib_node_list_get_size (fib_node_list_t list)
+{
+ fib_node_list_head_t *head;
+
+ if (FIB_NODE_INDEX_INVALID == list)
+ {
+ return (0);
+ }
+
+ head = fib_node_list_head_get(list);
+
+ return (head->fnlh_n_elts);
+}
+
+int
+fib_node_list_get_front (fib_node_list_t list,
+ fib_node_ptr_t *ptr)
+{
+ fib_node_list_head_t *head;
+ fib_node_list_elt_t *elt;
+
+
+ if (0 == fib_node_list_get_size(list))
+ {
+ ptr->fnp_index = FIB_NODE_INDEX_INVALID;
+ return (0);
+ }
+
+ head = fib_node_list_head_get(list);
+ elt = fib_node_list_elt_get(head->fnlh_head);
+
+ *ptr = elt->fnle_owner;
+
+ return (1);
+}
+
+/**
+ * @brief Walk the list of nodes. This must be safe w.r.t. the removal
+ * of nodes during the walk.
+ */
+void
+fib_node_list_walk (fib_node_list_t list,
+ fib_node_list_walk_cb_t fn,
+ void *args)
+{
+ fib_node_list_elt_t *elt;
+ fib_node_list_head_t *head;
+ u32 sibling;
+
+ if (FIB_NODE_INDEX_INVALID == list)
+ {
+ return;
+ }
+
+ head = fib_node_list_head_get(list);
+ sibling = head->fnlh_head;
+
+ while (FIB_NODE_INDEX_INVALID != sibling)
+ {
+ elt = fib_node_list_elt_get(sibling);
+ sibling = elt->fnle_next;
+
+ fn(&elt->fnle_owner, args);
+ }
+}
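+
+/*
+ * Illustrative sketch, not part of this commit: a walk callback that
+ * counts owners of a given type. The walk ignores the callback's return
+ * value at present, so 1 is returned by convention.
+ *
+ *   static int
+ *   count_adjs (fib_node_ptr_t *owner, void *args)
+ *   {
+ *       u32 *count = args;
+ *
+ *       if (FIB_NODE_TYPE_ADJ == owner->fnp_type)
+ *           (*count)++;
+ *       return (1);
+ *   }
+ *
+ *   u32 n_adjs = 0;
+ *   fib_node_list_walk(list, count_adjs, &n_adjs);
+ */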
+
+void
+fib_node_list_memory_show (void)
+{
+ fib_show_memory_usage("Node-list elements",
+ pool_elts(fib_node_list_elt_pool),
+ pool_len(fib_node_list_elt_pool),
+ sizeof(fib_node_list_elt_t));
+ fib_show_memory_usage("Node-list heads",
+ pool_elts(fib_node_list_head_pool),
+ pool_len(fib_node_list_head_pool),
+ sizeof(fib_node_list_head_t));
+}
diff --git a/src/vnet/fib/fib_node_list.h b/src/vnet/fib/fib_node_list.h
new file mode 100644
index 00000000000..9567b9669e8
--- /dev/null
+++ b/src/vnet/fib/fib_node_list.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief a list of FIB nodes that is heterogeneous w.r.t. FIB node type.
+ * Since we cannot use C pointers, due to memory reallocs, the next/prev
+ * are described as an index to an element. Each element contains a pointer
+ * (key:{type, index}) to a FIB node.
+ */
+
+#ifndef __FIB_NODE_LIST_H__
+#define __FIB_NODE_LIST_H__
+
+#include <vnet/fib/fib_node.h>
+
+extern fib_node_list_t fib_node_list_create(void);
+extern void fib_node_list_destroy(fib_node_list_t *list);
+
+extern u32 fib_node_list_push_front(fib_node_list_t head,
+ int owner_id,
+ fib_node_type_t type,
+ fib_node_index_t index);
+extern u32 fib_node_list_push_back(fib_node_list_t head,
+ int owner_id,
+ fib_node_type_t type,
+ fib_node_index_t index);
+extern void fib_node_list_remove(fib_node_list_t head,
+ u32 sibling);
+extern void fib_node_list_elt_remove(u32 sibling);
+
+extern int fib_node_list_advance(u32 sibling);
+
+extern int fib_node_list_get_front(fib_node_list_t head,
+ fib_node_ptr_t *ptr);
+
+extern int fib_node_list_elt_get_next(u32 elt,
+ fib_node_ptr_t *ptr);
+
+extern u32 fib_node_list_get_size(fib_node_list_t head);
+
+/**
+ * @brief Callback function invoked during a list walk
+ */
+typedef int (*fib_node_list_walk_cb_t)(fib_node_ptr_t *owner,
+ void *args);
+
+extern void fib_node_list_walk(fib_node_list_t head,
+ fib_node_list_walk_cb_t fn,
+ void *args);
+
+extern void fib_node_list_memory_show(void);
+
+#endif
diff --git a/src/vnet/fib/fib_path.c b/src/vnet/fib/fib_path.c
new file mode 100644
index 00000000000..809e3e166da
--- /dev/null
+++ b/src/vnet/fib/fib_path.c
@@ -0,0 +1,2001 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/format.h>
+#include <vnet/ip/ip.h>
+#include <vnet/dpo/drop_dpo.h>
+#include <vnet/dpo/receive_dpo.h>
+#include <vnet/dpo/load_balance_map.h>
+#include <vnet/dpo/lookup_dpo.h>
+
+#include <vnet/adj/adj.h>
+
+#include <vnet/fib/fib_path.h>
+#include <vnet/fib/fib_node.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_path_list.h>
+#include <vnet/fib/fib_internal.h>
+#include <vnet/fib/fib_urpf_list.h>
+
+/**
+ * Enumeration of path types
+ */
+typedef enum fib_path_type_t_ {
+ /**
+ * Marker. Add new types after this one.
+ */
+ FIB_PATH_TYPE_FIRST = 0,
+ /**
+ * Attached-nexthop. An interface and a nexthop are known.
+ */
+ FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
+ /**
+ * attached. Only the interface is known.
+ */
+ FIB_PATH_TYPE_ATTACHED,
+ /**
+ * recursive. Only the next-hop is known.
+ */
+ FIB_PATH_TYPE_RECURSIVE,
+ /**
+     * special. Nothing is known, so we drop.
+ */
+ FIB_PATH_TYPE_SPECIAL,
+ /**
+ * exclusive. user provided adj.
+ */
+ FIB_PATH_TYPE_EXCLUSIVE,
+ /**
+ * deag. Link to a lookup adj in the next table
+ */
+ FIB_PATH_TYPE_DEAG,
+ /**
+ * receive. it's for-us.
+ */
+ FIB_PATH_TYPE_RECEIVE,
+ /**
+ * Marker. Add new types before this one, then update it.
+ */
+ FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_RECEIVE,
+} __attribute__ ((packed)) fib_path_type_t;
+
+/**
+ * The maximum number of path_types
+ */
+#define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
+
+#define FIB_PATH_TYPES { \
+ [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop", \
+ [FIB_PATH_TYPE_ATTACHED] = "attached", \
+ [FIB_PATH_TYPE_RECURSIVE] = "recursive", \
+ [FIB_PATH_TYPE_SPECIAL] = "special", \
+ [FIB_PATH_TYPE_EXCLUSIVE] = "exclusive", \
+ [FIB_PATH_TYPE_DEAG] = "deag", \
+ [FIB_PATH_TYPE_RECEIVE] = "receive", \
+}
+
+#define FOR_EACH_FIB_PATH_TYPE(_item) \
+ for (_item = FIB_PATH_TYPE_FIRST; _item <= FIB_PATH_TYPE_LAST; _item++)
+
+/**
+ * Enumeration of path operational (i.e. derived) attributes
+ */
+typedef enum fib_path_oper_attribute_t_ {
+ /**
+ * Marker. Add new types after this one.
+ */
+ FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
+ /**
+ * The path forms part of a recursive loop.
+ */
+ FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
+ /**
+ * The path is resolved
+ */
+ FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
+ /**
+ * The path has become a permanent drop.
+ */
+ FIB_PATH_OPER_ATTRIBUTE_DROP,
+ /**
+ * Marker. Add new types before this one, then update it.
+ */
+ FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
+} __attribute__ ((packed)) fib_path_oper_attribute_t;
+
+/**
+ * The maximum number of path operational attributes
+ */
+#define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
+
+#define FIB_PATH_OPER_ATTRIBUTES { \
+ [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop", \
+ [FIB_PATH_OPER_ATTRIBUTE_RESOLVED] = "resolved", \
+ [FIB_PATH_OPER_ATTRIBUTE_DROP] = "drop", \
+}
+
+#define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
+ for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
+ _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
+ _item++)
+
+/**
+ * Path flags from the attributes
+ */
+typedef enum fib_path_oper_flags_t_ {
+ FIB_PATH_OPER_FLAG_NONE = 0,
+ FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
+ FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
+ FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
+} __attribute__ ((packed)) fib_path_oper_flags_t;
+
+/**
+ * A FIB path
+ */
+typedef struct fib_path_t_ {
+ /**
+ * A path is a node in the FIB graph.
+ */
+ fib_node_t fp_node;
+
+ /**
+ * The index of the path-list to which this path belongs
+ */
+ u32 fp_pl_index;
+
+ /**
+ * This marks the start of the memory area used to hash
+ * the path
+ */
+ STRUCT_MARK(path_hash_start);
+
+ /**
+ * Configuration Flags
+ */
+ fib_path_cfg_flags_t fp_cfg_flags;
+
+ /**
+ * The type of the path. This is the selector for the union
+ */
+ fib_path_type_t fp_type;
+
+ /**
+ * The protocol of the next-hop, i.e. the address family of the
+ * next-hop's address. We can't derive this from the address itself
+ * since the address can be all zeros
+ */
+ fib_protocol_t fp_nh_proto;
+
+ /**
+     * UCMP [unnormalised] weight
+ */
+ u32 fp_weight;
+
+ /**
+ * per-type union of the data required to resolve the path
+ */
+ union {
+ struct {
+ /**
+ * The next-hop
+ */
+ ip46_address_t fp_nh;
+ /**
+ * The interface
+ */
+ u32 fp_interface;
+ } attached_next_hop;
+ struct {
+ /**
+ * The interface
+ */
+ u32 fp_interface;
+ } attached;
+ struct {
+ union
+ {
+ /**
+ * The next-hop
+ */
+ ip46_address_t fp_ip;
+ /**
+ * The local label to resolve through.
+ */
+ mpls_label_t fp_local_label;
+ } fp_nh;
+ /**
+ * The FIB table index in which to find the next-hop.
+ * This needs to be fixed. We should lookup the adjacencies in
+ * a separate table of adjacencies, rather than from the FIB.
+ * Two reasons I can think of:
+ * - consider:
+ * int ip addr Gig0 10.0.0.1/24
+ * ip route 10.0.0.2/32 via Gig1 192.168.1.2
+ * ip route 1.1.1.1/32 via Gig0 10.0.0.2
+ * this is perfectly valid.
+ * Packets addressed to 10.0.0.2 should be sent via Gig1.
+         * Packets addressed to 1.1.1.1 should be sent via Gig0.
+ * when we perform the adj resolution from the FIB for the path
+ * "via Gig0 10.0.0.2" the lookup will result in the route via Gig1
+ * and so we will pick up the adj via Gig1 - which was not what the
+ * operator wanted.
+ * - we can only return link-type IPv4 and so not the link-type MPLS.
+ * more on this in a later commit.
+ *
+ * The table ID should only belong to a recursive path and indicate
+ * which FIB should be used to resolve the next-hop.
+ */
+ fib_node_index_t fp_tbl_id;
+ } recursive;
+ struct {
+ /**
+         * The FIB index in which to perform the next lookup
+ */
+ fib_node_index_t fp_tbl_id;
+ } deag;
+ struct {
+ } special;
+ struct {
+ /**
+ * The user provided 'exclusive' DPO
+ */
+ dpo_id_t fp_ex_dpo;
+ } exclusive;
+ struct {
+ /**
+ * The interface on which the local address is configured
+ */
+ u32 fp_interface;
+ /**
+ * The next-hop
+ */
+ ip46_address_t fp_addr;
+ } receive;
+ };
+ STRUCT_MARK(path_hash_end);
+
+ /**
+     * Members in this last section represent information that is
+     * derived during resolution. It should not be copied to new paths
+ * nor compared.
+ */
+
+ /**
+ * Operational Flags
+ */
+ fib_path_oper_flags_t fp_oper_flags;
+
+ /**
+     * The resolving via-fib. Not part of the union, since it is not part
+ * of the path's hash.
+ */
+ fib_node_index_t fp_via_fib;
+
+ /**
+ * The Data-path objects through which this path resolves for IP.
+ */
+ dpo_id_t fp_dpo;
+
+ /**
+ * the index of this path in the parent's child list.
+ */
+ u32 fp_sibling;
+} fib_path_t;
+
+/*
+ * Array of strings/names for the path types and attributes
+ */
+static const char *fib_path_type_names[] = FIB_PATH_TYPES;
+static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
+static const char *fib_path_cfg_attribute_names[] = FIB_PATH_CFG_ATTRIBUTES;
+
+/*
+ * The memory pool from which we allocate all the paths
+ */
+static fib_path_t *fib_path_pool;
+
+/*
+ * Debug macro
+ */
+#ifdef FIB_DEBUG
+#define FIB_PATH_DBG(_p, _fmt, _args...) \
+{ \
+ u8 *_tmp = NULL; \
+ _tmp = fib_path_format(fib_path_get_index(_p), _tmp); \
+ clib_warning("path:[%d:%s]:" _fmt, \
+ fib_path_get_index(_p), _tmp, \
+ ##_args); \
+ vec_free(_tmp); \
+}
+#else
+#define FIB_PATH_DBG(_p, _fmt, _args...)
+#endif
+
+static fib_path_t *
+fib_path_get (fib_node_index_t index)
+{
+ return (pool_elt_at_index(fib_path_pool, index));
+}
+
+static fib_node_index_t
+fib_path_get_index (fib_path_t *path)
+{
+ return (path - fib_path_pool);
+}
+
+static fib_node_t *
+fib_path_get_node (fib_node_index_t index)
+{
+ return ((fib_node_t*)fib_path_get(index));
+}
+
+static fib_path_t*
+fib_path_from_fib_node (fib_node_t *node)
+{
+#if CLIB_DEBUG > 0
+ ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
+#endif
+ return ((fib_path_t*)node);
+}
+
+u8 *
+format_fib_path (u8 * s, va_list * args)
+{
+ fib_path_t *path = va_arg (*args, fib_path_t *);
+ vnet_main_t * vnm = vnet_get_main();
+ fib_path_oper_attribute_t oattr;
+ fib_path_cfg_attribute_t cattr;
+
+ s = format (s, " index:%d ", fib_path_get_index(path));
+ s = format (s, "pl-index:%d ", path->fp_pl_index);
+ s = format (s, "%U ", format_fib_protocol, path->fp_nh_proto);
+ s = format (s, "weight=%d ", path->fp_weight);
+ s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
+ if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
+ s = format(s, " oper-flags:");
+ FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
+ if ((1<<oattr) & path->fp_oper_flags) {
+ s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
+ }
+ }
+ }
+ if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
+ s = format(s, " cfg-flags:");
+ FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
+ if ((1<<cattr) & path->fp_cfg_flags) {
+ s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
+ }
+ }
+ }
+ s = format(s, "\n ");
+
+ switch (path->fp_type)
+ {
+ case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+ s = format (s, "%U", format_ip46_address,
+ &path->attached_next_hop.fp_nh,
+ IP46_TYPE_ANY);
+ if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
+ {
+ s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
+ }
+ else
+ {
+ s = format (s, " %U",
+ format_vnet_sw_interface_name,
+ vnm,
+ vnet_get_sw_interface(
+ vnm,
+ path->attached_next_hop.fp_interface));
+ if (vnet_sw_interface_is_p2p(vnet_get_main(),
+ path->attached_next_hop.fp_interface))
+ {
+ s = format (s, " (p2p)");
+ }
+ }
+ if (!dpo_id_is_valid(&path->fp_dpo))
+ {
+ s = format(s, "\n unresolved");
+ }
+ else
+ {
+ s = format(s, "\n %U",
+ format_dpo_id,
+ &path->fp_dpo, 13);
+ }
+ break;
+ case FIB_PATH_TYPE_ATTACHED:
+ if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
+ {
+ s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
+ }
+ else
+ {
+ s = format (s, " %U",
+ format_vnet_sw_interface_name,
+ vnm,
+ vnet_get_sw_interface(
+ vnm,
+ path->attached.fp_interface));
+ }
+ break;
+ case FIB_PATH_TYPE_RECURSIVE:
+ if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
+ {
+ s = format (s, "via %U",
+ format_mpls_unicast_label,
+ path->recursive.fp_nh.fp_local_label);
+ }
+ else
+ {
+ s = format (s, "via %U",
+ format_ip46_address,
+ &path->recursive.fp_nh.fp_ip,
+ IP46_TYPE_ANY);
+ }
+        s = format (s, " in fib:%d",
+                    path->recursive.fp_tbl_id);
+ s = format (s, " via-fib:%d", path->fp_via_fib);
+ s = format (s, " via-dpo:[%U:%d]",
+ format_dpo_type, path->fp_dpo.dpoi_type,
+ path->fp_dpo.dpoi_index);
+
+ break;
+ case FIB_PATH_TYPE_RECEIVE:
+ case FIB_PATH_TYPE_SPECIAL:
+ case FIB_PATH_TYPE_DEAG:
+ case FIB_PATH_TYPE_EXCLUSIVE:
+ if (dpo_id_is_valid(&path->fp_dpo))
+ {
+ s = format(s, "%U", format_dpo_id,
+ &path->fp_dpo, 2);
+ }
+ break;
+ }
+ return (s);
+}
+
+u8 *
+fib_path_format (fib_node_index_t pi, u8 *s)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(pi);
+ ASSERT(NULL != path);
+
+ return (format (s, "%U", format_fib_path, path));
+}
+
+u8 *
+fib_path_adj_format (fib_node_index_t pi,
+ u32 indent,
+ u8 *s)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(pi);
+ ASSERT(NULL != path);
+
+ if (!dpo_id_is_valid(&path->fp_dpo))
+ {
+ s = format(s, " unresolved");
+ }
+ else
+ {
+ s = format(s, "%U", format_dpo_id,
+ &path->fp_dpo, 2);
+ }
+
+ return (s);
+}
+
+/*
+ * fib_path_last_lock_gone
+ *
+ * We don't share paths, we share path lists, so the [un]lock functions
+ * are no-ops
+ */
+static void
+fib_path_last_lock_gone (fib_node_t *node)
+{
+ ASSERT(0);
+}
+
+static adj_index_t
+fib_path_attached_next_hop_get_adj (fib_path_t *path,
+ vnet_link_t link)
+{
+ if (vnet_sw_interface_is_p2p(vnet_get_main(),
+ path->attached_next_hop.fp_interface))
+ {
+ /*
+         * If the interface is p2p then the adj for the specific
+         * neighbour on that link will never exist. On p2p links
+         * the subnet address (the attached route) links to the
+         * auto-adj (see below); we want that adj here too.
+ */
+ return (adj_nbr_add_or_lock(path->fp_nh_proto,
+ link,
+ &zero_addr,
+ path->attached_next_hop.fp_interface));
+ }
+ else
+ {
+ return (adj_nbr_add_or_lock(path->fp_nh_proto,
+ link,
+ &path->attached_next_hop.fp_nh,
+ path->attached_next_hop.fp_interface));
+ }
+}
+
+static void
+fib_path_attached_next_hop_set (fib_path_t *path)
+{
+ /*
+     * resolve directly via the adjacency described by the
+ * interface and next-hop
+ */
+ if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
+ path->attached_next_hop.fp_interface))
+ {
+ path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
+ }
+
+ dpo_set(&path->fp_dpo,
+ DPO_ADJACENCY,
+ fib_proto_to_dpo(path->fp_nh_proto),
+ fib_path_attached_next_hop_get_adj(
+ path,
+ fib_proto_to_link(path->fp_nh_proto)));
+
+ /*
+ * become a child of the adjacency so we receive updates
+ * when its rewrite changes
+ */
+ path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
+ FIB_NODE_TYPE_PATH,
+ fib_path_get_index(path));
+}
+
+/*
+ * create or update the path's recursive adj
+ */
+static void
+fib_path_recursive_adj_update (fib_path_t *path,
+ fib_forward_chain_type_t fct,
+ dpo_id_t *dpo)
+{
+ dpo_id_t via_dpo = DPO_INVALID;
+
+ /*
+ * get the DPO to resolve through from the via-entry
+ */
+ fib_entry_contribute_forwarding(path->fp_via_fib,
+ fct,
+ &via_dpo);
+
+
+ /*
+ * hope for the best - clear if restrictions apply.
+ */
+ path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
+
+ /*
+     * Validate any recursion constraints and override the via
+     * adj if they are not met
+ */
+ if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
+ {
+ path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
+ dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
+ }
+ else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
+ {
+ /*
+	 * the via FIB must be a host route.
+	 * note the via FIB just added will always be a host route,
+	 * since it is an RR-source-added host route. So what we need to
+	 * check is whether the route has other sources. If it does, then
+	 * some other source has added it as a host route. If it doesn't,
+	 * then it was added only here and inherits forwarding from a cover;
+	 * the cover is not a host route.
+	 * The RR source is the lowest priority source, so we check if it
+	 * is the best. If it is, there are no other sources.
+ */
+ if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
+ {
+ path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
+ dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
+
+ /*
+ * PIC edge trigger. let the load-balance maps know
+ */
+ load_balance_map_path_state_change(fib_path_get_index(path));
+ }
+ }
+ else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
+ {
+ /*
+ * RR source entries inherit the flags from the cover, so
+ * we can check the via directly
+ */
+ if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
+ {
+ path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
+ dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
+
+ /*
+ * PIC edge trigger. let the load-balance maps know
+ */
+ load_balance_map_path_state_change(fib_path_get_index(path));
+ }
+ }
+
+ /*
+ * update the path's contributed DPO
+ */
+ dpo_copy(dpo, &via_dpo);
+
+ FIB_PATH_DBG(path, "recursive update: %U",
+ fib_get_lookup_main(path->fp_nh_proto),
+ &path->fp_dpo, 2);
+
+ dpo_reset(&via_dpo);
+}
+
+/*
+ * fib_path_is_permanent_drop
+ *
+ * Return !0 if the path is configured to permanently drop,
+ * despite other attributes.
+ */
+static int
+fib_path_is_permanent_drop (fib_path_t *path)
+{
+ return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
+ (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
+}
+
+/*
+ * fib_path_unresolve
+ *
+ * Remove our dependency on the resolution target
+ */
+static void
+fib_path_unresolve (fib_path_t *path)
+{
+ /*
+ * the forced drop path does not need unresolving
+ */
+ if (fib_path_is_permanent_drop(path))
+ {
+ return;
+ }
+
+ switch (path->fp_type)
+ {
+ case FIB_PATH_TYPE_RECURSIVE:
+ if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
+ {
+ fib_prefix_t pfx;
+
+ fib_entry_get_prefix(path->fp_via_fib, &pfx);
+ fib_entry_child_remove(path->fp_via_fib,
+ path->fp_sibling);
+ fib_table_entry_special_remove(path->recursive.fp_tbl_id,
+ &pfx,
+ FIB_SOURCE_RR);
+ path->fp_via_fib = FIB_NODE_INDEX_INVALID;
+ }
+ break;
+ case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+ case FIB_PATH_TYPE_ATTACHED:
+ adj_child_remove(path->fp_dpo.dpoi_index,
+ path->fp_sibling);
+ adj_unlock(path->fp_dpo.dpoi_index);
+ break;
+ case FIB_PATH_TYPE_EXCLUSIVE:
+ dpo_reset(&path->exclusive.fp_ex_dpo);
+ break;
+ case FIB_PATH_TYPE_SPECIAL:
+ case FIB_PATH_TYPE_RECEIVE:
+ case FIB_PATH_TYPE_DEAG:
+ /*
+ * these hold only the path's DPO, which is reset below.
+ */
+ break;
+ }
+
+ /*
+ * release the adj we were holding and pick up the
+ * drop just in case.
+ */
+ dpo_reset(&path->fp_dpo);
+ path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
+
+ return;
+}
+
+static fib_forward_chain_type_t
+fib_path_proto_to_chain_type (fib_protocol_t proto)
+{
+ switch (proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+ case FIB_PROTOCOL_IP6:
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6);
+ case FIB_PROTOCOL_MPLS:
+ return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
+ }
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+}
+
+/*
+ * fib_path_back_walk_notify
+ *
+ * A back walk has reached this path.
+ */
+static fib_node_back_walk_rc_t
+fib_path_back_walk_notify (fib_node_t *node,
+ fib_node_back_walk_ctx_t *ctx)
+{
+ fib_path_t *path;
+
+ path = fib_path_from_fib_node(node);
+
+ switch (path->fp_type)
+ {
+ case FIB_PATH_TYPE_RECURSIVE:
+ if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
+ {
+ /*
+ * modify the recursive adjacency to use the new forwarding
+ * of the via-fib.
+ * this update is visible to packets in flight in the DP.
+ */
+ fib_path_recursive_adj_update(
+ path,
+ fib_path_proto_to_chain_type(path->fp_nh_proto),
+ &path->fp_dpo);
+ }
+ if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
+ (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason))
+ {
+ /*
+ * ADJ updates (complete<->incomplete) do not need to propagate to
+ * recursive entries.
+	     * The only reason it's needed as far back as here is that the adj
+ * and the incomplete adj are a different DPO type, so the LBs need
+ * to re-stack.
+ * If this walk was quashed in the fib_entry, then any non-fib_path
+ * children (like tunnels that collapse out the LB when they stack)
+ * would not see the update.
+ */
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+ }
+ break;
+ case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+	/*
+	 * ADJ_UPDATE back-walks pass silently through here and up to
+	 * the path-list when the multipath adj collapse occurs.
+	 * The reason we do this is the assumption that VPP
+	 * runs in an environment where the control-plane is remote
+	 * and hence reacts slowly to link up/down. In order to remove
+	 * this downed link from the ECMP set quickly, we back-walk.
+	 * VPP also has dedicated CPUs, so we are not stealing resources
+	 * from the CP to do so.
+	 */
+ if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
+ {
+ if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
+ {
+ /*
+		 * already resolved. no need to walk back again
+ */
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+ }
+ path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
+ }
+ if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
+ {
+ if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
+ {
+ /*
+		 * already unresolved. no need to walk back again
+ */
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+ }
+ path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
+ }
+ if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
+ {
+ /*
+ * The interface this path resolves through has been deleted.
+ * This will leave the path in a permanent drop state. The route
+ * needs to be removed and readded (and hence the path-list deleted)
+ * before it can forward again.
+ */
+ fib_path_unresolve(path);
+ path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
+ }
+ if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
+ {
+ /*
+ * restack the DPO to pick up the correct DPO sub-type
+ */
+ uword if_is_up;
+ adj_index_t ai;
+
+ if_is_up = vnet_sw_interface_is_admin_up(
+ vnet_get_main(),
+ path->attached_next_hop.fp_interface);
+
+ if (if_is_up)
+ {
+ path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
+ }
+
+ ai = fib_path_attached_next_hop_get_adj(
+ path,
+ fib_proto_to_link(path->fp_nh_proto));
+
+ dpo_set(&path->fp_dpo, DPO_ADJACENCY,
+ fib_proto_to_dpo(path->fp_nh_proto),
+ ai);
+ adj_unlock(ai);
+
+ if (!if_is_up)
+ {
+ /*
+ * If the interface is not up there is no reason to walk
+		 * back to children. If we did, they would only evaluate
+		 * that this path is unresolved and hence it would
+		 * not contribute the adjacency, so it would be wasted
+		 * CPU time.
+ */
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+ }
+ }
+ if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
+ {
+ if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
+ {
+ /*
+		 * already unresolved. no need to walk back again
+ */
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+ }
+ /*
+ * the adj has gone down. the path is no longer resolved.
+ */
+ path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
+ }
+ break;
+ case FIB_PATH_TYPE_ATTACHED:
+ /*
+     * FIXME: this could schedule a lower-priority walk, since attached
+ * routes are not usually in ECMP configurations so the backwalk to
+ * the FIB entry does not need to be high priority
+ */
+ if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
+ {
+ path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
+ }
+ if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
+ {
+ path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
+ }
+ if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
+ {
+ fib_path_unresolve(path);
+ path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
+ }
+ break;
+ case FIB_PATH_TYPE_DEAG:
+ /*
+ * FIXME When VRF delete is allowed this will need a poke.
+ */
+ case FIB_PATH_TYPE_SPECIAL:
+ case FIB_PATH_TYPE_RECEIVE:
+ case FIB_PATH_TYPE_EXCLUSIVE:
+ /*
+	 * these path types have no parents, so to be
+	 * walked from one is unexpected.
+ */
+ ASSERT(0);
+ break;
+ }
+
+ /*
+ * propagate the backwalk further to the path-list
+ */
+ fib_path_list_back_walk(path->fp_pl_index, ctx);
+
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+static void
+fib_path_memory_show (void)
+{
+ fib_show_memory_usage("Path",
+ pool_elts(fib_path_pool),
+ pool_len(fib_path_pool),
+ sizeof(fib_path_t));
+}
+
+/*
+ * The FIB path's graph node virtual function table
+ */
+static const fib_node_vft_t fib_path_vft = {
+ .fnv_get = fib_path_get_node,
+ .fnv_last_lock = fib_path_last_lock_gone,
+ .fnv_back_walk = fib_path_back_walk_notify,
+ .fnv_mem_show = fib_path_memory_show,
+};
+
+static fib_path_cfg_flags_t
+fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
+{
+ fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
+
+ if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
+ cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
+ if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
+ cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
+
+ return (cfg_flags);
+}
+
+/*
+ * fib_path_create
+ *
+ * Create and initialise a new path object.
+ * return the index of the path.
+ */
+fib_node_index_t
+fib_path_create (fib_node_index_t pl_index,
+ fib_protocol_t nh_proto,
+ fib_path_cfg_flags_t flags,
+ const fib_route_path_t *rpath)
+{
+ fib_path_t *path;
+
+ pool_get(fib_path_pool, path);
+ memset(path, 0, sizeof(*path));
+
+ fib_node_init(&path->fp_node,
+ FIB_NODE_TYPE_PATH);
+
+ dpo_reset(&path->fp_dpo);
+ path->fp_pl_index = pl_index;
+ path->fp_nh_proto = nh_proto;
+ path->fp_via_fib = FIB_NODE_INDEX_INVALID;
+ path->fp_weight = rpath->frp_weight;
+ if (0 == path->fp_weight)
+ {
+ /*
+	 * a weight of 0 is a meaningless value. We could either reject it, and thus force
+	 * clients to always use 1, or we can accept it and fix it up appropriately.
+ */
+ path->fp_weight = 1;
+ }
+ path->fp_cfg_flags = flags;
+ path->fp_cfg_flags |= fib_path_route_flags_to_cfg_flags(rpath);
+
+ /*
+     * deduce the path's type from the parameters and save what is needed.
+ */
+ if (~0 != rpath->frp_sw_if_index)
+ {
+ if (flags & FIB_PATH_CFG_FLAG_LOCAL)
+ {
+ path->fp_type = FIB_PATH_TYPE_RECEIVE;
+ path->receive.fp_interface = rpath->frp_sw_if_index;
+ path->receive.fp_addr = rpath->frp_addr;
+ }
+ else
+ {
+ if (ip46_address_is_zero(&rpath->frp_addr))
+ {
+ path->fp_type = FIB_PATH_TYPE_ATTACHED;
+ path->attached.fp_interface = rpath->frp_sw_if_index;
+ }
+ else
+ {
+ path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
+ path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
+ path->attached_next_hop.fp_nh = rpath->frp_addr;
+ }
+ }
+ }
+ else
+ {
+ if (ip46_address_is_zero(&rpath->frp_addr))
+ {
+ if (~0 == rpath->frp_fib_index)
+ {
+ path->fp_type = FIB_PATH_TYPE_SPECIAL;
+ }
+ else
+ {
+ path->fp_type = FIB_PATH_TYPE_DEAG;
+ path->deag.fp_tbl_id = rpath->frp_fib_index;
+ }
+ }
+ else
+ {
+ path->fp_type = FIB_PATH_TYPE_RECURSIVE;
+ if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
+ {
+ path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
+ }
+ else
+ {
+ path->recursive.fp_nh.fp_ip = rpath->frp_addr;
+ }
+ path->recursive.fp_tbl_id = rpath->frp_fib_index;
+ }
+ }
+
+ FIB_PATH_DBG(path, "create");
+
+ return (fib_path_get_index(path));
+}
+
+/*
+ * fib_path_create_special
+ *
+ * Create and initialise a new path object.
+ * return the index of the path.
+ */
+fib_node_index_t
+fib_path_create_special (fib_node_index_t pl_index,
+ fib_protocol_t nh_proto,
+ fib_path_cfg_flags_t flags,
+ const dpo_id_t *dpo)
+{
+ fib_path_t *path;
+
+ pool_get(fib_path_pool, path);
+ memset(path, 0, sizeof(*path));
+
+ fib_node_init(&path->fp_node,
+ FIB_NODE_TYPE_PATH);
+ dpo_reset(&path->fp_dpo);
+
+ path->fp_pl_index = pl_index;
+ path->fp_weight = 1;
+ path->fp_nh_proto = nh_proto;
+ path->fp_via_fib = FIB_NODE_INDEX_INVALID;
+ path->fp_cfg_flags = flags;
+
+ if (FIB_PATH_CFG_FLAG_DROP & flags)
+ {
+ path->fp_type = FIB_PATH_TYPE_SPECIAL;
+ }
+ else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
+ {
+ path->fp_type = FIB_PATH_TYPE_RECEIVE;
+	path->receive.fp_interface = FIB_NODE_INDEX_INVALID;
+ }
+ else
+ {
+ path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
+ ASSERT(NULL != dpo);
+ dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
+ }
+
+ return (fib_path_get_index(path));
+}
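+/*
+ * Usage sketch (illustrative only; pl_index is a hypothetical index
+ * supplied by the owning path-list): a forced-drop special path needs
+ * no DPO, the path resolves via the drop:
+ *
+ *   fib_node_index_t pi;
+ *
+ *   pi = fib_path_create_special(pl_index,
+ *                                FIB_PROTOCOL_IP4,
+ *                                FIB_PATH_CFG_FLAG_DROP,
+ *                                NULL);
+ */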
+
+/*
+ * fib_path_copy
+ *
+ * Copy a path. Return the index of the new path.
+ */
+fib_node_index_t
+fib_path_copy (fib_node_index_t path_index,
+ fib_node_index_t path_list_index)
+{
+ fib_path_t *path, *orig_path;
+
+ pool_get(fib_path_pool, path);
+
+ orig_path = fib_path_get(path_index);
+ ASSERT(NULL != orig_path);
+
+ memcpy(path, orig_path, sizeof(*path));
+
+ FIB_PATH_DBG(path, "create-copy:%d", path_index);
+
+ /*
+ * reset the dynamic section
+ */
+ fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
+ path->fp_oper_flags = FIB_PATH_OPER_FLAG_NONE;
+ path->fp_pl_index = path_list_index;
+ path->fp_via_fib = FIB_NODE_INDEX_INVALID;
+ memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
+ dpo_reset(&path->fp_dpo);
+
+ return (fib_path_get_index(path));
+}
+
+/*
+ * fib_path_destroy
+ *
+ * destroy a path that is no longer required
+ */
+void
+fib_path_destroy (fib_node_index_t path_index)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+
+ ASSERT(NULL != path);
+ FIB_PATH_DBG(path, "destroy");
+
+ fib_path_unresolve(path);
+
+ fib_node_deinit(&path->fp_node);
+ pool_put(fib_path_pool, path);
+}
+
+/*
+ * fib_path_hash
+ *
+ * compute the hash of the path's type-identifying fields
+ */
+uword
+fib_path_hash (fib_node_index_t path_index)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+
+ return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
+ (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
+ STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
+ 0));
+}
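+/*
+ * A sketch of the marker scheme above (assuming the vppinfra
+ * STRUCT_MARK helpers): fib_path_t brackets its type-identifying
+ * fields with two zero-length markers,
+ *
+ *   STRUCT_MARK(path_hash_start);
+ *   ... the fields that identify the path ...
+ *   STRUCT_MARK(path_hash_end);
+ *
+ * so hash_memory() covers exactly that byte range and two equivalent
+ * paths hash equal regardless of their dynamic state.
+ */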
+
+/*
+ * fib_path_cmp_i
+ *
+ * Compare two paths for equivalence.
+ */
+static int
+fib_path_cmp_i (const fib_path_t *path1,
+ const fib_path_t *path2)
+{
+ int res;
+
+ res = 1;
+
+ /*
+     * paths of different types and protocols are not equal.
+     * paths that differ only in weight are considered the same.
+ */
+ if (path1->fp_type != path2->fp_type)
+ {
+ res = (path1->fp_type - path2->fp_type);
+ }
+    else if (path1->fp_nh_proto != path2->fp_nh_proto)
+ {
+ res = (path1->fp_nh_proto - path2->fp_nh_proto);
+ }
+ else
+ {
+ /*
+	 * both paths are of the same type and protocol.
+ * consider each type and its attributes in turn.
+ */
+ switch (path1->fp_type)
+ {
+ case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+ res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
+ &path2->attached_next_hop.fp_nh);
+ if (0 == res) {
+ res = vnet_sw_interface_compare(
+ vnet_get_main(),
+ path1->attached_next_hop.fp_interface,
+ path2->attached_next_hop.fp_interface);
+ }
+ break;
+ case FIB_PATH_TYPE_ATTACHED:
+ res = vnet_sw_interface_compare(
+ vnet_get_main(),
+ path1->attached.fp_interface,
+ path2->attached.fp_interface);
+ break;
+ case FIB_PATH_TYPE_RECURSIVE:
+ res = ip46_address_cmp(&path1->recursive.fp_nh,
+ &path2->recursive.fp_nh);
+
+ if (0 == res)
+ {
+ res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
+ }
+ break;
+ case FIB_PATH_TYPE_DEAG:
+ res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
+ break;
+ case FIB_PATH_TYPE_SPECIAL:
+ case FIB_PATH_TYPE_RECEIVE:
+ case FIB_PATH_TYPE_EXCLUSIVE:
+ res = 0;
+ break;
+ }
+ }
+ return (res);
+}
+
+/*
+ * fib_path_cmp_for_sort
+ *
+ * Compare two paths for equivalence. Used during path sorting.
+ * As usual 0 means equal.
+ */
+int
+fib_path_cmp_for_sort (void * v1,
+ void * v2)
+{
+ fib_node_index_t *pi1 = v1, *pi2 = v2;
+ fib_path_t *path1, *path2;
+
+ path1 = fib_path_get(*pi1);
+ path2 = fib_path_get(*pi2);
+
+ return (fib_path_cmp_i(path1, path2));
+}
+
+/*
+ * fib_path_cmp
+ *
+ * Compare two paths for equivalence.
+ */
+int
+fib_path_cmp (fib_node_index_t pi1,
+ fib_node_index_t pi2)
+{
+ fib_path_t *path1, *path2;
+
+ path1 = fib_path_get(pi1);
+ path2 = fib_path_get(pi2);
+
+ return (fib_path_cmp_i(path1, path2));
+}
+
+int
+fib_path_cmp_w_route_path (fib_node_index_t path_index,
+ const fib_route_path_t *rpath)
+{
+ fib_path_t *path;
+ int res;
+
+ path = fib_path_get(path_index);
+
+ res = 1;
+
+ if (path->fp_weight != rpath->frp_weight)
+ {
+ res = (path->fp_weight - rpath->frp_weight);
+ }
+ else
+ {
+ /*
+	 * the weights are the same.
+ * consider each type and its attributes in turn.
+ */
+ switch (path->fp_type)
+ {
+ case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+ res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
+ &rpath->frp_addr);
+ if (0 == res)
+ {
+ res = vnet_sw_interface_compare(
+ vnet_get_main(),
+ path->attached_next_hop.fp_interface,
+ rpath->frp_sw_if_index);
+ }
+ break;
+ case FIB_PATH_TYPE_ATTACHED:
+ res = vnet_sw_interface_compare(
+ vnet_get_main(),
+ path->attached.fp_interface,
+ rpath->frp_sw_if_index);
+ break;
+ case FIB_PATH_TYPE_RECURSIVE:
+ if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
+ {
+ res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
+ }
+ else
+ {
+ res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
+ &rpath->frp_addr);
+ }
+
+ if (0 == res)
+ {
+ res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
+ }
+ break;
+ case FIB_PATH_TYPE_DEAG:
+ res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
+ break;
+ case FIB_PATH_TYPE_SPECIAL:
+ case FIB_PATH_TYPE_RECEIVE:
+ case FIB_PATH_TYPE_EXCLUSIVE:
+ res = 0;
+ break;
+ }
+ }
+ return (res);
+}
+
+/*
+ * fib_path_recursive_loop_detect
+ *
+ * A forward walk of the FIB object graph to detect a cycle/loop. This
+ * walk is initiated when an entry is linking to a new path-list or from an
+ * old one. The entry vector passed contains all the FIB entries that are
+ * children of this path (it is all the entries encountered on the walk so
+ * far). If this vector contains the entry this path resolves via, then a
+ * loop is about to form. The loop must be allowed to form, since we need
+ * the dependencies in place so that we can track when the loop breaks.
+ * However, we MUST NOT produce a loop in the forwarding graph (else packets
+ * would loop around the switch path until the loop breaks), so we mark
+ * recursive paths as looped so that they do not contribute forwarding
+ * information.
+ * By marking the path as looped, an entry such as:
+ *    X/Y
+ *     via a.a.a.a (looped)
+ *     via b.b.b.b (not looped)
+ * can still forward using the info provided by b.b.b.b only.
+ */
+int
+fib_path_recursive_loop_detect (fib_node_index_t path_index,
+ fib_node_index_t **entry_indicies)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+
+ /*
+     * the forced drop path is never looped, because it is never resolved.
+ */
+ if (fib_path_is_permanent_drop(path))
+ {
+ return (0);
+ }
+
+ switch (path->fp_type)
+ {
+ case FIB_PATH_TYPE_RECURSIVE:
+ {
+ fib_node_index_t *entry_index, *entries;
+ int looped = 0;
+ entries = *entry_indicies;
+
+ vec_foreach(entry_index, entries) {
+ if (*entry_index == path->fp_via_fib)
+ {
+ /*
+ * the entry that is about to link to this path-list (or
+ * one of this path-list's children) is the same entry that
+ * this recursive path resolves through. this is a cycle.
+ * abort the walk.
+ */
+ looped = 1;
+ break;
+ }
+ }
+
+ if (looped)
+ {
+ FIB_PATH_DBG(path, "recursive loop formed");
+ path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
+
+ dpo_copy(&path->fp_dpo,
+ drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
+ }
+ else
+ {
+ /*
+ * no loop here yet. keep forward walking the graph.
+ */
+ if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
+ {
+ FIB_PATH_DBG(path, "recursive loop formed");
+ path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
+ }
+ else
+ {
+ FIB_PATH_DBG(path, "recursive loop cleared");
+ path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
+ }
+ }
+ break;
+ }
+ case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+ case FIB_PATH_TYPE_ATTACHED:
+ case FIB_PATH_TYPE_SPECIAL:
+ case FIB_PATH_TYPE_DEAG:
+ case FIB_PATH_TYPE_RECEIVE:
+ case FIB_PATH_TYPE_EXCLUSIVE:
+ /*
+ * these path types cannot be part of a loop, since they are the leaves
+ * of the graph.
+ */
+ break;
+ }
+
+ return (fib_path_is_looped(path_index));
+}
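+/*
+ * Illustrative scenario (addresses hypothetical): two recursive routes
+ * that resolve through each other,
+ *
+ *   ip route add 1.1.1.1/32 via 2.2.2.2
+ *   ip route add 2.2.2.2/32 via 1.1.1.1
+ *
+ * When the second entry links to its path-list, the entry vector passed
+ * to fib_path_recursive_loop_detect() already contains the entry for
+ * 1.1.1.1/32, so the path is marked looped and contributes the drop DPO
+ * until one of the routes changes.
+ */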
+
+int
+fib_path_resolve (fib_node_index_t path_index)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+
+ /*
+ * hope for the best.
+ */
+ path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
+
+ /*
+ * the forced drop path resolves via the drop adj
+ */
+ if (fib_path_is_permanent_drop(path))
+ {
+ dpo_copy(&path->fp_dpo,
+ drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
+ path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
+ return (fib_path_is_resolved(path_index));
+ }
+
+ switch (path->fp_type)
+ {
+ case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+ fib_path_attached_next_hop_set(path);
+ break;
+ case FIB_PATH_TYPE_ATTACHED:
+ /*
+ * path->attached.fp_interface
+ */
+ if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
+ path->attached.fp_interface))
+ {
+ path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
+ }
+ if (vnet_sw_interface_is_p2p(vnet_get_main(),
+ path->attached.fp_interface))
+ {
+ /*
+ * point-2-point interfaces do not require a glean, since
+ * there is nothing to ARP. Install a rewrite/nbr adj instead
+ */
+ dpo_set(&path->fp_dpo,
+ DPO_ADJACENCY,
+ fib_proto_to_dpo(path->fp_nh_proto),
+ adj_nbr_add_or_lock(
+ path->fp_nh_proto,
+ fib_proto_to_link(path->fp_nh_proto),
+ &zero_addr,
+ path->attached.fp_interface));
+ }
+ else
+ {
+ dpo_set(&path->fp_dpo,
+ DPO_ADJACENCY_GLEAN,
+ fib_proto_to_dpo(path->fp_nh_proto),
+ adj_glean_add_or_lock(path->fp_nh_proto,
+ path->attached.fp_interface,
+ NULL));
+ }
+ /*
+ * become a child of the adjacency so we receive updates
+ * when the interface state changes
+ */
+ path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
+ FIB_NODE_TYPE_PATH,
+ fib_path_get_index(path));
+
+ break;
+ case FIB_PATH_TYPE_RECURSIVE:
+ {
+ /*
+ * Create a RR source entry in the table for the address
+ * that this path recurses through.
+	 * This resolve action is recursive, hence we may create
+	 * more paths in the process. more creates may mean a pool
+	 * realloc of this path, so it is re-fetched below.
+ */
+ fib_node_index_t fei;
+ fib_prefix_t pfx;
+
+ ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
+
+ if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
+ {
+ fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label, &pfx);
+ }
+ else
+ {
+ fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
+ }
+
+ fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
+ &pfx,
+ FIB_SOURCE_RR,
+ FIB_ENTRY_FLAG_NONE,
+ ADJ_INDEX_INVALID);
+
+ path = fib_path_get(path_index);
+ path->fp_via_fib = fei;
+
+ /*
+ * become a dependent child of the entry so the path is
+ * informed when the forwarding for the entry changes.
+ */
+ path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
+ FIB_NODE_TYPE_PATH,
+ fib_path_get_index(path));
+
+ /*
+ * create and configure the IP DPO
+ */
+ fib_path_recursive_adj_update(
+ path,
+ fib_path_proto_to_chain_type(path->fp_nh_proto),
+ &path->fp_dpo);
+
+ break;
+ }
+ case FIB_PATH_TYPE_SPECIAL:
+ /*
+ * Resolve via the drop
+ */
+ dpo_copy(&path->fp_dpo,
+ drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
+ break;
+ case FIB_PATH_TYPE_DEAG:
+ /*
+ * Resolve via a lookup DPO.
+ * FIXME. control plane should add routes with a table ID
+ */
+ lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
+ fib_proto_to_dpo(path->fp_nh_proto),
+ LOOKUP_INPUT_DST_ADDR,
+ LOOKUP_TABLE_FROM_CONFIG,
+ &path->fp_dpo);
+ break;
+ case FIB_PATH_TYPE_RECEIVE:
+ /*
+ * Resolve via a receive DPO.
+ */
+ receive_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto),
+ path->receive.fp_interface,
+ &path->receive.fp_addr,
+ &path->fp_dpo);
+ break;
+ case FIB_PATH_TYPE_EXCLUSIVE:
+ /*
+ * Resolve via the user provided DPO
+ */
+ dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
+ break;
+ }
+
+ return (fib_path_is_resolved(path_index));
+}
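+/*
+ * Usage sketch (illustrative; pl_index and rpath are hypothetical): a
+ * path-list typically drives a path through create -> resolve ->
+ * contribute:
+ *
+ *   fib_node_index_t pi;
+ *   dpo_id_t dpo = DPO_INVALID;
+ *
+ *   pi = fib_path_create(pl_index, FIB_PROTOCOL_IP4,
+ *                        FIB_PATH_CFG_FLAG_NONE, &rpath);
+ *   if (fib_path_resolve(pi))
+ *       fib_path_contribute_forwarding(pi,
+ *                                      FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ *                                      &dpo);
+ */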
+
+u32
+fib_path_get_resolving_interface (fib_node_index_t path_index)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+
+ switch (path->fp_type)
+ {
+ case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+ return (path->attached_next_hop.fp_interface);
+ case FIB_PATH_TYPE_ATTACHED:
+ return (path->attached.fp_interface);
+ case FIB_PATH_TYPE_RECEIVE:
+ return (path->receive.fp_interface);
+ case FIB_PATH_TYPE_RECURSIVE:
+ return (fib_entry_get_resolving_interface(path->fp_via_fib));
+ case FIB_PATH_TYPE_SPECIAL:
+ case FIB_PATH_TYPE_DEAG:
+ case FIB_PATH_TYPE_EXCLUSIVE:
+ break;
+ }
+ return (~0);
+}
+
+adj_index_t
+fib_path_get_adj (fib_node_index_t path_index)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+
+ ASSERT(dpo_is_adj(&path->fp_dpo));
+ if (dpo_is_adj(&path->fp_dpo))
+ {
+ return (path->fp_dpo.dpoi_index);
+ }
+ return (ADJ_INDEX_INVALID);
+}
+
+int
+fib_path_get_weight (fib_node_index_t path_index)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+
+ ASSERT(path);
+
+ return (path->fp_weight);
+}
+
+/**
+ * @brief Contribute the path's adjacency to the list passed.
+ * By calling this function over all paths, recursively, a child
+ * can construct its full set of forwarding adjacencies, and hence its
+ * uRPF list.
+ */
+void
+fib_path_contribute_urpf (fib_node_index_t path_index,
+ index_t urpf)
+{
+ fib_path_t *path;
+
+ if (!fib_path_is_resolved(path_index))
+ return;
+
+ path = fib_path_get(path_index);
+
+ switch (path->fp_type)
+ {
+ case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+ fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
+ break;
+
+ case FIB_PATH_TYPE_ATTACHED:
+ fib_urpf_list_append(urpf, path->attached.fp_interface);
+ break;
+
+ case FIB_PATH_TYPE_RECURSIVE:
+ fib_entry_contribute_urpf(path->fp_via_fib, urpf);
+ break;
+
+ case FIB_PATH_TYPE_EXCLUSIVE:
+ case FIB_PATH_TYPE_SPECIAL:
+ /*
+ * these path types may link to an adj, if that's what
+	 * the client gave
+ */
+ if (dpo_is_adj(&path->fp_dpo))
+ {
+ ip_adjacency_t *adj;
+
+ adj = adj_get(path->fp_dpo.dpoi_index);
+
+ fib_urpf_list_append(urpf, adj->rewrite_header.sw_if_index);
+ }
+ break;
+
+ case FIB_PATH_TYPE_DEAG:
+ case FIB_PATH_TYPE_RECEIVE:
+ /*
+ * these path types don't link to an adj
+ */
+ break;
+ }
+}
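+/*
+ * Usage sketch (illustrative): an aggregate uRPF list is built by
+ * walking all of a path-list's paths with this function, as
+ * fib_path_list_mk_urpf() does:
+ *
+ *   urpf = fib_urpf_list_alloc_and_lock();
+ *   vec_foreach (path_index, path_list->fpl_paths)
+ *       fib_path_contribute_urpf(*path_index, urpf);
+ *   fib_urpf_list_bake(urpf);
+ */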
+
+void
+fib_path_contribute_forwarding (fib_node_index_t path_index,
+ fib_forward_chain_type_t fct,
+ dpo_id_t *dpo)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+
+ ASSERT(path);
+ ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
+
+ FIB_PATH_DBG(path, "contribute");
+
+ /*
+ * The DPO stored in the path was created when the path was resolved.
+     * This then represents the path's 'native' protocol, i.e. IP.
+     * For all other chain types we will need to find something else.
+ */
+ if (fib_path_proto_to_chain_type(path->fp_nh_proto) == fct)
+ {
+ dpo_copy(dpo, &path->fp_dpo);
+ }
+ else
+ {
+ switch (path->fp_type)
+ {
+ case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+ switch (fct)
+ {
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
+ case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
+ case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
+ case FIB_FORW_CHAIN_TYPE_ETHERNET:
+ {
+ adj_index_t ai;
+
+ /*
+		 * get an appropriate link-type adj.
+ */
+ ai = fib_path_attached_next_hop_get_adj(
+ path,
+ fib_forw_chain_type_to_link_type(fct));
+ dpo_set(dpo, DPO_ADJACENCY,
+ fib_forw_chain_type_to_dpo_proto(fct), ai);
+ adj_unlock(ai);
+
+ break;
+ }
+ }
+ break;
+ case FIB_PATH_TYPE_RECURSIVE:
+ switch (fct)
+ {
+ case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
+ case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
+ fib_path_recursive_adj_update(path, fct, dpo);
+ break;
+ case FIB_FORW_CHAIN_TYPE_ETHERNET:
+ ASSERT(0);
+ break;
+ }
+ break;
+ case FIB_PATH_TYPE_DEAG:
+ switch (fct)
+ {
+ case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
+ lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
+ DPO_PROTO_MPLS,
+ LOOKUP_INPUT_DST_ADDR,
+ LOOKUP_TABLE_FROM_CONFIG,
+ dpo);
+ break;
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
+ case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
+ dpo_copy(dpo, &path->fp_dpo);
+ break;
+ case FIB_FORW_CHAIN_TYPE_ETHERNET:
+ ASSERT(0);
+ break;
+ }
+ break;
+ case FIB_PATH_TYPE_EXCLUSIVE:
+ dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
+ break;
+ case FIB_PATH_TYPE_ATTACHED:
+ case FIB_PATH_TYPE_RECEIVE:
+ case FIB_PATH_TYPE_SPECIAL:
+ ASSERT(0);
+ break;
+ }
+
+ }
+}
+
+load_balance_path_t *
+fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
+ fib_forward_chain_type_t fct,
+ load_balance_path_t *hash_key)
+{
+ load_balance_path_t *mnh;
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+
+ ASSERT(path);
+
+ if (fib_path_is_resolved(path_index))
+ {
+ vec_add2(hash_key, mnh, 1);
+
+ mnh->path_weight = path->fp_weight;
+ mnh->path_index = path_index;
+ fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
+ }
+
+ return (hash_key);
+}
+
+int
+fib_path_is_recursive (fib_node_index_t path_index)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+
+ return (FIB_PATH_TYPE_RECURSIVE == path->fp_type);
+}
+
+int
+fib_path_is_exclusive (fib_node_index_t path_index)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+
+ return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
+}
+
+int
+fib_path_is_deag (fib_node_index_t path_index)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+
+ return (FIB_PATH_TYPE_DEAG == path->fp_type);
+}
+
+int
+fib_path_is_resolved (fib_node_index_t path_index)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+
+ return (dpo_id_is_valid(&path->fp_dpo) &&
+ (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
+ !fib_path_is_looped(path_index) &&
+ !fib_path_is_permanent_drop(path));
+}
+
+int
+fib_path_is_looped (fib_node_index_t path_index)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+
+ return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
+}
+
+int
+fib_path_encode (fib_node_index_t path_list_index,
+ fib_node_index_t path_index,
+ void *ctx)
+{
+ fib_route_path_encode_t **api_rpaths = ctx;
+ fib_route_path_encode_t *api_rpath;
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+ if (!path)
+ return (0);
+ vec_add2(*api_rpaths, api_rpath, 1);
+ api_rpath->rpath.frp_weight = path->fp_weight;
+ api_rpath->rpath.frp_proto = path->fp_nh_proto;
+ api_rpath->rpath.frp_sw_if_index = ~0;
+ api_rpath->dpo = path->exclusive.fp_ex_dpo;
+ switch (path->fp_type)
+ {
+ case FIB_PATH_TYPE_RECEIVE:
+ api_rpath->rpath.frp_addr = path->receive.fp_addr;
+ api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
+ break;
+ case FIB_PATH_TYPE_ATTACHED:
+ api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
+ break;
+ case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+ api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
+ api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
+ break;
+ case FIB_PATH_TYPE_SPECIAL:
+ break;
+ case FIB_PATH_TYPE_DEAG:
+ break;
+ case FIB_PATH_TYPE_RECURSIVE:
+ api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
+ break;
+ default:
+ break;
+ }
+ return (1);
+}
+
+fib_protocol_t
+fib_path_get_proto (fib_node_index_t path_index)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+
+ return (path->fp_nh_proto);
+}
+
+void
+fib_path_module_init (void)
+{
+ fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
+}
+
+static clib_error_t *
+show_fib_path_command (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ fib_node_index_t pi;
+ fib_path_t *path;
+
+ if (unformat (input, "%d", &pi))
+ {
+ /*
+ * show one in detail
+ */
+ if (!pool_is_free_index(fib_path_pool, pi))
+ {
+ path = fib_path_get(pi);
+ u8 *s = fib_path_format(pi, NULL);
+ s = format(s, "children:");
+ s = fib_node_children_format(path->fp_node.fn_children, s);
+ vlib_cli_output (vm, "%s", s);
+ vec_free(s);
+ }
+ else
+ {
+ vlib_cli_output (vm, "path %d invalid", pi);
+ }
+ }
+ else
+ {
+ vlib_cli_output (vm, "FIB Paths");
+ pool_foreach(path, fib_path_pool,
+ ({
+ vlib_cli_output (vm, "%U", format_fib_path, path);
+ }));
+ }
+
+ return (NULL);
+}
+
+VLIB_CLI_COMMAND (show_fib_path, static) = {
+ .path = "show fib paths",
+ .function = show_fib_path_command,
+ .short_help = "show fib paths",
+};
diff --git a/src/vnet/fib/fib_path.h b/src/vnet/fib/fib_path.h
new file mode 100644
index 00000000000..91f49d09234
--- /dev/null
+++ b/src/vnet/fib/fib_path.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Given a route of the form:
+ *   q.r.s.t/Y
+ *     via <interface> <next-hop>
+ *
+ * The prefix is: q.r.s.t/Y
+ * The path is:  'via <interface> <next-hop>'
+ *
+ * The path is the description of where to send the traffic, and the
+ * prefix is a description of which traffic to send.
+ * It is the aim of the FIB to resolve the path, i.e. to find the corresponding
+ * adjacency to match the path's description.
+ */
+
+#ifndef __FIB_PATH_H__
+#define __FIB_PATH_H__
+
+#include <vnet/ip/ip.h>
+#include <vnet/dpo/load_balance.h>
+
+#include <vnet/fib/fib_types.h>
+#include <vnet/adj/adj_types.h>
+
+/**
+ * Enumeration of path configuration attributes
+ */
+typedef enum fib_path_cfg_attribute_t_ {
+ /**
+ * Marker. Add new types after this one.
+ */
+ FIB_PATH_CFG_ATTRIBUTE_FIRST = 0,
+ /**
+ * The path is forced to a drop, whatever the next-hop info says.
+ * something somewhere knows better...
+ */
+ FIB_PATH_CFG_ATTRIBUTE_DROP = FIB_PATH_CFG_ATTRIBUTE_FIRST,
+ /**
+ * The path uses an adj that is exclusive. I.e. it is known only by
+ * the source of the route.
+ */
+ FIB_PATH_CFG_ATTRIBUTE_EXCLUSIVE,
+ /**
+ * Recursion constraint via host
+ */
+ FIB_PATH_CFG_ATTRIBUTE_RESOLVE_HOST,
+ /**
+ * Recursion constraint via attached
+ */
+ FIB_PATH_CFG_ATTRIBUTE_RESOLVE_ATTACHED,
+ /**
+ * The path is a for-us path
+ */
+ FIB_PATH_CFG_ATTRIBUTE_LOCAL,
+ /**
+ * Marker. Add new types before this one, then update it.
+ */
+ FIB_PATH_CFG_ATTRIBUTE_LAST = FIB_PATH_CFG_ATTRIBUTE_LOCAL,
+} __attribute__ ((packed)) fib_path_cfg_attribute_t;
+
+/**
+ * The maximum number of path attributes
+ */
+#define FIB_PATH_CFG_ATTRIBUTE_MAX (FIB_PATH_CFG_ATTRIBUTE_LAST + 1)
+
+#define FIB_PATH_CFG_ATTRIBUTES { \
+ [FIB_PATH_CFG_ATTRIBUTE_DROP] = "drop", \
+ [FIB_PATH_CFG_ATTRIBUTE_EXCLUSIVE] = "exclusive", \
+ [FIB_PATH_CFG_ATTRIBUTE_RESOLVE_HOST] = "resolve-host", \
+ [FIB_PATH_CFG_ATTRIBUTE_RESOLVE_ATTACHED] = "resolve-attached", \
+ [FIB_PATH_CFG_ATTRIBUTE_LOCAL] = "local", \
+}
+
+#define FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(_item) \
+ for (_item = FIB_PATH_CFG_ATTRIBUTE_FIRST; \
+ _item <= FIB_PATH_CFG_ATTRIBUTE_LAST; \
+ _item++)
+
+/**
+ * Path config flags from the attributes
+ */
+typedef enum fib_path_cfg_flags_t_ {
+ FIB_PATH_CFG_FLAG_NONE = 0,
+ FIB_PATH_CFG_FLAG_DROP = (1 << FIB_PATH_CFG_ATTRIBUTE_DROP),
+ FIB_PATH_CFG_FLAG_EXCLUSIVE = (1 << FIB_PATH_CFG_ATTRIBUTE_EXCLUSIVE),
+ FIB_PATH_CFG_FLAG_RESOLVE_HOST = (1 << FIB_PATH_CFG_ATTRIBUTE_RESOLVE_HOST),
+ FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED = (1 << FIB_PATH_CFG_ATTRIBUTE_RESOLVE_ATTACHED),
+ FIB_PATH_CFG_FLAG_LOCAL = (1 << FIB_PATH_CFG_ATTRIBUTE_LOCAL),
+} __attribute__ ((packed)) fib_path_cfg_flags_t;
+
+
+extern u8 *fib_path_format(fib_node_index_t pi, u8 *s);
+extern u8 *fib_path_adj_format(fib_node_index_t pi,
+ u32 indent,
+ u8 *s);
+
+extern u8 * format_fib_path(u8 * s, va_list * args);
+
+extern fib_node_index_t fib_path_create(fib_node_index_t pl_index,
+ fib_protocol_t nh_proto,
+ fib_path_cfg_flags_t flags,
+ const fib_route_path_t *path);
+extern fib_node_index_t fib_path_create_special(fib_node_index_t pl_index,
+ fib_protocol_t nh_proto,
+ fib_path_cfg_flags_t flags,
+ const dpo_id_t *dpo);
+
+extern int fib_path_cmp(fib_node_index_t path_index1,
+ fib_node_index_t path_index2);
+extern int fib_path_cmp_for_sort(void * a1, void * a2);
+extern int fib_path_cmp_w_route_path(fib_node_index_t path_index,
+ const fib_route_path_t *rpath);
+extern fib_node_index_t fib_path_copy(fib_node_index_t path_index,
+ fib_node_index_t path_list_index);
+extern int fib_path_resolve(fib_node_index_t path_index);
+extern int fib_path_is_resolved(fib_node_index_t path_index);
+extern int fib_path_is_recursive(fib_node_index_t path_index);
+extern int fib_path_is_exclusive(fib_node_index_t path_index);
+extern int fib_path_is_deag(fib_node_index_t path_index);
+extern int fib_path_is_looped(fib_node_index_t path_index);
+extern fib_protocol_t fib_path_get_proto(fib_node_index_t path_index);
+extern void fib_path_destroy(fib_node_index_t path_index);
+extern uword fib_path_hash(fib_node_index_t path_index);
+extern load_balance_path_t * fib_path_append_nh_for_multipath_hash(
+ fib_node_index_t path_index,
+ fib_forward_chain_type_t fct,
+ load_balance_path_t *hash_key);
+extern void fib_path_contribute_forwarding(fib_node_index_t path_index,
+ fib_forward_chain_type_t type,
+ dpo_id_t *dpo);
+extern void fib_path_contribute_urpf(fib_node_index_t path_index,
+ index_t urpf);
+extern adj_index_t fib_path_get_adj(fib_node_index_t path_index);
+extern int fib_path_recursive_loop_detect(fib_node_index_t path_index,
+ fib_node_index_t **entry_indicies);
+extern u32 fib_path_get_resolving_interface(fib_node_index_t fib_entry_index);
+extern int fib_path_get_weight(fib_node_index_t path_index);
+
+extern void fib_path_module_init(void);
+extern int fib_path_encode(fib_node_index_t path_list_index,
+ fib_node_index_t path_index,
+ void *ctx);
+
+#endif
diff --git a/src/vnet/fib/fib_path_ext.c b/src/vnet/fib/fib_path_ext.c
new file mode 100644
index 00000000000..f75b5626c04
--- /dev/null
+++ b/src/vnet/fib/fib_path_ext.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/mpls/mpls.h>
+#include <vnet/dpo/mpls_label_dpo.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/drop_dpo.h>
+
+#include <vnet/fib/fib_path_ext.h>
+#include <vnet/fib/fib_entry_src.h>
+#include <vnet/fib/fib_path.h>
+#include <vnet/fib/fib_path_list.h>
+#include <vnet/fib/fib_internal.h>
+
+u8 *
+format_fib_path_ext (u8 * s, va_list * args)
+{
+ fib_path_ext_t *path_ext;
+ u32 ii;
+
+ path_ext = va_arg (*args, fib_path_ext_t *);
+
+ s = format(s, "path:%d labels:",
+ path_ext->fpe_path_index);
+ for (ii = 0; ii < vec_len(path_ext->fpe_path.frp_label_stack); ii++)
+ {
+ s = format(s, "%U ",
+ format_mpls_unicast_label,
+ path_ext->fpe_path.frp_label_stack[ii]);
+ }
+ return (s);
+}
+
+int
+fib_path_ext_cmp (fib_path_ext_t *path_ext,
+ const fib_route_path_t *rpath)
+{
+ return (fib_route_path_cmp(&path_ext->fpe_path, rpath));
+}
+
+static int
+fib_path_ext_match (fib_node_index_t pl_index,
+ fib_node_index_t path_index,
+ void *ctx)
+{
+ fib_path_ext_t *path_ext = ctx;
+
+ if (!fib_path_cmp_w_route_path(path_index,
+ &path_ext->fpe_path))
+ {
+ path_ext->fpe_path_index = path_index;
+ return (0);
+ }
+ // keep going
+ return (1);
+}
+
+void
+fib_path_ext_resolve (fib_path_ext_t *path_ext,
+ fib_node_index_t path_list_index)
+{
+ /*
+ * Find the path on the path list that this is an extension for
+ */
+ path_ext->fpe_path_index = FIB_NODE_INDEX_INVALID;
+ fib_path_list_walk(path_list_index,
+ fib_path_ext_match,
+ path_ext);
+}
+
+void
+fib_path_ext_init (fib_path_ext_t *path_ext,
+ fib_node_index_t path_list_index,
+ const fib_route_path_t *rpath)
+{
+ path_ext->fpe_path = *rpath;
+ path_ext->fpe_path_index = FIB_NODE_INDEX_INVALID;
+
+ fib_path_ext_resolve(path_ext, path_list_index);
+}
+
+/**
+ * @brief Return true if the label stack is implicit null
+ */
+static int
+fib_path_ext_is_imp_null (fib_path_ext_t *path_ext)
+{
+ return ((1 == vec_len(path_ext->fpe_label_stack)) &&
+ (MPLS_IETF_IMPLICIT_NULL_LABEL == path_ext->fpe_label_stack[0]));
+}
+
+load_balance_path_t *
+fib_path_ext_stack (fib_path_ext_t *path_ext,
+ const fib_entry_t *entry,
+ fib_forward_chain_type_t child_fct,
+ load_balance_path_t *nhs)
+{
+ fib_forward_chain_type_t parent_fct;
+ load_balance_path_t *nh;
+
+ if (!fib_path_is_resolved(path_ext->fpe_path_index))
+ return (nhs);
+
+ /*
+ * Since we are stacking this path-extension, it must have a valid out
+     * label. From the chain type requested by the child, determine what
+ * chain type we will request from the parent.
+ */
+ switch (child_fct)
+ {
+ case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
+ {
+ /*
+	 * The EOS chain is tricky since, when the path has an imp-NULL label, one cannot know
+	 * the adjacency to link to without knowing what the packet's payload protocol
+ * will be once the label is popped.
+ */
+ if (fib_path_ext_is_imp_null(path_ext))
+ {
+ parent_fct = fib_entry_chain_type_fixup(entry, child_fct);
+ }
+ else
+ {
+ /*
+ * we have a label to stack. packets will thus be labelled when
+ * they encounter the child, ergo, non-eos.
+ */
+ parent_fct = FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS;
+ }
+ break;
+ }
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
+ if (fib_path_ext_is_imp_null(path_ext))
+ {
+ /*
+ * implicit-null label for the eos or IP chain, need to pick up
+ * the IP adj
+ */
+ parent_fct = child_fct;
+ }
+ else
+ {
+ /*
+ * we have a label to stack. packets will thus be labelled when
+ * they encounter the child, ergo, non-eos.
+ */
+ parent_fct = FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS;
+ }
+ break;
+ case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
+ parent_fct = child_fct;
+ break;
+ default:
+ return (nhs);
+ }
+
+ dpo_id_t via_dpo = DPO_INVALID;
+
+ /*
+ * The next object in the graph after the imposition of the label
+ * will be the DPO contributed by the path through which the packets
+ * are to be sent. We stack the MPLS Label DPO on this path DPO
+ */
+ fib_path_contribute_forwarding(path_ext->fpe_path_index,
+ parent_fct,
+ &via_dpo);
+
+ if (dpo_is_drop(&via_dpo) ||
+ load_balance_is_drop(&via_dpo))
+ {
+ /*
+ * don't stack a path extension on a drop. doing so will create
+ * a LB bucket entry on drop, and we will lose a percentage of traffic.
+ */
+ }
+ else
+ {
+ vec_add2(nhs, nh, 1);
+ nh->path_weight = fib_path_get_weight(path_ext->fpe_path_index);
+ nh->path_index = path_ext->fpe_path_index;
+ dpo_copy(&nh->path_dpo, &via_dpo);
+
+ /*
+	 * The label is stackable for this chain type;
+	 * construct the MPLS header that will be imposed in the data-path
+ */
+ if (!fib_path_ext_is_imp_null(path_ext))
+ {
+ /*
+ * we use the parent protocol for the label so that
+	     * we pick up the correct MPLS imposition nodes to do
+ * ip[46] processing.
+ */
+ dpo_proto_t chain_proto;
+ mpls_eos_bit_t eos;
+ index_t mldi;
+
+ eos = (child_fct == FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS ?
+ MPLS_NON_EOS :
+ MPLS_EOS);
+ chain_proto = fib_forw_chain_type_to_dpo_proto(child_fct);
+
+ mldi = mpls_label_dpo_create(path_ext->fpe_label_stack,
+ eos, 255, 0,
+ chain_proto,
+ &nh->path_dpo);
+
+ dpo_set(&nh->path_dpo,
+ DPO_MPLS_LABEL,
+ chain_proto,
+ mldi);
+ }
+ }
+ dpo_reset(&via_dpo);
+
+ return (nhs);
+}
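+/*
+ * Worked example (illustrative; label value hypothetical): for
+ *
+ *   ip route add 1.1.1.1/32 via 10.10.10.10 out-label 100
+ *
+ * a child requesting FIB_FORW_CHAIN_TYPE_UNICAST_IP4 resolves the path
+ * for MPLS_NON_EOS (there is a real label to stack), then an MPLS label
+ * DPO imposing 100 with the EOS bit set is stacked on that non-eos path
+ * DPO. Had the out-label been implicit-NULL, the path's plain IP4 DPO
+ * would have been contributed instead.
+ */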
diff --git a/src/vnet/fib/fib_path_ext.h b/src/vnet/fib/fib_path_ext.h
new file mode 100644
index 00000000000..cf8f8df00c6
--- /dev/null
+++ b/src/vnet/fib/fib_path_ext.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FIB_PATH_EXT_H__
+#define __FIB_PATH_EXT_H__
+
+#include <vnet/mpls/mpls.h>
+#include <vnet/fib/fib_types.h>
+
+/**
+ * A path extension is a per-entry addition to the forwarding information
+ * when packets are sent for that entry over that path.
+ *
+ * For example:
+ * ip route add 1.1.1.1/32 via 10.10.10.10 out-label 100
+ *
+ * The out-going MPLS label value 100 is a path-extension. It is a value specific
+ * to the entry 1.1.1.1/32 and valid only when packets are sent via 10.10.10.10.
+ */
+typedef struct fib_path_ext_t_
+{
+ /**
+ * A description of the path that is being extended.
+ * This description is used to match this extension with the [changing]
+ * instance of a fib_path_t that is extended
+ */
+ fib_route_path_t fpe_path;
+#define fpe_label_stack fpe_path.frp_label_stack
+
+ /**
+ * The index of the path. This is the global index, not the path's
+ * position in the path-list.
+ */
+ fib_node_index_t fpe_path_index;
+} fib_path_ext_t;
+
+struct fib_entry_t_;
+
+extern u8 * format_fib_path_ext(u8 * s, va_list * args);
+
+extern void fib_path_ext_init(fib_path_ext_t *path_ext,
+ fib_node_index_t path_list_index,
+ const fib_route_path_t *rpath);
+
+extern int fib_path_ext_cmp(fib_path_ext_t *path_ext,
+ const fib_route_path_t *rpath);
+
+extern void fib_path_ext_resolve(fib_path_ext_t *path_ext,
+ fib_node_index_t path_list_index);
+
+extern load_balance_path_t *fib_path_ext_stack(fib_path_ext_t *path_ext,
+ const struct fib_entry_t_ *entry,
+ fib_forward_chain_type_t fct,
+ load_balance_path_t *nhs);
+
+#endif
+
diff --git a/src/vnet/fib/fib_path_list.c b/src/vnet/fib/fib_path_list.c
new file mode 100644
index 00000000000..5b35e9b87e7
--- /dev/null
+++ b/src/vnet/fib/fib_path_list.c
@@ -0,0 +1,1223 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vppinfra/mhash.h>
+#include <vnet/ip/ip.h>
+#include <vnet/adj/adj.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/load_balance_map.h>
+
+#include <vnet/fib/fib_path_list.h>
+#include <vnet/fib/fib_internal.h>
+#include <vnet/fib/fib_node_list.h>
+#include <vnet/fib/fib_walk.h>
+#include <vnet/fib/fib_urpf_list.h>
+
+/**
+ * FIB path-list
+ * A representation of the list/set of paths through which a prefix is reachable
+ */
+typedef struct fib_path_list_t_ {
+ /**
+ * A path-list is a node in the FIB graph.
+ */
+ fib_node_t fpl_node;
+
+ /**
+ * Flags on the path-list
+ */
+ fib_path_list_flags_t fpl_flags;
+
+ /**
+ * The next-hop protocol for the paths in this path list.
+ * Note that fixing the proto here means we don't support a mix of
+ * v4 and v6 paths. ho hum.
+ */
+ fib_protocol_t fpl_nh_proto;
+
+ /**
+     * Vector of path indices for all configured paths.
+     * For shareable path-lists this list MUST NOT change.
+ */
+ fib_node_index_t *fpl_paths;
+
+ /**
+ * the RPF list calculated for this path list
+ */
+ fib_node_index_t fpl_urpf;
+} fib_path_list_t;
+
+/*
+ * Array of strings/names for the path-list attributes
+ */
+static const char *fib_path_list_attr_names[] = FIB_PATH_LIST_ATTRIBUTES;
+
+/*
+ * The memory pool from which we allocate all the path-lists
+ */
+static fib_path_list_t * fib_path_list_pool;
+
+/*
+ * The data-base of shared path-lists
+ */
+static uword *fib_path_list_db;
+
+/*
+ * Debug macro
+ */
+#ifdef FIB_DEBUG
+#define FIB_PATH_LIST_DBG(_pl, _fmt, _args...) \
+{ \
+ u8 *_tmp = 0; \
+ _tmp = fib_path_list_format( \
+ fib_path_list_get_index(_pl), _tmp); \
+ clib_warning("pl:[%d:%p:%p:%s]:" _fmt, \
+ fib_path_list_get_index(_pl), \
+ _pl, _pl->fpl_paths, _tmp, \
+ ##_args); \
+ vec_free(_tmp); \
+}
+#else
+#define FIB_PATH_LIST_DBG(_pl, _fmt, _args...)
+#endif
+
+static fib_path_list_t *
+fib_path_list_get (fib_node_index_t index)
+{
+ return (pool_elt_at_index(fib_path_list_pool, index));
+}
+
+static fib_node_t *
+fib_path_list_get_node (fib_node_index_t index)
+{
+ return ((fib_node_t*)fib_path_list_get(index));
+}
+
+static fib_path_list_t*
+fib_path_list_from_fib_node (fib_node_t *node)
+{
+#if CLIB_DEBUG > 0
+ ASSERT(FIB_NODE_TYPE_PATH_LIST == node->fn_type);
+#endif
+ return ((fib_path_list_t*)node);
+}
+
+static fib_node_index_t
+fib_path_list_get_index (fib_path_list_t *path_list)
+{
+ return (path_list - fib_path_list_pool);
+}
+
+static u8 *
+format_fib_path_list (u8 * s, va_list * args)
+{
+ fib_path_list_attribute_t attr;
+ fib_node_index_t *path_index;
+ fib_path_list_t *path_list;
+
+ path_list = va_arg (*args, fib_path_list_t *);
+
+ s = format (s, " index:%u", fib_path_list_get_index(path_list));
+ s = format (s, " locks:%u", path_list->fpl_node.fn_locks);
+ s = format (s, " proto:%U", format_fib_protocol, path_list->fpl_nh_proto);
+
+ if (FIB_PATH_LIST_FLAG_NONE != path_list->fpl_flags)
+ {
+ s = format (s, " flags:");
+ FOR_EACH_PATH_LIST_ATTRIBUTE(attr)
+ {
+ if ((1<<attr) & path_list->fpl_flags)
+ {
+ s = format (s, "%s,", fib_path_list_attr_names[attr]);
+ }
+ }
+ }
+ s = format (s, " %U\n", format_fib_urpf_list, path_list->fpl_urpf);
+
+ vec_foreach (path_index, path_list->fpl_paths)
+ {
+ s = fib_path_format(*path_index, s);
+ s = format(s, "\n");
+ }
+
+ return (s);
+}
+
+u8 *
+fib_path_list_adjs_format (fib_node_index_t path_list_index,
+ u32 indent,
+ u8 * s)
+{
+ fib_path_list_t *path_list;
+ u32 i;
+
+ path_list = fib_path_list_get(path_list_index);
+
+ vec_foreach_index (i, path_list->fpl_paths)
+ {
+ s = fib_path_adj_format(path_list->fpl_paths[i],
+ indent, s);
+ }
+
+ return (s);
+}
+
+
+u8 *
+fib_path_list_format (fib_node_index_t path_list_index,
+ u8 * s)
+{
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_get(path_list_index);
+
+ return (format(s, "%U", format_fib_path_list, path_list));
+}
+
+static uword
+fib_path_list_hash (fib_path_list_t *path_list)
+{
+ uword old_path_list_hash, new_path_list_hash, path_hash;
+ fib_node_index_t *path_index;
+
+ ASSERT(path_list);
+
+ new_path_list_hash = old_path_list_hash = vec_len(path_list->fpl_paths);
+
+ vec_foreach (path_index, path_list->fpl_paths)
+ {
+ path_hash = fib_path_hash(*path_index);
+#if uword_bits == 64
+ hash_mix64(path_hash, old_path_list_hash, new_path_list_hash);
+#else
+ hash_mix32(path_hash, old_path_list_hash, new_path_list_hash);
+#endif
+ }
+
+ return (new_path_list_hash);
+}
+
+always_inline uword
+fib_path_list_db_hash_key_from_index (uword index)
+{
+ return 1 + 2*index;
+}
+
+always_inline uword
+fib_path_list_db_hash_key_is_index (uword key)
+{
+ return key & 1;
+}
+
+always_inline uword
+fib_path_list_db_hash_key_2_index (uword key)
+{
+ ASSERT (fib_path_list_db_hash_key_is_index (key));
+ return key / 2;
+}
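+/*
+ * Key scheme sketch (illustrative values): the DB keys shared
+ * path-lists either by pool index or by pointer. Indices are encoded
+ * as odd keys (2*index + 1) so they can never collide with pointer
+ * keys, which are word-aligned and hence always even:
+ *
+ *   index 7     -> key 15 (odd; decodes back to 7)
+ *   &path_list  -> the (even) pointer value itself
+ */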
+
+static fib_path_list_t*
+fib_path_list_db_get_from_hash_key (uword key)
+{
+ fib_path_list_t *path_list;
+
+ if (fib_path_list_db_hash_key_is_index (key))
+ {
+ fib_node_index_t path_list_index;
+
+ path_list_index = fib_path_list_db_hash_key_2_index(key);
+ path_list = fib_path_list_get(path_list_index);
+ }
+ else
+ {
+ path_list = uword_to_pointer (key, fib_path_list_t *);
+ }
+
+ return (path_list);
+}
+
+static uword
+fib_path_list_db_hash_key_sum (hash_t * h,
+ uword key)
+{
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_db_get_from_hash_key(key);
+
+ return (fib_path_list_hash(path_list));
+}
+
+static uword
+fib_path_list_db_hash_key_equal (hash_t * h,
+ uword key1,
+ uword key2)
+{
+ fib_path_list_t *path_list1, *path_list2;
+
+ path_list1 = fib_path_list_db_get_from_hash_key(key1);
+ path_list2 = fib_path_list_db_get_from_hash_key(key2);
+
+ return (fib_path_list_hash(path_list1) ==
+ fib_path_list_hash(path_list2));
+}
+
+static fib_node_index_t
+fib_path_list_db_find (fib_path_list_t *path_list)
+{
+ uword *p;
+
+ p = hash_get(fib_path_list_db, path_list);
+
+ if (NULL != p)
+ {
+ return p[0];
+ }
+
+ return (FIB_NODE_INDEX_INVALID);
+}
+
+static void
+fib_path_list_db_insert (fib_node_index_t path_list_index)
+{
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_get(path_list_index);
+
+ ASSERT(FIB_NODE_INDEX_INVALID == fib_path_list_db_find(path_list));
+
+ hash_set (fib_path_list_db,
+ fib_path_list_db_hash_key_from_index(path_list_index),
+ path_list_index);
+
+ FIB_PATH_LIST_DBG(path_list, "DB-inserted");
+}
+
+static void
+fib_path_list_db_remove (fib_node_index_t path_list_index)
+{
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_get(path_list_index);
+
+ ASSERT(FIB_NODE_INDEX_INVALID != fib_path_list_db_find(path_list));
+
+ hash_unset(fib_path_list_db,
+ fib_path_list_db_hash_key_from_index(path_list_index));
+
+ FIB_PATH_LIST_DBG(path_list, "DB-removed");
+}
+
+static void
+fib_path_list_destroy (fib_path_list_t *path_list)
+{
+ fib_node_index_t *path_index;
+
+ FIB_PATH_LIST_DBG(path_list, "destroy");
+
+ vec_foreach (path_index, path_list->fpl_paths)
+ {
+ fib_path_destroy(*path_index);
+ }
+
+ vec_free(path_list->fpl_paths);
+ fib_urpf_list_unlock(path_list->fpl_urpf);
+
+ fib_node_deinit(&path_list->fpl_node);
+ pool_put(fib_path_list_pool, path_list);
+}
+
+static void
+fib_path_list_last_lock_gone (fib_node_t *node)
+{
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_from_fib_node(node);
+
+ FIB_PATH_LIST_DBG(path_list, "last-lock");
+
+ if (path_list->fpl_flags & FIB_PATH_LIST_FLAG_SHARED)
+ {
+ fib_path_list_db_remove(fib_path_list_get_index(path_list));
+ }
+ fib_path_list_destroy(path_list);
+}
+
+/*
+ * fib_path_list_mk_lb
+ *
+ * update the load-balance this path-list will contribute to its
+ * children's forwarding.
+ */
+static void
+fib_path_list_mk_lb (fib_path_list_t *path_list,
+ fib_forward_chain_type_t fct,
+ dpo_id_t *dpo)
+{
+ load_balance_path_t *hash_key;
+ fib_node_index_t *path_index;
+
+ hash_key = NULL;
+
+ if (!dpo_id_is_valid(dpo))
+ {
+ /*
+ * first time create
+ */
+ dpo_set(dpo,
+ DPO_LOAD_BALANCE,
+ fib_forw_chain_type_to_dpo_proto(fct),
+ load_balance_create(0,
+ fib_forw_chain_type_to_dpo_proto(fct),
+ 0 /* FIXME FLOW HASH */));
+ }
+
+ /*
+ * We gather the DPOs from resolved paths.
+ */
+ vec_foreach (path_index, path_list->fpl_paths)
+ {
+ hash_key = fib_path_append_nh_for_multipath_hash(
+ *path_index,
+ fct,
+ hash_key);
+ }
+
+ /*
+ * Path-list load-balances, which, if used, would be shared and hence
+ * never need a load-balance map.
+ */
+ load_balance_multipath_update(dpo, hash_key, LOAD_BALANCE_FLAG_NONE);
+
+ FIB_PATH_LIST_DBG(path_list, "mk lb: %d", dpo->dpoi_index);
+
+ vec_free(hash_key);
+}
+
+/**
+ * @brief [re]build the path list's uRPF list
+ */
+static void
+fib_path_list_mk_urpf (fib_path_list_t *path_list)
+{
+ fib_node_index_t *path_index;
+
+ /*
+ * ditch the old one. by iterating through all paths we are going
+ * to re-find all the adjs that were in the old one anyway. If we
+ * keep the old one, then the |sort|uniq requires more work.
+ * All users of the RPF list have their own lock, so we can release
+ * immediately.
+ */
+ fib_urpf_list_unlock(path_list->fpl_urpf);
+ path_list->fpl_urpf = fib_urpf_list_alloc_and_lock();
+
+ vec_foreach (path_index, path_list->fpl_paths)
+ {
+ fib_path_contribute_urpf(*path_index, path_list->fpl_urpf);
+ }
+
+ fib_urpf_list_bake(path_list->fpl_urpf);
+}
+
+/**
+ * @brief Contribute (add) this path list's uRPF list. This allows the child
+ * to construct an aggregate list.
+ */
+void
+fib_path_list_contribute_urpf (fib_node_index_t path_list_index,
+ index_t urpf)
+{
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_get(path_list_index);
+
+ fib_urpf_list_combine(urpf, path_list->fpl_urpf);
+}
+
+/**
+ * @brief Return to the child the RPF list pre-built for this path list
+ */
+index_t
+fib_path_list_get_urpf (fib_node_index_t path_list_index)
+{
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_get(path_list_index);
+
+ return (path_list->fpl_urpf);
+}
+
+/*
+ * fib_path_list_back_walk
+ *
+ * Called from one of this path-list's paths to propagate
+ * a back walk
+ */
+void
+fib_path_list_back_walk (fib_node_index_t path_list_index,
+ fib_node_back_walk_ctx_t *ctx)
+{
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_get(path_list_index);
+
+ fib_path_list_mk_urpf(path_list);
+
+ /*
+ * propagate the backwalk further
+ */
+ if (32 >= fib_node_list_get_size(path_list->fpl_node.fn_children))
+ {
+ /*
+ * only a few children. continue the walk synchronously
+ */
+ fib_walk_sync(FIB_NODE_TYPE_PATH_LIST, path_list_index, ctx);
+ }
+ else
+ {
+ /*
+ * many children. schedule an async walk
+ */
+ fib_walk_async(FIB_NODE_TYPE_PATH_LIST,
+ path_list_index,
+ FIB_WALK_PRIORITY_LOW,
+ ctx);
+ }
+}
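+
+/*
+ * e.g. a path-list with 32 or fewer children is walked synchronously
+ * within this call; with more children the walk is queued as a
+ * low-priority async walk and the children are updated later.
+ */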
+
+/*
+ * fib_path_list_back_walk_notify
+ *
+ * A back walk has reached this path-list.
+ */
+static fib_node_back_walk_rc_t
+fib_path_list_back_walk_notify (fib_node_t *node,
+ fib_node_back_walk_ctx_t *ctx)
+{
+ /*
+ * the path-list is not a direct child of any other node type.
+ * paths, which do not change their to-list mapping, save the
+ * list they are a member of, and invoke the BW function directly.
+ */
+ ASSERT(0);
+
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/*
+ * Display the path-list memory usage
+ */
+static void
+fib_path_list_memory_show (void)
+{
+ fib_show_memory_usage("Path-list",
+ pool_elts(fib_path_list_pool),
+ pool_len(fib_path_list_pool),
+ sizeof(fib_path_list_t));
+ fib_urpf_list_show_mem();
+}
+
+/*
+ * The FIB path-list's graph node virtual function table
+ */
+static const fib_node_vft_t fib_path_list_vft = {
+ .fnv_get = fib_path_list_get_node,
+ .fnv_last_lock = fib_path_list_last_lock_gone,
+ .fnv_back_walk = fib_path_list_back_walk_notify,
+ .fnv_mem_show = fib_path_list_memory_show,
+};
+
+static fib_path_list_t *
+fib_path_list_alloc (fib_node_index_t *path_list_index)
+{
+ fib_path_list_t *path_list;
+
+ pool_get(fib_path_list_pool, path_list);
+ memset(path_list, 0, sizeof(*path_list));
+
+ fib_node_init(&path_list->fpl_node,
+ FIB_NODE_TYPE_PATH_LIST);
+ path_list->fpl_urpf = INDEX_INVALID;
+
+ if (NULL != path_list_index)
+ {
+ *path_list_index = fib_path_list_get_index(path_list);
+ }
+
+ FIB_PATH_LIST_DBG(path_list, "alloc");
+
+ return (path_list);
+}
+
+static fib_path_list_t *
+fib_path_list_resolve (fib_path_list_t *path_list)
+{
+ fib_node_index_t *path_index, *paths, path_list_index;
+
+ ASSERT(!(path_list->fpl_flags & FIB_PATH_LIST_FLAG_RESOLVED));
+
+ /*
+ * resolving a path-list is a recursive action. this means more path
+ * lists can be created during this call, and hence this path-list
+ * can be realloc'd. so we work with copies.
+ * this function is called only once per path-list, so it's no great overhead.
+ */
+ path_list_index = fib_path_list_get_index(path_list);
+ paths = vec_dup(path_list->fpl_paths);
+
+ vec_foreach (path_index, paths)
+ {
+ fib_path_resolve(*path_index);
+ }
+
+ vec_free(paths);
+ path_list = fib_path_list_get(path_list_index);
+
+ FIB_PATH_LIST_DBG(path_list, "resolved");
+ fib_path_list_mk_urpf(path_list);
+
+ return (path_list);
+}
+
+u32
+fib_path_list_get_resolving_interface (fib_node_index_t path_list_index)
+{
+ fib_node_index_t *path_index;
+ fib_path_list_t *path_list;
+ u32 sw_if_index;
+
+ path_list = fib_path_list_get(path_list_index);
+
+ sw_if_index = ~0;
+ vec_foreach (path_index, path_list->fpl_paths)
+ {
+ sw_if_index = fib_path_get_resolving_interface(*path_index);
+ if (~0 != sw_if_index)
+ {
+ return (sw_if_index);
+ }
+ }
+
+ return (sw_if_index);
+}
+
+fib_protocol_t
+fib_path_list_get_proto (fib_node_index_t path_list_index)
+{
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_get(path_list_index);
+
+ /*
+ * we don't support a mix of path protocols, so we can return the proto
+ * of the first
+ */
+ return (fib_path_get_proto(path_list->fpl_paths[0]));
+}
+
+int
+fib_path_list_is_looped (fib_node_index_t path_list_index)
+{
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_get(path_list_index);
+
+ return (path_list->fpl_flags & FIB_PATH_LIST_FLAG_LOOPED);
+}
+
+static fib_path_cfg_flags_t
+fib_path_list_flags_2_path_flags (fib_path_list_flags_t plf)
+{
+ fib_path_cfg_flags_t pf = FIB_PATH_CFG_FLAG_NONE;
+
+ if (plf & FIB_PATH_LIST_FLAG_LOCAL)
+ {
+ pf |= FIB_PATH_CFG_FLAG_LOCAL;
+ }
+ if (plf & FIB_PATH_LIST_FLAG_DROP)
+ {
+ pf |= FIB_PATH_CFG_FLAG_DROP;
+ }
+ if (plf & FIB_PATH_LIST_FLAG_EXCLUSIVE)
+ {
+ pf |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
+ }
+
+ return (pf);
+}
+
+static fib_path_list_flags_t
+fib_path_list_flags_fixup (fib_path_list_flags_t flags)
+{
+ /*
+ * we do not share drop or exclusive path-lists
+ */
+ if (flags & FIB_PATH_LIST_FLAG_DROP ||
+ flags & FIB_PATH_LIST_FLAG_EXCLUSIVE)
+ {
+ flags &= ~FIB_PATH_LIST_FLAG_SHARED;
+ }
+
+ return (flags);
+}
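+
+/*
+ * e.g. fixing up (FIB_PATH_LIST_FLAG_SHARED | FIB_PATH_LIST_FLAG_DROP)
+ * yields FIB_PATH_LIST_FLAG_DROP alone, so drop and exclusive
+ * path-lists are never inserted into, nor matched against, the DB.
+ */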
+
+fib_node_index_t
+fib_path_list_create (fib_path_list_flags_t flags,
+ const fib_route_path_t *rpaths)
+{
+ fib_node_index_t path_list_index, old_path_list_index;
+ fib_path_list_t *path_list;
+ int i;
+
+ flags = fib_path_list_flags_fixup(flags);
+ path_list = fib_path_list_alloc(&path_list_index);
+ path_list->fpl_flags = flags;
+ /*
+ * we'll assume for now all paths are the same next-hop protocol
+ */
+ path_list->fpl_nh_proto = rpaths[0].frp_proto;
+
+ vec_foreach_index(i, rpaths)
+ {
+ vec_add1(path_list->fpl_paths,
+ fib_path_create(path_list_index,
+ path_list->fpl_nh_proto,
+ fib_path_list_flags_2_path_flags(flags),
+ &rpaths[i]));
+ }
+
+ /*
+ * If a shared path list is requested, consult the DB for a match
+ */
+ if (flags & FIB_PATH_LIST_FLAG_SHARED)
+ {
+ /*
+ * check for a matching path-list in the DB.
+ * If we find one then we can return the existing one and destroy the
+ * new one just created.
+ */
+ old_path_list_index = fib_path_list_db_find(path_list);
+ if (FIB_NODE_INDEX_INVALID != old_path_list_index)
+ {
+ fib_path_list_destroy(path_list);
+
+ path_list_index = old_path_list_index;
+ }
+ else
+ {
+ /*
+ * if there was not a matching path-list, then this
+ * new one will need inserting into the DB and resolving.
+ */
+ fib_path_list_db_insert(path_list_index);
+ path_list = fib_path_list_resolve(path_list);
+ }
+ }
+ else
+ {
+ /*
+ * no shared path list requested. resolve and use the one
+ * just created.
+ */
+ path_list = fib_path_list_resolve(path_list);
+ }
+
+ return (path_list_index);
+}
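+
+/*
+ * Usage sketch (illustrative only; 'rpaths' is a caller-built vector
+ * of fib_route_path_t):
+ *
+ *   fib_node_index_t pli;
+ *
+ *   pli = fib_path_list_create(FIB_PATH_LIST_FLAG_SHARED, rpaths);
+ *   fib_path_list_lock(pli);
+ *   ...
+ *   fib_path_list_unlock(pli);
+ *
+ * with the SHARED flag, two callers supplying equivalent paths get
+ * the same index back; each holds its own lock.
+ */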
+
+fib_node_index_t
+fib_path_list_create_special (fib_protocol_t nh_proto,
+ fib_path_list_flags_t flags,
+ const dpo_id_t *dpo)
+{
+ fib_node_index_t path_index, path_list_index;
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_alloc(&path_list_index);
+ path_list->fpl_flags = flags;
+ path_list->fpl_nh_proto = nh_proto;
+
+ path_index =
+ fib_path_create_special(path_list_index,
+ path_list->fpl_nh_proto,
+ fib_path_list_flags_2_path_flags(flags),
+ dpo);
+ vec_add1(path_list->fpl_paths, path_index);
+
+ /*
+ * we don't share path-lists. we can do PIC on them so why bother.
+ */
+ path_list = fib_path_list_resolve(path_list);
+
+ return (path_list_index);
+}
+
+/*
+ * fib_path_list_copy_and_path_add
+ *
+ * Create a copy of a path-list and append one more path to it.
+ * The path-list returned could either have been newly created, or
+ * can be a shared path-list from the data-base.
+ */
+fib_node_index_t
+fib_path_list_copy_and_path_add (fib_node_index_t orig_path_list_index,
+ fib_path_list_flags_t flags,
+ const fib_route_path_t *rpaths)
+{
+ fib_node_index_t path_index, new_path_index, *orig_path_index;
+ fib_path_list_t *path_list, *orig_path_list;
+ fib_node_index_t path_list_index;
+ fib_node_index_t pi;
+
+ ASSERT(1 == vec_len(rpaths));
+
+ /*
+ * alloc the new list before we retrieve the old one, lest
+ * the alloc result in a realloc
+ */
+ path_list = fib_path_list_alloc(&path_list_index);
+
+ orig_path_list = fib_path_list_get(orig_path_list_index);
+
+ FIB_PATH_LIST_DBG(orig_path_list, "copy-add");
+
+ flags = fib_path_list_flags_fixup(flags);
+ path_list->fpl_flags = flags;
+ path_list->fpl_nh_proto = orig_path_list->fpl_nh_proto;
+ vec_validate(path_list->fpl_paths, vec_len(orig_path_list->fpl_paths));
+ pi = 0;
+
+ new_path_index = fib_path_create(path_list_index,
+ path_list->fpl_nh_proto,
+ fib_path_list_flags_2_path_flags(flags),
+ rpaths);
+
+ vec_foreach (orig_path_index, orig_path_list->fpl_paths)
+ {
+ /*
+ * don't add duplicate paths
+ * In the unlikely event the path is a duplicate, then we'll
+ * find a matching path-list later and this one will be toast.
+ */
+ if (0 != fib_path_cmp(new_path_index, *orig_path_index))
+ {
+ path_index = fib_path_copy(*orig_path_index, path_list_index);
+ path_list->fpl_paths[pi++] = path_index;
+ }
+ else
+ {
+ _vec_len(path_list->fpl_paths) = vec_len(orig_path_list->fpl_paths);
+ }
+ }
+
+ path_list->fpl_paths[pi] = new_path_index;
+
+ /*
+ * we sort the paths since the key for the path-list is
+ * the description of the paths it contains. The paths need to
+ * be sorted else this description will differ.
+ */
+ vec_sort_with_function(path_list->fpl_paths, fib_path_cmp_for_sort);
+
+ FIB_PATH_LIST_DBG(path_list, "path-added");
+
+ /*
+ * If a shared path list is requested, consult the DB for a match
+ */
+ if (path_list->fpl_flags & FIB_PATH_LIST_FLAG_SHARED)
+ {
+ fib_node_index_t exist_path_list_index;
+ /*
+ * check for a matching path-list in the DB.
+ * If we find one then we can return the existing one and destroy the
+ * new one just created.
+ */
+ exist_path_list_index = fib_path_list_db_find(path_list);
+ if (FIB_NODE_INDEX_INVALID != exist_path_list_index)
+ {
+ fib_path_list_destroy(path_list);
+
+ path_list_index = exist_path_list_index;
+ }
+ else
+ {
+ /*
+ * if there was not a matching path-list, then this
+ * new one will need inserting into the DB and resolving.
+ */
+ fib_path_list_db_insert(path_list_index);
+
+ path_list = fib_path_list_resolve(path_list);
+ }
+ }
+ else
+ {
+ /*
+ * no shared path list requested. resolve and use the one
+ * just created.
+ */
+ path_list = fib_path_list_resolve(path_list);
+ }
+
+ return (path_list_index);
+}
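+
+/*
+ * Usage sketch (illustrative): grow an ECMP set by one path. The
+ * original path-list is unmodified; the caller moves its lock from
+ * the old index to the new:
+ *
+ *   new_pli = fib_path_list_copy_and_path_add(old_pli,
+ *                                             FIB_PATH_LIST_FLAG_SHARED,
+ *                                             rpaths); // vec_len(rpaths) == 1
+ *   fib_path_list_lock(new_pli);
+ *   fib_path_list_unlock(old_pli);
+ */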
+
+/*
+ * fib_path_list_copy_and_path_remove
+ *
+ * Copy the path-list excluding the path passed.
+ * If the path is the last one, then the index returned will be invalid.
+ * i.e. the path-list is toast.
+ */
+fib_node_index_t
+fib_path_list_copy_and_path_remove (fib_node_index_t orig_path_list_index,
+ fib_path_list_flags_t flags,
+ const fib_route_path_t *rpaths)
+{
+ fib_node_index_t path_index, *orig_path_index, path_list_index, tmp_path_index;
+ fib_path_list_t *path_list, *orig_path_list;
+ fib_node_index_t pi;
+
+ ASSERT(1 == vec_len(rpaths));
+
+ path_list = fib_path_list_alloc(&path_list_index);
+
+ flags = fib_path_list_flags_fixup(flags);
+ orig_path_list = fib_path_list_get(orig_path_list_index);
+
+ FIB_PATH_LIST_DBG(orig_path_list, "copy-remove");
+
+ path_list->fpl_flags = flags;
+ path_list->fpl_nh_proto = orig_path_list->fpl_nh_proto;
+ /*
+ * allocate as many paths as we might need in one go, rather than
+ * using vec_add to do a few at a time.
+ */
+ if (vec_len(orig_path_list->fpl_paths) > 1)
+ {
+ vec_validate(path_list->fpl_paths, vec_len(orig_path_list->fpl_paths) - 2);
+ }
+ pi = 0;
+
+ /*
+ * create a representation of the path to be removed, so it
+ * can be used as a comparison object during the copy.
+ */
+ tmp_path_index = fib_path_create(path_list_index,
+ path_list->fpl_nh_proto,
+ fib_path_list_flags_2_path_flags(flags),
+ rpaths);
+
+ vec_foreach (orig_path_index, orig_path_list->fpl_paths)
+ {
+ if (0 != fib_path_cmp(tmp_path_index, *orig_path_index)) {
+ path_index = fib_path_copy(*orig_path_index, path_list_index);
+ if (pi < vec_len(path_list->fpl_paths))
+ {
+ path_list->fpl_paths[pi++] = path_index;
+ }
+ else
+ {
+ /*
+ * this is the unlikely case that the path being
+ * removed does not match one in the path-list, so
+ * we end up with as many paths as we started with.
+ * the paths vector was sized above with the expectation
+ * that we would have 1 less.
+ */
+ vec_add1(path_list->fpl_paths, path_index);
+ }
+ }
+ }
+
+ /*
+ * done with the temporary now
+ */
+ fib_path_destroy(tmp_path_index);
+
+ /*
+ * if there are no paths, then the new path-list is aborted
+ */
+ if (0 == vec_len(path_list->fpl_paths)) {
+ FIB_PATH_LIST_DBG(path_list, "last-path-removed");
+
+ fib_path_list_destroy(path_list);
+
+ path_list_index = FIB_NODE_INDEX_INVALID;
+ } else {
+ /*
+ * we sort the paths since the key for the path-list is
+ * the description of the paths it contains. The paths need to
+ * be sorted else this description will differ.
+ */
+ vec_sort_with_function(path_list->fpl_paths, fib_path_cmp_for_sort);
+
+ /*
+ * If a shared path list is requested, consult the DB for a match
+ */
+ if (path_list->fpl_flags & FIB_PATH_LIST_FLAG_SHARED)
+ {
+ fib_node_index_t exist_path_list_index;
+
+ /*
+ * check for a matching path-list in the DB.
+ * If we find one then we can return the existing one and destroy the
+ * new one just created.
+ */
+ exist_path_list_index = fib_path_list_db_find(path_list);
+ if (FIB_NODE_INDEX_INVALID != exist_path_list_index)
+ {
+ fib_path_list_destroy(path_list);
+
+ path_list_index = exist_path_list_index;
+ }
+ else
+ {
+ /*
+ * if there was not a matching path-list, then this
+ * new one will need inserting into the DB and resolving.
+ */
+ fib_path_list_db_insert(path_list_index);
+
+ path_list = fib_path_list_resolve(path_list);
+ }
+ }
+ else
+ {
+ /*
+ * no shared path list requested. resolve and use the one
+ * just created.
+ */
+ path_list = fib_path_list_resolve(path_list);
+ }
+ }
+
+ return (path_list_index);
+}
+
+/*
+ * fib_path_list_contribute_forwarding
+ *
+ * Return, via the dpo argument, the load-balance that users of this
+ * path-list should use for forwarding
+ */
+void
+fib_path_list_contribute_forwarding (fib_node_index_t path_list_index,
+ fib_forward_chain_type_t type,
+ dpo_id_t *dpo)
+{
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_get(path_list_index);
+
+ fib_path_list_mk_lb(path_list, type, dpo);
+}
+
+/*
+ * fib_path_list_get_adj
+ *
+ * Return the index of the adjacency for the first path, which users
+ * of this path-list should use for forwarding
+ */
+adj_index_t
+fib_path_list_get_adj (fib_node_index_t path_list_index,
+ fib_forward_chain_type_t type)
+{
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_get(path_list_index);
+ return (fib_path_get_adj(path_list->fpl_paths[0]));
+}
+
+int
+fib_path_list_recursive_loop_detect (fib_node_index_t path_list_index,
+ fib_node_index_t **entry_indicies)
+{
+ fib_node_index_t *path_index;
+ int is_looped, list_looped;
+ fib_path_list_t *path_list;
+
+ list_looped = 0;
+ path_list = fib_path_list_get(path_list_index);
+
+ vec_foreach (path_index, path_list->fpl_paths)
+ {
+ fib_node_index_t *copy, **copy_ptr;
+
+ /*
+ * we need a copy of the nodes visited, so that the entries added
+ * while exploring the nth path (on which a loop was detected) are
+ * not treated as already visited when searching the (n+1)th path,
+ * which would find a loop that does not exist.
+ */
+ copy = vec_dup(*entry_indicies);
+ copy_ptr = &copy;
+
+ is_looped = fib_path_recursive_loop_detect(*path_index, copy_ptr);
+ list_looped += is_looped;
+
+ /*
+ * done with the copy of the visited entries for this path
+ */
+ vec_free(copy);
+ }
+
+ FIB_PATH_LIST_DBG(path_list, "loop-detect: eval:%d", list_looped);
+
+ if (list_looped)
+ {
+ path_list->fpl_flags |= FIB_PATH_LIST_FLAG_LOOPED;
+ }
+ else
+ {
+ path_list->fpl_flags &= ~FIB_PATH_LIST_FLAG_LOOPED;
+ }
+
+ return (list_looped);
+}
+
+u32
+fib_path_list_child_add (fib_node_index_t path_list_index,
+ fib_node_type_t child_type,
+ fib_node_index_t child_index)
+{
+ return (fib_node_child_add(FIB_NODE_TYPE_PATH_LIST,
+ path_list_index,
+ child_type,
+ child_index));
+}
+
+void
+fib_path_list_child_remove (fib_node_index_t path_list_index,
+ u32 si)
+{
+ fib_node_child_remove(FIB_NODE_TYPE_PATH_LIST,
+ path_list_index,
+ si);
+}
+
+void
+fib_path_list_lock(fib_node_index_t path_list_index)
+{
+ fib_path_list_t *path_list;
+
+ if (FIB_NODE_INDEX_INVALID != path_list_index)
+ {
+ path_list = fib_path_list_get(path_list_index);
+
+ fib_node_lock(&path_list->fpl_node);
+ FIB_PATH_LIST_DBG(path_list, "lock");
+ }
+}
+
+void
+fib_path_list_unlock (fib_node_index_t path_list_index)
+{
+ fib_path_list_t *path_list;
+
+ if (FIB_NODE_INDEX_INVALID != path_list_index)
+ {
+ path_list = fib_path_list_get(path_list_index);
+ FIB_PATH_LIST_DBG(path_list, "unlock");
+
+ fib_node_unlock(&path_list->fpl_node);
+ }
+}
+
+u32
+fib_path_list_pool_size (void)
+{
+ return (pool_elts(fib_path_list_pool));
+}
+
+u32
+fib_path_list_db_size (void)
+{
+ return (hash_elts(fib_path_list_db));
+}
+
+void
+fib_path_list_walk (fib_node_index_t path_list_index,
+ fib_path_list_walk_fn_t func,
+ void *ctx)
+{
+ fib_node_index_t *path_index;
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_get(path_list_index);
+
+ vec_foreach(path_index, path_list->fpl_paths)
+ {
+ if (!func(path_list_index, *path_index, ctx))
+ break;
+ }
+}
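+
+/*
+ * Illustrative walk callback (hypothetical helper): count a list's
+ * paths. A non-zero return continues the walk; 0 stops it.
+ *
+ *   static int
+ *   count_path_cb (fib_node_index_t pl_index,
+ *                  fib_node_index_t path_index,
+ *                  void *ctx)
+ *   {
+ *       u32 *count = ctx;
+ *
+ *       (*count)++;
+ *       return (1);
+ *   }
+ *
+ *   u32 n_paths = 0;
+ *   fib_path_list_walk(pli, count_path_cb, &n_paths);
+ */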
+
+
+void
+fib_path_list_module_init (void)
+{
+ fib_node_register_type (FIB_NODE_TYPE_PATH_LIST, &fib_path_list_vft);
+
+ fib_path_list_db = hash_create2 (/* elts */ 0,
+ /* user */ 0,
+ /* value_bytes */ sizeof (fib_node_index_t),
+ fib_path_list_db_hash_key_sum,
+ fib_path_list_db_hash_key_equal,
+ /* format pair/arg */
+ 0, 0);
+}
+
+static clib_error_t *
+show_fib_path_list_command (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ fib_path_list_t *path_list;
+ fib_node_index_t pli;
+
+ if (unformat (input, "%d", &pli))
+ {
+ /*
+ * show one in detail
+ */
+ if (!pool_is_free_index(fib_path_list_pool, pli))
+ {
+ path_list = fib_path_list_get(pli);
+ u8 *s = fib_path_list_format(pli, NULL);
+ s = format(s, "children:");
+ s = fib_node_children_format(path_list->fpl_node.fn_children, s);
+ vlib_cli_output (vm, "%s", s);
+ vec_free(s);
+ }
+ else
+ {
+ vlib_cli_output (vm, "path list %d invalid", pli);
+ }
+ }
+ else
+ {
+ /*
+ * show all
+ */
+ vlib_cli_output (vm, "FIB Path Lists");
+ pool_foreach(path_list, fib_path_list_pool,
+ ({
+ vlib_cli_output (vm, "%U", format_fib_path_list, path_list);
+ }));
+ }
+ return (NULL);
+}
+
+VLIB_CLI_COMMAND (show_fib_path_list, static) = {
+ .path = "show fib path-lists",
+ .function = show_fib_path_list_command,
+ .short_help = "show fib path-lists",
+};
diff --git a/src/vnet/fib/fib_path_list.h b/src/vnet/fib/fib_path_list.h
new file mode 100644
index 00000000000..8bc1b20b6bf
--- /dev/null
+++ b/src/vnet/fib/fib_path_list.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FIB_PATH_LIST_H__
+#define __FIB_PATH_LIST_H__
+
+#include <vlib/vlib.h>
+#include <vnet/adj/adj.h>
+
+#include "fib_node.h"
+#include "fib_path.h"
+
+/**
+ * Enumeration of path-list flags.
+ */
+typedef enum fib_path_list_attribute_t_ {
+ /**
+ * Marker. Add new flags after this one.
+ */
+ FIB_PATH_LIST_ATTRIBUTE_FIRST = 0,
+ /**
+ * This path list is shareable. Shareable path-lists
+ * are inserted into the path-list data-base.
+ * All path-lists are inherently shareable; the reason we share some and
+ * not others is to limit the size of the path-list database, which must
+ * be searched for each route update.
+ */
+ FIB_PATH_LIST_ATTRIBUTE_SHARED = FIB_PATH_LIST_ATTRIBUTE_FIRST,
+ /**
+ * explicit drop path-list. Used when the entry source needs to
+ * force a drop, despite the fact the path info is present.
+ */
+ FIB_PATH_LIST_ATTRIBUTE_DROP,
+ /**
+ * explicit local path-list.
+ */
+ FIB_PATH_LIST_ATTRIBUTE_LOCAL,
+ /**
+ * exclusive path-list. Exclusive means the path will resolve via the
+ * exclusive (user provided) adj.
+ */
+ FIB_PATH_LIST_ATTRIBUTE_EXCLUSIVE,
+ /**
+ * resolved path-list
+ */
+ FIB_PATH_LIST_ATTRIBUTE_RESOLVED,
+ /**
+ * looped path-list. one path looped implies the whole list is
+ */
+ FIB_PATH_LIST_ATTRIBUTE_LOOPED,
+ /**
+ * Marker. Add new flags before this one, and then update it.
+ */
+ FIB_PATH_LIST_ATTRIBUTE_LAST = FIB_PATH_LIST_ATTRIBUTE_LOOPED,
+} fib_path_list_attribute_t;
+
+typedef enum fib_path_list_flags_t_ {
+ FIB_PATH_LIST_FLAG_NONE = 0,
+ FIB_PATH_LIST_FLAG_SHARED = (1 << FIB_PATH_LIST_ATTRIBUTE_SHARED),
+ FIB_PATH_LIST_FLAG_DROP = (1 << FIB_PATH_LIST_ATTRIBUTE_DROP),
+ FIB_PATH_LIST_FLAG_LOCAL = (1 << FIB_PATH_LIST_ATTRIBUTE_LOCAL),
+ FIB_PATH_LIST_FLAG_EXCLUSIVE = (1 << FIB_PATH_LIST_ATTRIBUTE_EXCLUSIVE),
+ FIB_PATH_LIST_FLAG_RESOLVED = (1 << FIB_PATH_LIST_ATTRIBUTE_RESOLVED),
+ FIB_PATH_LIST_FLAG_LOOPED = (1 << FIB_PATH_LIST_ATTRIBUTE_LOOPED),
+} fib_path_list_flags_t;
+
+#define FIB_PATH_LIST_ATTRIBUTES { \
+ [FIB_PATH_LIST_ATTRIBUTE_SHARED] = "shared", \
+ [FIB_PATH_LIST_ATTRIBUTE_RESOLVED] = "resolved", \
+ [FIB_PATH_LIST_ATTRIBUTE_DROP] = "drop", \
+ [FIB_PATH_LIST_ATTRIBUTE_EXCLUSIVE] = "exclusive", \
+ [FIB_PATH_LIST_ATTRIBUTE_LOCAL] = "local", \
+ [FIB_PATH_LIST_ATTRIBUTE_LOOPED] = "looped", \
+}
+
+#define FOR_EACH_PATH_LIST_ATTRIBUTE(_item) \
+ for (_item = FIB_PATH_LIST_ATTRIBUTE_FIRST; \
+ _item <= FIB_PATH_LIST_ATTRIBUTE_LAST; \
+ _item++)
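+
+/*
+ * Illustrative use of the tables above (hypothetical format helper;
+ * 's' and 'flags' are assumed locals):
+ *
+ *   const char *names[] = FIB_PATH_LIST_ATTRIBUTES;
+ *   fib_path_list_attribute_t attr;
+ *
+ *   FOR_EACH_PATH_LIST_ATTRIBUTE(attr) {
+ *       if (flags & (1 << attr))
+ *           s = format(s, "%s,", names[attr]);
+ *   }
+ */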
+
+extern fib_node_index_t fib_path_list_create(fib_path_list_flags_t flags,
+ const fib_route_path_t *paths);
+extern fib_node_index_t fib_path_list_create_special(fib_protocol_t nh_proto,
+ fib_path_list_flags_t flags,
+ const dpo_id_t *dpo);
+
+extern fib_node_index_t fib_path_list_copy_and_path_add(
+ fib_node_index_t pl_index,
+ fib_path_list_flags_t flags,
+ const fib_route_path_t *path);
+extern fib_node_index_t fib_path_list_copy_and_path_remove(
+ fib_node_index_t pl_index,
+ fib_path_list_flags_t flags,
+ const fib_route_path_t *path);
+extern void fib_path_list_contribute_forwarding(fib_node_index_t path_list_index,
+ fib_forward_chain_type_t type,
+ dpo_id_t *dpo);
+extern void fib_path_list_contribute_urpf(fib_node_index_t path_list_index,
+ index_t urpf);
+extern index_t fib_path_list_get_urpf(fib_node_index_t path_list_index);
+extern adj_index_t fib_path_list_get_adj(fib_node_index_t path_list_index,
+ fib_forward_chain_type_t type);
+
+extern u32 fib_path_list_child_add(fib_node_index_t pl_index,
+ fib_node_type_t type,
+ fib_node_index_t child_index);
+extern void fib_path_list_child_remove(fib_node_index_t pl_index,
+ fib_node_index_t sibling_index);
+extern void fib_path_list_back_walk(fib_node_index_t pl_index,
+ fib_node_back_walk_ctx_t *ctx);
+extern void fib_path_list_lock(fib_node_index_t pl_index);
+extern void fib_path_list_unlock(fib_node_index_t pl_index);
+extern int fib_path_list_recursive_loop_detect(fib_node_index_t path_list_index,
+ fib_node_index_t **entry_indicies);
+extern u32 fib_path_list_get_resolving_interface(fib_node_index_t path_list_index);
+extern int fib_path_list_is_looped(fib_node_index_t path_list_index);
+extern fib_protocol_t fib_path_list_get_proto(fib_node_index_t path_list_index);
+extern u8 * fib_path_list_format(fib_node_index_t pl_index,
+ u8 * s);
+extern u8 * fib_path_list_adjs_format(fib_node_index_t pl_index,
+ u32 indent,
+ u8 * s);
+extern index_t fib_path_list_lb_map_add_or_lock(fib_node_index_t pl_index,
+ const fib_node_index_t *pis);
+/**
+ * A callback function type for walking a path-list's paths
+ */
+typedef int (*fib_path_list_walk_fn_t)(fib_node_index_t pl_index,
+ fib_node_index_t path_index,
+ void *ctx);
+
+extern void fib_path_list_walk(fib_node_index_t pl_index,
+ fib_path_list_walk_fn_t func,
+ void *ctx);
+
+extern void fib_path_list_module_init(void);
+
+/*
+ * functions for testing.
+ */
+u32 fib_path_list_pool_size(void);
+u32 fib_path_list_db_size(void);
+
+#endif
diff --git a/src/vnet/fib/fib_table.c b/src/vnet/fib/fib_table.c
new file mode 100644
index 00000000000..76db42d0ec7
--- /dev/null
+++ b/src/vnet/fib/fib_table.c
@@ -0,0 +1,1104 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/dpo/drop_dpo.h>
+
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/fib_entry_cover.h>
+#include <vnet/fib/fib_internal.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/fib/mpls_fib.h>
+
+fib_table_t *
+fib_table_get (fib_node_index_t index,
+ fib_protocol_t proto)
+{
+ switch (proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (pool_elt_at_index(ip4_main.fibs, index));
+ case FIB_PROTOCOL_IP6:
+ return (pool_elt_at_index(ip6_main.fibs, index));
+ case FIB_PROTOCOL_MPLS:
+ return (pool_elt_at_index(mpls_main.fibs, index));
+ }
+ ASSERT(0);
+ return (NULL);
+}
+
+static inline fib_node_index_t
+fib_table_lookup_i (fib_table_t *fib_table,
+ const fib_prefix_t *prefix)
+{
+ switch (prefix->fp_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (ip4_fib_table_lookup(&fib_table->v4,
+ &prefix->fp_addr.ip4,
+ prefix->fp_len));
+ case FIB_PROTOCOL_IP6:
+ return (ip6_fib_table_lookup(fib_table->ft_index,
+ &prefix->fp_addr.ip6,
+ prefix->fp_len));
+ case FIB_PROTOCOL_MPLS:
+ return (mpls_fib_table_lookup(&fib_table->mpls,
+ prefix->fp_label,
+ prefix->fp_eos));
+ }
+ return (FIB_NODE_INDEX_INVALID);
+}
+
+fib_node_index_t
+fib_table_lookup (u32 fib_index,
+ const fib_prefix_t *prefix)
+{
+ return (fib_table_lookup_i(fib_table_get(fib_index, prefix->fp_proto), prefix));
+}
+
+static inline fib_node_index_t
+fib_table_lookup_exact_match_i (const fib_table_t *fib_table,
+ const fib_prefix_t *prefix)
+{
+ switch (prefix->fp_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (ip4_fib_table_lookup_exact_match(&fib_table->v4,
+ &prefix->fp_addr.ip4,
+ prefix->fp_len));
+ case FIB_PROTOCOL_IP6:
+ return (ip6_fib_table_lookup_exact_match(fib_table->ft_index,
+ &prefix->fp_addr.ip6,
+ prefix->fp_len));
+ case FIB_PROTOCOL_MPLS:
+ return (mpls_fib_table_lookup(&fib_table->mpls,
+ prefix->fp_label,
+ prefix->fp_eos));
+ }
+ return (FIB_NODE_INDEX_INVALID);
+}
+
+fib_node_index_t
+fib_table_lookup_exact_match (u32 fib_index,
+ const fib_prefix_t *prefix)
+{
+ return (fib_table_lookup_exact_match_i(fib_table_get(fib_index,
+ prefix->fp_proto),
+ prefix));
+}
+
+static fib_node_index_t
+fib_table_get_less_specific_i (fib_table_t *fib_table,
+ const fib_prefix_t *prefix)
+{
+ fib_prefix_t pfx;
+
+ pfx = *prefix;
+
+ if (FIB_PROTOCOL_MPLS == pfx.fp_proto)
+ {
+ return (FIB_NODE_INDEX_INVALID);
+ }
+
+ /*
+ * in the absence of a tree structure for the table that allows for an O(1)
+ * parent get, a cheeky way to find the cover is to LPM for the prefix with
+ * mask-1.
+ * there should always be a cover, though it may be the default route. the
+ * default route's cover is the default route.
+ */
+ if (pfx.fp_len != 0) {
+ pfx.fp_len -= 1;
+ }
+
+ return (fib_table_lookup_i(fib_table, &pfx));
+}
+
+fib_node_index_t
+fib_table_get_less_specific (u32 fib_index,
+ const fib_prefix_t *prefix)
+{
+ return (fib_table_get_less_specific_i(fib_table_get(fib_index,
+ prefix->fp_proto),
+ prefix));
+}
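+
+/*
+ * e.g. the cover of 10.1.1.0/24 is found by an LPM on 10.1.1.0/23;
+ * the /24 itself cannot match at that mask length, so the result is
+ * the longest strictly less specific covering entry, falling back to
+ * 0.0.0.0/0, whose cover is itself.
+ */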
+
+static void
+fib_table_entry_remove (fib_table_t *fib_table,
+ const fib_prefix_t *prefix,
+ fib_node_index_t fib_entry_index)
+{
+ vlib_smp_unsafe_warning();
+
+ fib_table->ft_total_route_counts--;
+
+ switch (prefix->fp_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ ip4_fib_table_entry_remove(&fib_table->v4,
+ &prefix->fp_addr.ip4,
+ prefix->fp_len);
+ break;
+ case FIB_PROTOCOL_IP6:
+ ip6_fib_table_entry_remove(fib_table->ft_index,
+ &prefix->fp_addr.ip6,
+ prefix->fp_len);
+ break;
+ case FIB_PROTOCOL_MPLS:
+ mpls_fib_table_entry_remove(&fib_table->mpls,
+ prefix->fp_label,
+ prefix->fp_eos);
+ break;
+ }
+
+ fib_entry_unlock(fib_entry_index);
+}
+
+static void
+fib_table_post_insert_actions (fib_table_t *fib_table,
+ const fib_prefix_t *prefix,
+ fib_node_index_t fib_entry_index)
+{
+ fib_node_index_t fib_entry_cover_index;
+
+ /*
+ * no cover relationships in the MPLS FIB
+ */
+ if (FIB_PROTOCOL_MPLS == prefix->fp_proto)
+ return;
+
+ /*
+ * find and inform the covering entry that a new more specific
+ * has been inserted beneath it
+ */
+ fib_entry_cover_index = fib_table_get_less_specific_i(fib_table, prefix);
+ /*
+ * the indices are the same when the default route is first added
+ */
+ if (fib_entry_cover_index != fib_entry_index)
+ {
+ fib_entry_cover_change_notify(fib_entry_cover_index,
+ fib_entry_index);
+ }
+}
+
+static void
+fib_table_entry_insert (fib_table_t *fib_table,
+ const fib_prefix_t *prefix,
+ fib_node_index_t fib_entry_index)
+{
+ vlib_smp_unsafe_warning();
+
+ fib_entry_lock(fib_entry_index);
+ fib_table->ft_total_route_counts++;
+
+ switch (prefix->fp_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ ip4_fib_table_entry_insert(&fib_table->v4,
+ &prefix->fp_addr.ip4,
+ prefix->fp_len,
+ fib_entry_index);
+ break;
+ case FIB_PROTOCOL_IP6:
+ ip6_fib_table_entry_insert(fib_table->ft_index,
+ &prefix->fp_addr.ip6,
+ prefix->fp_len,
+ fib_entry_index);
+ break;
+ case FIB_PROTOCOL_MPLS:
+ mpls_fib_table_entry_insert(&fib_table->mpls,
+ prefix->fp_label,
+ prefix->fp_eos,
+ fib_entry_index);
+ break;
+ }
+
+ fib_table_post_insert_actions(fib_table, prefix, fib_entry_index);
+}
+
+void
+fib_table_fwding_dpo_update (u32 fib_index,
+ const fib_prefix_t *prefix,
+ const dpo_id_t *dpo)
+{
+ vlib_smp_unsafe_warning();
+
+ switch (prefix->fp_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (ip4_fib_table_fwding_dpo_update(ip4_fib_get(fib_index),
+ &prefix->fp_addr.ip4,
+ prefix->fp_len,
+ dpo));
+ case FIB_PROTOCOL_IP6:
+ return (ip6_fib_table_fwding_dpo_update(fib_index,
+ &prefix->fp_addr.ip6,
+ prefix->fp_len,
+ dpo));
+ case FIB_PROTOCOL_MPLS:
+ return (mpls_fib_forwarding_table_update(mpls_fib_get(fib_index),
+ prefix->fp_label,
+ prefix->fp_eos,
+ dpo));
+ }
+}
+
+void
+fib_table_fwding_dpo_remove (u32 fib_index,
+ const fib_prefix_t *prefix,
+ const dpo_id_t *dpo)
+{
+ vlib_smp_unsafe_warning();
+
+ switch (prefix->fp_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (ip4_fib_table_fwding_dpo_remove(ip4_fib_get(fib_index),
+ &prefix->fp_addr.ip4,
+ prefix->fp_len,
+ dpo));
+ case FIB_PROTOCOL_IP6:
+ return (ip6_fib_table_fwding_dpo_remove(fib_index,
+ &prefix->fp_addr.ip6,
+ prefix->fp_len,
+ dpo));
+ case FIB_PROTOCOL_MPLS:
+ return (mpls_fib_forwarding_table_reset(mpls_fib_get(fib_index),
+ prefix->fp_label,
+ prefix->fp_eos));
+ }
+}
+
+
+fib_node_index_t
+fib_table_entry_special_dpo_add (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo)
+{
+ fib_node_index_t fib_entry_index;
+ fib_table_t *fib_table;
+
+ fib_table = fib_table_get(fib_index, prefix->fp_proto);
+ fib_entry_index = fib_table_lookup_exact_match_i(fib_table, prefix);
+
+ if (FIB_NODE_INDEX_INVALID == fib_entry_index)
+ {
+ fib_entry_index = fib_entry_create_special(fib_index, prefix,
+ source, flags,
+ dpo);
+
+ fib_table_entry_insert(fib_table, prefix, fib_entry_index);
+ fib_table->ft_src_route_counts[source]++;
+ }
+ else
+ {
+ int was_sourced;
+
+ was_sourced = fib_entry_is_sourced(fib_entry_index, source);
+ fib_entry_special_add(fib_entry_index, source, flags, dpo);
+
+ if (was_sourced != fib_entry_is_sourced(fib_entry_index, source))
+ {
+ fib_table->ft_src_route_counts[source]++;
+ }
+ }
+
+ return (fib_entry_index);
+}
+
+fib_node_index_t
+fib_table_entry_special_dpo_update (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo)
+{
+ fib_node_index_t fib_entry_index;
+ fib_table_t *fib_table;
+
+ fib_table = fib_table_get(fib_index, prefix->fp_proto);
+ fib_entry_index = fib_table_lookup_exact_match_i(fib_table, prefix);
+
+ if (FIB_NODE_INDEX_INVALID == fib_entry_index)
+ {
+ fib_entry_index = fib_entry_create_special(fib_index, prefix,
+ source, flags,
+ dpo);
+
+ fib_table_entry_insert(fib_table, prefix, fib_entry_index);
+ fib_table->ft_src_route_counts[source]++;
+ }
+ else
+ {
+ int was_sourced;
+
+ was_sourced = fib_entry_is_sourced(fib_entry_index, source);
+
+ if (was_sourced)
+ fib_entry_special_update(fib_entry_index, source, flags, dpo);
+ else
+ fib_entry_special_add(fib_entry_index, source, flags, dpo);
+
+ if (was_sourced != fib_entry_is_sourced(fib_entry_index, source))
+ {
+ fib_table->ft_src_route_counts[source]++;
+ }
+ }
+
+ return (fib_entry_index);
+}
+
+fib_node_index_t
+fib_table_entry_special_add (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ adj_index_t adj_index)
+{
+ fib_node_index_t fib_entry_index;
+ dpo_id_t tmp_dpo = DPO_INVALID;
+
+ if (ADJ_INDEX_INVALID != adj_index)
+ {
+ dpo_set(&tmp_dpo,
+ DPO_ADJACENCY,
+ FIB_PROTOCOL_MAX,
+ adj_index);
+ }
+ else
+ {
+ dpo_copy(&tmp_dpo, drop_dpo_get(fib_proto_to_dpo(prefix->fp_proto)));
+ }
+
+ fib_entry_index = fib_table_entry_special_dpo_add(fib_index, prefix, source,
+ flags, &tmp_dpo);
+
+ dpo_unlock(&tmp_dpo);
+
+ return (fib_entry_index);
+}
+
+void
+fib_table_entry_special_remove (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source)
+{
+ /*
+ * 1 is it present
+ * yes => remove source
+ * 2 - is it still sourced?
+ * no => cover walk
+ */
+ fib_node_index_t fib_entry_index;
+ fib_table_t *fib_table;
+
+ fib_table = fib_table_get(fib_index, prefix->fp_proto);
+ fib_entry_index = fib_table_lookup_exact_match_i(fib_table, prefix);
+
+ if (FIB_NODE_INDEX_INVALID == fib_entry_index)
+ {
+ /*
+ * removing an entry that does not exist. i'll allow it.
+ */
+ }
+ else
+ {
+ fib_entry_src_flag_t src_flag;
+ int was_sourced;
+
+ /*
+ * don't nobody go nowhere
+ */
+ fib_entry_lock(fib_entry_index);
+ was_sourced = fib_entry_is_sourced(fib_entry_index, source);
+
+ src_flag = fib_entry_special_remove(fib_entry_index, source);
+
+ if (!(FIB_ENTRY_SRC_FLAG_ADDED & src_flag))
+ {
+ /*
+ * last source gone. remove from the table
+ */
+ fib_table_entry_remove(fib_table, prefix, fib_entry_index);
+
+ /*
+ * now the entry is no longer in the table, we can
+ * inform the entries that it covers to re-calculate their cover
+ */
+ fib_entry_cover_change_notify(fib_entry_index,
+ FIB_NODE_INDEX_INVALID);
+ }
+ /*
+ * else
+ * still has sources, leave it be.
+ */
+ if (was_sourced != fib_entry_is_sourced(fib_entry_index, source))
+ {
+ fib_table->ft_src_route_counts[source]--;
+ }
+
+ fib_entry_unlock(fib_entry_index);
+ }
+}
+
+/**
+ * fib_table_route_path_fixup
+ *
+ * Convert attached hosts to attached next-hops.
+ *
+ * This special case is required because an attached path will link to a
+ * glean, and the FIB entry will have the interface or API/CLI source. When
+ * the ARP/ND process completes, that source (which will provide a
+ * complete adjacency) will be lower priority, so the FIB entry would
+ * remain linked to a glean and traffic would never reach the hosts. For
+ * an ATTACHED_HOST path we can link the path directly to the [incomplete]
+ * adjacency.
+ */
+static void
+fib_table_route_path_fixup (const fib_prefix_t *prefix,
+ fib_route_path_t *path)
+{
+ if (fib_prefix_is_host(prefix) &&
+ ip46_address_is_zero(&path->frp_addr) &&
+ path->frp_sw_if_index != ~0)
+ {
+ path->frp_addr = prefix->fp_addr;
+ }
+}
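+
+/*
+ * e.g. an attached-host route for 10.1.1.5/32, given with a zero
+ * next-hop address and a valid sw_if_index, is fixed up so that
+ * path->frp_addr = 10.1.1.5; the path then resolves via the
+ * [incomplete] adjacency for that host rather than via the glean.
+ */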
+
+fib_node_index_t
+fib_table_entry_path_add (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ fib_protocol_t next_hop_proto,
+ const ip46_address_t *next_hop,
+ u32 next_hop_sw_if_index,
+ u32 next_hop_fib_index,
+ u32 next_hop_weight,
+ mpls_label_t *next_hop_labels,
+ fib_route_path_flags_t path_flags)
+{
+ fib_route_path_t path = {
+ .frp_proto = next_hop_proto,
+ .frp_addr = (NULL == next_hop? zero_addr : *next_hop),
+ .frp_sw_if_index = next_hop_sw_if_index,
+ .frp_fib_index = next_hop_fib_index,
+ .frp_weight = next_hop_weight,
+ .frp_flags = path_flags,
+ .frp_label_stack = next_hop_labels,
+ };
+ fib_node_index_t fib_entry_index;
+ fib_route_path_t *paths = NULL;
+
+ vec_add1(paths, path);
+
+ fib_entry_index = fib_table_entry_path_add2(fib_index, prefix,
+ source, flags, paths);
+
+ vec_free(paths);
+ return (fib_entry_index);
+}
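+
+/*
+ * Usage sketch (illustrative values): add 10.1.1.0/24 via next-hop
+ * 10.1.1.1 on sw_if_index 1 to the IPv4 table at fib_index 0:
+ *
+ *   fib_prefix_t pfx = {
+ *       .fp_proto = FIB_PROTOCOL_IP4,
+ *       .fp_len = 24,
+ *       .fp_addr.ip4.as_u32 = clib_host_to_net_u32(0x0a010100),
+ *   };
+ *   ip46_address_t nh = {
+ *       .ip4.as_u32 = clib_host_to_net_u32(0x0a010101),
+ *   };
+ *
+ *   fib_table_entry_path_add(0, &pfx, FIB_SOURCE_API,
+ *                            FIB_ENTRY_FLAG_NONE,
+ *                            FIB_PROTOCOL_IP4, &nh,
+ *                            1,    // next_hop_sw_if_index
+ *                            ~0,   // next_hop_fib_index (not recursive)
+ *                            1,    // next_hop_weight
+ *                            NULL, // no out-going labels
+ *                            FIB_ROUTE_PATH_FLAG_NONE);
+ */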
+
+fib_node_index_t
+fib_table_entry_path_add2 (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ fib_route_path_t *rpath)
+{
+ fib_node_index_t fib_entry_index;
+ fib_table_t *fib_table;
+ u32 ii;
+
+ fib_table = fib_table_get(fib_index, prefix->fp_proto);
+ fib_entry_index = fib_table_lookup_exact_match_i(fib_table, prefix);
+
+ for (ii = 0; ii < vec_len(rpath); ii++)
+ {
+ fib_table_route_path_fixup(prefix, &rpath[ii]);
+ }
+
+ if (FIB_NODE_INDEX_INVALID == fib_entry_index)
+ {
+ fib_entry_index = fib_entry_create(fib_index, prefix,
+ source, flags,
+ rpath);
+
+ fib_table_entry_insert(fib_table, prefix, fib_entry_index);
+ fib_table->ft_src_route_counts[source]++;
+ }
+ else
+ {
+ int was_sourced;
+
+ was_sourced = fib_entry_is_sourced(fib_entry_index, source);
+ fib_entry_path_add(fib_entry_index, source, flags, rpath);
+
+ if (was_sourced != fib_entry_is_sourced(fib_entry_index, source))
+ {
+ fib_table->ft_src_route_counts[source]++;
+ }
+ }
+
+ return (fib_entry_index);
+}
+
+void
+fib_table_entry_path_remove2 (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_route_path_t *rpath)
+{
+ /*
+ * 1 is it present
+ * yes => remove source
+ * 2 - is it still sourced?
+ * no => cover walk
+ */
+ fib_node_index_t fib_entry_index;
+ fib_table_t *fib_table;
+ u32 ii;
+
+ fib_table = fib_table_get(fib_index, prefix->fp_proto);
+ fib_entry_index = fib_table_lookup_exact_match_i(fib_table, prefix);
+
+ for (ii = 0; ii < vec_len(rpath); ii++)
+ {
+ fib_table_route_path_fixup(prefix, &rpath[ii]);
+ }
+
+ if (FIB_NODE_INDEX_INVALID == fib_entry_index)
+ {
+ /*
+ * removing an entry that does not exist. i'll allow it.
+ */
+ }
+ else
+ {
+ fib_entry_src_flag_t src_flag;
+ int was_sourced;
+
+ /*
+ * don't nobody go nowhere
+ */
+ fib_entry_lock(fib_entry_index);
+ was_sourced = fib_entry_is_sourced(fib_entry_index, source);
+
+ src_flag = fib_entry_path_remove(fib_entry_index, source, rpath);
+
+ if (!(FIB_ENTRY_SRC_FLAG_ADDED & src_flag))
+ {
+ /*
+ * last source gone. remove from the table
+ */
+ fib_table_entry_remove(fib_table, prefix, fib_entry_index);
+
+ /*
+ * now the entry is no longer in the table, we can
+ * inform the entries that it covers to re-calculate their cover
+ */
+ fib_entry_cover_change_notify(fib_entry_index,
+ FIB_NODE_INDEX_INVALID);
+ }
+ /*
+ * else
+ * still has sources, leave it be.
+ */
+ if (was_sourced != fib_entry_is_sourced(fib_entry_index, source))
+ {
+ fib_table->ft_src_route_counts[source]--;
+ }
+
+ fib_entry_unlock(fib_entry_index);
+ }
+}
+
+void
+fib_table_entry_path_remove (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_protocol_t next_hop_proto,
+ const ip46_address_t *next_hop,
+ u32 next_hop_sw_if_index,
+ u32 next_hop_fib_index,
+ u32 next_hop_weight,
+ fib_route_path_flags_t path_flags)
+{
+ /*
+ * 1 is it present
+ * yes => remove source
+ * 2 - is it still sourced?
+ * no => cover walk
+ */
+ fib_route_path_t path = {
+ .frp_proto = next_hop_proto,
+ .frp_addr = (NULL == next_hop? zero_addr : *next_hop),
+ .frp_sw_if_index = next_hop_sw_if_index,
+ .frp_fib_index = next_hop_fib_index,
+ .frp_weight = next_hop_weight,
+ .frp_flags = path_flags,
+ };
+ fib_route_path_t *paths = NULL;
+
+ fib_table_route_path_fixup(prefix, &path);
+ vec_add1(paths, path);
+
+ fib_table_entry_path_remove2(fib_index, prefix, source, paths);
+
+ vec_free(paths);
+}
+
+static int
+fib_route_path_cmp_for_sort (void * v1,
+ void * v2)
+{
+ return (fib_route_path_cmp(v1, v2));
+}
+
+fib_node_index_t
+fib_table_entry_update (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ fib_route_path_t *paths)
+{
+ fib_node_index_t fib_entry_index;
+ fib_table_t *fib_table;
+ u32 ii;
+
+ fib_table = fib_table_get(fib_index, prefix->fp_proto);
+ fib_entry_index = fib_table_lookup_exact_match_i(fib_table, prefix);
+
+ for (ii = 0; ii < vec_len(paths); ii++)
+ {
+ fib_table_route_path_fixup(prefix, &paths[ii]);
+ }
+ /*
+ * sort the paths provided by the control plane. this means
+ * the paths and the extension on the entry will be sorted.
+ */
+ vec_sort_with_function(paths, fib_route_path_cmp_for_sort);
+
+ if (FIB_NODE_INDEX_INVALID == fib_entry_index)
+ {
+ fib_entry_index = fib_entry_create(fib_index, prefix,
+ source, flags,
+ paths);
+
+ fib_table_entry_insert(fib_table, prefix, fib_entry_index);
+ fib_table->ft_src_route_counts[source]++;
+ }
+ else
+ {
+ int was_sourced;
+
+ was_sourced = fib_entry_is_sourced(fib_entry_index, source);
+ fib_entry_update(fib_entry_index, source, flags, paths);
+
+ if (was_sourced != fib_entry_is_sourced(fib_entry_index, source))
+ {
+ fib_table->ft_src_route_counts[source]++;
+ }
+ }
+
+ return (fib_entry_index);
+}
+
+fib_node_index_t
+fib_table_entry_update_one_path (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ fib_protocol_t next_hop_proto,
+ const ip46_address_t *next_hop,
+ u32 next_hop_sw_if_index,
+ u32 next_hop_fib_index,
+ u32 next_hop_weight,
+ mpls_label_t *next_hop_labels,
+ fib_route_path_flags_t path_flags)
+{
+ fib_node_index_t fib_entry_index;
+ fib_route_path_t path = {
+ .frp_proto = next_hop_proto,
+ .frp_addr = (NULL == next_hop? zero_addr : *next_hop),
+ .frp_sw_if_index = next_hop_sw_if_index,
+ .frp_fib_index = next_hop_fib_index,
+ .frp_weight = next_hop_weight,
+ .frp_flags = path_flags,
+ .frp_label_stack = next_hop_labels,
+ };
+ fib_route_path_t *paths = NULL;
+
+ fib_table_route_path_fixup(prefix, &path);
+ vec_add1(paths, path);
+
+ fib_entry_index =
+ fib_table_entry_update(fib_index, prefix, source, flags, paths);
+
+ vec_free(paths);
+
+ return (fib_entry_index);
+}
+
+static void
+fib_table_entry_delete_i (u32 fib_index,
+ fib_node_index_t fib_entry_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source)
+{
+ fib_entry_src_flag_t src_flag;
+ fib_table_t *fib_table;
+ int was_sourced;
+
+ fib_table = fib_table_get(fib_index, prefix->fp_proto);
+ was_sourced = fib_entry_is_sourced(fib_entry_index, source);
+
+ /*
+ * don't nobody go nowhere
+ */
+ fib_entry_lock(fib_entry_index);
+
+ src_flag = fib_entry_delete(fib_entry_index, source);
+
+ if (!(FIB_ENTRY_SRC_FLAG_ADDED & src_flag))
+ {
+ /*
+ * last source gone. remove from the table
+ */
+ fib_table_entry_remove(fib_table, prefix, fib_entry_index);
+
+ /*
+ * now the entry is no longer in the table, we can
+ * inform the entries that it covers to re-calculate their cover
+ */
+ fib_entry_cover_change_notify(fib_entry_index,
+ FIB_NODE_INDEX_INVALID);
+ }
+ /*
+ * else
+ * still has sources, leave it be.
+ */
+ if (was_sourced != fib_entry_is_sourced(fib_entry_index, source))
+ {
+ fib_table->ft_src_route_counts[source]--;
+ }
+
+ fib_entry_unlock(fib_entry_index);
+}
+
+void
+fib_table_entry_delete (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source)
+{
+ fib_node_index_t fib_entry_index;
+
+ fib_entry_index = fib_table_lookup_exact_match(fib_index, prefix);
+
+ if (FIB_NODE_INDEX_INVALID == fib_entry_index)
+ {
+ /*
+ * removing an entry that does not exist.
+ * i'll allow it, but i won't like it.
+ */
+ clib_warning("%U not in FIB", format_fib_prefix, prefix);
+ }
+ else
+ {
+ fib_table_entry_delete_i(fib_index, fib_entry_index, prefix, source);
+ }
+}
+
+void
+fib_table_entry_delete_index (fib_node_index_t fib_entry_index,
+ fib_source_t source)
+{
+ fib_prefix_t prefix;
+
+ fib_entry_get_prefix(fib_entry_index, &prefix);
+
+ fib_table_entry_delete_i(fib_entry_get_fib_index(fib_entry_index),
+ fib_entry_index, &prefix, source);
+}
+
+fib_node_index_t
+fib_table_entry_local_label_add (u32 fib_index,
+ const fib_prefix_t *prefix,
+ mpls_label_t label)
+{
+ fib_node_index_t fib_entry_index;
+
+ fib_entry_index = fib_table_lookup_exact_match(fib_index, prefix);
+
+ if (FIB_NODE_INDEX_INVALID == fib_entry_index ||
+ !fib_entry_is_sourced(fib_entry_index, FIB_SOURCE_MPLS))
+ {
+ /*
+ * only source the prefix once. this allows the label change
+ * operation to work
+ */
+ fib_entry_index = fib_table_entry_special_dpo_add(fib_index, prefix,
+ FIB_SOURCE_MPLS,
+ FIB_ENTRY_FLAG_NONE,
+ NULL);
+ }
+
+ fib_entry_set_source_data(fib_entry_index, FIB_SOURCE_MPLS, &label);
+
+ return (fib_entry_index);
+}
+
+void
+fib_table_entry_local_label_remove (u32 fib_index,
+ const fib_prefix_t *prefix,
+ mpls_label_t label)
+{
+ fib_node_index_t fib_entry_index;
+ const void *data;
+ mpls_label_t pl;
+
+ fib_entry_index = fib_table_lookup_exact_match(fib_index, prefix);
+
+ if (FIB_NODE_INDEX_INVALID == fib_entry_index)
+ return;
+
+ data = fib_entry_get_source_data(fib_entry_index, FIB_SOURCE_MPLS);
+
+ if (NULL == data)
+ return;
+
+ pl = *(mpls_label_t*)data;
+
+ if (pl != label)
+ return;
+
+ pl = MPLS_LABEL_INVALID;
+
+ fib_entry_set_source_data(fib_entry_index, FIB_SOURCE_MPLS, &pl);
+ fib_table_entry_special_remove(fib_index,
+ prefix,
+ FIB_SOURCE_MPLS);
+}
+
+u32
+fib_table_get_index_for_sw_if_index (fib_protocol_t proto,
+ u32 sw_if_index)
+{
+ switch (proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (ip4_fib_table_get_index_for_sw_if_index(sw_if_index));
+ case FIB_PROTOCOL_IP6:
+ return (ip6_fib_table_get_index_for_sw_if_index(sw_if_index));
+ case FIB_PROTOCOL_MPLS:
+ return (mpls_fib_table_get_index_for_sw_if_index(sw_if_index));
+ }
+ return (~0);
+}
+
+flow_hash_config_t
+fib_table_get_flow_hash_config (u32 fib_index,
+ fib_protocol_t proto)
+{
+ switch (proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (ip4_fib_table_get_flow_hash_config(fib_index));
+ case FIB_PROTOCOL_IP6:
+ return (ip6_fib_table_get_flow_hash_config(fib_index));
+ case FIB_PROTOCOL_MPLS:
+ return (mpls_fib_table_get_flow_hash_config(fib_index));
+ }
+ return (0);
+}
+
+
+u32
+fib_table_get_table_id_for_sw_if_index (fib_protocol_t proto,
+ u32 sw_if_index)
+{
+ fib_table_t *fib_table;
+
+ fib_table = fib_table_get(fib_table_get_index_for_sw_if_index(
+ proto, sw_if_index),
+ proto);
+
+ return ((NULL != fib_table ? fib_table->ft_table_id : ~0));
+}
+
+u32
+fib_table_find (fib_protocol_t proto,
+ u32 table_id)
+{
+ switch (proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (ip4_fib_index_from_table_id(table_id));
+ case FIB_PROTOCOL_IP6:
+ return (ip6_fib_index_from_table_id(table_id));
+ case FIB_PROTOCOL_MPLS:
+ return (mpls_fib_index_from_table_id(table_id));
+ }
+ return (~0);
+}
+
+u32
+fib_table_find_or_create_and_lock (fib_protocol_t proto,
+ u32 table_id)
+{
+ fib_table_t *fib_table;
+ fib_node_index_t fi;
+
+ switch (proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ fi = ip4_fib_table_find_or_create_and_lock(table_id);
+ break;
+ case FIB_PROTOCOL_IP6:
+ fi = ip6_fib_table_find_or_create_and_lock(table_id);
+ break;
+ case FIB_PROTOCOL_MPLS:
+ fi = mpls_fib_table_find_or_create_and_lock(table_id);
+ break;
+ default:
+ return (~0);
+ }
+
+ fib_table = fib_table_get(fi, proto);
+
+ fib_table->ft_desc = format(NULL, "%U-VRF:%d",
+ format_fib_protocol, proto,
+ table_id);
+
+ return (fi);
+}
+
+u32
+fib_table_create_and_lock (fib_protocol_t proto,
+ const char *const fmt,
+ ...)
+{
+ fib_table_t *fib_table;
+ fib_node_index_t fi;
+ va_list ap;
+
+ va_start(ap, fmt);
+
+ switch (proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ fi = ip4_fib_table_create_and_lock();
+ break;
+ case FIB_PROTOCOL_IP6:
+ fi = ip6_fib_table_create_and_lock();
+ break;
+ case FIB_PROTOCOL_MPLS:
+ fi = mpls_fib_table_create_and_lock();
+ break;
+ default:
+ return (~0);
+ }
+
+ fib_table = fib_table_get(fi, proto);
+
+ fib_table->ft_desc = va_format(fib_table->ft_desc, fmt, &ap);
+
+ va_end(ap);
+ return (fi);
+}
+
+static void
+fib_table_destroy (fib_table_t *fib_table)
+{
+ vec_free(fib_table->ft_desc);
+
+ switch (fib_table->ft_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ ip4_fib_table_destroy(&fib_table->v4);
+ break;
+ case FIB_PROTOCOL_IP6:
+ ip6_fib_table_destroy(fib_table->ft_index);
+ break;
+ case FIB_PROTOCOL_MPLS:
+ mpls_fib_table_destroy(&fib_table->mpls);
+ break;
+ }
+}
+
+void
+fib_table_unlock (u32 fib_index,
+ fib_protocol_t proto)
+{
+ fib_table_t *fib_table;
+
+ fib_table = fib_table_get(fib_index, proto);
+ fib_table->ft_locks--;
+
+ if (0 == fib_table->ft_locks)
+ {
+ fib_table_destroy(fib_table);
+ }
+}
+
+void
+fib_table_lock (u32 fib_index,
+ fib_protocol_t proto)
+{
+ fib_table_t *fib_table;
+
+ fib_table = fib_table_get(fib_index, proto);
+ fib_table->ft_locks++;
+}
+
+u32
+fib_table_get_num_entries (u32 fib_index,
+ fib_protocol_t proto,
+ fib_source_t source)
+{
+ fib_table_t *fib_table;
+
+ fib_table = fib_table_get(fib_index, proto);
+
+ return (fib_table->ft_src_route_counts[source]);
+}
+
+u8*
+format_fib_table_name (u8* s, va_list ap)
+{
+ fib_node_index_t fib_index = va_arg(ap, fib_node_index_t);
+ fib_protocol_t proto = va_arg(ap, int); // int promotion
+ fib_table_t *fib_table;
+
+ fib_table = fib_table_get(fib_index, proto);
+
+ s = format(s, "%v", fib_table->ft_desc);
+
+ return (s);
+}
+
+void
+fib_table_flush (u32 fib_index,
+ fib_protocol_t proto,
+ fib_source_t source)
+{
+ // FIXME
+ ASSERT(0);
+}
diff --git a/src/vnet/fib/fib_table.h b/src/vnet/fib/fib_table.h
new file mode 100644
index 00000000000..cfec516de1a
--- /dev/null
+++ b/src/vnet/fib/fib_table.h
@@ -0,0 +1,732 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FIB_TABLE_H__
+#define __FIB_TABLE_H__
+
+#include <vnet/ip/ip.h>
+#include <vnet/adj/adj.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/mpls/mpls.h>
+#include <vnet/mpls/packet.h>
+
+/**
+ * @brief
+ * A protocol-independent FIB table
+ */
+typedef struct fib_table_t_
+{
+ /**
+ * A union of the protocol specific FIBs that provide the
+ * underlying LPM mechanism.
+ * This element is first in the struct so that it is in the
+ * first cache line.
+ */
+ union {
+ ip4_fib_t v4;
+ ip6_fib_t v6;
+ mpls_fib_t mpls;
+ };
+
+ /**
+ * Which protocol this table serves. Used to switch on the union above.
+ */
+ fib_protocol_t ft_proto;
+
+ /**
+ * number of locks on the table
+ */
+ u16 ft_locks;
+
+ /**
+ * Table ID (hash key) for this FIB.
+ */
+ u32 ft_table_id;
+
+ /**
+ * Index into FIB vector.
+ */
+ fib_node_index_t ft_index;
+
+ /**
+ * flow hash configuration
+ */
+ u32 ft_flow_hash_config;
+
+ /**
+ * Per-source route counters
+ */
+ u32 ft_src_route_counts[FIB_SOURCE_MAX];
+
+ /**
+ * Total route counters
+ */
+ u32 ft_total_route_counts;
+
+ /**
+ * Table description
+ */
+ u8* ft_desc;
+} fib_table_t;
+
+/**
+ * @brief
+ * Format the description/name of the table
+ */
+extern u8* format_fib_table_name(u8* s, va_list ap);
+
+/**
+ * @brief
+ * Perform a longest prefix match in the non-forwarding table
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix to lookup
+ *
+ * @return
+ * The index of the fib_entry_t for the best match, which may be the default route
+ */
+extern fib_node_index_t fib_table_lookup(u32 fib_index,
+ const fib_prefix_t *prefix);
+
+/**
+ * @brief
+ * Perform an exact match in the non-forwarding table
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix to lookup
+ *
+ * @return
+ * The index of the fib_entry_t for the exact match, or INVALID
+ * if there is no match.
+ */
+extern fib_node_index_t fib_table_lookup_exact_match(u32 fib_index,
+ const fib_prefix_t *prefix);
+
+/**
+ * @brief
+ * Get the less specific (covering) prefix
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix to lookup
+ *
+ * @return
+ * The index of the less specific fib_entry_t.
+ */
+extern fib_node_index_t fib_table_get_less_specific(u32 fib_index,
+ const fib_prefix_t *prefix);
+
+/**
+ * @brief
+ * Add a 'special' entry to the FIB that links to the adj passed
+ * A special entry is an entry that the FIB is not expected to resolve
+ * via the usual mechanisms (i.e. recursive or neighbour adj DB lookup).
+ * Instead the client/source provides the adj to link to.
+ * This add is reference counted per-source. So n 'removes' are required
+ * for n 'adds', if the entry is no longer required.
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix to add
+ *
+ * @param source
+ * The ID of the client/source adding the entry.
+ *
+ * @param flags
+ * Flags for the entry.
+ *
+ * @param adj_index
+ * The adjacency to link to.
+ *
+ * @return
+ * the index of the fib_entry_t that is created (or exists already).
+ */
+extern fib_node_index_t fib_table_entry_special_add(u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ adj_index_t adj_index);
+
+/**
+ * @brief
+ * Add a 'special' entry to the FIB that links to the DPO passed
+ * A special entry is an entry that the FIB is not expected to resolve
+ * via the usual mechanisms (i.e. recursive or neighbour adj DB lookup).
+ * Instead the client/source provides the DPO to link to.
+ * This add is reference counted per-source. So n 'removes' are required
+ * for n 'adds', if the entry is no longer required.
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix to add
+ *
+ * @param source
+ * The ID of the client/source adding the entry.
+ *
+ * @param flags
+ * Flags for the entry.
+ *
+ * @param dpo
+ * The DPO to link to.
+ *
+ * @return
+ * the index of the fib_entry_t that is created (or existed already).
+ */
+extern fib_node_index_t fib_table_entry_special_dpo_add(u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t stype,
+ const dpo_id_t *dpo);
+
+/**
+ * @brief
+ * Update a 'special' entry in the FIB that links to the DPO passed.
+ * A special entry is an entry that the FIB is not expected to resolve
+ * via the usual mechanisms (i.e. recursive or neighbour adj DB lookup).
+ * Instead the client/source provides the DPO to link to.
+ * Special entries are add/remove reference counted per-source, so n
+ * 'removes' are required for n 'adds' before the entry is removed.
+ * An 'update' acts as an 'add' if the source has not yet added the entry;
+ * otherwise it modifies the reference instance created by that first 'add'.
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix of the entry to update
+ *
+ * @param source
+ * The ID of the client/source adding the entry.
+ *
+ * @param flags
+ * Flags for the entry.
+ *
+ * @param dpo
+ * The DPO to link to.
+ *
+ * @return
+ * the index of the fib_entry_t that is created (or existed already).
+ */
+extern fib_node_index_t fib_table_entry_special_dpo_update (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo);
+
+/**
+ * @brief
+ * Remove a 'special' entry from the FIB.
+ * These entries are reference counted per-source: n 'removes' are
+ * required for n 'adds' before the entry is removed.
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix to remove
+ *
+ * @param source
+ * The ID of the client/source removing the entry.
+ *
+ */
+extern void fib_table_entry_special_remove(u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source);
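+
+/*
+ * Editor's note: a sketch of the per-source reference counting described
+ * above, not from the original source; the drop DPO, table index 0 and
+ * FIB_SOURCE_SPECIAL are illustrative.
+ *
+ *   dpo_id_t dpo = DPO_INVALID;
+ *
+ *   dpo_copy(&dpo, drop_dpo_get(DPO_PROTO_IP4));
+ *   fib_table_entry_special_dpo_add(0, &pfx, FIB_SOURCE_SPECIAL,
+ *                                   FIB_ENTRY_FLAG_EXCLUSIVE, &dpo);
+ *   ...
+ *   // one remove per add from the same source
+ *   fib_table_entry_special_remove(0, &pfx, FIB_SOURCE_SPECIAL);
+ *   dpo_reset(&dpo);
+ */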
+
+/**
+ * @brief
+ * Add one path to an entry (aka route) in the FIB. If the entry does not
+ * exist, it will be created.
+ * See the documentation for fib_route_path_t for more descriptions of
+ * the path parameters.
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix for the entry to add
+ *
+ * @param source
+ * The ID of the client/source adding the entry.
+ *
+ * @param flags
+ * Flags for the entry.
+ *
+ * @param next_hop_proto
+ * The protocol of the next hop. This cannot be derived in the event that
+ * the next hop is all zeros.
+ *
+ * @param next_hop
+ * The address of the next-hop.
+ *
+ * @param next_hop_sw_if_index
+ * The index of the interface.
+ *
+ * @param next_hop_fib_index
+ * The fib index of the next-hop for recursive resolution
+ *
+ * @param next_hop_weight
+ * [un]equal cost path weight
+ *
+ * @param next_hop_label_stack
+ * The path's out-going label stack. NULL if there is none.
+ *
+ * @param pf
+ * Flags for the path
+ *
+ * @return
+ * the index of the fib_entry_t that is created (or existed already).
+ */
+extern fib_node_index_t fib_table_entry_path_add(u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ fib_protocol_t next_hop_proto,
+ const ip46_address_t *next_hop,
+ u32 next_hop_sw_if_index,
+ u32 next_hop_fib_index,
+ u32 next_hop_weight,
+ mpls_label_t *next_hop_label_stack,
+ fib_route_path_flags_t pf);
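+
+/*
+ * Editor's note: a sketch of a single-path add, not from the original
+ * source; the next-hop address, fib_index and sw_if_index values are
+ * hypothetical.
+ *
+ *   ip46_address_t nh = {
+ *       .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a01), // 10.10.10.1
+ *   };
+ *
+ *   fib_table_entry_path_add(fib_index, &pfx,
+ *                            FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE,
+ *                            FIB_PROTOCOL_IP4, &nh,
+ *                            sw_if_index,
+ *                            ~0,   // next-hop FIB index: not recursive
+ *                            1,    // weight
+ *                            NULL, // no out-going label stack
+ *                            FIB_ROUTE_PATH_FLAG_NONE);
+ */
+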
+/**
+ * @brief
+ * Add n paths to an entry (aka route) in the FIB. If the entry does not
+ * exist, it will be created.
+ * See the documentation for fib_route_path_t for more descriptions of
+ * the path parameters.
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix for the entry to add
+ *
+ * @param source
+ * The ID of the client/source adding the entry.
+ *
+ * @param flags
+ * Flags for the entry.
+ *
+ * @param rpaths
+ * A vector of paths. Not const since they may be modified.
+ *
+ * @return
+ * the index of the fib_entry_t that is created (or existed already).
+ */
+extern fib_node_index_t fib_table_entry_path_add2(u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ fib_route_path_t *rpaths);
+
+/**
+ * @brief
+ * Remove one path from an entry (aka route) in the FIB. If this is the entry's
+ * last path, then the entry will be removed, unless it has other sources.
+ * See the documentation for fib_route_path_t for more descriptions of
+ * the path parameters.
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix of the entry from which to remove the path
+ *
+ * @param source
+ * The ID of the client/source removing the path.
+ *
+ * @param next_hop_proto
+ * The protocol of the next hop. This cannot be derived in the event that
+ * the next hop is all zeros.
+ *
+ * @param next_hop
+ * The address of the next-hop.
+ *
+ * @param next_hop_sw_if_index
+ * The index of the interface.
+ *
+ * @param next_hop_fib_index
+ * The fib index of the next-hop for recursive resolution
+ *
+ * @param next_hop_weight
+ * [un]equal cost path weight
+ *
+ * @param pf
+ * Flags for the path
+ */
+extern void fib_table_entry_path_remove(u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_protocol_t next_hop_proto,
+ const ip46_address_t *next_hop,
+ u32 next_hop_sw_if_index,
+ u32 next_hop_fib_index,
+ u32 next_hop_weight,
+ fib_route_path_flags_t pf);
+
+/**
+ * @brief
+ * Remove n paths from an entry (aka route) in the FIB. If these are the
+ * entry's last paths, then the entry will be removed, unless it has
+ * other sources.
+ * See the documentation for fib_route_path_t for more descriptions of
+ * the path parameters.
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix of the entry from which to remove the paths
+ *
+ * @param source
+ * The ID of the client/source removing the paths.
+ *
+ * @param rpaths
+ * A vector of paths.
+ */
+extern void fib_table_entry_path_remove2(u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_route_path_t *rpaths);
+
+/**
+ * @brief
+ * Update an entry to have a new set of paths. If the entry does not
+ * exist, it will be created.
+ * The difference between a 'path-add' and an 'update' is that a path-add
+ * is an incremental addition of paths, whereas an update is a wholesale swap.
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix for the entry to add
+ *
+ * @param source
+ * The ID of the client/source adding the entry.
+ *
+ * @param rpaths
+ * A vector of paths. Not const since they may be modified.
+ *
+ * @return
+ * the index of the fib_entry_t that is created (or existed already).
+ */
+extern fib_node_index_t fib_table_entry_update(u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ fib_route_path_t *rpaths);
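+
+/*
+ * Editor's note: contrast with fib_table_entry_path_add(): an update is a
+ * wholesale swap of the source's path set. A sketch, not from the original
+ * source; nh, sw_if_index and fib_index are hypothetical, field names as
+ * declared for fib_route_path_t in fib_types.h.
+ *
+ *   fib_route_path_t *rpaths = NULL, rpath = {
+ *       .frp_proto = FIB_PROTOCOL_IP4,
+ *       .frp_addr = nh,
+ *       .frp_sw_if_index = sw_if_index,
+ *       .frp_fib_index = ~0,
+ *       .frp_weight = 1,
+ *       .frp_flags = FIB_ROUTE_PATH_FLAG_NONE,
+ *   };
+ *
+ *   vec_add1(rpaths, rpath);
+ *   // the entry's paths from this source become exactly 'rpaths'
+ *   fib_table_entry_update(fib_index, &pfx, FIB_SOURCE_API,
+ *                          FIB_ENTRY_FLAG_NONE, rpaths);
+ *   vec_free(rpaths);
+ */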
+
+/**
+ * @brief
+ * Update the entry to have just one path. If the entry does not
+ * exist, it will be created.
+ * See the documentation for fib_route_path_t for more descriptions of
+ * the path parameters.
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix for the entry to add
+ *
+ * @param source
+ * The ID of the client/source adding the entry.
+ *
+ * @param flags
+ * Flags for the entry.
+ *
+ * @param next_hop_proto
+ * The protocol of the next hop. This cannot be derived in the event that
+ * the next hop is all zeros.
+ *
+ * @param next_hop
+ * The address of the next-hop.
+ *
+ * @param next_hop_sw_if_index
+ * The index of the interface.
+ *
+ * @param next_hop_fib_index
+ * The fib index of the next-hop for recursive resolution
+ *
+ * @param next_hop_weight
+ * [un]equal cost path weight
+ *
+ * @param next_hop_label_stack
+ * The path's out-going label stack. NULL if there is none.
+ *
+ * @param pf
+ * Flags for the path
+ *
+ * @return
+ * the index of the fib_entry_t that is created (or existed already).
+ */
+extern fib_node_index_t fib_table_entry_update_one_path(u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ fib_protocol_t next_hop_proto,
+ const ip46_address_t *next_hop,
+ u32 next_hop_sw_if_index,
+ u32 next_hop_fib_index,
+ u32 next_hop_weight,
+ mpls_label_t *next_hop_label_stack,
+ fib_route_path_flags_t pf);
+
+/**
+ * @brief
+ * Add an MPLS local label for the prefix/route. If the entry does not
+ * exist, it will be created. In theory more than one local label can be
+ * added, but this is not yet supported.
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix for the entry to which to add the label
+ *
+ * @param label
+ * The MPLS label to add
+ *
+ * @return
+ * the index of the fib_entry_t that is created (or existed already).
+ */
+extern fib_node_index_t fib_table_entry_local_label_add(u32 fib_index,
+ const fib_prefix_t *prefix,
+ mpls_label_t label);
+/**
+ * @brief
+ * Remove an MPLS local label from the prefix/route.
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix of the entry from which to remove the label
+ *
+ * @param label
+ * The MPLS label to remove
+ */
+extern void fib_table_entry_local_label_remove(u32 fib_index,
+ const fib_prefix_t *prefix,
+ mpls_label_t label);
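+
+/*
+ * Editor's note: a sketch of binding and unbinding a local label, not
+ * from the original source; the label value 100 and fib_index are
+ * arbitrary.
+ *
+ *   fib_table_entry_local_label_add(fib_index, &pfx, 100);
+ *   ...
+ *   fib_table_entry_local_label_remove(fib_index, &pfx, 100);
+ */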
+
+/**
+ * @brief
+ * Delete a FIB entry. If the entry has no more sources, then it is
+ * removed from the table.
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param prefix
+ * The prefix for the entry to remove
+ *
+ * @param source
+ * The ID of the client/source removing the entry.
+ */
+extern void fib_table_entry_delete(u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source);
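+
+/*
+ * Editor's note: unlike fib_table_entry_path_remove(), which strips a
+ * single path, delete withdraws the source's entire contribution. A
+ * sketch (fib_index and pfx hypothetical):
+ *
+ *   fib_table_entry_delete(fib_index, &pfx, FIB_SOURCE_API);
+ */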
+
+/**
+ * @brief
+ * Delete a FIB entry. If the entry has no more sources, then it is
+ * removed from the table.
+ *
+ * @param entry_index
+ * The index of the FIB entry
+ *
+ * @param source
+ * The ID of the client/source removing the entry.
+ */
+extern void fib_table_entry_delete_index(fib_node_index_t entry_index,
+ fib_source_t source);
+
+/**
+ * @brief
+ * Flush all entries from a table for the source
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param proto
+ * The protocol of the entries in the table
+ *
+ * @param source
+ * the source to flush
+ */
+extern void fib_table_flush(u32 fib_index,
+ fib_protocol_t proto,
+ fib_source_t source);
+
+/**
+ * @brief
+ * Get the index of the FIB bound to the interface
+ *
+ * @param proto
+ * The protocol of the FIB (and thus the entries therein)
+ *
+ * @param sw_if_index
+ * The interface index
+ *
+ * @return fib_index
+ * The index of the FIB
+ */
+extern u32 fib_table_get_index_for_sw_if_index(fib_protocol_t proto,
+ u32 sw_if_index);
+
+/**
+ * @brief
+ * Get the Table-ID of the FIB bound to the interface
+ *
+ * @param proto
+ * The protocol of the FIB (and thus the entries therein)
+ *
+ * @param sw_if_index
+ * The interface index
+ *
+ * @return table_id
+ * The Table-ID of the FIB
+ */
+extern u32 fib_table_get_table_id_for_sw_if_index(fib_protocol_t proto,
+ u32 sw_if_index);
+
+/**
+ * @brief
+ * Get the index of the FIB for a Table-ID. This DOES NOT create the
+ * FIB if it does not exist.
+ *
+ * @param proto
+ * The protocol of the FIB (and thus the entries therein)
+ *
+ * @param table_id
+ * The Table-ID
+ *
+ * @return fib_index
+ * The index of the FIB, which may be INVALID.
+ */
+extern u32 fib_table_find(fib_protocol_t proto, u32 table_id);
+
+/**
+ * @brief
+ * Get the index of the FIB for a Table-ID. This DOES create the
+ * FIB if it does not exist.
+ *
+ * @param proto
+ * The protocol of the FIB (and thus the entries therein)
+ *
+ * @param table_id
+ * The Table-ID
+ *
+ * @return fib_index
+ * The index of the FIB
+ */
+extern u32 fib_table_find_or_create_and_lock(fib_protocol_t proto,
+ u32 table_id);
+
+/**
+ * @brief
+ * Create a new table with no table ID. This means it does not get
+ * added to the hash-table and so can only be found by using the index returned.
+ *
+ * @param proto
+ * The protocol of the FIB (and thus the entries therein)
+ *
+ * @param fmt
+ * A string to describe the table
+ *
+ * @return fib_index
+ * The index of the FIB
+ */
+extern u32 fib_table_create_and_lock(fib_protocol_t proto,
+ const char *const fmt,
+ ...);
+
+/**
+ * @brief
+ * Get the flow hash config used by the table
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param proto
+ * The protocol of the FIB (and thus the entries therein)
+ *
+ * @return The flow hash config
+ */
+extern flow_hash_config_t fib_table_get_flow_hash_config(u32 fib_index,
+ fib_protocol_t proto);
+
+/**
+ * @brief
+ * Release a reference counting lock on the table. When the last lock
+ * has gone, the FIB is deleted.
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param proto
+ * The protocol of the FIB (and thus the entries therein)
+ */
+extern void fib_table_unlock(u32 fib_index,
+ fib_protocol_t proto);
+
+/**
+ * @brief
+ * Take a reference counting lock on the table
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param proto
+ * The protocol of the FIB (and thus the entries therein)
+ */
+extern void fib_table_lock(u32 fib_index,
+ fib_protocol_t proto);
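+
+/*
+ * Editor's note: a sketch of the table lifecycle using the functions
+ * above, not from the original source; table-id 11 is arbitrary.
+ *
+ *   u32 fib_index;
+ *
+ *   fib_index = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP4, 11);
+ *   ... use the table ...
+ *   fib_table_unlock(fib_index, FIB_PROTOCOL_IP4); // deleted on last unlock
+ */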
+
+/**
+ * @brief
+ * Return the number of entries in the FIB added by a given source.
+ *
+ * @param fib_index
+ * The index of the FIB
+ *
+ * @param proto
+ * The protocol of the FIB (and thus the entries therein)
+ *
+ * @param source
+ * The source of the entries to count
+ *
+ * @return number of sourced entries.
+ */
+extern u32 fib_table_get_num_entries(u32 fib_index,
+ fib_protocol_t proto,
+ fib_source_t source);
+
+/**
+ * @brief
+ * Get a pointer to a FIB table
+ */
+extern fib_table_t *fib_table_get(fib_node_index_t index,
+ fib_protocol_t proto);
+
+#endif
diff --git a/src/vnet/fib/fib_test.c b/src/vnet/fib/fib_test.c
new file mode 100644
index 00000000000..5083db26872
--- /dev/null
+++ b/src/vnet/fib/fib_test.c
@@ -0,0 +1,7112 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/mpls_fib.h>
+#include <vnet/adj/adj.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/load_balance_map.h>
+#include <vnet/dpo/mpls_label_dpo.h>
+#include <vnet/dpo/lookup_dpo.h>
+#include <vnet/dpo/drop_dpo.h>
+#include <vnet/dpo/receive_dpo.h>
+#include <vnet/dpo/ip_null_dpo.h>
+
+#include <vnet/mpls/mpls.h>
+
+#include <vnet/fib/fib_path_list.h>
+#include <vnet/fib/fib_entry_src.h>
+#include <vnet/fib/fib_walk.h>
+#include <vnet/fib/fib_node_list.h>
+#include <vnet/fib/fib_urpf_list.h>
+
+#define FIB_TEST_I(_cond, _comment, _args...) \
+({ \
+ int _evald = (_cond); \
+ if (!(_evald)) { \
+ fformat(stderr, "FAIL:%d: " _comment "\n", \
+ __LINE__, ##_args); \
+ } else { \
+ fformat(stderr, "PASS:%d: " _comment "\n", \
+ __LINE__, ##_args); \
+ } \
+ _evald; \
+})
+#define FIB_TEST(_cond, _comment, _args...) \
+{ \
+ if (!FIB_TEST_I(_cond, _comment, ##_args)) { \
+ ASSERT(!("FAIL: " _comment)); \
+ return 1; \
+ } \
+}
+
+/**
+ * An 'I'm not fussed if this is not efficient' store of test data
+ */
+typedef struct test_main_t_ {
+ /**
+ * HW if indices
+ */
+ u32 hw_if_indicies[4];
+ /**
+ * HW interfaces
+ */
+ vnet_hw_interface_t * hw[4];
+
+} test_main_t;
+static test_main_t test_main;
+
+/* fake ethernet device class, distinct from "fake-ethX" */
+static u8 * format_test_interface_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, "test-eth%d", dev_instance);
+}
+
+static uword dummy_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ clib_warning ("you shouldn't be here, leaking buffers...");
+ return frame->n_vectors;
+}
+
+static clib_error_t *
+test_interface_admin_up_down (vnet_main_t * vnm,
+ u32 hw_if_index,
+ u32 flags)
+{
+ u32 hw_flags = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ?
+ VNET_HW_INTERFACE_FLAG_LINK_UP : 0;
+ vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags);
+ return 0;
+}
+
+VNET_DEVICE_CLASS (test_interface_device_class,static) = {
+ .name = "Test interface",
+ .format_device_name = format_test_interface_name,
+ .tx_function = dummy_interface_tx,
+ .admin_up_down_function = test_interface_admin_up_down,
+};
+
+static u8 *hw_address;
+
+static int
+fib_test_mk_intf (u32 ninterfaces)
+{
+ clib_error_t * error = NULL;
+ test_main_t *tm = &test_main;
+ u8 byte;
+ u32 i;
+
+ ASSERT(ninterfaces <= ARRAY_LEN(tm->hw_if_indicies));
+
+ for (i=0; i<6; i++)
+ {
+ byte = 0xd0+i;
+ vec_add1(hw_address, byte);
+ }
+
+ for (i = 0; i < ninterfaces; i++)
+ {
+ hw_address[5] = i;
+
+ error = ethernet_register_interface(vnet_get_main(),
+ test_interface_device_class.index,
+ i /* instance */,
+ hw_address,
+ &tm->hw_if_indicies[i],
+ /* flag change */ 0);
+
+ FIB_TEST((NULL == error), "ADD interface %d", i);
+
+ error = vnet_hw_interface_set_flags(vnet_get_main(),
+ tm->hw_if_indicies[i],
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+ tm->hw[i] = vnet_get_hw_interface(vnet_get_main(),
+ tm->hw_if_indicies[i]);
+ vec_validate (ip4_main.fib_index_by_sw_if_index,
+ tm->hw[i]->sw_if_index);
+ vec_validate (ip6_main.fib_index_by_sw_if_index,
+ tm->hw[i]->sw_if_index);
+ ip4_main.fib_index_by_sw_if_index[tm->hw[i]->sw_if_index] = 0;
+ ip6_main.fib_index_by_sw_if_index[tm->hw[i]->sw_if_index] = 0;
+
+ error = vnet_sw_interface_set_flags(vnet_get_main(),
+ tm->hw[i]->sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ FIB_TEST((NULL == error), "UP interface %d", i);
+ }
+ /*
+ * re-eval after the inevitable realloc
+ */
+ for (i = 0; i < ninterfaces; i++)
+ {
+ tm->hw[i] = vnet_get_hw_interface(vnet_get_main(),
+ tm->hw_if_indicies[i]);
+ }
+
+ return (0);
+}
+
+#define FIB_TEST_REC_FORW(_rec_prefix, _via_prefix, _bucket) \
+{ \
+ const dpo_id_t *_rec_dpo = fib_entry_contribute_ip_forwarding( \
+ fib_table_lookup_exact_match(fib_index, (_rec_prefix))); \
+ const dpo_id_t *_via_dpo = fib_entry_contribute_ip_forwarding( \
+ fib_table_lookup(fib_index, (_via_prefix))); \
+ FIB_TEST(!dpo_cmp(_via_dpo, \
+ load_balance_get_bucket(_rec_dpo->dpoi_index, \
+ _bucket)), \
+ "%U is recursive via %U", \
+ format_fib_prefix, (_rec_prefix), \
+ format_fib_prefix, _via_prefix); \
+}
+
+#define FIB_TEST_LB_BUCKET_VIA_ADJ(_prefix, _bucket, _ai) \
+{ \
+ const dpo_id_t *_dpo = fib_entry_contribute_ip_forwarding( \
+ fib_table_lookup_exact_match(fib_index, (_prefix))); \
+ const dpo_id_t *_dpo1 = \
+ load_balance_get_bucket(_dpo->dpoi_index, _bucket); \
+ FIB_TEST(DPO_ADJACENCY == _dpo1->dpoi_type, "type is %U", \
+ format_dpo_type, _dpo1->dpoi_type); \
+ FIB_TEST((_ai == _dpo1->dpoi_index), \
+ "%U bucket %d resolves via %U", \
+ format_fib_prefix, (_prefix), \
+ _bucket, \
+ format_dpo_id, _dpo1, 0); \
+}
+
+#define FIB_TEST_RPF(_cond, _comment, _args...) \
+{ \
+ if (!FIB_TEST_I(_cond, _comment, ##_args)) { \
+ return (0); \
+ } \
+}
+
+static int
+fib_test_urpf_is_equal (fib_node_index_t fei,
+ fib_forward_chain_type_t fct,
+ u32 num, ...)
+{
+ dpo_id_t dpo = DPO_INVALID;
+ fib_urpf_list_t *urpf;
+ index_t ui;
+ va_list ap;
+ int ii;
+
+ va_start(ap, num);
+
+ fib_entry_contribute_forwarding(fei, fct, &dpo);
+ ui = load_balance_get_urpf(dpo.dpoi_index);
+
+ urpf = fib_urpf_list_get(ui);
+
+ FIB_TEST_RPF(num == vec_len(urpf->furpf_itfs),
+ "RPF:%U len %d == %d",
+ format_fib_urpf_list, ui,
+ num, vec_len(urpf->furpf_itfs));
+ FIB_TEST_RPF(num == fib_urpf_check_size(ui),
+ "RPF:%U check-size %d == %d",
+ format_fib_urpf_list, ui,
+ num, vec_len(urpf->furpf_itfs));
+
+ for (ii = 0; ii < num; ii++)
+ {
+ adj_index_t ai = va_arg(ap, adj_index_t);
+
+ FIB_TEST_RPF(ai == urpf->furpf_itfs[ii],
+ "RPF:%d item:%d - %d == %d",
+ ui, ii, ai, urpf->furpf_itfs[ii]);
+ FIB_TEST_RPF(fib_urpf_check(ui, ai),
+ "RPF:%d %d found",
+ ui, ai);
+ }
+
+ dpo_reset(&dpo);
+
+ va_end(ap);
+
+ return (1);
+}
+
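+/*
+ * Editor's note (added comment): builds a 14 byte ethernet rewrite
+ * (6B dst MAC | 6B src MAC | 2B ethertype left zero), re-using the same
+ * MAC for both addresses - sufficient for these tests.
+ */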
+static u8*
+fib_test_build_rewrite (u8 *eth_addr)
+{
+ u8* rewrite = NULL;
+
+ vec_validate(rewrite, 13);
+
+ memcpy(rewrite, eth_addr, 6);
+ memcpy(rewrite+6, eth_addr, 6);
+
+ return (rewrite);
+}
+
+typedef enum fib_test_lb_bucket_type_t_ {
+ FT_LB_LABEL_O_ADJ,
+ FT_LB_LABEL_STACK_O_ADJ,
+ FT_LB_LABEL_O_LB,
+ FT_LB_O_LB,
+ FT_LB_SPECIAL,
+ FT_LB_ADJ,
+} fib_test_lb_bucket_type_t;
+
+typedef struct fib_test_lb_bucket_t_ {
+ fib_test_lb_bucket_type_t type;
+
+ union
+ {
+ struct
+ {
+ mpls_eos_bit_t eos;
+ mpls_label_t label;
+ u8 ttl;
+ adj_index_t adj;
+ } label_o_adj;
+ struct
+ {
+ mpls_eos_bit_t eos;
+ mpls_label_t label_stack[8];
+ u8 label_stack_size;
+ u8 ttl;
+ adj_index_t adj;
+ } label_stack_o_adj;
+ struct
+ {
+ mpls_eos_bit_t eos;
+ mpls_label_t label;
+ u8 ttl;
+ index_t lb;
+ } label_o_lb;
+ struct
+ {
+ index_t adj;
+ } adj;
+ struct
+ {
+ index_t lb;
+ } lb;
+ struct
+ {
+ index_t adj;
+ } special;
+ };
+} fib_test_lb_bucket_t;
+
+#define FIB_TEST_LB(_cond, _comment, _args...) \
+{ \
+ if (!FIB_TEST_I(_cond, _comment, ##_args)) { \
+ return (0); \
+ } \
+}
+
+static int
+fib_test_validate_lb_v (const load_balance_t *lb,
+ u16 n_buckets,
+ va_list ap)
+{
+ const dpo_id_t *dpo;
+ int bucket;
+
+ FIB_TEST_LB((n_buckets == lb->lb_n_buckets), "n_buckets = %d", lb->lb_n_buckets);
+
+ for (bucket = 0; bucket < n_buckets; bucket++)
+ {
+ const fib_test_lb_bucket_t *exp;
+
+ exp = va_arg(ap, fib_test_lb_bucket_t*);
+ dpo = load_balance_get_bucket_i(lb, bucket);
+
+ switch (exp->type)
+ {
+ case FT_LB_LABEL_STACK_O_ADJ:
+ {
+ const mpls_label_dpo_t *mld;
+ mpls_label_t hdr;
+ u32 ii;
+
+ FIB_TEST_LB((DPO_MPLS_LABEL == dpo->dpoi_type),
+ "bucket %d stacks on %U",
+ bucket,
+ format_dpo_type, dpo->dpoi_type);
+
+ mld = mpls_label_dpo_get(dpo->dpoi_index);
+
+ FIB_TEST_LB(exp->label_stack_o_adj.label_stack_size == mld->mld_n_labels,
+ "label stack size",
+ mld->mld_n_labels);
+
+ for (ii = 0; ii < mld->mld_n_labels; ii++)
+ {
+ hdr = clib_net_to_host_u32(mld->mld_hdr[ii].label_exp_s_ttl);
+ FIB_TEST_LB((vnet_mpls_uc_get_label(hdr) ==
+ exp->label_stack_o_adj.label_stack[ii]),
+ "bucket %d stacks on label %d",
+ bucket,
+ exp->label_stack_o_adj.label_stack[ii]);
+
+ if (ii == mld->mld_n_labels-1)
+ {
+ FIB_TEST_LB((vnet_mpls_uc_get_s(hdr) ==
+ exp->label_stack_o_adj.eos),
+ "bucket %d stacks on label %d %U!=%U",
+ bucket,
+ exp->label_stack_o_adj.label_stack[ii],
+ format_mpls_eos_bit, exp->label_stack_o_adj.eos,
+ format_mpls_eos_bit, vnet_mpls_uc_get_s(hdr));
+ }
+ else
+ {
+ FIB_TEST_LB((vnet_mpls_uc_get_s(hdr) == MPLS_NON_EOS),
+ "bucket %d stacks on label %d %U",
+ bucket,
+ exp->label_stack_o_adj.label_stack[ii],
+ format_mpls_eos_bit, vnet_mpls_uc_get_s(hdr));
+ }
+ }
+
+ FIB_TEST_LB((DPO_ADJACENCY_INCOMPLETE == mld->mld_dpo.dpoi_type),
+ "bucket %d label stacks on %U",
+ bucket,
+ format_dpo_type, mld->mld_dpo.dpoi_type);
+
+ FIB_TEST_LB((exp->label_stack_o_adj.adj == mld->mld_dpo.dpoi_index),
+ "bucket %d label stacks on adj %d",
+ bucket,
+ exp->label_stack_o_adj.adj);
+ }
+ break;
+ case FT_LB_LABEL_O_ADJ:
+ {
+ const mpls_label_dpo_t *mld;
+ mpls_label_t hdr;
+ FIB_TEST_LB((DPO_MPLS_LABEL == dpo->dpoi_type),
+ "bucket %d stacks on %U",
+ bucket,
+ format_dpo_type, dpo->dpoi_type);
+
+ mld = mpls_label_dpo_get(dpo->dpoi_index);
+ hdr = clib_net_to_host_u32(mld->mld_hdr[0].label_exp_s_ttl);
+
+ FIB_TEST_LB((vnet_mpls_uc_get_label(hdr) ==
+ exp->label_o_adj.label),
+ "bucket %d stacks on label %d",
+ bucket,
+ exp->label_o_adj.label);
+
+ FIB_TEST_LB((vnet_mpls_uc_get_s(hdr) ==
+ exp->label_o_adj.eos),
+ "bucket %d stacks on label %d %U",
+ bucket,
+ exp->label_o_adj.label,
+ format_mpls_eos_bit, exp->label_o_adj.eos);
+
+ FIB_TEST_LB((DPO_ADJACENCY_INCOMPLETE == mld->mld_dpo.dpoi_type),
+ "bucket %d label stacks on %U",
+ bucket,
+ format_dpo_type, mld->mld_dpo.dpoi_type);
+
+ FIB_TEST_LB((exp->label_o_adj.adj == mld->mld_dpo.dpoi_index),
+ "bucket %d label stacks on adj %d",
+ bucket,
+ exp->label_o_adj.adj);
+ }
+ break;
+ case FT_LB_LABEL_O_LB:
+ {
+ const mpls_label_dpo_t *mld;
+ mpls_label_t hdr;
+
+ FIB_TEST_LB((DPO_MPLS_LABEL == dpo->dpoi_type),
+ "bucket %d stacks on %U",
+ bucket,
+ format_dpo_type, dpo->dpoi_type);
+
+ mld = mpls_label_dpo_get(dpo->dpoi_index);
+ hdr = clib_net_to_host_u32(mld->mld_hdr[0].label_exp_s_ttl);
+
+ FIB_TEST_LB(1 == mld->mld_n_labels, "label stack size %d",
+ mld->mld_n_labels);
+ FIB_TEST_LB((vnet_mpls_uc_get_label(hdr) ==
+ exp->label_o_lb.label),
+ "bucket %d stacks on label %d",
+ bucket,
+ exp->label_o_lb.label);
+
+ FIB_TEST_LB((vnet_mpls_uc_get_s(hdr) ==
+ exp->label_o_lb.eos),
+ "bucket %d stacks on label %d %U",
+ bucket,
+ exp->label_o_lb.label,
+ format_mpls_eos_bit, exp->label_o_lb.eos);
+
+ FIB_TEST_LB((DPO_LOAD_BALANCE == mld->mld_dpo.dpoi_type),
+ "bucket %d label stacks on %U",
+ bucket,
+ format_dpo_type, mld->mld_dpo.dpoi_type);
+
+ FIB_TEST_LB((exp->label_o_lb.lb == mld->mld_dpo.dpoi_index),
+ "bucket %d label stacks on LB %d",
+ bucket,
+ exp->label_o_lb.lb);
+ }
+ break;
+ case FT_LB_ADJ:
+ FIB_TEST_I(((DPO_ADJACENCY == dpo->dpoi_type) ||
+ (DPO_ADJACENCY_INCOMPLETE == dpo->dpoi_type)),
+ "bucket %d stacks on %U",
+ bucket,
+ format_dpo_type, dpo->dpoi_type);
+ FIB_TEST_LB((exp->adj.adj == dpo->dpoi_index),
+ "bucket %d stacks on adj %d",
+ bucket,
+ exp->adj.adj);
+ break;
+ case FT_LB_O_LB:
+ FIB_TEST_I((DPO_LOAD_BALANCE == dpo->dpoi_type),
+ "bucket %d stacks on %U",
+ bucket,
+ format_dpo_type, dpo->dpoi_type);
+ FIB_TEST_LB((exp->lb.lb == dpo->dpoi_index),
+ "bucket %d stacks on lb %d",
+ bucket,
+ exp->lb.lb);
+ break;
+ case FT_LB_SPECIAL:
+ FIB_TEST_I((DPO_DROP == dpo->dpoi_type),
+ "bucket %d stacks on %U",
+ bucket,
+ format_dpo_type, dpo->dpoi_type);
+ FIB_TEST_LB((exp->special.adj == dpo->dpoi_index),
+ "bucket %d stacks on drop %d",
+ bucket,
+ exp->special.adj);
+ break;
+ }
+ }
+ return (!0);
+}
+
+static int
+fib_test_validate_entry (fib_node_index_t fei,
+ fib_forward_chain_type_t fct,
+ u16 n_buckets,
+ ...)
+{
+ dpo_id_t dpo = DPO_INVALID;
+ const load_balance_t *lb;
+ fib_prefix_t pfx;
+ index_t fw_lbi;
+ u32 fib_index;
+ va_list ap;
+ int res;
+
+ va_start(ap, n_buckets);
+
+ fib_entry_get_prefix(fei, &pfx);
+ fib_index = fib_entry_get_fib_index(fei);
+ fib_entry_contribute_forwarding(fei, fct, &dpo);
+
+ FIB_TEST_LB((DPO_LOAD_BALANCE == dpo.dpoi_type),
+ "Entry links to %U",
+ format_dpo_type, dpo.dpoi_type);
+ lb = load_balance_get(dpo.dpoi_index);
+
+ res = fib_test_validate_lb_v(lb, n_buckets, ap);
+
+ /*
+ * ensure that the LB contributed by the entry is the
+ * same as the LB in the forwarding tables
+ */
+ if (fct == fib_entry_get_default_chain_type(fib_entry_get(fei)))
+ {
+ switch (pfx.fp_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ fw_lbi = ip4_fib_forwarding_lookup(fib_index, &pfx.fp_addr.ip4);
+ break;
+ case FIB_PROTOCOL_IP6:
+ fw_lbi = ip6_fib_table_fwding_lookup(&ip6_main, fib_index, &pfx.fp_addr.ip6);
+ break;
+ case FIB_PROTOCOL_MPLS:
+ {
+ mpls_unicast_header_t hdr = {
+ .label_exp_s_ttl = 0,
+ };
+
+ vnet_mpls_uc_set_label(&hdr.label_exp_s_ttl, pfx.fp_label);
+ vnet_mpls_uc_set_s(&hdr.label_exp_s_ttl, pfx.fp_eos);
+ hdr.label_exp_s_ttl = clib_host_to_net_u32(hdr.label_exp_s_ttl);
+
+ fw_lbi = mpls_fib_table_forwarding_lookup(fib_index, &hdr);
+ break;
+ }
+ default:
+ fw_lbi = 0;
+ }
+ FIB_TEST_LB((fw_lbi == dpo.dpoi_index),
+ "Contributed LB = FW LB: %U\n %U",
+ format_load_balance, fw_lbi, 0,
+ format_load_balance, dpo.dpoi_index, 0);
+ }
+
+ dpo_reset(&dpo);
+
+ va_end(ap);
+
+ return (res);
+}
+
+static int
+fib_test_v4 (void)
+{
+ /*
+ * In the default table check for the presence and correct forwarding
+ * of the special entries
+ */
+ fib_node_index_t dfrt, fei, ai, ai2, locked_ai, ai_01, ai_02, ai_03;
+ const dpo_id_t *dpo, *dpo1, *dpo2, *dpo_drop;
+ const ip_adjacency_t *adj;
+ const load_balance_t *lb;
+ test_main_t *tm;
+ u32 fib_index;
+ int ii;
+
+ /* via 10.10.10.1 */
+ ip46_address_t nh_10_10_10_1 = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a01),
+ };
+ /* via 10.10.10.2 */
+ ip46_address_t nh_10_10_10_2 = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a02),
+ };
+
+ tm = &test_main;
+
+ /* Find or create FIB table 11 */
+ fib_index = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP4, 11);
+
+ for (ii = 0; ii < 4; ii++)
+ {
+ ip4_main.fib_index_by_sw_if_index[tm->hw[ii]->sw_if_index] = fib_index;
+ }
+
+ fib_prefix_t pfx_0_0_0_0_s_0 = {
+ .fp_len = 0,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4 = {
+ {0}
+ },
+ },
+ };
+
+ fib_prefix_t pfx = {
+ .fp_len = 0,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4 = {
+ {0}
+ },
+ },
+ };
+
+ dpo_drop = drop_dpo_get(DPO_PROTO_IP4);
+
+ dfrt = fib_table_lookup(fib_index, &pfx_0_0_0_0_s_0);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != dfrt), "default route present");
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(dfrt)),
+ "Default route is DROP");
+
+ pfx.fp_len = 32;
+ fei = fib_table_lookup(fib_index, &pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "all zeros route present");
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "all 0s route is DROP");
+
+ pfx.fp_addr.ip4.as_u32 = clib_host_to_net_u32(0xffffffff);
+ pfx.fp_len = 32;
+ fei = fib_table_lookup(fib_index, &pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "all ones route present");
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "all 1s route is DROP");
+
+ pfx.fp_addr.ip4.as_u32 = clib_host_to_net_u32(0xe0000000);
+ pfx.fp_len = 8;
+ fei = fib_table_lookup(fib_index, &pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "all-mcast route present");
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "all-mcast route is DROP");
+
+ pfx.fp_addr.ip4.as_u32 = clib_host_to_net_u32(0xf0000000);
+ pfx.fp_len = 8;
+ fei = fib_table_lookup(fib_index, &pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "class-e route present");
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "class-e route is DROP");
+
+ /*
+ * at this stage there are 5 entries in the test FIB (plus 5 in the default),
+ * all of which are special sourced and so none of which share path-lists.
+ * There are also 6 entries, and 6 non-shared path-lists, in the v6 default
+ * table
+ */
+#define NBR (5+5+6)
+ FIB_TEST((0 == fib_path_list_db_size()), "path list DB is empty");
+ FIB_TEST((NBR == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * add interface routes.
+ * validate presence of /24 attached and /32 receive.
+ * test for the presence of the receive address in the glean and local adj
+ */
+ fib_prefix_t local_pfx = {
+ .fp_len = 24,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4 = {
+ .as_u32 = clib_host_to_net_u32(0x0a0a0a0a),
+ },
+ },
+ };
+
+ fib_table_entry_update_one_path(fib_index, &local_pfx,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_CONNECTED |
+ FIB_ENTRY_FLAG_ATTACHED),
+ FIB_PROTOCOL_IP4,
+ NULL,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1, // weight
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup(fib_index, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached interface route present");
+ FIB_TEST(((FIB_ENTRY_FLAG_ATTACHED | FIB_ENTRY_FLAG_CONNECTED) ==
+ fib_entry_get_flags(fei)),
+ "Flags set on attached interface");
+
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != ai), "attached interface route adj present");
+ adj = adj_get(ai);
+ FIB_TEST((IP_LOOKUP_NEXT_GLEAN == adj->lookup_next_index),
+ "attached interface adj is glean");
+ FIB_TEST((0 == ip46_address_cmp(&local_pfx.fp_addr,
+ &adj->sub_type.glean.receive_addr)),
+ "attached interface adj is receive ok");
+
+ local_pfx.fp_len = 32;
+ fib_table_entry_update_one_path(fib_index, &local_pfx,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_CONNECTED |
+ FIB_ENTRY_FLAG_LOCAL),
+ FIB_PROTOCOL_IP4,
+ NULL,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1, // weight
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup(fib_index, &local_pfx);
+ FIB_TEST(((FIB_ENTRY_FLAG_LOCAL | FIB_ENTRY_FLAG_CONNECTED) ==
+ fib_entry_get_flags(fei)),
+ "Flags set on local interface");
+
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local interface route present");
+
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 0),
+ "RPF list for local length 0");
+ dpo = load_balance_get_bucket(dpo->dpoi_index, 0);
+ FIB_TEST((DPO_RECEIVE == dpo->dpoi_type),
+ "local interface adj is local");
+ receive_dpo_t *rd = receive_dpo_get(dpo->dpoi_index);
+
+ FIB_TEST((0 == ip46_address_cmp(&local_pfx.fp_addr,
+ &rd->rd_addr)),
+ "local interface adj is receive ok");
+
+ FIB_TEST((2 == fib_table_get_num_entries(fib_index,
+ FIB_PROTOCOL_IP4,
+ FIB_SOURCE_INTERFACE)),
+ "2 Interface Source'd prefixes");
+
+ /*
+ * +2 interface routes +2 non-shared path-lists
+ */
+ FIB_TEST((0 == fib_path_list_db_size()), "path list DB is empty");
+ FIB_TEST((NBR+2 == fib_path_list_pool_size()), "path list pool size is%d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+2 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * Modify the default route to be via an adj not yet known.
+ * This sources the default route with the API source, which is
+ * a higher preference than the DEFAULT_ROUTE source
+ */
+ pfx.fp_addr.ip4.as_u32 = 0;
+ pfx.fp_len = 0;
+ fib_table_entry_path_add(fib_index, &pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup(fib_index, &pfx);
+ FIB_TEST((FIB_ENTRY_FLAG_NONE == fib_entry_get_flags(fei)),
+ "Flags set on API route");
+
+ FIB_TEST((fei == dfrt), "default route same index");
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != ai), "default route adj present");
+ adj = adj_get(ai);
+ FIB_TEST((IP_LOOKUP_NEXT_ARP == adj->lookup_next_index),
+ "adj is incomplete");
+ FIB_TEST((0 == ip46_address_cmp(&nh_10_10_10_1, &adj->sub_type.nbr.next_hop)),
+ "adj nbr next-hop ok");
+ FIB_TEST((1 == fib_table_get_num_entries(fib_index,
+ FIB_PROTOCOL_IP4,
+ FIB_SOURCE_API)),
+ "1 API Source'd prefixes");
+
+ /*
+ * find the adj in the shared db
+ */
+ locked_ai = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
+ VNET_LINK_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index);
+ FIB_TEST((locked_ai == ai), "ADJ NBR DB find");
+ adj_unlock(locked_ai);
+
+ /*
+ * +1 shared path-list
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+3 == fib_path_list_pool_size()), "path list pool size is%d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+2 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * remove the API source from the default route. We expect
+ * the route to remain, sourced by DEFAULT_ROUTE, and hence a DROP
+ */
+ pfx.fp_addr.ip4.as_u32 = 0;
+ pfx.fp_len = 0;
+ fib_table_entry_path_remove(fib_index, &pfx,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // non-recursive path, so no FIB index
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx);
+
+ FIB_TEST((fei == dfrt), "default route same index");
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "Default route is DROP");
+
+ /*
+ * -1 shared-path-list
+ */
+ FIB_TEST((0 == fib_path_list_db_size()), "path list DB is empty");
+ FIB_TEST((NBR+2 == fib_path_list_pool_size()), "path list pool size is%d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+2 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * Add 2 ARP entries => complete ADJs plus adj-fibs.
+ */
+ fib_prefix_t pfx_10_10_10_1_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 10.10.10.1 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a01),
+ },
+ };
+ fib_prefix_t pfx_10_10_10_2_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 10.10.10.2 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a02),
+ },
+ };
+ fib_prefix_t pfx_11_11_11_11_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 11.11.11.11 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x0b0b0b0b),
+ },
+ };
+ u8 eth_addr[] = {
+ 0xde, 0xde, 0xde, 0xba, 0xba, 0xba,
+ };
+
+ ip46_address_t nh_12_12_12_12 = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x0c0c0c0c),
+ };
+ adj_index_t ai_12_12_12_12;
+
+ /*
+ * Add a route via an incomplete ADJ, then complete the ADJ.
+ * Expect the route's LB to be updated to use the complete adj type.
+ */
+ fei = fib_table_entry_update_one_path(fib_index,
+ &pfx_11_11_11_11_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_ATTACHED,
+ FIB_PROTOCOL_IP4,
+ &pfx_10_10_10_1_s_32.fp_addr,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ dpo1 = load_balance_get_bucket(dpo->dpoi_index, 0);
+ FIB_TEST(DPO_ADJACENCY_INCOMPLETE == dpo1->dpoi_type,
+ "11.11.11.11/32 via incomplete adj");
+
+ ai_01 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
+ VNET_LINK_IP4,
+ &pfx_10_10_10_1_s_32.fp_addr,
+ tm->hw[0]->sw_if_index);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != ai_01), "adj created");
+ adj = adj_get(ai_01);
+ FIB_TEST((IP_LOOKUP_NEXT_ARP == adj->lookup_next_index),
+ "adj is incomplete");
+ FIB_TEST((0 == ip46_address_cmp(&pfx_10_10_10_1_s_32.fp_addr,
+ &adj->sub_type.nbr.next_hop)),
+ "adj nbr next-hop ok");
+
+ adj_nbr_update_rewrite(ai_01, ADJ_NBR_REWRITE_FLAG_COMPLETE,
+ fib_test_build_rewrite(eth_addr));
+ FIB_TEST((IP_LOOKUP_NEXT_REWRITE == adj->lookup_next_index),
+ "adj is complete");
+ FIB_TEST((0 == ip46_address_cmp(&pfx_10_10_10_1_s_32.fp_addr,
+ &adj->sub_type.nbr.next_hop)),
+ "adj nbr next-hop ok");
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "ADJ-FIB resolves via adj");
+
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ dpo1 = load_balance_get_bucket(dpo->dpoi_index, 0);
+ FIB_TEST(DPO_ADJACENCY == dpo1->dpoi_type,
+ "11.11.11.11/32 via complete adj");
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 1,
+ tm->hw[0]->sw_if_index),
+ "RPF list for adj-fib contains adj");
+
+ ai_12_12_12_12 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
+ VNET_LINK_IP4,
+ &nh_12_12_12_12,
+ tm->hw[1]->sw_if_index);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != ai_12_12_12_12), "adj created");
+ adj = adj_get(ai_12_12_12_12);
+ FIB_TEST((IP_LOOKUP_NEXT_ARP == adj->lookup_next_index),
+ "adj is incomplete");
+ FIB_TEST((0 == ip46_address_cmp(&nh_12_12_12_12,
+ &adj->sub_type.nbr.next_hop)),
+ "adj nbr next-hop ok");
+ adj_nbr_update_rewrite(ai_12_12_12_12, ADJ_NBR_REWRITE_FLAG_COMPLETE,
+ fib_test_build_rewrite(eth_addr));
+ FIB_TEST((IP_LOOKUP_NEXT_REWRITE == adj->lookup_next_index),
+ "adj is complete");
+
+ /*
+ * add the adj fib
+ */
+ fei = fib_table_entry_update_one_path(fib_index,
+ &pfx_10_10_10_1_s_32,
+ FIB_SOURCE_ADJ,
+ FIB_ENTRY_FLAG_ATTACHED,
+ FIB_PROTOCOL_IP4,
+ &pfx_10_10_10_1_s_32.fp_addr,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ FIB_TEST((FIB_ENTRY_FLAG_ATTACHED == fib_entry_get_flags(fei)),
+ "Flags set on adj-fib");
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "ADJ-FIB resolves via adj");
+
+ fib_table_entry_path_remove(fib_index,
+ &pfx_11_11_11_11_s_32,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &pfx_10_10_10_1_s_32.fp_addr,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ eth_addr[5] = 0xb2;
+
+ ai_02 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
+ VNET_LINK_IP4,
+ &pfx_10_10_10_2_s_32.fp_addr,
+ tm->hw[0]->sw_if_index);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != ai_02), "adj created");
+ adj = adj_get(ai_02);
+ FIB_TEST((IP_LOOKUP_NEXT_ARP == adj->lookup_next_index),
+ "adj is incomplete");
+ FIB_TEST((0 == ip46_address_cmp(&pfx_10_10_10_2_s_32.fp_addr,
+ &adj->sub_type.nbr.next_hop)),
+ "adj nbr next-hop ok");
+
+ adj_nbr_update_rewrite(ai_02, ADJ_NBR_REWRITE_FLAG_COMPLETE,
+ fib_test_build_rewrite(eth_addr));
+ FIB_TEST((IP_LOOKUP_NEXT_REWRITE == adj->lookup_next_index),
+ "adj is complete");
+ FIB_TEST((0 == ip46_address_cmp(&pfx_10_10_10_2_s_32.fp_addr,
+ &adj->sub_type.nbr.next_hop)),
+ "adj nbr next-hop ok");
+ FIB_TEST((ai_01 != ai_02), "ADJs are different");
+
+ fib_table_entry_update_one_path(fib_index,
+ &pfx_10_10_10_2_s_32,
+ FIB_SOURCE_ADJ,
+ FIB_ENTRY_FLAG_ATTACHED,
+ FIB_PROTOCOL_IP4,
+ &pfx_10_10_10_2_s_32.fp_addr,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_10_10_10_2_s_32);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_02 == ai), "ADJ-FIB resolves via adj");
+
+ /*
+ * +2 adj-fibs, and their non-shared path-lists
+ */
+ FIB_TEST((0 == fib_path_list_db_size()), "path list DB is empty");
+ FIB_TEST((NBR+4 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+4 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * Add 2 routes via the first ADJ. ensure path-list sharing
+ */
+ fib_prefix_t pfx_1_1_1_1_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 1.1.1.1/32 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x01010101),
+ },
+ };
+
+ fib_table_entry_path_add(fib_index,
+ &pfx_1_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "1.1.1.1 resolves via 10.10.10.1");
+
+ /*
+ * +1 entry and a shared path-list
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB size is 1");
+ FIB_TEST((NBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+5 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /* 1.1.2.0/24 */
+ fib_prefix_t pfx_1_1_2_0_s_24 = {
+ .fp_len = 24,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x01010200),
+ }
+ };
+
+ fib_table_entry_path_add(fib_index,
+ &pfx_1_1_2_0_s_24,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup(fib_index, &pfx_1_1_2_0_s_24);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "1.1.2.0/24 resolves via 10.10.10.1");
+
+ /*
+ * +1 entry only
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB size is 1");
+ FIB_TEST((NBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+6 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * modify 1.1.2.0/24 to use multipath.
+ */
+ fib_table_entry_path_add(fib_index,
+ &pfx_1_1_2_0_s_24,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_2,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup(fib_index, &pfx_1_1_2_0_s_24);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1, tm->hw[0]->sw_if_index),
+ "RPF list for 1.1.2.0/24 contains both adjs");
+
+ dpo1 = load_balance_get_bucket(dpo->dpoi_index, 0);
+ FIB_TEST(DPO_ADJACENCY == dpo1->dpoi_type, "type is %d", dpo1->dpoi_type);
+ FIB_TEST((ai_01 == dpo1->dpoi_index),
+ "1.1.2.0/24 bucket 0 resolves via 10.10.10.1 (%d=%d)",
+ ai_01, dpo1->dpoi_index);
+
+ dpo1 = load_balance_get_bucket(dpo->dpoi_index, 1);
+ FIB_TEST(DPO_ADJACENCY == dpo1->dpoi_type, "type is %d", dpo1->dpoi_type);
+ FIB_TEST((ai_02 == dpo1->dpoi_index),
+ "1.1.2.0/24 bucket 1 resolves via 10.10.10.2");
+
+ /*
+ * +1 shared-pathlist
+ */
+ FIB_TEST((2 == fib_path_list_db_size()), "path list DB size is 2");
+ FIB_TEST((NBR+6 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+6 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * revert the modify
+ */
+ fib_table_entry_path_remove(fib_index,
+ &pfx_1_1_2_0_s_24,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_2,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup(fib_index, &pfx_1_1_2_0_s_24);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1, tm->hw[0]->sw_if_index),
+ "RPF list for 1.1.2.0/24 contains one adj");
+
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "1.1.2.0/24 resolves via 10.10.10.1");
+
+ /*
+ * -1 shared-pathlist
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB is %d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+6 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * Add 2 recursive routes:
+ * 100.100.100.100/32 via 1.1.1.1/32 => the via entry is installed.
+ * 100.100.100.101/32 via 1.1.1.1/32 => the via entry is installed.
+ */
+ fib_prefix_t bgp_100_pfx = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 100.100.100.100/32 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x64646464),
+ },
+ };
+ /* via 1.1.1.1 */
+ ip46_address_t nh_1_1_1_1 = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x01010101),
+ };
+
+ fei = fib_table_entry_path_add(fib_index,
+ &bgp_100_pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_1_1_1_1,
+ ~0, // no index provided.
+ fib_index, // nexthop in same fib as route
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST_REC_FORW(&bgp_100_pfx, &pfx_1_1_1_1_s_32, 0);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 1,
+ tm->hw[0]->sw_if_index),
+ "RPF list for adj-fib contains adj");
+
+ /*
+ * +1 entry and +1 shared-path-list
+ */
+ FIB_TEST((2 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+6 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+7 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ fib_prefix_t bgp_101_pfx = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 100.100.100.101/32 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x64646465),
+ },
+ };
+
+ fei = fib_table_entry_path_add(fib_index,
+ &bgp_101_pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_1_1_1_1,
+ ~0, // no index provided.
+ fib_index, // nexthop in same fib as route
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST_REC_FORW(&bgp_101_pfx, &pfx_1_1_1_1_s_32, 0);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 1,
+ tm->hw[0]->sw_if_index),
+ "RPF list for adj-fib contains adj");
+
+ /*
+ * +1 entry, but the recursive path-list is shared.
+ */
+ FIB_TEST((2 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+6 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+8 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * An EXCLUSIVE route; one where the user (me) provides the exclusive
+ * adjacency through which the route will resolve
+ */
+ fib_prefix_t ex_pfx = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 4.4.4.4/32 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x04040404),
+ },
+ };
+
+ fib_table_entry_special_add(fib_index,
+ &ex_pfx,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ locked_ai);
+ fei = fib_table_lookup_exact_match(fib_index, &ex_pfx);
+ FIB_TEST((ai == fib_entry_get_adj(fei)),
+ "Exclusive route links to user adj");
+
+ fib_table_entry_special_remove(fib_index,
+ &ex_pfx,
+ FIB_SOURCE_SPECIAL);
+ FIB_TEST(FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &ex_pfx),
+ "Exclusive reoute removed");
+
+ /*
+ * An EXCLUSIVE route; this time the user provides the exclusive
+ * DPO through which the route will resolve
+ */
+ dpo_id_t ex_dpo = DPO_INVALID;
+
+ lookup_dpo_add_or_lock_w_fib_index(fib_index,
+ DPO_PROTO_IP4,
+ LOOKUP_INPUT_DST_ADDR,
+ LOOKUP_TABLE_FROM_CONFIG,
+ &ex_dpo);
+
+ fib_table_entry_special_dpo_add(fib_index,
+ &ex_pfx,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ &ex_dpo);
+ fei = fib_table_lookup_exact_match(fib_index, &ex_pfx);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(!dpo_cmp(&ex_dpo, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "exclusive remote uses lookup DPO");
+
+ /*
+ * update the exclusive to use a different DPO
+ */
+ ip_null_dpo_add_and_lock(DPO_PROTO_IP4,
+ IP_NULL_ACTION_SEND_ICMP_UNREACH,
+ &ex_dpo);
+ fib_table_entry_special_dpo_update(fib_index,
+ &ex_pfx,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ &ex_dpo);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(!dpo_cmp(&ex_dpo, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "exclusive remote uses now uses NULL DPO");
+
+ fib_table_entry_special_remove(fib_index,
+ &ex_pfx,
+ FIB_SOURCE_SPECIAL);
+ FIB_TEST(FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &ex_pfx),
+ "Exclusive reoute removed");
+ dpo_reset(&ex_dpo);
+
+ /*
+ * Add a recursive route:
+ * 200.200.200.200/32 via 1.1.1.2/32 => the via entry is NOT installed.
+ */
+ fib_prefix_t bgp_200_pfx = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 200.200.200.200/32 */
+ .ip4.as_u32 = clib_host_to_net_u32(0xc8c8c8c8),
+ },
+ };
+ /* via 1.1.1.2 */
+ fib_prefix_t pfx_1_1_1_2_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x01010102),
+ },
+ };
+
+ fib_table_entry_path_add(fib_index,
+ &bgp_200_pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_2_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index, // nexthop in same fib as route
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);
+
+ /*
+ * the entry should resolve via a drop, since the route recurses via
+ * the default route, which is itself a DROP
+ */
+ fei = fib_table_lookup(fib_index, &pfx_1_1_1_2_s_32);
+ dpo1 = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(load_balance_is_drop(dpo1), "1.1.1.2/32 is drop");
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 0),
+ "RPF list for 1.1.1.2/32 contains 0 adjs");
+
+ /*
+ * +2 entries and +1 shared-path-list
+ */
+ FIB_TEST((3 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+7 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+10 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * Unequal cost load-balance, 3:1 ratio; fits in a 4 bucket LB.
+ * The paths are sorted by NH first; in this case the path with the greater
+ * weight is first in the set. This ordering tests the RPF sort|uniq logic.
+ */
+ fib_prefix_t pfx_1_2_3_4_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x01020304),
+ },
+ };
+ fib_table_entry_path_add(fib_index,
+ &pfx_1_2_3_4_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_entry_path_add(fib_index,
+ &pfx_1_2_3_4_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_12_12_12_12,
+ tm->hw[1]->sw_if_index,
+ ~0,
+ 3,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "1.2.3.4/32 presnet");
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ lb = load_balance_get(dpo->dpoi_index);
+ FIB_TEST((lb->lb_n_buckets == 4),
+ "1.2.3.4/32 LB has %d bucket",
+ lb->lb_n_buckets);
+
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 0, ai_12_12_12_12);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 1, ai_12_12_12_12);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 2, ai_12_12_12_12);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 3, ai_01);
+
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 2,
+ tm->hw[0]->sw_if_index,
+ tm->hw[1]->sw_if_index),
+ "RPF list for 1.2.3.4/32 contains both adjs");
+
+
+ /*
+ * Unequal cost load-balance, 4:1 ratio; fits in a 16 bucket LB
+ * with ratio 13:3 (16 * 4/5 = 12.8 -> 13, 16 * 1/5 = 3.2 -> 3)
+ */
+ fib_prefix_t pfx_1_2_3_5_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x01020305),
+ },
+ };
+ fib_table_entry_path_add(fib_index,
+ &pfx_1_2_3_5_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_12_12_12_12,
+ tm->hw[1]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_entry_path_add(fib_index,
+ &pfx_1_2_3_5_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 4,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "1.2.3.5/32 presnet");
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ lb = load_balance_get(dpo->dpoi_index);
+ FIB_TEST((lb->lb_n_buckets == 16),
+ "1.2.3.5/32 LB has %d bucket",
+ lb->lb_n_buckets);
+
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 0, ai_01);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 1, ai_01);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 2, ai_01);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 3, ai_01);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 4, ai_01);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 5, ai_01);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 6, ai_01);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 7, ai_01);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 8, ai_01);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 9, ai_01);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 10, ai_01);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 11, ai_01);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 12, ai_01);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 13, ai_12_12_12_12);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 14, ai_12_12_12_12);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 15, ai_12_12_12_12);
+
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 2,
+ tm->hw[0]->sw_if_index,
+ tm->hw[1]->sw_if_index),
+ "RPF list for 1.2.3.4/32 contains both adjs");
+
+ /*
+ * Test UCMP with a large weight skew - this produces load-balance objects with large
+ * numbers of buckets to accommodate the skew. By updating said load-balances we are
+ * also testing the LB in-place modify code when the number of buckets is large.
+ */
+ fib_prefix_t pfx_6_6_6_6_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 6.6.6.6/32 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x06060606),
+ },
+ };
+ fib_test_lb_bucket_t ip_6_6_6_6_o_10_10_10_1 = {
+ .type = FT_LB_ADJ,
+ .adj = {
+ .adj = ai_01,
+ },
+ };
+ fib_test_lb_bucket_t ip_6_6_6_6_o_10_10_10_2 = {
+ .type = FT_LB_ADJ,
+ .adj = {
+ .adj = ai_02,
+ },
+ };
+ fib_test_lb_bucket_t ip_6_6_6_6_o_12_12_12_12 = {
+ .type = FT_LB_ADJ,
+ .adj = {
+ .adj = ai_12_12_12_12,
+ },
+ };
+ fib_table_entry_update_one_path(fib_index,
+ &pfx_6_6_6_6_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 0, // zero weight
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_6_6_6_6_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &ip_6_6_6_6_o_10_10_10_1),
+ "6.6.6.6/32 via 10.10.10.1");
+
+ fib_table_entry_path_add(fib_index,
+ &pfx_6_6_6_6_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_2,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 100,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_6_6_6_6_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 64,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_1),
+ "6.6.6.6/32 via 10.10.10.1 and 10.10.10.2 in 63:1 ratio");
+
+ fib_table_entry_path_add(fib_index,
+ &pfx_6_6_6_6_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_12_12_12_12,
+ tm->hw[1]->sw_if_index,
+ ~0, // invalid fib index
+ 100,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_6_6_6_6_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 128,
+ &ip_6_6_6_6_o_10_10_10_1,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12,
+ &ip_6_6_6_6_o_12_12_12_12),
+ "6.6.6.6/32 via 10.10.10.1 and 10.10.10.2 in 63:1 ratio");
+
+ fib_table_entry_path_remove(fib_index,
+ &pfx_6_6_6_6_s_32,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_12_12_12_12,
+ tm->hw[1]->sw_if_index,
+ ~0, // invalid fib index
+ 100,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_6_6_6_6_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 64,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_2,
+ &ip_6_6_6_6_o_10_10_10_1),
+ "6.6.6.6/32 via 10.10.10.1 and 10.10.10.2 in 63:1 ratio");
+
+ fib_table_entry_path_remove(fib_index,
+ &pfx_6_6_6_6_s_32,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_2,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 100,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_6_6_6_6_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &ip_6_6_6_6_o_10_10_10_1),
+ "6.6.6.6/32 via 10.10.10.1");
+
+ fib_table_entry_delete(fib_index, &pfx_6_6_6_6_s_32, FIB_SOURCE_API);
+
+ /*
+ * A recursive via the two unequal cost entries
+ */
+ fib_prefix_t bgp_44_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 68.68.68.68/32 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x44444444),
+ },
+ };
+ fei = fib_table_entry_path_add(fib_index,
+ &bgp_44_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_2_3_4_s_32.fp_addr,
+ ~0,
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_entry_path_add(fib_index,
+ &bgp_44_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_2_3_5_s_32.fp_addr,
+ ~0,
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST_REC_FORW(&bgp_44_s_32, &pfx_1_2_3_4_s_32, 0);
+ FIB_TEST_REC_FORW(&bgp_44_s_32, &pfx_1_2_3_5_s_32, 1);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 2,
+ tm->hw[0]->sw_if_index,
+ tm->hw[1]->sw_if_index),
+ "RPF list for 1.2.3.4/32 contains both adjs");
+
+ /*
+ * test the uRPF check functions
+ */
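+ /*
+ * A minimal sketch of how a data-plane source check is assumed to use
+ * these helpers (illustrative only; lb_dpo_index and rx_sw_if_index are
+ * placeholder names, not part of this test):
+ *
+ * index_t ui = load_balance_get_urpf(lb_dpo_index);
+ * if (!fib_urpf_check(ui, rx_sw_if_index))
+ * ; // uRPF fail => drop the packet
+ */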
+ dpo_id_t dpo_44 = DPO_INVALID;
+ index_t urpfi;
+
+ fib_entry_contribute_forwarding(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, &dpo_44);
+ urpfi = load_balance_get_urpf(dpo_44.dpoi_index);
+
+ FIB_TEST(fib_urpf_check(urpfi, tm->hw[0]->sw_if_index),
+ "uRPF check for 68.68.68.68/32 on %d OK",
+ tm->hw[0]->sw_if_index);
+ FIB_TEST(fib_urpf_check(urpfi, tm->hw[1]->sw_if_index),
+ "uRPF check for 68.68.68.68/32 on %d OK",
+ tm->hw[1]->sw_if_index);
+ FIB_TEST(!fib_urpf_check(urpfi, 99),
+ "uRPF check for 68.68.68.68/32 on 99 not-OK",
+ 99);
+ dpo_reset(&dpo_44);
+
+ fib_table_entry_delete(fib_index,
+ &bgp_44_s_32,
+ FIB_SOURCE_API);
+ fib_table_entry_delete(fib_index,
+ &pfx_1_2_3_5_s_32,
+ FIB_SOURCE_API);
+ fib_table_entry_delete(fib_index,
+ &pfx_1_2_3_4_s_32,
+ FIB_SOURCE_API);
+
+ /*
+ * Add a recursive route:
+ * 200.200.200.201/32 via 1.1.1.200/32 => the via entry is NOT installed.
+ */
+ fib_prefix_t bgp_201_pfx = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 200.200.200.201/32 */
+ .ip4.as_u32 = clib_host_to_net_u32(0xc8c8c8c9),
+ },
+ };
+ /* via 1.1.1.200 */
+ fib_prefix_t pfx_1_1_1_200_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x010101c8),
+ },
+ };
+
+ fib_table_entry_path_add(fib_index,
+ &bgp_201_pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_200_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index, // nexthop in same fib as route
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_200_s_32);
+ FIB_TEST((FIB_ENTRY_FLAG_NONE == fib_entry_get_flags(fei)),
+ "Flags set on RR via non-attached");
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 0),
+ "RPF list for BGP route empty");
+
+ /*
+ * +2 entry (BGP & RR) and +1 shared-path-list
+ */
+ FIB_TEST((4 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+8 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+12 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * insert a route that covers the missing 1.1.1.2/32. we expect
+ * 200.200.200.200/32 and 200.200.200.201/32 to resolve through it.
+ */
+ fib_prefix_t pfx_1_1_1_0_s_24 = {
+ .fp_len = 24,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 1.1.1.0/24 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x01010100),
+ },
+ };
+
+ fib_table_entry_path_add(fib_index,
+ &pfx_1_1_1_0_s_24,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup(fib_index, &pfx_1_1_1_0_s_24);
+ dpo1 = fib_entry_contribute_ip_forwarding(fei);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "1.1.1.0/24 resolves via 10.10.10.1");
+ fei = fib_table_lookup(fib_index, &pfx_1_1_1_2_s_32);
+ dpo1 = fib_entry_contribute_ip_forwarding(fei);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "1.1.1.2/32 resolves via 10.10.10.1");
+ fei = fib_table_lookup(fib_index, &pfx_1_1_1_200_s_32);
+ dpo1 = fib_entry_contribute_ip_forwarding(fei);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "1.1.1.200/24 resolves via 10.10.10.1");
+
+ /*
+ * +1 entry. 1.1.1.1/32 already uses 10.10.10.1 so no new path-list
+ */
+ FIB_TEST((4 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+8 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+13 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * the recursive adj for 200.200.200.200 should be updated.
+ */
+ FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
+ FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);
+ fei = fib_table_lookup(fib_index, &bgp_200_pfx);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 1,
+ tm->hw[0]->sw_if_index),
+ "RPF list for BGP route has itf index 0");
+
+ /*
+ * insert a more specific route than 1.1.1.0/24 that also covers the
+ * missing 1.1.1.2/32, but not 1.1.1.200/32. we expect
+ * 200.200.200.200 to resolve through it.
+ */
+ fib_prefix_t pfx_1_1_1_0_s_28 = {
+ .fp_len = 28,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 1.1.1.0/28 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x01010100),
+ },
+ };
+
+ fib_table_entry_path_add(fib_index,
+ &pfx_1_1_1_0_s_28,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_2,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup(fib_index, &pfx_1_1_1_0_s_28);
+ dpo2 = fib_entry_contribute_ip_forwarding(fei);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_02 == ai), "1.1.1.0/24 resolves via 10.10.10.2");
+
+ /*
+ * +1 entry. +1 shared path-list
+ */
+ FIB_TEST((5 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+9 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+14 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * the recursive adj for 200.200.200.200 should be updated.
+ * 200.200.200.201 remains unchanged.
+ */
+ FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
+ FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);
+
+ /*
+ * remove this /28. 200.200.200.200/32 should revert back to via 1.1.1.0/24
+ */
+ fib_table_entry_path_remove(fib_index,
+ &pfx_1_1_1_0_s_28,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_2,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ FIB_TEST((fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_0_s_28) ==
+ FIB_NODE_INDEX_INVALID),
+ "1.1.1.0/28 removed");
+ FIB_TEST((fib_table_lookup(fib_index, &pfx_1_1_1_0_s_28) ==
+ fib_table_lookup(fib_index, &pfx_1_1_1_0_s_24)),
+ "1.1.1.0/28 lookup via /24");
+ FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
+ FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);
+
+ /*
+ * -1 entry. -1 shared path-list
+ */
+ FIB_TEST((4 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+8 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+13 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * remove 1.1.1.0/24. 200.200.200.200/32 should revert back to via 0.0.0.0/0
+ */
+ fib_table_entry_path_remove(fib_index,
+ &pfx_1_1_1_0_s_24,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ FIB_TEST((fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_0_s_24) ==
+ FIB_NODE_INDEX_INVALID),
+ "1.1.1.0/24 removed");
+
+ fei = fib_table_lookup(fib_index, &pfx_1_1_1_2_s_32);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "1.1.1.2/32 route is DROP");
+ fei = fib_table_lookup(fib_index, &pfx_1_1_1_200_s_32);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "1.1.1.200/32 route is DROP");
+
+ FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
+ FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);
+
+ /*
+ * -1 entry
+ */
+ FIB_TEST((4 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+8 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+12 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * insert the missing 1.1.1.2/32
+ */
+ fei = fib_table_entry_path_add(fib_index,
+ &pfx_1_1_1_2_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ dpo1 = fib_entry_contribute_ip_forwarding(fei);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai == ai_01), "1.1.1.2/32 resolves via 10.10.10.1");
+
+ FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
+ FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);
+
+ /*
+ * no change. 1.1.1.2/32 was already present, RR-sourced.
+ */
+ FIB_TEST((4 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+8 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+12 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * remove 200.200.200.201/32 which does not have a valid via FIB
+ */
+ fib_table_entry_path_remove(fib_index,
+ &bgp_201_pfx,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_200_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index,
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ /*
+ * -2 entries (BGP and RR). -1 shared path-list;
+ */
+ FIB_TEST((fib_table_lookup_exact_match(fib_index, &bgp_201_pfx) ==
+ FIB_NODE_INDEX_INVALID),
+ "200.200.200.201/32 removed");
+ FIB_TEST((fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_200_s_32) ==
+ FIB_NODE_INDEX_INVALID),
+ "1.1.1.200/32 removed");
+
+ FIB_TEST((3 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+7 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+10 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * remove 200.200.200.200/32 which does have a valid via FIB
+ */
+ fib_table_entry_path_remove(fib_index,
+ &bgp_200_pfx,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_2_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index,
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST((fib_table_lookup_exact_match(fib_index, &bgp_200_pfx) ==
+ FIB_NODE_INDEX_INVALID),
+ "200.200.200.200/32 removed");
+ FIB_TEST((fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_2_s_32) !=
+ FIB_NODE_INDEX_INVALID),
+ "1.1.1.2/32 still present");
+
+ /*
+ * -1 entry (BGP, the RR source is also API sourced). -1 shared path-list;
+ */
+ FIB_TEST((2 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+6 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+9 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * A recursive prefix that has a 2-path load-balance.
+ * It also shares a next-hop with other BGP prefixes and hence
+ * tests the ref counting of RR-sourced prefixes and the 2-level LB.
+ */
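+ /*
+ * Sketch of the assumed 2-level resolution: the recursive entry's LB
+ * buckets point at the via-entries' forwarding, whose buckets in turn
+ * point at adjacencies, e.g.
+ * 100.100.100.102/32 -> LB[1.1.1.1/32, 1.1.1.2/32] -> adj 10.10.10.1
+ */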
+ const fib_prefix_t bgp_102 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 100.100.100.102/32 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x64646466),
+ },
+ };
+ fib_table_entry_path_add(fib_index,
+ &bgp_102,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_1_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index, // same as route
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fib_table_entry_path_add(fib_index,
+ &bgp_102,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_2_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index, // same as route's FIB
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(fib_index, &bgp_102);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "100.100.100.102/32 presnet");
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_1_s_32);
+ dpo1 = fib_entry_contribute_ip_forwarding(fei);
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_2_s_32);
+ dpo2 = fib_entry_contribute_ip_forwarding(fei);
+
+ lb = load_balance_get(dpo->dpoi_index);
+ FIB_TEST((lb->lb_n_buckets == 2), "Recursive LB has %d bucket", lb->lb_n_buckets);
+ FIB_TEST(!dpo_cmp(dpo1, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "First via 10.10.10.1");
+ FIB_TEST(!dpo_cmp(dpo2, load_balance_get_bucket(dpo->dpoi_index, 1)),
+ "Second via 10.10.10.1");
+
+ fib_table_entry_path_remove(fib_index,
+ &bgp_102,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_1_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index, // same as route's FIB
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fib_table_entry_path_remove(fib_index,
+ &bgp_102,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_2_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index, // same as route's FIB
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(fib_index, &bgp_102);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "100.100.100.102/32 removed");
+
+ /*
+ * remove the remaining recursives
+ */
+ fib_table_entry_path_remove(fib_index,
+ &bgp_100_pfx,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_1_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index, // same as route's FIB
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fib_table_entry_path_remove(fib_index,
+ &bgp_101_pfx,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_1_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index, // same as route's FIB
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ FIB_TEST((fib_table_lookup_exact_match(fib_index, &bgp_100_pfx) ==
+ FIB_NODE_INDEX_INVALID),
+ "100.100.100.100/32 removed");
+ FIB_TEST((fib_table_lookup_exact_match(fib_index, &bgp_101_pfx) ==
+ FIB_NODE_INDEX_INVALID),
+ "100.100.100.101/32 removed");
+
+ /*
+ * -2 entries (2*BGP, the RR source is also API sourced). -1 shared path-list;
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+7 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * Add a recursive route via a connected cover, using an adj-fib that does exist
+ */
+ fib_table_entry_path_add(fib_index,
+ &bgp_200_pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ ~0, // no index provided.
+ fib_index, // Same as route's FIB
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ /*
+ * +1 entry. +1 shared path-list (recursive via 10.10.10.1)
+ */
+ FIB_TEST((2 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+6 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+8 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32);
+ dpo1 = fib_entry_contribute_ip_forwarding(fei);
+
+ FIB_TEST(!dpo_cmp(dpo1, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "200.200.200.200/32 is recursive via adj for 10.10.10.1");
+
+ FIB_TEST((FIB_ENTRY_FLAG_ATTACHED == fib_entry_get_flags(fei)),
+ "Flags set on RR via existing attached");
+
+ /*
+ * Add a recursive route via a connected cover, using an adj-fib that does
+ * not exist
+ */
+ ip46_address_t nh_10_10_10_3 = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a03),
+ };
+ fib_prefix_t pfx_10_10_10_3 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = nh_10_10_10_3,
+ };
+
+ fib_table_entry_path_add(fib_index,
+ &bgp_201_pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_3,
+ ~0, // no index provided.
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ /*
+ * +2 entries (BGP and RR). +1 shared path-list (recursive via 10.10.10.3) and
+ * one unshared non-recursive via 10.10.10.3
+ */
+ FIB_TEST((3 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+8 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+10 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ ai_03 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
+ VNET_LINK_IP4,
+ &nh_10_10_10_3,
+ tm->hw[0]->sw_if_index);
+
+ fei = fib_table_lookup_exact_match(fib_index, &bgp_201_pfx);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_3);
+ dpo1 = fib_entry_contribute_ip_forwarding(fei);
+
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai == ai_03), "adj for 10.10.10.3/32 is via adj for 10.10.10.3");
+ FIB_TEST(((FIB_ENTRY_FLAG_ATTACHED | FIB_ENTRY_FLAG_CONNECTED) ==
+ fib_entry_get_flags(fei)),
+ "Flags set on RR via non-existing attached");
+
+ FIB_TEST(!dpo_cmp(dpo1, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "adj for 200.200.200.200/32 is recursive via adj for 10.10.10.3");
+
+ adj_unlock(ai_03);
+
+ /*
+ * remove the recursives
+ */
+ fib_table_entry_path_remove(fib_index,
+ &bgp_200_pfx,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ ~0, // no index provided.
+ fib_index, // same as route's FIB
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fib_table_entry_path_remove(fib_index,
+ &bgp_201_pfx,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_3,
+ ~0, // no index provided.
+ fib_index, // same as route's FIB
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST((fib_table_lookup_exact_match(fib_index, &bgp_201_pfx) ==
+ FIB_NODE_INDEX_INVALID),
+ "200.200.200.201/32 removed");
+ FIB_TEST((fib_table_lookup_exact_match(fib_index, &bgp_200_pfx) ==
+ FIB_NODE_INDEX_INVALID),
+ "200.200.200.200/32 removed");
+ FIB_TEST((fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_3) ==
+ FIB_NODE_INDEX_INVALID),
+ "10.10.10.3/32 removed");
+
+ /*
+ * -3 entries (2*BGP and RR). -2 shared path-lists (recursive via 10.10.10.3 &
+ * 10.10.10.1) and one unshared non-recursive via 10.10.10.3
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+7 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+
+ /*
+ * RECURSION LOOPS
+ * Add 5.5.5.5/32 -> 5.5.5.6/32 -> 5.5.5.7/32 -> 5.5.5.5/32
+ */
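+ /*
+ * The expectation (as the asserts below read): the fib graph walk spots
+ * the cycle when the third path-add closes 5.5.5.5 -> 5.5.5.6 ->
+ * 5.5.5.7 -> 5.5.5.5, flags the paths as looped, and looped paths
+ * contribute only drop.
+ */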
+ fib_prefix_t pfx_5_5_5_5_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x05050505),
+ },
+ };
+ fib_prefix_t pfx_5_5_5_6_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x05050506),
+ },
+ };
+ fib_prefix_t pfx_5_5_5_7_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x05050507),
+ },
+ };
+
+ fib_table_entry_path_add(fib_index,
+ &pfx_5_5_5_5_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_5_5_5_6_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fib_table_entry_path_add(fib_index,
+ &pfx_5_5_5_6_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_5_5_5_7_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fib_table_entry_path_add(fib_index,
+ &pfx_5_5_5_7_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_5_5_5_5_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ /*
+ * +3 entries, +3 shared path-list
+ */
+ FIB_TEST((4 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+8 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+10 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * All the entries have only looped paths, so they are all drop
+ */
+ fei = fib_table_lookup(fib_index, &pfx_5_5_5_7_s_32);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "LB for 5.5.5.7/32 is via adj for DROP");
+ fei = fib_table_lookup(fib_index, &pfx_5_5_5_5_s_32);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "LB for 5.5.5.5/32 is via adj for DROP");
+ fei = fib_table_lookup(fib_index, &pfx_5_5_5_6_s_32);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "LB for 5.5.5.6/32 is via adj for DROP");
+
+ /*
+ * provide 5.5.5.6/32 with an alternate path.
+ * this allows only 5.5.5.6/32 to forward via this path; the others
+ * still drop since the loop is still present.
+ */
+ fib_table_entry_path_add(fib_index,
+ &pfx_5_5_5_6_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+
+ fei = fib_table_lookup(fib_index, &pfx_5_5_5_6_s_32);
+ dpo1 = fib_entry_contribute_ip_forwarding(fei);
+
+ lb = load_balance_get(dpo1->dpoi_index);
+ FIB_TEST((lb->lb_n_buckets == 1), "5.5.5.6 LB has %d bucket", lb->lb_n_buckets);
+
+ dpo2 = load_balance_get_bucket(dpo1->dpoi_index, 0);
+ FIB_TEST(DPO_ADJACENCY == dpo2->dpoi_type, "type is %d", dpo2->dpoi_type);
+ FIB_TEST((ai_01 == dpo2->dpoi_index),
+ "5.5.5.6 bucket 0 resolves via 10.10.10.2");
+
+ fei = fib_table_lookup(fib_index, &pfx_5_5_5_7_s_32);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "LB for 5.5.5.7/32 is via adj for DROP");
+ fei = fib_table_lookup(fib_index, &pfx_5_5_5_5_s_32);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "LB for 5.5.5.5/32 is via adj for DROP");
+
+ /*
+ * remove the alternate path for 5.5.5.6/32
+ * back to all drop
+ */
+ fib_table_entry_path_remove(fib_index,
+ &pfx_5_5_5_6_s_32,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_5_5_5_7_s_32);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "LB for 5.5.5.7/32 is via adj for DROP");
+ fei = fib_table_lookup(fib_index, &pfx_5_5_5_5_s_32);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "LB for 5.5.5.5/32 is via adj for DROP");
+ fei = fib_table_lookup(fib_index, &pfx_5_5_5_6_s_32);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "LB for 5.5.5.6/32 is via adj for DROP");
+
+ /*
+ * break the loop by giving 5.5.5.5/32 a new set of paths
+ * expect all to forward via this new path.
+ */
+ fib_table_entry_update_one_path(fib_index,
+ &pfx_5_5_5_5_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_5_5_5_5_s_32);
+ dpo1 = fib_entry_contribute_ip_forwarding(fei);
+ lb = load_balance_get(dpo1->dpoi_index);
+ FIB_TEST((lb->lb_n_buckets == 1), "5.5.5.5 LB has %d bucket", lb->lb_n_buckets);
+
+ dpo2 = load_balance_get_bucket(dpo1->dpoi_index, 0);
+ FIB_TEST(DPO_ADJACENCY == dpo2->dpoi_type, "type is %d", dpo2->dpoi_type);
+ FIB_TEST((ai_01 == dpo2->dpoi_index),
+ "5.5.5.5 bucket 0 resolves via 10.10.10.2");
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_5_5_5_7_s_32);
+ dpo2 = fib_entry_contribute_ip_forwarding(fei);
+
+ lb = load_balance_get(dpo2->dpoi_index);
+ FIB_TEST((lb->lb_n_buckets == 1), "Recursive LB has %d bucket", lb->lb_n_buckets);
+ FIB_TEST(!dpo_cmp(dpo1, load_balance_get_bucket(dpo2->dpoi_index, 0)),
+ "5.5.5.5.7 via 5.5.5.5");
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_5_5_5_6_s_32);
+ dpo1 = fib_entry_contribute_ip_forwarding(fei);
+
+ lb = load_balance_get(dpo1->dpoi_index);
+ FIB_TEST((lb->lb_n_buckets == 1), "Recursive LB has %d bucket", lb->lb_n_buckets);
+ FIB_TEST(!dpo_cmp(dpo2, load_balance_get_bucket(dpo1->dpoi_index, 0)),
+ "5.5.5.5.6 via 5.5.5.7");
+
+ /*
+ * revert back to the loop, so we can remove the prefixes with
+ * the loop intact
+ */
+ fib_table_entry_update_one_path(fib_index,
+ &pfx_5_5_5_5_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_5_5_5_6_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_5_5_5_7_s_32);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "LB for 5.5.5.7/32 is via adj for DROP");
+ fei = fib_table_lookup(fib_index, &pfx_5_5_5_5_s_32);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "LB for 5.5.5.5/32 is via adj for DROP");
+ fei = fib_table_lookup(fib_index, &pfx_5_5_5_6_s_32);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "LB for 5.5.5.6/32 is via adj for DROP");
+
+ /*
+ * remove all the 5.5.5.x/32 prefixes
+ */
+ fib_table_entry_path_remove(fib_index,
+ &pfx_5_5_5_5_s_32,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &pfx_5_5_5_6_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index, // same as route's FIB
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fib_table_entry_path_remove(fib_index,
+ &pfx_5_5_5_6_s_32,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &pfx_5_5_5_7_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index, // same as route's FIB
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fib_table_entry_path_remove(fib_index,
+ &pfx_5_5_5_7_s_32,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &pfx_5_5_5_5_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index, // same as route's FIB
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fib_table_entry_path_remove(fib_index,
+ &pfx_5_5_5_6_s_32,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_2,
+ ~0, // no index provided.
+ fib_index, // same as route's FIB
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ /*
+ * -3 entries, -3 shared path-list
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+7 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * Single level loop 5.5.5.6/32 via 5.5.5.6/32
+ */
+ fib_table_entry_path_add(fib_index,
+ &pfx_5_5_5_6_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_5_5_5_6_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup(fib_index, &pfx_5_5_5_6_s_32);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "1-level 5.5.5.6/32 loop is via adj for DROP");
+
+ fib_table_entry_path_remove(fib_index,
+ &pfx_5_5_5_6_s_32,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &pfx_5_5_5_6_s_32.fp_addr,
+ ~0, // no index provided.
+ fib_index, // same as route's FIB
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ FIB_TEST(FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &pfx_5_5_5_6_s_32),
+ "1-level 5.5.5.6/32 loop is removed");
+
+ /*
+ * A recursive route whose next-hop is covered by the prefix.
+ * The via-fib inherits forwarding from its cover, and so picks up
+ * forwarding from the prefix, which is itself via the via-fib:
+ * we have a loop.
+ */
+ fib_prefix_t pfx_23_23_23_0_s_24 = {
+ .fp_len = 24,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x17171700),
+ },
+ };
+ fib_prefix_t pfx_23_23_23_23_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x17171717),
+ },
+ };
+ fei = fib_table_entry_path_add(fib_index,
+ &pfx_23_23_23_0_s_24,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_23_23_23_23_s_32.fp_addr,
+ ~0, // recursive
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(load_balance_is_drop(dpo),
+ "23.23.23.0/24 via covered is DROP");
+ fib_table_entry_delete_index(fei, FIB_SOURCE_API);
+
+ /*
+ * add-remove test. no change.
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+7 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * A recursive route with recursion constraints.
+ * 200.200.200.200/32 via 1.1.1.1 is recurse via host constrained
+ */
+ fib_table_entry_path_add(fib_index,
+ &bgp_200_pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_1_1_1_1,
+ ~0,
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_RESOLVE_VIA_HOST);
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_1_s_32);
+ dpo2 = fib_entry_contribute_ip_forwarding(fei);
+
+ fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx);
+ dpo1 = fib_entry_contribute_ip_forwarding(fei);
+
+ FIB_TEST(!dpo_cmp(dpo2, load_balance_get_bucket(dpo1->dpoi_index, 0)),
+ "adj for 200.200.200.200/32 is recursive via adj for 1.1.1.1");
+
+ /*
+ * save the load-balance. we expect it to be in-place modified
+ */
+ lb = load_balance_get(dpo1->dpoi_index);
+
+ /*
+ * add a covering prefix for the via fib that would otherwise serve
+ * as the resolving route when the host is removed
+ */
+ fib_table_entry_path_add(fib_index,
+ &pfx_1_1_1_0_s_28,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_0_s_28);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai == ai_01),
+ "adj for 1.1.1.0/28 is via adj for 1.1.1.1");
+
+ /*
+ * remove the host via FIB - expect the BGP prefix to be drop
+ */
+ fib_table_entry_path_remove(fib_index,
+ &pfx_1_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo1->dpoi_index, 0)),
+ "adj for 200.200.200.200/32 is recursive via adj for DROP");
+
+ /*
+ * add the via-entry host route back. expect it to resolve again
+ */
+ fib_table_entry_path_add(fib_index,
+ &pfx_1_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ FIB_TEST(!dpo_cmp(dpo2, load_balance_get_bucket(dpo1->dpoi_index, 0)),
+ "adj for 200.200.200.200/32 is recursive via adj for 1.1.1.1");
+
+ /*
+ * add another path for the recursive. it will then have 2.
+ */
+ fib_prefix_t pfx_1_1_1_3_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x01010103),
+ },
+ };
+ fib_table_entry_path_add(fib_index,
+ &pfx_1_1_1_3_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_2,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fib_table_entry_path_add(fib_index,
+ &bgp_200_pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_3_s_32.fp_addr,
+ ~0,
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_RESOLVE_VIA_HOST);
+
+ fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_1_s_32);
+ dpo2 = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(!dpo_cmp(dpo2, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "adj for 200.200.200.200/32 is recursive via adj for 1.1.1.1");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_3_s_32);
+ dpo1 = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(!dpo_cmp(dpo1, load_balance_get_bucket(dpo->dpoi_index, 1)),
+ "adj for 200.200.200.200/32 is recursive via adj for 1.1.1.3");
+
+ /*
+ * expect the lb-map used by the recursive's load-balance is using both buckets
+ */
+ load_balance_map_t *lbm;
+ index_t lbmi;
+
+ lb = load_balance_get(dpo->dpoi_index);
+ lbmi = lb->lb_map;
+ load_balance_map_lock(lbmi);
+ lbm = load_balance_map_get(lbmi);
+
+ FIB_TEST(lbm->lbm_buckets[0] == 0,
+ "LB maps's bucket 0 is %d",
+ lbm->lbm_buckets[0]);
+ FIB_TEST(lbm->lbm_buckets[1] == 1,
+ "LB maps's bucket 1 is %d",
+ lbm->lbm_buckets[1]);
+
+ /*
+ * withdraw one of the /32 via-entries.
+ * that ECMP path will be unresolved and forwarding should continue on the
+ * other available path. this is an iBGP PIC edge failover.
+ * Test the forwarding changes without re-fetching the adj from the
+ * recursive entry. this ensures it's the same one that is updated, i.e. an
+ * in-place modify.
+ */
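+ /*
+ * Sketch of the assumed LB-map indirection (hash is a placeholder for
+ * the flow hash): the data-plane picks
+ * bucket = lbm->lbm_buckets[hash & (lb->lb_n_buckets - 1)];
+ * so repointing map slots away from the failed path moves traffic
+ * without rebuilding the load-balance bucket by bucket.
+ */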
+ fib_table_entry_path_remove(fib_index,
+ &pfx_1_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx);
+ FIB_TEST(!dpo_cmp(dpo, fib_entry_contribute_ip_forwarding(fei)),
+ "post PIC 200.200.200.200/32 was inplace modified");
+
+ FIB_TEST(!dpo_cmp(dpo1, load_balance_get_bucket_i(lb, 0)),
+ "post PIC adj for 200.200.200.200/32 is recursive"
+ " via adj for 1.1.1.3");
+
+ /*
+ * the LB map that was locked above should have been modified to remove
+ * the path that was down, and thus its bucket points to a path that is
+ * still up.
+ */
+ FIB_TEST(lbm->lbm_buckets[0] == 1,
+ "LB maps's bucket 0 is %d",
+ lbm->lbm_buckets[0]);
+ FIB_TEST(lbm->lbm_buckets[1] == 1,
+ "LB maps's bucket 1 is %d",
+ lbm->lbm_buckets[1]);
+
+ load_balance_map_unlock(lb->lb_map);
+
+ /*
+ * add it back again
+ */
+ fib_table_entry_path_add(fib_index,
+ &pfx_1_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST(!dpo_cmp(dpo2, load_balance_get_bucket_i(lb, 0)),
+ "post PIC recovery adj for 200.200.200.200/32 is recursive "
+ "via adj for 1.1.1.1");
+ FIB_TEST(!dpo_cmp(dpo1, load_balance_get_bucket_i(lb, 1)),
+ "post PIC recovery adj for 200.200.200.200/32 is recursive "
+ "via adj for 1.1.1.3");
+
+ fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(lb == load_balance_get(dpo->dpoi_index),
+ "post PIC 200.200.200.200/32 was inplace modified");
+
+ /*
+ * add a 3rd path. this makes the LB 16 buckets.
+ */
+ fib_table_entry_path_add(fib_index,
+ &bgp_200_pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_2_s_32.fp_addr,
+ ~0,
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_RESOLVE_VIA_HOST);
+
+ fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(lb == load_balance_get(dpo->dpoi_index),
+ "200.200.200.200/32 was inplace modified for 3rd path");
+ FIB_TEST(16 == lb->lb_n_buckets,
+ "200.200.200.200/32 was inplace modified for 3rd path to 16 buckets");
+
+ lbmi = lb->lb_map;
+ load_balance_map_lock(lbmi);
+ lbm = load_balance_map_get(lbmi);
+
+ for (ii = 0; ii < 16; ii++)
+ {
+ FIB_TEST(lbm->lbm_buckets[ii] == ii,
+ "LB Map for 200.200.200.200/32 at %d is %d",
+ ii, lbm->lbm_buckets[ii]);
+ }
+
+ /*
+ * trigger PIC by removing the first via-entry
+ * the first 6 buckets of the map should map to the next 6
+ */
+ fib_table_entry_path_remove(fib_index,
+ &pfx_1_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(lb == load_balance_get(dpo->dpoi_index),
+ "200.200.200.200/32 was inplace modified for 3rd path");
+ FIB_TEST(2 == lb->lb_n_buckets,
+ "200.200.200.200/32 was inplace modified for 3rd path remove to 2 buckets");
+
+ for (ii = 0; ii < 6; ii++)
+ {
+ FIB_TEST(lbm->lbm_buckets[ii] == ii+6,
+ "LB Map for 200.200.200.200/32 at %d is %d",
+ ii, lbm->lbm_buckets[ii]);
+ }
+ for (ii = 6; ii < 16; ii++)
+ {
+ FIB_TEST(lbm->lbm_buckets[ii] == ii,
+ "LB Map for 200.200.200.200/32 at %d is %d",
+ ii, lbm->lbm_buckets[ii]);
+ }
+
+
+ /*
+ * tidy up
+ */
+ fib_table_entry_path_add(fib_index,
+ &pfx_1_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fib_table_entry_path_remove(fib_index,
+ &bgp_200_pfx,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_2_s_32.fp_addr,
+ ~0,
+ fib_index,
+ 1,
+ FIB_ROUTE_PATH_RESOLVE_VIA_HOST); // match the flags used at path-add
+ fib_table_entry_path_remove(fib_index,
+ &bgp_200_pfx,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_1_1_1_1,
+ ~0,
+ fib_index,
+ 1,
+ FIB_ROUTE_PATH_RESOLVE_VIA_HOST);
+ fib_table_entry_path_remove(fib_index,
+ &bgp_200_pfx,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_3_s_32.fp_addr,
+ ~0,
+ fib_index,
+ 1,
+ FIB_ROUTE_PATH_RESOLVE_VIA_HOST);
+ fib_table_entry_delete(fib_index,
+ &pfx_1_1_1_3_s_32,
+ FIB_SOURCE_API);
+ fib_table_entry_delete(fib_index,
+ &pfx_1_1_1_0_s_28,
+ FIB_SOURCE_API);
+ FIB_TEST((FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_0_s_28)),
+ "1.1.1.1/28 removed");
+ FIB_TEST((FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_3_s_32)),
+ "1.1.1.3/32 removed");
+ FIB_TEST((FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &bgp_200_pfx)),
+ "200.200.200.200/32 removed");
+
+ /*
+ * add-remove test. no change.
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+7 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * A route whose paths are built up iteratively and then removed
+ * all at once
+ */
+ fib_prefix_t pfx_4_4_4_4_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 4.4.4.4/32 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x04040404),
+ },
+ };
+
+ fib_table_entry_path_add(fib_index,
+ &pfx_4_4_4_4_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fib_table_entry_path_add(fib_index,
+ &pfx_4_4_4_4_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_2,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fib_table_entry_path_add(fib_index,
+ &pfx_4_4_4_4_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_3,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ FIB_TEST(FIB_NODE_INDEX_INVALID !=
+ fib_table_lookup_exact_match(fib_index, &pfx_4_4_4_4_s_32),
+ "4.4.4.4/32 present");
+
+ fib_table_entry_delete(fib_index,
+ &pfx_4_4_4_4_s_32,
+ FIB_SOURCE_API);
+ FIB_TEST(FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &pfx_4_4_4_4_s_32),
+ "4.4.4.4/32 removed");
+
+ /*
+ * add-remove test. no change.
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+7 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * A route with multiple paths at once
+ */
+ fib_route_path_t *r_paths = NULL;
+
+ for (ii = 0; ii < 4; ii++)
+ {
+ fib_route_path_t r_path = {
+ .frp_proto = FIB_PROTOCOL_IP4,
+ .frp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a02 + ii),
+ },
+ .frp_sw_if_index = tm->hw[0]->sw_if_index,
+ .frp_weight = 1,
+ .frp_fib_index = ~0,
+ };
+ vec_add1(r_paths, r_path);
+ }
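+ /*
+ * fib_table_entry_update() replaces the source's entire path-set with
+ * the vector built above in one call - contrast with the iterative
+ * path_add sequence used for 4.4.4.4/32 just before.
+ */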
+
+ fib_table_entry_update(fib_index,
+ &pfx_4_4_4_4_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ r_paths);
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_4_4_4_4_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "4.4.4.4/32 present");
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+
+ lb = load_balance_get(dpo->dpoi_index);
+ FIB_TEST((lb->lb_n_buckets == 4), "4.4.4.4/32 lb over %d paths", lb->lb_n_buckets);
+
+ fib_table_entry_delete(fib_index,
+ &pfx_4_4_4_4_s_32,
+ FIB_SOURCE_API);
+ FIB_TEST(FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &pfx_4_4_4_4_s_32),
+ "4.4.4.4/32 removed");
+ vec_free(r_paths);
+
+ /*
+ * add-remove test. no change.
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+7 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * A deag route
+ */
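+ /*
+ * A deag route is taken here to contribute a lookup DPO: packets
+ * matching 4.4.4.4/32 are re-looked-up in the FIB named by
+ * lkd_fib_index rather than forwarded out of an adjacency.
+ */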
+ fib_table_entry_path_add(fib_index,
+ &pfx_4_4_4_4_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &zero_addr,
+ ~0,
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_4_4_4_4_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "4.4.4.4/32 present");
+
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ dpo = load_balance_get_bucket(dpo->dpoi_index, 0);
+ lookup_dpo_t *lkd = lookup_dpo_get(dpo->dpoi_index);
+
+ FIB_TEST((fib_index == lkd->lkd_fib_index),
+ "4.4.4.4/32 is deag in %d %U",
+ lkd->lkd_fib_index,
+ format_dpo_id, dpo, 0);
+
+ fib_table_entry_delete(fib_index,
+ &pfx_4_4_4_4_s_32,
+ FIB_SOURCE_API);
+ FIB_TEST(FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &pfx_4_4_4_4_s_32),
+ "4.4.4.4/32 removed");
+
+ /*
+ * add-remove test. no change.
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+7 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * Duplicate paths:
+ * add a recursive with duplicate paths. Expect the duplicate to be ignored.
+ */
+ fib_prefix_t pfx_34_1_1_1_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x22010101),
+ },
+ };
+ fib_prefix_t pfx_34_34_1_1_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x22220101),
+ },
+ };
+ fei = fib_table_entry_path_add(fib_index,
+ &pfx_34_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_34_34_1_1_s_32.fp_addr,
+ ~0,
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_entry_path_add(fib_index,
+ &pfx_34_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_34_34_1_1_s_32.fp_addr,
+ ~0,
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ FIB_TEST_REC_FORW(&pfx_34_1_1_1_s_32, &pfx_34_34_1_1_s_32, 0);
+ fib_table_entry_delete_index(fei, FIB_SOURCE_API);
+
+ /*
+ * CLEANUP
+ * remove: 1.1.1.2/32, 1.1.2.0/24 and 1.1.1.1/32
+ * all of which are via 10.10.10.1, Itf1
+ */
+ fib_table_entry_path_remove(fib_index,
+ &pfx_1_1_1_2_s_32,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fib_table_entry_path_remove(fib_index,
+ &pfx_1_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fib_table_entry_path_remove(fib_index,
+ &pfx_1_1_2_0_s_24,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST(FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_1_s_32),
+ "1.1.1.1/32 removed");
+ FIB_TEST(FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_2_s_32),
+ "1.1.1.2/32 removed");
+ FIB_TEST(FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &pfx_1_1_2_0_s_24),
+ "1.1.2.0/24 removed");
+
+ /*
+ * -3 entries and -1 shared path-list
+ */
+ FIB_TEST((0 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+4 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+4 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * An attached-host route. Expect to link to the incomplete adj
+ */
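+ /*
+ * An incomplete adj is one created before ARP supplies a rewrite; the
+ * check below is that the route links to the very adj that
+ * adj_nbr_add_or_lock() returns for 4.1.1.1 on the same interface.
+ */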
+ fib_prefix_t pfx_4_1_1_1_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 4.1.1.1/32 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x04010101),
+ },
+ };
+ fib_table_entry_path_add(fib_index,
+ &pfx_4_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &zero_addr,
+ tm->hw[0]->sw_if_index,
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_4_1_1_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "4.1.1.1/32 present");
+ ai = fib_entry_get_adj(fei);
+
+ ai2 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
+ VNET_LINK_IP4,
+ &pfx_4_1_1_1_s_32.fp_addr,
+ tm->hw[0]->sw_if_index);
+ FIB_TEST((ai == ai2), "Attached-host link to incomplete ADJ");
+ adj_unlock(ai2);
+
+ /*
+ * +1 entry and +1 shared path-list
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+5 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+5 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ fib_table_entry_delete(fib_index,
+ &pfx_4_1_1_1_s_32,
+ FIB_SOURCE_API);
+
+ FIB_TEST((0 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+4 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+4 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * add a v6 prefix via v4 next-hops
+ */
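+ /*
+ * i.e. an IPv6 payload carried over an IPv4 ARP adjacency: the adj's
+ * link type should be v6 while its NH proto stays v4, as asserted
+ * below.
+ */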
+ fib_prefix_t pfx_2001_s_64 = {
+ .fp_len = 64,
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_addr = {
+ .ip6.as_u64[0] = clib_host_to_net_u64(0x2001000000000000),
+ },
+ };
+ fei = fib_table_entry_path_add(0, //default v6 table
+ &pfx_2001_s_64,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup_exact_match(0, &pfx_2001_s_64);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "2001::/64 present");
+ ai = fib_entry_get_adj(fei);
+ adj = adj_get(ai);
+ FIB_TEST((adj->lookup_next_index == IP_LOOKUP_NEXT_ARP),
+ "2001::/64 via ARP-adj");
+ FIB_TEST((adj->ia_link == VNET_LINK_IP6),
+ "2001::/64 is link type v6");
+ FIB_TEST((adj->ia_nh_proto == FIB_PROTOCOL_IP4),
+ "2001::/64 ADJ-adj is NH proto v4");
+ fib_table_entry_delete(0, &pfx_2001_s_64, FIB_SOURCE_API);
+
+ /*
+ * add a uRPF exempt prefix:
+ * test:
+ * - its forwarding is drop
+ * - its uRPF list is not empty
+ * - the uRPF list for the default route (its cover) is empty
+ */
+ fei = fib_table_entry_special_add(fib_index,
+ &pfx_4_1_1_1_s_32,
+ FIB_SOURCE_URPF_EXEMPT,
+ FIB_ENTRY_FLAG_DROP,
+ ADJ_INDEX_INVALID);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(load_balance_is_drop(dpo),
+ "uRPF exempt 4.1.1.1/32 DROP");
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 1, 0),
+ "uRPF list for exempt prefix has itf index 0");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_0_0_0_0_s_0);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 0),
+ "uRPF list for 0.0.0.0/0 empty");
+
+ fib_table_entry_delete(fib_index, &pfx_4_1_1_1_s_32, FIB_SOURCE_URPF_EXEMPT);
+
+ /*
+ * CLEANUP
+ * remove adj-fibs:
+ */
+ fib_table_entry_delete(fib_index,
+ &pfx_10_10_10_1_s_32,
+ FIB_SOURCE_ADJ);
+ fib_table_entry_delete(fib_index,
+ &pfx_10_10_10_2_s_32,
+ FIB_SOURCE_ADJ);
+ FIB_TEST(FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32),
+ "10.10.10.1/32 adj-fib removed");
+ FIB_TEST(FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_2_s_32),
+ "10.10.10.2/32 adj-fib removed");
+
+ /*
+ * -2 entries and -2 non-shared path-lists
+ */
+ FIB_TEST((0 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR+2 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR+2 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * unlock the adjacencies for which this test provided a rewrite.
+ * These are the last locks on these adjs. they should thus go away.
+ */
+ adj_unlock(ai_02);
+ adj_unlock(ai_01);
+ adj_unlock(ai_12_12_12_12);
+
+ FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
+ adj_nbr_db_size());
+
+ /*
+ * CLEANUP
+ * remove the interface prefixes
+ */
+ local_pfx.fp_len = 32;
+ fib_table_entry_special_remove(fib_index, &local_pfx,
+ FIB_SOURCE_INTERFACE);
+ fei = fib_table_lookup(fib_index, &local_pfx);
+
+ FIB_TEST(FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &local_pfx),
+ "10.10.10.10/32 adj-fib removed");
+
+ local_pfx.fp_len = 24;
+ fib_table_entry_delete(fib_index, &local_pfx,
+ FIB_SOURCE_INTERFACE);
+
+ FIB_TEST(FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &local_pfx),
+ "10.10.10.10/24 adj-fib removed");
+
+ /*
+ * -2 entries and -2 non-shared path-lists
+ */
+ FIB_TEST((0 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * Last but not least, remove the VRF
+ */
+ FIB_TEST((0 == fib_table_get_num_entries(fib_index,
+ FIB_PROTOCOL_IP4,
+ FIB_SOURCE_API)),
+ "NO API Source'd prefixes");
+ FIB_TEST((0 == fib_table_get_num_entries(fib_index,
+ FIB_PROTOCOL_IP4,
+ FIB_SOURCE_RR)),
+ "NO RR Source'd prefixes");
+ FIB_TEST((0 == fib_table_get_num_entries(fib_index,
+ FIB_PROTOCOL_IP4,
+ FIB_SOURCE_INTERFACE)),
+ "NO INterface Source'd prefixes");
+
+ fib_table_unlock(fib_index, FIB_PROTOCOL_IP4);
+
+ FIB_TEST((0 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NBR-5 == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NBR-5 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+ FIB_TEST((NBR-5 == pool_elts(fib_urpf_list_pool)), "uRPF pool size is %d",
+ pool_elts(fib_urpf_list_pool));
+
+ return 0;
+}
+
+static int
+fib_test_v6 (void)
+{
+ /*
+ * In the default table check for the presence and correct forwarding
+ * of the special entries
+ */
+ fib_node_index_t dfrt, fei, ai, locked_ai, ai_01, ai_02;
+ const dpo_id_t *dpo, *dpo_drop;
+ const ip_adjacency_t *adj;
+ const receive_dpo_t *rd;
+ test_main_t *tm;
+ u32 fib_index;
+ int ii;
+
+ FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
+ adj_nbr_db_size());
+
+ /* via 2001:0:0:1::2 */
+ ip46_address_t nh_2001_2 = {
+ .ip6 = {
+ .as_u64 = {
+ [0] = clib_host_to_net_u64(0x2001000000000001),
+ [1] = clib_host_to_net_u64(0x0000000000000002),
+ },
+ },
+ };
+
+ tm = &test_main;
+
+ dpo_drop = drop_dpo_get(DPO_PROTO_IP6);
+
+ /* Find or create FIB table 11 */
+ fib_index = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP6, 11);
+
+ for (ii = 0; ii < 4; ii++)
+ {
+ ip6_main.fib_index_by_sw_if_index[tm->hw[ii]->sw_if_index] = fib_index;
+ }
+
+ fib_prefix_t pfx_0_0 = {
+ .fp_len = 0,
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_addr = {
+ .ip6 = {
+ {0, 0},
+ },
+ },
+ };
+
+ dfrt = fib_table_lookup(fib_index, &pfx_0_0);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != dfrt), "default route present");
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(dfrt)),
+ "Default route is DROP");
+
+ dpo = fib_entry_contribute_ip_forwarding(dfrt);
+ FIB_TEST((dpo->dpoi_index == ip6_fib_table_fwding_lookup(
+ &ip6_main,
+ 1,
+ &pfx_0_0.fp_addr.ip6)),
+ "default-route; fwd and non-fwd tables match");
+
+ // FIXME - check specials.
+
+ /*
+ * At this stage there is one v4 FIB with 5 routes and two v6 FIBs
+ * each with 6 entries. All entries are special so no path-list sharing.
+ */
+#define NPS (5+6+6)
+ FIB_TEST((0 == fib_path_list_db_size()), "path list DB is empty");
+ FIB_TEST((NPS == fib_path_list_pool_size()), "path list pool size is %d",
+ fib_path_list_pool_size());
+ FIB_TEST((NPS == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
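+
+ /*
+ * all entry and path-list pool checks below are deltas against this
+ * NPS baseline.
+ */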
+
+ /*
+ * add interface routes.
+ * validate presence of /64 attached and /128 receive.
+ * test for the presence of the receive address in the glean and local adj
+ *
+ * receive on 2001:0:0:1::1/128
+ */
+ fib_prefix_t local_pfx = {
+ .fp_len = 64,
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_addr = {
+ .ip6 = {
+ .as_u64 = {
+ [0] = clib_host_to_net_u64(0x2001000000000001),
+ [1] = clib_host_to_net_u64(0x0000000000000001),
+ },
+ },
+ }
+ };
+
+ fib_table_entry_update_one_path(fib_index, &local_pfx,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_CONNECTED |
+ FIB_ENTRY_FLAG_ATTACHED),
+ FIB_PROTOCOL_IP6,
+ NULL,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
+
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached interface route present");
+
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != ai), "attached interface route adj present");
+ adj = adj_get(ai);
+ FIB_TEST((IP_LOOKUP_NEXT_GLEAN == adj->lookup_next_index),
+ "attached interface adj is glean");
+ FIB_TEST((0 == ip46_address_cmp(&local_pfx.fp_addr,
+ &adj->sub_type.glean.receive_addr)),
+ "attached interface adj is receive ok");
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST((dpo->dpoi_index == ip6_fib_table_fwding_lookup(
+ &ip6_main,
+ 1,
+ &local_pfx.fp_addr.ip6)),
+ "attached-route; fwd and non-fwd tables match");
+
+ local_pfx.fp_len = 128;
+ fib_table_entry_update_one_path(fib_index, &local_pfx,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_CONNECTED |
+ FIB_ENTRY_FLAG_LOCAL),
+ FIB_PROTOCOL_IP6,
+ NULL,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup(fib_index, &local_pfx);
+
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local interface route present");
+
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ dpo = load_balance_get_bucket(dpo->dpoi_index, 0);
+ FIB_TEST((DPO_RECEIVE == dpo->dpoi_type),
+ "local interface adj is local");
+ rd = receive_dpo_get(dpo->dpoi_index);
+
+ FIB_TEST((0 == ip46_address_cmp(&local_pfx.fp_addr,
+ &rd->rd_addr)),
+ "local interface adj is receive ok");
+
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST((dpo->dpoi_index == ip6_fib_table_fwding_lookup(
+ &ip6_main,
+ 1,
+ &local_pfx.fp_addr.ip6)),
+ "local-route; fwd and non-fwd tables match");
+
+ /*
+ * +2 entries. +2 unshared path-lists
+ */
+ FIB_TEST((0 == fib_path_list_db_size()), "path list DB is empty");
+ FIB_TEST((NPS+2 == fib_path_list_pool_size()), "path list pool size is%d",
+ fib_path_list_pool_size());
+ FIB_TEST((NPS+2 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * Modify the default route to be via an adj not yet known.
+ * this sources the default route with the API source, which has
+ * a higher preference than the DEFAULT_ROUTE source
+ */
+ fib_table_entry_path_add(fib_index, &pfx_0_0,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP6,
+ &nh_2001_2,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup(fib_index, &pfx_0_0);
+
+ FIB_TEST((fei == dfrt), "default route same index");
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != ai), "default route adj present");
+ adj = adj_get(ai);
+ FIB_TEST((IP_LOOKUP_NEXT_ARP == adj->lookup_next_index),
+ "adj is incomplete");
+ FIB_TEST((0 == ip46_address_cmp(&nh_2001_2, &adj->sub_type.nbr.next_hop)),
+ "adj nbr next-hop ok");
+
+ /*
+ * find the adj in the shared db
+ */
+ locked_ai = adj_nbr_add_or_lock(FIB_PROTOCOL_IP6,
+ VNET_LINK_IP6,
+ &nh_2001_2,
+ tm->hw[0]->sw_if_index);
+ FIB_TEST((locked_ai == ai), "ADJ NBR DB find");
+ adj_unlock(locked_ai);
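+
+ /*
+ * adj_nbr_add_or_lock() is find-or-create on the neighbour adj DB,
+ * keyed on (nh-proto, link-type, nh-addr, sw_if_index). A minimal
+ * sketch of the sharing semantics exercised above:
+ *
+ *   a1 = adj_nbr_add_or_lock(proto, link, &nh, sw_if); // create, lock=1
+ *   a2 = adj_nbr_add_or_lock(proto, link, &nh, sw_if); // find, lock=2
+ *   ASSERT(a1 == a2);
+ *   adj_unlock(a2); // lock=1, the adj persists
+ */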
+
+ /*
+ * no more entries. +1 shared path-list
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NPS+3 == fib_path_list_pool_size()), "path list pool size is%d",
+ fib_path_list_pool_size());
+ FIB_TEST((NPS+2 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * remove the API source from the default route. We expect
+ * the route to remain, sourced by DEFAULT_ROUTE, and hence a DROP
+ */
+ fib_table_entry_path_remove(fib_index, &pfx_0_0,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP6,
+ &nh_2001_2,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup(fib_index, &pfx_0_0);
+
+ FIB_TEST((fei == dfrt), "default route same index");
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(dfrt)),
+ "Default route is DROP");
+
+ /*
+ * no more entries. -1 shared path-list
+ */
+ FIB_TEST((0 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NPS+2 == fib_path_list_pool_size()), "path list pool size is%d",
+ fib_path_list_pool_size());
+ FIB_TEST((NPS+2 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * Add an 2 ARP entry => a complete ADJ plus adj-fib.
+ */
+ fib_prefix_t pfx_2001_1_2_s_128 = {
+ .fp_len = 128,
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_addr = {
+ .ip6 = {
+ .as_u64 = {
+ [0] = clib_host_to_net_u64(0x2001000000000001),
+ [1] = clib_host_to_net_u64(0x0000000000000002),
+ },
+ },
+ }
+ };
+ fib_prefix_t pfx_2001_1_3_s_128 = {
+ .fp_len = 128,
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_addr = {
+ .ip6 = {
+ .as_u64 = {
+ [0] = clib_host_to_net_u64(0x2001000000000001),
+ [1] = clib_host_to_net_u64(0x0000000000000003),
+ },
+ },
+ }
+ };
+ u8 eth_addr[] = {
+ 0xde, 0xde, 0xde, 0xba, 0xba, 0xba,
+ };
+
+ ai_01 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP6,
+ VNET_LINK_IP6,
+ &pfx_2001_1_2_s_128.fp_addr,
+ tm->hw[0]->sw_if_index);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != ai_01), "adj created");
+ adj = adj_get(ai_01);
+ FIB_TEST((IP_LOOKUP_NEXT_ARP == adj->lookup_next_index),
+ "adj is incomplete");
+ FIB_TEST((0 == ip46_address_cmp(&pfx_2001_1_2_s_128.fp_addr,
+ &adj->sub_type.nbr.next_hop)),
+ "adj nbr next-hop ok");
+
+ adj_nbr_update_rewrite(ai_01, ADJ_NBR_REWRITE_FLAG_COMPLETE,
+ fib_test_build_rewrite(eth_addr));
+ FIB_TEST((IP_LOOKUP_NEXT_REWRITE == adj->lookup_next_index),
+ "adj is complete");
+ FIB_TEST((0 == ip46_address_cmp(&pfx_2001_1_2_s_128.fp_addr,
+ &adj->sub_type.nbr.next_hop)),
+ "adj nbr next-hop ok");
+
+ fib_table_entry_update_one_path(fib_index,
+ &pfx_2001_1_2_s_128,
+ FIB_SOURCE_ADJ,
+ FIB_ENTRY_FLAG_ATTACHED,
+ FIB_PROTOCOL_IP6,
+ &pfx_2001_1_2_s_128.fp_addr,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_2001_1_2_s_128);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "ADJ-FIB resolves via adj");
+
+ eth_addr[5] = 0xb2;
+
+ ai_02 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP6,
+ VNET_LINK_IP6,
+ &pfx_2001_1_3_s_128.fp_addr,
+ tm->hw[0]->sw_if_index);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != ai_02), "adj created");
+ adj = adj_get(ai_02);
+ FIB_TEST((IP_LOOKUP_NEXT_ARP == adj->lookup_next_index),
+ "adj is incomplete");
+ FIB_TEST((0 == ip46_address_cmp(&pfx_2001_1_3_s_128.fp_addr,
+ &adj->sub_type.nbr.next_hop)),
+ "adj nbr next-hop ok");
+
+ adj_nbr_update_rewrite(ai_02, ADJ_NBR_REWRITE_FLAG_COMPLETE,
+ fib_test_build_rewrite(eth_addr));
+ FIB_TEST((IP_LOOKUP_NEXT_REWRITE == adj->lookup_next_index),
+ "adj is complete");
+ FIB_TEST((0 == ip46_address_cmp(&pfx_2001_1_3_s_128.fp_addr,
+ &adj->sub_type.nbr.next_hop)),
+ "adj nbr next-hop ok");
+ FIB_TEST((ai_01 != ai_02), "ADJs are different");
+
+ fib_table_entry_update_one_path(fib_index,
+ &pfx_2001_1_3_s_128,
+ FIB_SOURCE_ADJ,
+ FIB_ENTRY_FLAG_ATTACHED,
+ FIB_PROTOCOL_IP6,
+ &pfx_2001_1_3_s_128.fp_addr,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_2001_1_3_s_128);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_02 == ai), "ADJ-FIB resolves via adj");
+
+ /*
+ * +2 entries, +2 unshared path-lists.
+ */
+ FIB_TEST((0 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NPS+4 == fib_path_list_pool_size()), "path list pool size is%d",
+ fib_path_list_pool_size());
+ FIB_TEST((NPS+4 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * Add 2 routes via the first ADJ. Ensure path-list sharing
+ */
+ fib_prefix_t pfx_2001_a_s_64 = {
+ .fp_len = 64,
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_addr = {
+ .ip6 = {
+ .as_u64 = {
+ [0] = clib_host_to_net_u64(0x200100000000000a),
+ [1] = clib_host_to_net_u64(0x0000000000000000),
+ },
+ },
+ }
+ };
+ fib_prefix_t pfx_2001_b_s_64 = {
+ .fp_len = 64,
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_addr = {
+ .ip6 = {
+ .as_u64 = {
+ [0] = clib_host_to_net_u64(0x200100000000000b),
+ [1] = clib_host_to_net_u64(0x0000000000000000),
+ },
+ },
+ }
+ };
+
+ fib_table_entry_path_add(fib_index,
+ &pfx_2001_a_s_64,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP6,
+ &nh_2001_2,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup(fib_index, &pfx_2001_a_s_64);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "2001::a/64 resolves via 2001:0:0:1::1");
+ fib_table_entry_path_add(fib_index,
+ &pfx_2001_b_s_64,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP6,
+ &nh_2001_2,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup(fib_index, &pfx_2001_b_s_64);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "2001::b/64 resolves via 2001:0:0:1::1");
+
+ /*
+ * +2 entries, +1 shared path-list.
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NPS+5 == fib_path_list_pool_size()), "path list pool size is%d",
+ fib_path_list_pool_size());
+ FIB_TEST((NPS+6 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
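+
+ /*
+ * both /64s have the identical single path (nh_2001_2 out of hw[0]),
+ * so they resolve via one shared path-list: the DB holds 1 entry and
+ * the pool grew by only 1, as checked above.
+ */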
+
+ /*
+ * add a v4 prefix via a v6 next-hop
+ */
+ fib_prefix_t pfx_1_1_1_1_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = 0x01010101,
+ },
+ };
+ fei = fib_table_entry_path_add(0, // default table
+ &pfx_1_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP6,
+ &nh_2001_2,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ FIB_TEST(fei == fib_table_lookup_exact_match(0, &pfx_1_1_1_1_s_32),
+ "1.1.1.1/32 o v6 route present");
+ ai = fib_entry_get_adj(fei);
+ adj = adj_get(ai);
+ FIB_TEST((adj->lookup_next_index == IP_LOOKUP_NEXT_ARP),
+ "1.1.1.1/32 via ARP-adj");
+ FIB_TEST((adj->ia_link == VNET_LINK_IP4),
+ "1.1.1.1/32 ADJ-adj is link type v4");
+ FIB_TEST((adj->ia_nh_proto == FIB_PROTOCOL_IP6),
+ "1.1.1.1/32 ADJ-adj is NH proto v6");
+ fib_table_entry_delete(0, &pfx_1_1_1_1_s_32, FIB_SOURCE_API);
+
+ /*
+ * An attached route
+ */
+ fib_prefix_t pfx_2001_c_s_64 = {
+ .fp_len = 64,
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_addr = {
+ .ip6 = {
+ .as_u64 = {
+ [0] = clib_host_to_net_u64(0x200100000000000c),
+ [1] = clib_host_to_net_u64(0x0000000000000000),
+ },
+ },
+ }
+ };
+ fib_table_entry_path_add(fib_index,
+ &pfx_2001_c_s_64,
+ FIB_SOURCE_CLI,
+ FIB_ENTRY_FLAG_ATTACHED,
+ FIB_PROTOCOL_IP6,
+ NULL,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_c_s_64);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached route present");
+ ai = fib_entry_get_adj(fei);
+ adj = adj_get(ai);
+ FIB_TEST((adj->lookup_next_index == IP_LOOKUP_NEXT_GLEAN),
+ "2001:0:0:c/64 attached resolves via glean");
+
+ fib_table_entry_path_remove(fib_index,
+ &pfx_2001_c_s_64,
+ FIB_SOURCE_CLI,
+ FIB_PROTOCOL_IP6,
+ NULL,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_c_s_64);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "attached route removed");
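+
+ /*
+ * an attached path (NULL next-hop plus an interface) resolves via a
+ * glean adj; on-link destinations covered by the prefix trigger
+ * neighbour discovery rather than using a fixed rewrite.
+ */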
+
+ /*
+ * Shut down the interface on which we have a connected prefix and
+ * through which the routes are reachable.
+ * This will result in the connected, adj-fibs, and routes linking to drop.
+ * The local/for-us prefix continues to receive.
+ */
+ clib_error_t * error;
+
+ error = vnet_sw_interface_set_flags(vnet_get_main(),
+ tm->hw[0]->sw_if_index,
+ ~VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ FIB_TEST((NULL == error), "Interface shutdown OK");
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_b_s_64);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "2001::b/64 resolves via drop");
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_a_s_64);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "2001::a/64 resolves via drop");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_3_s_128);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "2001:0:0:1::3/64 resolves via drop");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_2_s_128);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "2001:0:0:1::2/64 resolves via drop");
+ fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "2001:0:0:1::1/128 not drop");
+ local_pfx.fp_len = 64;
+ fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "2001:0:0:1/64 resolves via drop");
+
+ /*
+ * no change
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NPS+5 == fib_path_list_pool_size()), "path list pool size is%d",
+ fib_path_list_pool_size());
+ FIB_TEST((NPS+6 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * shut down one of the other interfaces, then add a connected
+ * and swap one of the routes to it.
+ */
+ error = vnet_sw_interface_set_flags(vnet_get_main(),
+ tm->hw[1]->sw_if_index,
+ ~VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ FIB_TEST((NULL == error), "Interface 1 shutdown OK");
+
+ fib_prefix_t connected_pfx = {
+ .fp_len = 64,
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_addr = {
+ .ip6 = {
+ /* 2001:0:0:2::1/64 */
+ .as_u64 = {
+ [0] = clib_host_to_net_u64(0x2001000000000002),
+ [1] = clib_host_to_net_u64(0x0000000000000001),
+ },
+ },
+ }
+ };
+ fib_table_entry_update_one_path(fib_index, &connected_pfx,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_CONNECTED |
+ FIB_ENTRY_FLAG_ATTACHED),
+ FIB_PROTOCOL_IP6,
+ NULL,
+ tm->hw[1]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(fib_index, &connected_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached interface route present");
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ dpo = load_balance_get_bucket(dpo->dpoi_index, 0);
+ FIB_TEST(!dpo_cmp(dpo, dpo_drop),
+ "2001:0:0:2/64 not resolves via drop");
+
+ connected_pfx.fp_len = 128;
+ fib_table_entry_update_one_path(fib_index, &connected_pfx,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_CONNECTED |
+ FIB_ENTRY_FLAG_LOCAL),
+ FIB_PROTOCOL_IP6,
+ NULL,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup(fib_index, &connected_pfx);
+
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local interface route present");
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ dpo = load_balance_get_bucket(dpo->dpoi_index, 0);
+ FIB_TEST((DPO_RECEIVE == dpo->dpoi_type),
+ "local interface adj is local");
+ rd = receive_dpo_get(dpo->dpoi_index);
+ FIB_TEST((0 == ip46_address_cmp(&connected_pfx.fp_addr,
+ &rd->rd_addr)),
+ "local interface adj is receive ok");
+
+ /*
+ * +2 entries, +2 unshared path-lists
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NPS+7 == fib_path_list_pool_size()), "path list pool size is%d",
+ fib_path_list_pool_size());
+ FIB_TEST((NPS+8 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+
+ /*
+ * bring the interface back up. we expect the routes to return
+ * to normal forwarding.
+ */
+ error = vnet_sw_interface_set_flags(vnet_get_main(),
+ tm->hw[0]->sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ FIB_TEST((NULL == error), "Interface bring-up OK");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_a_s_64);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "2001::a/64 resolves via 2001:0:0:1::1");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_b_s_64);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "2001::b/64 resolves via 2001:0:0:1::1");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_3_s_128);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_02 == ai), "ADJ-FIB resolves via adj");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_2_s_128);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "ADJ-FIB resolves via adj");
+ local_pfx.fp_len = 64;
+ fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
+ ai = fib_entry_get_adj(fei);
+ adj = adj_get(ai);
+ FIB_TEST((IP_LOOKUP_NEXT_GLEAN == adj->lookup_next_index),
+ "attached interface adj is glean");
+
+ /*
+ * Same test as above, but this time the HW interface goes down
+ */
+ error = vnet_hw_interface_set_flags(vnet_get_main(),
+ tm->hw_if_indicies[0],
+ ~VNET_HW_INTERFACE_FLAG_LINK_UP);
+ FIB_TEST((NULL == error), "Interface shutdown OK");
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_b_s_64);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "2001::b/64 resolves via drop");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_a_s_64);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "2001::a/64 resolves via drop");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_3_s_128);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "2001:0:0:1::3/128 resolves via drop");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_2_s_128);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "2001:0:0:1::2/128 resolves via drop");
+ local_pfx.fp_len = 128;
+ fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "2001:0:0:1::1/128 not drop");
+ local_pfx.fp_len = 64;
+ fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(!dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "2001:0:0:1/64 resolves via drop");
+
+ error = vnet_hw_interface_set_flags(vnet_get_main(),
+ tm->hw_if_indicies[0],
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+ FIB_TEST((NULL == error), "Interface bring-up OK");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_a_s_64);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "2001::a/64 resolves via 2001:0:0:1::1");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_b_s_64);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "2001::b/64 resolves via 2001:0:0:1::1");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_3_s_128);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_02 == ai), "ADJ-FIB resolves via adj");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_2_s_128);
+ ai = fib_entry_get_adj(fei);
+ FIB_TEST((ai_01 == ai), "ADJ-FIB resolves via adj");
+ local_pfx.fp_len = 64;
+ fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
+ ai = fib_entry_get_adj(fei);
+ adj = adj_get(ai);
+ FIB_TEST((IP_LOOKUP_NEXT_GLEAN == adj->lookup_next_index),
+ "attached interface adj is glean");
+
+ /*
+ * Delete the interface that the routes resolve through.
+ * Again no routes are removed. They all point to drop.
+ *
+ * This is considered an error case. The control plane should
+ * not remove interfaces through which routes resolve, but
+ * such things can happen. ALL affected routes will drop.
+ */
+ vnet_delete_hw_interface(vnet_get_main(), tm->hw_if_indicies[0]);
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_b_s_64);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "2001::b/64 resolves via drop");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_a_s_64);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "2001::b/64 resolves via drop");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_3_s_128);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "2001:0:0:1::3/64 resolves via drop");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_2_s_128);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "2001:0:0:1::2/64 resolves via drop");
+ fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "2001:0:0:1::1/128 is drop");
+ local_pfx.fp_len = 64;
+ fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "2001:0:0:1/64 resolves via drop");
+
+ /*
+ * no change
+ */
+ FIB_TEST((1 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NPS+7 == fib_path_list_pool_size()), "path list pool size is%d",
+ fib_path_list_pool_size());
+ FIB_TEST((NPS+8 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * Add the interface back. routes stay unresolved.
+ */
+ error = ethernet_register_interface(vnet_get_main(),
+ test_interface_device_class.index,
+ 0 /* instance */,
+ hw_address,
+ &tm->hw_if_indicies[0],
+ /* flag change */ 0);
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_b_s_64);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "2001::b/64 resolves via drop");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_a_s_64);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "2001::b/64 resolves via drop");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_3_s_128);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "2001:0:0:1::3/64 resolves via drop");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_1_2_s_128);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "2001:0:0:1::2/64 resolves via drop");
+ fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "2001:0:0:1::1/128 is drop");
+ local_pfx.fp_len = 64;
+ fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
+ FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
+ "2001:0:0:1/64 resolves via drop");
+
+ /*
+ * CLEANUP ALL the routes
+ */
+ fib_table_entry_delete(fib_index,
+ &pfx_2001_c_s_64,
+ FIB_SOURCE_API);
+ fib_table_entry_delete(fib_index,
+ &pfx_2001_a_s_64,
+ FIB_SOURCE_API);
+ fib_table_entry_delete(fib_index,
+ &pfx_2001_b_s_64,
+ FIB_SOURCE_API);
+ fib_table_entry_delete(fib_index,
+ &pfx_2001_1_3_s_128,
+ FIB_SOURCE_ADJ);
+ fib_table_entry_delete(fib_index,
+ &pfx_2001_1_2_s_128,
+ FIB_SOURCE_ADJ);
+ local_pfx.fp_len = 64;
+ fib_table_entry_delete(fib_index, &local_pfx,
+ FIB_SOURCE_INTERFACE);
+ local_pfx.fp_len = 128;
+ fib_table_entry_special_remove(fib_index, &local_pfx,
+ FIB_SOURCE_INTERFACE);
+ connected_pfx.fp_len = 64;
+ fib_table_entry_delete(fib_index, &connected_pfx,
+ FIB_SOURCE_INTERFACE);
+ connected_pfx.fp_len = 128;
+ fib_table_entry_special_remove(fib_index, &connected_pfx,
+ FIB_SOURCE_INTERFACE);
+
+ FIB_TEST((FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &pfx_2001_a_s_64)),
+ "2001::a/64 removed");
+ FIB_TEST((FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &pfx_2001_b_s_64)),
+ "2001::b/64 removed");
+ FIB_TEST((FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &pfx_2001_1_3_s_128)),
+ "2001:0:0:1::3/128 removed");
+ FIB_TEST((FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &pfx_2001_1_2_s_128)),
+ "2001:0:0:1::3/128 removed");
+ local_pfx.fp_len = 64;
+ FIB_TEST((FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &local_pfx)),
+ "2001:0:0:1/64 removed");
+ local_pfx.fp_len = 128;
+ FIB_TEST((FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &local_pfx)),
+ "2001:0:0:1::1/128 removed");
+ connected_pfx.fp_len = 64;
+ FIB_TEST((FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &connected_pfx)),
+ "2001:0:0:2/64 removed");
+ connected_pfx.fp_len = 128;
+ FIB_TEST((FIB_NODE_INDEX_INVALID ==
+ fib_table_lookup_exact_match(fib_index, &connected_pfx)),
+ "2001:0:0:2::1/128 removed");
+
+ /*
+ * -8 entries. -7 path-lists (1 was shared).
+ */
+ FIB_TEST((0 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NPS == fib_path_list_pool_size()), "path list pool size is%d",
+ fib_path_list_pool_size());
+ FIB_TEST((NPS == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ /*
+ * now remove the VRF
+ */
+ fib_table_unlock(fib_index, FIB_PROTOCOL_IP6);
+
+ FIB_TEST((0 == fib_path_list_db_size()), "path list DB population:%d",
+ fib_path_list_db_size());
+ FIB_TEST((NPS-6 == fib_path_list_pool_size()), "path list pool size is%d",
+ fib_path_list_pool_size());
+ FIB_TEST((NPS-6 == fib_entry_pool_size()), "entry pool size is %d",
+ fib_entry_pool_size());
+
+ adj_unlock(ai_02);
+ adj_unlock(ai_01);
+
+ /*
+ * return the interfaces to up state
+ */
+ error = vnet_sw_interface_set_flags(vnet_get_main(),
+ tm->hw[0]->sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ error = vnet_sw_interface_set_flags(vnet_get_main(),
+ tm->hw[1]->sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+
+ FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
+ adj_nbr_db_size());
+
+ return (0);
+}
+
+/*
+ * Test Attached Exports
+ */
+static int
+fib_test_ae (void)
+{
+ const dpo_id_t *dpo, *dpo_drop;
+ const u32 fib_index = 0;
+ fib_node_index_t fei;
+ test_main_t *tm;
+ ip4_main_t *im;
+
+ tm = &test_main;
+ im = &ip4_main;
+
+ FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
+ adj_nbr_db_size());
+
+ /*
+ * add interface routes. We'll assume this works. It's more rigorously
+ * tested elsewhere.
+ */
+ fib_prefix_t local_pfx = {
+ .fp_len = 24,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4 = {
+ /* 10.10.10.10 */
+ .as_u32 = clib_host_to_net_u32(0x0a0a0a0a),
+ },
+ },
+ };
+
+ vec_validate(im->fib_index_by_sw_if_index, tm->hw[0]->sw_if_index);
+ im->fib_index_by_sw_if_index[tm->hw[0]->sw_if_index] = fib_index;
+
+ dpo_drop = drop_dpo_get(DPO_PROTO_IP4);
+
+ fib_table_entry_update_one_path(fib_index, &local_pfx,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_CONNECTED |
+ FIB_ENTRY_FLAG_ATTACHED),
+ FIB_PROTOCOL_IP4,
+ NULL,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei),
+ "attached interface route present");
+
+ local_pfx.fp_len = 32;
+ fib_table_entry_update_one_path(fib_index, &local_pfx,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_CONNECTED |
+ FIB_ENTRY_FLAG_LOCAL),
+ FIB_PROTOCOL_IP4,
+ NULL,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
+
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei),
+ "local interface route present");
+
+ /*
+ * Add an 2 ARP entry => a complete ADJ plus adj-fib.
+ */
+ fib_prefix_t pfx_10_10_10_1_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 10.10.10.1 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a01),
+ },
+ };
+ fib_node_index_t ai;
+
+ fib_table_entry_update_one_path(fib_index,
+ &pfx_10_10_10_1_s_32,
+ FIB_SOURCE_ADJ,
+ FIB_ENTRY_FLAG_ATTACHED,
+ FIB_PROTOCOL_IP4,
+ &pfx_10_10_10_1_s_32.fp_addr,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 created");
+ ai = fib_entry_get_adj(fei);
+
+ /*
+ * create another FIB table into which routes will be imported
+ */
+ u32 import_fib_index1;
+
+ import_fib_index1 = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP4, 11);
+
+ /*
+ * Add an attached route in the import FIB
+ */
+ local_pfx.fp_len = 24;
+ fib_table_entry_update_one_path(import_fib_index1,
+ &local_pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ NULL,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached export created");
+
+ /*
+ * check for the presence of the adj-fibs in the import table
+ */
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 imported");
+ FIB_TEST((ai == fib_entry_get_adj(fei)),
+ "adj-fib1 Import uses same adj as export");
+
+ /*
+ * check for the presence of the local in the import table
+ */
+ local_pfx.fp_len = 32;
+ fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local imported");
+
+ /*
+ * Add another adj-fib in the export table. Expect this
+ * to get magically exported.
+ */
+ fib_prefix_t pfx_10_10_10_2_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 10.10.10.2 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a02),
+ },
+ };
+
+ fib_table_entry_update_one_path(fib_index,
+ &pfx_10_10_10_2_s_32,
+ FIB_SOURCE_ADJ,
+ FIB_ENTRY_FLAG_ATTACHED,
+ FIB_PROTOCOL_IP4,
+ &pfx_10_10_10_2_s_32.fp_addr,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_2_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 present");
+ ai = fib_entry_get_adj(fei);
+
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_2_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 imported");
+ FIB_TEST((ai == fib_entry_get_adj(fei)),
+ "Import uses same adj as export");
+ FIB_TEST((FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(fei)),
+ "ADJ-fib2 imported flags %d",
+ fib_entry_get_flags(fei));
+
+ /*
+ * create a 2nd FIB table into which routes will be imported
+ */
+ u32 import_fib_index2;
+
+ import_fib_index2 = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP4, 12);
+
+ /*
+ * Add an attached route in the import FIB
+ */
+ local_pfx.fp_len = 24;
+ fib_table_entry_update_one_path(import_fib_index2,
+ &local_pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ NULL,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached export created");
+
+ /*
+ * check for the presence of all the adj-fibs and local in the import table
+ */
+ fei = fib_table_lookup_exact_match(import_fib_index2, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 imported");
+ fei = fib_table_lookup_exact_match(import_fib_index2, &pfx_10_10_10_2_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 imported");
+ local_pfx.fp_len = 32;
+ fei = fib_table_lookup_exact_match(import_fib_index2, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local imported");
+
+ /*
+ * add a 3rd adj-fib. expect it to be exported to both tables.
+ */
+ fib_prefix_t pfx_10_10_10_3_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 10.10.10.3 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a03),
+ },
+ };
+
+ fib_table_entry_update_one_path(fib_index,
+ &pfx_10_10_10_3_s_32,
+ FIB_SOURCE_ADJ,
+ FIB_ENTRY_FLAG_ATTACHED,
+ FIB_PROTOCOL_IP4,
+ &pfx_10_10_10_3_s_32.fp_addr,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_3_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib3 present");
+ ai = fib_entry_get_adj(fei);
+
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_3_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib3 imported to FIB1");
+ FIB_TEST((ai == fib_entry_get_adj(fei)),
+ "Import uses same adj as export");
+ fei = fib_table_lookup_exact_match(import_fib_index2, &pfx_10_10_10_3_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib3 imported to FIB2");
+ FIB_TEST((ai == fib_entry_get_adj(fei)),
+ "Import uses same adj as export");
+
+ /*
+ * remove the 3rd adj fib. we expect it to be removed from both FIBs
+ */
+ fib_table_entry_delete(fib_index,
+ &pfx_10_10_10_3_s_32,
+ FIB_SOURCE_ADJ);
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_3_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib3 remved");
+
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_3_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib3 removed from FIB1");
+
+ fei = fib_table_lookup_exact_match(import_fib_index2, &pfx_10_10_10_3_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib3 removed from FIB2");
+
+ /*
+ * remove the attached route from the 2nd FIB. expect the imported
+ * entries to be removed
+ */
+ local_pfx.fp_len = 24;
+ fib_table_entry_delete(import_fib_index2,
+ &local_pfx,
+ FIB_SOURCE_API);
+ fei = fib_table_lookup_exact_match(import_fib_index2, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "attached export removed");
+
+ fei = fib_table_lookup_exact_match(import_fib_index2, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib1 removed from FIB2");
+ fei = fib_table_lookup_exact_match(import_fib_index2, &pfx_10_10_10_2_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib2 removed from FIB2");
+ local_pfx.fp_len = 32;
+ fei = fib_table_lookup_exact_match(import_fib_index2, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "local removed from FIB2");
+
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 still in FIB1");
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_2_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 still in FIB1");
+ local_pfx.fp_len = 32;
+ fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local still in FIB1");
+
+ /*
+ * modify the route in FIB1 so it is no longer attached. expect the imported
+ * entries to be removed
+ */
+ local_pfx.fp_len = 24;
+ fib_table_entry_update_one_path(import_fib_index1,
+ &local_pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_10_10_10_2_s_32.fp_addr,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib1 removed from FIB1");
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_2_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib2 removed from FIB1");
+ local_pfx.fp_len = 32;
+ fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "local removed from FIB1");
+
+ /*
+ * modify it back to attached. expect the adj-fibs back
+ */
+ local_pfx.fp_len = 24;
+ fib_table_entry_update_one_path(import_fib_index1,
+ &local_pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ NULL,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 imported in FIB1");
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_2_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 imported in FIB1");
+ local_pfx.fp_len = 32;
+ fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local imported in FIB1");
+
+ /*
+ * add a covering attached next-hop for the interface address, so we have
+ * a valid adj to find when we check the forwarding tables
+ */
+ fib_prefix_t pfx_10_0_0_0_s_8 = {
+ .fp_len = 8,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 10.0.0.0 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x0a000000),
+ },
+ };
+
+ fei = fib_table_entry_update_one_path(fib_index,
+ &pfx_10_0_0_0_s_8,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_10_10_10_3_s_32.fp_addr,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+
+ /*
+ * remove the route in the export fib. expect the adj-fibs to be removed
+ */
+ local_pfx.fp_len = 24;
+ fib_table_entry_delete(fib_index,
+ &local_pfx,
+ FIB_SOURCE_INTERFACE);
+
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "Delete export: ADJ-fib1 removed from FIB1");
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_2_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib2 removed from FIB1");
+ local_pfx.fp_len = 32;
+ fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "local removed from FIB1");
+
+ /*
+ * the adj-fibs in the export VRF are present in the FIB table,
+ * but not installed in forwarding, since they have no attached cover.
+ * Consequently a lookup in the MTRIE gives the adj for the covering
+ * route 10.0.0.0/8.
+ */
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 in export");
+
+ index_t lbi;
+ lbi = ip4_fib_forwarding_lookup(fib_index, &pfx_10_10_10_1_s_32.fp_addr.ip4);
+ FIB_TEST(lbi == dpo->dpoi_index,
+ "10.10.10.1 forwards on \n%U not \n%U",
+ format_load_balance, lbi, 0,
+ format_dpo_id, dpo, 0);
+ lbi = ip4_fib_forwarding_lookup(fib_index, &pfx_10_10_10_2_s_32.fp_addr.ip4);
+ FIB_TEST(lbi == dpo->dpoi_index,
+ "10.10.10.2 forwards on %U", format_dpo_id, dpo, 0);
+ lbi = ip4_fib_forwarding_lookup(fib_index, &pfx_10_10_10_3_s_32.fp_addr.ip4);
+ FIB_TEST(lbi == dpo->dpoi_index,
+ "10.10.10.3 forwards on %U", format_dpo_id, dpo, 0);
+
+ /*
+ * add the export prefix back, but not as attached.
+ * No adj-fibs in export or import tables
+ */
+ local_pfx.fp_len = 24;
+ fei = fib_table_entry_update_one_path(fib_index,
+ &local_pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_10_10_10_1_s_32.fp_addr,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "non-attached in export: ADJ-fib1 in export");
+ lbi = ip4_fib_forwarding_lookup(fib_index, &pfx_10_10_10_1_s_32.fp_addr.ip4);
+ FIB_TEST(lbi == dpo->dpoi_index,
+ "10.10.10.1 forwards on %U", format_dpo_id, dpo, 0);
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 in export");
+ lbi = ip4_fib_forwarding_lookup(fib_index, &pfx_10_10_10_2_s_32.fp_addr.ip4);
+ FIB_TEST(lbi == dpo->dpoi_index,
+ "10.10.10.2 forwards on %U", format_dpo_id, dpo, 0);
+
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib1 removed from FIB1");
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_2_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib2 removed from FIB1");
+ local_pfx.fp_len = 32;
+ fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "local removed from FIB1");
+
+ /*
+ * modify the export prefix so it is attached. expect all covered prefixes to return
+ */
+ local_pfx.fp_len = 24;
+ fib_table_entry_update_one_path(fib_index,
+ &local_pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ NULL,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 reinstalled in export");
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "Adj-fib1 is not drop in export");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_2_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 reinstalled in export");
+ local_pfx.fp_len = 32;
+ fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local reinstalled in export");
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached in export: ADJ-fib1 imported");
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "Adj-fib1 is not drop in export");
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 imported");
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_2_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 imported");
+ local_pfx.fp_len = 32;
+ fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local imported");
+
+ /*
+ * modify the export prefix so it is connected. no change.
+ */
+ local_pfx.fp_len = 24;
+ fib_table_entry_update_one_path(fib_index, &local_pfx,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_CONNECTED |
+ FIB_ENTRY_FLAG_ATTACHED),
+ FIB_PROTOCOL_IP4,
+ NULL,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 reinstalled in export");
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "Adj-fib1 is not drop in export");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_2_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 reinstalled in export");
+ local_pfx.fp_len = 32;
+ fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local reinstalled in export");
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached in export: ADJ-fib1 imported");
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(dpo_cmp(dpo_drop, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "Adj-fib1 is not drop in export");
+ fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_2_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 imported");
+ local_pfx.fp_len = 32;
+ fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local imported");
+
+ /*
+ * CLEANUP
+ */
+ fib_table_entry_delete(fib_index,
+ &pfx_10_0_0_0_s_8,
+ FIB_SOURCE_API);
+ fib_table_entry_delete(fib_index,
+ &pfx_10_10_10_1_s_32,
+ FIB_SOURCE_ADJ);
+ fib_table_entry_delete(fib_index,
+ &pfx_10_10_10_2_s_32,
+ FIB_SOURCE_ADJ);
+ local_pfx.fp_len = 32;
+ fib_table_entry_delete(fib_index,
+ &local_pfx,
+ FIB_SOURCE_INTERFACE);
+ local_pfx.fp_len = 24;
+ fib_table_entry_delete(fib_index,
+ &local_pfx,
+ FIB_SOURCE_API);
+ fib_table_entry_delete(fib_index,
+ &local_pfx,
+ FIB_SOURCE_INTERFACE);
+ local_pfx.fp_len = 24;
+ fib_table_entry_delete(import_fib_index1,
+ &local_pfx,
+ FIB_SOURCE_API);
+
+ fib_table_unlock(import_fib_index1, FIB_PROTOCOL_IP4);
+ fib_table_unlock(import_fib_index2, FIB_PROTOCOL_IP4);
+
+ FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
+ adj_nbr_db_size());
+
+ return (0);
+}
+
+
+/*
+ * Test MPLS label handling (out-going labels and local label binds)
+ */
+static int
+fib_test_label (void)
+{
+ fib_node_index_t fei, ai_mpls_10_10_10_1, ai_v4_10_10_11_1, ai_v4_10_10_11_2, ai_mpls_10_10_11_2, ai_mpls_10_10_11_1;
+ const u32 fib_index = 0;
+ test_main_t *tm;
+ ip4_main_t *im;
+ int lb_count, ii;
+
+ lb_count = pool_elts(load_balance_pool);
+ tm = &test_main;
+ im = &ip4_main;
+
+ /*
+ * add interface routes. We'll assume this works. It's more rigorously
+ * tested elsewhere.
+ */
+ fib_prefix_t local0_pfx = {
+ .fp_len = 24,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4 = {
+ /* 10.10.10.10 */
+ .as_u32 = clib_host_to_net_u32(0x0a0a0a0a),
+ },
+ },
+ };
+
+ FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
+ adj_nbr_db_size());
+
+ vec_validate(im->fib_index_by_sw_if_index, tm->hw[0]->sw_if_index);
+ im->fib_index_by_sw_if_index[tm->hw[0]->sw_if_index] = fib_index;
+
+ fib_table_entry_update_one_path(fib_index, &local0_pfx,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_CONNECTED |
+ FIB_ENTRY_FLAG_ATTACHED),
+ FIB_PROTOCOL_IP4,
+ NULL,
+ tm->hw[0]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(fib_index, &local0_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei),
+ "attached interface route present");
+
+ local0_pfx.fp_len = 32;
+ fib_table_entry_update_one_path(fib_index, &local0_pfx,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_CONNECTED |
+ FIB_ENTRY_FLAG_LOCAL),
+ FIB_PROTOCOL_IP4,
+ NULL,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(fib_index, &local0_pfx);
+
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei),
+ "local interface route present");
+
+ fib_prefix_t local1_pfx = {
+ .fp_len = 24,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4 = {
+ /* 10.10.11.10 */
+ .as_u32 = clib_host_to_net_u32(0x0a0a0b0a),
+ },
+ },
+ };
+
+ vec_validate(im->fib_index_by_sw_if_index, tm->hw[1]->sw_if_index);
+ im->fib_index_by_sw_if_index[tm->hw[1]->sw_if_index] = fib_index;
+
+ fib_table_entry_update_one_path(fib_index, &local1_pfx,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_CONNECTED |
+ FIB_ENTRY_FLAG_ATTACHED),
+ FIB_PROTOCOL_IP4,
+ NULL,
+ tm->hw[1]->sw_if_index,
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(fib_index, &local1_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei),
+ "attached interface route present");
+
+ local1_pfx.fp_len = 32;
+ fib_table_entry_update_one_path(fib_index, &local1_pfx,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_CONNECTED |
+ FIB_ENTRY_FLAG_LOCAL),
+ FIB_PROTOCOL_IP4,
+ NULL,
+ tm->hw[1]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_lookup_exact_match(fib_index, &local1_pfx);
+
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei),
+ "local interface route present");
+
+ ip46_address_t nh_10_10_10_1 = {
+ .ip4 = {
+ .as_u32 = clib_host_to_net_u32(0x0a0a0a01),
+ },
+ };
+ ip46_address_t nh_10_10_11_1 = {
+ .ip4 = {
+ .as_u32 = clib_host_to_net_u32(0x0a0a0b01),
+ },
+ };
+ ip46_address_t nh_10_10_11_2 = {
+ .ip4 = {
+ .as_u32 = clib_host_to_net_u32(0x0a0a0b02),
+ },
+ };
+
+ ai_v4_10_10_11_1 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
+ VNET_LINK_IP4,
+ &nh_10_10_11_1,
+ tm->hw[1]->sw_if_index);
+ ai_v4_10_10_11_2 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
+ VNET_LINK_IP4,
+ &nh_10_10_11_2,
+ tm->hw[1]->sw_if_index);
+ ai_mpls_10_10_10_1 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
+ VNET_LINK_MPLS,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index);
+ ai_mpls_10_10_11_2 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
+ VNET_LINK_MPLS,
+ &nh_10_10_11_2,
+ tm->hw[1]->sw_if_index);
+ ai_mpls_10_10_11_1 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
+ VNET_LINK_MPLS,
+ &nh_10_10_11_1,
+ tm->hw[1]->sw_if_index);
+
+ /*
+ * Add an entry with one path with a real out-going label
+ */
+ fib_prefix_t pfx_1_1_1_1_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x01010101),
+ },
+ };
+ fib_test_lb_bucket_t l99_eos_o_10_10_10_1 = {
+ .type = FT_LB_LABEL_O_ADJ,
+ .label_o_adj = {
+ .adj = ai_mpls_10_10_10_1,
+ .label = 99,
+ .eos = MPLS_EOS,
+ },
+ };
+ fib_test_lb_bucket_t l99_neos_o_10_10_10_1 = {
+ .type = FT_LB_LABEL_O_ADJ,
+ .label_o_adj = {
+ .adj = ai_mpls_10_10_10_1,
+ .label = 99,
+ .eos = MPLS_NON_EOS,
+ },
+ };
+ mpls_label_t *l99 = NULL;
+ vec_add1(l99, 99);
+
+ fib_table_entry_update_one_path(fib_index,
+ &pfx_1_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ l99,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "1.1.1.1/32 created");
+
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &l99_eos_o_10_10_10_1),
+ "1.1.1.1/32 LB 1 bucket via label 99 over 10.10.10.1");
+
+ /*
+ * add a path with an implicit NULL label
+ */
+ fib_test_lb_bucket_t a_o_10_10_11_1 = {
+ .type = FT_LB_ADJ,
+ .adj = {
+ .adj = ai_v4_10_10_11_1,
+ },
+ };
+ fib_test_lb_bucket_t a_mpls_o_10_10_11_1 = {
+ .type = FT_LB_ADJ,
+ .adj = {
+ .adj = ai_mpls_10_10_11_1,
+ },
+ };
+ mpls_label_t *l_imp_null = NULL;
+ vec_add1(l_imp_null, MPLS_IETF_IMPLICIT_NULL_LABEL);
+
+ fei = fib_table_entry_path_add(fib_index,
+ &pfx_1_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_11_1,
+ tm->hw[1]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ l_imp_null,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 2,
+ &l99_eos_o_10_10_10_1,
+ &a_o_10_10_11_1),
+ "1.1.1.1/32 LB 2 buckets via: "
+ "label 99 over 10.10.10.1, "
+ "adj over 10.10.11.1");
+
+ /*
+ * assign the route a local label
+ */
+ fib_table_entry_local_label_add(fib_index,
+ &pfx_1_1_1_1_s_32,
+ 24001);
+
+ fib_prefix_t pfx_24001_eos = {
+ .fp_proto = FIB_PROTOCOL_MPLS,
+ .fp_label = 24001,
+ .fp_eos = MPLS_EOS,
+ };
+ fib_prefix_t pfx_24001_neos = {
+ .fp_proto = FIB_PROTOCOL_MPLS,
+ .fp_label = 24001,
+ .fp_eos = MPLS_NON_EOS,
+ };
+
+ /*
+ * The EOS entry should link to both the paths,
+ * and use an ip adj for the imp-null.
+ * The NON-EOS entry should link to both the paths,
+ * and use an mpls adj for the imp-null.
+ */
+ fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
+ &pfx_24001_eos);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_EOS,
+ 2,
+ &l99_eos_o_10_10_10_1,
+ &a_o_10_10_11_1),
+ "24001/eos LB 2 buckets via: "
+ "label 99 over 10.10.10.1, "
+ "adj over 10.10.11.1");
+
+
+ fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
+ &pfx_24001_neos);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ 2,
+ &l99_neos_o_10_10_10_1,
+ &a_mpls_o_10_10_11_1),
+ "24001/neos LB 1 bucket via: "
+ "label 99 over 10.10.10.1 ",
+ "mpls-adj via 10.10.11.1");
+
+ /*
+ * add an unlabelled path; this is excluded from the neos chains.
+ */
+ fib_test_lb_bucket_t adj_o_10_10_11_2 = {
+ .type = FT_LB_ADJ,
+ .adj = {
+ .adj = ai_v4_10_10_11_2,
+ },
+ };
+
+ fei = fib_table_entry_path_add(fib_index,
+ &pfx_1_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_11_2,
+ tm->hw[1]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 16, // 3 choices spread over 16 buckets
+ &l99_eos_o_10_10_10_1,
+ &l99_eos_o_10_10_10_1,
+ &l99_eos_o_10_10_10_1,
+ &l99_eos_o_10_10_10_1,
+ &l99_eos_o_10_10_10_1,
+ &l99_eos_o_10_10_10_1,
+ &a_o_10_10_11_1,
+ &a_o_10_10_11_1,
+ &a_o_10_10_11_1,
+ &a_o_10_10_11_1,
+ &a_o_10_10_11_1,
+ &adj_o_10_10_11_2,
+ &adj_o_10_10_11_2,
+ &adj_o_10_10_11_2,
+ &adj_o_10_10_11_2,
+ &adj_o_10_10_11_2),
+ "1.1.1.1/32 LB 16 buckets via: "
+ "label 99 over 10.10.10.1, "
+	     "adj over 10.10.11.1, "
+ "adj over 10.10.11.2");
+
+ /*
+ * get and lock a reference to the non-eos of the via entry 1.1.1.1/32
+ */
+ dpo_id_t non_eos_1_1_1_1 = DPO_INVALID;
+ fib_entry_contribute_forwarding(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ &non_eos_1_1_1_1);
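+    /*
+     * holding this dpo_id locks the contributed non-eos load-balance,
+     * so we can check later that a PIC repair modifies this same
+     * object in-place rather than replacing it.
+     */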
+
+ /*
+ * n-eos has only the 2 labelled paths
+ */
+ fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
+ &pfx_24001_neos);
+
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ 2,
+ &l99_neos_o_10_10_10_1,
+ &a_mpls_o_10_10_11_1),
+ "24001/neos LB 2 buckets via: "
+ "label 99 over 10.10.10.1, "
+	     "adj-mpls over 10.10.11.1");
+
+ /*
+ * A labelled recursive
+ */
+ fib_prefix_t pfx_2_2_2_2_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x02020202),
+ },
+ };
+ fib_test_lb_bucket_t l1600_eos_o_1_1_1_1 = {
+ .type = FT_LB_LABEL_O_LB,
+ .label_o_lb = {
+ .lb = non_eos_1_1_1_1.dpoi_index,
+ .label = 1600,
+ .eos = MPLS_EOS,
+ },
+ };
+ mpls_label_t *l1600 = NULL;
+ vec_add1(l1600, 1600);
+
+ fib_table_entry_update_one_path(fib_index,
+ &pfx_2_2_2_2_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_1_s_32.fp_addr,
+ ~0,
+ fib_index,
+ 1,
+ l1600,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_2_2_2_2_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &l1600_eos_o_1_1_1_1),
+	     "2.2.2.2/32 LB 1 bucket via: "
+ "label 1600 over 1.1.1.1");
+
+ dpo_id_t dpo_44 = DPO_INVALID;
+ index_t urpfi;
+
+ fib_entry_contribute_forwarding(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, &dpo_44);
+ urpfi = load_balance_get_urpf(dpo_44.dpoi_index);
+
+ FIB_TEST(fib_urpf_check(urpfi, tm->hw[0]->sw_if_index),
+ "uRPF check for 2.2.2.2/32 on %d OK",
+ tm->hw[0]->sw_if_index);
+ FIB_TEST(fib_urpf_check(urpfi, tm->hw[1]->sw_if_index),
+ "uRPF check for 2.2.2.2/32 on %d OK",
+ tm->hw[1]->sw_if_index);
+ FIB_TEST(!fib_urpf_check(urpfi, 99),
+	     "uRPF check for 2.2.2.2/32 on %d not-OK",
+ 99);
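+    /*
+     * the uRPF list of the recursive entry is the union of the
+     * interfaces of the via-entry's paths - hw[0] and hw[1] - so an
+     * arbitrary other interface (99) must fail the check.
+     */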
+
+ fib_entry_contribute_forwarding(fei, FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS, &dpo_44);
+ FIB_TEST(urpfi == load_balance_get_urpf(dpo_44.dpoi_index),
+ "Shared uRPF on IP and non-EOS chain");
+
+ dpo_reset(&dpo_44);
+
+ /*
+ * we are holding a lock on the non-eos LB of the via-entry.
+ * do a PIC-core failover by shutting the link of the via-entry.
+ *
+ * shut down the link with the valid label
+ */
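+    /*
+     * PIC-core: all recursive prefixes share the via-entry's
+     * load-balance, so repairing that one LB converges every dependent
+     * prefix at once, independent of the number of prefixes.
+     */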
+ vnet_sw_interface_set_flags(vnet_get_main(),
+ tm->hw[0]->sw_if_index,
+ 0);
+
+ fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 2,
+ &a_o_10_10_11_1,
+ &adj_o_10_10_11_2),
+ "1.1.1.1/32 LB 2 buckets via: "
+	     "adj over 10.10.11.1, "
+ "adj-v4 over 10.10.11.2");
+
+ fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
+ &pfx_24001_eos);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_EOS,
+ 2,
+ &a_o_10_10_11_1,
+ &adj_o_10_10_11_2),
+ "24001/eos LB 2 buckets via: "
+	     "adj over 10.10.11.1, "
+ "adj-v4 over 10.10.11.2");
+
+ fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
+ &pfx_24001_neos);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ 1,
+ &a_mpls_o_10_10_11_1),
+	     "24001/neos LB 1 bucket via: "
+	     "adj-mpls over 10.10.11.1");
+
+ /*
+ * test that the pre-failover load-balance has been in-place
+ * modified
+ */
+ dpo_id_t current = DPO_INVALID;
+ fib_entry_contribute_forwarding(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ &current);
+
+ FIB_TEST(!dpo_cmp(&non_eos_1_1_1_1,
+ &current),
+ "PIC-core LB inplace modified %U %U",
+ format_dpo_id, &non_eos_1_1_1_1, 0,
+ format_dpo_id, &current, 0);
+
+ dpo_reset(&non_eos_1_1_1_1);
+ dpo_reset(&current);
+
+ /*
+ * no-shut the link with the valid label
+ */
+ vnet_sw_interface_set_flags(vnet_get_main(),
+ tm->hw[0]->sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+
+ fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 16, // 3 choices spread over 16 buckets
+ &l99_eos_o_10_10_10_1,
+ &l99_eos_o_10_10_10_1,
+ &l99_eos_o_10_10_10_1,
+ &l99_eos_o_10_10_10_1,
+ &l99_eos_o_10_10_10_1,
+ &l99_eos_o_10_10_10_1,
+ &a_o_10_10_11_1,
+ &a_o_10_10_11_1,
+ &a_o_10_10_11_1,
+ &a_o_10_10_11_1,
+ &a_o_10_10_11_1,
+ &adj_o_10_10_11_2,
+ &adj_o_10_10_11_2,
+ &adj_o_10_10_11_2,
+ &adj_o_10_10_11_2,
+ &adj_o_10_10_11_2),
+ "1.1.1.1/32 LB 16 buckets via: "
+ "label 99 over 10.10.10.1, "
+	     "adj over 10.10.11.1, "
+ "adj-v4 over 10.10.11.2");
+
+
+ fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
+ &pfx_24001_eos);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_EOS,
+ 16, // 3 choices spread over 16 buckets
+ &l99_eos_o_10_10_10_1,
+ &l99_eos_o_10_10_10_1,
+ &l99_eos_o_10_10_10_1,
+ &l99_eos_o_10_10_10_1,
+ &l99_eos_o_10_10_10_1,
+ &l99_eos_o_10_10_10_1,
+ &a_o_10_10_11_1,
+ &a_o_10_10_11_1,
+ &a_o_10_10_11_1,
+ &a_o_10_10_11_1,
+ &a_o_10_10_11_1,
+ &adj_o_10_10_11_2,
+ &adj_o_10_10_11_2,
+ &adj_o_10_10_11_2,
+ &adj_o_10_10_11_2,
+ &adj_o_10_10_11_2),
+ "24001/eos LB 16 buckets via: "
+ "label 99 over 10.10.10.1, "
+	     "adj over 10.10.11.1, "
+ "adj-v4 over 10.10.11.2");
+
+ fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
+ &pfx_24001_neos);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ 2,
+ &l99_neos_o_10_10_10_1,
+ &a_mpls_o_10_10_11_1),
+ "24001/neos LB 2 buckets via: "
+ "label 99 over 10.10.10.1, "
+	     "adj-mpls over 10.10.11.1");
+
+ /*
+ * remove the first path with the valid label
+ */
+ fib_table_entry_path_remove(fib_index,
+ &pfx_1_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 2,
+ &a_o_10_10_11_1,
+ &adj_o_10_10_11_2),
+ "1.1.1.1/32 LB 2 buckets via: "
+	     "adj over 10.10.11.1, "
+ "adj-v4 over 10.10.11.2");
+
+ fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
+ &pfx_24001_eos);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_EOS,
+ 2,
+ &a_o_10_10_11_1,
+ &adj_o_10_10_11_2),
+ "24001/eos LB 2 buckets via: "
+	     "adj over 10.10.11.1, "
+ "adj-v4 over 10.10.11.2");
+
+ fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
+ &pfx_24001_neos);
+
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ 1,
+ &a_mpls_o_10_10_11_1),
+	     "24001/neos LB 1 bucket via: "
+	     "adj-mpls over 10.10.11.1");
+
+ /*
+ * remove the other path with a valid label
+ */
+ fib_test_lb_bucket_t bucket_drop = {
+ .type = FT_LB_SPECIAL,
+ .special = {
+ .adj = DPO_PROTO_IP4,
+ },
+ };
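+    /*
+     * once the last labelled path is removed the non-eos chain has no
+     * contributing paths left, so its LB collapses to a single drop
+     * bucket.
+     */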
+
+ fib_table_entry_path_remove(fib_index,
+ &pfx_1_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_11_1,
+ tm->hw[1]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &adj_o_10_10_11_2),
+	     "1.1.1.1/32 LB 1 bucket via: "
+ "adj over 10.10.11.2");
+
+ fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
+ &pfx_24001_eos);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_EOS,
+ 1,
+ &adj_o_10_10_11_2),
+	     "24001/eos LB 1 bucket via: "
+ "adj over 10.10.11.2");
+
+ fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
+ &pfx_24001_neos);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ 1,
+ &bucket_drop),
+	     "24001/neos LB 1 bucket via: DROP");
+
+ /*
+ * add back the path with the valid label
+ */
+ l99 = NULL;
+ vec_add1(l99, 99);
+
+ fib_table_entry_path_add(fib_index,
+ &pfx_1_1_1_1_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ l99,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 2,
+ &l99_eos_o_10_10_10_1,
+ &adj_o_10_10_11_2),
+ "1.1.1.1/32 LB 2 buckets via: "
+ "label 99 over 10.10.10.1, "
+ "adj over 10.10.11.2");
+
+ fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
+ &pfx_24001_eos);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_EOS,
+ 2,
+ &l99_eos_o_10_10_10_1,
+ &adj_o_10_10_11_2),
+ "24001/eos LB 2 buckets via: "
+ "label 99 over 10.10.10.1, "
+ "adj over 10.10.11.2");
+
+ fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
+ &pfx_24001_neos);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ 1,
+ &l99_neos_o_10_10_10_1),
+	     "24001/neos LB 1 bucket via: "
+ "label 99 over 10.10.10.1");
+
+ /*
+ * change the local label
+ */
+ fib_table_entry_local_label_add(fib_index,
+ &pfx_1_1_1_1_s_32,
+ 25005);
+
+ fib_prefix_t pfx_25005_eos = {
+ .fp_proto = FIB_PROTOCOL_MPLS,
+ .fp_label = 25005,
+ .fp_eos = MPLS_EOS,
+ };
+ fib_prefix_t pfx_25005_neos = {
+ .fp_proto = FIB_PROTOCOL_MPLS,
+ .fp_label = 25005,
+ .fp_eos = MPLS_NON_EOS,
+ };
+
+ FIB_TEST((FIB_NODE_INDEX_INVALID ==
+	      fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID, &pfx_24001_eos)),
+	     "24001/eos removed after label change");
+    FIB_TEST((FIB_NODE_INDEX_INVALID ==
+	      fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID, &pfx_24001_neos)),
+	     "24001/neos removed after label change");
+
+ fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
+ &pfx_25005_eos);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_EOS,
+ 2,
+ &l99_eos_o_10_10_10_1,
+ &adj_o_10_10_11_2),
+ "25005/eos LB 2 buckets via: "
+ "label 99 over 10.10.10.1, "
+ "adj over 10.10.11.2");
+
+ fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
+ &pfx_25005_neos);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ 1,
+ &l99_neos_o_10_10_10_1),
+	     "25005/neos LB 1 bucket via: "
+ "label 99 over 10.10.10.1");
+
+ /*
+ * remove the local label.
+     * the check that the MPLS entries are gone is done by the fact that the
+ * MPLS table is no longer present.
+ */
+ fib_table_entry_local_label_remove(fib_index,
+ &pfx_1_1_1_1_s_32,
+ 25005);
+
+ fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 2,
+ &l99_eos_o_10_10_10_1,
+ &adj_o_10_10_11_2),
+	     "1.1.1.1/32 LB 2 buckets via: "
+ "label 99 over 10.10.10.1, "
+ "adj over 10.10.11.2");
+
+ FIB_TEST((FIB_NODE_INDEX_INVALID ==
+ mpls_fib_index_from_table_id(MPLS_FIB_DEFAULT_TABLE_ID)),
+ "No more MPLS FIB entries => table removed");
+
+ /*
+ * add another via-entry for the recursive
+ */
+ fib_prefix_t pfx_1_1_1_2_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x01010102),
+ },
+ };
+ fib_test_lb_bucket_t l101_eos_o_10_10_10_1 = {
+ .type = FT_LB_LABEL_O_ADJ,
+ .label_o_adj = {
+ .adj = ai_mpls_10_10_10_1,
+ .label = 101,
+ .eos = MPLS_EOS,
+ },
+ };
+ mpls_label_t *l101 = NULL;
+ vec_add1(l101, 101);
+
+ fei = fib_table_entry_update_one_path(fib_index,
+ &pfx_1_1_1_2_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ l101,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &l101_eos_o_10_10_10_1),
+	     "1.1.1.2/32 LB 1 bucket via: "
+ "label 101 over 10.10.10.1");
+
+ dpo_id_t non_eos_1_1_1_2 = DPO_INVALID;
+ fib_entry_contribute_forwarding(fib_table_lookup(fib_index,
+ &pfx_1_1_1_1_s_32),
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ &non_eos_1_1_1_1);
+ fib_entry_contribute_forwarding(fib_table_lookup(fib_index,
+ &pfx_1_1_1_2_s_32),
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ &non_eos_1_1_1_2);
+
+ fib_test_lb_bucket_t l1601_eos_o_1_1_1_2 = {
+ .type = FT_LB_LABEL_O_LB,
+ .label_o_lb = {
+ .lb = non_eos_1_1_1_2.dpoi_index,
+ .label = 1601,
+ .eos = MPLS_EOS,
+ },
+ };
+ mpls_label_t *l1601 = NULL;
+ vec_add1(l1601, 1601);
+
+ l1600_eos_o_1_1_1_1.label_o_lb.lb = non_eos_1_1_1_1.dpoi_index;
+
+ fei = fib_table_entry_path_add(fib_index,
+ &pfx_2_2_2_2_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_2_s_32.fp_addr,
+ ~0,
+ fib_index,
+ 1,
+ l1601,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 2,
+ &l1600_eos_o_1_1_1_1,
+ &l1601_eos_o_1_1_1_2),
+ "2.2.2.2/32 LB 2 buckets via: "
+	     "label 1600 via 1.1.1.1, "
+	     "label 1601 via 1.1.1.2");
+
+ /*
+     * update the via-entry so its only path has an imp-null out-label.
+     * the LB for the recursive can still use this via, since imp-null
+     * still counts as a labelled path.
+ */
+ l_imp_null = NULL;
+ vec_add1(l_imp_null, MPLS_IETF_IMPLICIT_NULL_LABEL);
+
+ fei = fib_table_entry_update_one_path(fib_index,
+ &pfx_1_1_1_2_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_11_1,
+ tm->hw[1]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ l_imp_null,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &a_o_10_10_11_1),
+	     "1.1.1.2/32 LB 1 bucket via: "
+ "adj 10.10.11.1");
+
+ fei = fib_table_lookup(fib_index, &pfx_2_2_2_2_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 2,
+ &l1600_eos_o_1_1_1_1,
+ &l1601_eos_o_1_1_1_2),
+ "2.2.2.2/32 LB 2 buckets via: "
+	     "label 1600 via 1.1.1.1, "
+	     "label 1601 via 1.1.1.2");
+
+ /*
+ * update the via-entry so it no longer has labelled paths.
+     * the LB for the recursive should exclude this via from its LB
+ */
+ fei = fib_table_entry_update_one_path(fib_index,
+ &pfx_1_1_1_2_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_11_1,
+ tm->hw[1]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &a_o_10_10_11_1),
+	     "1.1.1.2/32 LB 1 bucket via: "
+ "adj 10.10.11.1");
+
+ fei = fib_table_lookup(fib_index, &pfx_2_2_2_2_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &l1600_eos_o_1_1_1_1),
+	     "2.2.2.2/32 LB 1 bucket via: "
+	     "label 1600 via 1.1.1.1");
+
+ dpo_reset(&non_eos_1_1_1_1);
+ dpo_reset(&non_eos_1_1_1_2);
+
+ /*
+ * Add a recursive with no out-labels. We expect to use the IP of the via
+ */
+ fib_prefix_t pfx_2_2_2_3_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x02020203),
+ },
+ };
+ dpo_id_t ip_1_1_1_1 = DPO_INVALID;
+
+ fib_table_entry_update_one_path(fib_index,
+ &pfx_2_2_2_3_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_1_s_32.fp_addr,
+ ~0,
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fib_entry_contribute_forwarding(fib_table_lookup(fib_index,
+ &pfx_1_1_1_1_s_32),
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ &ip_1_1_1_1);
+
+ fib_test_lb_bucket_t ip_o_1_1_1_1 = {
+ .type = FT_LB_O_LB,
+ .lb = {
+ .lb = ip_1_1_1_1.dpoi_index,
+ },
+ };
+
+ fei = fib_table_lookup(fib_index, &pfx_2_2_2_3_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &ip_o_1_1_1_1),
+	     "2.2.2.3/32 LB 1 bucket via: "
+ "ip 1.1.1.1");
+
+ /*
+     * Add another recursive, again with no out-labels.
+     * We expect it, too, to use the IP forwarding of the via
+ */
+ fib_prefix_t pfx_2_2_2_4_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x02020204),
+ },
+ };
+
+ fib_table_entry_update_one_path(fib_index,
+ &pfx_2_2_2_4_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_1_1_1_s_32.fp_addr,
+ ~0,
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ fei = fib_table_lookup(fib_index, &pfx_2_2_2_4_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &ip_o_1_1_1_1),
+	     "2.2.2.4/32 LB 1 bucket via: "
+ "ip 1.1.1.1");
+
+ dpo_reset(&ip_1_1_1_1);
+
+ /*
+ * Create an entry with a deep label stack
+ */
+ fib_prefix_t pfx_2_2_5_5_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x02020505),
+ },
+ };
+ fib_test_lb_bucket_t ls_eos_o_10_10_10_1 = {
+ .type = FT_LB_LABEL_STACK_O_ADJ,
+ .label_stack_o_adj = {
+ .adj = ai_mpls_10_10_11_1,
+ .label_stack_size = 8,
+ .label_stack = {
+ 200, 201, 202, 203, 204, 205, 206, 207
+ },
+ .eos = MPLS_EOS,
+ },
+ };
+ mpls_label_t *label_stack = NULL;
+ vec_validate(label_stack, 7);
+ for (ii = 0; ii < 8; ii++)
+ {
+ label_stack[ii] = ii + 200;
+ }
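+    /*
+     * the stack built here (200..207) must match, in order, the
+     * label_stack in the expected bucket above.
+     */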
+
+ fei = fib_table_entry_update_one_path(fib_index,
+ &pfx_2_2_5_5_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_11_1,
+ tm->hw[1]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ label_stack,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &ls_eos_o_10_10_10_1),
+	     "2.2.5.5/32 LB 1 bucket via: "
+	     "label stack over 10.10.11.1");
+ fib_table_entry_delete_index(fei, FIB_SOURCE_API);
+
+ /*
+ * cleanup
+ */
+ fib_table_entry_delete(fib_index,
+ &pfx_1_1_1_2_s_32,
+ FIB_SOURCE_API);
+
+ fei = fib_table_lookup(fib_index, &pfx_2_2_2_2_s_32);
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &l1600_eos_o_1_1_1_1),
+	     "2.2.2.2/32 LB 1 bucket via: "
+	     "label 1600 via 1.1.1.1");
+
+ fib_table_entry_delete(fib_index,
+ &pfx_1_1_1_1_s_32,
+ FIB_SOURCE_API);
+
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &bucket_drop),
+	     "2.2.2.2/32 LB 1 bucket via: DROP");
+
+ fib_table_entry_delete(fib_index,
+ &pfx_2_2_2_2_s_32,
+ FIB_SOURCE_API);
+ fib_table_entry_delete(fib_index,
+ &pfx_2_2_2_3_s_32,
+ FIB_SOURCE_API);
+ fib_table_entry_delete(fib_index,
+ &pfx_2_2_2_4_s_32,
+ FIB_SOURCE_API);
+
+ adj_unlock(ai_mpls_10_10_10_1);
+ adj_unlock(ai_mpls_10_10_11_2);
+ adj_unlock(ai_v4_10_10_11_1);
+ adj_unlock(ai_v4_10_10_11_2);
+ adj_unlock(ai_mpls_10_10_11_1);
+
+ FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
+ adj_nbr_db_size());
+
+ local0_pfx.fp_len = 32;
+ fib_table_entry_delete(fib_index,
+ &local0_pfx,
+ FIB_SOURCE_INTERFACE);
+ local0_pfx.fp_len = 24;
+ fib_table_entry_delete(fib_index,
+ &local0_pfx,
+ FIB_SOURCE_INTERFACE);
+ local1_pfx.fp_len = 32;
+ fib_table_entry_delete(fib_index,
+ &local1_pfx,
+ FIB_SOURCE_INTERFACE);
+ local1_pfx.fp_len = 24;
+ fib_table_entry_delete(fib_index,
+ &local1_pfx,
+ FIB_SOURCE_INTERFACE);
+
+ /*
+ * +1 for the drop LB in the MPLS tables.
+ */
+ FIB_TEST(lb_count+1 == pool_elts(load_balance_pool),
+ "Load-balance resources freed %d of %d",
+ lb_count+1, pool_elts(load_balance_pool));
+
+ return (0);
+}
+
+#define N_TEST_CHILDREN 4
+#define PARENT_INDEX 0
+
+typedef struct fib_node_test_t_
+{
+ fib_node_t node;
+ u32 sibling;
+ u32 index;
+ fib_node_back_walk_ctx_t *ctxs;
+ u32 destroyed;
+} fib_node_test_t;
+
+static fib_node_test_t fib_test_nodes[N_TEST_CHILDREN+1];
+
+#define PARENT() (&fib_test_nodes[PARENT_INDEX].node)
+
+#define FOR_EACH_TEST_CHILD(_tc) \
+ for (ii = 1, (_tc) = &fib_test_nodes[1]; \
+ ii < N_TEST_CHILDREN+1; \
+ ii++, (_tc) = &fib_test_nodes[ii])
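+
+/*
+ * slot PARENT_INDEX (0) of fib_test_nodes holds the parent; slots
+ * 1..N_TEST_CHILDREN hold the children, hence the iterator above
+ * starts at 1.
+ */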
+
+static fib_node_t *
+fib_test_child_get_node (fib_node_index_t index)
+{
+ return (&fib_test_nodes[index].node);
+}
+
+static int fib_test_walk_spawns_walks;
+
+static fib_node_back_walk_rc_t
+fib_test_child_back_walk_notify (fib_node_t *node,
+ fib_node_back_walk_ctx_t *ctx)
+{
+ fib_node_test_t *tc = (fib_node_test_t*) node;
+
+ vec_add1(tc->ctxs, *ctx);
+
+ if (1 == fib_test_walk_spawns_walks)
+ fib_walk_sync(FIB_NODE_TYPE_TEST, tc->index, ctx);
+ if (2 == fib_test_walk_spawns_walks)
+ fib_walk_async(FIB_NODE_TYPE_TEST, tc->index,
+ FIB_WALK_PRIORITY_HIGH, ctx);
+
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
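+
+/*
+ * fib_test_walk_spawns_walks selects the re-entrancy mode of the test:
+ *  0 - a visited child spawns no further walks
+ *  1 - a visited child spawns a sync walk
+ *  2 - a visited child spawns an async walk
+ * modes 1 and 2 are used below to check that looped walks terminate.
+ */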
+
+static void
+fib_test_child_last_lock_gone (fib_node_t *node)
+{
+ fib_node_test_t *tc = (fib_node_test_t *)node;
+
+ tc->destroyed = 1;
+}
+
+/**
+ * The FIB walk's graph node virtual function table
+ */
+static const fib_node_vft_t fib_test_child_vft = {
+ .fnv_get = fib_test_child_get_node,
+ .fnv_last_lock = fib_test_child_last_lock_gone,
+ .fnv_back_walk = fib_test_child_back_walk_notify,
+};
+
+/*
+ * the function (that should have been static, but isn't, so we can call it
+ * here) that processes the walks on the async queue.
+ */
+f64 fib_walk_process_queues(vlib_main_t * vm,
+ const f64 quota);
+u32 fib_walk_queue_get_size(fib_walk_priority_t prio);
+
+static int
+fib_test_walk (void)
+{
+ fib_node_back_walk_ctx_t high_ctx = {}, low_ctx = {};
+ fib_node_test_t *tc;
+ vlib_main_t *vm;
+ u32 ii;
+
+ vm = vlib_get_main();
+ fib_node_register_type(FIB_NODE_TYPE_TEST, &fib_test_child_vft);
+
+ /*
+ * init a fake node on which we will add children
+ */
+ fib_node_init(&fib_test_nodes[PARENT_INDEX].node,
+ FIB_NODE_TYPE_TEST);
+
+ FOR_EACH_TEST_CHILD(tc)
+ {
+ fib_node_init(&tc->node, FIB_NODE_TYPE_TEST);
+ fib_node_lock(&tc->node);
+ tc->ctxs = NULL;
+ tc->index = ii;
+ tc->sibling = fib_node_child_add(FIB_NODE_TYPE_TEST,
+ PARENT_INDEX,
+ FIB_NODE_TYPE_TEST, ii);
+ }
+
+ /*
+     * enqueue a walk across the parent's children.
+ */
+ high_ctx.fnbw_reason = FIB_NODE_BW_REASON_FLAG_RESOLVE;
+
+ fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
+ FIB_WALK_PRIORITY_HIGH, &high_ctx);
+ FIB_TEST(N_TEST_CHILDREN+1 == fib_node_list_get_size(PARENT()->fn_children),
+ "Parent has %d children pre-walk",
+ fib_node_list_get_size(PARENT()->fn_children));
+
+ /*
+ * give the walk a large amount of time so it gets to the end
+ */
+ fib_walk_process_queues(vm, 1);
+
+ FOR_EACH_TEST_CHILD(tc)
+ {
+ FIB_TEST(1 == vec_len(tc->ctxs),
+		 "%d child visited %d times",
+ ii, vec_len(tc->ctxs));
+ vec_free(tc->ctxs);
+ }
+ FIB_TEST(0 == fib_walk_queue_get_size(FIB_WALK_PRIORITY_HIGH),
+ "Queue is empty post walk");
+ FIB_TEST(N_TEST_CHILDREN == fib_node_list_get_size(PARENT()->fn_children),
+ "Parent has %d children post walk",
+ fib_node_list_get_size(PARENT()->fn_children));
+
+ /*
+ * walk again. should be no increase in the number of visits, since
+ * the walk will have terminated.
+ */
+ fib_walk_process_queues(vm, 1);
+
+ FOR_EACH_TEST_CHILD(tc)
+ {
+ FIB_TEST(0 == vec_len(tc->ctxs),
+		 "%d child visited %d times",
+ ii, vec_len(tc->ctxs));
+ }
+
+ /*
+     * schedule a low and high priority walk. expect the high to be performed
+ * before the low.
+ * schedule the high prio walk first so that it is further from the head
+ * of the dependency list. that way it won't merge with the low one.
+ */
+ high_ctx.fnbw_reason = FIB_NODE_BW_REASON_FLAG_RESOLVE;
+ low_ctx.fnbw_reason = FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE;
+
+ fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
+ FIB_WALK_PRIORITY_HIGH, &high_ctx);
+ fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
+ FIB_WALK_PRIORITY_LOW, &low_ctx);
+
+ fib_walk_process_queues(vm, 1);
+
+ FOR_EACH_TEST_CHILD(tc)
+ {
+ FIB_TEST(high_ctx.fnbw_reason == tc->ctxs[0].fnbw_reason,
+		 "%d child visited by high prio walk", ii);
+	FIB_TEST(low_ctx.fnbw_reason == tc->ctxs[1].fnbw_reason,
+		 "%d child visited by low prio walk", ii);
+ vec_free(tc->ctxs);
+ }
+ FIB_TEST(0 == fib_walk_queue_get_size(FIB_WALK_PRIORITY_HIGH),
+ "Queue is empty post prio walk");
+ FIB_TEST(N_TEST_CHILDREN == fib_node_list_get_size(PARENT()->fn_children),
+ "Parent has %d children post prio walk",
+ fib_node_list_get_size(PARENT()->fn_children));
+
+ /*
+     * schedule 2 walks of the same priority that can be merged.
+ * expect that each child is thus visited only once.
+ */
+ high_ctx.fnbw_reason = FIB_NODE_BW_REASON_FLAG_RESOLVE;
+ low_ctx.fnbw_reason = FIB_NODE_BW_REASON_FLAG_RESOLVE;
+
+ fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
+ FIB_WALK_PRIORITY_HIGH, &high_ctx);
+ fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
+ FIB_WALK_PRIORITY_HIGH, &low_ctx);
+
+ fib_walk_process_queues(vm, 1);
+
+ FOR_EACH_TEST_CHILD(tc)
+ {
+ FIB_TEST(1 == vec_len(tc->ctxs),
+		 "%d child visited %d times during merge walk",
+ ii, vec_len(tc->ctxs));
+ vec_free(tc->ctxs);
+ }
+ FIB_TEST(0 == fib_walk_queue_get_size(FIB_WALK_PRIORITY_HIGH),
+ "Queue is empty post merge walk");
+ FIB_TEST(N_TEST_CHILDREN == fib_node_list_get_size(PARENT()->fn_children),
+ "Parent has %d children post merge walk",
+ fib_node_list_get_size(PARENT()->fn_children));
+
+ /*
+     * schedule 2 walks of the same priority that cannot be merged.
+ * expect that each child is thus visited twice and in the order
+ * in which the walks were scheduled.
+ */
+ high_ctx.fnbw_reason = FIB_NODE_BW_REASON_FLAG_RESOLVE;
+ low_ctx.fnbw_reason = FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE;
+
+ fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
+ FIB_WALK_PRIORITY_HIGH, &high_ctx);
+ fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
+ FIB_WALK_PRIORITY_HIGH, &low_ctx);
+
+ fib_walk_process_queues(vm, 1);
+
+ FOR_EACH_TEST_CHILD(tc)
+ {
+ FIB_TEST(high_ctx.fnbw_reason == tc->ctxs[0].fnbw_reason,
+		 "%d child visited by high prio walk", ii);
+	FIB_TEST(low_ctx.fnbw_reason == tc->ctxs[1].fnbw_reason,
+		 "%d child visited by low prio walk", ii);
+ vec_free(tc->ctxs);
+ }
+ FIB_TEST(0 == fib_walk_queue_get_size(FIB_WALK_PRIORITY_HIGH),
+ "Queue is empty post no-merge walk");
+ FIB_TEST(N_TEST_CHILDREN == fib_node_list_get_size(PARENT()->fn_children),
+ "Parent has %d children post no-merge walk",
+ fib_node_list_get_size(PARENT()->fn_children));
+
+ /*
+     * schedule a walk that makes progress over only one child.
+ * we do this by giving the queue draining process zero
+ * time quanta. it's a do..while loop, so it does something.
+ */
+ high_ctx.fnbw_reason = FIB_NODE_BW_REASON_FLAG_RESOLVE;
+
+ fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
+ FIB_WALK_PRIORITY_HIGH, &high_ctx);
+ fib_walk_process_queues(vm, 0);
+
+ FOR_EACH_TEST_CHILD(tc)
+ {
+ if (ii == N_TEST_CHILDREN)
+ {
+ FIB_TEST(1 == vec_len(tc->ctxs),
+		     "%d child visited %d times in zero quanta walk",
+ ii, vec_len(tc->ctxs));
+ }
+ else
+ {
+ FIB_TEST(0 == vec_len(tc->ctxs),
+		     "%d child visited %d times in 0 quanta walk",
+ ii, vec_len(tc->ctxs));
+ }
+ }
+ FIB_TEST(1 == fib_walk_queue_get_size(FIB_WALK_PRIORITY_HIGH),
+ "Queue is not empty post zero quanta walk");
+ FIB_TEST(N_TEST_CHILDREN+1 == fib_node_list_get_size(PARENT()->fn_children),
+	     "Parent has %d children post zero quanta walk",
+ fib_node_list_get_size(PARENT()->fn_children));
+
+ /*
+     * another single step
+ */
+ fib_walk_process_queues(vm, 0);
+
+ FOR_EACH_TEST_CHILD(tc)
+ {
+ if (ii >= N_TEST_CHILDREN-1)
+ {
+ FIB_TEST(1 == vec_len(tc->ctxs),
+		     "%d child visited %d times in 2nd zero quanta walk",
+ ii, vec_len(tc->ctxs));
+ }
+ else
+ {
+ FIB_TEST(0 == vec_len(tc->ctxs),
+		     "%d child visited %d times in 2nd 0 quanta walk",
+ ii, vec_len(tc->ctxs));
+ }
+ }
+ FIB_TEST(1 == fib_walk_queue_get_size(FIB_WALK_PRIORITY_HIGH),
+ "Queue is not empty post zero quanta walk");
+ FIB_TEST(N_TEST_CHILDREN+1 == fib_node_list_get_size(PARENT()->fn_children),
+	     "Parent has %d children post zero quanta walk",
+ fib_node_list_get_size(PARENT()->fn_children));
+
+ /*
+ * schedule another walk that will catch-up and merge.
+ */
+ fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
+ FIB_WALK_PRIORITY_HIGH, &high_ctx);
+ fib_walk_process_queues(vm, 1);
+
+ FOR_EACH_TEST_CHILD(tc)
+ {
+ if (ii >= N_TEST_CHILDREN-1)
+ {
+ FIB_TEST(2 == vec_len(tc->ctxs),
+		     "%d child visited %d times in 2nd zero quanta merge walk",
+ ii, vec_len(tc->ctxs));
+ vec_free(tc->ctxs);
+ }
+ else
+ {
+ FIB_TEST(1 == vec_len(tc->ctxs),
+		     "%d child visited %d times in 2nd 0 quanta merge walk",
+ ii, vec_len(tc->ctxs));
+ vec_free(tc->ctxs);
+ }
+ }
+ FIB_TEST(0 == fib_walk_queue_get_size(FIB_WALK_PRIORITY_HIGH),
+	     "Queue is empty post 2nd zero quanta merge walk");
+    FIB_TEST(N_TEST_CHILDREN == fib_node_list_get_size(PARENT()->fn_children),
+	     "Parent has %d children post 2nd zero quanta merge walk",
+ fib_node_list_get_size(PARENT()->fn_children));
+
+ /*
+     * park an async walk in the middle of the list, then have a sync walk
+     * catch it. same expectations as async catches async.
+ */
+ high_ctx.fnbw_reason = FIB_NODE_BW_REASON_FLAG_RESOLVE;
+
+ fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
+ FIB_WALK_PRIORITY_HIGH, &high_ctx);
+
+ fib_walk_process_queues(vm, 0);
+ fib_walk_process_queues(vm, 0);
+
+ fib_walk_sync(FIB_NODE_TYPE_TEST, PARENT_INDEX, &high_ctx);
+
+ FOR_EACH_TEST_CHILD(tc)
+ {
+ if (ii >= N_TEST_CHILDREN-1)
+ {
+ FIB_TEST(2 == vec_len(tc->ctxs),
+		     "%d child visited %d times in sync catches async walk",
+ ii, vec_len(tc->ctxs));
+ vec_free(tc->ctxs);
+ }
+ else
+ {
+ FIB_TEST(1 == vec_len(tc->ctxs),
+		     "%d child visited %d times in sync catches async walk",
+ ii, vec_len(tc->ctxs));
+ vec_free(tc->ctxs);
+ }
+ }
+ FIB_TEST(0 == fib_walk_queue_get_size(FIB_WALK_PRIORITY_HIGH),
+	     "Queue is empty post sync catches async walk");
+    FIB_TEST(N_TEST_CHILDREN == fib_node_list_get_size(PARENT()->fn_children),
+	     "Parent has %d children post sync catches async walk",
+ fib_node_list_get_size(PARENT()->fn_children));
+
+ /*
+ * make the parent a child of one of its children, thus inducing a routing loop.
+ */
+ fib_test_nodes[PARENT_INDEX].sibling =
+ fib_node_child_add(FIB_NODE_TYPE_TEST,
+ 1, // the first child
+ FIB_NODE_TYPE_TEST,
+ PARENT_INDEX);
+
+ /*
+ * execute a sync walk from the parent. each child visited spawns more sync
+ * walks. we expect the walk to terminate.
+ */
+ fib_test_walk_spawns_walks = 1;
+
+ fib_walk_sync(FIB_NODE_TYPE_TEST, PARENT_INDEX, &high_ctx);
+
+ FOR_EACH_TEST_CHILD(tc)
+ {
+ /*
+ * child 1 - which is last in the list - has the loop.
+	 * the other children are thus visited first. then we meet
+	 * child 1. we go round the loop again, visiting the other children.
+	 * then we meet the walk in the dep list and bail. child 1 is not visited
+ * again.
+ */
+ if (1 == ii)
+ {
+ FIB_TEST(1 == vec_len(tc->ctxs),
+		     "child %d visited %d times during looped sync walk",
+ ii, vec_len(tc->ctxs));
+ }
+ else
+ {
+ FIB_TEST(2 == vec_len(tc->ctxs),
+		     "child %d visited %d times during looped sync walk",
+ ii, vec_len(tc->ctxs));
+ }
+ vec_free(tc->ctxs);
+ }
+ FIB_TEST(N_TEST_CHILDREN == fib_node_list_get_size(PARENT()->fn_children),
+ "Parent has %d children post sync loop walk",
+ fib_node_list_get_size(PARENT()->fn_children));
+
+ /*
+ * the walk doesn't reach the max depth because the infra knows that sync
+ * meets sync implies a loop and bails early.
+ */
+ FIB_TEST(high_ctx.fnbw_depth == 9,
+ "Walk context depth %d post sync loop walk",
+ high_ctx.fnbw_depth);
+
+ /*
+     * execute an async walk of the graph loop, with each child spawning sync walks
+ */
+ high_ctx.fnbw_depth = 0;
+ fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
+ FIB_WALK_PRIORITY_HIGH, &high_ctx);
+
+ fib_walk_process_queues(vm, 1);
+
+ FOR_EACH_TEST_CHILD(tc)
+ {
+ /*
+	 * we don't really care how many times the children are visited, as
+	 * long as it is at least once.
+ */
+ FIB_TEST(1 <= vec_len(tc->ctxs),
+		 "child %d visited %d times during looped async spawns sync walk",
+ ii, vec_len(tc->ctxs));
+ vec_free(tc->ctxs);
+ }
+
+ /*
+     * execute an async walk of the graph loop, with each child spawning async walks
+ */
+ fib_test_walk_spawns_walks = 2;
+ high_ctx.fnbw_depth = 0;
+ fib_walk_async(FIB_NODE_TYPE_TEST, PARENT_INDEX,
+ FIB_WALK_PRIORITY_HIGH, &high_ctx);
+
+ fib_walk_process_queues(vm, 1);
+
+ FOR_EACH_TEST_CHILD(tc)
+ {
+ /*
+	 * we don't really care how many times the children are visited, as
+	 * long as it is at least once.
+ */
+ FIB_TEST(1 <= vec_len(tc->ctxs),
+		 "child %d visited %d times during looped async spawns async walk",
+ ii, vec_len(tc->ctxs));
+ vec_free(tc->ctxs);
+ }
+
+
+ fib_node_child_remove(FIB_NODE_TYPE_TEST,
+ 1, // the first child
+ fib_test_nodes[PARENT_INDEX].sibling);
+
+ /*
+ * cleanup
+ */
+ FOR_EACH_TEST_CHILD(tc)
+ {
+ fib_node_child_remove(FIB_NODE_TYPE_TEST, PARENT_INDEX,
+ tc->sibling);
+ fib_node_deinit(&tc->node);
+ fib_node_unlock(&tc->node);
+ }
+ fib_node_deinit(PARENT());
+
+ /*
+ * The parent will be destroyed when the last lock on it goes.
+ * this test ensures all the walk objects are unlocking it.
+ */
+ FIB_TEST((1 == fib_test_nodes[PARENT_INDEX].destroyed),
+ "Parent was destroyed");
+
+ return (0);
+}
+
+static int
+lfib_test (void)
+{
+ const mpls_label_t deag_label = 50;
+ const u32 lfib_index = 0;
+ const u32 fib_index = 0;
+ dpo_id_t dpo = DPO_INVALID;
+ const dpo_id_t *dpo1;
+ fib_node_index_t lfe;
+ lookup_dpo_t *lkd;
+ test_main_t *tm;
+ int lb_count;
+ adj_index_t ai_mpls_10_10_10_1;
+
+ tm = &test_main;
+ lb_count = pool_elts(load_balance_pool);
+
+ FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
+ adj_nbr_db_size());
+
+ /*
+ * MPLS enable an interface so we get the MPLS table created
+ */
+ mpls_sw_interface_enable_disable(&mpls_main,
+ tm->hw[0]->sw_if_index,
+ 1);
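+
+    /*
+     * the MPLS FIB is created on-demand when the first interface is
+     * MPLS-enabled (and, as the previous test showed, destroyed when
+     * its last entry goes). creating it also installs the reserved
+     * label entries, such as the explicit NULLs checked below.
+     */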
+
+ ip46_address_t nh_10_10_10_1 = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a01),
+ };
+ ai_mpls_10_10_10_1 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
+ VNET_LINK_MPLS,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index);
+
+ /*
+     * Test that the specials stack properly.
+ */
+ fib_prefix_t exp_null_v6_pfx = {
+ .fp_proto = FIB_PROTOCOL_MPLS,
+ .fp_eos = MPLS_EOS,
+ .fp_label = MPLS_IETF_IPV6_EXPLICIT_NULL_LABEL,
+ .fp_payload_proto = DPO_PROTO_IP6,
+ };
+ lfe = fib_table_lookup(lfib_index, &exp_null_v6_pfx);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != lfe),
+ "%U/%U present",
+ format_mpls_unicast_label, MPLS_IETF_IPV6_EXPLICIT_NULL_LABEL,
+ format_mpls_eos_bit, MPLS_EOS);
+ fib_entry_contribute_forwarding(lfe,
+ FIB_FORW_CHAIN_TYPE_MPLS_EOS,
+ &dpo);
+ dpo1 = load_balance_get_bucket(dpo.dpoi_index, 0);
+ lkd = lookup_dpo_get(dpo1->dpoi_index);
+
+ FIB_TEST((fib_index == lkd->lkd_fib_index),
+ "%U/%U is deag in %d %U",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_EOS,
+ lkd->lkd_fib_index,
+ format_dpo_id, &dpo, 0);
+ FIB_TEST((LOOKUP_INPUT_DST_ADDR == lkd->lkd_input),
+ "%U/%U is dst deag",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_EOS);
+ FIB_TEST((LOOKUP_TABLE_FROM_INPUT_INTERFACE == lkd->lkd_table),
+ "%U/%U is lookup in interface's table",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_EOS);
+ FIB_TEST((DPO_PROTO_IP6 == lkd->lkd_proto),
+ "%U/%U is %U dst deag",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_EOS,
+ format_dpo_proto, lkd->lkd_proto);
+
+
+ /*
+     * A deag route for EOS
+ */
+ fib_prefix_t pfx = {
+ .fp_proto = FIB_PROTOCOL_MPLS,
+ .fp_eos = MPLS_EOS,
+ .fp_label = deag_label,
+ .fp_payload_proto = DPO_PROTO_IP4,
+ };
+ lfe = fib_table_entry_path_add(lfib_index,
+ &pfx,
+ FIB_SOURCE_CLI,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &zero_addr,
+ ~0,
+ fib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST((lfe == fib_table_lookup(lfib_index, &pfx)),
+ "%U/%U present",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_EOS);
+
+ fib_entry_contribute_forwarding(lfe,
+ FIB_FORW_CHAIN_TYPE_MPLS_EOS,
+ &dpo);
+ dpo1 = load_balance_get_bucket(dpo.dpoi_index, 0);
+ lkd = lookup_dpo_get(dpo1->dpoi_index);
+
+ FIB_TEST((fib_index == lkd->lkd_fib_index),
+ "%U/%U is deag in %d %U",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_EOS,
+ lkd->lkd_fib_index,
+ format_dpo_id, &dpo, 0);
+ FIB_TEST((LOOKUP_INPUT_DST_ADDR == lkd->lkd_input),
+ "%U/%U is dst deag",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_EOS);
+ FIB_TEST((DPO_PROTO_IP4 == lkd->lkd_proto),
+ "%U/%U is %U dst deag",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_EOS,
+ format_dpo_proto, lkd->lkd_proto);
+
+ fib_table_entry_delete_index(lfe, FIB_SOURCE_CLI);
+
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fib_table_lookup(lfib_index,
+ &pfx)),
+ "%U/%U not present",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_EOS);
+
+ /*
+     * A deag route for non-EOS
+ */
+ pfx.fp_eos = MPLS_NON_EOS;
+ lfe = fib_table_entry_path_add(lfib_index,
+ &pfx,
+ FIB_SOURCE_CLI,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &zero_addr,
+ ~0,
+ lfib_index,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST((lfe == fib_table_lookup(lfib_index, &pfx)),
+ "%U/%U present",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_NON_EOS);
+
+ fib_entry_contribute_forwarding(lfe,
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ &dpo);
+ dpo1 = load_balance_get_bucket(dpo.dpoi_index, 0);
+ lkd = lookup_dpo_get(dpo1->dpoi_index);
+
+ FIB_TEST((fib_index == lkd->lkd_fib_index),
+ "%U/%U is deag in %d %U",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_NON_EOS,
+ lkd->lkd_fib_index,
+ format_dpo_id, &dpo, 0);
+ FIB_TEST((LOOKUP_INPUT_DST_ADDR == lkd->lkd_input),
+ "%U/%U is dst deag",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_NON_EOS);
+
+ FIB_TEST((DPO_PROTO_MPLS == lkd->lkd_proto),
+ "%U/%U is %U dst deag",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_NON_EOS,
+ format_dpo_proto, lkd->lkd_proto);
+
+ fib_table_entry_delete_index(lfe, FIB_SOURCE_CLI);
+
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fib_table_lookup(lfib_index,
+ &pfx)),
+ "%U/%U not present",
+ format_mpls_unicast_label, deag_label,
+	     format_mpls_eos_bit, MPLS_NON_EOS);
+
+ dpo_reset(&dpo);
+
+ /*
+ * An MPLS x-connect
+ */
+ fib_prefix_t pfx_1200 = {
+ .fp_len = 21,
+ .fp_proto = FIB_PROTOCOL_MPLS,
+ .fp_label = 1200,
+ .fp_eos = MPLS_NON_EOS,
+ };
+ fib_test_lb_bucket_t neos_o_10_10_10_1 = {
+ .type = FT_LB_LABEL_STACK_O_ADJ,
+ .label_stack_o_adj = {
+ .adj = ai_mpls_10_10_10_1,
+ .label_stack_size = 4,
+ .label_stack = {
+ 200, 300, 400, 500,
+ },
+ .eos = MPLS_NON_EOS,
+ },
+ };
+ dpo_id_t neos_1200 = DPO_INVALID;
+ dpo_id_t ip_1200 = DPO_INVALID;
+ mpls_label_t *l200 = NULL;
+ vec_add1(l200, 200);
+ vec_add1(l200, 300);
+ vec_add1(l200, 400);
+ vec_add1(l200, 500);
+
+ lfe = fib_table_entry_update_one_path(fib_index,
+ &pfx_1200,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ l200,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST(fib_test_validate_entry(lfe,
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ 1,
+ &neos_o_10_10_10_1),
+	     "1200/neos LB 1 bucket via: "
+	     "label stack over 10.10.10.1");
+
+ /*
+ * A recursive route via the MPLS x-connect
+ */
+ fib_prefix_t pfx_2_2_2_3_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x02020203),
+ },
+ };
+ fib_route_path_t *rpaths = NULL, rpath = {
+ .frp_proto = FIB_PROTOCOL_MPLS,
+ .frp_local_label = 1200,
+	.frp_sw_if_index = ~0, // recursive
+ .frp_fib_index = 0, // Default MPLS fib
+ .frp_weight = 1,
+ .frp_flags = FIB_ROUTE_PATH_FLAG_NONE,
+ .frp_label_stack = NULL,
+ };
+ vec_add1(rpaths, rpath);
+
+ fib_table_entry_path_add2(fib_index,
+ &pfx_2_2_2_3_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ rpaths);
+
+ /*
+ * A labelled recursive route via the MPLS x-connect
+ */
+ fib_prefix_t pfx_2_2_2_4_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x02020204),
+ },
+ };
+ mpls_label_t *l999 = NULL;
+ vec_add1(l999, 999);
+    rpaths[0].frp_label_stack = l999;
+
+ fib_table_entry_path_add2(fib_index,
+ &pfx_2_2_2_4_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ rpaths);
+
+ fib_entry_contribute_forwarding(fib_table_lookup(fib_index, &pfx_1200),
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ &ip_1200);
+ fib_entry_contribute_forwarding(fib_table_lookup(fib_index, &pfx_1200),
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ &neos_1200);
+
+ fib_test_lb_bucket_t ip_o_1200 = {
+ .type = FT_LB_O_LB,
+ .lb = {
+ .lb = ip_1200.dpoi_index,
+ },
+ };
+ fib_test_lb_bucket_t mpls_o_1200 = {
+ .type = FT_LB_LABEL_O_LB,
+ .label_o_lb = {
+ .lb = neos_1200.dpoi_index,
+ .label = 999,
+ .eos = MPLS_EOS,
+ },
+ };
+
+ lfe = fib_table_lookup(fib_index, &pfx_2_2_2_3_s_32);
+ FIB_TEST(fib_test_validate_entry(lfe,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &ip_o_1200),
+	     "2.2.2.3/32 LB 1 bucket via: label 1200 EOS");
+ lfe = fib_table_lookup(fib_index, &pfx_2_2_2_4_s_32);
+ FIB_TEST(fib_test_validate_entry(lfe,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &mpls_o_1200),
+	     "2.2.2.4/32 LB 1 bucket via: label 1200 non-EOS");
+
+ fib_table_entry_delete(fib_index, &pfx_1200, FIB_SOURCE_API);
+ fib_table_entry_delete(fib_index, &pfx_2_2_2_3_s_32, FIB_SOURCE_API);
+ fib_table_entry_delete(fib_index, &pfx_2_2_2_4_s_32, FIB_SOURCE_API);
+
+ dpo_reset(&neos_1200);
+ dpo_reset(&ip_1200);
+
+ /*
+ * A recursive via a label that does not exist
+ */
+ fib_test_lb_bucket_t bucket_drop = {
+ .type = FT_LB_SPECIAL,
+ .special = {
+ .adj = DPO_PROTO_MPLS,
+ },
+ };
+
+ rpaths[0].frp_label_stack = NULL;
+ lfe = fib_table_entry_path_add2(fib_index,
+ &pfx_2_2_2_4_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ rpaths);
+
+ fib_entry_contribute_forwarding(fib_table_lookup(fib_index, &pfx_1200),
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ &ip_1200);
+ ip_o_1200.lb.lb = ip_1200.dpoi_index;
+
+ FIB_TEST(fib_test_validate_entry(lfe,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &ip_o_1200),
+	     "2.2.2.4/32 LB 1 bucket via: label 1200 EOS");
+ lfe = fib_table_lookup(fib_index, &pfx_1200);
+ FIB_TEST(fib_test_validate_entry(lfe,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &bucket_drop),
+	     "1200/neos LB 1 bucket via: DROP");
+
+ fib_table_entry_delete(fib_index, &pfx_2_2_2_4_s_32, FIB_SOURCE_API);
+
+ dpo_reset(&ip_1200);
+
+ /*
+ * cleanup
+ */
+ mpls_sw_interface_enable_disable(&mpls_main,
+ tm->hw[0]->sw_if_index,
+ 0);
+
+ FIB_TEST(lb_count == pool_elts(load_balance_pool),
+ "Load-balance resources freed %d of %d",
+ lb_count, pool_elts(load_balance_pool));
+
+ return (0);
+}
+
+static clib_error_t *
+fib_test (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd_arg)
+{
+ int res;
+
+ res = 0;
+ fib_test_mk_intf(4);
+
+ if (unformat (input, "ip"))
+ {
+ res += fib_test_v4();
+ res += fib_test_v6();
+ }
+ else if (unformat (input, "label"))
+ {
+ res += fib_test_label();
+ }
+ else if (unformat (input, "ae"))
+ {
+ res += fib_test_ae();
+ }
+ else if (unformat (input, "lfib"))
+ {
+ res += lfib_test();
+ }
+ else if (unformat (input, "walk"))
+ {
+ res += fib_test_walk();
+ }
+ else
+ {
+ /*
+ * These walk UT aren't run as part of the full suite, since the
+ * fib-walk process must be disabled in order for the tests to work
+ *
+ * fib_test_walk();
+ */
+ res += fib_test_v4();
+ res += fib_test_v6();
+ res += fib_test_ae();
+ res += fib_test_label();
+ res += lfib_test();
+ }
+
+ if (res)
+ {
+ return clib_error_return(0, "FIB Unit Test Failed");
+ }
+ else
+ {
+ return (NULL);
+ }
+}
+
+VLIB_CLI_COMMAND (test_fib_command, static) = {
+ .path = "test fib",
+ .short_help = "fib unit tests - DO NOT RUN ON A LIVE SYSTEM",
+ .function = fib_test,
+};
+
+clib_error_t *
+fib_test_init (vlib_main_t *vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (fib_test_init);
diff --git a/src/vnet/fib/fib_types.c b/src/vnet/fib/fib_types.c
new file mode 100644
index 00000000000..b66e71940a5
--- /dev/null
+++ b/src/vnet/fib/fib_types.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip.h>
+
+#include <vnet/fib/fib_types.h>
+#include <vnet/fib/fib_internal.h>
+#include <vnet/mpls/mpls.h>
+
+/*
+ * arrays of protocol and link names
+ */
+static const char* fib_protocol_names[] = FIB_PROTOCOLS;
+static const char* vnet_link_names[] = VNET_LINKS;
+static const char* fib_forw_chain_names[] = FIB_FORW_CHAINS;
+
+u8 *
+format_fib_protocol (u8 * s, va_list ap)
+{
+ fib_protocol_t proto = va_arg(ap, int); // fib_protocol_t promotion
+
+ return (format (s, "%s", fib_protocol_names[proto]));
+}
+
+u8 *
+format_vnet_link (u8 * s, va_list ap)
+{
+ vnet_link_t link = va_arg(ap, int); // vnet_link_t promotion
+
+ return (format (s, "%s", vnet_link_names[link]));
+}
+
+u8 *
+format_fib_forw_chain_type (u8 * s, va_list * args)
+{
+ fib_forward_chain_type_t fct = va_arg(*args, int);
+
+ return (format (s, "%s", fib_forw_chain_names[fct]));
+}
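+
+/*
+ * the above are vppinfra %U-style formatters. a typical (hypothetical)
+ * call site, in the style used throughout the FIB code:
+ *
+ *   s = format (s, "%U over %U",
+ *               format_fib_protocol, FIB_PROTOCOL_IP4,
+ *               format_vnet_link, VNET_LINK_ETHERNET);
+ */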
+
+void
+fib_prefix_from_ip46_addr (const ip46_address_t *addr,
+ fib_prefix_t *pfx)
+{
+ ASSERT(!ip46_address_is_zero(addr));
+
+ pfx->fp_proto = ((ip46_address_is_ip4(addr) ?
+ FIB_PROTOCOL_IP4 :
+ FIB_PROTOCOL_IP6));
+ pfx->fp_len = ((ip46_address_is_ip4(addr) ?
+ 32 : 128));
+ pfx->fp_addr = *addr;
+}
+
+void
+fib_prefix_from_mpls_label (mpls_label_t label,
+ fib_prefix_t *pfx)
+{
+ pfx->fp_proto = FIB_PROTOCOL_MPLS;
+ pfx->fp_len = 21;
+ pfx->fp_label = label;
+ pfx->fp_eos = MPLS_NON_EOS;
+}
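+
+/*
+ * note: a length of 21 covers the 20-bit label plus the EOS bit, i.e.
+ * the full width of an MPLS FIB key.
+ */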
+
+int
+fib_prefix_cmp (const fib_prefix_t *p1,
+ const fib_prefix_t *p2)
+{
+ int res;
+
+ res = (p1->fp_proto - p2->fp_proto);
+
+ if (0 == res)
+ {
+ switch (p1->fp_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ case FIB_PROTOCOL_IP6:
+ res = (p1->fp_len - p2->fp_len);
+
+ if (0 == res)
+ {
+ res = ip46_address_cmp(&p1->fp_addr, &p2->fp_addr);
+ }
+ break;
+ case FIB_PROTOCOL_MPLS:
+ res = (p1->fp_label - p2->fp_label);
+
+ if (0 == res)
+ {
+ res = (p1->fp_eos - p2->fp_eos);
+ }
+ break;
+ }
+ }
+
+ return (res);
+}
+
+int
+fib_prefix_is_cover (const fib_prefix_t *p1,
+ const fib_prefix_t *p2)
+{
+ switch (p1->fp_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (ip4_destination_matches_route(&ip4_main,
+ &p1->fp_addr.ip4,
+ &p2->fp_addr.ip4,
+ p1->fp_len));
+ case FIB_PROTOCOL_IP6:
+ return (ip6_destination_matches_route(&ip6_main,
+ &p1->fp_addr.ip6,
+ &p2->fp_addr.ip6,
+ p1->fp_len));
+ case FIB_PROTOCOL_MPLS:
+ break;
+ }
+ return (0);
+}
+
+int
+fib_prefix_is_host (const fib_prefix_t *prefix)
+{
+ switch (prefix->fp_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (prefix->fp_len == 32);
+ case FIB_PROTOCOL_IP6:
+ return (prefix->fp_len == 128);
+ case FIB_PROTOCOL_MPLS:
+ return (!0);
+ }
+ return (0);
+}
+
+u8 *
+format_fib_prefix (u8 * s, va_list * args)
+{
+ fib_prefix_t *fp = va_arg (*args, fib_prefix_t *);
+
+ /*
+ * protocol specific so it prints ::/0 correctly.
+ */
+ switch (fp->fp_proto)
+ {
+ case FIB_PROTOCOL_IP6:
+ {
+ ip6_address_t p6 = fp->fp_addr.ip6;
+
+ ip6_address_mask(&p6, &(ip6_main.fib_masks[fp->fp_len]));
+ s = format (s, "%U", format_ip6_address, &p6);
+ break;
+ }
+ case FIB_PROTOCOL_IP4:
+ {
+ ip4_address_t p4 = fp->fp_addr.ip4;
+ p4.as_u32 &= ip4_main.fib_masks[fp->fp_len];
+
+ s = format (s, "%U", format_ip4_address, &p4);
+ break;
+ }
+ case FIB_PROTOCOL_MPLS:
+ s = format (s, "%U:%U",
+ format_mpls_unicast_label, fp->fp_label,
+ format_mpls_eos_bit, fp->fp_eos);
+ break;
+ }
+ s = format (s, "/%d", fp->fp_len);
+
+ return (s);
+}
+
+int
+fib_route_path_cmp (const fib_route_path_t *rpath1,
+ const fib_route_path_t *rpath2)
+{
+ int res;
+
+ res = ip46_address_cmp(&rpath1->frp_addr,
+ &rpath2->frp_addr);
+
+ if (0 != res) return (res);
+
+ if (~0 != rpath1->frp_sw_if_index &&
+ ~0 != rpath2->frp_sw_if_index)
+ {
+ res = vnet_sw_interface_compare(vnet_get_main(),
+ rpath1->frp_sw_if_index,
+ rpath2->frp_sw_if_index);
+ }
+ else
+ {
+ res = rpath1->frp_sw_if_index - rpath2->frp_sw_if_index;
+ }
+
+ if (0 != res) return (res);
+
+ if (ip46_address_is_zero(&rpath1->frp_addr))
+ {
+ res = rpath1->frp_fib_index - rpath2->frp_fib_index;
+ }
+
+ return (res);
+}
+
+dpo_proto_t
+fib_proto_to_dpo (fib_protocol_t fib_proto)
+{
+ switch (fib_proto)
+ {
+ case FIB_PROTOCOL_IP6:
+ return (DPO_PROTO_IP6);
+ case FIB_PROTOCOL_IP4:
+ return (DPO_PROTO_IP4);
+ case FIB_PROTOCOL_MPLS:
+ return (DPO_PROTO_MPLS);
+ }
+ ASSERT(0);
+ return (0);
+}
+
+fib_protocol_t
+dpo_proto_to_fib (dpo_proto_t dpo_proto)
+{
+ switch (dpo_proto)
+ {
+ case DPO_PROTO_IP6:
+ return (FIB_PROTOCOL_IP6);
+ case DPO_PROTO_IP4:
+ return (FIB_PROTOCOL_IP4);
+ case DPO_PROTO_MPLS:
+ return (FIB_PROTOCOL_MPLS);
+ default:
+ break;
+ }
+ ASSERT(0);
+ return (0);
+}
+
+vnet_link_t
+fib_proto_to_link (fib_protocol_t proto)
+{
+ switch (proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (VNET_LINK_IP4);
+ case FIB_PROTOCOL_IP6:
+ return (VNET_LINK_IP6);
+ case FIB_PROTOCOL_MPLS:
+ return (VNET_LINK_MPLS);
+ }
+ ASSERT(0);
+ return (0);
+}
+
+fib_forward_chain_type_t
+fib_forw_chain_type_from_dpo_proto (dpo_proto_t proto)
+{
+ switch (proto)
+ {
+ case DPO_PROTO_IP4:
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+ case DPO_PROTO_IP6:
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6);
+ case DPO_PROTO_MPLS:
+ return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
+ case DPO_PROTO_ETHERNET:
+ return (FIB_FORW_CHAIN_TYPE_ETHERNET);
+ }
+ ASSERT(0);
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+}
+
+vnet_link_t
+fib_forw_chain_type_to_link_type (fib_forward_chain_type_t fct)
+{
+ switch (fct)
+ {
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
+ return (VNET_LINK_IP4);
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
+ return (VNET_LINK_IP6);
+ case FIB_FORW_CHAIN_TYPE_ETHERNET:
+ return (VNET_LINK_ETHERNET);
+ case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
+ /*
+	 * insufficient information to convert
+ */
+ ASSERT(0);
+ break;
+ case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
+ return (VNET_LINK_MPLS);
+ }
+ return (VNET_LINK_IP4);
+}
+
+dpo_proto_t
+fib_forw_chain_type_to_dpo_proto (fib_forward_chain_type_t fct)
+{
+ switch (fct)
+ {
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
+ return (DPO_PROTO_IP4);
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
+ return (DPO_PROTO_IP6);
+ case FIB_FORW_CHAIN_TYPE_ETHERNET:
+ return (DPO_PROTO_ETHERNET);
+ case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
+ case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
+ return (DPO_PROTO_MPLS);
+ }
+ return (DPO_PROTO_IP4);
+}
diff --git a/src/vnet/fib/fib_types.h b/src/vnet/fib/fib_types.h
new file mode 100644
index 00000000000..0a15fef1b28
--- /dev/null
+++ b/src/vnet/fib/fib_types.h
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FIB_TYPES_H__
+#define __FIB_TYPES_H__
+
+#include <vlib/vlib.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/mpls/packet.h>
+#include <vnet/dpo/dpo.h>
+
+/**
+ * A typedef of a node index.
+ * we make this typedef so the code becomes easier for a human to parse.
+ */
+typedef u32 fib_node_index_t;
+#define FIB_NODE_INDEX_INVALID ((fib_node_index_t)(~0))
+
+/**
+ * Protocol Type. packed so it consumes a u8 only
+ */
+typedef enum fib_protocol_t_ {
+ FIB_PROTOCOL_IP4 = 0,
+ FIB_PROTOCOL_IP6,
+ FIB_PROTOCOL_MPLS,
+} __attribute__ ((packed)) fib_protocol_t;
+
+#define FIB_PROTOCOLS { \
+ [FIB_PROTOCOL_IP4] = "ipv4", \
+ [FIB_PROTOCOL_IP6] = "ipv6", \
+ [FIB_PROTOCOL_MPLS] = "MPLS", \
+}
+
+/**
+ * Definition outside of enum so it does not need to be included in non-defaulted
+ * switch statements
+ */
+#define FIB_PROTOCOL_MAX (FIB_PROTOCOL_MPLS + 1)
+
+/**
+ * Not part of the enum so it does not have to be handled in switch statements
+ */
+#define FIB_PROTOCOL_NONE (FIB_PROTOCOL_MAX+1)
+
+#define FOR_EACH_FIB_PROTOCOL(_item) \
+ for (_item = FIB_PROTOCOL_IP4; \
+ _item <= FIB_PROTOCOL_MPLS; \
+ _item++)
+
+#define FOR_EACH_FIB_IP_PROTOCOL(_item) \
+ for (_item = FIB_PROTOCOL_IP4; \
+ _item <= FIB_PROTOCOL_IP6; \
+ _item++)
+
+/**
+ * @brief Convert from a protocol to a link type
+ */
+vnet_link_t fib_proto_to_link (fib_protocol_t proto);
+
+/**
+ * FIB output chain type. When a child object requests a forwarding contribution
+ * from a parent, it does so for a particular scenario. This enumerates those
+ * scenarios.
+ */
+typedef enum fib_forward_chain_type_t_ {
+ /**
+ * Contribute an object that is to be used to forward IP4 packets
+ */
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ /**
+ * Contribute an object that is to be used to forward IP6 packets
+ */
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP6,
+ /**
+ * Contribute an object that is to be used to forward non-end-of-stack
+ * MPLS packets
+ */
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ /**
+ * Contribute an object that is to be used to forward end-of-stack
+ * MPLS packets. This is a convenient ID for clients. A real EOS chain
+     * must be payload-protocol specific. This
+ * option is converted into one of the other three internally.
+ */
+ FIB_FORW_CHAIN_TYPE_MPLS_EOS,
+ /**
+ * Contribute an object that is to be used to forward Ethernet packets.
+ * This is last in the list since it is not valid for many FIB objects,
+ * and thus their array of per-chain-type DPOs can be sized smaller.
+ */
+ FIB_FORW_CHAIN_TYPE_ETHERNET,
+} __attribute__ ((packed)) fib_forward_chain_type_t;
+
+#define FIB_FORW_CHAINS { \
+ [FIB_FORW_CHAIN_TYPE_ETHERNET] = "ethernet", \
+ [FIB_FORW_CHAIN_TYPE_UNICAST_IP4] = "unicast-ip4", \
+ [FIB_FORW_CHAIN_TYPE_UNICAST_IP6] = "unicast-ip6", \
+ [FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS] = "mpls-neos", \
+ [FIB_FORW_CHAIN_TYPE_MPLS_EOS] = "mpls-eos", \
+}
+
+#define FIB_FORW_CHAIN_NUM (FIB_FORW_CHAIN_TYPE_ETHERNET+1)
+#define FIB_FORW_CHAIN_MPLS_NUM (FIB_FORW_CHAIN_TYPE_MPLS_EOS+1)
+
+#define FOR_EACH_FIB_FORW_CHAIN(_item) \
+ for (_item = FIB_FORW_CHAIN_TYPE_UNICAST_IP4; \
+ _item <= FIB_FORW_CHAIN_TYPE_ETHERNET; \
+ _item++)
+
+#define FOR_EACH_FIB_FORW_MPLS_CHAIN(_item) \
+ for (_item = FIB_FORW_CHAIN_TYPE_UNICAST_IP4; \
+ _item <= FIB_FORW_CHAIN_TYPE_MPLS_EOS; \
+ _item++)
+
+/**
+ * @brief Convert from a chain type to the adjacencies link type
+ */
+extern vnet_link_t fib_forw_chain_type_to_link_type(fib_forward_chain_type_t fct);
+
+/**
+ * @brief Convert from a payload-protocol to a chain type.
+ */
+extern fib_forward_chain_type_t fib_forw_chain_type_from_dpo_proto(dpo_proto_t proto);
+
+/**
+ * @brief Convert from a chain type to the DPO proto it will install
+ */
+extern dpo_proto_t fib_forw_chain_type_to_dpo_proto(fib_forward_chain_type_t fct);
+
+/**
+ * Aggregate type for a prefix
+ */
+typedef struct fib_prefix_t_ {
+ /**
+ * The mask length
+ */
+ u16 fp_len;
+
+ /**
+ * protocol type
+ */
+ fib_protocol_t fp_proto;
+
+ /**
+ * Pad to keep the address 4 byte aligned
+ */
+ u8 ___fp___pad;
+
+ union {
+ /**
+	 * The address type is not derivable from the fp_addr member alone.
+	 * If it's v4, then the first 3 u32s of the address will be 0.
+	 * v6 addresses (even v4-mapped ones) have at least 2 u32s assigned
+	 * to non-zero values. But when the address is all zeros, one cannot
+	 * decide - hence the fp_proto member.
+ */
+ ip46_address_t fp_addr;
+
+ struct {
+ mpls_label_t fp_label;
+ mpls_eos_bit_t fp_eos;
+ /**
+ * This protocol determines the payload protocol of packets
+ * that will be forwarded by this entry once the label is popped.
+ * For a non-eos entry it will be MPLS.
+ */
+ dpo_proto_t fp_payload_proto;
+ };
+ };
+} fib_prefix_t;
+
+STATIC_ASSERT(STRUCT_OFFSET_OF(fib_prefix_t, fp_addr) == 4,
+ "FIB Prefix's address is 4 byte aligned.");
+
+/**
+ * \brief Compare two prefixes for equality
+ */
+extern int fib_prefix_cmp(const fib_prefix_t *p1,
+ const fib_prefix_t *p2);
+
+/**
+ * \brief Compare two prefixes for covering relationship
+ *
+ * \return non-zero if the first prefix is a cover for the second
+ */
+extern int fib_prefix_is_cover(const fib_prefix_t *p1,
+ const fib_prefix_t *p2);
+
+/**
+ * \brief Return true if the prefix is a host prefix
+ */
+extern int fib_prefix_is_host(const fib_prefix_t *p);
+
+
+/**
+ * \brief Host prefix from ip
+ */
+extern void fib_prefix_from_ip46_addr (const ip46_address_t *addr,
+ fib_prefix_t *pfx);
+
+extern u8 * format_fib_prefix(u8 * s, va_list * args);
+extern u8 * format_fib_forw_chain_type(u8 * s, va_list * args);
+
+extern dpo_proto_t fib_proto_to_dpo(fib_protocol_t fib_proto);
+extern fib_protocol_t dpo_proto_to_fib(dpo_proto_t dpo_proto);
+
+/**
+ * Enumeration of special path/entry types
+ */
+typedef enum fib_special_type_t_ {
+ /**
+ * Marker. Add new types after this one.
+ */
+ FIB_SPECIAL_TYPE_FIRST = 0,
+ /**
+ * Local/for-us paths
+ */
+ FIB_SPECIAL_TYPE_LOCAL = FIB_SPECIAL_TYPE_FIRST,
+ /**
+ * drop paths
+ */
+ FIB_SPECIAL_TYPE_DROP,
+ /**
+ * Marker. Add new types before this one, then update it.
+ */
+ FIB_SPECIAL_TYPE_LAST = FIB_SPECIAL_TYPE_DROP,
+} __attribute__ ((packed)) fib_special_type_t;
+
+/**
+ * The maximum number of types
+ */
+#define FIB_SPECIAL_TYPE_MAX (FIB_SPECIAL_TYPE_LAST + 1)
+
+#define FOR_EACH_FIB_SPECIAL_TYPE(_item) \
+ for (_item = FIB_SPECIAL_TYPE_FIRST; \
+ _item <= FIB_SPECIAL_TYPE_LAST; _item++)
+
+extern u8 * format_fib_protocol(u8 * s, va_list ap);
+extern u8 * format_vnet_link(u8 *s, va_list ap);
+
+/**
+ * Path flags from the control plane
+ */
+typedef enum fib_route_path_flags_t_
+{
+ FIB_ROUTE_PATH_FLAG_NONE = 0,
+ /**
+ * Recursion constraint of via a host prefix
+ */
+ FIB_ROUTE_PATH_RESOLVE_VIA_HOST = (1 << 0),
+ /**
+ * Recursion constraint of via an attached prefix
+ */
+ FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED = (1 << 1),
+} fib_route_path_flags_t;
+
+/**
+ * @brief
+ * A representation of a path as described by a route producer.
+ * These parameters will determine the path 'type', of which there are:
+ * 1) Attached-next-hop:
+ * a single peer on a link.
+ * It is 'attached' because it is in the same sub-net as the router, on a link
+ * directly connected to the router.
+ * It is 'next-hop' since the next-hop address of the peer is known.
+ * 2) Attached:
+ * the next-hop is not known, but we can ARP for it.
+ * 3) Recursive:
+ * the next-hop is known but the interface is not. So to find the adj to use
+ * we must recursively resolve the next-hop.
+ * 4) Deaggregate (deag):
+ * a further lookup is required.
+ */
+typedef struct fib_route_path_t_ {
+ /**
+ * The protocol of the address below. We need this since the all
+ * zeros address is ambiguous.
+ */
+ fib_protocol_t frp_proto;
+
+ union {
+ /**
+ * The next-hop address.
+ * Will be NULL for attached paths.
+ * Will be all zeros for attached-next-hop paths on a p2p interface
+ * Will be all zeros for a deag path.
+ */
+ ip46_address_t frp_addr;
+
+ /**
+ * The MPLS local Label to recursively resolve through.
+ * This is valid when the path type is MPLS.
+ */
+ mpls_label_t frp_local_label;
+ };
+ /**
+ * The interface.
+ * Will be invalid for recursive paths.
+ */
+ u32 frp_sw_if_index;
+ /**
+ * The FIB index to lookup the nexthop
+ * Only valid for recursive paths.
+ */
+ u32 frp_fib_index;
+ /**
+ * [un]equal cost path weight
+ */
+ u32 frp_weight;
+ /**
+ * flags on the path
+ */
+ fib_route_path_flags_t frp_flags;
+ /**
+ * The outgoing MPLS label Stack. NULL implies no label.
+ */
+ mpls_label_t *frp_label_stack;
+} fib_route_path_t;
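+
+/*
+ * Illustrative sketch (not part of the patch): a recursive path via
+ * next-hop 10.0.0.1 resolved in FIB index 0. The interface is left
+ * invalid (~0), which is what makes the path recursive, as described above.
+ *
+ *   fib_route_path_t rpath = {
+ *       .frp_proto = FIB_PROTOCOL_IP4,
+ *       .frp_addr = {
+ *           .ip4.data_u32 = clib_host_to_net_u32(0x0a000001),
+ *       },
+ *       .frp_sw_if_index = ~0,
+ *       .frp_fib_index = 0,
+ *       .frp_weight = 1,
+ *       .frp_flags = FIB_ROUTE_PATH_FLAG_NONE,
+ *       .frp_label_stack = NULL,
+ *   };
+ */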
+
+/**
+ * @brief
+ * A representation of a FIB path for fib_path_encode to convey the information to the caller.
+ */
+typedef struct fib_route_path_encode_t_ {
+ fib_route_path_t rpath;
+ dpo_id_t dpo;
+} fib_route_path_encode_t;
+
+#endif
diff --git a/src/vnet/fib/fib_urpf_list.c b/src/vnet/fib/fib_urpf_list.c
new file mode 100644
index 00000000000..263812ade40
--- /dev/null
+++ b/src/vnet/fib/fib_urpf_list.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/fib/fib_urpf_list.h>
+#include <vnet/adj/adj.h>
+
+/**
+ * @brief pool of all fib_urpf_list
+ */
+fib_urpf_list_t *fib_urpf_list_pool;
+
+u8 *
+format_fib_urpf_list (u8 *s, va_list args)
+{
+ fib_urpf_list_t *urpf;
+ index_t ui;
+ u32 *swi;
+
+ ui = va_arg(args, index_t);
+ urpf = fib_urpf_list_get(ui);
+
+ s = format(s, "uPRF-list:%d len:%d itfs:[",
+ ui, vec_len(urpf->furpf_itfs));
+
+ vec_foreach(swi, urpf->furpf_itfs)
+ {
+ s = format(s, "%d, ", *swi);
+ }
+ s = format(s, "]");
+
+ return (s);
+}
+
+index_t
+fib_urpf_list_alloc_and_lock (void)
+{
+ fib_urpf_list_t *urpf;
+
+ pool_get(fib_urpf_list_pool, urpf);
+ memset(urpf, 0, sizeof(*urpf));
+
+ urpf->furpf_locks++;
+
+ return (urpf - fib_urpf_list_pool);
+}
+
+void
+fib_urpf_list_unlock (index_t ui)
+{
+ fib_urpf_list_t *urpf;
+
+ if (INDEX_INVALID == ui)
+ return;
+
+ urpf = fib_urpf_list_get(ui);
+
+ urpf->furpf_locks--;
+
+ if (0 == urpf->furpf_locks)
+ {
+ vec_free(urpf->furpf_itfs);
+ pool_put(fib_urpf_list_pool, urpf);
+ }
+}
+
+void
+fib_urpf_list_lock (index_t ui)
+{
+ fib_urpf_list_t *urpf;
+
+ urpf = fib_urpf_list_get(ui);
+
+ urpf->furpf_locks++;
+}
+
+/**
+ * @brief Append another interface to the list.
+ */
+void
+fib_urpf_list_append (index_t ui,
+ u32 sw_if_index)
+{
+ fib_urpf_list_t *urpf;
+
+ urpf = fib_urpf_list_get(ui);
+
+ vec_add1(urpf->furpf_itfs, sw_if_index);
+}
+
+/**
+ * @brief Combine two interface lists
+ */
+void
+fib_urpf_list_combine (index_t ui1,
+ index_t ui2)
+{
+ fib_urpf_list_t *urpf1, *urpf2;
+
+ urpf1 = fib_urpf_list_get(ui1);
+ urpf2 = fib_urpf_list_get(ui2);
+
+ vec_append(urpf1->furpf_itfs, urpf2->furpf_itfs);
+}
+
+/**
+ * @brief Sort the interface indices.
+ * The sort is the first step in obtaining a unique list, so the order,
+ * w.r.t. next-hop, interface, etc., is not important. So a sort based on the
+ * index is all we need.
+ */
+static int
+fib_urpf_itf_cmp_for_sort (void * v1,
+ void * v2)
+{
+ fib_node_index_t *i1 = v1, *i2 = v2;
+
+ /* a proper three-way comparison, sorting in ascending index order */
+ return (*i1 < *i2 ? -1 : *i2 < *i1 ? 1 : 0);
+}
+
+/**
+ * @brief Convert the uRPF list from the itf set obtained during the walk
+ * to a unique list.
+ */
+void
+fib_urpf_list_bake (index_t ui)
+{
+ fib_urpf_list_t *urpf;
+
+ urpf = fib_urpf_list_get(ui);
+
+ ASSERT(!(urpf->furpf_flags & FIB_URPF_LIST_BAKED));
+
+ if (vec_len(urpf->furpf_itfs) > 1)
+ {
+ u32 i,j;
+
+ /*
+ * cat list | sort | uniq > rpf_list
+ */
+ vec_sort_with_function(urpf->furpf_itfs, fib_urpf_itf_cmp_for_sort);
+
+ i = 0, j = 1;
+ while (j < vec_len(urpf->furpf_itfs))
+ {
+ if (urpf->furpf_itfs[i] == urpf->furpf_itfs[j])
+ {
+ /*
+ * the adjacent entries are the same.
+ * search forward for a unique one
+ */
+ while (j < vec_len(urpf->furpf_itfs) &&
+ urpf->furpf_itfs[i] == urpf->furpf_itfs[j])
+ {
+ j++;
+ }
+ if (j == vec_len(urpf->furpf_itfs))
+ {
+ /*
+ * ran off the end without finding a unique index.
+ * we are done.
+ */
+ break;
+ }
+ else
+ {
+ urpf->furpf_itfs[i+1] = urpf->furpf_itfs[j];
+ }
+ }
+ i++, j++;
+ }
+
+ /*
+ * set the length of the vector to the number of unique itfs
+ */
+ _vec_len(urpf->furpf_itfs) = i+1;
+ }
+
+ urpf->furpf_flags |= FIB_URPF_LIST_BAKED;
+}
+
+void
+fib_urpf_list_show_mem (void)
+{
+ fib_show_memory_usage("uRPF-list",
+ pool_elts(fib_urpf_list_pool),
+ pool_len(fib_urpf_list_pool),
+ sizeof(fib_urpf_list_t));
+}
+
+static clib_error_t *
+show_fib_urpf_list_command (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ index_t ui;
+
+ if (unformat (input, "%d", &ui))
+ {
+ /*
+ * show one in detail
+ */
+ if (!pool_is_free_index(fib_urpf_list_pool, ui))
+ {
+ vlib_cli_output (vm, "%d@%U",
+ ui,
+ format_fib_urpf_list, ui);
+ }
+ else
+ {
+ vlib_cli_output (vm, "uRPF %d invalid", ui);
+ }
+ }
+ else
+ {
+ /*
+ * show all
+ */
+ vlib_cli_output (vm, "FIB uRPF Entries:");
+ pool_foreach_index(ui, fib_urpf_list_pool,
+ ({
+ vlib_cli_output (vm, "%d@%U",
+ ui,
+ format_fib_urpf_list, ui);
+ }));
+ }
+
+ return (NULL);
+}
+
+/* *INDENT-OFF* */
+/*?
+ * The '<em>sh fib uRPF [index] </em>' command displays the uRPF lists
+ *
+ * @cliexpar
+ * @cliexstart{show fib uRPF}
+ * FIB uRPF Entries:
+ * 0@uRPF-list:0 len:0 itfs:[]
+ * 1@uRPF-list:1 len:2 itfs:[1, 2, ]
+ * 2@uRPF-list:2 len:1 itfs:[3, ]
+ * 3@uRPF-list:3 len:1 itfs:[9, ]
+ * @cliexend
+?*/
+VLIB_CLI_COMMAND (show_fib_urpf_list, static) = {
+ .path = "show fib uRPF",
+ .function = show_fib_urpf_list_command,
+ .short_help = "show fib uRPF",
+};
+/* *INDENT-ON* */
diff --git a/src/vnet/fib/fib_urpf_list.h b/src/vnet/fib/fib_urpf_list.h
new file mode 100644
index 00000000000..09f475747cf
--- /dev/null
+++ b/src/vnet/fib/fib_urpf_list.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief A unicast RPF list.
+ * The uRPF list is the set of interfaces that a prefix can be reached through.
+ * There are 3 levels of RPF check:
+ * - do we have any route to the source (i.e. it's not drop)
+ * - did the packet arrive on an interface that the source is reachable through
+ * - did the packet arrive from a peer that the source is reachable through
+ * we don't support the last, but it could be done by storing adjs in the uRPF
+ * list rather than interface indices.
+ *
+ * these conditions are checked against the list by:
+ * - the list is not empty
+ * - there is an interface in the list that is on the input interface.
+ * - there is an adj in the list whose MAC address matches the packet's
+ * source MAC and input interface.
+ *
+ * To speed the last two checks the interface list only needs to have the unique
+ * interfaces present. If the uRPF check was instead implemented by forward
+ * walking the DPO chain, then that walk would encounter a great deal of
+ * non-adjacency objects (i.e. load-balances, mpls-labels, etc) and potentially
+ * the same adjacency many times (esp. when UCMP is used).
+ * To that end the uRPF list is a collapsed, unique interface only list.
+ */
+
+#ifndef __FIB_URPF_LIST_H__
+#define __FIB_URPF_LIST_H__
+
+#include <vnet/fib/fib_types.h>
+#include <vnet/adj/adj.h>
+
+/**
+ * @brief flags
+ */
+typedef enum fib_urpf_list_flag_t_
+{
+ /**
+ * @brief Set to indicate that the uRPF list has already been baked.
+ * This is protection against it being baked more than once. These
+ * are not chunky fries - once is enough.
+ */
+ FIB_URPF_LIST_BAKED = (1 << 0),
+} fib_urpf_list_flag_t;
+
+typedef struct fib_urpf_list_t_
+{
+ /**
+ * The list of interfaces that comprise the allowed accepting interfaces
+ */
+ adj_index_t *furpf_itfs;
+
+ /**
+ * flags
+ */
+ fib_urpf_list_flag_t furpf_flags;
+
+ /**
+ * uRPF lists are shared amongst many entries so we require a locking
+ * mechanism.
+ */
+ u32 furpf_locks;
+} fib_urpf_list_t;
+
+extern index_t fib_urpf_list_alloc_and_lock(void);
+extern void fib_urpf_list_unlock(index_t urpf);
+extern void fib_urpf_list_lock(index_t urpf);
+
+extern void fib_urpf_list_append(index_t urpf, u32 sw_if_index);
+extern void fib_urpf_list_combine(index_t urpf1, index_t urpf2);
+
+extern void fib_urpf_list_bake(index_t urpf);
+
+extern u8 *format_fib_urpf_list(u8 *s, va_list ap);
+
+extern void fib_urpf_list_show_mem(void);
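+
+/*
+ * Typical lifecycle, an illustrative sketch (not part of the patch):
+ *
+ *   index_t ui = fib_urpf_list_alloc_and_lock();
+ *   fib_urpf_list_append(ui, sw_if_index1);  // gather itfs during a walk
+ *   fib_urpf_list_append(ui, sw_if_index2);
+ *   fib_urpf_list_append(ui, sw_if_index1);  // duplicates are expected here
+ *   fib_urpf_list_bake(ui);                  // sort|uniq into a unique list
+ *   ...
+ *   fib_urpf_list_unlock(ui);                // freed when the lock count hits 0
+ */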
+
+/**
+ * @brief pool of all fib_urpf_list
+ */
+extern fib_urpf_list_t *fib_urpf_list_pool;
+
+static inline fib_urpf_list_t *
+fib_urpf_list_get (index_t index)
+{
+ return (pool_elt_at_index(fib_urpf_list_pool, index));
+}
+
+/**
+ * @brief Data-Plane function to check an input interface against an uRPF list
+ *
+ * @param ui The uRPF list index to check against. Get this from the load-balance
+ * object that is the result of the FIB lookup
+ * @param sw_if_index The SW interface index to validate
+ *
+ * @return 1 if the interface is found, 0 otherwise
+ */
+always_inline int
+fib_urpf_check (index_t ui, u32 sw_if_index)
+{
+ fib_urpf_list_t *urpf;
+ u32 *swi;
+
+ urpf = fib_urpf_list_get(ui);
+
+ vec_foreach(swi, urpf->furpf_itfs)
+ {
+ if (*swi == sw_if_index)
+ return (1);
+ }
+
+ return (0);
+}
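+
+/*
+ * Usage sketch (illustrative, not part of the patch): in an input feature,
+ * fetch the uRPF list index from the load-balance returned by the FIB
+ * lookup and validate the RX interface. The lb_urpf member name is an
+ * assumption here; consult the load-balance object for the actual field.
+ *
+ *   const load_balance_t *lb = load_balance_get(lbi);
+ *   if (!fib_urpf_check(lb->lb_urpf, vnet_buffer(b)->sw_if_index[VLIB_RX]))
+ *       ; // RPF check failed: drop or punt the packet
+ */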
+
+/**
+ * @brief Data-Plane function to check the size of an uRPF list (i.e. the number
+ * of interfaces in the list).
+ *
+ * @param ui The uRPF list index to check against. Get this from the load-balance
+ * object that is the result of the FIB lookup
+ *
+ * @return the number of interfaces in the list
+ */
+always_inline int
+fib_urpf_check_size (index_t ui)
+{
+ fib_urpf_list_t *urpf;
+
+ urpf = fib_urpf_list_get(ui);
+
+ return (vec_len(urpf->furpf_itfs));
+}
+
+#endif
diff --git a/src/vnet/fib/fib_walk.c b/src/vnet/fib/fib_walk.c
new file mode 100644
index 00000000000..938f7b8c1c6
--- /dev/null
+++ b/src/vnet/fib/fib_walk.c
@@ -0,0 +1,1108 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/fib/fib_walk.h>
+#include <vnet/fib/fib_node_list.h>
+
+/**
+ * The flags on a walk
+ */
+typedef enum fib_walk_flags_t_
+{
+ /**
+ * A synchronous walk.
+ * This walk will run to completion, i.e. visit ALL the children.
+ * It is a depth first traversal of the graph.
+ */
+ FIB_WALK_FLAG_SYNC = (1 << 0),
+ /**
+ * An asynchronous walk.
+ * This walk will be scheduled to run in the background. It will thus visit
+ * the children at a later point in time.
+ * It is a depth first traversal of the graph.
+ */
+ FIB_WALK_FLAG_ASYNC = (1 << 1),
+ /**
+ * An indication that the walk is currently executing.
+ */
+ FIB_WALK_FLAG_EXECUTING = (1 << 2),
+} fib_walk_flags_t;
+
+/**
+ * A representation of a graph walk from a parent object to its children
+ */
+typedef struct fib_walk_t_
+{
+ /**
+ * FIB node linkage. This object is not in the FIB object graph,
+ * but it is present in other nodes' dependency lists, so it needs to
+ * be possible to point to it.
+ */
+ fib_node_t fw_node;
+
+ /**
+ * the walk's flags
+ */
+ fib_walk_flags_t fw_flags;
+
+ /**
+ * Sibling index in the dependency list
+ */
+ u32 fw_dep_sibling;
+
+ /**
+ * Sibling index in the list of all walks
+ */
+ u32 fw_prio_sibling;
+
+ /**
+ * Pointer to the node whose dependants this walk is walking
+ */
+ fib_node_ptr_t fw_parent;
+
+ /**
+ * Number of nodes visited by this walk. Saved for debugging purposes.
+ */
+ u32 fw_n_visits;
+
+ /**
+ * Time the walk started
+ */
+ f64 fw_start_time;
+
+ /**
+ * The reasons this walk is occurring.
+ * This is a vector ordered in time. The reasons at the front were started
+ * first, and so should be acted on first when a node is visited.
+ */
+ fib_node_back_walk_ctx_t *fw_ctx;
+} fib_walk_t;
+
+/**
+ * @brief The pool of all walk objects
+ */
+static fib_walk_t *fib_walk_pool;
+
+/**
+ * @brief There's only one event type sent to the walk process
+ */
+#define FIB_WALK_EVENT 0
+
+/**
+ * Statistics maintained per-walk queue
+ */
+typedef enum fib_walk_queue_stats_t_
+{
+ FIB_WALK_SCHEDULED,
+ FIB_WALK_COMPLETED,
+} fib_walk_queue_stats_t;
+#define FIB_WALK_QUEUE_STATS_NUM ((fib_walk_queue_stats_t)(FIB_WALK_COMPLETED+1))
+
+#define FIB_WALK_QUEUE_STATS { \
+ [FIB_WALK_SCHEDULED] = "scheduled", \
+ [FIB_WALK_COMPLETED] = "completed", \
+}
+
+#define FOR_EACH_FIB_WALK_QUEUE_STATS(_wqs) \
+ for ((_wqs) = FIB_WALK_SCHEDULED; \
+ (_wqs) < FIB_WALK_QUEUE_STATS_NUM; \
+ (_wqs)++)
+
+/**
+ * The names of the walk stats
+ */
+static const char * const fib_walk_queue_stats_names[] = FIB_WALK_QUEUE_STATS;
+/**
+ * The names of the walk reasons
+ */
+static const char * const fib_node_bw_reason_names[] = FIB_NODE_BW_REASONS;
+
+/**
+ * A representation of one queue of walks
+ */
+typedef struct fib_walk_queue_t_
+{
+ /**
+ * Queue stats
+ */
+ u64 fwq_stats[FIB_WALK_QUEUE_STATS_NUM];
+
+ /**
+ * The node list which acts as the queue
+ */
+ fib_node_list_t fwq_queue;
+} fib_walk_queue_t;
+
+/**
+ * A set of priority queues for outstanding walks
+ */
+typedef struct fib_walk_queues_t_
+{
+ fib_walk_queue_t fwqs_queues[FIB_WALK_PRIORITY_NUM];
+} fib_walk_queues_t;
+
+/**
+ * The global queues of outstanding walks
+ */
+static fib_walk_queues_t fib_walk_queues;
+
+/**
+ * The names of the walk priorities
+ */
+static const char * const fib_walk_priority_names[] = FIB_WALK_PRIORITIES;
+
+/**
+ * @brief Histogram stats on the lengths of each walk in elements visited.
+ * Store up to 1<<23 elements in increments of 1<<10
+ */
+#define HISTOGRAM_VISITS_PER_WALK_MAX (1<<23)
+#define HISTOGRAM_VISITS_PER_WALK_INCR (1<<10)
+#define HISTOGRAM_VISITS_PER_WALK_N_BUCKETS \
+ (HISTOGRAM_VISITS_PER_WALK_MAX/HISTOGRAM_VISITS_PER_WALK_INCR)
+static u64 fib_walk_hist_visits_per_walk[HISTOGRAM_VISITS_PER_WALK_N_BUCKETS];
+
+/**
+ * @brief History of state for the last 128 walks
+ */
+#define HISTORY_N_WALKS 128
+#define MAX_HISTORY_REASONS 16
+static u32 history_last_walk_pos;
+typedef struct fib_walk_history_t_ {
+ u32 fwh_n_visits;
+ f64 fwh_duration;
+ f64 fwh_completed;
+ fib_node_ptr_t fwh_parent;
+ fib_walk_flags_t fwh_flags;
+ fib_node_bw_reason_flag_t fwh_reason[MAX_HISTORY_REASONS];
+} fib_walk_history_t;
+static fib_walk_history_t fib_walk_history[HISTORY_N_WALKS];
+
+u8*
+format_fib_walk_priority (u8 *s, va_list ap)
+{
+ fib_walk_priority_t prio = va_arg(ap, fib_walk_priority_t);
+
+ ASSERT(prio < FIB_WALK_PRIORITY_NUM);
+
+ return (format(s, "%s", fib_walk_priority_names[prio]));
+}
+static u8*
+format_fib_walk_queue_stats (u8 *s, va_list ap)
+{
+ fib_walk_queue_stats_t wqs = va_arg(ap, fib_walk_queue_stats_t);
+
+ ASSERT(wqs < FIB_WALK_QUEUE_STATS_NUM);
+
+ return (format(s, "%s", fib_walk_queue_stats_names[wqs]));
+}
+
+static index_t
+fib_walk_get_index (fib_walk_t *fwalk)
+{
+ return (fwalk - fib_walk_pool);
+}
+
+static fib_walk_t *
+fib_walk_get (index_t fwi)
+{
+ return (pool_elt_at_index(fib_walk_pool, fwi));
+}
+
+/*
+ * not static so it can be used in the unit tests
+ */
+u32
+fib_walk_queue_get_size (fib_walk_priority_t prio)
+{
+ return (fib_node_list_get_size(fib_walk_queues.fwqs_queues[prio].fwq_queue));
+}
+
+static fib_node_index_t
+fib_walk_queue_get_front (fib_walk_priority_t prio)
+{
+ fib_node_ptr_t wp;
+
+ fib_node_list_get_front(fib_walk_queues.fwqs_queues[prio].fwq_queue, &wp);
+
+ return (wp.fnp_index);
+}
+
+static void
+fib_walk_destroy (fib_walk_t *fwalk)
+{
+ u32 bucket, ii;
+
+ if (FIB_NODE_INDEX_INVALID != fwalk->fw_prio_sibling)
+ {
+ fib_node_list_elt_remove(fwalk->fw_prio_sibling);
+ }
+ fib_node_child_remove(fwalk->fw_parent.fnp_type,
+ fwalk->fw_parent.fnp_index,
+ fwalk->fw_dep_sibling);
+
+ /*
+ * add the stats to the continuous histogram collection.
+ */
+ bucket = (fwalk->fw_n_visits / HISTOGRAM_VISITS_PER_WALK_INCR);
+ bucket = (bucket >= HISTOGRAM_VISITS_PER_WALK_N_BUCKETS ?
+ HISTOGRAM_VISITS_PER_WALK_N_BUCKETS - 1 :
+ bucket);
+ fib_walk_hist_visits_per_walk[bucket]++;
+
+ /*
+ * save stats to the recent history
+ */
+
+ fib_walk_history[history_last_walk_pos].fwh_n_visits =
+ fwalk->fw_n_visits;
+ fib_walk_history[history_last_walk_pos].fwh_completed =
+ vlib_time_now(vlib_get_main());
+ fib_walk_history[history_last_walk_pos].fwh_duration =
+ fib_walk_history[history_last_walk_pos].fwh_completed -
+ fwalk->fw_start_time;
+ fib_walk_history[history_last_walk_pos].fwh_parent =
+ fwalk->fw_parent;
+ fib_walk_history[history_last_walk_pos].fwh_flags =
+ fwalk->fw_flags;
+
+ vec_foreach_index(ii, fwalk->fw_ctx)
+ {
+ if (ii < MAX_HISTORY_REASONS)
+ {
+ fib_walk_history[history_last_walk_pos].fwh_reason[ii] =
+ fwalk->fw_ctx[ii].fnbw_reason;
+ }
+ }
+
+ history_last_walk_pos = (history_last_walk_pos + 1) % HISTORY_N_WALKS;
+
+ fib_node_deinit(&fwalk->fw_node);
+ vec_free(fwalk->fw_ctx);
+ pool_put(fib_walk_pool, fwalk);
+}
+
+/**
+ * return code when advancing a walk
+ */
+typedef enum fib_walk_advance_rc_t_
+{
+ /**
+ * The walk is complete
+ */
+ FIB_WALK_ADVANCE_DONE,
+ /**
+ * the walk has more work
+ */
+ FIB_WALK_ADVANCE_MORE,
+ /**
+ * The walk merged with the one in front
+ */
+ FIB_WALK_ADVANCE_MERGE,
+} fib_walk_advance_rc_t;
+
+/**
+ * @brief Advance the walk one element in its work list
+ */
+static fib_walk_advance_rc_t
+fib_walk_advance (fib_node_index_t fwi)
+{
+ fib_node_back_walk_ctx_t *ctx, *old;
+ fib_node_back_walk_rc_t wrc;
+ fib_node_ptr_t sibling;
+ fib_walk_t *fwalk;
+ int more_elts;
+
+ /*
+ * this walk function is re-entrant - walks can spawn walks.
+ * fib_walk_t objects come from a pool, so they can realloc. we need
+ * to re-fetch from said pool at the appropriate times.
+ */
+ fwalk = fib_walk_get(fwi);
+
+ more_elts = fib_node_list_elt_get_next(fwalk->fw_dep_sibling, &sibling);
+
+ if (more_elts)
+ {
+ old = fwalk->fw_ctx;
+
+ vec_foreach(ctx, fwalk->fw_ctx)
+ {
+ wrc = fib_node_back_walk_one(&sibling, ctx);
+
+ fwalk = fib_walk_get(fwi);
+ fwalk->fw_n_visits++;
+
+ if (FIB_NODE_BACK_WALK_MERGE == wrc)
+ {
+ /*
+ * this walk has merged with the one further along the node's
+ * dependency list.
+ */
+ return (FIB_WALK_ADVANCE_MERGE);
+ }
+ if (old != fwalk->fw_ctx)
+ {
+ /*
+ * nasty re-entrant addition of a walk has realloc'd the vector
+ * break out
+ */
+ return (FIB_WALK_ADVANCE_MERGE);
+ }
+ }
+ /*
+ * move forward to the next node to visit
+ */
+ more_elts = fib_node_list_advance(fwalk->fw_dep_sibling);
+ }
+
+ if (more_elts)
+ {
+ return (FIB_WALK_ADVANCE_MORE);
+ }
+
+ return (FIB_WALK_ADVANCE_DONE);
+}
+
+/**
+ * @brief Enumerate the types of sleep between walks
+ */
+typedef enum fib_walk_sleep_type_t_
+{
+ FIB_WALK_SHORT_SLEEP,
+ FIB_WALK_LONG_SLEEP,
+} fib_walk_sleep_type_t;
+
+#define FIB_WALK_N_SLEEP (FIB_WALK_LONG_SLEEP+1)
+
+/**
+ * @brief Durations for the sleep types
+ */
+static f64 fib_walk_sleep_duration[] = {
+ [FIB_WALK_LONG_SLEEP] = 1e-3,
+ [FIB_WALK_SHORT_SLEEP] = 1e-8,
+};
+
+/**
+ * @brief The time quota for a walk. When more than this amount of time is
+ * spent, the walk process will yield.
+ */
+static f64 quota = 1e-4;
+
+/**
+ * Histogram on the amount of work done (in msecs) in each walk
+ */
+#define N_TIME_BUCKETS 128
+#define TIME_INCREMENTS (N_TIME_BUCKETS/2)
+static u64 fib_walk_work_time_taken[N_TIME_BUCKETS];
+
+/**
+ * Histogram on the number of nodes visited in each quota
+ */
+#define N_ELTS_BUCKETS 128
+static u32 fib_walk_work_nodes_visited_incr = 2;
+static u64 fib_walk_work_nodes_visited[N_ELTS_BUCKETS];
+
+/**
+ * Histogram of the sleep lengths
+ */
+static u64 fib_walk_sleep_lengths[FIB_WALK_N_SLEEP];
+
+/**
+ * @brief Service the queues
+ * This is not declared static so that it can be unit tested - I know, I know...
+ */
+f64
+fib_walk_process_queues (vlib_main_t * vm,
+ const f64 quota)
+{
+ f64 start_time, consumed_time;
+ fib_walk_sleep_type_t sleep;
+ fib_walk_priority_t prio;
+ fib_walk_advance_rc_t rc;
+ fib_node_index_t fwi;
+ fib_walk_t *fwalk;
+ u32 n_elts;
+ i32 bucket;
+
+ consumed_time = 0;
+ start_time = vlib_time_now(vm);
+ n_elts = 0;
+
+ FOR_EACH_FIB_WALK_PRIORITY(prio)
+ {
+ while (0 != fib_walk_queue_get_size(prio))
+ {
+ fwi = fib_walk_queue_get_front(prio);
+
+ /*
+ * set this walk as executing
+ */
+ fwalk = fib_walk_get(fwi);
+ fwalk->fw_flags |= FIB_WALK_FLAG_EXECUTING;
+
+ do
+ {
+ rc = fib_walk_advance(fwi);
+ n_elts++;
+ consumed_time = (vlib_time_now(vm) - start_time);
+ } while ((consumed_time < quota) &&
+ (FIB_WALK_ADVANCE_MORE == rc));
+
+ /*
+ * if this walk has no more work then pop it from the queue
+ * and move on to the next.
+ */
+ if (FIB_WALK_ADVANCE_MORE != rc)
+ {
+ fwalk = fib_walk_get(fwi);
+ fib_walk_destroy(fwalk);
+ fib_walk_queues.fwqs_queues[prio].fwq_stats[FIB_WALK_COMPLETED]++;
+ }
+ else
+ {
+ /*
+ * passed our work quota. sleep time.
+ */
+ fwalk = fib_walk_get(fwi);
+ fwalk->fw_flags &= ~FIB_WALK_FLAG_EXECUTING;
+ sleep = FIB_WALK_SHORT_SLEEP;
+ goto that_will_do_for_now;
+ }
+ }
+ }
+ /*
+ * got to the end of all the work
+ */
+ sleep = FIB_WALK_LONG_SLEEP;
+
+that_will_do_for_now:
+
+ /*
+ * collect the stats:
+ * - for the number of nodes visited we store 128 increments
+ * - for the time consumed we store quota/TIME_INCREMENTS increments.
+ */
+ bucket = ((n_elts/fib_walk_work_nodes_visited_incr) >= N_ELTS_BUCKETS ?
+ N_ELTS_BUCKETS-1 :
+ n_elts/fib_walk_work_nodes_visited_incr);
+ ++fib_walk_work_nodes_visited[bucket];
+
+ bucket = (consumed_time - quota) / (quota / TIME_INCREMENTS);
+ bucket += N_TIME_BUCKETS/2;
+ bucket = (bucket < 0 ? 0 : bucket);
+ bucket = (bucket > N_TIME_BUCKETS-1 ? N_TIME_BUCKETS-1 : bucket);
+ ++fib_walk_work_time_taken[bucket];
+
+ ++fib_walk_sleep_lengths[sleep];
+
+ return (fib_walk_sleep_duration[sleep]);
+}
+
+/**
+ * @brief The 'fib-walk' process's main loop.
+ */
+static uword
+fib_walk_process (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * f)
+{
+ f64 sleep_time;
+
+ sleep_time = fib_walk_sleep_duration[FIB_WALK_SHORT_SLEEP];
+
+ while (1)
+ {
+ vlib_process_wait_for_event_or_clock(vm, sleep_time);
+
+ /*
+ * there may be lots of event queued between the processes,
+ * but the walks we want to schedule are in the priority queues,
+ * so we ignore the process events.
+ */
+ vlib_process_get_events(vm, NULL);
+
+ sleep_time = fib_walk_process_queues(vm, quota);
+ }
+
+ /*
+ * Unreached
+ */
+ ASSERT(!"WTF");
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (fib_walk_process_node,static) = {
+ .function = fib_walk_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "fib-walk",
+};
+/* *INDENT-ON* */
+
+/**
+ * @brief Allocate a new walk object
+ */
+static fib_walk_t *
+fib_walk_alloc (fib_node_type_t parent_type,
+ fib_node_index_t parent_index,
+ fib_walk_flags_t flags,
+ fib_node_back_walk_ctx_t *ctx)
+{
+ fib_walk_t *fwalk;
+
+ pool_get(fib_walk_pool, fwalk);
+
+ fib_node_init(&fwalk->fw_node, FIB_NODE_TYPE_WALK);
+
+ fwalk->fw_flags = flags;
+ fwalk->fw_dep_sibling = FIB_NODE_INDEX_INVALID;
+ fwalk->fw_prio_sibling = FIB_NODE_INDEX_INVALID;
+ fwalk->fw_parent.fnp_index = parent_index;
+ fwalk->fw_parent.fnp_type = parent_type;
+ fwalk->fw_ctx = NULL;
+ fwalk->fw_start_time = vlib_time_now(vlib_get_main());
+ fwalk->fw_n_visits = 0;
+
+ /*
+ * make a copy of the backwalk context so the depth count remains
+ * the same for each sibling visited. This is important in the case
+ * where a parent has a loop via one child, but the others do not.
+ * if the looped child were visited first, the depth count would exceed the
+ * max and the walk would terminate before it reached the other siblings.
+ */
+ vec_add1(fwalk->fw_ctx, *ctx);
+
+ return (fwalk);
+}
+
+/**
+ * @brief Enqueue a walk onto the appropriate priority queue. Then signal
+ * the background process there is work to do.
+ */
+static index_t
+fib_walk_prio_queue_enqueue (fib_walk_priority_t prio,
+ fib_walk_t *fwalk)
+{
+ index_t sibling;
+
+ sibling = fib_node_list_push_front(fib_walk_queues.fwqs_queues[prio].fwq_queue,
+ 0,
+ FIB_NODE_TYPE_WALK,
+ fib_walk_get_index(fwalk));
+ fib_walk_queues.fwqs_queues[prio].fwq_stats[FIB_WALK_SCHEDULED]++;
+
+ /*
+ * poke the fib-walk process to perform the async walk.
+ * we are not passing it specific data, hence the last two args,
+ * the process will drain the queues
+ */
+ vlib_process_signal_event(vlib_get_main(),
+ fib_walk_process_node.index,
+ FIB_WALK_EVENT,
+ FIB_WALK_EVENT);
+
+ return (sibling);
+}
+
+void
+fib_walk_async (fib_node_type_t parent_type,
+ fib_node_index_t parent_index,
+ fib_walk_priority_t prio,
+ fib_node_back_walk_ctx_t *ctx)
+{
+ fib_walk_t *fwalk;
+
+ if (FIB_NODE_GRAPH_MAX_DEPTH < ++ctx->fnbw_depth)
+ {
+ /*
+ * The walk has reached the maximum depth. there is a loop in the graph.
+ * bail.
+ */
+ return;
+ }
+ if (0 == fib_node_get_n_children(parent_type,
+ parent_index))
+ {
+ /*
+ * no children to walk - quit now
+ */
+ return;
+ }
+ if (ctx->fnbw_flags & FIB_NODE_BW_FLAG_FORCE_SYNC)
+ {
+ /*
+ * the originator of the walk wanted it to be synchronous, but the
+ * parent object chose async - denied.
+ */
+ return (fib_walk_sync(parent_type, parent_index, ctx));
+ }
+
+
+ fwalk = fib_walk_alloc(parent_type,
+ parent_index,
+ FIB_WALK_FLAG_ASYNC,
+ ctx);
+
+ fwalk->fw_dep_sibling = fib_node_child_add(parent_type,
+ parent_index,
+ FIB_NODE_TYPE_WALK,
+ fib_walk_get_index(fwalk));
+
+ fwalk->fw_prio_sibling = fib_walk_prio_queue_enqueue(prio, fwalk);
+}
+
+/**
+ * @brief Back walk all the children of a FIB node.
+ *
+ * note this is a synchronous depth first walk. Children visited may propagate
+ * the walk to their children. Other child node types may not propagate
+ * synchronously, but instead queue the walk for later async completion.
+ */
+void
+fib_walk_sync (fib_node_type_t parent_type,
+ fib_node_index_t parent_index,
+ fib_node_back_walk_ctx_t *ctx)
+{
+ fib_walk_advance_rc_t rc;
+ fib_node_index_t fwi;
+ fib_walk_t *fwalk;
+
+ if (FIB_NODE_GRAPH_MAX_DEPTH < ++ctx->fnbw_depth)
+ {
+ /*
+ * The walk has reached the maximum depth. there is a loop in the graph.
+ * bail.
+ */
+ return;
+ }
+ if (0 == fib_node_get_n_children(parent_type,
+ parent_index))
+ {
+ /*
+ * no children to walk - quit now
+ */
+ return;
+ }
+
+ fwalk = fib_walk_alloc(parent_type,
+ parent_index,
+ FIB_WALK_FLAG_SYNC,
+ ctx);
+
+ fwalk->fw_dep_sibling = fib_node_child_add(parent_type,
+ parent_index,
+ FIB_NODE_TYPE_WALK,
+ fib_walk_get_index(fwalk));
+ fwi = fib_walk_get_index(fwalk);
+
+ while (1)
+ {
+ /*
+ * set this walk as executing
+ */
+ fwalk->fw_flags |= FIB_WALK_FLAG_EXECUTING;
+
+ do
+ {
+ rc = fib_walk_advance(fwi);
+ } while (FIB_WALK_ADVANCE_MORE == rc);
+
+
+ /*
+ * this walk function is re-entrant - walks can spawn walks.
+ * fib_walk_t objects come from a pool, so they can realloc. we need
+ * to re-fetch from said pool at the appropriate times.
+ */
+ fwalk = fib_walk_get(fwi);
+
+ if (FIB_WALK_ADVANCE_MERGE == rc)
+ {
+ /*
+ * this sync walk merged with a walk in front.
+ * by requesting a sync walk the client wanted all children walked,
+ * so we ditch the walk object in hand and continue with the one
+ * we merged into
+ */
+ fib_node_ptr_t merged_walk;
+
+ fib_node_list_elt_get_next(fwalk->fw_dep_sibling, &merged_walk);
+
+ ASSERT(FIB_NODE_INDEX_INVALID != merged_walk.fnp_index);
+ ASSERT(FIB_NODE_TYPE_WALK == merged_walk.fnp_type);
+
+ fib_walk_destroy(fwalk);
+
+ fwi = merged_walk.fnp_index;
+ fwalk = fib_walk_get(fwi);
+
+ if (FIB_WALK_FLAG_EXECUTING & fwalk->fw_flags)
+ {
+ /*
+ * we are executing a sync walk, and we have met with another
+ * walk that is also executing. since only one walk executes at once
+ * (there is no multi-threading) this implies we have met ourselves
+ * and hence there is a loop in the graph.
+ * This function is re-entrant, so the walk object we met is being
+ * acted on in a stack frame below this one. We must therefore not
+ * continue with it now, but let the stack unwind, allowing the
+ * appropriate frame to read the depth count and bail.
+ */
+ fwalk = NULL;
+ break;
+ }
+ }
+ else
+ {
+ /*
+ * the walk reached the end of the dependency list.
+ */
+ break;
+ }
+ }
+
+ if (NULL != fwalk)
+ {
+ fib_walk_destroy(fwalk);
+ }
+}
+
+static fib_node_t *
+fib_walk_get_node (fib_node_index_t index)
+{
+ fib_walk_t *fwalk;
+
+ fwalk = fib_walk_get(index);
+
+ return (&(fwalk->fw_node));
+}
+
+/**
+ * Walk objects are not parents, nor are they locked, so these
+ * functions are no-ops.
+ */
+static void
+fib_walk_last_lock_gone (fib_node_t *node)
+{
+ ASSERT(0);
+}
+
+static fib_walk_t*
+fib_walk_get_from_node (fib_node_t *node)
+{
+ return ((fib_walk_t*)(((char*)node) -
+ STRUCT_OFFSET_OF(fib_walk_t, fw_node)));
+}
+
+/**
+ * @brief Another back walk has reached this walk.
+ * Merge them so there is only one left. It is this node being
+ * visited that will remain, so copy or merge the context onto it.
+ */
+static fib_node_back_walk_rc_t
+fib_walk_back_walk_notify (fib_node_t *node,
+ fib_node_back_walk_ctx_t *ctx)
+{
+ fib_node_back_walk_ctx_t *last;
+ fib_walk_t *fwalk;
+
+ fwalk = fib_walk_get_from_node(node);
+
+ /*
+ * check whether the walk context can be merged with the most recent.
+ * the most recent was the one last added and is thus at the back of the vector.
+ * we can merge walks if the reason for the walk is the same.
+ */
+ last = vec_end(fwalk->fw_ctx) - 1;
+
+ if (last->fnbw_reason == ctx->fnbw_reason)
+ {
+ /*
+ * copy the largest of the depth values. in the presence of a loop,
+ * the same walk will merge with itself. if we take the smaller depth
+ * then it will never end.
+ */
+ last->fnbw_depth = ((last->fnbw_depth >= ctx->fnbw_depth) ?
+ last->fnbw_depth :
+ ctx->fnbw_depth);
+ }
+ else
+ {
+ /*
+ * walks could not be merged. this means that the walk in front needs to
+ * perform a different action to this one that has caught up. the one in
+ * front was scheduled first so append the new walk context to the back
+ * of the list.
+ */
+ vec_add1(fwalk->fw_ctx, *ctx);
+ }
+
+ return (FIB_NODE_BACK_WALK_MERGE);
+}
+
+/**
+ * The FIB walk's graph node virtual function table
+ */
+static const fib_node_vft_t fib_walk_vft = {
+ .fnv_get = fib_walk_get_node,
+ .fnv_last_lock = fib_walk_last_lock_gone,
+ .fnv_back_walk = fib_walk_back_walk_notify,
+};
+
+void
+fib_walk_module_init (void)
+{
+ fib_walk_priority_t prio;
+
+ FOR_EACH_FIB_WALK_PRIORITY(prio)
+ {
+ fib_walk_queues.fwqs_queues[prio].fwq_queue = fib_node_list_create();
+ }
+
+ fib_node_register_type(FIB_NODE_TYPE_WALK, &fib_walk_vft);
+}
+
+static u8*
+format_fib_walk (u8* s, va_list ap)
+{
+ fib_node_index_t fwi = va_arg(ap, fib_node_index_t);
+ fib_walk_t *fwalk;
+
+ fwalk = fib_walk_get(fwi);
+
+ return (format(s, " parent:{%s:%d} visits:%d flags:%d",
+ fib_node_type_get_name(fwalk->fw_parent.fnp_type),
+ fwalk->fw_parent.fnp_index,
+ fwalk->fw_n_visits,
+ fwalk->fw_flags));
+}
+
+static clib_error_t *
+fib_walk_show (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ fib_walk_queue_stats_t wqs;
+ fib_walk_priority_t prio;
+ fib_node_ptr_t sibling;
+ fib_node_index_t fwi;
+ fib_walk_t *fwalk;
+ int more_elts, ii;
+ u8 *s = NULL;
+
+#define USEC 1000000
+ vlib_cli_output(vm, "FIB Walk Quota = %.2fusec:", quota * USEC);
+ vlib_cli_output(vm, "FIB Walk queues:");
+
+ FOR_EACH_FIB_WALK_PRIORITY(prio)
+ {
+ vlib_cli_output(vm, " %U priority queue:",
+ format_fib_walk_priority, prio);
+ vlib_cli_output(vm, " Stats: ");
+
+ FOR_EACH_FIB_WALK_QUEUE_STATS(wqs)
+ {
+ vlib_cli_output(vm, " %U:%d",
+ format_fib_walk_queue_stats, wqs,
+ fib_walk_queues.fwqs_queues[prio].fwq_stats[wqs]);
+ }
+ vlib_cli_output(vm, " Occupancy:%d",
+ fib_node_list_get_size(
+ fib_walk_queues.fwqs_queues[prio].fwq_queue));
+
+ more_elts = fib_node_list_get_front(
+ fib_walk_queues.fwqs_queues[prio].fwq_queue,
+ &sibling);
+
+ while (more_elts)
+ {
+ ASSERT(FIB_NODE_INDEX_INVALID != sibling.fnp_index);
+ ASSERT(FIB_NODE_TYPE_WALK == sibling.fnp_type);
+
+ fwi = sibling.fnp_index;
+ fwalk = fib_walk_get(fwi);
+
+ vlib_cli_output(vm, " %U", format_fib_walk, fwi);
+
+ more_elts = fib_node_list_elt_get_next(fwalk->fw_prio_sibling,
+ &sibling);
+ }
+ }
+
+ vlib_cli_output(vm, "Histogram Statistics:");
+ vlib_cli_output(vm, " Number of Elements visit per-quota:");
+ for (ii = 0; ii < N_ELTS_BUCKETS; ii++)
+ {
+ if (0 != fib_walk_work_nodes_visited[ii])
+ s = format(s, "%d:%d ",
+ (ii * fib_walk_work_nodes_visited_incr),
+ fib_walk_work_nodes_visited[ii]);
+ }
+ vlib_cli_output(vm, " %v", s);
+ vec_free(s);
+
+ vlib_cli_output(vm, " Time consumed per-quota (Quota=%f usec):", quota*USEC);
+ s = format(s, "0:%d ", fib_walk_work_time_taken[0]);
+ for (ii = 1; ii < N_TIME_BUCKETS; ii++)
+ {
+ if (0 != fib_walk_work_time_taken[ii])
+ s = format(s, "%d:%d ", (u32)((((ii - N_TIME_BUCKETS/2) *
+ (quota / TIME_INCREMENTS)) + quota) *
+ USEC),
+ fib_walk_work_time_taken[ii]);
+ }
+ vlib_cli_output(vm, " %v", s);
+ vec_free(s);
+
+ vlib_cli_output(vm, " Sleep Types:");
+ vlib_cli_output(vm, " Short Long:");
+ vlib_cli_output(vm, " %d %d:",
+ fib_walk_sleep_lengths[FIB_WALK_SHORT_SLEEP],
+ fib_walk_sleep_lengths[FIB_WALK_LONG_SLEEP]);
+
+ vlib_cli_output(vm, " Number of Elements visited per-walk:");
+ for (ii = 0; ii < HISTOGRAM_VISITS_PER_WALK_N_BUCKETS; ii++)
+ {
+ if (0 != fib_walk_hist_visits_per_walk[ii])
+ s = format(s, "%d:%d ",
+ ii*HISTOGRAM_VISITS_PER_WALK_INCR,
+ fib_walk_hist_visits_per_walk[ii]);
+ }
+ vlib_cli_output(vm, " %v", s);
+ vec_free(s);
+
+
+ vlib_cli_output(vm, "Brief History (last %d walks):", HISTORY_N_WALKS);
+ ii = history_last_walk_pos - 1;
+ if (ii < 0)
+ ii = HISTORY_N_WALKS - 1;
+
+ while (ii != history_last_walk_pos)
+ {
+ if (0 != fib_walk_history[ii].fwh_reason[0])
+ {
+ fib_node_back_walk_reason_t reason;
+ u8 *s = NULL;
+ u32 jj;
+
+ s = format(s, "[@%d]: %s:%d visits:%d duration:%.2f completed:%.2f ",
+ ii, fib_node_type_get_name(fib_walk_history[ii].fwh_parent.fnp_type),
+ fib_walk_history[ii].fwh_parent.fnp_index,
+ fib_walk_history[ii].fwh_n_visits,
+ fib_walk_history[ii].fwh_duration,
+ fib_walk_history[ii].fwh_completed);
+ if (FIB_WALK_FLAG_SYNC & fib_walk_history[ii].fwh_flags)
+ s = format(s, "sync, ");
+ if (FIB_WALK_FLAG_ASYNC & fib_walk_history[ii].fwh_flags)
+ s = format(s, "async, ");
+
+ s = format(s, "reason:");
+ jj = 0;
+ while (0 != fib_walk_history[ii].fwh_reason[jj])
+ {
+ FOR_EACH_FIB_NODE_BW_REASON(reason) {
+ if ((1<<reason) & fib_walk_history[ii].fwh_reason[jj]) {
+ s = format (s, "%s,", fib_node_bw_reason_names[reason]);
+ }
+ }
+ jj++;
+ }
+ vlib_cli_output(vm, "%v", s);
+ }
+
+ ii--;
+ if (ii < 0)
+ ii = HISTORY_N_WALKS - 1;
+ }
+
+ return (NULL);
+}
+
+VLIB_CLI_COMMAND (fib_walk_show_command, static) = {
+ .path = "show fib walk",
+ .short_help = "show fib walk",
+ .function = fib_walk_show,
+};
+
+static clib_error_t *
+fib_walk_set_quota (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t * error = NULL;
+ f64 new_quota;
+
+ if (unformat (input, "%f", &new_quota))
+ {
+ quota = new_quota;
+ }
+ else
+ {
+ error = clib_error_return(0 , "Pass a float value");
+ }
+
+ return (error);
+}
+
+VLIB_CLI_COMMAND (fib_walk_set_quota_command, static) = {
+ .path = "set fib walk quota",
+ .short_help = "set fib walk quota",
+ .function = fib_walk_set_quota,
+};
+
+static clib_error_t *
+fib_walk_set_histogram_elements_size (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t * error = NULL;
+ u32 new;
+
+ if (unformat (input, "%d", &new))
+ {
+ fib_walk_work_nodes_visited_incr = new;
+ }
+ else
+ {
+ error = clib_error_return(0 , "Pass an int value");
+ }
+
+ return (error);
+}
+
+VLIB_CLI_COMMAND (fib_walk_set_histogram_elements_size_command, static) = {
+ .path = "set fib walk histogram elements size",
+ .short_help = "set fib walk histogram elements size",
+ .function = fib_walk_set_histogram_elements_size,
+};
+
+static clib_error_t *
+fib_walk_clear (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ memset(fib_walk_hist_visits_per_walk, 0, sizeof(fib_walk_hist_visits_per_walk));
+ memset(fib_walk_history, 0, sizeof(fib_walk_history));
+ memset(fib_walk_work_time_taken, 0, sizeof(fib_walk_work_time_taken));
+ memset(fib_walk_work_nodes_visited, 0, sizeof(fib_walk_work_nodes_visited));
+ memset(fib_walk_sleep_lengths, 0, sizeof(fib_walk_sleep_lengths));
+
+ return (NULL);
+}
+
+VLIB_CLI_COMMAND (fib_walk_clear_command, static) = {
+ .path = "clear fib walk",
+ .short_help = "clear fib walk",
+ .function = fib_walk_clear,
+};
diff --git a/src/vnet/fib/fib_walk.h b/src/vnet/fib/fib_walk.h
new file mode 100644
index 00000000000..7413d8a2c78
--- /dev/null
+++ b/src/vnet/fib/fib_walk.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FIB_WALK_H__
+#define __FIB_WALK_H__
+
+#include <vnet/fib/fib_node.h>
+
+/**
+ * @brief Walk priorities.
+ * Strict priorities. All walks at priority n are completed before n+1 is started.
+ * Increasing numerical value implies decreasing priority.
+ */
+typedef enum fib_walk_priority_t_
+{
+ FIB_WALK_PRIORITY_HIGH = 0,
+ FIB_WALK_PRIORITY_LOW = 1,
+} fib_walk_priority_t;
+
+#define FIB_WALK_PRIORITY_NUM ((fib_walk_priority_t)(FIB_WALK_PRIORITY_LOW+1))
+
+#define FIB_WALK_PRIORITIES { \
+ [FIB_WALK_PRIORITY_HIGH] = "high", \
+ [FIB_WALK_PRIORITY_LOW] = "low", \
+}
+
+#define FOR_EACH_FIB_WALK_PRIORITY(_prio) \
+ for ((_prio) = FIB_WALK_PRIORITY_HIGH; \
+ (_prio) < FIB_WALK_PRIORITY_NUM; \
+ (_prio)++)
+
+extern void fib_walk_module_init(void);
+
+extern void fib_walk_async(fib_node_type_t parent_type,
+ fib_node_index_t parent_index,
+ fib_walk_priority_t prio,
+ fib_node_back_walk_ctx_t *ctx);
+
+extern void fib_walk_sync(fib_node_type_t parent_type,
+ fib_node_index_t parent_index,
+ fib_node_back_walk_ctx_t *ctx);
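+
+/*
+ * Usage sketch (illustrative, not part of the patch): trigger a back-walk
+ * from a FIB entry to its children. The reason flag is an assumed example;
+ * the FIB_NODE_BW_REASON_FLAG_* values are defined in fib_node.h.
+ *
+ *   fib_node_back_walk_ctx_t ctx = {
+ *       .fnbw_reason = FIB_NODE_BW_REASON_FLAG_RESOLVE,  // assumed value
+ *   };
+ *   fib_walk_sync(FIB_NODE_TYPE_ENTRY, fib_entry_index, &ctx);
+ *   // or defer it:
+ *   fib_walk_async(FIB_NODE_TYPE_ENTRY, fib_entry_index,
+ *                  FIB_WALK_PRIORITY_HIGH, &ctx);
+ */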
+
+extern u8* format_fib_walk_priority(u8 *s, va_list ap);
+
+#endif
+
diff --git a/src/vnet/fib/ip4_fib.c b/src/vnet/fib/ip4_fib.c
new file mode 100644
index 00000000000..f6ebce00837
--- /dev/null
+++ b/src/vnet/fib/ip4_fib.c
@@ -0,0 +1,664 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/ip4_fib.h>
+
+/*
+ * A table of prefixes to be added to tables and the sources for them
+ */
+typedef struct ip4_fib_table_special_prefix_t_ {
+ fib_prefix_t ift_prefix;
+ fib_source_t ift_source;
+ fib_entry_flag_t ift_flag;
+} ip4_fib_table_special_prefix_t;
+
+static const ip4_fib_table_special_prefix_t ip4_specials[] = {
+ {
+ /* 0.0.0.0/0*/
+ .ift_prefix = {
+ .fp_addr = {
+ .ip4.data_u32 = 0,
+ },
+ .fp_len = 0,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ },
+ .ift_source = FIB_SOURCE_DEFAULT_ROUTE,
+ .ift_flag = FIB_ENTRY_FLAG_DROP,
+ },
+ {
+ /* 0.0.0.0/32*/
+ .ift_prefix = {
+ .fp_addr = {
+ .ip4.data_u32 = 0,
+ },
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ },
+ .ift_source = FIB_SOURCE_DEFAULT_ROUTE,
+ .ift_flag = FIB_ENTRY_FLAG_DROP,
+ },
+ {
+ /*
+ * 240.0.0.0/4
+ * drop class E
+ */
+ .ift_prefix = {
+ .fp_addr = {
+ .ip4.data_u32 = 0xf0000000,
+ },
+ .fp_len = 4,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ },
+ .ift_source = FIB_SOURCE_SPECIAL,
+ .ift_flag = FIB_ENTRY_FLAG_DROP,
+
+ },
+ {
+ /*
+ * 224.0.0.0/4
+ * drop all mcast
+ */
+ .ift_prefix = {
+ .fp_addr = {
+ .ip4.data_u32 = 0xe0000000,
+ },
+ .fp_len = 4,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ },
+ .ift_source = FIB_SOURCE_SPECIAL,
+ .ift_flag = FIB_ENTRY_FLAG_DROP,
+ },
+ {
+ /*
+ * 255.255.255.255/32
+ * drop, but we'll allow it to be usurped by the likes of DHCP
+ */
+ .ift_prefix = {
+ .fp_addr = {
+ .ip4.data_u32 = 0xffffffff,
+ },
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ },
+ .ift_source = FIB_SOURCE_DEFAULT_ROUTE,
+ .ift_flag = FIB_ENTRY_FLAG_DROP,
+ }
+};
+
+
+static u32
+ip4_create_fib_with_table_id (u32 table_id)
+{
+ fib_table_t *fib_table;
+
+ pool_get_aligned(ip4_main.fibs, fib_table, CLIB_CACHE_LINE_BYTES);
+ memset(fib_table, 0, sizeof(*fib_table));
+
+ fib_table->ft_proto = FIB_PROTOCOL_IP4;
+ fib_table->ft_index =
+ fib_table->v4.index =
+ (fib_table - ip4_main.fibs);
+
+ hash_set (ip4_main.fib_index_by_table_id, table_id, fib_table->ft_index);
+
+ fib_table->ft_table_id =
+ fib_table->v4.table_id =
+ table_id;
+ fib_table->ft_flow_hash_config =
+ fib_table->v4.flow_hash_config =
+ IP_FLOW_HASH_DEFAULT;
+ fib_table->v4.fwd_classify_table_index = ~0;
+ fib_table->v4.rev_classify_table_index = ~0;
+
+ fib_table_lock(fib_table->ft_index, FIB_PROTOCOL_IP4);
+
+ ip4_mtrie_init(&fib_table->v4.mtrie);
+
+ /*
+ * add the special entries into the new FIB
+ */
+ int ii;
+
+ for (ii = 0; ii < ARRAY_LEN(ip4_specials); ii++)
+ {
+ fib_prefix_t prefix = ip4_specials[ii].ift_prefix;
+
+ prefix.fp_addr.ip4.data_u32 =
+ clib_host_to_net_u32(prefix.fp_addr.ip4.data_u32);
+
+ fib_table_entry_special_add(fib_table->ft_index,
+ &prefix,
+ ip4_specials[ii].ift_source,
+ ip4_specials[ii].ift_flag,
+ ADJ_INDEX_INVALID);
+ }
+
+ return (fib_table->ft_index);
+}
+
+void
+ip4_fib_table_destroy (ip4_fib_t *fib)
+{
+ fib_table_t *fib_table = (fib_table_t*)fib;
+ int ii;
+
+ /*
+ * remove all the specials we added when the table was created.
+ */
+ for (ii = 0; ii < ARRAY_LEN(ip4_specials); ii++)
+ {
+ fib_prefix_t prefix = ip4_specials[ii].ift_prefix;
+
+ prefix.fp_addr.ip4.data_u32 =
+ clib_host_to_net_u32(prefix.fp_addr.ip4.data_u32);
+
+ fib_table_entry_special_remove(fib_table->ft_index,
+ &prefix,
+ ip4_specials[ii].ift_source);
+ }
+
+ /*
+ * validate no more routes.
+ */
+ ASSERT(0 == fib_table->ft_total_route_counts);
+ FOR_EACH_FIB_SOURCE(ii)
+ {
+ ASSERT(0 == fib_table->ft_src_route_counts[ii]);
+ }
+
+ if (~0 != fib_table->ft_table_id)
+ {
+ hash_unset (ip4_main.fib_index_by_table_id, fib_table->ft_table_id);
+ }
+ pool_put(ip4_main.fibs, fib_table);
+}
+
+
+u32
+ip4_fib_table_find_or_create_and_lock (u32 table_id)
+{
+ u32 index;
+
+ index = ip4_fib_index_from_table_id(table_id);
+ if (~0 == index)
+ return ip4_create_fib_with_table_id(table_id);
+
+ fib_table_lock(index, FIB_PROTOCOL_IP4);
+
+ return (index);
+}
+
+u32
+ip4_fib_table_create_and_lock (void)
+{
+ return (ip4_create_fib_with_table_id(~0));
+}
+
+u32
+ip4_fib_table_get_index_for_sw_if_index (u32 sw_if_index)
+{
+ if (sw_if_index >= vec_len(ip4_main.fib_index_by_sw_if_index))
+ {
+ /*
+ * This is the case for interfaces that are not yet mapped to
+ * an IP table
+ */
+ return (~0);
+ }
+ return (ip4_main.fib_index_by_sw_if_index[sw_if_index]);
+}
+
+flow_hash_config_t
+ip4_fib_table_get_flow_hash_config (u32 fib_index)
+{
+ return (ip4_fib_get(fib_index)->flow_hash_config);
+}
+
+/*
+ * ip4_fib_table_lookup_exact_match
+ *
+ * Exact match prefix lookup
+ */
+fib_node_index_t
+ip4_fib_table_lookup_exact_match (const ip4_fib_t *fib,
+ const ip4_address_t *addr,
+ u32 len)
+{
+ uword * hash, * result;
+ u32 key;
+
+ hash = fib->fib_entry_by_dst_address[len];
+ key = (addr->data_u32 & ip4_main.fib_masks[len]);
+
+ result = hash_get(hash, key);
+
+ if (NULL != result) {
+ return (result[0]);
+ }
+ return (FIB_NODE_INDEX_INVALID);
+}
+
+/*
+ * ip4_fib_table_lookup_lb
+ *
+ * Longest prefix match
+ */
+index_t
+ip4_fib_table_lookup_lb (ip4_fib_t *fib,
+ const ip4_address_t *addr)
+{
+ fib_node_index_t fei;
+
+ fei = ip4_fib_table_lookup(fib, addr, 32);
+
+ if (FIB_NODE_INDEX_INVALID != fei)
+ {
+ const dpo_id_t *dpo;
+
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+
+ return (dpo->dpoi_index);
+ }
+ return (INDEX_INVALID);
+}
+
+/*
+ * ip4_fib_table_lookup
+ *
+ * Longest prefix match
+ */
+fib_node_index_t
+ip4_fib_table_lookup (const ip4_fib_t *fib,
+ const ip4_address_t *addr,
+ u32 len)
+{
+ uword * hash, * result;
+ i32 mask_len;
+ u32 key;
+
+ for (mask_len = len; mask_len >= 0; mask_len--)
+ {
+ hash = fib->fib_entry_by_dst_address[mask_len];
+ key = (addr->data_u32 & ip4_main.fib_masks[mask_len]);
+
+ result = hash_get (hash, key);
+
+ if (NULL != result) {
+ return (result[0]);
+ }
+ }
+ return (FIB_NODE_INDEX_INVALID);
+}
+
+void
+ip4_fib_table_entry_insert (ip4_fib_t *fib,
+ const ip4_address_t *addr,
+ u32 len,
+ fib_node_index_t fib_entry_index)
+{
+ uword * hash, * result;
+ u32 key;
+
+ key = (addr->data_u32 & ip4_main.fib_masks[len]);
+ hash = fib->fib_entry_by_dst_address[len];
+ result = hash_get (hash, key);
+
+ if (NULL == result) {
+ /*
+ * adding a new entry
+ */
+ if (NULL == hash) {
+ hash = hash_create (32 /* elts */, sizeof (uword));
+ hash_set_flags (hash, HASH_FLAG_NO_AUTO_SHRINK);
+ }
+ hash = hash_set(hash, key, fib_entry_index);
+ fib->fib_entry_by_dst_address[len] = hash;
+ }
+ else
+ {
+ ASSERT(0);
+ }
+}
+
+void
+ip4_fib_table_entry_remove (ip4_fib_t *fib,
+ const ip4_address_t *addr,
+ u32 len)
+{
+ uword * hash, * result;
+ u32 key;
+
+ key = (addr->data_u32 & ip4_main.fib_masks[len]);
+ hash = fib->fib_entry_by_dst_address[len];
+ result = hash_get (hash, key);
+
+ if (NULL == result)
+ {
+ /*
+ * removing a non-existent entry. I'll allow it.
+ */
+ }
+ else
+ {
+ hash_unset(hash, key);
+ }
+
+ fib->fib_entry_by_dst_address[len] = hash;
+}
+
+void
+ip4_fib_table_fwding_dpo_update (ip4_fib_t *fib,
+ const ip4_address_t *addr,
+ u32 len,
+ const dpo_id_t *dpo)
+{
+ ip4_fib_mtrie_add_del_route(fib, *addr, len, dpo->dpoi_index, 0); // ADD
+}
+
+void
+ip4_fib_table_fwding_dpo_remove (ip4_fib_t *fib,
+ const ip4_address_t *addr,
+ u32 len,
+ const dpo_id_t *dpo)
+{
+ ip4_fib_mtrie_add_del_route(fib, *addr, len, dpo->dpoi_index, 1); // DELETE
+}
+
+static void
+ip4_fib_table_show_all (ip4_fib_t *fib,
+ vlib_main_t * vm)
+{
+ fib_node_index_t *fib_entry_indices;
+ fib_node_index_t *fib_entry_index;
+ int i;
+
+ fib_entry_indices = NULL;
+
+ for (i = 0; i < ARRAY_LEN (fib->fib_entry_by_dst_address); i++)
+ {
+ uword * hash = fib->fib_entry_by_dst_address[i];
+
+ if (NULL != hash)
+ {
+ hash_pair_t * p;
+
+ hash_foreach_pair (p, hash,
+ ({
+ vec_add1(fib_entry_indices, p->value[0]);
+ }));
+ }
+ }
+
+ vec_sort_with_function(fib_entry_indices, fib_entry_cmp_for_sort);
+
+ vec_foreach(fib_entry_index, fib_entry_indices)
+ {
+ vlib_cli_output(vm, "%U",
+ format_fib_entry,
+ *fib_entry_index,
+ FIB_ENTRY_FORMAT_BRIEF);
+ }
+
+ vec_free(fib_entry_indices);
+}
+
+static void
+ip4_fib_table_show_one (ip4_fib_t *fib,
+ vlib_main_t * vm,
+ ip4_address_t *address,
+ u32 mask_len)
+{
+ vlib_cli_output(vm, "%U",
+ format_fib_entry,
+ ip4_fib_table_lookup(fib, address, mask_len),
+ FIB_ENTRY_FORMAT_DETAIL);
+}
+
+static clib_error_t *
+ip4_show_fib (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ ip4_main_t * im4 = &ip4_main;
+ fib_table_t * fib_table;
+ int verbose, matching, mtrie;
+ ip4_address_t matching_address;
+ u32 matching_mask = 32;
+ int i, table_id = -1, fib_index = ~0;
+
+ verbose = 1;
+ matching = 0;
+ mtrie = 0;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "brief") || unformat (input, "summary")
+ || unformat (input, "sum"))
+ verbose = 0;
+
+ else if (unformat (input, "mtrie"))
+ mtrie = 1;
+
+ else if (unformat (input, "%U/%d",
+ unformat_ip4_address, &matching_address, &matching_mask))
+ matching = 1;
+
+ else if (unformat (input, "%U", unformat_ip4_address, &matching_address))
+ matching = 1;
+
+ else if (unformat (input, "table %d", &table_id))
+ ;
+ else if (unformat (input, "index %d", &fib_index))
+ ;
+ else
+ break;
+ }
+
+ pool_foreach (fib_table, im4->fibs,
+ ({
+ ip4_fib_t *fib = &fib_table->v4;
+
+ if (table_id >= 0 && table_id != (int)fib->table_id)
+ continue;
+ if (fib_index != ~0 && fib_index != (int)fib->index)
+ continue;
+
+ vlib_cli_output (vm, "%U, fib_index %d, flow hash: %U",
+ format_fib_table_name, fib->index, FIB_PROTOCOL_IP4,
+ fib->index,
+ format_ip_flow_hash_config, fib->flow_hash_config);
+
+ /* Show summary? */
+ if (! verbose)
+ {
+ vlib_cli_output (vm, "%=20s%=16s", "Prefix length", "Count");
+ for (i = 0; i < ARRAY_LEN (fib->fib_entry_by_dst_address); i++)
+ {
+ uword * hash = fib->fib_entry_by_dst_address[i];
+ uword n_elts = hash_elts (hash);
+ if (n_elts > 0)
+ vlib_cli_output (vm, "%20d%16d", i, n_elts);
+ }
+ continue;
+ }
+
+ if (!matching)
+ {
+ ip4_fib_table_show_all(fib, vm);
+ }
+ else
+ {
+ ip4_fib_table_show_one(fib, vm, &matching_address, matching_mask);
+ }
+
+ if (mtrie)
+ vlib_cli_output (vm, "%U", format_ip4_fib_mtrie, &fib->mtrie);
+ }));
+
+ return 0;
+}
+
+/*?
+ * This command displays the IPv4 FIB Tables (VRF Tables) and the route
+ * entries for each table.
+ *
+ * @note This command will run for a long time when the FIB tables are
+ * comprised of millions of entries. For those scenarios, consider displaying
+ * a single table or summary mode.
+ *
+ * @cliexpar
+ * Example of how to display all the IPv4 FIB tables:
+ * @cliexstart{show ip fib}
+ * ipv4-VRF:0, fib_index 0, flow hash: src dst sport dport proto
+ * 0.0.0.0/0
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:0 buckets:1 uRPF:0 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 0.0.0.0/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:1 buckets:1 uRPF:1 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 6.0.1.2/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:30 buckets:1 uRPF:29 to:[0:0]]
+ * [0] [@3]: arp-ipv4: via 6.0.0.1 af_packet0
+ * 7.0.0.1/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:31 buckets:4 uRPF:30 to:[0:0]]
+ * [0] [@3]: arp-ipv4: via 6.0.0.2 af_packet0
+ * [1] [@3]: arp-ipv4: via 6.0.0.2 af_packet0
+ * [2] [@3]: arp-ipv4: via 6.0.0.2 af_packet0
+ * [3] [@3]: arp-ipv4: via 6.0.0.1 af_packet0
+ * 224.0.0.0/8
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:3 buckets:1 uRPF:3 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 240.0.0.0/8
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:2 buckets:1 uRPF:2 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 255.255.255.255/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:4 buckets:1 uRPF:4 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * ipv4-VRF:7, fib_index 1, flow hash: src dst sport dport proto
+ * 0.0.0.0/0
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:12 buckets:1 uRPF:11 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 0.0.0.0/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:13 buckets:1 uRPF:12 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 172.16.1.0/24
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:17 buckets:1 uRPF:16 to:[0:0]]
+ * [0] [@4]: ipv4-glean: af_packet0
+ * 172.16.1.1/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:18 buckets:1 uRPF:17 to:[1:84]]
+ * [0] [@2]: dpo-receive: 172.16.1.1 on af_packet0
+ * 172.16.1.2/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:21 buckets:1 uRPF:20 to:[0:0]]
+ * [0] [@5]: ipv4 via 172.16.1.2 af_packet0: IP4: 02:fe:9e:70:7a:2b -> 26:a5:f6:9c:3a:36
+ * 172.16.2.0/24
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:19 buckets:1 uRPF:18 to:[0:0]]
+ * [0] [@4]: ipv4-glean: af_packet1
+ * 172.16.2.1/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:20 buckets:1 uRPF:19 to:[0:0]]
+ * [0] [@2]: dpo-receive: 172.16.2.1 on af_packet1
+ * 224.0.0.0/8
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:15 buckets:1 uRPF:14 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 240.0.0.0/8
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:14 buckets:1 uRPF:13 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 255.255.255.255/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:16 buckets:1 uRPF:15 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * @cliexend
+ * Example of how to display a single IPv4 FIB table:
+ * @cliexstart{show ip fib table 7}
+ * ipv4-VRF:7, fib_index 1, flow hash: src dst sport dport proto
+ * 0.0.0.0/0
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:12 buckets:1 uRPF:11 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 0.0.0.0/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:13 buckets:1 uRPF:12 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 172.16.1.0/24
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:17 buckets:1 uRPF:16 to:[0:0]]
+ * [0] [@4]: ipv4-glean: af_packet0
+ * 172.16.1.1/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:18 buckets:1 uRPF:17 to:[1:84]]
+ * [0] [@2]: dpo-receive: 172.16.1.1 on af_packet0
+ * 172.16.1.2/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:21 buckets:1 uRPF:20 to:[0:0]]
+ * [0] [@5]: ipv4 via 172.16.1.2 af_packet0: IP4: 02:fe:9e:70:7a:2b -> 26:a5:f6:9c:3a:36
+ * 172.16.2.0/24
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:19 buckets:1 uRPF:18 to:[0:0]]
+ * [0] [@4]: ipv4-glean: af_packet1
+ * 172.16.2.1/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:20 buckets:1 uRPF:19 to:[0:0]]
+ * [0] [@2]: dpo-receive: 172.16.2.1 on af_packet1
+ * 224.0.0.0/8
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:15 buckets:1 uRPF:14 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 240.0.0.0/8
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:14 buckets:1 uRPF:13 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 255.255.255.255/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:16 buckets:1 uRPF:15 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * @cliexend
+ * Example of how to display a summary of all IPv4 FIB tables:
+ * @cliexstart{show ip fib summary}
+ * ipv4-VRF:0, fib_index 0, flow hash: src dst sport dport proto
+ * Prefix length Count
+ * 0 1
+ * 8 2
+ * 32 4
+ * ipv4-VRF:7, fib_index 1, flow hash: src dst sport dport proto
+ * Prefix length Count
+ * 0 1
+ * 8 2
+ * 24 2
+ * 32 4
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ip4_show_fib_command, static) = {
+ .path = "show ip fib",
+ .short_help = "show ip fib [summary] [table <table-id>] [index <fib-id>] [<ip4-addr>[/<mask>]] [mtrie]",
+ .function = ip4_show_fib,
+};
+/* *INDENT-ON* */
diff --git a/src/vnet/fib/ip4_fib.h b/src/vnet/fib/ip4_fib.h
new file mode 100644
index 00000000000..cf312cdc629
--- /dev/null
+++ b/src/vnet/fib/ip4_fib.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief The IPv4 FIB
+ *
+ * FIBs are composed of two prefix databases (aka tables). The non-forwarding
+ * table contains all the routes that the control plane has programmed; the
+ * forwarding table contains the sub-set of those routes that can be used to
+ * forward packets.
+ * In the IPv4 FIB the non-forwarding table is an array of hash tables indexed
+ * by mask length; the forwarding table is an mtrie.
+ *
+ * This IPv4 FIB is used by the protocol-independent FIB, so using these APIs
+ * directly in client code is discouraged. However, this IPv4 FIB can be
+ * used if all the client needs is an IPv4 prefix database.
+ */
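+/*
+ * A minimal usage sketch (hypothetical 'addr' and 'fib_index' values, the
+ * latter e.g. obtained from ip4_fib_table_find_or_create_and_lock()):
+ *
+ *   ip4_fib_t *fib = ip4_fib_get (fib_index);
+ *
+ *   // control-plane: longest-match, bounded at /32, in the
+ *   // non-forwarding table
+ *   fib_node_index_t fei = ip4_fib_table_lookup (fib, &addr, 32);
+ *
+ *   // data-plane: mtrie walk yielding a load-balance index
+ *   index_t lbi = ip4_fib_forwarding_lookup (fib_index, &addr);
+ */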
+
+#ifndef __IP4_FIB_H__
+#define __IP4_FIB_H__
+
+#include <vlib/vlib.h>
+#include <vnet/ip/ip.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_table.h>
+
+extern fib_node_index_t ip4_fib_table_lookup(const ip4_fib_t *fib,
+ const ip4_address_t *addr,
+ u32 len);
+extern fib_node_index_t ip4_fib_table_lookup_exact_match(const ip4_fib_t *fib,
+ const ip4_address_t *addr,
+ u32 len);
+
+extern void ip4_fib_table_entry_remove(ip4_fib_t *fib,
+ const ip4_address_t *addr,
+ u32 len);
+
+extern void ip4_fib_table_entry_insert(ip4_fib_t *fib,
+ const ip4_address_t *addr,
+ u32 len,
+ fib_node_index_t fib_entry_index);
+extern void ip4_fib_table_destroy(ip4_fib_t *fib);
+
+extern void ip4_fib_table_fwding_dpo_update(ip4_fib_t *fib,
+ const ip4_address_t *addr,
+ u32 len,
+ const dpo_id_t *dpo);
+
+extern void ip4_fib_table_fwding_dpo_remove(ip4_fib_t *fib,
+ const ip4_address_t *addr,
+ u32 len,
+ const dpo_id_t *dpo);
+extern u32 ip4_fib_table_lookup_lb (ip4_fib_t *fib,
+ const ip4_address_t * dst);
+
+/**
+ * @brief Get the FIB at the given index
+ */
+static inline ip4_fib_t *
+ip4_fib_get (u32 index)
+{
+ return (&(pool_elt_at_index(ip4_main.fibs, index)->v4));
+}
+
+always_inline u32
+ip4_fib_lookup (ip4_main_t * im, u32 sw_if_index, ip4_address_t * dst)
+{
+ return (ip4_fib_table_lookup_lb(
+ ip4_fib_get(vec_elt (im->fib_index_by_sw_if_index, sw_if_index)),
+ dst));
+}
+
+/**
+ * @brief Get or create an IPv4 fib.
+ *
+ * Get or create an IPv4 fib with the provided table ID.
+ *
+ * @param table_id
+ *      When set to \c ~0, an arbitrary, unused table ID is picked; it can
+ *      be retrieved from the returned fib via \c ip4_fib_get(ret)->table_id.
+ *      Otherwise, the table ID used to retrieve or create the desired fib.
+ * @returns The index of the retrieved or created fib.
+ *
+ */
+extern u32 ip4_fib_table_find_or_create_and_lock(u32 table_id);
+extern u32 ip4_fib_table_create_and_lock(void);
+
+
+static inline
+u32 ip4_fib_index_from_table_id (u32 table_id)
+{
+ ip4_main_t * im = &ip4_main;
+ uword * p;
+
+ p = hash_get (im->fib_index_by_table_id, table_id);
+ if (!p)
+ return ~0;
+
+ return p[0];
+}
+
+extern u32 ip4_fib_table_get_index_for_sw_if_index(u32 sw_if_index);
+
+extern flow_hash_config_t ip4_fib_table_get_flow_hash_config(u32 fib_index);
+
+
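+/**
+ * @brief Data-plane lookup: walk the mtrie one address byte per step
+ * (four steps for IPv4). An empty leaf falls back to the default-route
+ * leaf, so a result is always produced.
+ */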
+always_inline index_t
+ip4_fib_forwarding_lookup (u32 fib_index,
+ const ip4_address_t * addr)
+{
+ ip4_fib_mtrie_leaf_t leaf;
+ ip4_fib_mtrie_t * mtrie;
+
+ mtrie = &ip4_fib_get(fib_index)->mtrie;
+
+ leaf = IP4_FIB_MTRIE_LEAF_ROOT;
+ leaf = ip4_fib_mtrie_lookup_step (mtrie, leaf, addr, 0);
+ leaf = ip4_fib_mtrie_lookup_step (mtrie, leaf, addr, 1);
+ leaf = ip4_fib_mtrie_lookup_step (mtrie, leaf, addr, 2);
+ leaf = ip4_fib_mtrie_lookup_step (mtrie, leaf, addr, 3);
+
+ /* Handle default route. */
+ leaf = (leaf == IP4_FIB_MTRIE_LEAF_EMPTY ? mtrie->default_leaf : leaf);
+
+ return (ip4_fib_mtrie_leaf_get_adj_index(leaf));
+}
+
+
+#endif
+
diff --git a/src/vnet/fib/ip6_fib.c b/src/vnet/fib/ip6_fib.c
new file mode 100644
index 00000000000..d5b9bdcbd52
--- /dev/null
+++ b/src/vnet/fib/ip6_fib.c
@@ -0,0 +1,784 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/fib/fib_table.h>
+
+static void
+vnet_ip6_fib_init (u32 fib_index)
+{
+ fib_prefix_t pfx = {
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_len = 0,
+ .fp_addr = {
+ .ip6 = {
+ { 0, 0, },
+ },
+ }
+ };
+
+ /*
+ * Add the default route.
+ */
+ fib_table_entry_special_add(fib_index,
+ &pfx,
+ FIB_SOURCE_DEFAULT_ROUTE,
+ FIB_ENTRY_FLAG_DROP,
+ ADJ_INDEX_INVALID);
+
+ /*
+ * Add ff02::1:ff00:0/104 via local route for all tables.
+ * This is required for neighbor discovery to work.
+ */
+ ip6_set_solicited_node_multicast_address(&pfx.fp_addr.ip6, 0);
+ pfx.fp_len = 104;
+ fib_table_entry_special_add(fib_index,
+ &pfx,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_LOCAL,
+ ADJ_INDEX_INVALID);
+
+ /*
+ * Add all-routers multicast address via local route for all tables
+ */
+ ip6_set_reserved_multicast_address (&pfx.fp_addr.ip6,
+ IP6_MULTICAST_SCOPE_link_local,
+ IP6_MULTICAST_GROUP_ID_all_routers);
+ pfx.fp_len = 128;
+ fib_table_entry_special_add(fib_index,
+ &pfx,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_LOCAL,
+ ADJ_INDEX_INVALID);
+
+ /*
+ * Add all-nodes multicast address via local route for all tables
+ */
+ ip6_set_reserved_multicast_address (&pfx.fp_addr.ip6,
+ IP6_MULTICAST_SCOPE_link_local,
+ IP6_MULTICAST_GROUP_ID_all_hosts);
+ pfx.fp_len = 128;
+ fib_table_entry_special_add(fib_index,
+ &pfx,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_LOCAL,
+ ADJ_INDEX_INVALID);
+
+ /*
+ * Add all-mldv2 multicast address via local route for all tables
+ */
+ ip6_set_reserved_multicast_address (&pfx.fp_addr.ip6,
+ IP6_MULTICAST_SCOPE_link_local,
+ IP6_MULTICAST_GROUP_ID_mldv2_routers);
+ pfx.fp_len = 128;
+ fib_table_entry_special_add(fib_index,
+ &pfx,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_LOCAL,
+ ADJ_INDEX_INVALID);
+
+ /*
+ * all link local for us
+ */
+ pfx.fp_addr.ip6.as_u64[0] = clib_host_to_net_u64 (0xFE80000000000000ULL);
+ pfx.fp_addr.ip6.as_u64[1] = 0;
+ pfx.fp_len = 10;
+ fib_table_entry_special_add(fib_index,
+ &pfx,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_LOCAL,
+ ADJ_INDEX_INVALID);
+}
+
+static u32
+create_fib_with_table_id (u32 table_id)
+{
+ fib_table_t *fib_table;
+
+ pool_get_aligned(ip6_main.fibs, fib_table, CLIB_CACHE_LINE_BYTES);
+ memset(fib_table, 0, sizeof(*fib_table));
+
+ fib_table->ft_proto = FIB_PROTOCOL_IP6;
+ fib_table->ft_index =
+ fib_table->v6.index =
+ (fib_table - ip6_main.fibs);
+
+ hash_set(ip6_main.fib_index_by_table_id, table_id, fib_table->ft_index);
+
+ fib_table->ft_table_id =
+ fib_table->v6.table_id =
+ table_id;
+ fib_table->ft_flow_hash_config =
+ fib_table->v6.flow_hash_config =
+ IP_FLOW_HASH_DEFAULT;
+
+ vnet_ip6_fib_init(fib_table->ft_index);
+ fib_table_lock(fib_table->ft_index, FIB_PROTOCOL_IP6);
+
+ return (fib_table->ft_index);
+}
+
+u32
+ip6_fib_table_find_or_create_and_lock (u32 table_id)
+{
+ uword * p;
+
+ p = hash_get (ip6_main.fib_index_by_table_id, table_id);
+ if (NULL == p)
+ return create_fib_with_table_id(table_id);
+
+ fib_table_lock(p[0], FIB_PROTOCOL_IP6);
+
+ return (p[0]);
+}
+
+u32
+ip6_fib_table_create_and_lock (void)
+{
+ return (create_fib_with_table_id(~0));
+}
+
+void
+ip6_fib_table_destroy (u32 fib_index)
+{
+ fib_prefix_t pfx = {
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_len = 0,
+ .fp_addr = {
+ .ip6 = {
+ { 0, 0, },
+ },
+ }
+ };
+
+ /*
+ * the default route.
+ */
+ fib_table_entry_special_remove(fib_index,
+ &pfx,
+ FIB_SOURCE_DEFAULT_ROUTE);
+
+
+ /*
+ * ff02::1:ff00:0/104
+ */
+ ip6_set_solicited_node_multicast_address(&pfx.fp_addr.ip6, 0);
+ pfx.fp_len = 104;
+ fib_table_entry_special_remove(fib_index,
+ &pfx,
+ FIB_SOURCE_SPECIAL);
+
+ /*
+ * all-routers multicast address
+ */
+ ip6_set_reserved_multicast_address (&pfx.fp_addr.ip6,
+ IP6_MULTICAST_SCOPE_link_local,
+ IP6_MULTICAST_GROUP_ID_all_routers);
+ pfx.fp_len = 128;
+ fib_table_entry_special_remove(fib_index,
+ &pfx,
+ FIB_SOURCE_SPECIAL);
+
+ /*
+ * all-nodes multicast address
+ */
+ ip6_set_reserved_multicast_address (&pfx.fp_addr.ip6,
+ IP6_MULTICAST_SCOPE_link_local,
+ IP6_MULTICAST_GROUP_ID_all_hosts);
+ pfx.fp_len = 128;
+ fib_table_entry_special_remove(fib_index,
+ &pfx,
+ FIB_SOURCE_SPECIAL);
+
+ /*
+ * all-mldv2 multicast address
+ */
+ ip6_set_reserved_multicast_address (&pfx.fp_addr.ip6,
+ IP6_MULTICAST_SCOPE_link_local,
+ IP6_MULTICAST_GROUP_ID_mldv2_routers);
+ pfx.fp_len = 128;
+ fib_table_entry_special_remove(fib_index,
+ &pfx,
+ FIB_SOURCE_SPECIAL);
+
+ /*
+ * all link local
+ */
+ pfx.fp_addr.ip6.as_u64[0] = clib_host_to_net_u64 (0xFE80000000000000ULL);
+ pfx.fp_addr.ip6.as_u64[1] = 0;
+ pfx.fp_len = 10;
+ fib_table_entry_special_remove(fib_index,
+ &pfx,
+ FIB_SOURCE_SPECIAL);
+
+ fib_table_t *fib_table = fib_table_get(fib_index, FIB_PROTOCOL_IP6);
+ fib_source_t source;
+
+ /*
+ * validate no more routes.
+ */
+ ASSERT(0 == fib_table->ft_total_route_counts);
+ FOR_EACH_FIB_SOURCE(source)
+ {
+ ASSERT(0 == fib_table->ft_src_route_counts[source]);
+ }
+
+ if (~0 != fib_table->ft_table_id)
+ {
+ hash_unset (ip6_main.fib_index_by_table_id, fib_table->ft_table_id);
+ }
+ pool_put(ip6_main.fibs, fib_table);
+}
+
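+/**
+ * @brief Control-plane longest-prefix match, bounded by 'len'.
+ *
+ * Walk the in-use prefix lengths from longest to shortest, skip any
+ * longer than 'len', and probe the non-forwarding hash once per
+ * remaining length until a match is found.
+ */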
+fib_node_index_t
+ip6_fib_table_lookup (u32 fib_index,
+ const ip6_address_t *addr,
+ u32 len)
+{
+ const ip6_fib_table_instance_t *table;
+ BVT(clib_bihash_kv) kv, value;
+ int i, n_p, rv;
+ u64 fib;
+
+ table = &ip6_main.ip6_table[IP6_FIB_TABLE_NON_FWDING];
+ n_p = vec_len (table->prefix_lengths_in_search_order);
+
+ kv.key[0] = addr->as_u64[0];
+ kv.key[1] = addr->as_u64[1];
+ fib = ((u64)((fib_index))<<32);
+
+ /*
+     * Start the search from a mask of the same length or shorter;
+     * we don't want matches longer than the mask passed.
+ */
+ i = 0;
+ while (i < n_p && table->prefix_lengths_in_search_order[i] > len)
+ {
+ i++;
+ }
+
+ for (; i < n_p; i++)
+ {
+ int dst_address_length = table->prefix_lengths_in_search_order[i];
+ ip6_address_t * mask = &ip6_main.fib_masks[dst_address_length];
+
+ ASSERT(dst_address_length >= 0 && dst_address_length <= 128);
+      /* Lengths are decreasing, so each successive mask clears a
+       * superset of bits; ANDing into the key in place is safe. */
+ kv.key[0] &= mask->as_u64[0];
+ kv.key[1] &= mask->as_u64[1];
+ kv.key[2] = fib | dst_address_length;
+
+ rv = BV(clib_bihash_search_inline_2)(&table->ip6_hash, &kv, &value);
+ if (rv == 0)
+ return value.value;
+ }
+
+ return (FIB_NODE_INDEX_INVALID);
+}
+
+fib_node_index_t
+ip6_fib_table_lookup_exact_match (u32 fib_index,
+ const ip6_address_t *addr,
+ u32 len)
+{
+ const ip6_fib_table_instance_t *table;
+ BVT(clib_bihash_kv) kv, value;
+ ip6_address_t *mask;
+ u64 fib;
+ int rv;
+
+ table = &ip6_main.ip6_table[IP6_FIB_TABLE_NON_FWDING];
+ mask = &ip6_main.fib_masks[len];
+ fib = ((u64)((fib_index))<<32);
+
+ kv.key[0] = addr->as_u64[0] & mask->as_u64[0];
+ kv.key[1] = addr->as_u64[1] & mask->as_u64[1];
+ kv.key[2] = fib | len;
+
+ rv = BV(clib_bihash_search_inline_2)(&table->ip6_hash, &kv, &value);
+ if (rv == 0)
+ return value.value;
+
+ return (FIB_NODE_INDEX_INVALID);
+}
+
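+/**
+ * @brief Rebuild the longest-first list of in-use prefix lengths.
+ *
+ * The bitmap stores bit (128 - len) for each in-use length, so walking
+ * the bitmap in ascending bit order yields the lengths in descending
+ * order; e.g. in-use lengths {0, 64, 128} become search order
+ * 128, 64, 0 (longest prefix first).
+ */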
+static void
+compute_prefix_lengths_in_search_order (ip6_fib_table_instance_t *table)
+{
+ int i;
+ vec_reset_length (table->prefix_lengths_in_search_order);
+ /* Note: bitmap reversed so this is in fact a longest prefix match */
+ clib_bitmap_foreach (i, table->non_empty_dst_address_length_bitmap,
+ ({
+ int dst_address_length = 128 - i;
+ vec_add1(table->prefix_lengths_in_search_order, dst_address_length);
+ }));
+}
+
+void
+ip6_fib_table_entry_remove (u32 fib_index,
+ const ip6_address_t *addr,
+ u32 len)
+{
+ ip6_fib_table_instance_t *table;
+ BVT(clib_bihash_kv) kv;
+ ip6_address_t *mask;
+ u64 fib;
+
+ table = &ip6_main.ip6_table[IP6_FIB_TABLE_NON_FWDING];
+ mask = &ip6_main.fib_masks[len];
+ fib = ((u64)((fib_index))<<32);
+
+ kv.key[0] = addr->as_u64[0] & mask->as_u64[0];
+ kv.key[1] = addr->as_u64[1] & mask->as_u64[1];
+ kv.key[2] = fib | len;
+
+ BV(clib_bihash_add_del)(&table->ip6_hash, &kv, 0);
+
+ /* refcount accounting */
+ ASSERT (table->dst_address_length_refcounts[len] > 0);
+ if (--table->dst_address_length_refcounts[len] == 0)
+ {
+ table->non_empty_dst_address_length_bitmap =
+ clib_bitmap_set (table->non_empty_dst_address_length_bitmap,
+ 128 - len, 0);
+ compute_prefix_lengths_in_search_order (table);
+ }
+}
+
+void
+ip6_fib_table_entry_insert (u32 fib_index,
+ const ip6_address_t *addr,
+ u32 len,
+ fib_node_index_t fib_entry_index)
+{
+ ip6_fib_table_instance_t *table;
+ BVT(clib_bihash_kv) kv;
+ ip6_address_t *mask;
+ u64 fib;
+
+ table = &ip6_main.ip6_table[IP6_FIB_TABLE_NON_FWDING];
+ mask = &ip6_main.fib_masks[len];
+ fib = ((u64)((fib_index))<<32);
+
+ kv.key[0] = addr->as_u64[0] & mask->as_u64[0];
+ kv.key[1] = addr->as_u64[1] & mask->as_u64[1];
+ kv.key[2] = fib | len;
+ kv.value = fib_entry_index;
+
+ BV(clib_bihash_add_del)(&table->ip6_hash, &kv, 1);
+
+ table->dst_address_length_refcounts[len]++;
+
+ table->non_empty_dst_address_length_bitmap =
+ clib_bitmap_set (table->non_empty_dst_address_length_bitmap,
+ 128 - len, 1);
+ compute_prefix_lengths_in_search_order (table);
+}
+
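+/**
+ * @brief Data-plane longest-prefix match in the forwarding table.
+ *
+ * Unlike the non-forwarding lookup there is no length bound: the first
+ * hit in longest-to-shortest order wins, and the always-present default
+ * route guarantees a result.
+ */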
+u32
+ip6_fib_table_fwding_lookup (ip6_main_t * im,
+ u32 fib_index,
+ const ip6_address_t * dst)
+{
+ const ip6_fib_table_instance_t *table;
+ int i, len;
+ int rv;
+ BVT(clib_bihash_kv) kv, value;
+ u64 fib;
+
+ table = &ip6_main.ip6_table[IP6_FIB_TABLE_FWDING];
+ len = vec_len (table->prefix_lengths_in_search_order);
+
+ kv.key[0] = dst->as_u64[0];
+ kv.key[1] = dst->as_u64[1];
+ fib = ((u64)((fib_index))<<32);
+
+ for (i = 0; i < len; i++)
+ {
+ int dst_address_length = table->prefix_lengths_in_search_order[i];
+ ip6_address_t * mask = &ip6_main.fib_masks[dst_address_length];
+
+ ASSERT(dst_address_length >= 0 && dst_address_length <= 128);
+      /* Lengths are decreasing, so each successive mask clears a
+       * superset of bits; ANDing into the key in place is safe. */
+ kv.key[0] &= mask->as_u64[0];
+ kv.key[1] &= mask->as_u64[1];
+ kv.key[2] = fib | dst_address_length;
+
+ rv = BV(clib_bihash_search_inline_2)(&table->ip6_hash, &kv, &value);
+ if (rv == 0)
+ return value.value;
+ }
+
+ /* default route is always present */
+ ASSERT(0);
+ return 0;
+}
+
+u32 ip6_fib_table_fwding_lookup_with_if_index (ip6_main_t * im,
+ u32 sw_if_index,
+ const ip6_address_t * dst)
+{
+ u32 fib_index = vec_elt (im->fib_index_by_sw_if_index, sw_if_index);
+ return ip6_fib_table_fwding_lookup(im, fib_index, dst);
+}
+
+flow_hash_config_t
+ip6_fib_table_get_flow_hash_config (u32 fib_index)
+{
+ return (ip6_fib_get(fib_index)->flow_hash_config);
+}
+
+u32
+ip6_fib_table_get_index_for_sw_if_index (u32 sw_if_index)
+{
+ if (sw_if_index >= vec_len(ip6_main.fib_index_by_sw_if_index))
+ {
+ /*
+ * This is the case for interfaces that are not yet mapped to
+       * an IP table.
+ */
+ return (~0);
+ }
+ return (ip6_main.fib_index_by_sw_if_index[sw_if_index]);
+}
+
+void
+ip6_fib_table_fwding_dpo_update (u32 fib_index,
+ const ip6_address_t *addr,
+ u32 len,
+ const dpo_id_t *dpo)
+{
+ ip6_fib_table_instance_t *table;
+ BVT(clib_bihash_kv) kv;
+ ip6_address_t *mask;
+ u64 fib;
+
+ table = &ip6_main.ip6_table[IP6_FIB_TABLE_FWDING];
+ mask = &ip6_main.fib_masks[len];
+ fib = ((u64)((fib_index))<<32);
+
+ kv.key[0] = addr->as_u64[0] & mask->as_u64[0];
+ kv.key[1] = addr->as_u64[1] & mask->as_u64[1];
+ kv.key[2] = fib | len;
+ kv.value = dpo->dpoi_index;
+
+ BV(clib_bihash_add_del)(&table->ip6_hash, &kv, 1);
+
+ table->dst_address_length_refcounts[len]++;
+
+ table->non_empty_dst_address_length_bitmap =
+ clib_bitmap_set (table->non_empty_dst_address_length_bitmap,
+ 128 - len, 1);
+ compute_prefix_lengths_in_search_order (table);
+}
+
+void
+ip6_fib_table_fwding_dpo_remove (u32 fib_index,
+ const ip6_address_t *addr,
+ u32 len,
+ const dpo_id_t *dpo)
+{
+ ip6_fib_table_instance_t *table;
+ BVT(clib_bihash_kv) kv;
+ ip6_address_t *mask;
+ u64 fib;
+
+ table = &ip6_main.ip6_table[IP6_FIB_TABLE_FWDING];
+ mask = &ip6_main.fib_masks[len];
+ fib = ((u64)((fib_index))<<32);
+
+ kv.key[0] = addr->as_u64[0] & mask->as_u64[0];
+ kv.key[1] = addr->as_u64[1] & mask->as_u64[1];
+ kv.key[2] = fib | len;
+ kv.value = dpo->dpoi_index;
+
+ BV(clib_bihash_add_del)(&table->ip6_hash, &kv, 0);
+
+ /* refcount accounting */
+ ASSERT (table->dst_address_length_refcounts[len] > 0);
+ if (--table->dst_address_length_refcounts[len] == 0)
+ {
+ table->non_empty_dst_address_length_bitmap =
+ clib_bitmap_set (table->non_empty_dst_address_length_bitmap,
+ 128 - len, 0);
+ compute_prefix_lengths_in_search_order (table);
+ }
+}
+
+typedef struct ip6_fib_show_ctx_t_ {
+ u32 fib_index;
+ fib_node_index_t *entries;
+} ip6_fib_show_ctx_t;
+
+static void
+ip6_fib_table_collect_entries (clib_bihash_kv_24_8_t * kvp,
+ void *arg)
+{
+ ip6_fib_show_ctx_t *ctx = arg;
+
+ if ((kvp->key[2] >> 32) == ctx->fib_index)
+ {
+ vec_add1(ctx->entries, kvp->value);
+ }
+}
+
+static void
+ip6_fib_table_show_all (ip6_fib_t *fib,
+ vlib_main_t * vm)
+{
+ fib_node_index_t *fib_entry_index;
+ ip6_fib_show_ctx_t ctx = {
+ .fib_index = fib->index,
+ .entries = NULL,
+ };
+ ip6_main_t *im = &ip6_main;
+
+ BV(clib_bihash_foreach_key_value_pair)(&im->ip6_table[IP6_FIB_TABLE_NON_FWDING].ip6_hash,
+ ip6_fib_table_collect_entries,
+ &ctx);
+
+ vec_sort_with_function(ctx.entries, fib_entry_cmp_for_sort);
+
+ vec_foreach(fib_entry_index, ctx.entries)
+ {
+ vlib_cli_output(vm, "%U",
+ format_fib_entry,
+ *fib_entry_index,
+ FIB_ENTRY_FORMAT_BRIEF);
+ }
+
+ vec_free(ctx.entries);
+}
+
+static void
+ip6_fib_table_show_one (ip6_fib_t *fib,
+ vlib_main_t * vm,
+ ip6_address_t *address,
+ u32 mask_len)
+{
+ vlib_cli_output(vm, "%U",
+ format_fib_entry,
+ ip6_fib_table_lookup(fib->index, address, mask_len),
+ FIB_ENTRY_FORMAT_DETAIL);
+}
+
+typedef struct {
+ u32 fib_index;
+ u64 count_by_prefix_length[129];
+} count_routes_in_fib_at_prefix_length_arg_t;
+
+static void count_routes_in_fib_at_prefix_length
+(BVT(clib_bihash_kv) * kvp, void *arg)
+{
+ count_routes_in_fib_at_prefix_length_arg_t * ap = arg;
+ int mask_width;
+
+ if ((kvp->key[2]>>32) != ap->fib_index)
+ return;
+
+ mask_width = kvp->key[2] & 0xFF;
+
+ ap->count_by_prefix_length[mask_width]++;
+}
+
+static clib_error_t *
+ip6_show_fib (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ count_routes_in_fib_at_prefix_length_arg_t _ca, *ca = &_ca;
+ ip6_main_t * im6 = &ip6_main;
+ fib_table_t *fib_table;
+ ip6_fib_t * fib;
+ int verbose, matching;
+ ip6_address_t matching_address;
+ u32 mask_len = 128;
+ int table_id = -1, fib_index = ~0;
+
+ verbose = 1;
+ matching = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "brief") ||
+ unformat (input, "summary") ||
+ unformat (input, "sum"))
+ verbose = 0;
+
+ else if (unformat (input, "%U/%d",
+ unformat_ip6_address, &matching_address, &mask_len))
+ matching = 1;
+
+ else if (unformat (input, "%U", unformat_ip6_address, &matching_address))
+ matching = 1;
+
+ else if (unformat (input, "table %d", &table_id))
+ ;
+ else if (unformat (input, "index %d", &fib_index))
+ ;
+ else
+ break;
+ }
+
+ pool_foreach (fib_table, im6->fibs,
+ ({
+ fib = &(fib_table->v6);
+ if (table_id >= 0 && table_id != (int)fib->table_id)
+ continue;
+ if (fib_index != ~0 && fib_index != (int)fib->index)
+ continue;
+
+ vlib_cli_output (vm, "%s, fib_index %d, flow hash: %U",
+ fib_table->ft_desc, fib->index,
+ format_ip_flow_hash_config, fib->flow_hash_config);
+
+ /* Show summary? */
+ if (! verbose)
+ {
+ BVT(clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_NON_FWDING].ip6_hash;
+ int len;
+
+ vlib_cli_output (vm, "%=20s%=16s", "Prefix length", "Count");
+
+ memset (ca, 0, sizeof(*ca));
+ ca->fib_index = fib->index;
+
+ BV(clib_bihash_foreach_key_value_pair)
+ (h, count_routes_in_fib_at_prefix_length, ca);
+
+ for (len = 128; len >= 0; len--)
+ {
+ if (ca->count_by_prefix_length[len])
+ vlib_cli_output (vm, "%=20d%=16lld",
+ len, ca->count_by_prefix_length[len]);
+ }
+ continue;
+ }
+
+ if (!matching)
+ {
+ ip6_fib_table_show_all(fib, vm);
+ }
+ else
+ {
+ ip6_fib_table_show_one(fib, vm, &matching_address, mask_len);
+ }
+ }));
+
+ return 0;
+}
+
+/*?
+ * This command displays the IPv6 FIB Tables (VRF Tables) and the route
+ * entries for each table.
+ *
+ * @note This command will run for a long time when the FIB tables contain
+ * millions of entries. For those scenarios, consider displaying
+ * in summary mode.
+ *
+ * @cliexpar
+ * @parblock
+ * Example of how to display all the IPv6 FIB tables:
+ * @cliexstart{show ip6 fib}
+ * ipv6-VRF:0, fib_index 0, flow hash: src dst sport dport proto
+ * @::/0
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:5 buckets:1 uRPF:5 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * fe80::/10
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:10 buckets:1 uRPF:10 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * ff02::1/128
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:8 buckets:1 uRPF:8 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * ff02::2/128
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:7 buckets:1 uRPF:7 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * ff02::16/128
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:9 buckets:1 uRPF:9 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * ff02::1:ff00:0/104
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:6 buckets:1 uRPF:6 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * ipv6-VRF:8, fib_index 1, flow hash: src dst sport dport proto
+ * @::/0
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:21 buckets:1 uRPF:20 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * @::a:1:1:0:4/126
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:27 buckets:1 uRPF:26 to:[0:0]]
+ * [0] [@4]: ipv6-glean: af_packet0
+ * @::a:1:1:0:7/128
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:28 buckets:1 uRPF:27 to:[0:0]]
+ * [0] [@2]: dpo-receive: @::a:1:1:0:7 on af_packet0
+ * fe80::/10
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:26 buckets:1 uRPF:25 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * fe80::fe:3eff:fe3e:9222/128
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:29 buckets:1 uRPF:28 to:[0:0]]
+ * [0] [@2]: dpo-receive: fe80::fe:3eff:fe3e:9222 on af_packet0
+ * ff02::1/128
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:24 buckets:1 uRPF:23 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * ff02::2/128
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:23 buckets:1 uRPF:22 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * ff02::16/128
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:25 buckets:1 uRPF:24 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * ff02::1:ff00:0/104
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:22 buckets:1 uRPF:21 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * @cliexend
+ *
+ * Example of how to display a summary of all IPv6 FIB tables:
+ * @cliexstart{show ip6 fib summary}
+ * ipv6-VRF:0, fib_index 0, flow hash: src dst sport dport proto
+ * Prefix length Count
+ * 128 3
+ * 104 1
+ * 10 1
+ * 0 1
+ * ipv6-VRF:8, fib_index 1, flow hash: src dst sport dport proto
+ * Prefix length Count
+ * 128 5
+ * 126 1
+ * 104 1
+ * 10 1
+ * 0 1
+ * @cliexend
+ * @endparblock
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ip6_show_fib_command, static) = {
+ .path = "show ip6 fib",
+ .short_help = "show ip6 fib [summary] [table <table-id>] [index <fib-id>] [<ip6-addr>[/<width>]]",
+ .function = ip6_show_fib,
+};
+/* *INDENT-ON* */
diff --git a/src/vnet/fib/ip6_fib.h b/src/vnet/fib/ip6_fib.h
new file mode 100644
index 00000000000..f6af993a3c2
--- /dev/null
+++ b/src/vnet/fib/ip6_fib.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IP6_FIB_H__
+#define __IP6_FIB_H__
+
+#include <vlib/vlib.h>
+#include <vnet/ip/format.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/ip/lookup.h>
+#include <vnet/dpo/load_balance.h>
+
+extern fib_node_index_t ip6_fib_table_lookup(u32 fib_index,
+ const ip6_address_t *addr,
+ u32 len);
+extern fib_node_index_t ip6_fib_table_lookup_exact_match(u32 fib_index,
+ const ip6_address_t *addr,
+ u32 len);
+
+extern void ip6_fib_table_entry_remove(u32 fib_index,
+ const ip6_address_t *addr,
+ u32 len);
+
+extern void ip6_fib_table_entry_insert(u32 fib_index,
+ const ip6_address_t *addr,
+ u32 len,
+ fib_node_index_t fib_entry_index);
+extern void ip6_fib_table_destroy(u32 fib_index);
+
+extern void ip6_fib_table_fwding_dpo_update(u32 fib_index,
+ const ip6_address_t *addr,
+ u32 len,
+ const dpo_id_t *dpo);
+
+extern void ip6_fib_table_fwding_dpo_remove(u32 fib_index,
+ const ip6_address_t *addr,
+ u32 len,
+ const dpo_id_t *dpo);
+
+u32 ip6_fib_table_fwding_lookup_with_if_index(ip6_main_t * im,
+ u32 sw_if_index,
+ const ip6_address_t * dst);
+u32 ip6_fib_table_fwding_lookup(ip6_main_t * im,
+ u32 fib_index,
+ const ip6_address_t * dst);
+
+/**
+ * @brief Return the index of the adjacency, if any, that the source
+ * address' load-balance stacks on; the result is cached in the buffer
+ * metadata.
+ */
+always_inline u32
+ip6_src_lookup_for_packet (ip6_main_t * im,
+ vlib_buffer_t * b,
+ ip6_header_t * i)
+{
+ if (vnet_buffer (b)->ip.adj_index[VLIB_RX] == ~0)
+ {
+ const dpo_id_t *dpo;
+ index_t lbi;
+
+ lbi = ip6_fib_table_fwding_lookup_with_if_index(
+ im,
+ vnet_buffer (b)->sw_if_index[VLIB_RX],
+ &i->src_address);
+
+ dpo = load_balance_get_bucket_i(load_balance_get(lbi), 0);
+
+ if (dpo_is_adj(dpo))
+ {
+ vnet_buffer (b)->ip.adj_index[VLIB_RX] = dpo->dpoi_index;
+ }
+ }
+ return vnet_buffer (b)->ip.adj_index[VLIB_RX];
+}
+
+/**
+ * \brief Get or create an IPv6 fib.
+ *
+ * Get or create an IPv6 fib with the provided table ID.
+ *
+ * \param table_id
+ *      When set to \c ~0, an arbitrary, unused table ID is picked; it can
+ *      be retrieved from the returned fib via \c ip6_fib_get(ret)->table_id.
+ *      Otherwise, the table ID used to retrieve or create the desired fib.
+ * \returns The index of the retrieved or created fib.
+ *
+ */
+extern u32 ip6_fib_table_find_or_create_and_lock(u32 table_id);
+extern u32 ip6_fib_table_create_and_lock(void);
+
+static inline ip6_fib_t *
+ip6_fib_get (fib_node_index_t index)
+{
+ ASSERT(!pool_is_free_index(ip6_main.fibs, index));
+ return (&pool_elt_at_index (ip6_main.fibs, index)->v6);
+}
+
+static inline
+u32 ip6_fib_index_from_table_id (u32 table_id)
+{
+ ip6_main_t * im = &ip6_main;
+ uword * p;
+
+ p = hash_get (im->fib_index_by_table_id, table_id);
+ if (!p)
+ return ~0;
+
+ return p[0];
+}
+
+extern u32 ip6_fib_table_get_index_for_sw_if_index(u32 sw_if_index);
+
+extern flow_hash_config_t ip6_fib_table_get_flow_hash_config(u32 fib_index);
+
+#endif
+
diff --git a/src/vnet/fib/mpls_fib.c b/src/vnet/fib/mpls_fib.c
new file mode 100644
index 00000000000..6a9b1ac2989
--- /dev/null
+++ b/src/vnet/fib/mpls_fib.c
@@ -0,0 +1,439 @@
+/*
+ * mpls_fib.h: The Label/MPLS FIB
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * An MPLS_FIB table.
+ *
+ * The entries in the table are programmed with one or more MOIs. These MOIs
+ * may result in different forwarding actions for end-of-stack (EOS) and non-EOS
+ * packets. Whether the two actions are the same more often than they are
+ * different, or vice versa, is a function of the deployment in which the router
+ * is used and thus not predictable.
+ * The design choice to make with an MPLS_FIB table is:
+ * 1 - 20 bit key: label only.
+ * When the EOS and non-EOS actions differ the result is an 'EOS-choice' object.
+ * 2 - 21 bit key: label and EOS-bit.
+ * The result is then the specific action based on the EOS-bit.
+ *
+ * 20 bit key:
+ * Advantages:
+ * - lower memory overhead, since there are fewer DB entries.
+ * Disadvantages:
+ * - slower DP performance when the chains differ, as more objects are
+ * encountered in the switch path.
+ *
+ * 21 bit key:
+ * Advantages:
+ * - faster DP performance.
+ * Disadvantages:
+ * - increased memory footprint.
+ *
+ * Switching between schemes based on observed/measured action similarity is not
+ * considered, on the grounds of complexity and flip-flopping.
+ *
+ * VPP mantra - favour performance over memory. We choose a 21 bit key.
+ */
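+/*
+ * With the 21 bit key the data-path table is a flat array of
+ * MPLS_FIB_DB_SIZE load-balance indices and the key is simply
+ * (label << 1 | eos); e.g. label 16 with EOS set yields key 33.
+ * See mpls_fib_entry_mk_key() below.
+ */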
+
+#include <vnet/fib/fib_table.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/drop_dpo.h>
+#include <vnet/dpo/punt_dpo.h>
+#include <vnet/dpo/lookup_dpo.h>
+#include <vnet/mpls/mpls.h>
+
+/**
+ * All lookups in an MPLS_FIB table must result in a DPO of type load-balance.
+ * This is the default result which links to drop
+ */
+static index_t mpls_fib_drop_dpo_index = INDEX_INVALID;
+
+/**
+ * FIXME
+ */
+#define MPLS_FLOW_HASH_DEFAULT 0
+
+static inline u32
+mpls_fib_entry_mk_key (mpls_label_t label,
+ mpls_eos_bit_t eos)
+{
+ ASSERT(eos <= 1);
+ return (label << 1 | eos);
+}
+
+u32
+mpls_fib_index_from_table_id (u32 table_id)
+{
+ mpls_main_t *mm = &mpls_main;
+ uword * p;
+
+ p = hash_get (mm->fib_index_by_table_id, table_id);
+ if (!p)
+ return FIB_NODE_INDEX_INVALID;
+
+ return p[0];
+}
+
+static u32
+mpls_fib_create_with_table_id (u32 table_id)
+{
+ dpo_id_t dpo = DPO_INVALID;
+ fib_table_t *fib_table;
+ mpls_eos_bit_t eos;
+ mpls_fib_t *mf;
+ int i;
+
+ pool_get_aligned(mpls_main.fibs, fib_table, CLIB_CACHE_LINE_BYTES);
+ memset(fib_table, 0, sizeof(*fib_table));
+
+ fib_table->ft_proto = FIB_PROTOCOL_MPLS;
+ fib_table->ft_index =
+ (fib_table - mpls_main.fibs);
+
+ hash_set (mpls_main.fib_index_by_table_id, table_id, fib_table->ft_index);
+
+ fib_table->ft_table_id =
+ table_id;
+ fib_table->ft_flow_hash_config =
+ MPLS_FLOW_HASH_DEFAULT;
+ fib_table->v4.fwd_classify_table_index = ~0;
+ fib_table->v4.rev_classify_table_index = ~0;
+
+ fib_table_lock(fib_table->ft_index, FIB_PROTOCOL_MPLS);
+
+ if (INDEX_INVALID == mpls_fib_drop_dpo_index)
+ {
+ mpls_fib_drop_dpo_index = load_balance_create(1, DPO_PROTO_MPLS, 0);
+ load_balance_set_bucket(mpls_fib_drop_dpo_index,
+ 0,
+ drop_dpo_get(DPO_PROTO_MPLS));
+ }
+
+ mf = &fib_table->mpls;
+ mf->mf_entries = hash_create(0, sizeof(fib_node_index_t));
+ for (i = 0; i < MPLS_FIB_DB_SIZE; i++)
+ {
+ /*
+ * initialise each DPO in the data-path lookup table
+ * to be the special MPLS drop
+ */
+ mf->mf_lbs[i] = mpls_fib_drop_dpo_index;
+ }
+
+ /*
+ * non-default forwarding for the special labels.
+ */
+ fib_prefix_t prefix = {
+ .fp_proto = FIB_PROTOCOL_MPLS,
+ .fp_payload_proto = DPO_PROTO_MPLS,
+ };
+
+ /*
+ * PUNT the router alert, both EOS and non-eos
+ */
+ prefix.fp_label = MPLS_IETF_ROUTER_ALERT_LABEL;
+ FOR_EACH_MPLS_EOS_BIT(eos)
+ {
+ prefix.fp_eos = eos;
+ fib_table_entry_special_dpo_add(fib_table->ft_index,
+ &prefix,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ punt_dpo_get(DPO_PROTO_MPLS));
+ }
+
+ /*
+ * IPv4 explicit NULL EOS lookup in the interface's IPv4 table
+ */
+ prefix.fp_label = MPLS_IETF_IPV4_EXPLICIT_NULL_LABEL;
+ prefix.fp_payload_proto = DPO_PROTO_IP4;
+ prefix.fp_eos = MPLS_EOS;
+
+ lookup_dpo_add_or_lock_w_fib_index(0, // unused
+ DPO_PROTO_IP4,
+ LOOKUP_INPUT_DST_ADDR,
+ LOOKUP_TABLE_FROM_INPUT_INTERFACE,
+ &dpo);
+ fib_table_entry_special_dpo_add(fib_table->ft_index,
+ &prefix,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ &dpo);
+
+ prefix.fp_payload_proto = DPO_PROTO_MPLS;
+ prefix.fp_eos = MPLS_NON_EOS;
+
+    lookup_dpo_add_or_lock_w_fib_index(0, // unused
+ DPO_PROTO_MPLS,
+ LOOKUP_INPUT_DST_ADDR,
+ LOOKUP_TABLE_FROM_INPUT_INTERFACE,
+ &dpo);
+ fib_table_entry_special_dpo_add(fib_table->ft_index,
+ &prefix,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ &dpo);
+
+ /*
+ * IPv6 explicit NULL EOS lookup in the interface's IPv6 table
+ */
+ prefix.fp_label = MPLS_IETF_IPV6_EXPLICIT_NULL_LABEL;
+ prefix.fp_payload_proto = DPO_PROTO_IP6;
+ prefix.fp_eos = MPLS_EOS;
+
+ lookup_dpo_add_or_lock_w_fib_index(0, //unused
+ DPO_PROTO_IP6,
+ LOOKUP_INPUT_DST_ADDR,
+ LOOKUP_TABLE_FROM_INPUT_INTERFACE,
+ &dpo);
+ fib_table_entry_special_dpo_add(fib_table->ft_index,
+ &prefix,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ &dpo);
+
+ prefix.fp_payload_proto = DPO_PROTO_MPLS;
+ prefix.fp_eos = MPLS_NON_EOS;
+    lookup_dpo_add_or_lock_w_fib_index(0, // unused
+ DPO_PROTO_MPLS,
+ LOOKUP_INPUT_DST_ADDR,
+ LOOKUP_TABLE_FROM_INPUT_INTERFACE,
+ &dpo);
+ fib_table_entry_special_dpo_add(fib_table->ft_index,
+ &prefix,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ &dpo);
+
+ return (fib_table->ft_index);
+}
+
+u32
+mpls_fib_table_find_or_create_and_lock (u32 table_id)
+{
+ u32 index;
+
+ index = mpls_fib_index_from_table_id(table_id);
+ if (~0 == index)
+ return mpls_fib_create_with_table_id(table_id);
+
+ fib_table_lock(index, FIB_PROTOCOL_MPLS);
+
+ return (index);
+}
+u32
+mpls_fib_table_create_and_lock (void)
+{
+ return (mpls_fib_create_with_table_id(~0));
+}
+
+void
+mpls_fib_table_destroy (mpls_fib_t *mf)
+{
+ fib_table_t *fib_table = (fib_table_t*)mf;
+ fib_prefix_t prefix = {
+ .fp_proto = FIB_PROTOCOL_MPLS,
+ };
+ mpls_label_t special_labels[] = {
+ MPLS_IETF_ROUTER_ALERT_LABEL,
+ MPLS_IETF_IPV6_EXPLICIT_NULL_LABEL,
+ MPLS_IETF_IPV4_EXPLICIT_NULL_LABEL,
+ };
+ mpls_eos_bit_t eos;
+ u32 ii;
+
+ for (ii = 0; ii < ARRAY_LEN(special_labels); ii++)
+ {
+ FOR_EACH_MPLS_EOS_BIT(eos)
+ {
+ prefix.fp_label = special_labels[ii];
+ prefix.fp_eos = eos;
+
+ fib_table_entry_delete(fib_table->ft_index,
+ &prefix,
+ FIB_SOURCE_SPECIAL);
+ }
+ }
+ if (~0 != fib_table->ft_table_id)
+ {
+ hash_unset(mpls_main.fib_index_by_table_id,
+ fib_table->ft_table_id);
+ }
+ hash_delete(mf->mf_entries);
+
+ pool_put(mpls_main.fibs, fib_table);
+}
+
+fib_node_index_t
+mpls_fib_table_lookup (const mpls_fib_t *mf,
+ mpls_label_t label,
+ mpls_eos_bit_t eos)
+{
+ uword *p;
+
+ p = hash_get(mf->mf_entries, mpls_fib_entry_mk_key(label, eos));
+
+ if (NULL == p)
+ return FIB_NODE_INDEX_INVALID;
+
+ return p[0];
+}
+
+void
+mpls_fib_table_entry_insert (mpls_fib_t *mf,
+ mpls_label_t label,
+ mpls_eos_bit_t eos,
+ fib_node_index_t lfei)
+{
+ hash_set(mf->mf_entries, mpls_fib_entry_mk_key(label, eos), lfei);
+}
+
+void
+mpls_fib_table_entry_remove (mpls_fib_t *mf,
+ mpls_label_t label,
+ mpls_eos_bit_t eos)
+{
+ hash_unset(mf->mf_entries, mpls_fib_entry_mk_key(label, eos));
+}
+
+void
+mpls_fib_forwarding_table_update (mpls_fib_t *mf,
+ mpls_label_t label,
+ mpls_eos_bit_t eos,
+ const dpo_id_t *dpo)
+{
+ mpls_label_t key;
+
+ ASSERT(DPO_LOAD_BALANCE == dpo->dpoi_type);
+
+ key = mpls_fib_entry_mk_key(label, eos);
+
+ mf->mf_lbs[key] = dpo->dpoi_index;
+}
+
+void
+mpls_fib_forwarding_table_reset (mpls_fib_t *mf,
+ mpls_label_t label,
+ mpls_eos_bit_t eos)
+{
+ mpls_label_t key;
+
+ key = mpls_fib_entry_mk_key(label, eos);
+
+ mf->mf_lbs[key] = mpls_fib_drop_dpo_index;
+}
+
+flow_hash_config_t
+mpls_fib_table_get_flow_hash_config (u32 fib_index)
+{
+ // FIXME.
+ return (0);
+}
+
+static void
+mpls_fib_table_show_all (const mpls_fib_t *mpls_fib,
+ vlib_main_t * vm)
+{
+ fib_node_index_t lfei, *lfeip, *lfeis = NULL;
+ mpls_label_t key;
+
+ hash_foreach(key, lfei, mpls_fib->mf_entries,
+ ({
+ vec_add1(lfeis, lfei);
+ }));
+
+ vec_sort_with_function(lfeis, fib_entry_cmp_for_sort);
+
+ vec_foreach(lfeip, lfeis)
+ {
+ vlib_cli_output (vm, "%U",
+ format_fib_entry, *lfeip,
+ FIB_ENTRY_FORMAT_DETAIL);
+ }
+ vec_free(lfeis);
+}
+
+static void
+mpls_fib_table_show_one (const mpls_fib_t *mpls_fib,
+ mpls_label_t label,
+ vlib_main_t * vm)
+{
+ fib_node_index_t lfei;
+ mpls_eos_bit_t eos;
+
+ FOR_EACH_MPLS_EOS_BIT(eos)
+ {
+ lfei = mpls_fib_table_lookup(mpls_fib, label, eos);
+
+ if (FIB_NODE_INDEX_INVALID != lfei)
+ {
+ vlib_cli_output (vm, "%U",
+ format_fib_entry, lfei, FIB_ENTRY_FORMAT_DETAIL);
+ }
+ }
+}
+
+static clib_error_t *
+mpls_fib_show (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ fib_table_t * fib_table;
+ mpls_label_t label;
+ int table_id;
+
+ table_id = -1;
+ label = MPLS_LABEL_INVALID;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ /* if (unformat (input, "brief") || unformat (input, "summary") */
+ /* || unformat (input, "sum")) */
+ /* verbose = 0; */
+
+ if (unformat (input, "%d", &label))
+ continue;
+ else if (unformat (input, "table %d", &table_id))
+ ;
+ else
+ break;
+ }
+
+ pool_foreach (fib_table, mpls_main.fibs,
+ ({
+ if (table_id >= 0 && table_id != fib_table->ft_table_id)
+ continue;
+
+ vlib_cli_output (vm, "%v, fib_index %d",
+                     fib_table->ft_desc, fib_table - mpls_main.fibs);
+
+ if (MPLS_LABEL_INVALID == label)
+ {
+ mpls_fib_table_show_all(&(fib_table->mpls), vm);
+ }
+ else
+ {
+ mpls_fib_table_show_one(&(fib_table->mpls), label, vm);
+ }
+ }));
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (mpls_fib_show_command, static) = {
+ .path = "show mpls fib",
+ .short_help = "show mpls fib [summary] [table <n>]",
+ .function = mpls_fib_show,
+};
diff --git a/src/vnet/fib/mpls_fib.h b/src/vnet/fib/mpls_fib.h
new file mode 100644
index 00000000000..93ae4623016
--- /dev/null
+++ b/src/vnet/fib/mpls_fib.h
@@ -0,0 +1,106 @@
+/*
+ * mpls_fib.h: The Label/MPLS FIB
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MPLS_FIB_TABLE_H__
+#define __MPLS_FIB_TABLE_H__
+
+#include <vnet/vnet.h>
+#include <vnet/mpls/mpls.h>
+#include <vnet/fib/fib_types.h>
+#include <vnet/dpo/dpo.h>
+#include <vnet/fib/fib_table.h>
+
+static inline mpls_fib_t*
+mpls_fib_get (fib_node_index_t index)
+{
+ if (!pool_is_free_index(mpls_main.fibs, index))
+ return (&(pool_elt_at_index(mpls_main.fibs, index)->mpls));
+ return (NULL);
+}
+
+extern u32 mpls_fib_table_find_or_create_and_lock(u32 table_id);
+extern u32 mpls_fib_table_create_and_lock(void);
+// extern mpls_fib_t * mpls_fib_find(u32 table_id);
+extern u32 mpls_fib_index_from_table_id(u32 table_id);
+
+extern u8 *format_mpls_fib_table_name(u8 * s, va_list * args);
+
+extern fib_node_index_t mpls_fib_table_entry_add_from_ip_fib_entry (
+ u32 table_id,
+ mpls_label_t label,
+ mpls_eos_bit_t eos,
+ fib_node_index_t fib_entry_index);
+
+
+extern fib_node_index_t mpls_fib_table_lookup(const mpls_fib_t *mf,
+ mpls_label_t label,
+ mpls_eos_bit_t eos);
+
+extern void mpls_fib_table_entry_remove(mpls_fib_t *mf,
+ mpls_label_t label,
+ mpls_eos_bit_t eos);
+extern void mpls_fib_table_entry_insert(mpls_fib_t *mf,
+ mpls_label_t label,
+ mpls_eos_bit_t eos,
+ fib_node_index_t fei);
+extern void mpls_fib_table_destroy(mpls_fib_t *mf);
+
+
+
+extern void mpls_fib_forwarding_table_update(mpls_fib_t *mf,
+ mpls_label_t label,
+ mpls_eos_bit_t eos,
+ const dpo_id_t *dpo);
+extern void mpls_fib_forwarding_table_reset(mpls_fib_t *mf,
+ mpls_label_t label,
+ mpls_eos_bit_t eos);
+
+/**
+ * @brief
+ * Lookup a label and EOS bit in the MPLS_FIB table to retrieve the
+ * load-balance index to be used for packet forwarding.
+ */
+static inline index_t
+mpls_fib_table_forwarding_lookup (u32 mpls_fib_index,
+ const mpls_unicast_header_t *hdr)
+{
+ mpls_label_t label;
+ mpls_fib_t *mf;
+ u32 key;
+
+ label = clib_net_to_host_u32(hdr->label_exp_s_ttl);
+ key = (vnet_mpls_uc_get_label(label) << 1) | vnet_mpls_uc_get_s(label);
+
+ mf = mpls_fib_get(mpls_fib_index);
+
+ return (mf->mf_lbs[key]);
+}
+
+static inline u32
+mpls_fib_table_get_index_for_sw_if_index (u32 sw_if_index)
+{
+ mpls_main_t *mm = &mpls_main;
+
+ ASSERT(vec_len(mm->fib_index_by_sw_if_index) > sw_if_index);
+
+ return (mm->fib_index_by_sw_if_index[sw_if_index]);
+}
+
+extern flow_hash_config_t mpls_fib_table_get_flow_hash_config(u32 fib_index);
+
+#endif
diff --git a/src/vnet/flow/flow_report.c b/src/vnet/flow/flow_report.c
new file mode 100644
index 00000000000..c78a78a9680
--- /dev/null
+++ b/src/vnet/flow/flow_report.c
@@ -0,0 +1,502 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * flow_report.c
+ */
+#include <vnet/flow/flow_report.h>
+#include <vnet/api_errno.h>
+
+flow_report_main_t flow_report_main;
+
+static_always_inline u8 stream_index_valid (u32 index)
+{
+ flow_report_main_t * frm = &flow_report_main;
+ return index < vec_len(frm->streams) &&
+ frm->streams[index].domain_id != ~0;
+}
+
+static_always_inline flow_report_stream_t * add_stream (void)
+{
+ flow_report_main_t * frm = &flow_report_main;
+ u32 i;
+ for (i = 0; i < vec_len(frm->streams); i++)
+ if (!stream_index_valid(i))
+ return &frm->streams[i];
+ u32 index = vec_len(frm->streams);
+ vec_validate(frm->streams, index);
+ return &frm->streams[index];
+}
+
+static_always_inline void delete_stream (u32 index)
+{
+ flow_report_main_t * frm = &flow_report_main;
+ ASSERT (index < vec_len(frm->streams));
+ ASSERT (frm->streams[index].domain_id != ~0);
+ frm->streams[index].domain_id = ~0;
+}
+
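+/* Find a stream by observation domain ID. Returns the stream index on
+ * success, -1 if no such stream exists, or -2 if the requested
+ * (domain_id, src_port) pairing conflicts with an existing stream. */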
+static i32 find_stream (u32 domain_id, u16 src_port)
+{
+ flow_report_main_t * frm = &flow_report_main;
+ flow_report_stream_t * stream;
+ u32 i;
+ for (i = 0; i < vec_len(frm->streams); i++)
+ if (stream_index_valid(i)) {
+ stream = &frm->streams[i];
+ if (domain_id == stream->domain_id) {
+ if (src_port != stream->src_port)
+ return -2;
+ return i;
+ } else if (src_port == stream->src_port) {
+ return -2;
+ }
+ }
+ return -1;
+}
+
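+/* Build an IPFIX template packet for a report, regenerating the rewrite
+ * if needed, fix up the export time, sequence number and UDP header, and
+ * return the buffer index via buffer_indexp. Returns 0 on success, -1 on
+ * failure (no collector configured or no buffer available). */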
+int send_template_packet (flow_report_main_t *frm,
+ flow_report_t *fr,
+ u32 * buffer_indexp)
+{
+ u32 bi0;
+ vlib_buffer_t * b0;
+ ip4_ipfix_template_packet_t * tp;
+ ipfix_message_header_t * h;
+ ip4_header_t * ip;
+ udp_header_t * udp;
+ vlib_main_t * vm = frm->vlib_main;
+ flow_report_stream_t * stream;
+ vlib_buffer_free_list_t *fl;
+
+ ASSERT (buffer_indexp);
+
+ if (fr->update_rewrite || fr->rewrite == 0)
+ {
+ if (frm->ipfix_collector.as_u32 == 0
+ || frm->src_address.as_u32 == 0)
+ {
+ clib_warning ("no collector: disabling flow collector process");
+ vlib_node_set_state (frm->vlib_main, flow_report_process_node.index,
+ VLIB_NODE_STATE_DISABLED);
+ return -1;
+ }
+ vec_free (fr->rewrite);
+ fr->update_rewrite = 1;
+ }
+
+ if (fr->update_rewrite)
+ {
+ fr->rewrite = fr->rewrite_callback (frm, fr,
+ &frm->ipfix_collector,
+ &frm->src_address,
+ frm->collector_port);
+ fr->update_rewrite = 0;
+ }
+
+ if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
+ return -1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /* Initialize the buffer */
+ fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+ vlib_buffer_init_for_free_list (b0, fl);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+
+ ASSERT (vec_len (fr->rewrite) < VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES);
+
+ clib_memcpy (b0->data, fr->rewrite, vec_len (fr->rewrite));
+ b0->current_data = 0;
+ b0->current_length = vec_len (fr->rewrite);
+ b0->flags |= (VLIB_BUFFER_TOTAL_LENGTH_VALID | VLIB_BUFFER_FLOW_REPORT);
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = frm->fib_index;
+
+ tp = vlib_buffer_get_current (b0);
+ ip = (ip4_header_t *) &tp->ip4;
+ udp = (udp_header_t *) (ip+1);
+ h = (ipfix_message_header_t *)(udp+1);
+
+ /* FIXUP: message header export_time */
+ h->export_time = (u32)
+ (((f64)frm->unix_time_0) +
+ (vlib_time_now(frm->vlib_main) - frm->vlib_time_0));
+ h->export_time = clib_host_to_net_u32(h->export_time);
+
+ stream = &frm->streams[fr->stream_index];
+
+ /* FIXUP: message header sequence_number. Templates do not increase it */
+ h->sequence_number = clib_host_to_net_u32(stream->sequence_number);
+
+ /* FIXUP: udp length */
+ udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
+
+ if (frm->udp_checksum)
+ {
+ /* RFC 7011 section 10.3.2. */
+ udp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip);
+ if (udp->checksum == 0)
+ udp->checksum = 0xffff;
+ }
+
+ *buffer_indexp = bi0;
+
+ fr->last_template_sent = vlib_time_now (vm);
+
+ return 0;
+}
+
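+/* Periodic process: after the kickoff event, wake every 5 seconds,
+ * (re)send any template that has aged past template_interval, then let
+ * each report's flow_data_callback enqueue data packets onto an
+ * ip4-lookup frame. */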
+static uword
+flow_report_process (vlib_main_t * vm,
+ vlib_node_runtime_t * rt,
+ vlib_frame_t * f)
+{
+ flow_report_main_t * frm = &flow_report_main;
+ flow_report_t * fr;
+ u32 ip4_lookup_node_index;
+ vlib_node_t * ip4_lookup_node;
+ vlib_frame_t * nf = 0;
+ u32 template_bi;
+ u32 * to_next;
+ int send_template;
+ f64 now;
+ int rv;
+ uword event_type;
+ uword *event_data = 0;
+
+ /* Wait for Godot... */
+ vlib_process_wait_for_event_or_clock (vm, 1e9);
+ event_type = vlib_process_get_events (vm, &event_data);
+ if (event_type != 1)
+ clib_warning ("bogus kickoff event received, %d", event_type);
+ vec_reset_length (event_data);
+
+ /* Enqueue pkts to ip4-lookup */
+ ip4_lookup_node = vlib_get_node_by_name (vm, (u8 *) "ip4-lookup");
+ ip4_lookup_node_index = ip4_lookup_node->index;
+
+ while (1)
+ {
+ vlib_process_wait_for_event_or_clock (vm, 5.0);
+ event_type = vlib_process_get_events (vm, &event_data);
+ vec_reset_length (event_data);
+
+ vec_foreach (fr, frm->reports)
+ {
+ now = vlib_time_now (vm);
+
+ /* Need to send a template packet? */
+ send_template =
+ now > (fr->last_template_sent + frm->template_interval);
+ send_template += fr->last_template_sent == 0;
+ template_bi = ~0;
+ rv = 0;
+
+ if (send_template)
+ rv = send_template_packet (frm, fr, &template_bi);
+
+ if (rv < 0)
+ continue;
+
+ nf = vlib_get_frame_to_node (vm, ip4_lookup_node_index);
+ nf->n_vectors = 0;
+ to_next = vlib_frame_vector_args (nf);
+
+ if (template_bi != ~0)
+ {
+ to_next[0] = template_bi;
+ to_next++;
+ nf->n_vectors++;
+ }
+
+ nf = fr->flow_data_callback (frm, fr,
+ nf, to_next, ip4_lookup_node_index);
+ if (nf)
+ vlib_put_frame_to_node (vm, ip4_lookup_node_index, nf);
+ }
+ }
+
+ return 0; /* not so much */
+}
+
+VLIB_REGISTER_NODE (flow_report_process_node) = {
+ .function = flow_report_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "flow-report-process",
+};
+
+int vnet_flow_report_add_del (flow_report_main_t *frm,
+ vnet_flow_report_add_del_args_t *a)
+{
+ int i;
+ int found_index = ~0;
+ flow_report_t *fr;
+ flow_report_stream_t * stream;
+  i32 si;  /* find_stream() returns a signed index / error code */
+
+ si = find_stream(a->domain_id, a->src_port);
+ if (si == -2)
+ return VNET_API_ERROR_INVALID_VALUE;
+ if (si == -1 && a->is_add == 0)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ for (i = 0; i < vec_len(frm->reports); i++)
+ {
+ fr = vec_elt_at_index (frm->reports, i);
+ if (fr->opaque.as_uword == a->opaque.as_uword
+ && fr->rewrite_callback == a->rewrite_callback
+ && fr->flow_data_callback == a->flow_data_callback)
+ {
+ found_index = i;
+ break;
+ }
+ }
+
+ if (a->is_add == 0)
+ {
+ if (found_index != ~0)
+ {
+ vec_delete (frm->reports, 1, found_index);
+ stream = &frm->streams[si];
+ stream->n_reports--;
+ if (stream->n_reports == 0)
+ delete_stream(si);
+ return 0;
+ }
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+ }
+
+ if (found_index != ~0)
+ return VNET_API_ERROR_VALUE_EXIST;
+
+ if (si == -1)
+ {
+ stream = add_stream();
+ stream->domain_id = a->domain_id;
+ stream->src_port = a->src_port;
+ stream->sequence_number = 0;
+ stream->n_reports = 0;
+ si = stream - frm->streams;
+ }
+ else
+ stream = &frm->streams[si];
+
+ stream->n_reports++;
+
+ vec_add2 (frm->reports, fr, 1);
+
+ fr->stream_index = si;
+ fr->template_id = 256 + stream->next_template_no;
+ stream->next_template_no = (stream->next_template_no + 1) % (65536 - 256);
+ fr->update_rewrite = 1;
+ fr->opaque = a->opaque;
+ fr->rewrite_callback = a->rewrite_callback;
+ fr->flow_data_callback = a->flow_data_callback;
+
+ return 0;
+}
+
+clib_error_t * flow_report_add_del_error_to_clib_error (int error)
+{
+ switch (error)
+ {
+ case 0:
+ return 0;
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ return clib_error_return (0, "Flow report not found");
+ case VNET_API_ERROR_VALUE_EXIST:
+ return clib_error_return (0, "Flow report already exists");
+ case VNET_API_ERROR_INVALID_VALUE:
+ return clib_error_return (0, "Expecting either still unused values "
+ "for both domain_id and src_port "
+ "or already used values for both fields");
+ default:
+ return clib_error_return (0, "vnet_flow_report_add_del returned %d",
+ error);
+ }
+}
+
+void vnet_flow_reports_reset (flow_report_main_t * frm)
+{
+ flow_report_t *fr;
+ u32 i;
+
+ for (i = 0; i < vec_len(frm->streams); i++)
+ if (stream_index_valid(i))
+ frm->streams[i].sequence_number = 0;
+
+ vec_foreach (fr, frm->reports)
+ {
+ fr->update_rewrite = 1;
+ fr->last_template_sent = 0;
+ }
+}
+
+void vnet_stream_reset (flow_report_main_t * frm, u32 stream_index)
+{
+ flow_report_t *fr;
+
+ frm->streams[stream_index].sequence_number = 0;
+
+ vec_foreach (fr, frm->reports)
+    if (fr->stream_index == stream_index) {
+ fr->update_rewrite = 1;
+ fr->last_template_sent = 0;
+ }
+}
+
+int vnet_stream_change (flow_report_main_t * frm,
+ u32 old_domain_id, u16 old_src_port,
+ u32 new_domain_id, u16 new_src_port)
+{
+ i32 stream_index = find_stream (old_domain_id, old_src_port);
+ if (stream_index < 0)
+ return 1;
+ flow_report_stream_t * stream = &frm->streams[stream_index];
+ stream->domain_id = new_domain_id;
+ stream->src_port = new_src_port;
+ if (old_domain_id != new_domain_id || old_src_port != new_src_port)
+ vnet_stream_reset (frm, stream_index);
+ return 0;
+}
+
+static clib_error_t *
+set_ipfix_exporter_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ flow_report_main_t * frm = &flow_report_main;
+ ip4_address_t collector, src;
+ u16 collector_port = UDP_DST_PORT_ipfix;
+ u32 fib_id;
+ u32 fib_index = ~0;
+
+ collector.as_u32 = 0;
+ src.as_u32 = 0;
+ u32 path_mtu = 512; // RFC 7011 section 10.3.3.
+ u32 template_interval = 20;
+ u8 udp_checksum = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "collector %U", unformat_ip4_address, &collector))
+ ;
+ else if (unformat (input, "port %u", &collector_port))
+ ;
+ else if (unformat (input, "src %U", unformat_ip4_address, &src))
+ ;
+ else if (unformat (input, "fib-id %u", &fib_id))
+ {
+ ip4_main_t * im = &ip4_main;
+ uword * p = hash_get (im->fib_index_by_table_id, fib_id);
+ if (! p)
+ return clib_error_return (0, "fib ID %d doesn't exist\n",
+ fib_id);
+ fib_index = p[0];
+ }
+ else if (unformat (input, "path-mtu %u", &path_mtu))
+ ;
+ else if (unformat (input, "template-interval %u", &template_interval))
+ ;
+ else if (unformat (input, "udp-checksum"))
+ udp_checksum = 1;
+ else
+ break;
+ }
+
+ if (collector.as_u32 == 0)
+ return clib_error_return (0, "collector address required");
+
+ if (src.as_u32 == 0)
+ return clib_error_return (0, "src address required");
+
+ if (path_mtu > 1450 /* vpp does not support fragmentation */)
+ return clib_error_return (0, "too big path-mtu value, maximum is 1450");
+
+ if (path_mtu < 68)
+ return clib_error_return (0, "too small path-mtu value, minimum is 68");
+
+ /* Reset report streams if we are reconfiguring IP addresses */
+ if (frm->ipfix_collector.as_u32 != collector.as_u32 ||
+ frm->src_address.as_u32 != src.as_u32 ||
+ frm->collector_port != collector_port)
+ vnet_flow_reports_reset(frm);
+
+ frm->ipfix_collector.as_u32 = collector.as_u32;
+ frm->collector_port = collector_port;
+ frm->src_address.as_u32 = src.as_u32;
+ frm->fib_index = fib_index;
+ frm->path_mtu = path_mtu;
+ frm->template_interval = template_interval;
+ frm->udp_checksum = udp_checksum;
+
+ vlib_cli_output (vm, "Collector %U, src address %U, "
+ "fib index %d, path MTU %u, "
+ "template resend interval %us, "
+ "udp checksum %s",
+ format_ip4_address, &frm->ipfix_collector,
+ format_ip4_address, &frm->src_address,
+ fib_index, path_mtu, template_interval,
+ udp_checksum ? "enabled" : "disabled");
+
+ /* Turn on the flow reporting process */
+ vlib_process_signal_event (vm, flow_report_process_node.index,
+ 1, 0);
+ return 0;
+}
+
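+/*?
+ * Configure the IPFIX exporter.
+ * @cliexpar
+ * Example of how to configure the exporter (hypothetical addresses):
+ * @cliexcmd{set ipfix exporter collector 192.0.2.1 src 192.0.2.2 template-interval 20}
+ ?*/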
+VLIB_CLI_COMMAND (set_ipfix_exporter_command, static) = {
+ .path = "set ipfix exporter",
+ .short_help = "set ipfix exporter "
+ "collector <ip4-address> [port <port>] "
+ "src <ip4-address> [fib-id <fib-id>] "
+ "[path-mtu <path-mtu>] "
+    "[template-interval <template-interval>] "
+    "[udp-checksum]",
+ .function = set_ipfix_exporter_command_fn,
+};
+
+
+static clib_error_t *
+ipfix_flush_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ /* poke the flow reporting process */
+ vlib_process_signal_event (vm, flow_report_process_node.index,
+ 1, 0);
+ return 0;
+}
+
+VLIB_CLI_COMMAND (ipfix_flush_command, static) = {
+ .path = "ipfix flush",
+ .short_help = "flush the current ipfix data [for make test]",
+ .function = ipfix_flush_command_fn,
+};
+
+static clib_error_t *
+flow_report_init (vlib_main_t *vm)
+{
+ flow_report_main_t * frm = &flow_report_main;
+
+ frm->vlib_main = vm;
+ frm->vnet_main = vnet_get_main();
+ frm->unix_time_0 = time(0);
+ frm->vlib_time_0 = vlib_time_now(frm->vlib_main);
+ frm->fib_index = ~0;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (flow_report_init);
diff --git a/src/vnet/flow/flow_report.h b/src/vnet/flow/flow_report.h
new file mode 100644
index 00000000000..4e764377dc8
--- /dev/null
+++ b/src/vnet/flow/flow_report.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_vnet_flow_report_h__
+#define __included_vnet_flow_report_h__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ethernet/packet.h>
+#include <vnet/ip/ip_packet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/udp.h>
+#include <vlib/cli.h>
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/cache.h>
+
+#include <vnet/flow/ipfix_packet.h>
+
+/* Used to build the rewrite */
+typedef struct {
+ ip4_header_t ip4;
+ udp_header_t udp;
+ ipfix_template_packet_t ipfix;
+} ip4_ipfix_template_packet_t;
+
+struct flow_report_main;
+struct flow_report;
+
+typedef u8 * (vnet_flow_rewrite_callback_t)(struct flow_report_main *,
+ struct flow_report *,
+ ip4_address_t *,
+ ip4_address_t *,
+ u16);
+
+typedef vlib_frame_t * (vnet_flow_data_callback_t) (struct flow_report_main *,
+ struct flow_report *,
+ vlib_frame_t *, u32 *,
+ u32);
+
+typedef union {
+ void * as_ptr;
+ uword as_uword;
+} opaque_t;
+
+typedef struct {
+ u32 domain_id;
+ u32 sequence_number;
+ u16 src_port;
+ u16 n_reports;
+ u16 next_template_no;
+} flow_report_stream_t;
+
+typedef struct flow_report {
+ /* ipfix rewrite, set by callback */
+ u8 * rewrite;
+ u16 template_id;
+ u32 stream_index;
+ f64 last_template_sent;
+ int update_rewrite;
+
+ /* Bitmap of fields to send */
+ uword * fields_to_send;
+
+ /* Opaque data */
+ opaque_t opaque;
+
+ /* build-the-rewrite callback */
+ vnet_flow_rewrite_callback_t *rewrite_callback;
+
+ /* Send-flow-data callback */
+ vnet_flow_data_callback_t *flow_data_callback;
+} flow_report_t;
+
+typedef struct flow_report_main {
+ flow_report_t * reports;
+ flow_report_stream_t * streams;
+
+ /* ipfix collector ip address, port, our ip address, fib index */
+ ip4_address_t ipfix_collector;
+ u16 collector_port;
+ ip4_address_t src_address;
+ u32 fib_index;
+
+ /* Path MTU */
+ u32 path_mtu;
+
+ /* time interval in seconds after which to resend templates */
+ u32 template_interval;
+
+ /* UDP checksum calculation enable flag */
+ u8 udp_checksum;
+
+ /* time scale transform. Joy. */
+ u32 unix_time_0;
+ f64 vlib_time_0;
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} flow_report_main_t;
+
+extern flow_report_main_t flow_report_main;
+
+extern vlib_node_registration_t flow_report_process_node;
+
+int vnet_flow_report_enable_disable (u32 sw_if_index, u32 table_index,
+ int enable_disable);
+typedef struct {
+ vnet_flow_data_callback_t *flow_data_callback;
+ vnet_flow_rewrite_callback_t *rewrite_callback;
+ opaque_t opaque;
+ int is_add;
+ u32 domain_id;
+ u16 src_port;
+} vnet_flow_report_add_del_args_t;
+
+int vnet_flow_report_add_del (flow_report_main_t *frm,
+ vnet_flow_report_add_del_args_t *a);
+
+clib_error_t * flow_report_add_del_error_to_clib_error (int error);
+
+void vnet_flow_reports_reset (flow_report_main_t * frm);
+
+void vnet_stream_reset (flow_report_main_t * frm, u32 stream_index);
+
+int vnet_stream_change (flow_report_main_t * frm,
+ u32 old_domain_id, u16 old_src_port,
+ u32 new_domain_id, u16 new_src_port);
+
+#endif /* __included_vnet_flow_report_h__ */
diff --git a/src/vnet/flow/flow_report_classify.c b/src/vnet/flow/flow_report_classify.c
new file mode 100644
index 00000000000..cb8fe069681
--- /dev/null
+++ b/src/vnet/flow/flow_report_classify.c
@@ -0,0 +1,529 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/flow/flow_report.h>
+#include <vnet/flow/flow_report_classify.h>
+#include <vnet/api_errno.h>
+
+/* Common prefix of tcp and udp headers
+ * containing only source and destination port fields */
+typedef struct {
+ u16 src_port, dst_port;
+} tcpudp_header_t;
+
+flow_report_classify_main_t flow_report_classify_main;
+
+u8 * ipfix_classify_template_rewrite (flow_report_main_t * frm,
+ flow_report_t * fr,
+ ip4_address_t * collector_address,
+ ip4_address_t * src_address,
+ u16 collector_port)
+{
+ flow_report_classify_main_t * fcm = &flow_report_classify_main;
+ vnet_classify_table_t * tblp;
+ vnet_classify_main_t * vcm = &vnet_classify_main;
+ u32 flow_table_index = fr->opaque.as_uword;
+ u8 * ip_start;
+ ip4_header_t * ip;
+ ip6_header_t * ip6;
+ tcpudp_header_t * tcpudp;
+ udp_header_t * udp;
+ ipfix_message_header_t * h;
+ ipfix_set_header_t * s;
+ ipfix_template_header_t * t;
+ ipfix_field_specifier_t * f;
+ ipfix_field_specifier_t * first_field;
+ u8 * rewrite = 0;
+ ip4_ipfix_template_packet_t * tp;
+ i32 l3_offset = -2; /* sizeof (ethernet_header_t) - sizeof (u32x4) */
+ u32 field_count = 0;
+ u32 field_index = 0;
+ flow_report_stream_t * stream;
+ u8 ip_version;
+ u8 transport_protocol;
+
+ stream = &frm->streams[fr->stream_index];
+
+ ipfix_classify_table_t * table = &fcm->tables[flow_table_index];
+
+ ip_version = table->ip_version;
+ transport_protocol = table->transport_protocol;
+
+ tblp = pool_elt_at_index (vcm->tables, table->classify_table_index);
+
+ /*
+ * Mumble, assumes that we're not classifying on L2 or first 2 octets
+ * of L3..
+ */
+
+ /* Determine field count */
+ ip_start = ((u8 *)(tblp->mask)) + l3_offset;
+#define _(field,mask,item,length) \
+ if (memcmp(&field, &mask, length) == 0) \
+ { \
+ field_count++; \
+ \
+ fr->fields_to_send = clib_bitmap_set (fr->fields_to_send, \
+ field_index, 1); \
+ } \
+ field_index++;
+ foreach_ipfix_field;
+#undef _
+
+ /* Add packetTotalCount manually */
+ field_count += 1;
+
+ /* $$$ enterprise fields, at some later date */
+
+ /* allocate rewrite space */
+ vec_validate_aligned (rewrite,
+ sizeof (ip4_ipfix_template_packet_t)
+ + field_count * sizeof (ipfix_field_specifier_t) - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ tp = (ip4_ipfix_template_packet_t *) rewrite;
+ ip = (ip4_header_t *) &tp->ip4;
+ udp = (udp_header_t *) (ip+1);
+ h = (ipfix_message_header_t *)(udp+1);
+ s = (ipfix_set_header_t *)(h+1);
+ t = (ipfix_template_header_t *)(s+1);
+ first_field = f = (ipfix_field_specifier_t *)(t+1);
+
+ ip->ip_version_and_header_length = 0x45;
+ ip->ttl = 254;
+ ip->protocol = IP_PROTOCOL_UDP;
+ ip->src_address.as_u32 = src_address->as_u32;
+ ip->dst_address.as_u32 = collector_address->as_u32;
+ udp->src_port = clib_host_to_net_u16 (stream->src_port);
+ udp->dst_port = clib_host_to_net_u16 (collector_port);
+ udp->length = clib_host_to_net_u16 (vec_len(rewrite) - sizeof (*ip));
+
+ /* FIXUP: message header export_time */
+ /* FIXUP: message header sequence_number */
+ h->domain_id = clib_host_to_net_u32 (stream->domain_id);
+
+ /* Take another trip through the mask and build the template */
+ ip_start = ((u8 *)(tblp->mask)) + l3_offset;
+#define _(field,mask,item,length) \
+ if (memcmp(&field, &mask, length) == 0) \
+ { \
+ f->e_id_length = ipfix_e_id_length (0 /* enterprise */, \
+ item, length); \
+ f++; \
+ }
+ foreach_ipfix_field;
+#undef _
+
+ /* Add packetTotalCount manually */
+ f->e_id_length = ipfix_e_id_length (0 /* enterprise */, packetTotalCount, 8);
+ f++;
+
+ /* Back to the template packet... */
+ ip = (ip4_header_t *) &tp->ip4;
+ udp = (udp_header_t *) (ip+1);
+
+ ASSERT (f - first_field);
+ /* Field count in this template */
+ t->id_count = ipfix_id_count (fr->template_id, f - first_field);
+
+ /* set length in octets */
+ s->set_id_length = ipfix_set_id_length (2 /* set_id */, (u8 *) f - (u8 *)s);
+
+ /* message length in octets */
+ h->version_length = version_length ((u8 *)f - (u8 *)h);
+
+ ip->length = clib_host_to_net_u16 ((u8 *)f - (u8 *)ip);
+ ip->checksum = ip4_header_checksum (ip);
+
+ return rewrite;
+}
+
+vlib_frame_t * ipfix_classify_send_flows (flow_report_main_t * frm,
+ flow_report_t * fr,
+ vlib_frame_t * f,
+ u32 * to_next,
+ u32 node_index)
+{
+ flow_report_classify_main_t * fcm = &flow_report_classify_main;
+ vnet_classify_main_t * vcm = &vnet_classify_main;
+ u32 flow_table_index = fr->opaque.as_uword;
+ vnet_classify_table_t * t;
+ vnet_classify_bucket_t * b;
+ vnet_classify_entry_t * v, * save_v;
+ vlib_buffer_t *b0 = 0;
+ u32 next_offset = 0;
+ u32 record_offset = 0;
+ u32 bi0 = ~0;
+ int i, j, k;
+ ip4_ipfix_template_packet_t * tp;
+ ipfix_message_header_t * h = 0;
+ ipfix_set_header_t * s = 0;
+ u8 * ip_start;
+ ip4_header_t * ip;
+ ip6_header_t * ip6;
+ tcpudp_header_t * tcpudp;
+ udp_header_t * udp;
+ int field_index;
+ u32 records_this_buffer;
+ u16 new_l0, old_l0;
+ ip_csum_t sum0;
+ vlib_main_t * vm = frm->vlib_main;
+ flow_report_stream_t * stream;
+ u8 ip_version;
+ u8 transport_protocol;
+
+ stream = &frm->streams[fr->stream_index];
+
+ ipfix_classify_table_t * table = &fcm->tables[flow_table_index];
+
+ ip_version = table->ip_version;
+ transport_protocol = table->transport_protocol;
+
+ t = pool_elt_at_index (vcm->tables, table->classify_table_index);
+
+ while (__sync_lock_test_and_set (t->writer_lock, 1))
+ ;
+
+ for (i = 0; i < t->nbuckets; i++)
+ {
+ b = &t->buckets [i];
+ if (b->offset == 0)
+ continue;
+
+ save_v = vnet_classify_get_entry (t, b->offset);
+ for (j = 0; j < (1<<b->log2_pages); j++)
+ {
+ for (k = 0; k < t->entries_per_page; k++)
+ {
+ v = vnet_classify_entry_at_index
+ (t, save_v, j*t->entries_per_page + k);
+
+ if (vnet_classify_entry_is_free (v))
+ continue;
+
+ /* OK, we have something to send... */
+ if (PREDICT_FALSE (b0 == 0))
+ {
+ if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
+ goto flush;
+ b0 = vlib_get_buffer (vm, bi0);
+
+ u32 copy_len = sizeof(ip4_header_t) +
+ sizeof(udp_header_t) +
+ sizeof(ipfix_message_header_t);
+ clib_memcpy (b0->data, fr->rewrite, copy_len);
+ b0->current_data = 0;
+ b0->current_length = copy_len;
+ b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = frm->fib_index;
+
+ tp = vlib_buffer_get_current (b0);
+ ip = (ip4_header_t *) &tp->ip4;
+ udp = (udp_header_t *) (ip+1);
+ h = (ipfix_message_header_t *)(udp+1);
+ s = (ipfix_set_header_t *)(h+1);
+
+ /* FIXUP: message header export_time */
+ h->export_time = (u32)
+ (((f64)frm->unix_time_0) +
+ (vlib_time_now(frm->vlib_main) - frm->vlib_time_0));
+ h->export_time = clib_host_to_net_u32(h->export_time);
+
+ /* FIXUP: message header sequence_number */
+ h->sequence_number = stream->sequence_number;
+ h->sequence_number = clib_host_to_net_u32 (h->sequence_number);
+
+ next_offset = (u32) (((u8 *)(s+1)) - (u8 *)tp);
+ record_offset = next_offset;
+ records_this_buffer = 0;
+ }
+
+ field_index = 0;
+ ip_start = ((u8 *)v->key) - 2;
+#define _(field,mask,item,length) \
+ if (clib_bitmap_get (fr->fields_to_send, field_index)) \
+ { \
+ clib_memcpy (b0->data + next_offset, &field, \
+ length); \
+ next_offset += length; \
+ } \
+ field_index++;
+ foreach_ipfix_field;
+#undef _
+
+ /* Add packetTotalCount manually */
+ {
+ u64 packets = clib_host_to_net_u64 (v->hits);
+ clib_memcpy (b0->data + next_offset, &packets, sizeof (packets));
+ next_offset += sizeof (packets);
+ }
+ records_this_buffer++;
+ stream->sequence_number++;
+
+ /* Next record will have the same size as this record */
+ u32 next_record_size = next_offset - record_offset;
+ record_offset = next_offset;
+
+ if (next_offset + next_record_size > frm->path_mtu)
+ {
+ s->set_id_length = ipfix_set_id_length (fr->template_id,
+ next_offset -
+ (sizeof (*ip) + sizeof (*udp) +
+ sizeof (*h)));
+ h->version_length = version_length (next_offset -
+ (sizeof (*ip) + sizeof (*udp)));
+ b0->current_length = next_offset;
+ b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
+
+ tp = vlib_buffer_get_current (b0);
+ ip = (ip4_header_t *) &tp->ip4;
+ udp = (udp_header_t *) (ip+1);
+
+ sum0 = ip->checksum;
+ old_l0 = ip->length;
+ new_l0 =
+ clib_host_to_net_u16 ((u16)next_offset);
+
+ sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
+ length /* changed member */);
+
+ ip->checksum = ip_csum_fold (sum0);
+ ip->length = new_l0;
+ udp->length =
+ clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
+
+ if (frm->udp_checksum)
+ {
+ /* RFC 7011 section 10.3.2. */
+ udp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip);
+ if (udp->checksum == 0)
+ udp->checksum = 0xffff;
+ }
+
+ ASSERT (ip->checksum == ip4_header_checksum (ip));
+
+ to_next[0] = bi0;
+ f->n_vectors++;
+ to_next++;
+
+ if (f->n_vectors == VLIB_FRAME_SIZE)
+ {
+ vlib_put_frame_to_node (vm, node_index, f);
+ f = vlib_get_frame_to_node (vm, node_index);
+ f->n_vectors = 0;
+ to_next = vlib_frame_vector_args (f);
+ }
+ b0 = 0;
+ bi0 = ~0;
+ }
+ }
+ }
+ }
+
+ flush:
+ if (b0)
+ {
+ s->set_id_length = ipfix_set_id_length (fr->template_id,
+ next_offset -
+ (sizeof (*ip) + sizeof (*udp) +
+ sizeof (*h)));
+ h->version_length = version_length (next_offset -
+ (sizeof (*ip) + sizeof (*udp)));
+ b0->current_length = next_offset;
+ b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
+
+ tp = vlib_buffer_get_current (b0);
+ ip = (ip4_header_t *) &tp->ip4;
+ udp = (udp_header_t *) (ip+1);
+
+ sum0 = ip->checksum;
+ old_l0 = ip->length;
+ new_l0 = clib_host_to_net_u16 ((u16)next_offset);
+
+ sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
+ length /* changed member */);
+
+ ip->checksum = ip_csum_fold (sum0);
+ ip->length = new_l0;
+ udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
+
+ if (frm->udp_checksum)
+ {
+ /* RFC 7011 section 10.3.2. */
+ udp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip);
+ if (udp->checksum == 0)
+ udp->checksum = 0xffff;
+ }
+
+ ASSERT (ip->checksum == ip4_header_checksum (ip));
+
+ to_next[0] = bi0;
+ f->n_vectors++;
+
+ b0 = 0;
+ bi0 = ~0;
+ }
+
+ *(t->writer_lock) = 0;
+ return f;
+}
+
+static clib_error_t *
+ipfix_classify_table_add_del_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ flow_report_classify_main_t *fcm = &flow_report_classify_main;
+ flow_report_main_t *frm = &flow_report_main;
+ vnet_flow_report_add_del_args_t args;
+ ipfix_classify_table_t * table;
+ int rv;
+ int is_add = -1;
+ u32 classify_table_index = ~0;
+ u8 ip_version = 0;
+ u8 transport_protocol = 255;
+ clib_error_t * error = 0;
+
+ if (fcm->src_port == 0)
+ return clib_error_return (0, "call 'set ipfix classify stream' first");
+
+ memset (&args, 0, sizeof (args));
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "add"))
+ is_add = 1;
+ else if (unformat (input, "del"))
+ is_add = 0;
+ else if (unformat (input, "%d", &classify_table_index))
+ ;
+ else if (unformat (input, "ip4"))
+ ip_version = 4;
+ else if (unformat (input, "ip6"))
+ ip_version = 6;
+ else if (unformat (input, "tcp"))
+ transport_protocol = 6;
+ else if (unformat (input, "udp"))
+ transport_protocol = 17;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ if (is_add == -1)
+ return clib_error_return (0, "expecting: add|del");
+ if (classify_table_index == ~0)
+ return clib_error_return (0, "classifier table not specified");
+ if (ip_version == 0)
+ return clib_error_return (0, "IP version not specified");
+
+ table = 0;
+ int i;
+ for (i = 0; i < vec_len(fcm->tables); i++)
+ if (ipfix_classify_table_index_valid(i))
+ if (fcm->tables[i].classify_table_index == classify_table_index) {
+ table = &fcm->tables[i];
+ break;
+ }
+
+ if (is_add) {
+ if (table)
+ return clib_error_return (0, "Specified classifier table already used");
+ table = ipfix_classify_add_table();
+ table->classify_table_index = classify_table_index;
+ } else {
+ if (!table)
+ return clib_error_return (0, "Specified classifier table not registered");
+ }
+
+ table->ip_version = ip_version;
+ table->transport_protocol = transport_protocol;
+
+ args.opaque.as_uword = table - fcm->tables;
+ args.rewrite_callback = ipfix_classify_template_rewrite;
+ args.flow_data_callback = ipfix_classify_send_flows;
+ args.is_add = is_add;
+ args.domain_id = fcm->domain_id;
+ args.src_port = fcm->src_port;
+
+ rv = vnet_flow_report_add_del (frm, &args);
+
+ error = flow_report_add_del_error_to_clib_error(rv);
+
+ /* If deleting, or add failed */
+ if (is_add == 0 || (rv && is_add))
+ ipfix_classify_delete_table (table - fcm->tables);
+
+ return error;
+}
+
+VLIB_CLI_COMMAND (ipfix_classify_table_add_del_command, static) = {
+ .path = "ipfix classify table",
+ .short_help = "ipfix classify table add|del <table-index>",
+ .function = ipfix_classify_table_add_del_command_fn,
+};
+
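+/* Example (hypothetical table index 0; the classifier table must already
+ * exist, and 'set ipfix classify stream' must have been run first):
+ *
+ *   ipfix classify table add 0 ip4 tcp
+ */
+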
+static clib_error_t *
+set_ipfix_classify_stream_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ flow_report_classify_main_t *fcm = &flow_report_classify_main;
+ flow_report_main_t *frm = &flow_report_main;
+ u32 domain_id = 1;
+ u32 src_port = UDP_DST_PORT_ipfix;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "domain %d", &domain_id))
+ ;
+ else if (unformat (input, "src-port %d", &src_port))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ if (fcm->src_port != 0 &&
+ (fcm->domain_id != domain_id ||
+ fcm->src_port != (u16)src_port)) {
+ int rv = vnet_stream_change (frm, fcm->domain_id, fcm->src_port,
+ domain_id, (u16)src_port);
+ ASSERT (rv == 0);
+ }
+
+ fcm->domain_id = domain_id;
+ fcm->src_port = (u16)src_port;
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (set_ipfix_classify_stream_command, static) = {
+ .path = "set ipfix classify stream",
+ .short_help = "set ipfix classify stream"
+ "[domain <domain-id>] [src-port <src-port>]",
+ .function = set_ipfix_classify_stream_command_fn,
+};
+
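+/* Example (hypothetical values; defaults are domain 1 and src-port 4739,
+ * the IPFIX port, per the initializers above):
+ *
+ *   set ipfix classify stream domain 2 src-port 9000
+ */
+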
+static clib_error_t *
+flow_report_classify_init (vlib_main_t *vm)
+{
+ clib_error_t * error;
+
+ if ((error = vlib_call_init_function (vm, flow_report_init)))
+ return error;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (flow_report_classify_init);
diff --git a/src/vnet/flow/flow_report_classify.h b/src/vnet/flow/flow_report_classify.h
new file mode 100644
index 00000000000..77d98b586ca
--- /dev/null
+++ b/src/vnet/flow/flow_report_classify.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_flow_report_classify_h__
+#define __included_flow_report_classify_h__
+
+#define foreach_ipfix_ip4_field \
+_(ip->src_address.as_u32, ((u32[]){0xFFFFFFFF}), sourceIPv4Address, 4) \
+_(ip->dst_address.as_u32, ((u32[]){0xFFFFFFFF}), destinationIPv4Address, 4) \
+_(ip->protocol, ((u8[]){0xFF}), protocolIdentifier, 1)
+
+#define foreach_ipfix_ip6_field \
+_(ip6->src_address.as_u8, \
+ ((u32[]){0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF}), \
+ sourceIPv6Address, 16) \
+_(ip6->dst_address.as_u8, \
+ ((u32[]){0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF}), \
+ destinationIPv6Address, 16) \
+_(ip6->protocol, ((u8[]){0xFF}), protocolIdentifier, 1)
+
+#define foreach_ipfix_tcpudp_field \
+_(tcpudp->src_port, ((u16[]){0xFFFF}), sourceTransportPort, 2) \
+_(tcpudp->dst_port, ((u16[]){0xFFFF}), destinationTransportPort, 2)
+
+#define foreach_ipfix_tcp_field \
+_(tcpudp->src_port, ((u16[]){0xFFFF}), tcpSourcePort, 2) \
+_(tcpudp->dst_port, ((u16[]){0xFFFF}), tcpDestinationPort, 2)
+
+#define foreach_ipfix_udp_field \
+_(tcpudp->src_port, ((u16[]){0xFFFF}), udpSourcePort, 2) \
+_(tcpudp->dst_port, ((u16[]){0xFFFF}), udpDestinationPort, 2)
+
+#define foreach_ipfix_transport_protocol_field \
+ switch (transport_protocol) { \
+ case 255: \
+ foreach_ipfix_tcpudp_field; \
+ break; \
+ case 6: \
+ foreach_ipfix_tcp_field; \
+ break; \
+ case 17: \
+ foreach_ipfix_udp_field; \
+ break; \
+ }
+
+#define foreach_ipfix_field \
+ if (ip_version == 4) { \
+ ip = (ip4_header_t *)ip_start; \
+ tcpudp = (tcpudp_header_t *)(ip+1); \
+ foreach_ipfix_ip4_field; \
+ } else { \
+ ip6 = (ip6_header_t *)ip_start; \
+ tcpudp = (tcpudp_header_t *)(ip6+1); \
+ foreach_ipfix_ip6_field; \
+ } \
+ foreach_ipfix_transport_protocol_field
+
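+/* foreach_ipfix_field assumes the caller has ip_start, ip, ip6, tcpudp,
+ * ip_version and transport_protocol in scope, plus a one-shot
+ * _(field,mask,item,length) macro; flow_report_classify.c expands it
+ * twice, once to build the template and once per exported record. */
+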
+typedef struct {
+ u32 classify_table_index;
+ u8 ip_version;
+ u8 transport_protocol;
+} ipfix_classify_table_t;
+
+typedef struct {
+ u32 domain_id;
+ u16 src_port;
+ ipfix_classify_table_t * tables;
+} flow_report_classify_main_t;
+
+extern flow_report_classify_main_t flow_report_classify_main;
+
+static_always_inline u8 ipfix_classify_table_index_valid (u32 index)
+{
+ flow_report_classify_main_t * fcm = &flow_report_classify_main;
+ return index < vec_len(fcm->tables) &&
+ fcm->tables[index].classify_table_index != ~0;
+}
+
+static_always_inline ipfix_classify_table_t * ipfix_classify_add_table (void)
+{
+ flow_report_classify_main_t * fcm = &flow_report_classify_main;
+ u32 i;
+ for (i = 0; i < vec_len(fcm->tables); i++)
+ if (!ipfix_classify_table_index_valid(i))
+ return &fcm->tables[i];
+ u32 index = vec_len(fcm->tables);
+ vec_validate(fcm->tables, index);
+ return &fcm->tables[index];
+}
+
+static_always_inline void ipfix_classify_delete_table (u32 index)
+{
+ flow_report_classify_main_t * fcm = &flow_report_classify_main;
+ ASSERT (index < vec_len(fcm->tables));
+ ASSERT (fcm->tables[index].classify_table_index != ~0);
+ fcm->tables[index].classify_table_index = ~0;
+}
+
+u8 * ipfix_classify_template_rewrite (flow_report_main_t * frm,
+ flow_report_t * fr,
+ ip4_address_t * collector_address,
+ ip4_address_t * src_address,
+ u16 collector_port);
+
+vlib_frame_t * ipfix_classify_send_flows (flow_report_main_t * frm,
+ flow_report_t * fr,
+ vlib_frame_t * f,
+ u32 * to_next,
+ u32 node_index);
+
+#endif /* __included_flow_report_classify_h__ */
diff --git a/src/vnet/flow/ipfix_info_elements.h b/src/vnet/flow/ipfix_info_elements.h
new file mode 100644
index 00000000000..5d7e935dabb
--- /dev/null
+++ b/src/vnet/flow/ipfix_info_elements.h
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_ipfix_info_elements_h__
+#define __included_ipfix_info_elements_h__
+
+#define foreach_ipfix_info_element_t \
+_(octetDeltaCount, 1, u64) \
+_(packetDeltaCount, 2, u64) \
+_(deltaFlowCount, 3, u64) \
+_(protocolIdentifier, 4, u8) \
+_(ipClassOfService, 5, u8) \
+_(tcpControlBits, 6, u16) \
+_(sourceTransportPort, 7, u16) \
+_(sourceIPv4Address, 8, ip4_address_t) \
+_(sourceIPv4PrefixLength, 9, u8) \
+_(ingressInterface, 10, u32) \
+_(destinationTransportPort, 11, u16) \
+_(destinationIPv4Address, 12, ip4_address_t) \
+_(destinationIPv4PrefixLength, 13, u8) \
+_(egressInterface, 14, u32) \
+_(ipNextHopIPv4Address, 15, ip4_address_t) \
+_(bgpSourceAsNumber, 16, u32) \
+_(bgpDestinationAsNumber, 17, u32) \
+_(bgpNextHopIPv4Address, 18, ip4_address_t) \
+_(postMCastPacketDeltaCount, 19, u64) \
+_(postMCastOctetDeltaCount, 20, u64) \
+_(flowEndSysUpTime, 21, u32) \
+_(flowStartSysUpTime, 22, u32) \
+_(postOctetDeltaCount, 23, u64) \
+_(postPacketDeltaCount, 24, u64) \
+_(minimumIpTotalLength, 25, u64) \
+_(maximumIpTotalLength, 26, u64) \
+_(sourceIPv6Address, 27, ip6_address_t) \
+_(destinationIPv6Address, 28, ip6_address_t) \
+_(sourceIPv6PrefixLength, 29, u8) \
+_(destinationIPv6PrefixLength, 30, u8) \
+_(flowLabelIPv6, 31, u32) \
+_(icmpTypeCodeIPv4, 32, u16) \
+_(igmpType, 33, u8) \
+_(samplingInterval, 34, u32) \
+_(samplingAlgorithm, 35, u8) \
+_(flowActiveTimeout, 36, u16) \
+_(flowIdleTimeout, 37, u16) \
+_(engineType, 38, u8) \
+_(engineId, 39, u8) \
+_(exportedOctetTotalCount, 40, u64) \
+_(exportedMessageTotalCount, 41, u64) \
+_(exportedFlowRecordTotalCount, 42, u64) \
+_(ipv4RouterSc, 43, ip4_address_t) \
+_(sourceIPv4Prefix, 44, ip4_address_t) \
+_(destinationIPv4Prefix, 45, ip4_address_t) \
+_(mplsTopLabelType, 46, u8) \
+_(mplsTopLabelIPv4Address, 47, ip4_address_t) \
+_(samplerId, 48, u8) \
+_(samplerMode, 49, u8) \
+_(samplerRandomInterval, 50, u32) \
+_(classId, 51, u8) \
+_(minimumTTL, 52, u8) \
+_(maximumTTL, 53, u8) \
+_(fragmentIdentification, 54, u32) \
+_(postIpClassOfService, 55, u8) \
+_(sourceMacAddress, 56, macAddress) \
+_(postDestinationMacAddress, 57, macAddress) \
+_(vlanId, 58, u16) \
+_(postVlanId, 59, u16) \
+_(ipVersion, 60, u8) \
+_(flowDirection, 61, u8) \
+_(ipNextHopIPv6Address, 62, ip6_address_t) \
+_(bgpNextHopIPv6Address, 63, ip6_address_t) \
+_(ipv6ExtensionHeaders, 64, u32) \
+_(mplsTopLabelStackSection, 70, octetArray) \
+_(mplsLabelStackSection2, 71, octetArray) \
+_(mplsLabelStackSection3, 72, octetArray) \
+_(mplsLabelStackSection4, 73, octetArray) \
+_(mplsLabelStackSection5, 74, octetArray) \
+_(mplsLabelStackSection6, 75, octetArray) \
+_(mplsLabelStackSection7, 76, octetArray) \
+_(mplsLabelStackSection8, 77, octetArray) \
+_(mplsLabelStackSection9, 78, octetArray) \
+_(mplsLabelStackSection10, 79, octetArray) \
+_(destinationMacAddress, 80, macAddress) \
+_(postSourceMacAddress, 81, macAddress) \
+_(interfaceName, 82, string) \
+_(interfaceDescription, 83, string) \
+_(samplerName, 84, string) \
+_(octetTotalCount, 85, u64) \
+_(packetTotalCount, 86, u64) \
+_(flagsAndSamplerId, 87, u32) \
+_(fragmentOffset, 88, u16) \
+_(forwardingStatus, 89, u32) \
+_(mplsVpnRouteDistinguisher, 90, octetArray) \
+_(mplsTopLabelPrefixLength, 91, u8) \
+_(srcTrafficIndex, 92, u32) \
+_(dstTrafficIndex, 93, u32) \
+_(applicationDescription, 94, string) \
+_(applicationId, 95, octetArray) \
+_(applicationName, 96, string) \
+_(Assigned, 97, for NetFlow v9 compatibility ) \
+_(postIpDiffServCodePoint, 98, u8) \
+_(multicastReplicationFactor, 99, u32) \
+_(className, 100, string) \
+_(classificationEngineId, 101, u8) \
+_(layer2packetSectionOffset, 102, u16) \
+_(layer2packetSectionSize, 103, u16) \
+_(layer2packetSectionData, 104, octetArray) \
+_(bgpNextAdjacentAsNumber, 128, u32) \
+_(bgpPrevAdjacentAsNumber, 129, u32) \
+_(exporterIPv4Address, 130, ip4_address_t) \
+_(exporterIPv6Address, 131, ip6_address_t) \
+_(droppedOctetDeltaCount, 132, u64) \
+_(droppedPacketDeltaCount, 133, u64) \
+_(droppedOctetTotalCount, 134, u64) \
+_(droppedPacketTotalCount, 135, u64) \
+_(flowEndReason, 136, u8) \
+_(commonPropertiesId, 137, u64) \
+_(observationPointId, 138, u64) \
+_(icmpTypeCodeIPv6, 139, u16) \
+_(mplsTopLabelIPv6Address, 140, ip6_address_t) \
+_(lineCardId, 141, u32) \
+_(portId, 142, u32) \
+_(meteringProcessId, 143, u32) \
+_(exportingProcessId, 144, u32) \
+_(templateId, 145, u16) \
+_(wlanChannelId, 146, u8) \
+_(wlanSSID, 147, string) \
+_(flowId, 148, u64) \
+_(observationDomainId, 149, u32) \
+_(flowStartSeconds, 150, dateTimeSeconds) \
+_(flowEndSeconds, 151, dateTimeSeconds) \
+_(flowStartMilliseconds, 152, dateTimeMilliseconds) \
+_(flowEndMilliseconds, 153, dateTimeMilliseconds) \
+_(flowStartMicroseconds, 154, dateTimeMicroseconds) \
+_(flowEndMicroseconds, 155, dateTimeMicroseconds) \
+_(flowStartNanoseconds, 156, dateTimeNanoseconds) \
+_(flowEndNanoseconds, 157, dateTimeNanoseconds) \
+_(flowStartDeltaMicroseconds, 158, u32) \
+_(flowEndDeltaMicroseconds, 159, u32) \
+_(systemInitTimeMilliseconds, 160, dateTimeMilliseconds) \
+_(flowDurationMilliseconds, 161, u32) \
+_(flowDurationMicroseconds, 162, u32) \
+_(observedFlowTotalCount, 163, u64) \
+_(ignoredPacketTotalCount, 164, u64) \
+_(ignoredOctetTotalCount, 165, u64) \
+_(notSentFlowTotalCount, 166, u64) \
+_(notSentPacketTotalCount, 167, u64) \
+_(notSentOctetTotalCount, 168, u64) \
+_(destinationIPv6Prefix, 169, ip6_address_t) \
+_(sourceIPv6Prefix, 170, ip6_address_t) \
+_(postOctetTotalCount, 171, u64) \
+_(postPacketTotalCount, 172, u64) \
+_(flowKeyIndicator, 173, u64) \
+_(postMCastPacketTotalCount, 174, u64) \
+_(postMCastOctetTotalCount, 175, u64) \
+_(icmpTypeIPv4, 176, u8) \
+_(icmpCodeIPv4, 177, u8) \
+_(icmpTypeIPv6, 178, u8) \
+_(icmpCodeIPv6, 179, u8) \
+_(udpSourcePort, 180, u16) \
+_(udpDestinationPort, 181, u16) \
+_(tcpSourcePort, 182, u16) \
+_(tcpDestinationPort, 183, u16) \
+_(tcpSequenceNumber, 184, u32) \
+_(tcpAcknowledgementNumber, 185, u32) \
+_(tcpWindowSize, 186, u16) \
+_(tcpUrgentPointer, 187, u16) \
+_(tcpHeaderLength, 188, u8) \
+_(ipHeaderLength, 189, u8) \
+_(totalLengthIPv4, 190, u16) \
+_(payloadLengthIPv6, 191, u16) \
+_(ipTTL, 192, u8) \
+_(nextHeaderIPv6, 193, u8) \
+_(mplsPayloadLength, 194, u32) \
+_(ipDiffServCodePoint, 195, u8) \
+_(ipPrecedence, 196, u8) \
+_(fragmentFlags, 197, u8) \
+_(octetDeltaSumOfSquares, 198, u64) \
+_(octetTotalSumOfSquares, 199, u64) \
+_(mplsTopLabelTTL, 200, u8) \
+_(mplsLabelStackLength, 201, u32) \
+_(mplsLabelStackDepth, 202, u32) \
+_(mplsTopLabelExp, 203, u8) \
+_(ipPayloadLength, 204, u32) \
+_(udpMessageLength, 205, u16) \
+_(isMulticast, 206, u8) \
+_(ipv4IHL, 207, u8) \
+_(ipv4Options, 208, u32) \
+_(tcpOptions, 209, u64) \
+_(paddingOctets, 210, octetArray) \
+_(collectorIPv4Address, 211, ip4_address_t) \
+_(collectorIPv6Address, 212, ip6_address_t) \
+_(exportInterface, 213, u32) \
+_(exportProtocolVersion, 214, u8) \
+_(exportTransportProtocol, 215, u8) \
+_(collectorTransportPort, 216, u16) \
+_(exporterTransportPort, 217, u16) \
+_(tcpSynTotalCount, 218, u64) \
+_(tcpFinTotalCount, 219, u64) \
+_(tcpRstTotalCount, 220, u64) \
+_(tcpPshTotalCount, 221, u64) \
+_(tcpAckTotalCount, 222, u64) \
+_(tcpUrgTotalCount, 223, u64) \
+_(ipTotalLength, 224, u64) \
+_(postNATSourceIPv4Address, 225, ip4_address_t) \
+_(postNATDestinationIPv4Address, 226, ip4_address_t) \
+_(postNAPTSourceTransportPort, 227, u16) \
+_(postNAPTDestinationTransportPort, 228, u16) \
+_(natOriginatingAddressRealm, 229, u8) \
+_(natEvent, 230, u8) \
+_(initiatorOctets, 231, u64) \
+_(responderOctets, 232, u64) \
+_(firewallEvent, 233, u8) \
+_(ingressVRFID, 234, u32) \
+_(egressVRFID, 235, u32) \
+_(VRFname, 236, string) \
+_(postMplsTopLabelExp, 237, u8) \
+_(tcpWindowScale, 238, u16) \
+_(biflowDirection, 239, u8) \
+_(ethernetHeaderLength, 240, u8) \
+_(ethernetPayloadLength, 241, u16) \
+_(ethernetTotalLength, 242, u16) \
+_(dot1qVlanId, 243, u16) \
+_(dot1qPriority, 244, u8) \
+_(dot1qCustomerVlanId, 245, u16) \
+_(dot1qCustomerPriority, 246, u8) \
+_(metroEvcId, 247, string) \
+_(metroEvcType, 248, u8) \
+_(pseudoWireId, 249, u32) \
+_(pseudoWireType, 250, u16) \
+_(pseudoWireControlWord, 251, u32) \
+_(ingressPhysicalInterface, 252, u32) \
+_(egressPhysicalInterface, 253, u32) \
+_(postDot1qVlanId, 254, u16) \
+_(postDot1qCustomerVlanId, 255, u16) \
+_(ethernetType, 256, u16) \
+_(postIpPrecedence, 257, u8) \
+_(collectionTimeMilliseconds, 258, dateTimeMilliseconds) \
+_(exportSctpStreamId, 259, u16) \
+_(maxExportSeconds, 260, dateTimeSeconds) \
+_(maxFlowEndSeconds, 261, dateTimeSeconds) \
+_(messageMD5Checksum, 262, octetArray) \
+_(messageScope, 263, u8) \
+_(minExportSeconds, 264, dateTimeSeconds) \
+_(minFlowStartSeconds, 265, dateTimeSeconds) \
+_(opaqueOctets, 266, octetArray) \
+_(sessionScope, 267, u8) \
+_(maxFlowEndMicroseconds, 268, dateTimeMicroseconds) \
+_(maxFlowEndMilliseconds, 269, dateTimeMilliseconds) \
+_(maxFlowEndNanoseconds, 270, dateTimeNanoseconds) \
+_(minFlowStartMicroseconds, 271, dateTimeMicroseconds) \
+_(minFlowStartMilliseconds, 272, dateTimeMilliseconds) \
+_(minFlowStartNanoseconds, 273, dateTimeNanoseconds) \
+_(collectorCertificate, 274, octetArray) \
+_(exporterCertificate, 275, octetArray) \
+_(dataRecordsReliability, 276, boolean) \
+_(observationPointType, 277, u8) \
+_(newConnectionDeltaCount, 278, u32) \
+_(connectionSumDurationSeconds, 279, u64) \
+_(connectionTransactionId, 280, u64) \
+_(postNATSourceIPv6Address, 281, ip6_address_t) \
+_(postNATDestinationIPv6Address, 282, ip6_address_t) \
+_(natPoolId, 283, u32) \
+_(natPoolName, 284, string) \
+_(anonymizationFlags, 285, u16) \
+_(anonymizationTechnique, 286, u16) \
+_(informationElementIndex, 287, u16) \
+_(p2pTechnology, 288, string) \
+_(tunnelTechnology, 289, string) \
+_(encryptedTechnology, 290, string) \
+_(basicList, 291, basicList) \
+_(subTemplateList, 292, subTemplateList) \
+_(subTemplateMultiList, 293, subTemplateMultiList) \
+_(bgpValidityState, 294, u8) \
+_(IPSecSPI, 295, u32) \
+_(greKey, 296, u32) \
+_(natType, 297, u8) \
+_(initiatorPackets, 298, u64) \
+_(responderPackets, 299, u64) \
+_(observationDomainName, 300, string) \
+_(selectionSequenceId, 301, u64) \
+_(selectorId, 302, u64) \
+_(informationElementId, 303, u16) \
+_(selectorAlgorithm, 304, u16) \
+_(samplingPacketInterval, 305, u32) \
+_(samplingPacketSpace, 306, u32) \
+_(samplingTimeInterval, 307, u32) \
+_(samplingTimeSpace, 308, u32) \
+_(samplingSize, 309, u32) \
+_(samplingPopulation, 310, u32) \
+_(samplingProbability, 311, float64) \
+_(dataLinkFrameSize, 312, u16) \
+_(ipHeaderPacketSection, 313, octetArray) \
+_(ipPayloadPacketSection, 314, octetArray) \
+_(dataLinkFrameSection, 315, octetArray) \
+_(mplsLabelStackSection, 316, octetArray) \
+_(mplsPayloadPacketSection, 317, octetArray) \
+_(selectorIdTotalPktsObserved, 318, u64) \
+_(selectorIdTotalPktsSelected, 319, u64) \
+_(absoluteError, 320, float64) \
+_(relativeError, 321, float64) \
+_(observationTimeSeconds, 322, dateTimeSeconds) \
+_(observationTimeMilliseconds, 323, dateTimeMilliseconds) \
+_(observationTimeMicroseconds, 324, dateTimeMicroseconds) \
+_(observationTimeNanoseconds, 325, dateTimeNanoseconds) \
+_(digestHashValue, 326, u64) \
+_(hashIPPayloadOffset, 327, u64) \
+_(hashIPPayloadSize, 328, u64) \
+_(hashOutputRangeMin, 329, u64) \
+_(hashOutputRangeMax, 330, u64) \
+_(hashSelectedRangeMin, 331, u64) \
+_(hashSelectedRangeMax, 332, u64) \
+_(hashDigestOutput, 333, boolean) \
+_(hashInitialiserValue, 334, u64) \
+_(selectorName, 335, string) \
+_(upperCILimit, 336, float64) \
+_(lowerCILimit, 337, float64) \
+_(confidenceLevel, 338, float64) \
+_(informationElementDataType, 339, u8) \
+_(informationElementDescription, 340, string) \
+_(informationElementName, 341, string) \
+_(informationElementRangeBegin, 342, u64) \
+_(informationElementRangeEnd, 343, u64) \
+_(informationElementSemantics, 344, u8) \
+_(informationElementUnits, 345, u16) \
+_(privateEnterpriseNumber, 346, u32) \
+_(virtualStationInterfaceId, 347, octetArray) \
+_(virtualStationInterfaceName, 348, string) \
+_(virtualStationUUID, 349, octetArray) \
+_(virtualStationName, 350, string) \
+_(layer2SegmentId, 351, u64) \
+_(layer2OctetDeltaCount, 352, u64) \
+_(layer2OctetTotalCount, 353, u64) \
+_(ingressUnicastPacketTotalCount, 354, u64) \
+_(ingressMulticastPacketTotalCount, 355, u64) \
+_(ingressBroadcastPacketTotalCount, 356, u64) \
+_(egressUnicastPacketTotalCount, 357, u64) \
+_(egressBroadcastPacketTotalCount, 358, u64) \
+_(monitoringIntervalStartMilliSeconds, 359, dateTimeMilliseconds) \
+_(monitoringIntervalEndMilliSeconds, 360, dateTimeMilliseconds) \
+_(portRangeStart, 361, u16) \
+_(portRangeEnd, 362, u16) \
+_(portRangeStepSize, 363, u16) \
+_(portRangeNumPorts, 364, u16) \
+_(staMacAddress, 365, macAddress) \
+_(staIPv4Address, 366, ip4_address_t) \
+_(wtpMacAddress, 367, macAddress ) \
+_(ingressInterfaceType, 368, u32) \
+_(egressInterfaceType, 369, u32) \
+_(rtpSequenceNumber, 370, u16) \
+_(userName, 371, string) \
+_(applicationCategoryName, 372, string) \
+_(applicationSubCategoryName, 373, string) \
+_(applicationGroupName, 374, string) \
+_(originalFlowsPresent, 375, u64) \
+_(originalFlowsInitiated, 376, u64) \
+_(originalFlowsCompleted, 377, u64) \
+_(distinctCountOfSourceIPAddress, 378, u64) \
+_(distinctCountOfDestinationIPAddress, 379, u64) \
+_(distinctCountOfSourceIPv4Address, 380, u32) \
+_(distinctCountOfDestinationIPv4Address, 381, u32) \
+_(distinctCountOfSourceIPv6Address, 382, u64) \
+_(distinctCountOfDestinationIPv6Address, 383, u64) \
+_(valueDistributionMethod, 384, u8) \
+_(rfc3550JitterMilliseconds, 385, u32) \
+_(rfc3550JitterMicroseconds, 386, u32) \
+_(rfc3550JitterNanoseconds, 387, u32) \
+_(dot1qDEI, 388, boolean) \
+_(dot1qCustomerDEI, 389, boolean) \
+_(flowSelectorAlgorithm, 390, u16) \
+_(flowSelectedOctetDeltaCount, 391, u64) \
+_(flowSelectedPacketDeltaCount, 392, u64) \
+_(flowSelectedFlowDeltaCount, 393, u64) \
+_(selectorIDTotalFlowsObserved, 394, u64) \
+_(selectorIDTotalFlowsSelected, 395, u64) \
+_(samplingFlowInterval, 396, u64) \
+_(samplingFlowSpacing, 397, u64) \
+_(flowSamplingTimeInterval, 398, u64) \
+_(flowSamplingTimeSpacing, 399, u64) \
+_(hashFlowDomain, 400, u16) \
+_(transportOctetDeltaCount, 401, u64) \
+_(transportPacketDeltaCount, 402, u64) \
+_(originalExporterIPv4Address, 403, ip4_address_t) \
+_(originalExporterIPv6Address, 404, ip6_address_t) \
+_(originalObservationDomainId, 405, u32) \
+_(intermediateProcessId, 406, u32) \
+_(ignoredDataRecordTotalCount, 407, u64) \
+_(dataLinkFrameType, 408, u16) \
+_(sectionOffset, 409, u16) \
+_(sectionExportedOctets, 410, u16) \
+_(dot1qServiceInstanceTag, 411, octetArray) \
+_(dot1qServiceInstanceId, 412, u32) \
+_(dot1qServiceInstancePriority, 413, u8) \
+_(dot1qCustomerSourceMacAddress, 414, macAddress) \
+_(dot1qCustomerDestinationMacAddress, 415, macAddress) \
+_(postLayer2OctetDeltaCount, 417, u64) \
+_(postMCastLayer2OctetDeltaCount, 418, u64) \
+_(postLayer2OctetTotalCount, 420, u64) \
+_(postMCastLayer2OctetTotalCount, 421, u64) \
+_(minimumLayer2TotalLength, 422, u64) \
+_(maximumLayer2TotalLength, 423, u64) \
+_(droppedLayer2OctetDeltaCount, 424, u64) \
+_(droppedLayer2OctetTotalCount, 425, u64) \
+_(ignoredLayer2OctetTotalCount, 426, u64) \
+_(notSentLayer2OctetTotalCount, 427, u64) \
+_(layer2OctetDeltaSumOfSquares, 428, u64) \
+_(layer2OctetTotalSumOfSquares, 429, u64) \
+_(layer2FrameDeltaCount, 430, u64) \
+_(layer2FrameTotalCount, 431, u64) \
+_(pseudoWireDestinationIPv4Address, 432, ip4_address_t) \
+_(ignoredLayer2FrameTotalCount, 433, u64)
+
+typedef enum {
+#define _(n,v,t) n = v,
+ foreach_ipfix_info_element_t
+#undef _
+} ipfix_info_element_id_t;
+
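+/* With the enum expansion above (_(n,v,t) -> n = v,) each element name
+ * becomes its IANA element id, e.g. octetDeltaCount = 1 and
+ * packetTotalCount = 86; the third column documents the abstract data
+ * type and is unused by this expansion. */
+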
+#endif /* __included_ipfix_info_elements_h__ */
diff --git a/src/vnet/flow/ipfix_packet.h b/src/vnet/flow/ipfix_packet.h
new file mode 100644
index 00000000000..329796191de
--- /dev/null
+++ b/src/vnet/flow/ipfix_packet.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_ipfix_packet_h__
+#define __included_ipfix_packet_h__
+
+#include <vnet/flow/ipfix_info_elements.h>
+
+/* From RFC-7011:
+ * https://tools.ietf.org/html/rfc7011
+ */
+
+typedef struct {
+ u32 version_length;
+ u32 export_time;
+ u32 sequence_number;
+ u32 domain_id;
+} ipfix_message_header_t;
+
+static inline u32 version_length (u16 length)
+{
+ return clib_host_to_net_u32 (0x000a0000 | length);
+}
+
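+/* Worked example: for a 56-octet message, version_length (56) packs
+ * IPFIX version 10 into the high 16 bits and the length into the low
+ * 16 bits, i.e. 0x000a0038 before the host-to-net byte swap. */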
+
+/*
+ * The Field Specifier format is shown in Figure G.
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |E| Information Element ident. | Field Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Enterprise Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Figure G: Field Specifier Format
+ *
+ * Where:
+ *
+ * E
+ *
+ * Enterprise bit. This is the first bit of the Field Specifier. If
+ * this bit is zero, the Information Element identifier identifies an
+ * Information Element in [IANA-IPFIX], and the four-octet Enterprise
+ * Number field MUST NOT be present. If this bit is one, the
+ * Information Element identifier identifies an enterprise-specific
+ * Information Element, and the Enterprise Number field MUST be
+ * present.
+ */
+
+typedef struct {
+ u32 e_id_length;
+ u32 enterprise;
+} ipfix_enterprise_field_specifier_t;
+
+typedef struct {
+ u32 e_id_length;
+} ipfix_field_specifier_t;
+
+static inline u32 ipfix_e_id_length (int e, u16 id, u16 length)
+{
+ u32 value;
+ value = (e<<31) | ((id&0x7FFF) <<16) | length;
+ return clib_host_to_net_u32 (value);
+}
+
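+/* Worked example: ipfix_e_id_length (0, sourceIPv4Address, 4), where
+ * sourceIPv4Address is 8 per ipfix_info_elements.h, packs to
+ * 0x00080004 before the byte swap: E-bit clear, element id 8,
+ * field length 4. */
+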
+/*
+ * Every Set contains a common header. This header is defined in
+ * Figure I.
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Set ID | Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Figure I: Set Header Format
+ *
+ * Each Set Header field is exported in network format. The fields are
+ * defined as follows:
+ *
+ * Set ID
+ *
+ * Identifies the Set. A value of 2 is reserved for Template Sets.
+ * A value of 3 is reserved for Options Template Sets. Values from 4
+ * to 255 are reserved for future use. Values 256 and above are used
+ * for Data Sets. The Set ID values of 0 and 1 are not used, for
+ * historical reasons [RFC3954].
+ *
+ * Length
+ *
+ * Total length of the Set, in octets, including the Set Header, all
+ * records, and the optional padding. Because an individual Set MAY
+ * contain multiple records, the Length value MUST be used to
+ * determine the position of the next Set.
+ */
+
+typedef struct {
+ u32 set_id_length;
+} ipfix_set_header_t;
+
+static inline u32 ipfix_set_id_length (u16 set_id, u16 length)
+{
+ return clib_host_to_net_u32 ((set_id<<16) | length);
+}
+
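+/* Worked example: a Template Set uses set_id 2 (see the RFC excerpt
+ * below), so ipfix_set_id_length (2, 20) for a 20-octet set yields
+ * 0x00020014 before the byte swap. */
+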
+/*
+ * The format of the Template Record is shown in Figure J. It consists
+ * of a Template Record Header and one or more Field Specifiers. Field
+ * Specifiers are defined in Figure G above.
+ *
+ * +--------------------------------------------------+
+ * | Template Record Header |
+ * +--------------------------------------------------+
+ * | Field Specifier |
+ * +--------------------------------------------------+
+ * | Field Specifier |
+ * +--------------------------------------------------+
+ * ...
+ * +--------------------------------------------------+
+ * | Field Specifier |
+ * +--------------------------------------------------+
+ *
+ * Figure J: Template Record Format
+ *
+ * The format of the Template Record Header is shown in Figure K.
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Template ID (> 255) | Field Count |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Figure K: Template Record Header Format
+ *
+ * The Template Record Header Field definitions are as follows:
+ *
+ * Template ID
+ *
+ * Each Template Record is given a unique Template ID in the range
+ * 256 to 65535. This uniqueness is local to the Transport Session
+ * and Observation Domain that generated the Template ID. Since
+ * Template IDs are used as Set IDs in the Sets they describe (see
+ * Section 3.4.3), values 0-255 are reserved for special Set types
+ * (e.g., Template Sets themselves), and Templates and Options
+ * Templates (see Section 3.4.2) cannot share Template IDs within a
+ * Transport Session and Observation Domain. There are no
+ * constraints regarding the order of the Template ID allocation. As
+ * Exporting Processes are free to allocate Template IDs as they see
+ * fit, Collecting Processes MUST NOT assume incremental Template
+ * IDs, or anything about the contents of a Template based on its
+ * Template ID alone.
+ *
+ * Field Count
+ *
+ * Number of fields in this Template Record.
+ */
+
+typedef struct {
+ u32 id_count;
+} ipfix_template_header_t;
+
+static inline u32 ipfix_id_count (u16 id, u16 count)
+{
+ return clib_host_to_net_u32 ((id<<16) | count);
+}
+
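+/* Worked example: ipfix_id_count (256, 5) yields 0x01000005 before the
+ * byte swap: template id 256 (the lowest id outside the reserved 0-255
+ * range) and a field count of 5. */
+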
+/* Template packet */
+typedef struct {
+ ipfix_message_header_t h;
+ ipfix_set_header_t s;
+ ipfix_template_header_t t;
+ ipfix_field_specifier_t fields[0];
+} ipfix_template_packet_t;
+
+#endif /* __included_ipfix_packet_h__ */
diff --git a/src/vnet/global_funcs.h b/src/vnet/global_funcs.h
new file mode 100644
index 00000000000..92a5c04de1f
--- /dev/null
+++ b/src/vnet/global_funcs.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * global_funcs.h: global data structure access functions
+ */
+
+#ifndef included_vnet_global_funcs_h_
+#define included_vnet_global_funcs_h_
+
+vnet_main_t *vnet_get_main (void);
+
+#endif /* included_vnet_global_funcs_h_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/gre/error.def b/src/vnet/gre/error.def
new file mode 100644
index 00000000000..161ecc1d874
--- /dev/null
+++ b/src/vnet/gre/error.def
@@ -0,0 +1,23 @@
+/*
+ * gre_error.def: gre errors
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+gre_error (NONE, "no error")
+gre_error (UNKNOWN_PROTOCOL, "unknown protocol")
+gre_error (UNSUPPORTED_VERSION, "unsupported version")
+gre_error (PKTS_DECAP, "GRE input packets decapsulated")
+gre_error (PKTS_ENCAP, "GRE output packets encapsulated")
+gre_error (NO_SUCH_TUNNEL, "GRE input packets dropped due to missing tunnel")
diff --git a/src/vnet/gre/gre.api b/src/vnet/gre/gre.api
new file mode 100644
index 00000000000..28f6dbc94fa
--- /dev/null
+++ b/src/vnet/gre/gre.api
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+define gre_add_del_tunnel
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 is_ipv6;
+ u8 teb;
+ u8 src_address[16];
+ u8 dst_address[16];
+ u32 outer_fib_id;
+};
+
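+/* For an IPv4 tunnel (is_ipv6 = 0) only the first 4 octets of
+ * src_address/dst_address are significant; teb = 1 requests transparent
+ * Ethernet bridging encapsulation instead of plain L3. */
+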
+define gre_add_del_tunnel_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+define gre_tunnel_dump
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+define gre_tunnel_details
+{
+ u32 context;
+ u32 sw_if_index;
+ u8 is_ipv6;
+ u8 teb;
+ u8 src_address[16];
+ u8 dst_address[16];
+ u32 outer_fib_id;
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/gre/gre.c b/src/vnet/gre/gre.c
new file mode 100644
index 00000000000..0faed13eb29
--- /dev/null
+++ b/src/vnet/gre/gre.c
@@ -0,0 +1,455 @@
+/*
+ * gre.c: gre
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/gre/gre.h>
+#include <vnet/adj/adj_midchain.h>
+
+gre_main_t gre_main;
+
+typedef struct {
+ union {
+ ip4_and_gre_header_t ip4_and_gre;
+ u64 as_u64[3];
+ };
+} ip4_and_gre_union_t;
+
+
+/* Packet trace structure */
+typedef struct {
+ /* Tunnel-id / index in tunnel vector */
+ u32 tunnel_id;
+
+ /* pkt length */
+ u32 length;
+
+ /* tunnel ip4 addresses */
+ ip4_address_t src;
+ ip4_address_t dst;
+} gre_tx_trace_t;
+
+u8 * format_gre_tx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gre_tx_trace_t * t = va_arg (*args, gre_tx_trace_t *);
+
+ s = format (s, "GRE: tunnel %d len %d src %U dst %U",
+ t->tunnel_id, clib_net_to_host_u16 (t->length),
+ format_ip4_address, &t->src.as_u8,
+ format_ip4_address, &t->dst.as_u8);
+ return s;
+}
+
+u8 * format_gre_protocol (u8 * s, va_list * args)
+{
+ gre_protocol_t p = va_arg (*args, u32);
+ gre_main_t * gm = &gre_main;
+ gre_protocol_info_t * pi = gre_get_protocol_info (gm, p);
+
+ if (pi)
+ s = format (s, "%s", pi->name);
+ else
+ s = format (s, "0x%04x", p);
+
+ return s;
+}
+
+u8 * format_gre_header_with_length (u8 * s, va_list * args)
+{
+ gre_main_t * gm = &gre_main;
+ gre_header_t * h = va_arg (*args, gre_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+ gre_protocol_t p = clib_net_to_host_u16 (h->protocol);
+ uword indent, header_bytes;
+
+ header_bytes = sizeof (h[0]);
+ if (max_header_bytes != 0 && header_bytes > max_header_bytes)
+ return format (s, "gre header truncated");
+
+ indent = format_get_indent (s);
+
+ s = format (s, "GRE %U", format_gre_protocol, p);
+
+ if (max_header_bytes != 0 && header_bytes < max_header_bytes)
+ {
+ gre_protocol_info_t * pi = gre_get_protocol_info (gm, p);
+ vlib_node_t * node = vlib_get_node (gm->vlib_main, pi->node_index);
+ if (node->format_buffer)
+ s = format (s, "\n%U%U",
+ format_white_space, indent,
+ node->format_buffer, (void *) (h + 1),
+ max_header_bytes - header_bytes);
+ }
+
+ return s;
+}
+
+u8 * format_gre_header (u8 * s, va_list * args)
+{
+ gre_header_t * h = va_arg (*args, gre_header_t *);
+ return format (s, "%U", format_gre_header_with_length, h, 0);
+}
+
+/* Returns gre protocol as an int in host byte order. */
+uword
+unformat_gre_protocol_host_byte_order (unformat_input_t * input,
+ va_list * args)
+{
+ u16 * result = va_arg (*args, u16 *);
+ gre_main_t * gm = &gre_main;
+ int i;
+
+ /* Named type. */
+ if (unformat_user (input, unformat_vlib_number_by_name,
+ gm->protocol_info_by_name, &i))
+ {
+ gre_protocol_info_t * pi = vec_elt_at_index (gm->protocol_infos, i);
+ *result = pi->protocol;
+ return 1;
+ }
+
+ return 0;
+}
+
+uword
+unformat_gre_protocol_net_byte_order (unformat_input_t * input,
+ va_list * args)
+{
+ u16 * result = va_arg (*args, u16 *);
+ if (! unformat_user (input, unformat_gre_protocol_host_byte_order, result))
+ return 0;
+ *result = clib_host_to_net_u16 ((u16) *result);
+ return 1;
+}
+
+uword
+unformat_gre_header (unformat_input_t * input, va_list * args)
+{
+ u8 ** result = va_arg (*args, u8 **);
+ gre_header_t _h, * h = &_h;
+ u16 p;
+
+ if (! unformat (input, "%U",
+ unformat_gre_protocol_host_byte_order, &p))
+ return 0;
+
+ h->protocol = clib_host_to_net_u16 (p);
+
+ /* Add header to result. */
+ {
+ void * p;
+ u32 n_bytes = sizeof (h[0]);
+
+ vec_add2 (*result, p, n_bytes);
+ clib_memcpy (p, h, n_bytes);
+ }
+
+ return 1;
+}
+
+static int
+gre_proto_from_vnet_link (vnet_link_t link)
+{
+ switch (link)
+ {
+ case VNET_LINK_IP4:
+ return (GRE_PROTOCOL_ip4);
+ case VNET_LINK_IP6:
+ return (GRE_PROTOCOL_ip6);
+ case VNET_LINK_MPLS:
+ return (GRE_PROTOCOL_mpls_unicast);
+ case VNET_LINK_ETHERNET:
+ return (GRE_PROTOCOL_teb);
+ case VNET_LINK_ARP:
+ return (GRE_PROTOCOL_arp);
+ }
+ ASSERT(0);
+ return (GRE_PROTOCOL_ip4);
+}
+
+static u8*
+gre_build_rewrite (vnet_main_t * vnm,
+ u32 sw_if_index,
+ vnet_link_t link_type,
+ const void *dst_address)
+{
+ gre_main_t * gm = &gre_main;
+ ip4_and_gre_header_t * h;
+ u8* rewrite = NULL;
+ gre_tunnel_t *t;
+ u32 ti;
+
+ ti = gm->tunnel_index_by_sw_if_index[sw_if_index];
+
+ if (~0 == ti)
+ /* not one of ours */
+ return (0);
+
+ t = pool_elt_at_index(gm->tunnels, ti);
+
+ vec_validate(rewrite, sizeof(*h)-1);
+ h = (ip4_and_gre_header_t*)rewrite;
+ h->gre.protocol = clib_host_to_net_u16(gre_proto_from_vnet_link(link_type));
+
+ h->ip4.ip_version_and_header_length = 0x45;
+ h->ip4.ttl = 254;
+ h->ip4.protocol = IP_PROTOCOL_GRE;
+ /* fixup ip4 header length and checksum after-the-fact */
+ h->ip4.src_address.as_u32 = t->tunnel_src.as_u32;
+ h->ip4.dst_address.as_u32 = t->tunnel_dst.as_u32;
+ h->ip4.checksum = ip4_header_checksum (&h->ip4);
+
+ return (rewrite);
+}
+
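+/* The rewrite built above is a 24-octet ip4_and_gre_header_t (see the
+ * as_u64[3] union at the top of this file): a 20-octet IPv4 header with
+ * protocol 47 (GRE) followed by a 4-octet GRE header whose protocol
+ * field carries an EtherType-style value, e.g. 0x0800 for VNET_LINK_IP4.
+ * The IPv4 length and checksum are finalized per-packet in gre_fixup()
+ * below. */
+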
+void
+gre_fixup (vlib_main_t *vm,
+ ip_adjacency_t *adj,
+ vlib_buffer_t *b0)
+{
+ ip4_header_t * ip0;
+
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* Fixup the checksum and len fields in the GRE tunnel encap
+ * that was applied at the midchain node */
+ ip0->length = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+ ip0->checksum = ip4_header_checksum (ip0);
+}
+
+void
+gre_update_adj (vnet_main_t * vnm,
+ u32 sw_if_index,
+ adj_index_t ai)
+{
+ adj_nbr_midchain_update_rewrite (ai, gre_fixup,
+ ADJ_MIDCHAIN_FLAG_NONE,
+ gre_build_rewrite(vnm, sw_if_index,
+ adj_get_link_type(ai),
+ NULL));
+
+ gre_tunnel_stack(ai);
+}
+
+/**
+ * @brief TX function. Only called for L2 traffic; L3 traffic uses the adj-midchains.
+ */
+static uword
+gre_interface_tx_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ gre_main_t * gm = &gre_main;
+ u32 next_index;
+ u32 * from, * to_next, n_left_from, n_left_to_next;
+ vnet_interface_output_runtime_t * rd = (void *) node->runtime_data;
+ const gre_tunnel_t *gt = pool_elt_at_index (gm->tunnels, rd->dev_instance);
+
+ /* Vector of buffer / pkt indices we're supposed to process */
+ from = vlib_frame_vector_args (frame);
+
+ /* Number of buffers / pkts */
+ n_left_from = frame->n_vectors;
+
+ /* Speculatively send the first buffer to the last disposition we used */
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ /* set up to enqueue to our disposition with index = next_index */
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /*
+ * FIXME DUAL LOOP
+ */
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t * b0;
+ u32 bi0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer(vm, bi0);
+
+ vnet_buffer(b0)->ip.adj_index[VLIB_TX] = gt->l2_adj_index;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ gre_tx_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->tunnel_id = gt - gm->tunnels;
+ tr->length = vlib_buffer_length_in_chain (vm, b0);
+ tr->src.as_u32 = gt->tunnel_src.as_u32;
+ tr->dst.as_u32 = gt->tunnel_dst.as_u32;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, gt->l2_tx_arc);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, gre_input_node.index,
+ GRE_ERROR_PKTS_ENCAP, frame->n_vectors);
+
+ return frame->n_vectors;
+}
+
+static uword
+gre_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gre_interface_tx_inline (vm, node, frame));
+}
+
+static uword
+gre_teb_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gre_interface_tx_inline (vm, node, frame));
+}
+
+static u8 * format_gre_tunnel_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, "gre%d", dev_instance);
+}
+
+static u8 * format_gre_tunnel_teb_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, "teb-gre%d", dev_instance);
+}
+
+static u8 * format_gre_device (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ CLIB_UNUSED (int verbose) = va_arg (*args, int);
+
+ s = format (s, "GRE tunnel: id %d\n", dev_instance);
+ return s;
+}
+
+VNET_DEVICE_CLASS (gre_device_class) = {
+ .name = "GRE tunnel device",
+ .format_device_name = format_gre_tunnel_name,
+ .format_device = format_gre_device,
+ .format_tx_trace = format_gre_tx_trace,
+ .tx_function = gre_interface_tx,
+ .admin_up_down_function = gre_interface_admin_up_down,
+#ifdef SOON
+ .clear_counters = 0,
+#endif
+};
+
+VLIB_DEVICE_TX_FUNCTION_MULTIARCH (gre_device_class,
+ gre_interface_tx)
+
+VNET_DEVICE_CLASS (gre_device_teb_class) = {
+ .name = "GRE TEB tunnel device",
+ .format_device_name = format_gre_tunnel_teb_name,
+ .format_device = format_gre_device,
+ .format_tx_trace = format_gre_tx_trace,
+ .tx_function = gre_teb_interface_tx,
+ .admin_up_down_function = gre_interface_admin_up_down,
+#ifdef SOON
+ .clear_counters = 0,
+#endif
+};
+
+VLIB_DEVICE_TX_FUNCTION_MULTIARCH (gre_device_teb_class,
+ gre_teb_interface_tx)
+
+VNET_HW_INTERFACE_CLASS (gre_hw_interface_class) = {
+ .name = "GRE",
+ .format_header = format_gre_header_with_length,
+ .unformat_header = unformat_gre_header,
+ .build_rewrite = gre_build_rewrite,
+ .update_adjacency = gre_update_adj,
+ .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
+};
+
+static void add_protocol (gre_main_t * gm,
+ gre_protocol_t protocol,
+ char * protocol_name)
+{
+ gre_protocol_info_t * pi;
+ u32 i;
+
+ vec_add2 (gm->protocol_infos, pi, 1);
+ i = pi - gm->protocol_infos;
+
+ pi->name = protocol_name;
+ pi->protocol = protocol;
+ pi->next_index = pi->node_index = ~0;
+
+ hash_set (gm->protocol_info_by_protocol, protocol, i);
+ hash_set_mem (gm->protocol_info_by_name, pi->name, i);
+}
+
+static clib_error_t * gre_init (vlib_main_t * vm)
+{
+ gre_main_t * gm = &gre_main;
+ clib_error_t * error;
+ ip_main_t * im = &ip_main;
+ ip_protocol_info_t * pi;
+
+ memset (gm, 0, sizeof (gm[0]));
+ gm->vlib_main = vm;
+ gm->vnet_main = vnet_get_main();
+
+ if ((error = vlib_call_init_function (vm, ip_main_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, ip4_lookup_init)))
+ return error;
+
+ /* Set up the ip packet generator */
+ pi = ip_get_protocol_info (im, IP_PROTOCOL_GRE);
+ pi->format_header = format_gre_header;
+ pi->unformat_pg_edit = unformat_pg_gre_header;
+
+ gm->protocol_info_by_name = hash_create_string (0, sizeof (uword));
+ gm->protocol_info_by_protocol = hash_create (0, sizeof (uword));
+ gm->tunnel_by_key = hash_create (0, sizeof (uword));
+
+#define _(n,s) add_protocol (gm, GRE_PROTOCOL_##s, #s);
+ foreach_gre_protocol
+#undef _
+
+ return vlib_call_init_function (vm, gre_input_init);
+}
+
+VLIB_INIT_FUNCTION (gre_init);
+
+gre_main_t * gre_get_main (vlib_main_t * vm)
+{
+ vlib_call_init_function (vm, gre_init);
+ return &gre_main;
+}
+
diff --git a/src/vnet/gre/gre.h b/src/vnet/gre/gre.h
new file mode 100644
index 00000000000..b6544b9b737
--- /dev/null
+++ b/src/vnet/gre/gre.h
@@ -0,0 +1,235 @@
+/*
+ * gre.h: types/functions for gre.
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_gre_h
+#define included_gre_h
+
+#include <vnet/vnet.h>
+#include <vnet/gre/packet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ip/ip4.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ip/format.h>
+#include <vnet/adj/adj_types.h>
+
+extern vnet_hw_interface_class_t gre_hw_interface_class;
+
+typedef enum {
+#define gre_error(n,s) GRE_ERROR_##n,
+#include <vnet/gre/error.def>
+#undef gre_error
+ GRE_N_ERROR,
+} gre_error_t;
+
+/**
+ * A GRE payload protocol registration
+ */
+typedef struct {
+ /** Name (a c string). */
+ char * name;
+
+ /** GRE protocol type in host byte order. */
+ gre_protocol_t protocol;
+
+ /** Node which handles this type. */
+ u32 node_index;
+
+ /** Next index for this type. */
+ u32 next_index;
+} gre_protocol_info_t;
+
+/**
+ * @brief The GRE tunnel type
+ */
+typedef enum gre_tunnel_type_t_
+{
+ /**
+ * L3 GRE (i.e. this tunnel is in L3 mode)
+ */
+ GRE_TUNNEL_TYPE_L3,
+ /**
+ * Transparent Ethernet Bridging - the tunnel is in L2 mode
+ */
+ GRE_TUNNEL_TYPE_TEB,
+} gre_tunnel_type_t;
+
+#define GRE_TUNNEL_TYPE_NAMES { \
+ [GRE_TUNNEL_TYPE_L3] = "L3", \
+ [GRE_TUNNEL_TYPE_TEB] = "TEB", \
+}
+
+#define GRE_TUNNEL_N_TYPES ((gre_tunnel_type_t)GRE_TUNNEL_TYPE_TEB+1)
+
+/**
+ * @brief A representation of a GRE tunnel
+ */
+typedef struct {
+ /**
+ * Linkage into the FIB object graph
+ */
+ fib_node_t node;
+
+ /**
+ * The tunnel's source/local address
+ */
+ ip4_address_t tunnel_src;
+ /**
+ * The tunnel's destination/remote address
+ */
+ ip4_address_t tunnel_dst;
+ /**
+ * The FIB in which the src/dst addresses are present
+ */
+ u32 outer_fib_index;
+ u32 hw_if_index;
+ u32 sw_if_index;
+ gre_tunnel_type_t type;
+
+ /**
+ * The FIB entry sourced by the tunnel for its destination prefix
+ */
+ fib_node_index_t fib_entry_index;
+
+ /**
+ * The tunnel is a child of the FIB entry for its destination. This is
+ * so it receives updates when the forwarding information for that entry
+ * changes.
+ * The tunnel's sibling index on the FIB entry's dependency list.
+ */
+ u32 sibling_index;
+
+ /**
+ * on an L2 tunnel this is the VLIB arc from the L2-tx to the l2-midchain
+ */
+ u32 l2_tx_arc;
+
+ /**
+ * an L2 tunnel always requires an L2 midchain; cached here for the DP.
+ */
+ adj_index_t l2_adj_index;
+} gre_tunnel_t;
+
+/**
+ * @brief GRE related global data
+ */
+typedef struct {
+ /**
+ * pool of tunnel instances
+ */
+ gre_tunnel_t *tunnels;
+
+ /**
+ * GRE payload protocol registrations
+ */
+ gre_protocol_info_t * protocol_infos;
+
+ /**
+ * Hash tables mapping name/protocol to protocol info index.
+ */
+ uword * protocol_info_by_name, * protocol_info_by_protocol;
+ /**
+ * Hash mapping src/dst addr pair to tunnel
+ */
+ uword * tunnel_by_key;
+
+ /**
+ * Free vlib hw_if_indices.
+ * A free list per tunnel type, since the interfaces created are of
+ * different types and we cannot change an interface's type.
+ */
+ u32 * free_gre_tunnel_hw_if_indices[GRE_TUNNEL_N_TYPES];
+
+ /**
+ * Mapping from sw_if_index to tunnel index
+ */
+ u32 * tunnel_index_by_sw_if_index;
+
+ /* convenience */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} gre_main_t;
+
+/**
+ * @brief IPv4 and GRE header.
+ */
+typedef CLIB_PACKED (struct {
+ ip4_header_t ip4;
+ gre_header_t gre;
+}) ip4_and_gre_header_t;
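+
+/*
+ * Size sketch: the packed encap is 24 bytes (20-byte ip4_header_t plus
+ * 4-byte gre_header_t). An illustrative plain-C compile-time check, not
+ * part of the original source:
+ *
+ * typedef char gre_encap_size_check
+ * [sizeof (ip4_and_gre_header_t) == 24 ? 1 : -1];
+ */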
+
+always_inline gre_protocol_info_t *
+gre_get_protocol_info (gre_main_t * em, gre_protocol_t protocol)
+{
+ uword * p = hash_get (em->protocol_info_by_protocol, protocol);
+ return p ? vec_elt_at_index (em->protocol_infos, p[0]) : 0;
+}
+
+gre_main_t gre_main;
+
+/* Register given node index to take input for given gre type. */
+void
+gre_register_input_type (vlib_main_t * vm,
+ gre_protocol_t protocol,
+ u32 node_index);
+
+extern clib_error_t * gre_interface_admin_up_down (vnet_main_t * vnm,
+ u32 hw_if_index,
+ u32 flags);
+
+extern void gre_tunnel_stack (adj_index_t ai);
+extern void gre_update_adj (vnet_main_t * vnm,
+ u32 sw_if_index,
+ adj_index_t ai);
+
+format_function_t format_gre_protocol;
+format_function_t format_gre_header;
+format_function_t format_gre_header_with_length;
+
+extern vlib_node_registration_t gre_input_node;
+extern vnet_device_class_t gre_device_class;
+extern vnet_device_class_t gre_device_teb_class;
+
+/* Parse gre protocol as 0xXXXX or protocol name.
+ In either host or network byte order. */
+unformat_function_t unformat_gre_protocol_host_byte_order;
+unformat_function_t unformat_gre_protocol_net_byte_order;
+
+/* Parse gre header. */
+unformat_function_t unformat_gre_header;
+unformat_function_t unformat_pg_gre_header;
+
+void
+gre_register_input_protocol (vlib_main_t * vm,
+ gre_protocol_t protocol,
+ u32 node_index);
+
+/* manually added to the interface output node in gre.c */
+#define GRE_OUTPUT_NEXT_LOOKUP 1
+
+typedef struct {
+ u8 is_add;
+
+ ip4_address_t src, dst;
+ u32 outer_fib_id;
+ u8 teb;
+} vnet_gre_add_del_tunnel_args_t;
+
+int vnet_gre_add_del_tunnel
+ (vnet_gre_add_del_tunnel_args_t *a, u32 * sw_if_indexp);
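+
+/*
+ * Programmatic usage sketch (addresses illustrative; assumes outer FIB
+ * table 0 exists):
+ *
+ * vnet_gre_add_del_tunnel_args_t a = {
+ * .is_add = 1, .teb = 0, .outer_fib_id = 0,
+ * };
+ * u32 sw_if_index;
+ * a.src.as_u32 = clib_host_to_net_u32 (0x0a000001); (10.0.0.1)
+ * a.dst.as_u32 = clib_host_to_net_u32 (0x0a000002); (10.0.0.2)
+ * int rv = vnet_gre_add_del_tunnel (&a, &sw_if_index);
+ */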
+
+#endif /* included_gre_h */
diff --git a/src/vnet/gre/gre_api.c b/src/vnet/gre/gre_api.c
new file mode 100644
index 00000000000..333838c06ad
--- /dev/null
+++ b/src/vnet/gre/gre_api.c
@@ -0,0 +1,204 @@
+/*
+ *------------------------------------------------------------------
+ * gre_api.c - gre api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+
+#include <vnet/gre/gre.h>
+#include <vnet/fib/fib_table.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(GRE_ADD_DEL_TUNNEL, gre_add_del_tunnel) \
+_(GRE_TUNNEL_DUMP, gre_tunnel_dump)
+
+static void vl_api_gre_add_del_tunnel_t_handler
+ (vl_api_gre_add_del_tunnel_t * mp)
+{
+ vl_api_gre_add_del_tunnel_reply_t *rmp;
+ int rv = 0;
+ vnet_gre_add_del_tunnel_args_t _a, *a = &_a;
+ u32 outer_fib_id;
+ uword *p;
+ ip4_main_t *im = &ip4_main;
+ u32 sw_if_index = ~0;
+
+ p = hash_get (im->fib_index_by_table_id, ntohl (mp->outer_fib_id));
+ if (!p)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ goto out;
+ }
+ outer_fib_id = p[0];
+
+ /* Check src & dst are different */
+ if ((mp->is_ipv6 && memcmp (mp->src_address, mp->dst_address, 16) == 0) ||
+ (!mp->is_ipv6 && memcmp (mp->src_address, mp->dst_address, 4) == 0))
+ {
+ rv = VNET_API_ERROR_SAME_SRC_DST;
+ goto out;
+ }
+ memset (a, 0, sizeof (*a));
+
+ a->is_add = mp->is_add;
+ a->teb = mp->teb;
+
+ /* ip addresses sent in network byte order */
+ clib_memcpy (&(a->src), mp->src_address, 4);
+ clib_memcpy (&(a->dst), mp->dst_address, 4);
+
+ a->outer_fib_id = outer_fib_id;
+ rv = vnet_gre_add_del_tunnel (a, &sw_if_index);
+
+out:
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_GRE_ADD_DEL_TUNNEL_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void send_gre_tunnel_details
+ (gre_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_gre_tunnel_details_t *rmp;
+ ip4_main_t *im = &ip4_main;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_GRE_TUNNEL_DETAILS);
+ clib_memcpy (rmp->src_address, &(t->tunnel_src), 4);
+ clib_memcpy (rmp->dst_address, &(t->tunnel_dst), 4);
+ rmp->outer_fib_id = htonl (im->fibs[t->outer_fib_index].ft_table_id);
+ rmp->teb = (GRE_TUNNEL_TYPE_TEB == t->type);
+ rmp->sw_if_index = htonl (t->sw_if_index);
+ rmp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+vl_api_gre_tunnel_dump_t_handler (vl_api_gre_tunnel_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+ gre_main_t *gm = &gre_main;
+ gre_tunnel_t *t;
+ u32 sw_if_index;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ if (~0 == sw_if_index)
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (t, gm->tunnels,
+ ({
+ send_gre_tunnel_details(t, q, mp->context);
+ }));
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ if ((sw_if_index >= vec_len (gm->tunnel_index_by_sw_if_index)) ||
+ (~0 == gm->tunnel_index_by_sw_if_index[sw_if_index]))
+ {
+ return;
+ }
+ t = &gm->tunnels[gm->tunnel_index_by_sw_if_index[sw_if_index]];
+ send_gre_tunnel_details (t, q, mp->context);
+ }
+}
+
+/*
+ * gre_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_gre;
+#undef _
+}
+
+static clib_error_t *
+gre_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (gre_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/gre/interface.c b/src/vnet/gre/interface.c
new file mode 100644
index 00000000000..d624587d8e9
--- /dev/null
+++ b/src/vnet/gre/interface.c
@@ -0,0 +1,606 @@
+/*
+ * gre_interface.c: gre interfaces
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/gre/gre.h>
+#include <vnet/ip/format.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/adj/adj_midchain.h>
+#include <vnet/adj/adj_nbr.h>
+#include <vnet/mpls/mpls.h>
+
+static const char *gre_tunnel_type_names[] = GRE_TUNNEL_TYPE_NAMES;
+
+static inline u64
+gre_mk_key (const ip4_address_t *src,
+ const ip4_address_t *dst,
+ u32 out_fib_index)
+{
+ /* FIXME: the fib index should be part of the key */
+ return ((u64)src->as_u32 << 32 | (u64)dst->as_u32);
+}
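+
+/*
+ * Key layout sketch: the tunnel source occupies the upper 32 bits and the
+ * destination the lower 32, both as network-order as_u32 values. Note the
+ * FIXME above: out_fib_index is ignored, so two tunnels with identical
+ * endpoints in different outer FIBs would currently collide in
+ * tunnel_by_key.
+ */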
+
+static u8 *
+format_gre_tunnel_type (u8 * s, va_list * args)
+{
+ gre_tunnel_type_t type = va_arg (*args, gre_tunnel_type_t);
+
+ return (format(s, "%s", gre_tunnel_type_names[type]));
+}
+
+static u8 *
+format_gre_tunnel (u8 * s, va_list * args)
+{
+ gre_tunnel_t * t = va_arg (*args, gre_tunnel_t *);
+ gre_main_t * gm = &gre_main;
+
+ s = format (s,
+ "[%d] %U (src) %U (dst) payload %U outer_fib_index %d",
+ t - gm->tunnels,
+ format_ip4_address, &t->tunnel_src,
+ format_ip4_address, &t->tunnel_dst,
+ format_gre_tunnel_type, t->type,
+ t->outer_fib_index);
+
+ return s;
+}
+
+static gre_tunnel_t *
+gre_tunnel_db_find (const ip4_address_t *src,
+ const ip4_address_t *dst,
+ u32 out_fib_index)
+{
+ gre_main_t * gm = &gre_main;
+ uword * p;
+ u64 key;
+
+ key = gre_mk_key(src, dst, out_fib_index);
+
+ p = hash_get (gm->tunnel_by_key, key);
+
+ if (NULL == p)
+ return (NULL);
+
+ return (pool_elt_at_index (gm->tunnels, p[0]));
+}
+
+static void
+gre_tunnel_db_add (const gre_tunnel_t *t)
+{
+ gre_main_t * gm = &gre_main;
+ u64 key;
+
+ key = gre_mk_key(&t->tunnel_src, &t->tunnel_dst, t->outer_fib_index);
+ hash_set (gm->tunnel_by_key, key, t - gm->tunnels);
+}
+
+static void
+gre_tunnel_db_remove (const gre_tunnel_t *t)
+{
+ gre_main_t * gm = &gre_main;
+ u64 key;
+
+ key = gre_mk_key(&t->tunnel_src, &t->tunnel_dst, t->outer_fib_index);
+ hash_unset (gm->tunnel_by_key, key);
+}
+
+static gre_tunnel_t *
+gre_tunnel_from_fib_node (fib_node_t *node)
+{
+#if (CLIB_DEBUG > 0)
+ ASSERT(FIB_NODE_TYPE_GRE_TUNNEL == node->fn_type);
+#endif
+ return ((gre_tunnel_t*) (((char*)node) -
+ STRUCT_OFFSET_OF(gre_tunnel_t, node)));
+}
+
+/**
+ * gre_tunnel_stack
+ *
+ * 'stack' (resolve the recursion for) the tunnel's midchain adjacency
+ */
+void
+gre_tunnel_stack (adj_index_t ai)
+{
+ gre_main_t * gm = &gre_main;
+ ip_adjacency_t *adj;
+ gre_tunnel_t *gt;
+ u32 sw_if_index;
+
+ adj = adj_get(ai);
+ sw_if_index = adj->rewrite_header.sw_if_index;
+
+ if ((vec_len(gm->tunnel_index_by_sw_if_index) <= sw_if_index) ||
+ (~0 == gm->tunnel_index_by_sw_if_index[sw_if_index]))
+ return;
+
+ gt = pool_elt_at_index(gm->tunnels,
+ gm->tunnel_index_by_sw_if_index[sw_if_index]);
+
+ /*
+ * find the adjacency that is contributed by the FIB entry
+ * that this tunnel resolves via, and use it as the next adj
+ * in the midchain
+ */
+ if (vnet_hw_interface_get_flags(vnet_get_main(),
+ gt->hw_if_index) &
+ VNET_HW_INTERFACE_FLAG_LINK_UP)
+ {
+ adj_nbr_midchain_stack(
+ ai,
+ fib_entry_contribute_ip_forwarding(gt->fib_entry_index));
+ }
+ else
+ {
+ adj_nbr_midchain_unstack(ai);
+ }
+}
+
+/**
+ * @brief Call back when restacking all adjacencies on a GRE interface
+ */
+static adj_walk_rc_t
+gre_adj_walk_cb (adj_index_t ai,
+ void *ctx)
+{
+ gre_tunnel_stack(ai);
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+static void
+gre_tunnel_restack (gre_tunnel_t *gt)
+{
+ fib_protocol_t proto;
+
+ /*
+ * walk all the adjacencies on the GRE interface and restack them
+ */
+ FOR_EACH_FIB_IP_PROTOCOL(proto)
+ {
+ adj_nbr_walk(gt->sw_if_index,
+ proto,
+ gre_adj_walk_cb,
+ NULL);
+ }
+}
+
+/**
+ * Function definition to backwalk a FIB node
+ */
+static fib_node_back_walk_rc_t
+gre_tunnel_back_walk (fib_node_t *node,
+ fib_node_back_walk_ctx_t *ctx)
+{
+ gre_tunnel_restack(gre_tunnel_from_fib_node(node));
+
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/**
+ * Function definition to get a FIB node from its index
+ */
+static fib_node_t*
+gre_tunnel_fib_node_get (fib_node_index_t index)
+{
+ gre_tunnel_t * gt;
+ gre_main_t * gm;
+
+ gm = &gre_main;
+ gt = pool_elt_at_index(gm->tunnels, index);
+
+ return (&gt->node);
+}
+
+/**
+ * Function definition to inform the FIB node that its last lock has gone.
+ */
+static void
+gre_tunnel_last_lock_gone (fib_node_t *node)
+{
+ /*
+ * The GRE tunnel is a root of the graph. As such
+ * it never has children and thus is never locked.
+ */
+ ASSERT(0);
+}
+
+/*
+ * Virtual function table registered by GRE tunnels
+ * for participation in the FIB object graph.
+ */
+static const fib_node_vft_t gre_vft = {
+ .fnv_get = gre_tunnel_fib_node_get,
+ .fnv_last_lock = gre_tunnel_last_lock_gone,
+ .fnv_back_walk = gre_tunnel_back_walk,
+};
+
+static int
+vnet_gre_tunnel_add (vnet_gre_add_del_tunnel_args_t *a,
+ u32 * sw_if_indexp)
+{
+ gre_main_t * gm = &gre_main;
+ vnet_main_t * vnm = gm->vnet_main;
+ ip4_main_t * im = &ip4_main;
+ gre_tunnel_t * t;
+ vnet_hw_interface_t * hi;
+ u32 hw_if_index, sw_if_index;
+ u32 outer_fib_index;
+ u8 address[6];
+ clib_error_t *error;
+
+ outer_fib_index = ip4_fib_index_from_table_id(a->outer_fib_id);
+
+ if (~0 == outer_fib_index)
+ return VNET_API_ERROR_NO_SUCH_FIB;
+
+ t = gre_tunnel_db_find(&a->src, &a->dst, a->outer_fib_id);
+
+ if (NULL != t)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ pool_get_aligned (gm->tunnels, t, CLIB_CACHE_LINE_BYTES);
+ memset (t, 0, sizeof (*t));
+ fib_node_init(&t->node, FIB_NODE_TYPE_GRE_TUNNEL);
+
+ if (a->teb)
+ t->type = GRE_TUNNEL_TYPE_TEB;
+ else
+ t->type = GRE_TUNNEL_TYPE_L3;
+
+ if (vec_len (gm->free_gre_tunnel_hw_if_indices[t->type]) > 0) {
+ vnet_interface_main_t * im = &vnm->interface_main;
+
+ hw_if_index = gm->free_gre_tunnel_hw_if_indices[t->type]
+ [vec_len (gm->free_gre_tunnel_hw_if_indices[t->type])-1];
+ _vec_len (gm->free_gre_tunnel_hw_if_indices[t->type]) -= 1;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ hi->dev_instance = t - gm->tunnels;
+ hi->hw_instance = hi->dev_instance;
+
+ /* clear old stats of freed tunnel before reuse */
+ sw_if_index = hi->sw_if_index;
+ vnet_interface_counter_lock(im);
+ vlib_zero_combined_counter
+ (&im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_TX], sw_if_index);
+ vlib_zero_combined_counter
+ (&im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_RX], sw_if_index);
+ vlib_zero_simple_counter
+ (&im->sw_if_counters[VNET_INTERFACE_COUNTER_DROP], sw_if_index);
+ vnet_interface_counter_unlock(im);
+ if (GRE_TUNNEL_TYPE_TEB == t->type)
+ {
+ t->l2_tx_arc = vlib_node_add_named_next(vlib_get_main(),
+ hi->tx_node_index,
+ "adj-l2-midchain");
+ }
+ } else {
+ if (GRE_TUNNEL_TYPE_TEB == t->type)
+ {
+ /* Default MAC address (d00b:eed0:0000 + sw_if_index) */
+ memset (address, 0, sizeof (address));
+ address[0] = 0xd0;
+ address[1] = 0x0b;
+ address[2] = 0xee;
+ address[3] = 0xd0;
+ address[4] = t - gm->tunnels;
+
+ error = ethernet_register_interface(vnm,
+ gre_device_teb_class.index,
+ t - gm->tunnels, address,
+ &hw_if_index,
+ 0);
+
+ if (error)
+ {
+ clib_error_report (error);
+ return VNET_API_ERROR_INVALID_REGISTRATION;
+ }
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ t->l2_tx_arc = vlib_node_add_named_next(vlib_get_main(),
+ hi->tx_node_index,
+ "adj-l2-midchain");
+ } else {
+ hw_if_index = vnet_register_interface(vnm,
+ gre_device_class.index,
+ t - gm->tunnels,
+ gre_hw_interface_class.index,
+ t - gm->tunnels);
+ }
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ sw_if_index = hi->sw_if_index;
+ }
+
+ t->hw_if_index = hw_if_index;
+ t->outer_fib_index = outer_fib_index;
+ t->sw_if_index = sw_if_index;
+ t->l2_adj_index = ADJ_INDEX_INVALID;
+
+ vec_validate_init_empty (gm->tunnel_index_by_sw_if_index, sw_if_index, ~0);
+ gm->tunnel_index_by_sw_if_index[sw_if_index] = t - gm->tunnels;
+
+ vec_validate (im->fib_index_by_sw_if_index, sw_if_index);
+
+ hi->min_packet_bytes = 64 + sizeof (gre_header_t) + sizeof (ip4_header_t);
+ hi->per_packet_overhead_bytes =
+ /* preamble */ 8 + /* inter frame gap */ 12;
+
+ /* Standard default gre MTU. */
+ hi->max_l3_packet_bytes[VLIB_RX] = hi->max_l3_packet_bytes[VLIB_TX] = 9000;
+
+ clib_memcpy (&t->tunnel_src, &a->src, sizeof (t->tunnel_src));
+ clib_memcpy (&t->tunnel_dst, &a->dst, sizeof (t->tunnel_dst));
+
+ gre_tunnel_db_add(t);
+
+ /*
+ * source the FIB entry for the tunnel's destination
+ * and become a child thereof. The tunnel will then get poked
+ * when the forwarding for the entry updates, and the tunnel can
+ * re-stack accordingly
+ */
+ const fib_prefix_t tun_dst_pfx = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4 = t->tunnel_dst,
+ }
+ };
+
+ t->fib_entry_index =
+ fib_table_entry_special_add(outer_fib_index,
+ &tun_dst_pfx,
+ FIB_SOURCE_RR,
+ FIB_ENTRY_FLAG_NONE,
+ ADJ_INDEX_INVALID);
+ t->sibling_index =
+ fib_entry_child_add(t->fib_entry_index,
+ FIB_NODE_TYPE_GRE_TUNNEL,
+ t - gm->tunnels);
+
+ if (GRE_TUNNEL_TYPE_TEB == t->type)
+ {
+ t->l2_adj_index = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
+ VNET_LINK_ETHERNET,
+ &zero_addr,
+ sw_if_index);
+ gre_update_adj(vnm, t->sw_if_index, t->l2_adj_index);
+ }
+
+ if (sw_if_indexp)
+ *sw_if_indexp = sw_if_index;
+
+ return 0;
+}
+
+static int
+vnet_gre_tunnel_delete (vnet_gre_add_del_tunnel_args_t *a,
+ u32 * sw_if_indexp)
+{
+ gre_main_t * gm = &gre_main;
+ vnet_main_t * vnm = gm->vnet_main;
+ gre_tunnel_t * t;
+ u32 sw_if_index;
+
+ t = gre_tunnel_db_find(&a->src, &a->dst, a->outer_fib_id);
+
+ if (NULL == t)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ sw_if_index = t->sw_if_index;
+ vnet_sw_interface_set_flags (vnm, sw_if_index, 0 /* down */);
+ /* make sure tunnel is removed from l2 bd or xconnect */
+ set_int_l2_mode(gm->vlib_main, vnm, MODE_L3, sw_if_index, 0, 0, 0, 0);
+ vec_add1 (gm->free_gre_tunnel_hw_if_indices[t->type], t->hw_if_index);
+ gm->tunnel_index_by_sw_if_index[sw_if_index] = ~0;
+
+ /* a TEB tunnel holds a lock on its L2 midchain adj; unlock exactly once */
+ if (t->l2_adj_index != ADJ_INDEX_INVALID)
+ adj_unlock(t->l2_adj_index);
+
+ fib_entry_child_remove(t->fib_entry_index,
+ t->sibling_index);
+ fib_table_entry_delete_index(t->fib_entry_index,
+ FIB_SOURCE_RR);
+
+ gre_tunnel_db_remove(t);
+ fib_node_deinit(&t->node);
+ pool_put (gm->tunnels, t);
+
+ if (sw_if_indexp)
+ *sw_if_indexp = sw_if_index;
+
+ return 0;
+}
+
+int
+vnet_gre_add_del_tunnel (vnet_gre_add_del_tunnel_args_t *a,
+ u32 * sw_if_indexp)
+{
+ if (a->is_add)
+ return (vnet_gre_tunnel_add(a, sw_if_indexp));
+ else
+ return (vnet_gre_tunnel_delete(a, sw_if_indexp));
+}
+
+clib_error_t *
+gre_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+{
+ gre_main_t * gm = &gre_main;
+ vnet_hw_interface_t * hi;
+ gre_tunnel_t *t;
+ u32 ti;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ if (NULL == gm->tunnel_index_by_sw_if_index ||
+ hi->sw_if_index >= vec_len(gm->tunnel_index_by_sw_if_index))
+ return (NULL);
+
+ ti = gm->tunnel_index_by_sw_if_index[hi->sw_if_index];
+
+ if (~0 == ti)
+ /* not one of ours */
+ return (NULL);
+
+ t = pool_elt_at_index(gm->tunnels, ti);
+
+ if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ vnet_hw_interface_set_flags (vnm, hw_if_index, VNET_HW_INTERFACE_FLAG_LINK_UP);
+ else
+ vnet_hw_interface_set_flags (vnm, hw_if_index, 0 /* down */);
+
+ gre_tunnel_restack(t);
+
+ return /* no error */ 0;
+}
+
+static clib_error_t *
+create_gre_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, * line_input = &_line_input;
+ vnet_gre_add_del_tunnel_args_t _a, * a = &_a;
+ ip4_address_t src, dst;
+ u32 outer_fib_id = 0;
+ u8 teb = 0;
+ int rv;
+ u32 num_m_args = 0;
+ u8 is_add = 1;
+ u32 sw_if_index;
+
+ /* Get a line of input. */
+ if (! unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "src %U", unformat_ip4_address, &src))
+ num_m_args++;
+ else if (unformat (line_input, "dst %U", unformat_ip4_address, &dst))
+ num_m_args++;
+ else if (unformat (line_input, "outer-fib-id %d", &outer_fib_id))
+ ;
+ else if (unformat (line_input, "teb"))
+ teb = 1;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ if (num_m_args < 2)
+ return clib_error_return (0, "mandatory argument(s) missing");
+
+ if (memcmp (&src, &dst, sizeof(src)) == 0)
+ return clib_error_return (0, "src and dst are identical");
+
+ memset (a, 0, sizeof (*a));
+ a->outer_fib_id = outer_fib_id;
+ a->teb = teb;
+ clib_memcpy(&a->src, &src, sizeof(src));
+ clib_memcpy(&a->dst, &dst, sizeof(dst));
+
+ if (is_add)
+ rv = vnet_gre_tunnel_add(a, &sw_if_index);
+ else
+ rv = vnet_gre_tunnel_delete(a, &sw_if_index);
+
+ switch(rv)
+ {
+ case 0:
+ vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main(), sw_if_index);
+ break;
+ case VNET_API_ERROR_INVALID_VALUE:
+ return clib_error_return (0, "GRE tunnel already exists...");
+ case VNET_API_ERROR_NO_SUCH_FIB:
+ return clib_error_return (0, "outer fib ID %d doesn't exist\n",
+ outer_fib_id);
+ default:
+ return clib_error_return (0, "vnet_gre_add_del_tunnel returned %d", rv);
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (create_gre_tunnel_command, static) = {
+ .path = "create gre tunnel",
+ .short_help = "create gre tunnel src <addr> dst <addr> "
+ "[outer-fib-id <fib>] [teb] [del]",
+ .function = create_gre_tunnel_command_fn,
+};
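+
+/*
+ * Example session (output illustrative; names come from
+ * format_gre_tunnel_name / format_gre_tunnel_teb_name):
+ *
+ * vpp# create gre tunnel src 10.10.10.1 dst 10.10.10.2
+ * gre0
+ * vpp# create gre tunnel src 10.10.10.1 dst 10.10.10.3 teb
+ * teb-gre1
+ */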
+
+static clib_error_t *
+show_gre_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ gre_main_t * gm = &gre_main;
+ gre_tunnel_t * t;
+ u32 ti = ~0;
+
+ if (pool_elts (gm->tunnels) == 0)
+ vlib_cli_output (vm, "No GRE tunnels configured...");
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%d", &ti))
+ ;
+ else
+ break;
+ }
+
+ if (~0 == ti)
+ {
+ pool_foreach (t, gm->tunnels,
+ ({
+ vlib_cli_output (vm, "%U", format_gre_tunnel, t);
+ }));
+ }
+ else
+ {
+ t = pool_elt_at_index(gm->tunnels, ti);
+
+ vlib_cli_output (vm, "%U", format_gre_tunnel, t);
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (show_gre_tunnel_command, static) = {
+ .path = "show gre tunnel",
+ .function = show_gre_tunnel_command_fn,
+};
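+
+/*
+ * Example output (illustrative; layout per format_gre_tunnel above):
+ *
+ * vpp# show gre tunnel
+ * [0] 10.10.10.1 (src) 10.10.10.2 (dst) payload L3 outer_fib_index 0
+ */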
+
+/* force inclusion from application's main.c */
+clib_error_t *gre_interface_init (vlib_main_t *vm)
+{
+ fib_node_register_type(FIB_NODE_TYPE_GRE_TUNNEL, &gre_vft);
+
+ return 0;
+}
+VLIB_INIT_FUNCTION(gre_interface_init);
diff --git a/src/vnet/gre/node.c b/src/vnet/gre/node.c
new file mode 100644
index 00000000000..86f7a6eeea4
--- /dev/null
+++ b/src/vnet/gre/node.c
@@ -0,0 +1,531 @@
+/*
+ * node.c: gre packet processing
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/gre/gre.h>
+#include <vnet/mpls/mpls.h>
+#include <vppinfra/sparse_vec.h>
+
+#define foreach_gre_input_next \
+_(PUNT, "error-punt") \
+_(DROP, "error-drop") \
+_(ETHERNET_INPUT, "ethernet-input") \
+_(IP4_INPUT, "ip4-input") \
+_(IP6_INPUT, "ip6-input") \
+_(MPLS_INPUT, "mpls-input")
+
+typedef enum {
+#define _(s,n) GRE_INPUT_NEXT_##s,
+ foreach_gre_input_next
+#undef _
+ GRE_INPUT_N_NEXT,
+} gre_input_next_t;
+
+typedef struct {
+ u32 tunnel_id;
+ u32 length;
+ ip4_address_t src;
+ ip4_address_t dst;
+} gre_rx_trace_t;
+
+u8 * format_gre_rx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gre_rx_trace_t * t = va_arg (*args, gre_rx_trace_t *);
+
+ s = format (s, "GRE: tunnel %d len %d src %U dst %U",
+ t->tunnel_id, clib_net_to_host_u16(t->length),
+ format_ip4_address, &t->src.as_u8,
+ format_ip4_address, &t->dst.as_u8);
+ return s;
+}
+
+typedef struct {
+ /* Sparse vector mapping gre protocol in network byte order
+ to next index. */
+ u16 * next_by_protocol;
+} gre_input_runtime_t;
+
+static uword
+gre_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ gre_main_t * gm = &gre_main;
+ gre_input_runtime_t * rt = (void *) node->runtime_data;
+ u32 n_left_from, next_index, * from, * to_next;
+ u64 cached_tunnel_key = (u64) ~0;
+ u32 cached_tunnel_sw_if_index = 0, tunnel_sw_if_index = 0;
+
+ u32 cpu_index = os_get_cpu_number();
+ u32 len;
+ vnet_interface_main_t *im = &gm->vnet_main->interface_main;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+ gre_header_t * h0, * h1;
+ u16 version0, version1;
+ int verr0, verr1;
+ u32 i0, i1, next0, next1, protocol0, protocol1;
+ ip4_header_t *ip0, *ip1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, sizeof (h0[0]), LOAD);
+ CLIB_PREFETCH (p3->data, sizeof (h1[0]), LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* ip4_local hands us the ip header, not the gre header */
+ ip0 = vlib_buffer_get_current (b0);
+ ip1 = vlib_buffer_get_current (b1);
+
+ /* Save src + dst ip4 address, e.g. for mpls-o-gre */
+ vnet_buffer(b0)->gre.src = ip0->src_address.as_u32;
+ vnet_buffer(b0)->gre.dst = ip0->dst_address.as_u32;
+ vnet_buffer(b1)->gre.src = ip1->src_address.as_u32;
+ vnet_buffer(b1)->gre.dst = ip1->dst_address.as_u32;
+
+ vlib_buffer_advance (b0, sizeof (*ip0));
+ vlib_buffer_advance (b1, sizeof (*ip1));
+
+ h0 = vlib_buffer_get_current (b0);
+ h1 = vlib_buffer_get_current (b1);
+
+ /* Index sparse array with network byte order. */
+ protocol0 = h0->protocol;
+ protocol1 = h1->protocol;
+ sparse_vec_index2 (rt->next_by_protocol, protocol0, protocol1,
+ &i0, &i1);
+ next0 = vec_elt(rt->next_by_protocol, i0);
+ next1 = vec_elt(rt->next_by_protocol, i1);
+
+ b0->error = node->errors[i0 == SPARSE_VEC_INVALID_INDEX ? GRE_ERROR_UNKNOWN_PROTOCOL : GRE_ERROR_NONE];
+ b1->error = node->errors[i1 == SPARSE_VEC_INVALID_INDEX ? GRE_ERROR_UNKNOWN_PROTOCOL : GRE_ERROR_NONE];
+
+ version0 = clib_net_to_host_u16 (h0->flags_and_version);
+ verr0 = version0 & GRE_VERSION_MASK;
+ version1 = clib_net_to_host_u16 (h1->flags_and_version);
+ verr1 = version1 & GRE_VERSION_MASK;
+
+ b0->error = verr0 ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION]
+ : b0->error;
+ next0 = verr0 ? GRE_INPUT_NEXT_DROP : next0;
+ b1->error = verr1 ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION]
+ : b1->error;
+ next1 = verr1 ? GRE_INPUT_NEXT_DROP : next1;
+
+
+ /* RPF check for ip4/ip6 input */
+ if (PREDICT_TRUE(next0 == GRE_INPUT_NEXT_IP4_INPUT
+ || next0 == GRE_INPUT_NEXT_IP6_INPUT
+ || next0 == GRE_INPUT_NEXT_ETHERNET_INPUT
+ || next0 == GRE_INPUT_NEXT_MPLS_INPUT))
+ {
+ u64 key = ((u64)(vnet_buffer(b0)->gre.dst) << 32) |
+ (u64)(vnet_buffer(b0)->gre.src);
+
+ if (cached_tunnel_key != key)
+ {
+ vnet_hw_interface_t * hi;
+ gre_tunnel_t * t;
+ uword * p;
+
+ p = hash_get (gm->tunnel_by_key, key);
+ if (!p)
+ {
+ next0 = GRE_INPUT_NEXT_DROP;
+ b0->error = node->errors[GRE_ERROR_NO_SUCH_TUNNEL];
+ goto drop0;
+ }
+ t = pool_elt_at_index (gm->tunnels, p[0]);
+ hi = vnet_get_hw_interface (gm->vnet_main,
+ t->hw_if_index);
+ tunnel_sw_if_index = hi->sw_if_index;
+
+ cached_tunnel_sw_if_index = tunnel_sw_if_index;
+ }
+ else
+ {
+ tunnel_sw_if_index = cached_tunnel_sw_if_index;
+ }
+ }
+ else
+ {
+ next0 = GRE_INPUT_NEXT_DROP;
+ goto drop0;
+ }
+ len = vlib_buffer_length_in_chain (vm, b0);
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ cpu_index,
+ tunnel_sw_if_index,
+ 1 /* packets */,
+ len /* bytes */);
+
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;
+
+drop0:
+ if (PREDICT_TRUE(next1 == GRE_INPUT_NEXT_IP4_INPUT
+ || next1 == GRE_INPUT_NEXT_IP6_INPUT
+ || next1 == GRE_INPUT_NEXT_ETHERNET_INPUT
+ || next1 == GRE_INPUT_NEXT_MPLS_INPUT))
+ {
+ u64 key = ((u64)(vnet_buffer(b1)->gre.dst) << 32) |
+ (u64)(vnet_buffer(b1)->gre.src);
+
+ if (cached_tunnel_key != key)
+ {
+ vnet_hw_interface_t * hi;
+ gre_tunnel_t * t;
+ uword * p;
+
+ p = hash_get (gm->tunnel_by_key, key);
+ if (!p)
+ {
+ next1 = GRE_INPUT_NEXT_DROP;
+ b1->error = node->errors[GRE_ERROR_NO_SUCH_TUNNEL];
+ goto drop1;
+ }
+ t = pool_elt_at_index (gm->tunnels, p[0]);
+ hi = vnet_get_hw_interface (gm->vnet_main,
+ t->hw_if_index);
+ tunnel_sw_if_index = hi->sw_if_index;
+
+ cached_tunnel_sw_if_index = tunnel_sw_if_index;
+ }
+ else
+ {
+ tunnel_sw_if_index = cached_tunnel_sw_if_index;
+ }
+ }
+ else
+ {
+ next1 = GRE_INPUT_NEXT_DROP;
+ goto drop1;
+ }
+ len = vlib_buffer_length_in_chain (vm, b1);
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ cpu_index,
+ tunnel_sw_if_index,
+ 1 /* packets */,
+ len /* bytes */);
+
+ vnet_buffer(b1)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;
+
+drop1:
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ gre_rx_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->tunnel_id = tunnel_sw_if_index;
+ tr->length = ip0->length;
+ tr->src.as_u32 = ip0->src_address.as_u32;
+ tr->dst.as_u32 = ip0->dst_address.as_u32;
+ }
+
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ gre_rx_trace_t *tr = vlib_add_trace (vm, node,
+ b1, sizeof (*tr));
+ tr->tunnel_id = tunnel_sw_if_index;
+ tr->length = ip1->length;
+ tr->src.as_u32 = ip1->src_address.as_u32;
+ tr->dst.as_u32 = ip1->dst_address.as_u32;
+ }
+
+ vlib_buffer_advance (b0, sizeof (*h0));
+ vlib_buffer_advance (b1, sizeof (*h1));
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ gre_header_t * h0;
+ ip4_header_t * ip0;
+ u16 version0;
+ int verr0;
+ u32 i0, next0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (b0);
+
+ vnet_buffer(b0)->gre.src = ip0->src_address.as_u32;
+ vnet_buffer(b0)->gre.dst = ip0->dst_address.as_u32;
+
+ vlib_buffer_advance (b0, sizeof (*ip0));
+
+ h0 = vlib_buffer_get_current (b0);
+
+ i0 = sparse_vec_index (rt->next_by_protocol, h0->protocol);
+ next0 = vec_elt(rt->next_by_protocol, i0);
+
+ b0->error =
+ node->errors[i0 == SPARSE_VEC_INVALID_INDEX
+ ? GRE_ERROR_UNKNOWN_PROTOCOL : GRE_ERROR_NONE];
+
+ version0 = clib_net_to_host_u16 (h0->flags_and_version);
+ verr0 = version0 & GRE_VERSION_MASK;
+ b0->error = verr0 ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION]
+ : b0->error;
+ next0 = verr0 ? GRE_INPUT_NEXT_DROP : next0;
+
+
+ /* For IP payload we need to find the source interface
+ so we can increment counters and help the forwarding node
+ pick the right FIB */
+ /* RPF check for ip4/ip6 input */
+ if (PREDICT_TRUE(next0 == GRE_INPUT_NEXT_IP4_INPUT
+ || next0 == GRE_INPUT_NEXT_IP6_INPUT
+ || next0 == GRE_INPUT_NEXT_ETHERNET_INPUT
+ || next0 == GRE_INPUT_NEXT_MPLS_INPUT))
+ {
+ u64 key = ((u64)(vnet_buffer(b0)->gre.dst) << 32) |
+ (u64)(vnet_buffer(b0)->gre.src);
+
+ if (cached_tunnel_key != key)
+ {
+ vnet_hw_interface_t * hi;
+ gre_tunnel_t * t;
+ uword * p;
+
+ p = hash_get (gm->tunnel_by_key, key);
+ if (!p)
+ {
+ next0 = GRE_INPUT_NEXT_DROP;
+ b0->error = node->errors[GRE_ERROR_NO_SUCH_TUNNEL];
+ goto drop;
+ }
+ t = pool_elt_at_index (gm->tunnels, p[0]);
+ hi = vnet_get_hw_interface (gm->vnet_main,
+ t->hw_if_index);
+ tunnel_sw_if_index = hi->sw_if_index;
+
+ cached_tunnel_sw_if_index = tunnel_sw_if_index;
+ }
+ else
+ {
+ tunnel_sw_if_index = cached_tunnel_sw_if_index;
+ }
+ }
+ else
+ {
+ next0 = GRE_INPUT_NEXT_DROP;
+ goto drop;
+ }
+ len = vlib_buffer_length_in_chain (vm, b0);
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ cpu_index,
+ tunnel_sw_if_index,
+ 1 /* packets */,
+ len /* bytes */);
+
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;
+
+drop:
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ gre_rx_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->tunnel_id = tunnel_sw_if_index;
+ tr->length = ip0->length;
+ tr->src.as_u32 = ip0->src_address.as_u32;
+ tr->dst.as_u32 = ip0->dst_address.as_u32;
+ }
+
+ vlib_buffer_advance (b0, sizeof (*h0));
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, gre_input_node.index,
+ GRE_ERROR_PKTS_DECAP, from_frame->n_vectors);
+ return from_frame->n_vectors;
+}
+
+static char * gre_error_strings[] = {
+#define gre_error(n,s) s,
+#include "error.def"
+#undef gre_error
+};
+
+VLIB_REGISTER_NODE (gre_input_node) = {
+ .function = gre_input,
+ .name = "gre-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .runtime_data_bytes = sizeof (gre_input_runtime_t),
+
+ .n_errors = GRE_N_ERROR,
+ .error_strings = gre_error_strings,
+
+ .n_next_nodes = GRE_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [GRE_INPUT_NEXT_##s] = n,
+ foreach_gre_input_next
+#undef _
+ },
+
+ .format_buffer = format_gre_header_with_length,
+ .format_trace = format_gre_rx_trace,
+ .unformat_buffer = unformat_gre_header,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (gre_input_node, gre_input)
+
+void
+gre_register_input_protocol (vlib_main_t * vm,
+ gre_protocol_t protocol,
+ u32 node_index)
+{
+ gre_main_t * em = &gre_main;
+ gre_protocol_info_t * pi;
+ gre_input_runtime_t * rt;
+ u16 * n;
+
+ {
+ clib_error_t * error = vlib_call_init_function (vm, gre_input_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ pi = gre_get_protocol_info (em, protocol);
+ pi->node_index = node_index;
+ pi->next_index = vlib_node_add_next (vm,
+ gre_input_node.index,
+ node_index);
+
+ /* Setup gre protocol -> next index sparse vector mapping. */
+ rt = vlib_node_get_runtime_data (vm, gre_input_node.index);
+ n = sparse_vec_validate (rt->next_by_protocol,
+ clib_host_to_net_u16 (protocol));
+ n[0] = pi->next_index;
+}
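+
+/*
+ * Usage sketch: a payload graph node registers itself so gre-input
+ * dispatches its protocol to it. E.g. a hypothetical NSH input node
+ * (GRE_PROTOCOL_nsh, 0x894F, is already in foreach_gre_protocol):
+ *
+ * gre_register_input_protocol (vm, GRE_PROTOCOL_nsh, nsh_input_node.index);
+ */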
+
+static void
+gre_setup_node (vlib_main_t * vm, u32 node_index)
+{
+ vlib_node_t * n = vlib_get_node (vm, node_index);
+ pg_node_t * pn = pg_get_node (node_index);
+
+ n->format_buffer = format_gre_header_with_length;
+ n->unformat_buffer = unformat_gre_header;
+ pn->unformat_edit = unformat_pg_gre_header;
+}
+
+static clib_error_t * gre_input_init (vlib_main_t * vm)
+{
+ gre_input_runtime_t * rt;
+ vlib_node_t *ethernet_input, *ip4_input, *ip6_input, *mpls_unicast_input;
+
+ {
+ clib_error_t * error;
+ error = vlib_call_init_function (vm, gre_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ gre_setup_node (vm, gre_input_node.index);
+
+ rt = vlib_node_get_runtime_data (vm, gre_input_node.index);
+
+ rt->next_by_protocol = sparse_vec_new
+ (/* elt bytes */ sizeof (rt->next_by_protocol[0]),
+ /* bits in index */ BITS (((gre_header_t *) 0)->protocol));
+
+ /* These could be moved to the supported protocol input node defn's */
+ ethernet_input = vlib_get_node_by_name (vm, (u8 *)"ethernet-input");
+ ASSERT(ethernet_input);
+ ip4_input = vlib_get_node_by_name (vm, (u8 *)"ip4-input");
+ ASSERT(ip4_input);
+ ip6_input = vlib_get_node_by_name (vm, (u8 *)"ip6-input");
+ ASSERT(ip6_input);
+ mpls_unicast_input = vlib_get_node_by_name (vm, (u8 *)"mpls-input");
+ ASSERT(mpls_unicast_input);
+
+ gre_register_input_protocol (vm, GRE_PROTOCOL_teb,
+ ethernet_input->index);
+
+ gre_register_input_protocol (vm, GRE_PROTOCOL_ip4,
+ ip4_input->index);
+
+ gre_register_input_protocol (vm, GRE_PROTOCOL_ip6,
+ ip6_input->index);
+
+ gre_register_input_protocol (vm, GRE_PROTOCOL_mpls_unicast,
+ mpls_unicast_input->index);
+
+ ip4_register_protocol (IP_PROTOCOL_GRE, gre_input_node.index);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (gre_input_init);
diff --git a/src/vnet/gre/packet.h b/src/vnet/gre/packet.h
new file mode 100644
index 00000000000..cc2ccda9eff
--- /dev/null
+++ b/src/vnet/gre/packet.h
@@ -0,0 +1,55 @@
+#ifndef included_vnet_gre_packet_h
+#define included_vnet_gre_packet_h
+
+/*
+ * GRE packet format
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define foreach_gre_protocol \
+_ (0x0800, ip4) \
+_ (0x86DD, ip6) \
+_ (0x6558, teb) \
+_ (0x0806, arp) \
+_ (0x8847, mpls_unicast) \
+_ (0x894F, nsh)
+
+typedef enum {
+#define _(n,f) GRE_PROTOCOL_##f = n,
+ foreach_gre_protocol
+#undef _
+} gre_protocol_t;
+
+typedef struct {
+ /* flags and version */
+ u16 flags_and_version;
+ /* unimplemented at the moment */
+#define GRE_FLAGS_CHECKSUM (1 << 15)
+
+ /* deprecated, according to rfc2784 */
+#define GRE_FLAGS_ROUTING (1 << 14)
+#define GRE_FLAGS_KEY (1 << 13)
+#define GRE_FLAGS_SEQUENCE (1 << 12)
+#define GRE_FLAGS_STRICT_SOURCE_ROUTE (1 << 11)
+
+ /* version 1 is PPTP which we don't support */
+#define GRE_SUPPORTED_VERSION 0
+#define GRE_VERSION_MASK 0x7
+
+ /* 0x800 for ip4, etc. */
+ u16 protocol;
+} gre_header_t;
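+
+/*
+ * Worked example: a base RFC 2784 header carrying IPv4 is 4 bytes on the
+ * wire - flags_and_version = 0x0000 (no optional fields, version 0) and
+ * protocol = 0x0800, both in network byte order. A non-zero version field
+ * is dropped by gre-input (GRE_ERROR_UNSUPPORTED_VERSION); the optional
+ * checksum/key/sequence fields are unimplemented, per the notes above.
+ */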
+
+#endif /* included_vnet_gre_packet_h */
diff --git a/src/vnet/gre/pg.c b/src/vnet/gre/pg.c
new file mode 100644
index 00000000000..cc065d3b6b5
--- /dev/null
+++ b/src/vnet/gre/pg.c
@@ -0,0 +1,77 @@
+/*
+ * pg.c: packet generator GRE interface
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/gre/gre.h>
+
+typedef struct {
+ pg_edit_t flags_and_version;
+ pg_edit_t protocol;
+} pg_gre_header_t;
+
+static inline void
+pg_gre_header_init (pg_gre_header_t * e)
+{
+ pg_edit_init (&e->flags_and_version, gre_header_t, flags_and_version);
+ pg_edit_init (&e->protocol, gre_header_t, protocol);
+}
+
+uword
+unformat_pg_gre_header (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t * s = va_arg (*args, pg_stream_t *);
+ pg_gre_header_t * h;
+ u32 group_index, error;
+
+ h = pg_create_edit_group (s, sizeof (h[0]), sizeof (gre_header_t),
+ &group_index);
+ pg_gre_header_init (h);
+
+ pg_edit_set_fixed (&h->flags_and_version, 0);
+
+ error = 1;
+ if (! unformat (input, "%U",
+ unformat_pg_edit,
+ unformat_gre_protocol_net_byte_order, &h->protocol))
+ goto done;
+
+ {
+ gre_main_t * pm = &gre_main;
+ gre_protocol_info_t * pi = 0;
+ pg_node_t * pg_node = 0;
+
+ if (h->protocol.type == PG_EDIT_FIXED)
+ {
+ u16 t = *(u16 *) h->protocol.values[PG_EDIT_LO];
+ pi = gre_get_protocol_info (pm, clib_net_to_host_u16 (t));
+ if (pi && pi->node_index != ~0)
+ pg_node = pg_get_node (pi->node_index);
+ }
+
+ if (pg_node && pg_node->unformat_edit
+ && unformat_user (input, pg_node->unformat_edit, s))
+ ;
+ }
+
+ error = 0;
+ done:
+ if (error)
+ pg_free_edit_group (s);
+ return error == 0;
+}
+
diff --git a/src/vnet/handoff.c b/src/vnet/handoff.c
new file mode 100644
index 00000000000..9f3c93b4b70
--- /dev/null
+++ b/src/vnet/handoff.c
@@ -0,0 +1,594 @@
+
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vppinfra/xxhash.h>
+#include <vlib/threads.h>
+#include <vnet/handoff.h>
+#include <vnet/feature/feature.h>
+
+typedef struct
+{
+ uword *workers_bitmap;
+ u32 *workers;
+} per_inteface_handoff_data_t;
+
+typedef struct
+{
+ u32 cached_next_index;
+ u32 num_workers;
+ u32 first_worker_index;
+
+ per_inteface_handoff_data_t *if_data;
+
+ /* Worker handoff index */
+ u32 frame_queue_index;
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+
+ u64 (*hash_fn) (ethernet_header_t *);
+} handoff_main_t;
+
+handoff_main_t handoff_main;
+vlib_node_registration_t handoff_dispatch_node;
+
+typedef struct
+{
+ u32 sw_if_index;
+ u32 next_worker_index;
+ u32 buffer_index;
+} worker_handoff_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_worker_handoff_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ worker_handoff_trace_t *t = va_arg (*args, worker_handoff_trace_t *);
+
+ s =
+ format (s, "worker-handoff: sw_if_index %d, next_worker %d, buffer 0x%x",
+ t->sw_if_index, t->next_worker_index, t->buffer_index);
+ return s;
+}
+
+vlib_node_registration_t handoff_node;
+
+static uword
+worker_handoff_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ handoff_main_t *hm = &handoff_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ u32 n_left_from, *from;
+ static __thread vlib_frame_queue_elt_t **handoff_queue_elt_by_worker_index;
+ static __thread vlib_frame_queue_t **congested_handoff_queue_by_worker_index
+ = 0;
+ vlib_frame_queue_elt_t *hf = 0;
+ int i;
+ u32 n_left_to_next_worker = 0, *to_next_worker = 0;
+ u32 next_worker_index = 0;
+ u32 current_worker_index = ~0;
+
+ if (PREDICT_FALSE (handoff_queue_elt_by_worker_index == 0))
+ {
+ vec_validate (handoff_queue_elt_by_worker_index, tm->n_vlib_mains - 1);
+
+ vec_validate_init_empty (congested_handoff_queue_by_worker_index,
+ hm->first_worker_index + hm->num_workers - 1,
+ (vlib_frame_queue_t *) (~0));
+ }
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 sw_if_index0;
+ u32 hash;
+ u64 hash_key;
+ per_inteface_handoff_data_t *ihd0;
+ u32 index0;
+
+ bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ ASSERT (hm->if_data);
+ ihd0 = vec_elt_at_index (hm->if_data, sw_if_index0);
+
+ next_worker_index = hm->first_worker_index;
+
+ /*
+ * Force unknown traffic onto worker 0,
+ * and into ethernet-input. $$$$ add more hashes.
+ */
+
+ /* Compute ingress LB hash */
+ hash_key = hm->hash_fn ((ethernet_header_t *) b0->data);
+ hash = (u32) clib_xxhash (hash_key);
+
+ /* if input node did not specify next index, then packet
+ should go to ethernet-input */
+ if (PREDICT_FALSE ((b0->flags & BUFFER_HANDOFF_NEXT_VALID) == 0))
+ vnet_buffer (b0)->handoff.next_index =
+ HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT;
+ else if (vnet_buffer (b0)->handoff.next_index ==
+ HANDOFF_DISPATCH_NEXT_IP4_INPUT
+ || vnet_buffer (b0)->handoff.next_index ==
+ HANDOFF_DISPATCH_NEXT_IP6_INPUT
+ || vnet_buffer (b0)->handoff.next_index ==
+ HANDOFF_DISPATCH_NEXT_MPLS_INPUT)
+ vlib_buffer_advance (b0, (sizeof (ethernet_header_t)));
+
+ if (PREDICT_TRUE (is_pow2 (vec_len (ihd0->workers))))
+ index0 = hash & (vec_len (ihd0->workers) - 1);
+ else
+ index0 = hash % vec_len (ihd0->workers);
+
+ next_worker_index += ihd0->workers[index0];
+
+ if (next_worker_index != current_worker_index)
+ {
+ if (hf)
+ hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
+
+ hf = vlib_get_worker_handoff_queue_elt (hm->frame_queue_index,
+ next_worker_index,
+ handoff_queue_elt_by_worker_index);
+
+ n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
+ to_next_worker = &hf->buffer_index[hf->n_vectors];
+ current_worker_index = next_worker_index;
+ }
+
+ /* enqueue to correct worker thread */
+ to_next_worker[0] = bi0;
+ to_next_worker++;
+ n_left_to_next_worker--;
+
+ if (n_left_to_next_worker == 0)
+ {
+ hf->n_vectors = VLIB_FRAME_SIZE;
+ vlib_put_frame_queue_elt (hf);
+ current_worker_index = ~0;
+ handoff_queue_elt_by_worker_index[next_worker_index] = 0;
+ hf = 0;
+ }
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ worker_handoff_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_worker_index = next_worker_index - hm->first_worker_index;
+ t->buffer_index = bi0;
+ }
+
+ }
+
+ if (hf)
+ hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
+
+ /* Ship frames to the worker nodes */
+ for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
+ {
+ if (handoff_queue_elt_by_worker_index[i])
+ {
+ hf = handoff_queue_elt_by_worker_index[i];
+ /*
+ * It works better to let the handoff node
+ * rate-adapt, always ship the handoff queue element.
+ */
+ if (1 || hf->n_vectors == hf->last_n_vectors)
+ {
+ vlib_put_frame_queue_elt (hf);
+ handoff_queue_elt_by_worker_index[i] = 0;
+ }
+ else
+ hf->last_n_vectors = hf->n_vectors;
+ }
+ congested_handoff_queue_by_worker_index[i] =
+ (vlib_frame_queue_t *) (~0);
+ }
+ hf = 0;
+ current_worker_index = ~0;
+ return frame->n_vectors;
+}
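+
+/*
+ * Worker selection sketch: with a power-of-two worker set, e.g. 4 workers,
+ * the branch above reduces to index0 = hash & 3; with 3 workers it falls
+ * back to index0 = hash % 3. The chosen thread is then
+ * first_worker_index + workers[index0], i.e. the bitmap-selected workers
+ * are load-balanced by the xxhash of the ethernet header.
+ */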
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (worker_handoff_node) = {
+ .function = worker_handoff_node_fn,
+ .name = "worker-handoff",
+ .vector_size = sizeof (u32),
+ .format_trace = format_worker_handoff_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (worker_handoff_node, worker_handoff_node_fn)
+/* *INDENT-ON* */
+
+int
+interface_handoff_enable_disable (vlib_main_t * vm, u32 sw_if_index,
+ uword * bitmap, int enable_disable)
+{
+ handoff_main_t *hm = &handoff_main;
+ vnet_sw_interface_t *sw;
+ vnet_main_t *vnm = vnet_get_main ();
+ per_inteface_handoff_data_t *d;
+ int i, rv = 0;
+
+ if (pool_is_free_index (vnm->interface_main.sw_interfaces, sw_if_index))
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+
+ sw = vnet_get_sw_interface (vnm, sw_if_index);
+ if (sw->type != VNET_SW_INTERFACE_TYPE_HARDWARE)
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+
+ if (clib_bitmap_last_set (bitmap) >= hm->num_workers)
+ return VNET_API_ERROR_INVALID_WORKER;
+
+ if (hm->frame_queue_index == ~0)
+ hm->frame_queue_index =
+ vlib_frame_queue_main_init (handoff_dispatch_node.index, 0);
+
+ vec_validate (hm->if_data, sw_if_index);
+ d = vec_elt_at_index (hm->if_data, sw_if_index);
+
+ vec_free (d->workers);
+ vec_free (d->workers_bitmap);
+
+ if (enable_disable)
+ {
+ d->workers_bitmap = bitmap;
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (i, bitmap,
+ ({
+ vec_add1(d->workers, i);
+ }));
+ /* *INDENT-ON* */
+ }
+
+ vnet_feature_enable_disable ("device-input", "worker-handoff",
+ sw_if_index, enable_disable, 0, 0);
+ return rv;
+}
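+
+/*
+ * Programmatic usage sketch (worker numbers illustrative).  Note the
+ * function stores the bitmap itself in the per-interface data, so the
+ * caller must not free it after enabling:
+ *
+ *   uword *workers = 0;
+ *   workers = clib_bitmap_set (workers, 0, 1);
+ *   workers = clib_bitmap_set (workers, 1, 1);
+ *   rv = interface_handoff_enable_disable (vm, sw_if_index, workers, 1);
+ */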
+
+static clib_error_t *
+set_interface_handoff_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ handoff_main_t *hm = &handoff_main;
+ u32 sw_if_index = ~0;
+ int enable_disable = 1;
+ uword *bitmap = 0;
+ u32 sym = ~0;
+
+ int rv = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "disable"))
+ enable_disable = 0;
+ else if (unformat (input, "workers %U", unformat_bitmap_list, &bitmap))
+ ;
+ else if (unformat (input, "%U", unformat_vnet_sw_interface,
+ vnet_get_main (), &sw_if_index))
+ ;
+ else if (unformat (input, "symmetrical"))
+ sym = 1;
+ else if (unformat (input, "asymmetrical"))
+ sym = 0;
+ else
+ break;
+ }
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "Please specify an interface...");
+
+ if (bitmap == 0)
+ return clib_error_return (0, "Please specify list of workers...");
+
+ rv =
+ interface_handoff_enable_disable (vm, sw_if_index, bitmap,
+ enable_disable);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_INVALID_SW_IF_INDEX:
+ return clib_error_return (0, "Invalid interface");
+ break;
+
+ case VNET_API_ERROR_INVALID_WORKER:
+ return clib_error_return (0, "Invalid worker(s)");
+ break;
+
+ case VNET_API_ERROR_UNIMPLEMENTED:
+ return clib_error_return (0,
+ "Device driver doesn't support redirection");
+ break;
+
+ default:
+ return clib_error_return (0, "unknown return value %d", rv);
+ }
+
+ if (sym == 1)
+ hm->hash_fn = eth_get_sym_key;
+ else if (sym == 0)
+ hm->hash_fn = eth_get_key;
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_handoff_command, static) = {
+ .path = "set interface handoff",
+ .short_help =
+ "set interface handoff <interface-name> workers <workers-list> [symmetrical|asymmetrical]",
+ .function = set_interface_handoff_command_fn,
+};
+/* *INDENT-ON* */
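+
+/*
+ * Example CLI usage (interface name and worker list illustrative):
+ *
+ *   set interface handoff GigabitEthernet2/0/0 workers 0-3 symmetrical
+ *
+ * This enables the worker-handoff feature on the device-input arc and,
+ * with "symmetrical", hashes both directions of a flow to the same
+ * worker.
+ */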
+
+typedef struct
+{
+ u32 buffer_index;
+ u32 next_index;
+ u32 sw_if_index;
+} handoff_dispatch_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_handoff_dispatch_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ handoff_dispatch_trace_t *t = va_arg (*args, handoff_dispatch_trace_t *);
+
+ s = format (s, "handoff-dispatch: sw_if_index %d next_index %d buffer 0x%x",
+ t->sw_if_index, t->next_index, t->buffer_index);
+ return s;
+}
+
+#define foreach_handoff_dispatch_error \
+_(EXAMPLE, "example packets")
+
+typedef enum
+{
+#define _(sym,str) HANDOFF_DISPATCH_ERROR_##sym,
+ foreach_handoff_dispatch_error
+#undef _
+ HANDOFF_DISPATCH_N_ERROR,
+} handoff_dispatch_error_t;
+
+static char *handoff_dispatch_error_strings[] = {
+#define _(sym,string) string,
+ foreach_handoff_dispatch_error
+#undef _
+};
+
+static uword
+handoff_dispatch_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ handoff_dispatch_next_t next_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ next0 = vnet_buffer (b0)->handoff.next_index;
+ next1 = vnet_buffer (b1)->handoff.next_index;
+
+ if (PREDICT_FALSE (vm->trace_main.trace_active_hint))
+ {
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */
+ 0);
+ handoff_dispatch_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ t->buffer_index = bi0;
+ }
+ if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vlib_trace_buffer (vm, node, next1, b1, /* follow_chain */
+ 0);
+ handoff_dispatch_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ t->buffer_index = bi1;
+ }
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u32 sw_if_index0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ next0 = vnet_buffer (b0)->handoff.next_index;
+
+ if (PREDICT_FALSE (vm->trace_main.trace_active_hint))
+ {
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */
+ 0);
+ handoff_dispatch_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ t->buffer_index = bi0;
+ }
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (handoff_dispatch_node) = {
+ .function = handoff_dispatch_node_fn,
+ .name = "handoff-dispatch",
+ .vector_size = sizeof (u32),
+ .format_trace = format_handoff_dispatch_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .flags = VLIB_NODE_FLAG_IS_HANDOFF,
+
+ .n_errors = ARRAY_LEN(handoff_dispatch_error_strings),
+ .error_strings = handoff_dispatch_error_strings,
+
+ .n_next_nodes = HANDOFF_DISPATCH_N_NEXT,
+
+ .next_nodes = {
+ [HANDOFF_DISPATCH_NEXT_DROP] = "error-drop",
+ [HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT] = "ethernet-input",
+ [HANDOFF_DISPATCH_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
+ [HANDOFF_DISPATCH_NEXT_IP6_INPUT] = "ip6-input",
+ [HANDOFF_DISPATCH_NEXT_MPLS_INPUT] = "mpls-input",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (handoff_dispatch_node, handoff_dispatch_node_fn)
+/* *INDENT-ON* */
+
+clib_error_t *
+handoff_init (vlib_main_t * vm)
+{
+ handoff_main_t *hm = &handoff_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ clib_error_t *error;
+ uword *p;
+
+ if ((error = vlib_call_init_function (vm, threads_init)))
+ return error;
+
+ vlib_thread_registration_t *tr;
+ /* Only the standard vnet worker threads are supported */
+ p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+ if (p)
+ {
+ tr = (vlib_thread_registration_t *) p[0];
+ if (tr)
+ {
+ hm->num_workers = tr->count;
+ hm->first_worker_index = tr->first_index;
+ }
+ }
+
+ hm->hash_fn = eth_get_key;
+
+ hm->vlib_main = vm;
+ hm->vnet_main = &vnet_main;
+
+ hm->frame_queue_index = ~0;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (handoff_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/handoff.h b/src/vnet/handoff.h
new file mode 100644
index 00000000000..815206a9f8c
--- /dev/null
+++ b/src/vnet/handoff.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_vnet_handoff_h
+#define included_vnet_handoff_h
+
+#include <vlib/vlib.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/mpls/packet.h>
+
+typedef enum
+{
+ HANDOFF_DISPATCH_NEXT_IP4_INPUT,
+ HANDOFF_DISPATCH_NEXT_IP6_INPUT,
+ HANDOFF_DISPATCH_NEXT_MPLS_INPUT,
+ HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT,
+ HANDOFF_DISPATCH_NEXT_DROP,
+ HANDOFF_DISPATCH_N_NEXT,
+} handoff_dispatch_next_t;
+
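+/*
+ * Dispatch sketch: the ingress thread stores one of these values in
+ * the buffer before queueing it, and handoff-dispatch on the target
+ * worker replays it as the next node, e.g.:
+ *
+ *   vnet_buffer (b0)->handoff.next_index = HANDOFF_DISPATCH_NEXT_IP4_INPUT;
+ */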
+
+static inline u64
+ipv4_get_key (ip4_header_t * ip)
+{
+ u64 hash_key;
+
+ hash_key = *((u64 *) (&ip->address_pair)) ^ ip->protocol;
+
+ return hash_key;
+}
+
+static inline u64
+ipv6_get_key (ip6_header_t * ip)
+{
+ u64 hash_key;
+
+ hash_key = ip->src_address.as_u64[0] ^
+ rotate_left (ip->src_address.as_u64[1], 13) ^
+ rotate_left (ip->dst_address.as_u64[0], 26) ^
+ rotate_left (ip->dst_address.as_u64[1], 39) ^ ip->protocol;
+
+ return hash_key;
+}
+
+#define MPLS_BOTTOM_OF_STACK_BIT_MASK 0x00000100U
+#define MPLS_LABEL_MASK 0xFFFFF000U
+
+static inline u64
+mpls_get_key (mpls_unicast_header_t * m)
+{
+ u64 hash_key;
+ u8 ip_ver;
+
+ /* find the bottom of the MPLS label stack. */
+ if (PREDICT_TRUE (m->label_exp_s_ttl &
+ clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK)))
+ {
+ goto bottom_lbl_found;
+ }
+ m++;
+
+ if (PREDICT_TRUE (m->label_exp_s_ttl &
+ clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK)))
+ {
+ goto bottom_lbl_found;
+ }
+ m++;
+
+ if (m->label_exp_s_ttl &
+ clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK))
+ {
+ goto bottom_lbl_found;
+ }
+ m++;
+
+ if (m->label_exp_s_ttl &
+ clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK))
+ {
+ goto bottom_lbl_found;
+ }
+ m++;
+
+ if (m->label_exp_s_ttl &
+ clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK))
+ {
+ goto bottom_lbl_found;
+ }
+
+ /* the bottom label was not found - use the last label */
+ hash_key = m->label_exp_s_ttl & clib_net_to_host_u32 (MPLS_LABEL_MASK);
+
+ return hash_key;
+
+bottom_lbl_found:
+ m++;
+ ip_ver = (*((u8 *) m) >> 4);
+
+ /* find out if it is IPV4 or IPV6 header */
+ if (PREDICT_TRUE (ip_ver == 4))
+ {
+ hash_key = ipv4_get_key ((ip4_header_t *) m);
+ }
+ else if (PREDICT_TRUE (ip_ver == 6))
+ {
+ hash_key = ipv6_get_key ((ip6_header_t *) m);
+ }
+ else
+ {
+ /* use the bottom label */
+ hash_key =
+ (m - 1)->label_exp_s_ttl & clib_net_to_host_u32 (MPLS_LABEL_MASK);
+ }
+
+ return hash_key;
+
+}
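+
+/*
+ * Illustrative walk: for a two-label packet
+ *
+ *   [ label 100, S=0 ][ label 200, S=1 ][ IPv4 header ... ]
+ *
+ * the bottom-of-stack bit matches on the second label, m is advanced
+ * past it and the key comes from ipv4_get_key().  Stacks deeper than
+ * five labels fall back to hashing the last label inspected.
+ */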
+
+static inline u64
+eth_get_sym_key (ethernet_header_t * h0)
+{
+ u64 hash_key;
+
+  if (PREDICT_TRUE (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
+ {
+ ip4_header_t *ip = (ip4_header_t *) (h0 + 1);
+ hash_key =
+ (u64) (ip->src_address.as_u32 ^
+ ip->dst_address.as_u32 ^ ip->protocol);
+ }
+ else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
+ {
+ ip6_header_t *ip = (ip6_header_t *) (h0 + 1);
+ hash_key = (u64) (ip->src_address.as_u64[0] ^
+ ip->src_address.as_u64[1] ^
+ ip->dst_address.as_u64[0] ^
+ ip->dst_address.as_u64[1] ^ ip->protocol);
+ }
+ else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS_UNICAST))
+ {
+ hash_key = mpls_get_key ((mpls_unicast_header_t *) (h0 + 1));
+ }
+ else
+ if (PREDICT_FALSE
+ ((h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN))
+ || (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD))))
+ {
+ ethernet_vlan_header_t *outer = (ethernet_vlan_header_t *) (h0 + 1);
+
+ outer = (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN)) ?
+ outer + 1 : outer;
+      if (PREDICT_TRUE (outer->type ==
+			clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
+ {
+ ip4_header_t *ip = (ip4_header_t *) (outer + 1);
+ hash_key =
+ (u64) (ip->src_address.as_u32 ^
+ ip->dst_address.as_u32 ^ ip->protocol);
+ }
+ else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
+ {
+ ip6_header_t *ip = (ip6_header_t *) (outer + 1);
+ hash_key =
+ (u64) (ip->src_address.as_u64[0] ^ ip->src_address.as_u64[1] ^
+ ip->dst_address.as_u64[0] ^
+ ip->dst_address.as_u64[1] ^ ip->protocol);
+ }
+ else if (outer->type ==
+ clib_host_to_net_u16 (ETHERNET_TYPE_MPLS_UNICAST))
+ {
+ hash_key = mpls_get_key ((mpls_unicast_header_t *) (outer + 1));
+ }
+ else
+ {
+ hash_key = outer->type;
+ }
+ }
+ else
+ {
+ hash_key = 0;
+ }
+
+ return hash_key;
+}
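+
+/*
+ * Symmetry sketch (addresses illustrative): the IPv4/IPv6 keys above
+ * XOR source and destination, and XOR commutes, so a flow and its
+ * reverse hash identically:
+ *
+ *   key (10.0.0.1 -> 10.0.0.2) = 0x0a000001 ^ 0x0a000002 ^ proto
+ *   key (10.0.0.2 -> 10.0.0.1) = 0x0a000002 ^ 0x0a000001 ^ proto
+ */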
+
+static inline u64
+eth_get_key (ethernet_header_t * h0)
+{
+ u64 hash_key;
+
+  if (PREDICT_TRUE (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
+ {
+ hash_key = ipv4_get_key ((ip4_header_t *) (h0 + 1));
+ }
+ else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
+ {
+ hash_key = ipv6_get_key ((ip6_header_t *) (h0 + 1));
+ }
+ else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS_UNICAST))
+ {
+ hash_key = mpls_get_key ((mpls_unicast_header_t *) (h0 + 1));
+ }
+ else if ((h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN)) ||
+ (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD)))
+ {
+ ethernet_vlan_header_t *outer = (ethernet_vlan_header_t *) (h0 + 1);
+
+ outer = (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN)) ?
+ outer + 1 : outer;
+      if (PREDICT_TRUE (outer->type ==
+			clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
+ {
+ hash_key = ipv4_get_key ((ip4_header_t *) (outer + 1));
+ }
+ else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
+ {
+ hash_key = ipv6_get_key ((ip6_header_t *) (outer + 1));
+ }
+ else if (outer->type ==
+ clib_host_to_net_u16 (ETHERNET_TYPE_MPLS_UNICAST))
+ {
+ hash_key = mpls_get_key ((mpls_unicast_header_t *) (outer + 1));
+ }
+ else
+ {
+ hash_key = outer->type;
+ }
+ }
+ else
+ {
+ hash_key = 0;
+ }
+
+ return hash_key;
+}
+
+#endif /* included_vnet_handoff_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/hdlc/error.def b/src/vnet/hdlc/error.def
new file mode 100644
index 00000000000..16e001bbcfa
--- /dev/null
+++ b/src/vnet/hdlc/error.def
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * hdlc_error.def: hdlc errors
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+hdlc_error (NONE, "no error")
+hdlc_error (UNKNOWN_PROTOCOL, "unknown hdlc protocol")
+hdlc_error (UNKNOWN_ADDRESS_CONTROL, "address, control != 0x0f00")
diff --git a/src/vnet/hdlc/hdlc.c b/src/vnet/hdlc/hdlc.c
new file mode 100644
index 00000000000..174085ac519
--- /dev/null
+++ b/src/vnet/hdlc/hdlc.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * hdlc.c: hdlc
+ *
+ * Copyright (c) 2010 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/hdlc/hdlc.h>
+
+/* Global main structure. */
+hdlc_main_t hdlc_main;
+
+u8 * format_hdlc_protocol (u8 * s, va_list * args)
+{
+ hdlc_protocol_t p = va_arg (*args, u32);
+ hdlc_main_t * pm = &hdlc_main;
+ hdlc_protocol_info_t * pi = hdlc_get_protocol_info (pm, p);
+
+ if (pi)
+ s = format (s, "%s", pi->name);
+ else
+ s = format (s, "0x%04x", p);
+
+ return s;
+}
+
+u8 * format_hdlc_header_with_length (u8 * s, va_list * args)
+{
+ hdlc_main_t * pm = &hdlc_main;
+ hdlc_header_t * h = va_arg (*args, hdlc_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+ hdlc_protocol_t p = clib_net_to_host_u16 (h->protocol);
+ uword indent, header_bytes;
+
+ header_bytes = sizeof (h[0]);
+ if (max_header_bytes != 0 && header_bytes > max_header_bytes)
+ return format (s, "hdlc header truncated");
+
+ indent = format_get_indent (s);
+
+ s = format (s, "HDLC %U", format_hdlc_protocol, p);
+
+  /* Flag non-default address/control (0x0f/0x00, as used by
+     hdlc_build_rewrite below). */
+  if (h->address != 0x0f)
+    s = format (s, ", address 0x%02x", h->address);
+  if (h->control != 0x00)
+    s = format (s, ", control 0x%02x", h->control);
+
+  if (max_header_bytes != 0 && header_bytes < max_header_bytes)
+ {
+ hdlc_protocol_info_t * pi = hdlc_get_protocol_info (pm, p);
+ vlib_node_t * node = vlib_get_node (pm->vlib_main, pi->node_index);
+ if (node->format_buffer)
+ s = format (s, "\n%U%U",
+ format_white_space, indent,
+ node->format_buffer, (void *) (h + 1),
+ max_header_bytes - header_bytes);
+ }
+
+ return s;
+}
+
+u8 * format_hdlc_header (u8 * s, va_list * args)
+{
+ hdlc_header_t * h = va_arg (*args, hdlc_header_t *);
+ return format (s, "%U", format_hdlc_header_with_length, h, 0);
+}
+
+/* Returns hdlc protocol as an int in host byte order. */
+uword
+unformat_hdlc_protocol_host_byte_order (unformat_input_t * input,
+ va_list * args)
+{
+ u16 * result = va_arg (*args, u16 *);
+ hdlc_main_t * pm = &hdlc_main;
+ int p, i;
+
+ /* Numeric type. */
+ if (unformat (input, "0x%x", &p)
+ || unformat (input, "%d", &p))
+ {
+ if (p >= (1 << 16))
+ return 0;
+ *result = p;
+ return 1;
+ }
+
+ /* Named type. */
+ if (unformat_user (input, unformat_vlib_number_by_name,
+ pm->protocol_info_by_name, &i))
+ {
+ hdlc_protocol_info_t * pi = vec_elt_at_index (pm->protocol_infos, i);
+ *result = pi->protocol;
+ return 1;
+ }
+
+ return 0;
+}
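+
+/*
+ * Accepted forms (illustrative): "0x8847", "34887", or a protocol name
+ * from foreach_hdlc_protocol such as "mpls_unicast"; each yields 0x8847
+ * in host byte order.
+ */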
+
+uword
+unformat_hdlc_protocol_net_byte_order (unformat_input_t * input,
+ va_list * args)
+{
+ u16 * result = va_arg (*args, u16 *);
+ if (! unformat_user (input, unformat_hdlc_protocol_host_byte_order, result))
+ return 0;
+ *result = clib_host_to_net_u16 ((u16) *result);
+ return 1;
+}
+
+uword
+unformat_hdlc_header (unformat_input_t * input, va_list * args)
+{
+ u8 ** result = va_arg (*args, u8 **);
+ hdlc_header_t _h, * h = &_h;
+ u16 p;
+
+ if (! unformat (input, "%U",
+ unformat_hdlc_protocol_host_byte_order, &p))
+ return 0;
+
+  /* Cisco HDLC unicast defaults, matching hdlc_build_rewrite. */
+  h->address = 0x0f;
+  h->control = 0x00;
+ h->protocol = clib_host_to_net_u16 (p);
+
+ /* Add header to result. */
+ {
+ void * p;
+ u32 n_bytes = sizeof (h[0]);
+
+ vec_add2 (*result, p, n_bytes);
+ clib_memcpy (p, h, n_bytes);
+ }
+
+ return 1;
+}
+
+static u8*
+hdlc_build_rewrite (vnet_main_t * vnm,
+ u32 sw_if_index,
+ vnet_link_t link_type,
+ const void *dst_address)
+{
+ hdlc_header_t * h;
+ u8* rewrite = NULL;
+ hdlc_protocol_t protocol;
+
+ switch (link_type) {
+#define _(a,b) case VNET_LINK_##a: protocol = HDLC_PROTOCOL_##b; break
+ _ (IP4, ip4);
+ _ (IP6, ip6);
+ _ (MPLS, mpls_unicast);
+#undef _
+ default:
+ return (NULL);
+ }
+
+ vec_validate(rewrite, sizeof(*h)-1);
+ h = (hdlc_header_t *)rewrite;
+ h->address = 0x0f;
+ h->control = 0x00;
+ h->protocol = clib_host_to_net_u16 (protocol);
+
+ return (rewrite);
+}
+
+VNET_HW_INTERFACE_CLASS (hdlc_hw_interface_class) = {
+ .name = "HDLC",
+ .format_header = format_hdlc_header_with_length,
+ .unformat_header = unformat_hdlc_header,
+ .build_rewrite = hdlc_build_rewrite,
+ .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
+};
+
+static void add_protocol (hdlc_main_t * pm,
+ hdlc_protocol_t protocol,
+ char * protocol_name)
+{
+ hdlc_protocol_info_t * pi;
+ u32 i;
+
+ vec_add2 (pm->protocol_infos, pi, 1);
+ i = pi - pm->protocol_infos;
+
+ pi->name = protocol_name;
+ pi->protocol = protocol;
+ pi->next_index = pi->node_index = ~0;
+
+ hash_set (pm->protocol_info_by_protocol, protocol, i);
+ hash_set_mem (pm->protocol_info_by_name, pi->name, i);
+}
+
+static clib_error_t * hdlc_init (vlib_main_t * vm)
+{
+ hdlc_main_t * pm = &hdlc_main;
+
+ memset (pm, 0, sizeof (pm[0]));
+ pm->vlib_main = vm;
+
+ pm->protocol_info_by_name = hash_create_string (0, sizeof (uword));
+ pm->protocol_info_by_protocol = hash_create (0, sizeof (uword));
+
+#define _(n,s) add_protocol (pm, HDLC_PROTOCOL_##s, #s);
+ foreach_hdlc_protocol
+#undef _
+
+ return vlib_call_init_function (vm, hdlc_input_init);
+}
+
+VLIB_INIT_FUNCTION (hdlc_init);
+
+hdlc_main_t * hdlc_get_main (vlib_main_t * vm)
+{
+ vlib_call_init_function (vm, hdlc_init);
+ return &hdlc_main;
+}
+
diff --git a/src/vnet/hdlc/hdlc.h b/src/vnet/hdlc/hdlc.h
new file mode 100644
index 00000000000..73b15c2fac7
--- /dev/null
+++ b/src/vnet/hdlc/hdlc.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * hdlc.h: types/functions for hdlc.
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_hdlc_h
+#define included_hdlc_h
+
+#include <vnet/vnet.h>
+#include <vnet/hdlc/packet.h>
+#include <vnet/pg/pg.h>
+
+extern vnet_hw_interface_class_t hdlc_hw_interface_class;
+
+typedef enum {
+#define hdlc_error(n,s) HDLC_ERROR_##n,
+#include <vnet/hdlc/error.def>
+#undef hdlc_error
+ HDLC_N_ERROR,
+} hdlc_error_t;
+
+typedef struct {
+ /* Name (a c string). */
+ char * name;
+
+ /* HDLC protocol type in host byte order. */
+ hdlc_protocol_t protocol;
+
+ /* Node which handles this type. */
+ u32 node_index;
+
+ /* Next index for this type. */
+ u32 next_index;
+} hdlc_protocol_info_t;
+
+typedef struct {
+ vlib_main_t * vlib_main;
+
+ hdlc_protocol_info_t * protocol_infos;
+
+ /* Hash tables mapping name/protocol to protocol info index. */
+ uword * protocol_info_by_name, * protocol_info_by_protocol;
+} hdlc_main_t;
+
+always_inline hdlc_protocol_info_t *
+hdlc_get_protocol_info (hdlc_main_t * em, hdlc_protocol_t protocol)
+{
+ uword * p = hash_get (em->protocol_info_by_protocol, protocol);
+ return p ? vec_elt_at_index (em->protocol_infos, p[0]) : 0;
+}
+
+extern hdlc_main_t hdlc_main;
+
+/* Register given node index to take input for given hdlc type. */
+void
+hdlc_register_input_type (vlib_main_t * vm,
+ hdlc_protocol_t protocol,
+ u32 node_index);
+
+void hdlc_set_adjacency (vnet_rewrite_header_t * rw,
+ uword max_data_bytes,
+ hdlc_protocol_t protocol);
+
+format_function_t format_hdlc_protocol;
+format_function_t format_hdlc_header;
+format_function_t format_hdlc_header_with_length;
+
+/* Parse hdlc protocol as 0xXXXX or protocol name.
+ In either host or network byte order. */
+unformat_function_t unformat_hdlc_protocol_host_byte_order;
+unformat_function_t unformat_hdlc_protocol_net_byte_order;
+
+/* Parse hdlc header. */
+unformat_function_t unformat_hdlc_header;
+unformat_function_t unformat_pg_hdlc_header;
+
+always_inline void
+hdlc_setup_node (vlib_main_t * vm, u32 node_index)
+{
+ vlib_node_t * n = vlib_get_node (vm, node_index);
+ pg_node_t * pn = pg_get_node (node_index);
+
+ n->format_buffer = format_hdlc_header_with_length;
+ n->unformat_buffer = unformat_hdlc_header;
+ pn->unformat_edit = unformat_pg_hdlc_header;
+}
+
+void
+hdlc_register_input_protocol (vlib_main_t * vm,
+ hdlc_protocol_t protocol,
+ u32 node_index);
+
+#endif /* included_hdlc_h */
diff --git a/src/vnet/hdlc/node.c b/src/vnet/hdlc/node.c
new file mode 100644
index 00000000000..4fe0296aca1
--- /dev/null
+++ b/src/vnet/hdlc/node.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * hdlc_node.c: hdlc packet processing
+ *
+ * Copyright (c) 2010 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/hdlc/hdlc.h>
+#include <vppinfra/sparse_vec.h>
+
+#define foreach_hdlc_input_next \
+ _ (PUNT, "error-punt") \
+ _ (DROP, "error-drop")
+
+typedef enum {
+#define _(s,n) HDLC_INPUT_NEXT_##s,
+ foreach_hdlc_input_next
+#undef _
+ HDLC_INPUT_N_NEXT,
+} hdlc_input_next_t;
+
+typedef struct {
+ u8 packet_data[32];
+} hdlc_input_trace_t;
+
+static u8 * format_hdlc_input_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ hdlc_input_trace_t * t = va_arg (*va, hdlc_input_trace_t *);
+
+ s = format (s, "%U", format_hdlc_header, t->packet_data);
+
+ return s;
+}
+
+typedef struct {
+ /* Sparse vector mapping hdlc protocol in network byte order
+ to next index. */
+ u16 * next_by_protocol;
+
+ u32 * sparse_index_by_next_index;
+} hdlc_input_runtime_t;
+
+static uword
+hdlc_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ hdlc_input_runtime_t * rt = (void *) node->runtime_data;
+ u32 n_left_from, next_index, i_next, * from, * to_next;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node,
+ from,
+ n_left_from,
+ sizeof (from[0]),
+ sizeof (hdlc_input_trace_t));
+
+ next_index = node->cached_next_index;
+ i_next = vec_elt (rt->sparse_index_by_next_index, next_index);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+ hdlc_header_t * h0, * h1;
+ u32 i0, i1, len0, len1, protocol0, protocol1, enqueue_code;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * b2, * b3;
+
+ b2 = vlib_get_buffer (vm, from[2]);
+ b3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (b2, LOAD);
+ vlib_prefetch_buffer_header (b3, LOAD);
+
+ CLIB_PREFETCH (b2->data, sizeof (h0[0]), LOAD);
+ CLIB_PREFETCH (b3->data, sizeof (h1[0]), LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ h0 = (void *) (b0->data + b0->current_data);
+ h1 = (void *) (b1->data + b1->current_data);
+
+ protocol0 = h0->protocol;
+ protocol1 = h1->protocol;
+
+ /* Add padding bytes for OSI protocols. */
+ len0 = sizeof (h0[0]);
+ len1 = sizeof (h1[0]);
+
+ len0 += protocol0 == clib_host_to_net_u16 (HDLC_PROTOCOL_osi);
+ len1 += protocol1 == clib_host_to_net_u16 (HDLC_PROTOCOL_osi);
+
+ b0->current_data += len0;
+ b1->current_data += len1;
+
+ b0->current_length -= len0;
+ b1->current_length -= len1;
+
+ /* Index sparse array with network byte order. */
+ sparse_vec_index2 (rt->next_by_protocol, protocol0, protocol1, &i0, &i1);
+
+ b0->error = node->errors[i0 == SPARSE_VEC_INVALID_INDEX ? HDLC_ERROR_UNKNOWN_PROTOCOL : HDLC_ERROR_NONE];
+ b1->error = node->errors[i1 == SPARSE_VEC_INVALID_INDEX ? HDLC_ERROR_UNKNOWN_PROTOCOL : HDLC_ERROR_NONE];
+
+ enqueue_code = (i0 != i_next) + 2*(i1 != i_next);
+
+ if (PREDICT_FALSE (enqueue_code != 0))
+ {
+ switch (enqueue_code)
+ {
+ case 1:
+ /* A B A */
+ to_next[-2] = bi1;
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node, vec_elt (rt->next_by_protocol, i0), bi0);
+ break;
+
+ case 2:
+ /* A A B */
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node, vec_elt (rt->next_by_protocol, i1), bi1);
+ break;
+
+ case 3:
+ /* A B B or A B C */
+ to_next -= 2;
+ n_left_to_next += 2;
+ vlib_set_next_frame_buffer (vm, node, vec_elt (rt->next_by_protocol, i0), bi0);
+ vlib_set_next_frame_buffer (vm, node, vec_elt (rt->next_by_protocol, i1), bi1);
+ if (i0 == i1)
+ {
+ vlib_put_next_frame (vm, node, next_index,
+ n_left_to_next);
+ i_next = i1;
+ next_index = vec_elt (rt->next_by_protocol, i_next);
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ }
+ }
+ }
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ hdlc_header_t * h0;
+ u32 i0, len0, protocol0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ h0 = (void *) (b0->data + b0->current_data);
+
+ protocol0 = h0->protocol;
+
+ /* Add padding bytes for OSI protocols. */
+ len0 = sizeof (h0[0]);
+ len0 += protocol0 == clib_host_to_net_u16 (HDLC_PROTOCOL_osi);
+
+ b0->current_data += len0;
+ b0->current_length -= len0;
+
+ i0 = sparse_vec_index (rt->next_by_protocol, protocol0);
+
+ b0->error = node->errors[i0 == SPARSE_VEC_INVALID_INDEX ? HDLC_ERROR_UNKNOWN_PROTOCOL : HDLC_ERROR_NONE];
+
+ /* Sent packet to wrong next? */
+ if (PREDICT_FALSE (i0 != i_next))
+ {
+ /* Return old frame; remove incorrectly enqueued packet. */
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1);
+
+ /* Send to correct next. */
+ i_next = i0;
+ next_index = vec_elt (rt->next_by_protocol, i_next);
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ to_next[0] = bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return from_frame->n_vectors;
+}
+
+static char * hdlc_error_strings[] = {
+#define hdlc_error(n,s) s,
+#include "error.def"
+#undef hdlc_error
+};
+
+VLIB_REGISTER_NODE (hdlc_input_node) = {
+ .function = hdlc_input,
+ .name = "hdlc-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .runtime_data_bytes = sizeof (hdlc_input_runtime_t),
+
+ .n_errors = HDLC_N_ERROR,
+ .error_strings = hdlc_error_strings,
+
+ .n_next_nodes = HDLC_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [HDLC_INPUT_NEXT_##s] = n,
+ foreach_hdlc_input_next
+#undef _
+ },
+
+ .format_buffer = format_hdlc_header_with_length,
+ .format_trace = format_hdlc_input_trace,
+ .unformat_buffer = unformat_hdlc_header,
+};
+
+static clib_error_t * hdlc_input_init (vlib_main_t * vm)
+{
+ hdlc_input_runtime_t * rt;
+
+ {
+ clib_error_t * error = vlib_call_init_function (vm, hdlc_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ hdlc_setup_node (vm, hdlc_input_node.index);
+
+ rt = vlib_node_get_runtime_data (vm, hdlc_input_node.index);
+
+ rt->next_by_protocol = sparse_vec_new
+ (/* elt bytes */ sizeof (rt->next_by_protocol[0]),
+ /* bits in index */ BITS (((hdlc_header_t *) 0)->protocol));
+
+ vec_validate (rt->sparse_index_by_next_index, HDLC_INPUT_NEXT_DROP);
+ vec_validate (rt->sparse_index_by_next_index, HDLC_INPUT_NEXT_PUNT);
+ rt->sparse_index_by_next_index[HDLC_INPUT_NEXT_DROP]
+ = SPARSE_VEC_INVALID_INDEX;
+ rt->sparse_index_by_next_index[HDLC_INPUT_NEXT_PUNT]
+ = SPARSE_VEC_INVALID_INDEX;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (hdlc_input_init);
+
+void
+hdlc_register_input_protocol (vlib_main_t * vm,
+ hdlc_protocol_t protocol,
+ u32 node_index)
+{
+ hdlc_main_t * em = &hdlc_main;
+ hdlc_protocol_info_t * pi;
+ hdlc_input_runtime_t * rt;
+ u16 * n;
+ u32 i;
+
+ {
+ clib_error_t * error = vlib_call_init_function (vm, hdlc_input_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ pi = hdlc_get_protocol_info (em, protocol);
+ pi->node_index = node_index;
+ pi->next_index = vlib_node_add_next (vm,
+ hdlc_input_node.index,
+ node_index);
+
+ /* Setup hdlc protocol -> next index sparse vector mapping. */
+ rt = vlib_node_get_runtime_data (vm, hdlc_input_node.index);
+ n = sparse_vec_validate (rt->next_by_protocol, clib_host_to_net_u16 (protocol));
+ n[0] = pi->next_index;
+
+ /* Rebuild next index -> sparse index inverse mapping when sparse vector
+ is updated. */
+ vec_validate (rt->sparse_index_by_next_index, pi->next_index);
+ for (i = 1; i < vec_len (rt->next_by_protocol); i++)
+ rt->sparse_index_by_next_index[rt->next_by_protocol[i]] = i;
+}
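+
+/*
+ * Registration sketch (ip4_input shown as one plausible consumer):
+ *
+ *   hdlc_register_input_protocol (vm, HDLC_PROTOCOL_ip4,
+ *                                 ip4_input_node.index);
+ *
+ * hdlc-input then forwards frames carrying protocol 0x0800 directly to
+ * that node through the sparse next_by_protocol mapping.
+ */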
diff --git a/src/vnet/hdlc/packet.h b/src/vnet/hdlc/packet.h
new file mode 100644
index 00000000000..45e5496fbc8
--- /dev/null
+++ b/src/vnet/hdlc/packet.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vnet_hdlc_packet_h
+#define included_vnet_hdlc_packet_h
+
+/*
+ * HDLC packet format
+ *
+ * Copyright (c) 2009 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#define foreach_hdlc_protocol \
+ _ (0x0800, ip4) \
+ _ (0x2000, cdp) \
+ _ (0x8035, slarp) \
+ _ (0x8847, mpls_unicast) \
+ _ (0x8848, mpls_multicast) \
+ _ (0x86dd, ip6) \
+ _ (0xfefe, osi)
+
+typedef enum {
+#define _(n,f) HDLC_PROTOCOL_##f = n,
+ foreach_hdlc_protocol
+#undef _
+} hdlc_protocol_t;
+
+typedef struct {
+ /* Set to 0x0f for unicast; 0x8f for broadcast. */
+ u8 address;
+
+ /* Always zero. */
+ u8 control;
+
+ /* Layer 3 protocol for this packet. */
+ u16 protocol;
+
+ /* Layer 3 payload. */
+ u8 payload[0];
+} hdlc_header_t;
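+
+/*
+ * Wire-format sketch of a unicast IPv4 frame (payload elided):
+ *
+ *   address 0x0f | control 0x00 | protocol 0x0800 | IPv4 payload ...
+ */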
+
+#endif /* included_vnet_hdlc_packet_h */
diff --git a/src/vnet/hdlc/pg.c b/src/vnet/hdlc/pg.c
new file mode 100644
index 00000000000..b8e67022b08
--- /dev/null
+++ b/src/vnet/hdlc/pg.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * hdlc_pg.c: packet generator hdlc interface
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/hdlc/hdlc.h>
+
+typedef struct {
+ pg_edit_t address;
+ pg_edit_t control;
+ pg_edit_t protocol;
+} pg_hdlc_header_t;
+
+static inline void
+pg_hdlc_header_init (pg_hdlc_header_t * e)
+{
+ pg_edit_init (&e->address, hdlc_header_t, address);
+ pg_edit_init (&e->control, hdlc_header_t, control);
+ pg_edit_init (&e->protocol, hdlc_header_t, protocol);
+}
+
+uword
+unformat_pg_hdlc_header (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t * s = va_arg (*args, pg_stream_t *);
+ pg_hdlc_header_t * h;
+ u32 group_index, error;
+
+ h = pg_create_edit_group (s, sizeof (h[0]), sizeof (hdlc_header_t),
+ &group_index);
+ pg_hdlc_header_init (h);
+
+ pg_edit_set_fixed (&h->address, 0x0f);
+ pg_edit_set_fixed (&h->control, 0x00);
+
+ error = 1;
+ if (! unformat (input, "%U",
+ unformat_pg_edit,
+ unformat_hdlc_protocol_net_byte_order, &h->protocol))
+ goto done;
+
+ {
+ hdlc_main_t * pm = &hdlc_main;
+ hdlc_protocol_info_t * pi = 0;
+ pg_node_t * pg_node = 0;
+
+ if (h->protocol.type == PG_EDIT_FIXED)
+ {
+ u16 t = *(u16 *) h->protocol.values[PG_EDIT_LO];
+ pi = hdlc_get_protocol_info (pm, clib_net_to_host_u16 (t));
+ if (pi && pi->node_index != ~0)
+ pg_node = pg_get_node (pi->node_index);
+ }
+
+ if (pg_node && pg_node->unformat_edit
+ && unformat_user (input, pg_node->unformat_edit, s))
+ ;
+
+ else if (! unformat_user (input, unformat_pg_payload, s))
+ goto done;
+ }
+
+ error = 0;
+ done:
+ if (error)
+ pg_free_edit_group (s);
+ return error == 0;
+}
+
diff --git a/src/vnet/interface.api b/src/vnet/interface.api
new file mode 100644
index 00000000000..752e79c5ed1
--- /dev/null
+++ b/src/vnet/interface.api
@@ -0,0 +1,339 @@
+/** \brief Set flags on the interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the interface to set flags on
+ @param admin_up_down - set the admin state, 1 = up, 0 = down
+    @param link_up_down - operational state; reported in change events, ignored in requests
+ @param deleted - interface was deleted
+*/
+define sw_interface_set_flags
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ /* 1 = up, 0 = down */
+ u8 admin_up_down;
+ u8 link_up_down;
+ u8 deleted;
+};
+
+/** \brief Reply to sw_interface_set_flags
+ @param context - sender context which was passed in the request
+ @param retval - return code of the set flags request
+*/
+define sw_interface_set_flags_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Set interface MTU
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the interface to set MTU on
+ @param mtu - MTU
+*/
+define sw_interface_set_mtu
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u16 mtu;
+};
+
+/** \brief Reply to sw_interface_set_mtu
+ @param context - sender context which was passed in the request
+ @param retval - return code of the set flags request
+*/
+define sw_interface_set_mtu_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Register for interface events
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param enable_disable - 1 => register for events, 0 => cancel registration
+ @param pid - sender's pid
+*/
+define want_interface_events
+{
+ u32 client_index;
+ u32 context;
+ u32 enable_disable;
+ u32 pid;
+};
+
+/** \brief Reply for interface events registration
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define want_interface_events_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Interface details structure
+ @param sw_if_index - index of the interface
+ @param sup_sw_if_index - index of parent interface if any, else same as sw_if_index
+ @param l2_address_length - length of the interface's l2 address
+    @param l2_address - the interface's l2 address
+ @param interface_name - name of the interface
+ @param link_duplex - 1 if half duplex, 2 if full duplex
+ @param link_speed - 1 = 10M, 2 = 100M, 4 = 1G, 8 = 10G, 16 = 40G, 32 = 100G
+    @param link_mtu - maximum transmission unit
+    @param sub_id - a number 0-N to uniquely identify this subif on the super if
+ @param sub_dot1ad - 0 = dot1q, 1=dot1ad
+ @param sub_number_of_tags - Number of tags (0 - 2)
+ @param sub_outer_vlan_id
+ @param sub_inner_vlan_id
+ @param sub_exact_match
+ @param sub_default
+ @param sub_outer_vlan_id_any
+ @param sub_inner_vlan_id_any
+ @param vtr_op - vlan tag rewrite operation
+ @param vtr_push_dot1q
+ @param vtr_tag1
+ @param vtr_tag2
+*/
+define sw_interface_details
+{
+ u32 context;
+ u32 sw_if_index;
+
+ /* index of sup interface (e.g. hw interface).
+ equal to sw_if_index for super hw interface. */
+ u32 sup_sw_if_index;
+
+ /* Layer 2 address, if applicable */
+ u32 l2_address_length;
+ u8 l2_address[8];
+
+ /* Interface name */
+ u8 interface_name[64];
+
+ /* 1 = up, 0 = down */
+ u8 admin_up_down;
+ u8 link_up_down;
+
+ /* 1 = half duplex, 2 = full duplex */
+ u8 link_duplex;
+
+ /* 1 = 10M, 2 = 100M, 4 = 1G, 8 = 10G, 16 = 40G, 32 = 100G */
+ u8 link_speed;
+
+ /* MTU */
+ u16 link_mtu;
+
+ /* Subinterface ID. A number 0-N to uniquely identify this subinterface under the super interface */
+ u32 sub_id;
+
+ /* 0 = dot1q, 1=dot1ad */
+ u8 sub_dot1ad;
+
+ /* Number of tags 0-2 */
+ u8 sub_number_of_tags;
+ u16 sub_outer_vlan_id;
+ u16 sub_inner_vlan_id;
+ u8 sub_exact_match;
+ u8 sub_default;
+ u8 sub_outer_vlan_id_any;
+ u8 sub_inner_vlan_id_any;
+
+ /* vlan tag rewrite state */
+ u32 vtr_op;
+ u32 vtr_push_dot1q; // ethertype of first pushed tag is dot1q/dot1ad
+ u32 vtr_tag1; // first pushed tag
+ u32 vtr_tag2; // second pushed tag
+ u8 tag[64];
+};
+
+/** \brief Request a sw_interface_details dump, optionally filtered by name
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param name_filter_valid - 1 if name_filter below should be applied
+    @param name_filter - interface name filter
+*/
+define sw_interface_dump
+{
+ u32 client_index;
+ u32 context;
+ u8 name_filter_valid;
+ u8 name_filter[49];
+};
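+
+/*
+ * Dump flow sketch: the client sends sw_interface_dump and receives one
+ * sw_interface_details message per matching interface; a control-ping
+ * exchange is the usual way to detect the end of the stream.
+ */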
+
+/** \brief Set or delete one or all ip addresses on a specified interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the interface to add/del addresses
+ @param is_add - add address if non-zero, else delete
+ @param is_ipv6 - if non-zero the address is ipv6, else ipv4
+ @param del_all - if non-zero delete all addresses on the interface
+ @param address_length - address length in bytes, 4 for ip4, 16 for ip6
+ @param address - array of address bytes
+*/
+define sw_interface_add_del_address
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 is_add;
+ u8 is_ipv6;
+ u8 del_all;
+ u8 address_length;
+ u8 address[16];
+};
+
+/** \brief Reply to sw_interface_add_del_address
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define sw_interface_add_del_address_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Associate the specified interface with a fib table
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the interface
+ @param is_ipv6 - if non-zero ipv6, else ipv4
+    @param vrf_id - fib table/vrf id to associate the interface with
+*/
+define sw_interface_set_table
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 is_ipv6;
+ u32 vrf_id;
+};
+
+/** \brief Reply to sw_interface_set_table
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define sw_interface_set_table_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Get VRF id assigned to interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the interface
+*/
+define sw_interface_get_table
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 is_ipv6;
+};
+
+/** \brief Reply to sw_interface_get_table
+    @param context - sender context which was passed in the request
+    @param retval - return code
+    @param vrf_id - VRF id assigned to the interface
+*/
+define sw_interface_get_table_reply
+{
+ u32 context;
+ i32 retval;
+ u32 vrf_id;
+};
+
+/** \brief Stats counters structure
+    @param vnet_counter_type - such as ip4, ip6, punts, etc
+ @param is_combined - rx & tx total (all types) counts
+ @param first_sw_if_index - first sw index in block of index, counts
+ @param count - number of interfaces this stats block includes counters for
+ @param data - contiguous block of vlib_counter_t structures
+*/
+define vnet_interface_counters
+{
+ /* enums - plural - in vnet/interface.h */
+ u8 vnet_counter_type;
+ u8 is_combined;
+ u32 first_sw_if_index;
+ u32 count;
+ u8 data[count];
+};
+
+/** \brief Set unnumbered interface add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface with an IP address
+ @param unnumbered_sw_if_index - interface which will use the address
+ @param is_add - if non-zero set the association, else unset it
+*/
+define sw_interface_set_unnumbered
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index; /* use this intfc address */
+ u32 unnumbered_sw_if_index; /* on this interface */
+ u8 is_add;
+};
+
+/** \brief Set unnumbered interface add / del response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define sw_interface_set_unnumbered_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Clear interface statistics
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the interface to clear statistics
+*/
+define sw_interface_clear_stats
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+/** \brief Reply to sw_interface_clear_stats
+ @param context - sender context which was passed in the request
+ @param retval - return code of the set flags request
+*/
+define sw_interface_clear_stats_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Set / clear software interface tag
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - the interface
+ @param add_del - 1 = add, 0 = delete
+ @param tag - an ascii tag
+*/
+define sw_interface_tag_add_del
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u32 sw_if_index;
+ u8 tag[64];
+};
+
+/** \brief Reply to set / clear software interface tag
+ @param context - sender context which was passed in the request
+ @param retval - return code for the request
+*/
+define sw_interface_tag_add_del_reply
+{
+ u32 context;
+ i32 retval;
+};
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
+
diff --git a/src/vnet/interface.c b/src/vnet/interface.c
new file mode 100644
index 00000000000..78610ed460b
--- /dev/null
+++ b/src/vnet/interface.c
@@ -0,0 +1,1398 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * interface.c: VNET interfaces/sub-interfaces
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/adj/adj.h>
+
+#define VNET_INTERFACE_SET_FLAGS_HELPER_IS_CREATE (1 << 0)
+#define VNET_INTERFACE_SET_FLAGS_HELPER_WANT_REDISTRIBUTE (1 << 1)
+
+static clib_error_t *vnet_hw_interface_set_flags_helper (vnet_main_t * vnm,
+ u32 hw_if_index,
+ u32 flags,
+ u32 helper_flags);
+
+static clib_error_t *vnet_sw_interface_set_flags_helper (vnet_main_t * vnm,
+ u32 sw_if_index,
+ u32 flags,
+ u32 helper_flags);
+
+static clib_error_t *vnet_hw_interface_set_class_helper (vnet_main_t * vnm,
+ u32 hw_if_index,
+ u32 hw_class_index,
+ u32 redistribute);
+
+typedef struct
+{
+ /* Either sw or hw interface index. */
+ u32 sw_hw_if_index;
+
+ /* Flags. */
+ u32 flags;
+} vnet_sw_hw_interface_state_t;
+
+static void
+serialize_vec_vnet_sw_hw_interface_state (serialize_main_t * m, va_list * va)
+{
+ vnet_sw_hw_interface_state_t *s =
+ va_arg (*va, vnet_sw_hw_interface_state_t *);
+ u32 n = va_arg (*va, u32);
+ u32 i;
+ for (i = 0; i < n; i++)
+ {
+ serialize_integer (m, s[i].sw_hw_if_index,
+ sizeof (s[i].sw_hw_if_index));
+ serialize_integer (m, s[i].flags, sizeof (s[i].flags));
+ }
+}
+
+static void
+unserialize_vec_vnet_sw_hw_interface_state (serialize_main_t * m,
+ va_list * va)
+{
+ vnet_sw_hw_interface_state_t *s =
+ va_arg (*va, vnet_sw_hw_interface_state_t *);
+ u32 n = va_arg (*va, u32);
+ u32 i;
+ for (i = 0; i < n; i++)
+ {
+ unserialize_integer (m, &s[i].sw_hw_if_index,
+ sizeof (s[i].sw_hw_if_index));
+ unserialize_integer (m, &s[i].flags, sizeof (s[i].flags));
+ }
+}
+
+static void
+serialize_vnet_sw_hw_interface_set_flags (serialize_main_t * m, va_list * va)
+{
+ vnet_sw_hw_interface_state_t *s =
+ va_arg (*va, vnet_sw_hw_interface_state_t *);
+ serialize (m, serialize_vec_vnet_sw_hw_interface_state, s, 1);
+}
+
+static void
+unserialize_vnet_sw_interface_set_flags (serialize_main_t * m, va_list * va)
+{
+ CLIB_UNUSED (mc_main_t * mc) = va_arg (*va, mc_main_t *);
+ vnet_sw_hw_interface_state_t s;
+
+ unserialize (m, unserialize_vec_vnet_sw_hw_interface_state, &s, 1);
+
+ vnet_sw_interface_set_flags_helper
+ (vnet_get_main (), s.sw_hw_if_index, s.flags,
+ /* helper_flags no redistribution */ 0);
+}
+
+static void
+unserialize_vnet_hw_interface_set_flags (serialize_main_t * m, va_list * va)
+{
+ CLIB_UNUSED (mc_main_t * mc) = va_arg (*va, mc_main_t *);
+ vnet_sw_hw_interface_state_t s;
+
+ unserialize (m, unserialize_vec_vnet_sw_hw_interface_state, &s, 1);
+
+ vnet_hw_interface_set_flags_helper
+ (vnet_get_main (), s.sw_hw_if_index, s.flags,
+ /* helper_flags no redistribution */ 0);
+}
+
+MC_SERIALIZE_MSG (vnet_sw_interface_set_flags_msg, static) =
+{
+  .name = "vnet_sw_interface_set_flags",
+  .serialize = serialize_vnet_sw_hw_interface_set_flags,
+  .unserialize = unserialize_vnet_sw_interface_set_flags,
+};
+
+MC_SERIALIZE_MSG (vnet_hw_interface_set_flags_msg, static) =
+{
+  .name = "vnet_hw_interface_set_flags",
+  .serialize = serialize_vnet_sw_hw_interface_set_flags,
+  .unserialize = unserialize_vnet_hw_interface_set_flags,
+};
+
+void
+serialize_vnet_interface_state (serialize_main_t * m, va_list * va)
+{
+ vnet_main_t *vnm = va_arg (*va, vnet_main_t *);
+ vnet_sw_hw_interface_state_t *sts = 0, *st;
+ vnet_sw_interface_t *sif;
+ vnet_hw_interface_t *hif;
+ vnet_interface_main_t *im = &vnm->interface_main;
+
+ /* Serialize hardware interface classes since they may have changed.
+ Must do this before sending up/down flags. */
+ /* *INDENT-OFF* */
+ pool_foreach (hif, im->hw_interfaces, ({
+ vnet_hw_interface_class_t * hw_class = vnet_get_hw_interface_class (vnm, hif->hw_class_index);
+ serialize_cstring (m, hw_class->name);
+ }));
+ /* *INDENT-ON* */
+
+ /* Send sw/hw interface state when non-zero. */
+ /* *INDENT-OFF* */
+ pool_foreach (sif, im->sw_interfaces, ({
+ if (sif->flags != 0)
+ {
+ vec_add2 (sts, st, 1);
+ st->sw_hw_if_index = sif->sw_if_index;
+ st->flags = sif->flags;
+ }
+ }));
+ /* *INDENT-ON* */
+
+ vec_serialize (m, sts, serialize_vec_vnet_sw_hw_interface_state);
+
+ if (sts)
+ _vec_len (sts) = 0;
+
+ /* *INDENT-OFF* */
+ pool_foreach (hif, im->hw_interfaces, ({
+ if (hif->flags != 0)
+ {
+ vec_add2 (sts, st, 1);
+ st->sw_hw_if_index = hif->hw_if_index;
+ st->flags = hif->flags;
+ }
+ }));
+ /* *INDENT-ON* */
+
+ vec_serialize (m, sts, serialize_vec_vnet_sw_hw_interface_state);
+
+ vec_free (sts);
+}
+
+void
+unserialize_vnet_interface_state (serialize_main_t * m, va_list * va)
+{
+ vnet_main_t *vnm = va_arg (*va, vnet_main_t *);
+ vnet_sw_hw_interface_state_t *sts = 0, *st;
+
+ /* First set interface hardware class. */
+ {
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_hw_interface_t *hif;
+ char *class_name;
+ uword *p;
+ clib_error_t *error;
+
+ /* *INDENT-OFF* */
+ pool_foreach (hif, im->hw_interfaces, ({
+ unserialize_cstring (m, &class_name);
+ p = hash_get_mem (im->hw_interface_class_by_name, class_name);
+ ASSERT (p != 0);
+ error = vnet_hw_interface_set_class_helper (vnm, hif->hw_if_index, p[0], /* redistribute */ 0);
+ if (error)
+ clib_error_report (error);
+ vec_free (class_name);
+ }));
+ /* *INDENT-ON* */
+ }
+
+ vec_unserialize (m, &sts, unserialize_vec_vnet_sw_hw_interface_state);
+ vec_foreach (st, sts)
+ vnet_sw_interface_set_flags_helper (vnm, st->sw_hw_if_index, st->flags,
+ /* no distribute */ 0);
+ vec_free (sts);
+
+ vec_unserialize (m, &sts, unserialize_vec_vnet_sw_hw_interface_state);
+ vec_foreach (st, sts)
+ vnet_hw_interface_set_flags_helper (vnm, st->sw_hw_if_index, st->flags,
+ /* no distribute */ 0);
+ vec_free (sts);
+}
+
+static clib_error_t *
+call_elf_section_interface_callbacks (vnet_main_t * vnm, u32 if_index,
+ u32 flags,
+ _vnet_interface_function_list_elt_t **
+ elts)
+{
+ _vnet_interface_function_list_elt_t *elt;
+ vnet_interface_function_priority_t prio;
+ clib_error_t *error = 0;
+
+ for (prio = VNET_ITF_FUNC_PRIORITY_LOW;
+ prio <= VNET_ITF_FUNC_PRIORITY_HIGH; prio++)
+ {
+ elt = elts[prio];
+
+ while (elt)
+ {
+ error = elt->fp (vnm, if_index, flags);
+ if (error)
+ return error;
+ elt = elt->next_interface_function;
+ }
+ }
+ return error;
+}
+
+static clib_error_t *
+call_hw_interface_add_del_callbacks (vnet_main_t * vnm, u32 hw_if_index,
+ u32 is_create)
+{
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+ vnet_hw_interface_class_t *hw_class =
+ vnet_get_hw_interface_class (vnm, hi->hw_class_index);
+ vnet_device_class_t *dev_class =
+ vnet_get_device_class (vnm, hi->dev_class_index);
+ clib_error_t *error = 0;
+
+ if (hw_class->interface_add_del_function
+ && (error =
+ hw_class->interface_add_del_function (vnm, hw_if_index, is_create)))
+ return error;
+
+ if (dev_class->interface_add_del_function
+ && (error =
+ dev_class->interface_add_del_function (vnm, hw_if_index,
+ is_create)))
+ return error;
+
+ error = call_elf_section_interface_callbacks
+ (vnm, hw_if_index, is_create, vnm->hw_interface_add_del_functions);
+
+ return error;
+}
+
+static clib_error_t *
+call_sw_interface_add_del_callbacks (vnet_main_t * vnm, u32 sw_if_index,
+ u32 is_create)
+{
+ return call_elf_section_interface_callbacks
+ (vnm, sw_if_index, is_create, vnm->sw_interface_add_del_functions);
+}
+
+
+static clib_error_t *
+vnet_hw_interface_set_flags_helper (vnet_main_t * vnm, u32 hw_if_index,
+ u32 flags, u32 helper_flags)
+{
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+ vnet_hw_interface_class_t *hw_class =
+ vnet_get_hw_interface_class (vnm, hi->hw_class_index);
+ vnet_device_class_t *dev_class =
+ vnet_get_device_class (vnm, hi->dev_class_index);
+ vlib_main_t *vm = vnm->vlib_main;
+ u32 mask;
+ clib_error_t *error = 0;
+ u32 is_create =
+ (helper_flags & VNET_INTERFACE_SET_FLAGS_HELPER_IS_CREATE) != 0;
+
+ mask =
+ (VNET_HW_INTERFACE_FLAG_LINK_UP | VNET_HW_INTERFACE_FLAG_DUPLEX_MASK |
+ VNET_HW_INTERFACE_FLAG_SPEED_MASK);
+ flags &= mask;
+
+ /* Call hardware interface add/del callbacks. */
+ if (is_create)
+ call_hw_interface_add_del_callbacks (vnm, hw_if_index, is_create);
+
+ /* Already in the desired state? */
+ if (!is_create && (hi->flags & mask) == flags)
+ goto done;
+
+ /* Some interface classes do not redistribute (e.g. are local). */
+ if (!dev_class->redistribute)
+ helper_flags &= ~VNET_INTERFACE_SET_FLAGS_HELPER_WANT_REDISTRIBUTE;
+
+ if (vm->mc_main
+ && (helper_flags & VNET_INTERFACE_SET_FLAGS_HELPER_WANT_REDISTRIBUTE))
+ {
+ vnet_sw_hw_interface_state_t s;
+ s.sw_hw_if_index = hw_if_index;
+ s.flags = flags;
+ mc_serialize (vm->mc_main, &vnet_hw_interface_set_flags_msg, &s);
+ }
+
+ if ((hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP) !=
+ (flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
+ {
+ /* Do hardware class (e.g. ethernet). */
+ if (hw_class->link_up_down_function
+ && (error = hw_class->link_up_down_function (vnm, hw_if_index,
+ flags)))
+ goto done;
+
+ error = call_elf_section_interface_callbacks
+ (vnm, hw_if_index, flags, vnm->hw_interface_link_up_down_functions);
+
+ if (error)
+ goto done;
+ }
+
+ hi->flags &= ~mask;
+ hi->flags |= flags;
+
+done:
+ return error;
+}
+
+static clib_error_t *
+vnet_sw_interface_set_flags_helper (vnet_main_t * vnm, u32 sw_if_index,
+ u32 flags, u32 helper_flags)
+{
+ vnet_sw_interface_t *si = vnet_get_sw_interface (vnm, sw_if_index);
+ vlib_main_t *vm = vnm->vlib_main;
+ u32 mask;
+ clib_error_t *error = 0;
+ u32 is_create =
+ (helper_flags & VNET_INTERFACE_SET_FLAGS_HELPER_IS_CREATE) != 0;
+ u32 old_flags;
+
+ mask = VNET_SW_INTERFACE_FLAG_ADMIN_UP | VNET_SW_INTERFACE_FLAG_PUNT;
+ flags &= mask;
+
+ if (is_create)
+ {
+ error =
+ call_sw_interface_add_del_callbacks (vnm, sw_if_index, is_create);
+ if (error)
+ goto done;
+
+ if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ {
+ /* Notify everyone when the interface is created as admin up */
+ error = call_elf_section_interface_callbacks (vnm, sw_if_index,
+ flags,
+ vnm->
+ sw_interface_admin_up_down_functions);
+ if (error)
+ goto done;
+ }
+ }
+ else
+ {
+ vnet_sw_interface_t *si_sup = si;
+
+ /* Check that super interface is in correct state. */
+ if (si->type == VNET_SW_INTERFACE_TYPE_SUB)
+ {
+ si_sup = vnet_get_sw_interface (vnm, si->sup_sw_if_index);
+
+	  /* Check to see if we're bringing down the sub-interface while its parent is up */
+ if ((flags != (si_sup->flags & mask)) &&
+ (!((flags == 0)
+ && ((si_sup->flags & mask) ==
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP))))
+ {
+ error = clib_error_return (0, "super-interface %U must be %U",
+ format_vnet_sw_interface_name, vnm,
+ si_sup,
+ format_vnet_sw_interface_flags,
+ flags);
+ goto done;
+ }
+ }
+
+      /* Do not change state for a slave link of a bonded interface */
+ if (si->flags & VNET_SW_INTERFACE_FLAG_BOND_SLAVE)
+ {
+ error = clib_error_return
+	    (0, "not allowed as %U belongs to a BondEthernet interface",
+ format_vnet_sw_interface_name, vnm, si);
+ goto done;
+ }
+
+ /* Already in the desired state? */
+ if ((si->flags & mask) == flags)
+ goto done;
+
+      /* Sub-interfaces of hardware interfaces that do not redistribute,
+ do not redistribute themselves. */
+ if (si_sup->type == VNET_SW_INTERFACE_TYPE_HARDWARE)
+ {
+ vnet_hw_interface_t *hi =
+ vnet_get_hw_interface (vnm, si_sup->hw_if_index);
+ vnet_device_class_t *dev_class =
+ vnet_get_device_class (vnm, hi->dev_class_index);
+ if (!dev_class->redistribute)
+ helper_flags &=
+ ~VNET_INTERFACE_SET_FLAGS_HELPER_WANT_REDISTRIBUTE;
+ }
+
+ if (vm->mc_main
+ && (helper_flags &
+ VNET_INTERFACE_SET_FLAGS_HELPER_WANT_REDISTRIBUTE))
+ {
+ vnet_sw_hw_interface_state_t s;
+ s.sw_hw_if_index = sw_if_index;
+ s.flags = flags;
+ mc_serialize (vm->mc_main, &vnet_sw_interface_set_flags_msg, &s);
+ }
+
+      /* set the flags now, before invoking the registered clients,
+       * so that the state they query is consistent with the state notified here */
+ old_flags = si->flags;
+ si->flags &= ~mask;
+ si->flags |= flags;
+ if ((flags | old_flags) & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ error = call_elf_section_interface_callbacks
+ (vnm, sw_if_index, flags,
+ vnm->sw_interface_admin_up_down_functions);
+ si->flags = old_flags;
+
+ if (error)
+ goto done;
+
+ if (si->type == VNET_SW_INTERFACE_TYPE_HARDWARE)
+ {
+ vnet_hw_interface_t *hi =
+ vnet_get_hw_interface (vnm, si->hw_if_index);
+ vnet_hw_interface_class_t *hw_class =
+ vnet_get_hw_interface_class (vnm, hi->hw_class_index);
+ vnet_device_class_t *dev_class =
+ vnet_get_device_class (vnm, hi->dev_class_index);
+
+ /* save the si admin up flag */
+ old_flags = si->flags;
+
+ /* update si admin up flag in advance if we are going admin down */
+ if (!(flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP))
+ si->flags &= ~VNET_SW_INTERFACE_FLAG_ADMIN_UP;
+
+ if (dev_class->admin_up_down_function
+ && (error = dev_class->admin_up_down_function (vnm,
+ si->hw_if_index,
+ flags)))
+ {
+	      /* restore si admin up flag to its original state on errors */
+ si->flags = old_flags;
+ goto done;
+ }
+
+ if (hw_class->admin_up_down_function
+ && (error = hw_class->admin_up_down_function (vnm,
+ si->hw_if_index,
+ flags)))
+ {
+	      /* restore si admin up flag to its original state on errors */
+ si->flags = old_flags;
+ goto done;
+ }
+
+ /* Admin down implies link down. */
+ if (!(flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ && (hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
+ vnet_hw_interface_set_flags_helper (vnm, si->hw_if_index,
+ hi->flags &
+ ~VNET_HW_INTERFACE_FLAG_LINK_UP,
+ helper_flags);
+ }
+ }
+
+ si->flags &= ~mask;
+ si->flags |= flags;
+
+done:
+ return error;
+}
+
+clib_error_t *
+vnet_hw_interface_set_flags (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+{
+ return vnet_hw_interface_set_flags_helper
+ (vnm, hw_if_index, flags,
+ VNET_INTERFACE_SET_FLAGS_HELPER_WANT_REDISTRIBUTE);
+}
+
+clib_error_t *
+vnet_sw_interface_set_flags (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
+{
+ return vnet_sw_interface_set_flags_helper
+ (vnm, sw_if_index, flags,
+ VNET_INTERFACE_SET_FLAGS_HELPER_WANT_REDISTRIBUTE);
+}
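+
+/*
+ * Usage sketch (illustrative, not part of this patch): bring an
+ * existing software interface administratively up and report any
+ * error; 'sw_if_index' is assumed to name a valid interface.
+ *
+ *   clib_error_t *error;
+ *   error = vnet_sw_interface_set_flags
+ *     (vnet_get_main (), sw_if_index, VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ *   if (error)
+ *     clib_error_report (error);
+ */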
+
+static u32
+vnet_create_sw_interface_no_callbacks (vnet_main_t * vnm,
+ vnet_sw_interface_t * template)
+{
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_sw_interface_t *sw;
+ u32 sw_if_index;
+
+ pool_get (im->sw_interfaces, sw);
+ sw_if_index = sw - im->sw_interfaces;
+
+ sw[0] = template[0];
+
+ sw->flags = 0;
+ sw->sw_if_index = sw_if_index;
+ if (sw->type == VNET_SW_INTERFACE_TYPE_HARDWARE)
+ sw->sup_sw_if_index = sw->sw_if_index;
+
+ /* Allocate counters for this interface. */
+ {
+ u32 i;
+
+ vnet_interface_counter_lock (im);
+
+ for (i = 0; i < vec_len (im->sw_if_counters); i++)
+ {
+ vlib_validate_simple_counter (&im->sw_if_counters[i], sw_if_index);
+ vlib_zero_simple_counter (&im->sw_if_counters[i], sw_if_index);
+ }
+
+ for (i = 0; i < vec_len (im->combined_sw_if_counters); i++)
+ {
+ vlib_validate_combined_counter (&im->combined_sw_if_counters[i],
+ sw_if_index);
+ vlib_zero_combined_counter (&im->combined_sw_if_counters[i],
+ sw_if_index);
+ }
+
+ vnet_interface_counter_unlock (im);
+ }
+
+ return sw_if_index;
+}
+
+clib_error_t *
+vnet_create_sw_interface (vnet_main_t * vnm, vnet_sw_interface_t * template,
+ u32 * sw_if_index)
+{
+ clib_error_t *error;
+ vnet_hw_interface_t *hi;
+ vnet_device_class_t *dev_class;
+
+ hi = vnet_get_sup_hw_interface (vnm, template->sup_sw_if_index);
+ dev_class = vnet_get_device_class (vnm, hi->dev_class_index);
+
+ if (template->type == VNET_SW_INTERFACE_TYPE_SUB &&
+ dev_class->subif_add_del_function)
+ {
+ error = dev_class->subif_add_del_function (vnm, hi->hw_if_index,
+ (struct vnet_sw_interface_t
+ *) template, 1);
+ if (error)
+ return error;
+ }
+
+ *sw_if_index = vnet_create_sw_interface_no_callbacks (vnm, template);
+ error = vnet_sw_interface_set_flags_helper
+ (vnm, *sw_if_index, template->flags,
+ VNET_INTERFACE_SET_FLAGS_HELPER_IS_CREATE);
+
+ if (error)
+ {
+ /* undo the work done by vnet_create_sw_interface_no_callbacks() */
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_sw_interface_t *sw =
+ pool_elt_at_index (im->sw_interfaces, *sw_if_index);
+ pool_put (im->sw_interfaces, sw);
+ }
+
+ return error;
+}
+
+void
+vnet_delete_sw_interface (vnet_main_t * vnm, u32 sw_if_index)
+{
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_sw_interface_t *sw =
+ pool_elt_at_index (im->sw_interfaces, sw_if_index);
+
+ /* Make sure the interface is in L3 mode (removed from L2 BD or XConnect) */
+ vlib_main_t *vm = vlib_get_main ();
+ l2_input_config_t *config;
+ config = vec_elt_at_index (l2input_main.configs, sw_if_index);
+ if (config->xconnect)
+ set_int_l2_mode (vm, vnm, MODE_L3, config->output_sw_if_index, 0, 0, 0,
+ 0);
+ if (config->xconnect || config->bridge)
+ set_int_l2_mode (vm, vnm, MODE_L3, sw_if_index, 0, 0, 0, 0);
+
+ /* Bring down interface in case it is up. */
+ if (sw->flags != 0)
+ vnet_sw_interface_set_flags (vnm, sw_if_index, /* flags */ 0);
+
+ call_sw_interface_add_del_callbacks (vnm, sw_if_index, /* is_create */ 0);
+
+ pool_put (im->sw_interfaces, sw);
+}
+
+static void
+setup_tx_node (vlib_main_t * vm,
+ u32 node_index, vnet_device_class_t * dev_class)
+{
+ vlib_node_t *n = vlib_get_node (vm, node_index);
+
+ n->function = dev_class->tx_function;
+ n->format_trace = dev_class->format_tx_trace;
+
+ vlib_register_errors (vm, node_index,
+ dev_class->tx_function_n_errors,
+ dev_class->tx_function_error_strings);
+}
+
+static void
+setup_output_node (vlib_main_t * vm,
+ u32 node_index, vnet_hw_interface_class_t * hw_class)
+{
+ vlib_node_t *n = vlib_get_node (vm, node_index);
+ n->format_buffer = hw_class->format_header;
+ n->unformat_buffer = hw_class->unformat_header;
+}
+
+/* Register an interface instance. */
+u32
+vnet_register_interface (vnet_main_t * vnm,
+ u32 dev_class_index,
+ u32 dev_instance,
+ u32 hw_class_index, u32 hw_instance)
+{
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_hw_interface_t *hw;
+ vnet_device_class_t *dev_class =
+ vnet_get_device_class (vnm, dev_class_index);
+ vnet_hw_interface_class_t *hw_class =
+ vnet_get_hw_interface_class (vnm, hw_class_index);
+ vlib_main_t *vm = vnm->vlib_main;
+ vnet_feature_config_main_t *fcm;
+ vnet_config_main_t *cm;
+ u32 hw_index, i;
+ char *tx_node_name, *output_node_name;
+
+ pool_get (im->hw_interfaces, hw);
+
+ hw_index = hw - im->hw_interfaces;
+ hw->hw_if_index = hw_index;
+
+ if (dev_class->format_device_name)
+ hw->name = format (0, "%U", dev_class->format_device_name, dev_instance);
+ else if (hw_class->format_interface_name)
+ hw->name = format (0, "%U", hw_class->format_interface_name,
+ dev_instance);
+ else
+ hw->name = format (0, "%s%x", hw_class->name, dev_instance);
+
+ if (!im->hw_interface_by_name)
+ im->hw_interface_by_name = hash_create_vec ( /* size */ 0,
+ sizeof (hw->name[0]),
+ sizeof (uword));
+
+ hash_set_mem (im->hw_interface_by_name, hw->name, hw_index);
+
+ /* Make hardware interface point to software interface. */
+ {
+ vnet_sw_interface_t sw = {
+ .type = VNET_SW_INTERFACE_TYPE_HARDWARE,
+ .flood_class = VNET_FLOOD_CLASS_NORMAL,
+ .hw_if_index = hw_index
+ };
+ hw->sw_if_index = vnet_create_sw_interface_no_callbacks (vnm, &sw);
+ }
+
+ hw->dev_class_index = dev_class_index;
+ hw->dev_instance = dev_instance;
+ hw->hw_class_index = hw_class_index;
+ hw->hw_instance = hw_instance;
+
+ hw->max_rate_bits_per_sec = 0;
+ hw->min_packet_bytes = 0;
+ hw->per_packet_overhead_bytes = 0;
+ hw->max_l3_packet_bytes[VLIB_RX] = ~0;
+ hw->max_l3_packet_bytes[VLIB_TX] = ~0;
+
+ tx_node_name = (char *) format (0, "%v-tx", hw->name);
+ output_node_name = (char *) format (0, "%v-output", hw->name);
+
+ /* If we have previously deleted interface nodes, re-use them. */
+ if (vec_len (im->deleted_hw_interface_nodes) > 0)
+ {
+ vnet_hw_interface_nodes_t *hn;
+ vnet_interface_output_runtime_t *rt;
+ vlib_node_t *node;
+ vlib_node_runtime_t *nrt;
+
+ hn = vec_end (im->deleted_hw_interface_nodes) - 1;
+
+ hw->tx_node_index = hn->tx_node_index;
+ hw->output_node_index = hn->output_node_index;
+
+ vlib_node_rename (vm, hw->tx_node_index, "%v", tx_node_name);
+ vlib_node_rename (vm, hw->output_node_index, "%v", output_node_name);
+
+ rt = vlib_node_get_runtime_data (vm, hw->output_node_index);
+ ASSERT (rt->is_deleted == 1);
+ rt->is_deleted = 0;
+ rt->hw_if_index = hw_index;
+ rt->sw_if_index = hw->sw_if_index;
+ rt->dev_instance = hw->dev_instance;
+
+ rt = vlib_node_get_runtime_data (vm, hw->tx_node_index);
+ rt->hw_if_index = hw_index;
+ rt->sw_if_index = hw->sw_if_index;
+ rt->dev_instance = hw->dev_instance;
+
+ /* The new class may differ from the old one.
+ * Functions have to be updated. */
+ node = vlib_get_node (vm, hw->output_node_index);
+ node->function = dev_class->flatten_output_chains ?
+ vnet_interface_output_node_flatten_multiarch_select () :
+ vnet_interface_output_node_multiarch_select ();
+ node->format_trace = format_vnet_interface_output_trace;
+ nrt = vlib_node_get_runtime (vm, hw->output_node_index);
+ nrt->function = node->function;
+
+ node = vlib_get_node (vm, hw->tx_node_index);
+ node->function = dev_class->tx_function;
+ node->format_trace = dev_class->format_tx_trace;
+ nrt = vlib_node_get_runtime (vm, hw->tx_node_index);
+ nrt->function = node->function;
+
+ vlib_worker_thread_node_runtime_update ();
+ _vec_len (im->deleted_hw_interface_nodes) -= 1;
+ }
+ else
+ {
+ vlib_node_registration_t r;
+ vnet_interface_output_runtime_t rt = {
+ .hw_if_index = hw_index,
+ .sw_if_index = hw->sw_if_index,
+ .dev_instance = hw->dev_instance,
+ .is_deleted = 0,
+ };
+
+ memset (&r, 0, sizeof (r));
+ r.type = VLIB_NODE_TYPE_INTERNAL;
+ r.runtime_data = &rt;
+ r.runtime_data_bytes = sizeof (rt);
+ r.scalar_size = 0;
+ r.vector_size = sizeof (u32);
+
+ r.flags = VLIB_NODE_FLAG_IS_OUTPUT;
+ r.name = tx_node_name;
+ r.function = dev_class->tx_function;
+
+ hw->tx_node_index = vlib_register_node (vm, &r);
+
+ vlib_node_add_named_next_with_slot (vm, hw->tx_node_index,
+ "error-drop",
+ VNET_INTERFACE_TX_NEXT_DROP);
+
+ r.flags = 0;
+ r.name = output_node_name;
+ r.function = dev_class->flatten_output_chains ?
+ vnet_interface_output_node_flatten_multiarch_select () :
+ vnet_interface_output_node_multiarch_select ();
+ r.format_trace = format_vnet_interface_output_trace;
+
+ {
+ static char *e[] = {
+ "interface is down",
+ "interface is deleted",
+ };
+
+ r.n_errors = ARRAY_LEN (e);
+ r.error_strings = e;
+ }
+ hw->output_node_index = vlib_register_node (vm, &r);
+
+ vlib_node_add_named_next_with_slot (vm, hw->output_node_index,
+ "error-drop",
+ VNET_INTERFACE_OUTPUT_NEXT_DROP);
+ vlib_node_add_next_with_slot (vm, hw->output_node_index,
+ hw->tx_node_index,
+ VNET_INTERFACE_OUTPUT_NEXT_TX);
+
+ /* add interface to the list of "output-interface" feature arc start nodes
+ and clone nexts from 1st interface if it exists */
+ fcm = vnet_feature_get_config_main (im->output_feature_arc_index);
+ cm = &fcm->config_main;
+ i = vec_len (cm->start_node_indices);
+ vec_validate (cm->start_node_indices, i);
+ cm->start_node_indices[i] = hw->output_node_index;
+ if (hw_index)
+ {
+ /* copy nexts from 1st interface */
+ vnet_hw_interface_t *first_hw;
+ vlib_node_t *first_node;
+
+ first_hw = vnet_get_hw_interface (vnm, /* hw_if_index */ 0);
+ first_node = vlib_get_node (vm, first_hw->output_node_index);
+
+ /* 1st 2 nexts are already added above */
+ for (i = 2; i < vec_len (first_node->next_nodes); i++)
+ vlib_node_add_next_with_slot (vm, hw->output_node_index,
+ first_node->next_nodes[i], i);
+ }
+ }
+
+ setup_output_node (vm, hw->output_node_index, hw_class);
+ setup_tx_node (vm, hw->tx_node_index, dev_class);
+
+ /* Call all up/down callbacks with zero flags when interface is created. */
+ vnet_sw_interface_set_flags_helper (vnm, hw->sw_if_index, /* flags */ 0,
+ VNET_INTERFACE_SET_FLAGS_HELPER_IS_CREATE);
+ vnet_hw_interface_set_flags_helper (vnm, hw_index, /* flags */ 0,
+ VNET_INTERFACE_SET_FLAGS_HELPER_IS_CREATE);
+
+ return hw_index;
+}
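+
+/*
+ * Usage sketch (hypothetical driver; 'my_device_class' and
+ * 'my_instance' are invented for illustration, and
+ * 'ethernet_hw_interface_class' is assumed to be the Ethernet
+ * hardware class declared elsewhere in vnet): register a hardware
+ * interface and keep the returned index in per-device state.
+ *
+ *   u32 hw_if_index;
+ *   hw_if_index = vnet_register_interface
+ *     (vnm, my_device_class.index, my_instance,
+ *      ethernet_hw_interface_class.index, my_instance);
+ */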
+
+void
+vnet_delete_hw_interface (vnet_main_t * vnm, u32 hw_if_index)
+{
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ vlib_main_t *vm = vnm->vlib_main;
+
+ /* If it is up, mark it down. */
+ if (hw->flags != 0)
+ vnet_hw_interface_set_flags (vnm, hw_if_index, /* flags */ 0);
+
+ /* Call delete callbacks. */
+ call_hw_interface_add_del_callbacks (vnm, hw_if_index, /* is_create */ 0);
+
+ /* Delete software interface corresponding to hardware interface. */
+ vnet_delete_sw_interface (vnm, hw->sw_if_index);
+
+ /* Delete any sub-interfaces. */
+ {
+ u32 id, sw_if_index;
+ /* *INDENT-OFF* */
+ hash_foreach (id, sw_if_index, hw->sub_interface_sw_if_index_by_id, ({
+ vnet_delete_sw_interface (vnm, sw_if_index);
+ }));
+ /* *INDENT-ON* */
+ }
+
+ {
+ vnet_hw_interface_nodes_t *dn;
+ vnet_interface_output_runtime_t *rt =
+ vlib_node_get_runtime_data (vm, hw->output_node_index);
+
+ /* Mark node runtime as deleted so output node (if called) will drop packets. */
+ rt->is_deleted = 1;
+
+ vlib_node_rename (vm, hw->output_node_index,
+ "interface-%d-output-deleted", hw_if_index);
+ vlib_node_rename (vm, hw->tx_node_index, "interface-%d-tx-deleted",
+ hw_if_index);
+ vec_add2 (im->deleted_hw_interface_nodes, dn, 1);
+ dn->tx_node_index = hw->tx_node_index;
+ dn->output_node_index = hw->output_node_index;
+ }
+
+ hash_unset_mem (im->hw_interface_by_name, hw->name);
+ vec_free (hw->name);
+
+ pool_put (im->hw_interfaces, hw);
+}
+
+void
+vnet_hw_interface_walk_sw (vnet_main_t * vnm,
+ u32 hw_if_index,
+ vnet_hw_sw_interface_walk_t fn, void *ctx)
+{
+ vnet_hw_interface_t *hi;
+ u32 id, sw_if_index;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+  /* visit the super-interface first, then the sub-interfaces */
+ fn (vnm, hi->sw_if_index, ctx);
+
+ /* *INDENT-OFF* */
+ hash_foreach (id, sw_if_index,
+ hi->sub_interface_sw_if_index_by_id,
+ ({
+ fn (vnm, sw_if_index, ctx);
+ }));
+ /* *INDENT-ON* */
+}
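+
+/*
+ * Usage sketch: a walk callback that counts the super-interface and
+ * all of its sub-interfaces ('count_sw_interfaces' is invented for
+ * illustration; the callback signature follows the void-returning
+ * call made above).
+ *
+ *   static void
+ *   count_sw_interfaces (vnet_main_t * vnm, u32 sw_if_index, void *ctx)
+ *   {
+ *     u32 *count = ctx;
+ *     *count += 1;
+ *   }
+ *
+ *   u32 count = 0;
+ *   vnet_hw_interface_walk_sw (vnm, hw_if_index,
+ *                              count_sw_interfaces, &count);
+ */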
+
+static void
+serialize_vnet_hw_interface_set_class (serialize_main_t * m, va_list * va)
+{
+ u32 hw_if_index = va_arg (*va, u32);
+ char *hw_class_name = va_arg (*va, char *);
+ serialize_integer (m, hw_if_index, sizeof (hw_if_index));
+ serialize_cstring (m, hw_class_name);
+}
+
+static void
+unserialize_vnet_hw_interface_set_class (serialize_main_t * m, va_list * va)
+{
+ CLIB_UNUSED (mc_main_t * mc) = va_arg (*va, mc_main_t *);
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 hw_if_index;
+ char *hw_class_name;
+ uword *p;
+ clib_error_t *error;
+
+ unserialize_integer (m, &hw_if_index, sizeof (hw_if_index));
+ unserialize_cstring (m, &hw_class_name);
+ p =
+ hash_get (vnm->interface_main.hw_interface_class_by_name, hw_class_name);
+ ASSERT (p != 0);
+ error = vnet_hw_interface_set_class_helper (vnm, hw_if_index, p[0],
+ /* redistribute */ 0);
+ if (error)
+ clib_error_report (error);
+}
+
+MC_SERIALIZE_MSG (vnet_hw_interface_set_class_msg, static) =
+{
+  .name = "vnet_hw_interface_set_class",
+  .serialize = serialize_vnet_hw_interface_set_class,
+  .unserialize = unserialize_vnet_hw_interface_set_class,
+};
+
+void
+vnet_hw_interface_init_for_class (vnet_main_t * vnm, u32 hw_if_index,
+ u32 hw_class_index, u32 hw_instance)
+{
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+ vnet_hw_interface_class_t *hc =
+ vnet_get_hw_interface_class (vnm, hw_class_index);
+
+ hi->hw_class_index = hw_class_index;
+ hi->hw_instance = hw_instance;
+ setup_output_node (vnm->vlib_main, hi->output_node_index, hc);
+}
+
+static clib_error_t *
+vnet_hw_interface_set_class_helper (vnet_main_t * vnm, u32 hw_if_index,
+ u32 hw_class_index, u32 redistribute)
+{
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+ vnet_sw_interface_t *si = vnet_get_sw_interface (vnm, hi->sw_if_index);
+ vnet_hw_interface_class_t *old_class =
+ vnet_get_hw_interface_class (vnm, hi->hw_class_index);
+ vnet_hw_interface_class_t *new_class =
+ vnet_get_hw_interface_class (vnm, hw_class_index);
+ vnet_device_class_t *dev_class =
+ vnet_get_device_class (vnm, hi->dev_class_index);
+ clib_error_t *error = 0;
+
+ /* New class equals old class? Nothing to do. */
+ if (hi->hw_class_index == hw_class_index)
+ return 0;
+
+  /* No need to do error checking when receiving an unserialize message
+     (and it would be incorrect, since the admin up flag may be set). */
+ if (redistribute)
+ {
+ if (si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ return clib_error_return (0,
+ "%v must be admin down to change class from %s to %s",
+ hi->name, old_class->name, new_class->name);
+
+ /* Make sure interface supports given class. */
+ if ((new_class->is_valid_class_for_interface
+ && !new_class->is_valid_class_for_interface (vnm, hw_if_index,
+ hw_class_index))
+ || (dev_class->is_valid_class_for_interface
+ && !dev_class->is_valid_class_for_interface (vnm, hw_if_index,
+ hw_class_index)))
+ return clib_error_return (0,
+ "%v class cannot be changed from %s to %s",
+ hi->name, old_class->name, new_class->name);
+
+ if (vnm->vlib_main->mc_main)
+ {
+ mc_serialize (vnm->vlib_main->mc_main,
+ &vnet_hw_interface_set_class_msg, hw_if_index,
+ new_class->name);
+ return 0;
+ }
+ }
+
+ if (old_class->hw_class_change)
+ old_class->hw_class_change (vnm, hw_if_index, old_class->index,
+ new_class->index);
+
+ vnet_hw_interface_init_for_class (vnm, hw_if_index, new_class->index,
+ /* instance */ ~0);
+
+ if (new_class->hw_class_change)
+ new_class->hw_class_change (vnm, hw_if_index, old_class->index,
+ new_class->index);
+
+ if (dev_class->hw_class_change)
+ dev_class->hw_class_change (vnm, hw_if_index, new_class->index);
+
+ return error;
+}
+
+clib_error_t *
+vnet_hw_interface_set_class (vnet_main_t * vnm, u32 hw_if_index,
+ u32 hw_class_index)
+{
+ return vnet_hw_interface_set_class_helper (vnm, hw_if_index, hw_class_index,
+ /* redistribute */ 1);
+}
+
+static int
+vnet_hw_interface_rx_redirect_to_node_helper (vnet_main_t * vnm,
+ u32 hw_if_index,
+ u32 node_index,
+ u32 redistribute)
+{
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+ vnet_device_class_t *dev_class = vnet_get_device_class
+ (vnm, hi->dev_class_index);
+
+ if (redistribute)
+ {
+ /* $$$$ fixme someday maybe */
+ ASSERT (vnm->vlib_main->mc_main == 0);
+ }
+ if (dev_class->rx_redirect_to_node)
+ {
+ dev_class->rx_redirect_to_node (vnm, hw_if_index, node_index);
+ return 0;
+ }
+
+ return VNET_API_ERROR_UNIMPLEMENTED;
+}
+
+int
+vnet_hw_interface_rx_redirect_to_node (vnet_main_t * vnm, u32 hw_if_index,
+ u32 node_index)
+{
+ return vnet_hw_interface_rx_redirect_to_node_helper (vnm, hw_if_index,
+ node_index,
+ 1 /* redistribute */ );
+}
+
+word
+vnet_sw_interface_compare (vnet_main_t * vnm,
+ uword sw_if_index0, uword sw_if_index1)
+{
+ vnet_sw_interface_t *sup0 = vnet_get_sup_sw_interface (vnm, sw_if_index0);
+ vnet_sw_interface_t *sup1 = vnet_get_sup_sw_interface (vnm, sw_if_index1);
+ vnet_hw_interface_t *h0 = vnet_get_hw_interface (vnm, sup0->hw_if_index);
+ vnet_hw_interface_t *h1 = vnet_get_hw_interface (vnm, sup1->hw_if_index);
+
+ if (h0 != h1)
+ return vec_cmp (h0->name, h1->name);
+ return (word) h0->hw_instance - (word) h1->hw_instance;
+}
+
+word
+vnet_hw_interface_compare (vnet_main_t * vnm,
+ uword hw_if_index0, uword hw_if_index1)
+{
+ vnet_hw_interface_t *h0 = vnet_get_hw_interface (vnm, hw_if_index0);
+ vnet_hw_interface_t *h1 = vnet_get_hw_interface (vnm, hw_if_index1);
+
+ if (h0 != h1)
+ return vec_cmp (h0->name, h1->name);
+ return (word) h0->hw_instance - (word) h1->hw_instance;
+}
+
+int
+vnet_sw_interface_is_p2p (vnet_main_t * vnm, u32 sw_if_index)
+{
+ vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ vnet_hw_interface_class_t *hc =
+ vnet_get_hw_interface_class (vnm, hw->hw_class_index);
+
+ return (hc->flags & VNET_HW_INTERFACE_CLASS_FLAG_P2P);
+}
+
+clib_error_t *
+vnet_interface_init (vlib_main_t * vm)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vlib_buffer_t *b = 0;
+ vnet_buffer_opaque_t *o = 0;
+
+ /*
+ * Keep people from shooting themselves in the foot.
+ */
+ if (sizeof (b->opaque) != sizeof (vnet_buffer_opaque_t))
+ {
+#define _(a) if (sizeof(o->a) > sizeof (o->unused)) \
+ clib_warning \
+ ("FATAL: size of opaque union subtype %s is %d (max %d)", \
+ #a, sizeof(o->a), sizeof (o->unused));
+ foreach_buffer_opaque_union_subtype;
+#undef _
+
+ return clib_error_return
+ (0, "FATAL: size of vlib buffer opaque %d, size of vnet opaque %d",
+ sizeof (b->opaque), sizeof (vnet_buffer_opaque_t));
+ }
+
+ im->sw_if_counter_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+ CLIB_CACHE_LINE_BYTES);
+  im->sw_if_counter_lock[0] = 1;	/* hold the lock during setup, though there should be no need */
+
+ vec_validate (im->sw_if_counters, VNET_N_SIMPLE_INTERFACE_COUNTER - 1);
+ im->sw_if_counters[VNET_INTERFACE_COUNTER_DROP].name = "drops";
+ im->sw_if_counters[VNET_INTERFACE_COUNTER_PUNT].name = "punts";
+ im->sw_if_counters[VNET_INTERFACE_COUNTER_IP4].name = "ip4";
+ im->sw_if_counters[VNET_INTERFACE_COUNTER_IP6].name = "ip6";
+ im->sw_if_counters[VNET_INTERFACE_COUNTER_RX_NO_BUF].name = "rx-no-buf";
+ im->sw_if_counters[VNET_INTERFACE_COUNTER_RX_MISS].name = "rx-miss";
+ im->sw_if_counters[VNET_INTERFACE_COUNTER_RX_ERROR].name = "rx-error";
+ im->sw_if_counters[VNET_INTERFACE_COUNTER_TX_ERROR].name = "tx-error";
+
+ vec_validate (im->combined_sw_if_counters,
+ VNET_N_COMBINED_INTERFACE_COUNTER - 1);
+ im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_RX].name = "rx";
+ im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_TX].name = "tx";
+
+ im->sw_if_counter_lock[0] = 0;
+
+ im->device_class_by_name = hash_create_string ( /* size */ 0,
+ sizeof (uword));
+ {
+ vnet_device_class_t *c;
+
+ c = vnm->device_class_registrations;
+
+ while (c)
+ {
+ c->index = vec_len (im->device_classes);
+ hash_set_mem (im->device_class_by_name, c->name, c->index);
+ vec_add1 (im->device_classes, c[0]);
+ c = c->next_class_registration;
+ }
+ }
+
+ im->hw_interface_class_by_name = hash_create_string ( /* size */ 0,
+ sizeof (uword));
+
+ im->sw_if_index_by_sup_and_sub = hash_create_mem (0, sizeof (u64),
+ sizeof (uword));
+ {
+ vnet_hw_interface_class_t *c;
+
+ c = vnm->hw_interface_class_registrations;
+
+ while (c)
+ {
+ c->index = vec_len (im->hw_interface_classes);
+ hash_set_mem (im->hw_interface_class_by_name, c->name, c->index);
+
+ if (NULL == c->build_rewrite)
+ c->build_rewrite = default_build_rewrite;
+ if (NULL == c->update_adjacency)
+ c->update_adjacency = default_update_adjacency;
+
+ vec_add1 (im->hw_interface_classes, c[0]);
+ c = c->next_class_registration;
+ }
+ }
+
+  vnm->interface_tag_by_sw_if_index = hash_create (0, sizeof (uword));
+
+  {
+    clib_error_t *error;
+
+    error = vlib_call_init_function (vm, vnet_interface_cli_init);
+    return error;
+  }
+}
+
+VLIB_INIT_FUNCTION (vnet_interface_init);
+
+/* Kludge to renumber interface names [only!] */
+int
+vnet_interface_name_renumber (u32 sw_if_index, u32 new_show_dev_instance)
+{
+ int rv;
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_hw_interface_t *hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+
+ vnet_device_class_t *dev_class = vnet_get_device_class
+ (vnm, hi->dev_class_index);
+
+ if (dev_class->name_renumber == 0 || dev_class->format_device_name == 0)
+ return VNET_API_ERROR_UNIMPLEMENTED;
+
+ rv = dev_class->name_renumber (hi, new_show_dev_instance);
+
+ if (rv)
+ return rv;
+
+ hash_unset_mem (im->hw_interface_by_name, hi->name);
+ vec_free (hi->name);
+ /* Use the mapping we set up to call it Ishmael */
+ hi->name = format (0, "%U", dev_class->format_device_name,
+ hi->dev_instance);
+
+ hash_set_mem (im->hw_interface_by_name, hi->name, hi->hw_if_index);
+ return rv;
+}
+
+clib_error_t *
+vnet_rename_interface (vnet_main_t * vnm, u32 hw_if_index, char *new_name)
+{
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vlib_main_t *vm = vnm->vlib_main;
+ vnet_hw_interface_t *hw;
+ u8 *old_name;
+ clib_error_t *error = 0;
+
+ hw = vnet_get_hw_interface (vnm, hw_if_index);
+ if (!hw)
+ {
+ return clib_error_return (0,
+ "unable to find hw interface for index %u",
+ hw_if_index);
+ }
+
+ old_name = hw->name;
+
+ /* set new hw->name */
+ hw->name = format (0, "%s", new_name);
+
+ /* remove the old name to hw_if_index mapping and install the new one */
+ hash_unset_mem (im->hw_interface_by_name, old_name);
+ hash_set_mem (im->hw_interface_by_name, hw->name, hw_if_index);
+
+ /* rename tx/output nodes */
+ vlib_node_rename (vm, hw->tx_node_index, "%v-tx", hw->name);
+ vlib_node_rename (vm, hw->output_node_index, "%v-output", hw->name);
+
+ /* free the old name vector */
+ vec_free (old_name);
+
+ return error;
+}
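+
+/*
+ * Usage sketch: rename a hardware interface; the tx/output graph
+ * nodes are renamed to match, as implemented above.
+ *
+ *   clib_error_t *error =
+ *     vnet_rename_interface (vnet_get_main (), hw_if_index, "port0");
+ *   if (error)
+ *     clib_error_report (error);
+ */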
+
+static clib_error_t *
+vnet_hw_interface_change_mac_address_helper (vnet_main_t * vnm,
+ u32 hw_if_index, u64 mac_address)
+{
+ clib_error_t *error = 0;
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ if (hi->hw_address)
+ {
+ vnet_device_class_t *dev_class =
+ vnet_get_device_class (vnm, hi->dev_class_index);
+ if (dev_class->mac_addr_change_function)
+ {
+ error =
+ dev_class->mac_addr_change_function (hi, (char *) &mac_address);
+ }
+ if (!error)
+ {
+ vnet_hw_interface_class_t *hw_class;
+
+ hw_class = vnet_get_hw_interface_class (vnm, hi->hw_class_index);
+
+ if (NULL != hw_class->mac_addr_change_function)
+ hw_class->mac_addr_change_function (hi, (char *) &mac_address);
+ }
+ else
+ {
+ error =
+ clib_error_return (0,
+ "MAC Address Change is not supported on this interface");
+ }
+ }
+ else
+ {
+ error =
+ clib_error_return (0,
+ "mac address change is not supported for interface index %u",
+ hw_if_index);
+ }
+ return error;
+}
+
+clib_error_t *
+vnet_hw_interface_change_mac_address (vnet_main_t * vnm, u32 hw_if_index,
+ u64 mac_address)
+{
+ return vnet_hw_interface_change_mac_address_helper
+ (vnm, hw_if_index, mac_address);
+}
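+
+/*
+ * Usage sketch: change the MAC of a hardware interface. The new
+ * address is carried in a u64; the helper above hands its raw bytes
+ * to the device and hardware class callbacks unchanged.
+ *
+ *   clib_error_t *error =
+ *     vnet_hw_interface_change_mac_address (vnm, hw_if_index,
+ *                                           new_mac_address);
+ *   if (error)
+ *     clib_error_report (error);
+ */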
+
+vnet_l3_packet_type_t
+vnet_link_to_l3_proto (vnet_link_t link)
+{
+ switch (link)
+ {
+ case VNET_LINK_IP4:
+ return (VNET_L3_PACKET_TYPE_IP4);
+ case VNET_LINK_IP6:
+ return (VNET_L3_PACKET_TYPE_IP6);
+ case VNET_LINK_MPLS:
+ return (VNET_L3_PACKET_TYPE_MPLS_UNICAST);
+ case VNET_LINK_ARP:
+ return (VNET_L3_PACKET_TYPE_ARP);
+ case VNET_LINK_ETHERNET:
+ ASSERT (0);
+ break;
+ }
+ ASSERT (0);
+ return (0);
+}
+
+u8 *
+default_build_rewrite (vnet_main_t * vnm,
+ u32 sw_if_index,
+ vnet_link_t link_type, const void *dst_address)
+{
+ return (NULL);
+}
+
+void
+default_update_adjacency (vnet_main_t * vnm, u32 sw_if_index, u32 ai)
+{
+ u8 *rewrite;
+
+ rewrite = vnet_build_rewrite_for_sw_interface (vnm, sw_if_index,
+ adj_get_link_type (ai),
+ NULL);
+
+ adj_nbr_update_rewrite (ai, ADJ_NBR_REWRITE_FLAG_COMPLETE, rewrite);
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/interface.h b/src/vnet/interface.h
new file mode 100644
index 00000000000..d42e5fda84c
--- /dev/null
+++ b/src/vnet/interface.h
@@ -0,0 +1,658 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * interface.h: VNET interfaces/sub-interfaces
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_vnet_interface_h
+#define included_vnet_interface_h
+
+#include <vnet/unix/pcap.h>
+#include <vnet/l3_types.h>
+
+struct vnet_main_t;
+struct vnet_hw_interface_t;
+struct vnet_sw_interface_t;
+struct ip46_address_t;
+
+/* Interface up/down callback. */
+typedef clib_error_t *(vnet_interface_function_t)
+ (struct vnet_main_t * vnm, u32 if_index, u32 flags);
+
+/* Sub-interface add/del callback. */
+typedef clib_error_t *(vnet_subif_add_del_function_t)
+ (struct vnet_main_t * vnm, u32 if_index,
+ struct vnet_sw_interface_t * template, int is_add);
+
+/* Interface set mac address callback. */
+typedef clib_error_t *(vnet_interface_set_mac_address_function_t)
+ (struct vnet_hw_interface_t * hi, char *address);
+
+typedef enum vnet_interface_function_priority_t_
+{
+ VNET_ITF_FUNC_PRIORITY_LOW,
+ VNET_ITF_FUNC_PRIORITY_HIGH,
+} vnet_interface_function_priority_t;
+#define VNET_ITF_FUNC_N_PRIO ((vnet_interface_function_priority_t)VNET_ITF_FUNC_PRIORITY_HIGH+1)
+
+typedef struct _vnet_interface_function_list_elt
+{
+ struct _vnet_interface_function_list_elt *next_interface_function;
+ clib_error_t *(*fp) (struct vnet_main_t * vnm, u32 if_index, u32 flags);
+} _vnet_interface_function_list_elt_t;
+
+#define _VNET_INTERFACE_FUNCTION_DECL(f,tag) \
+ \
+static void __vnet_interface_function_init_##tag##_##f (void) \
+ __attribute__((__constructor__)) ; \
+ \
+static void __vnet_interface_function_init_##tag##_##f (void) \
+{ \
+ vnet_main_t * vnm = vnet_get_main(); \
+ static _vnet_interface_function_list_elt_t init_function; \
+ init_function.next_interface_function = \
+ vnm->tag##_functions[VNET_ITF_FUNC_PRIORITY_LOW]; \
+ vnm->tag##_functions[VNET_ITF_FUNC_PRIORITY_LOW] = &init_function; \
+ init_function.fp = (void *) &f; \
+}
+
+#define _VNET_INTERFACE_FUNCTION_DECL_PRIO(f,tag,p) \
+ \
+static void __vnet_interface_function_init_##tag##_##f (void) \
+ __attribute__((__constructor__)) ; \
+ \
+static void __vnet_interface_function_init_##tag##_##f (void) \
+{ \
+ vnet_main_t * vnm = vnet_get_main(); \
+ static _vnet_interface_function_list_elt_t init_function; \
+ init_function.next_interface_function = vnm->tag##_functions[p]; \
+ vnm->tag##_functions[p] = &init_function; \
+ init_function.fp = (void *) &f; \
+}
+
+#define VNET_HW_INTERFACE_ADD_DEL_FUNCTION(f) \
+ _VNET_INTERFACE_FUNCTION_DECL(f,hw_interface_add_del)
+#define VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION(f) \
+ _VNET_INTERFACE_FUNCTION_DECL(f,hw_interface_link_up_down)
+#define VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION_PRIO(f,p) \
+ _VNET_INTERFACE_FUNCTION_DECL_PRIO(f,hw_interface_link_up_down,p)
+#define VNET_SW_INTERFACE_ADD_DEL_FUNCTION(f) \
+ _VNET_INTERFACE_FUNCTION_DECL(f,sw_interface_add_del)
+#define VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION(f) \
+ _VNET_INTERFACE_FUNCTION_DECL(f,sw_interface_admin_up_down)
+#define VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION_PRIO(f,p) \
+ _VNET_INTERFACE_FUNCTION_DECL_PRIO(f,sw_interface_admin_up_down, p)
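+
+/*
+ * Usage sketch: register a callback that runs on every admin
+ * up/down transition ('my_admin_up_down' and 'my_main' are invented
+ * for illustration).
+ *
+ *   static clib_error_t *
+ *   my_admin_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
+ *   {
+ *     my_main.is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+ *     return 0;
+ *   }
+ *
+ *   VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (my_admin_up_down);
+ */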
+
+/* A class of hardware interface devices. */
+typedef struct _vnet_device_class
+{
+ /* Index into main vector. */
+ u32 index;
+
+ /* Device name (e.g. "FOOBAR 1234a"). */
+ char *name;
+
+ /* Function to call when hardware interface is added/deleted. */
+ vnet_interface_function_t *interface_add_del_function;
+
+ /* Function to bring device administratively up/down. */
+ vnet_interface_function_t *admin_up_down_function;
+
+ /* Function to call when sub-interface is added/deleted */
+ vnet_subif_add_del_function_t *subif_add_del_function;
+
+ /* Redistribute flag changes/existence of this interface class. */
+ u32 redistribute;
+
+ /* Transmit function. */
+ vlib_node_function_t *tx_function;
+
+ /* Error strings indexed by error code for this node. */
+ char **tx_function_error_strings;
+
+ /* Number of error codes used by this node. */
+ u32 tx_function_n_errors;
+
+ /* Renumber device name [only!] support, a control-plane kludge */
+ int (*name_renumber) (struct vnet_hw_interface_t * hi,
+ u32 new_dev_instance);
+
+ /* Format device instance as name. */
+ format_function_t *format_device_name;
+
+ /* Parse function for device name. */
+ unformat_function_t *unformat_device_name;
+
+ /* Format device verbosely for this class. */
+ format_function_t *format_device;
+
+ /* Trace buffer format for TX function. */
+ format_function_t *format_tx_trace;
+
+ /* Function to clear hardware counters for device. */
+ void (*clear_counters) (u32 dev_class_instance);
+
+ uword (*is_valid_class_for_interface) (struct vnet_main_t * vnm,
+ u32 hw_if_index,
+ u32 hw_class_index);
+
+ /* Called when hardware class of an interface changes. */
+ void (*hw_class_change) (struct vnet_main_t * vnm,
+ u32 hw_if_index, u32 new_hw_class_index);
+
+ /* Called to redirect traffic from a specific interface instance */
+ void (*rx_redirect_to_node) (struct vnet_main_t * vnm,
+ u32 hw_if_index, u32 node_index);
+
+  /* Linked list of all device classes, set up by the constructors created below */
+ struct _vnet_device_class *next_class_registration;
+
+ /* Splice vnet_interface_output_node into TX path */
+ u8 flatten_output_chains;
+
+ /* Function to set mac address. */
+ vnet_interface_set_mac_address_function_t *mac_addr_change_function;
+} vnet_device_class_t;
+
+#define VNET_DEVICE_CLASS(x,...) \
+ __VA_ARGS__ vnet_device_class_t x; \
+static void __vnet_add_device_class_registration_##x (void) \
+ __attribute__((__constructor__)) ; \
+static void __vnet_add_device_class_registration_##x (void) \
+{ \
+ vnet_main_t * vnm = vnet_get_main(); \
+ x.next_class_registration = vnm->device_class_registrations; \
+ vnm->device_class_registrations = &x; \
+} \
+__VA_ARGS__ vnet_device_class_t x
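+
+/*
+ * Usage sketch (hypothetical device class; all names are invented
+ * for illustration):
+ *
+ *   VNET_DEVICE_CLASS (my_device_class) = {
+ *     .name = "my-device",
+ *     .format_device_name = format_my_device_name,
+ *     .tx_function = my_interface_tx,
+ *     .admin_up_down_function = my_admin_up_down,
+ *   };
+ */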
+
+#define VLIB_DEVICE_TX_FUNCTION_CLONE_TEMPLATE(arch, fn, tgt) \
+ uword \
+ __attribute__ ((flatten)) \
+ __attribute__ ((target (tgt))) \
+ CLIB_CPU_OPTIMIZED \
+ fn ## _ ## arch ( vlib_main_t * vm, \
+ vlib_node_runtime_t * node, \
+ vlib_frame_t * frame) \
+ { return fn (vm, node, frame); }
+
+#define VLIB_DEVICE_TX_FUNCTION_MULTIARCH_CLONE(fn) \
+ foreach_march_variant(VLIB_DEVICE_TX_FUNCTION_CLONE_TEMPLATE, fn)
+
+#if CLIB_DEBUG > 0
+#define VLIB_MULTIARCH_CLONE_AND_SELECT_FN(fn,...)
+#define VLIB_DEVICE_TX_FUNCTION_MULTIARCH(dev, fn)
+#else
+#define VLIB_DEVICE_TX_FUNCTION_MULTIARCH(dev, fn) \
+ VLIB_DEVICE_TX_FUNCTION_MULTIARCH_CLONE(fn) \
+ CLIB_MULTIARCH_SELECT_FN(fn, static inline) \
+ static void __attribute__((__constructor__)) \
+ __vlib_device_tx_function_multiarch_select_##dev (void) \
+ { dev.tx_function = fn ## _multiarch_select(); }
+#endif
+
+/**
+ * Link Type: A description of the protocol of packets on the link.
+ * On an Ethernet link this maps directly onto the ethertype; on a GRE tunnel
+ * it maps onto the GRE protocol field; and so on for other link types.
+ */
+typedef enum vnet_link_t_
+{
+#if CLIB_DEBUG > 0
+ VNET_LINK_IP4 = 1,
+#else
+ VNET_LINK_IP4 = 0,
+#endif
+ VNET_LINK_IP6,
+ VNET_LINK_MPLS,
+ VNET_LINK_ETHERNET,
+ VNET_LINK_ARP,
+} __attribute__ ((packed)) vnet_link_t;
+
+#define VNET_LINKS { \
+ [VNET_LINK_ETHERNET] = "ethernet", \
+ [VNET_LINK_IP4] = "ipv4", \
+ [VNET_LINK_IP6] = "ipv6", \
+ [VNET_LINK_MPLS] = "mpls", \
+ [VNET_LINK_ARP] = "arp", \
+}
+
+/**
+ * @brief Number of link types. Not part of the enum so it does not have to be included in
+ * switch statements
+ */
+#define VNET_LINK_NUM (VNET_LINK_ARP+1)
+
+/**
+ * @brief Convert a link type to the corresponding L3 packet type
+ */
+extern vnet_l3_packet_type_t vnet_link_to_l3_proto (vnet_link_t link);
+
+/**
+ * @brief Attributes assignable to a HW interface Class.
+ */
+typedef enum vnet_hw_interface_class_flags_t_
+{
+ /**
+ * @brief a point 2 point interface
+ */
+ VNET_HW_INTERFACE_CLASS_FLAG_P2P = (1 << 0),
+} vnet_hw_interface_class_flags_t;
+
+/* Layer-2 (e.g. Ethernet) interface class. */
+typedef struct _vnet_hw_interface_class
+{
+ /* Index into main vector. */
+ u32 index;
+
+ /* Class name (e.g. "Ethernet"). */
+ char *name;
+
+ /* Flags */
+ vnet_hw_interface_class_flags_t flags;
+
+ /* Function to call when hardware interface is added/deleted. */
+ vnet_interface_function_t *interface_add_del_function;
+
+ /* Function to bring interface administratively up/down. */
+ vnet_interface_function_t *admin_up_down_function;
+
+ /* Function to call when link state changes. */
+ vnet_interface_function_t *link_up_down_function;
+
+ /* Function to call when link MAC changes. */
+ vnet_interface_set_mac_address_function_t *mac_addr_change_function;
+
+ /* Format function to display interface name. */
+ format_function_t *format_interface_name;
+
+ /* Format function to display interface address. */
+ format_function_t *format_address;
+
+ /* Format packet header for this interface class. */
+ format_function_t *format_header;
+
+ /* Format device verbosely for this class. */
+ format_function_t *format_device;
+
+ /* Parser for hardware (e.g. ethernet) address. */
+ unformat_function_t *unformat_hw_address;
+
+ /* Parser for packet header for e.g. rewrite string. */
+ unformat_function_t *unformat_header;
+
+ /* Builds a rewrite string for the interface to the destination
+ * for the payload/link type. */
+ u8 *(*build_rewrite) (struct vnet_main_t * vnm,
+ u32 sw_if_index,
+ vnet_link_t link_type, const void *dst_hw_address);
+
+  /* Update an adjacency added by FIB (as opposed to via the
+ * neighbour resolution protocol). */
+ void (*update_adjacency) (struct vnet_main_t * vnm,
+ u32 sw_if_index, u32 adj_index);
+
+ uword (*is_valid_class_for_interface) (struct vnet_main_t * vnm,
+ u32 hw_if_index,
+ u32 hw_class_index);
+
+ /* Called when hw interface class is changed and old hardware instance
+ may want to be deleted. */
+ void (*hw_class_change) (struct vnet_main_t * vnm, u32 hw_if_index,
+ u32 old_class_index, u32 new_class_index);
+
+ /* List of hw interface classes, built by constructors */
+ struct _vnet_hw_interface_class *next_class_registration;
+
+} vnet_hw_interface_class_t;
+
+/**
+ * @brief Return a complete, zero-length (aka dummy) rewrite
+ */
+extern u8 *default_build_rewrite (struct vnet_main_t *vnm,
+ u32 sw_if_index,
+ vnet_link_t link_type,
+ const void *dst_hw_address);
+
+/**
+ * @brief Default adjacency update function
+ */
+extern void default_update_adjacency (struct vnet_main_t *vnm,
+ u32 sw_if_index, u32 adj_index);
+
+#define VNET_HW_INTERFACE_CLASS(x,...) \
+ __VA_ARGS__ vnet_hw_interface_class_t x; \
+static void __vnet_add_hw_interface_class_registration_##x (void) \
+ __attribute__((__constructor__)) ; \
+static void __vnet_add_hw_interface_class_registration_##x (void) \
+{ \
+ vnet_main_t * vnm = vnet_get_main(); \
+ x.next_class_registration = vnm->hw_interface_class_registrations; \
+ vnm->hw_interface_class_registrations = &x; \
+} \
+__VA_ARGS__ vnet_hw_interface_class_t x
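+
+/*
+ * Usage sketch (hypothetical hardware class; names invented for
+ * illustration). Classes that leave build_rewrite/update_adjacency
+ * NULL get the defaults installed by vnet_interface_init().
+ *
+ *   VNET_HW_INTERFACE_CLASS (my_hw_class) = {
+ *     .name = "my-hw",
+ *     .build_rewrite = my_build_rewrite,
+ *     .update_adjacency = my_update_adjacency,
+ *   };
+ */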
+
+/* Hardware-interface. This corresponds to a physical wire
+ that packets flow over. */
+typedef struct vnet_hw_interface_t
+{
+ /* Interface name. */
+ u8 *name;
+
+ u32 flags;
+ /* Hardware link state is up. */
+#define VNET_HW_INTERFACE_FLAG_LINK_UP (1 << 0)
+ /* Hardware duplex state */
+#define VNET_HW_INTERFACE_FLAG_DUPLEX_SHIFT 1
+#define VNET_HW_INTERFACE_FLAG_HALF_DUPLEX (1 << 1)
+#define VNET_HW_INTERFACE_FLAG_FULL_DUPLEX (1 << 2)
+#define VNET_HW_INTERFACE_FLAG_DUPLEX_MASK \
+ (VNET_HW_INTERFACE_FLAG_HALF_DUPLEX | \
+ VNET_HW_INTERFACE_FLAG_FULL_DUPLEX)
+
+ /* Hardware link speed */
+#define VNET_HW_INTERFACE_FLAG_SPEED_SHIFT 3
+#define VNET_HW_INTERFACE_FLAG_SPEED_10M (1 << 3)
+#define VNET_HW_INTERFACE_FLAG_SPEED_100M (1 << 4)
+#define VNET_HW_INTERFACE_FLAG_SPEED_1G (1 << 5)
+#define VNET_HW_INTERFACE_FLAG_SPEED_10G (1 << 6)
+#define VNET_HW_INTERFACE_FLAG_SPEED_40G (1 << 7)
+#define VNET_HW_INTERFACE_FLAG_SPEED_100G (1 << 8)
+#define VNET_HW_INTERFACE_FLAG_SPEED_MASK \
+ (VNET_HW_INTERFACE_FLAG_SPEED_10M | \
+ VNET_HW_INTERFACE_FLAG_SPEED_100M | \
+ VNET_HW_INTERFACE_FLAG_SPEED_1G | \
+ VNET_HW_INTERFACE_FLAG_SPEED_10G | \
+ VNET_HW_INTERFACE_FLAG_SPEED_40G | \
+ VNET_HW_INTERFACE_FLAG_SPEED_100G)
+
+ /* l2output node flags */
+#define VNET_HW_INTERFACE_FLAG_L2OUTPUT_SHIFT 9
+#define VNET_HW_INTERFACE_FLAG_L2OUTPUT_MAPPED (1 << 9)
+
+  /* Hardware address as a vector; zero-length if this class has no
+     address (e.g. PPP). */
+ u8 *hw_address;
+
+  /* NAME.{output,tx} nodes for this interface. */
+ u32 output_node_index, tx_node_index;
+
+ /* (dev_class, dev_instance) uniquely identifies hw interface. */
+ u32 dev_class_index;
+ u32 dev_instance;
+
+ /* (hw_class, hw_instance) uniquely identifies hw interface. */
+ u32 hw_class_index;
+ u32 hw_instance;
+
+ /* Hardware index for this hardware interface. */
+ u32 hw_if_index;
+
+ /* Software index for this hardware interface. */
+ u32 sw_if_index;
+
+ /* Maximum transmit rate for this interface in bits/sec. */
+ f64 max_rate_bits_per_sec;
+
+ /* Smallest packet size supported by this interface. */
+ u32 min_supported_packet_bytes;
+
+ /* Largest packet size supported by this interface. */
+ u32 max_supported_packet_bytes;
+
+ /* Smallest packet size for this interface. */
+ u32 min_packet_bytes;
+
+ /* Largest packet size for this interface. */
+ u32 max_packet_bytes;
+
+ /* Number of extra bytes that go on the wire.
+ Packet length on wire
+ = max (length + per_packet_overhead_bytes, min_packet_bytes). */
+ u32 per_packet_overhead_bytes;
+
+ /* Receive and transmit layer 3 packet size limits (MRU/MTU). */
+ u32 max_l3_packet_bytes[VLIB_N_RX_TX];
+
+ /* Hash table mapping sub interface id to sw_if_index. */
+ uword *sub_interface_sw_if_index_by_id;
+
+ /* Count of number of L2 subinterfaces */
+ u32 l2_if_count;
+
+ /* Bonded interface info -
+ 0 - not a bonded interface nor a slave
+ ~0 - slave to a bonded interface
+ others - A bonded interface with a pointer to bitmap for all slaves */
+ uword *bond_info;
+#define VNET_HW_INTERFACE_BOND_INFO_NONE ((uword *) 0)
+#define VNET_HW_INTERFACE_BOND_INFO_SLAVE ((uword *) ~0)
+
+} vnet_hw_interface_t;
+
+extern vnet_device_class_t vnet_local_interface_device_class;
+
+typedef enum
+{
+ /* A hw interface. */
+ VNET_SW_INTERFACE_TYPE_HARDWARE,
+
+ /* A sub-interface. */
+ VNET_SW_INTERFACE_TYPE_SUB,
+} vnet_sw_interface_type_t;
+
+typedef struct
+{
+ /*
+   * Subinterface ID: a number 0..N that uniquely identifies
+   * this subinterface under its parent (main) interface
+ */
+ u32 id;
+
+ /* Classification data. Used to associate packet header with subinterface. */
+ struct
+ {
+ u16 outer_vlan_id;
+ u16 inner_vlan_id;
+ union
+ {
+ u16 raw_flags;
+ struct
+ {
+ u16 no_tags:1;
+ u16 one_tag:1;
+ u16 two_tags:1;
+ u16 dot1ad:1; /* 0 = dot1q, 1=dot1ad */
+ u16 exact_match:1;
+ u16 default_sub:1;
+ u16 outer_vlan_id_any:1;
+ u16 inner_vlan_id_any:1;
+ } flags;
+ };
+ } eth;
+} vnet_sub_interface_t;
+
+typedef enum
+{
+ /* Always flood */
+ VNET_FLOOD_CLASS_NORMAL,
+ VNET_FLOOD_CLASS_TUNNEL_MASTER,
+ /* Does not flood when tunnel master is in the same L2 BD */
+ VNET_FLOOD_CLASS_TUNNEL_NORMAL
+} vnet_flood_class_t;
+
+/* Software-interface.  This corresponds to an Ethernet VLAN, an ATM VC, a
+   tunnel, etc.  Configuration (e.g. an IP address) gets attached to the
+   software interface. */
+typedef struct
+{
+ vnet_sw_interface_type_t type:16;
+
+ u16 flags;
+ /* Interface is "up" meaning adminstratively up.
+ Up in the sense of link state being up is maintained by hardware interface. */
+#define VNET_SW_INTERFACE_FLAG_ADMIN_UP (1 << 0)
+
+ /* Interface is disabled for forwarding: punt all traffic to slow-path. */
+#define VNET_SW_INTERFACE_FLAG_PUNT (1 << 1)
+
+#define VNET_SW_INTERFACE_FLAG_PROXY_ARP (1 << 2)
+
+#define VNET_SW_INTERFACE_FLAG_UNNUMBERED (1 << 3)
+
+#define VNET_SW_INTERFACE_FLAG_BOND_SLAVE (1 << 4)
+
+ /* Index for this interface. */
+ u32 sw_if_index;
+
+ /* Software interface index of super-interface;
+ equal to sw_if_index if this interface is not a
+ sub-interface. */
+ u32 sup_sw_if_index;
+
+ /* this swif is unnumbered, use addresses on unnumbered_sw_if_index... */
+ u32 unnumbered_sw_if_index;
+
+ u32 link_speed;
+
+ union
+ {
+ /* VNET_SW_INTERFACE_TYPE_HARDWARE. */
+ u32 hw_if_index;
+
+ /* VNET_SW_INTERFACE_TYPE_SUB. */
+ vnet_sub_interface_t sub;
+ };
+
+ vnet_flood_class_t flood_class;
+} vnet_sw_interface_t;
+
+typedef enum
+{
+ /* Simple counters. */
+ VNET_INTERFACE_COUNTER_DROP = 0,
+ VNET_INTERFACE_COUNTER_PUNT = 1,
+ VNET_INTERFACE_COUNTER_IP4 = 2,
+ VNET_INTERFACE_COUNTER_IP6 = 3,
+ VNET_INTERFACE_COUNTER_RX_NO_BUF = 4,
+ VNET_INTERFACE_COUNTER_RX_MISS = 5,
+ VNET_INTERFACE_COUNTER_RX_ERROR = 6,
+ VNET_INTERFACE_COUNTER_TX_ERROR = 7,
+ VNET_INTERFACE_COUNTER_MPLS = 8,
+ VNET_N_SIMPLE_INTERFACE_COUNTER = 9,
+ /* Combined counters. */
+ VNET_INTERFACE_COUNTER_RX = 0,
+ VNET_INTERFACE_COUNTER_TX = 1,
+ VNET_N_COMBINED_INTERFACE_COUNTER = 2,
+} vnet_interface_counter_type_t;
+
+typedef struct
+{
+ u32 output_node_index;
+ u32 tx_node_index;
+} vnet_hw_interface_nodes_t;
+
+typedef struct
+{
+ /* Hardware interfaces. */
+ vnet_hw_interface_t *hw_interfaces;
+
+ /* Hash table mapping HW interface name to index. */
+ uword *hw_interface_by_name;
+
+  /* Vectors of hardware interface classes and device classes. */
+ vnet_hw_interface_class_t *hw_interface_classes;
+ vnet_device_class_t *device_classes;
+
+ /* Hash table mapping name to hw interface/device class. */
+ uword *hw_interface_class_by_name;
+ uword *device_class_by_name;
+
+ /* Software interfaces. */
+ vnet_sw_interface_t *sw_interfaces;
+
+  /* Hash table mapping sub-interface sw_if_index by sup sw_if_index and sub id */
+ uword *sw_if_index_by_sup_and_sub;
+
+  /* Software interface counters, both simple and combined
+     packet and byte counters. */
+ volatile u32 *sw_if_counter_lock;
+ vlib_simple_counter_main_t *sw_if_counters;
+ vlib_combined_counter_main_t *combined_sw_if_counters;
+
+ vnet_hw_interface_nodes_t *deleted_hw_interface_nodes;
+
+ /* pcap drop tracing */
+ int drop_pcap_enable;
+ pcap_main_t pcap_main;
+ u8 *pcap_filename;
+ u32 pcap_sw_if_index;
+ u32 pcap_pkts_to_capture;
+ uword *pcap_drop_filter_hash;
+
+ /* feature_arc_index */
+ u8 output_feature_arc_index;
+} vnet_interface_main_t;
+
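+/* Counter lock: a minimal test-and-set spin lock protecting the software
+   interface counter vectors. When sw_if_counter_lock is null, both helpers
+   below are no-ops. */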
+static inline void
+vnet_interface_counter_lock (vnet_interface_main_t * im)
+{
+ if (im->sw_if_counter_lock)
+ while (__sync_lock_test_and_set (im->sw_if_counter_lock, 1))
+ /* zzzz */ ;
+}
+
+static inline void
+vnet_interface_counter_unlock (vnet_interface_main_t * im)
+{
+ if (im->sw_if_counter_lock)
+ *im->sw_if_counter_lock = 0;
+}
+
+void vnet_pcap_drop_trace_filter_add_del (u32 error_index, int is_add);
+
+int vnet_interface_name_renumber (u32 sw_if_index, u32 new_show_dev_instance);
+
+#endif /* included_vnet_interface_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/interface_api.c b/src/vnet/interface_api.c
new file mode 100644
index 00000000000..42fd14ee19d
--- /dev/null
+++ b/src/vnet/interface_api.c
@@ -0,0 +1,725 @@
+/*
+ *------------------------------------------------------------------
+ * interface_api.c - vnet interface api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/l2/l2_vtr.h>
+#include <vnet/vnet_msg_enum.h>
+#include <vnet/fib/fib_api.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define endian-swap functions */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(SW_INTERFACE_SET_FLAGS, sw_interface_set_flags) \
+_(SW_INTERFACE_SET_MTU, sw_interface_set_mtu) \
+_(WANT_INTERFACE_EVENTS, want_interface_events) \
+_(SW_INTERFACE_DUMP, sw_interface_dump) \
+_(SW_INTERFACE_DETAILS, sw_interface_details) \
+_(SW_INTERFACE_ADD_DEL_ADDRESS, sw_interface_add_del_address) \
+_(SW_INTERFACE_SET_TABLE, sw_interface_set_table) \
+_(SW_INTERFACE_GET_TABLE, sw_interface_get_table) \
+_(SW_INTERFACE_SET_UNNUMBERED, sw_interface_set_unnumbered) \
+_(SW_INTERFACE_CLEAR_STATS, sw_interface_clear_stats) \
+_(SW_INTERFACE_TAG_ADD_DEL, sw_interface_tag_add_del)
+
+static void
+vl_api_sw_interface_set_flags_t_handler (vl_api_sw_interface_set_flags_t * mp)
+{
+ vl_api_sw_interface_set_flags_reply_t *rmp;
+ vnet_main_t *vnm = vnet_get_main ();
+ int rv = 0;
+ clib_error_t *error;
+ u16 flags;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ flags = mp->admin_up_down ? VNET_SW_INTERFACE_FLAG_ADMIN_UP : 0;
+
+ error = vnet_sw_interface_set_flags (vnm, ntohl (mp->sw_if_index), flags);
+ if (error)
+ {
+ rv = -1;
+ clib_error_report (error);
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_FLAGS_REPLY);
+}
+
+static void
+vl_api_sw_interface_set_mtu_t_handler (vl_api_sw_interface_set_mtu_t * mp)
+{
+ vl_api_sw_interface_set_mtu_reply_t *rmp;
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 flags = ETHERNET_INTERFACE_FLAG_MTU;
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+ u16 mtu = ntohs (mp->mtu);
+ ethernet_main_t *em = &ethernet_main;
+ int rv = 0;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, sw_if_index);
+ ethernet_interface_t *eif = ethernet_get_interface (em, sw_if_index);
+
+ if (!eif)
+ {
+ rv = VNET_API_ERROR_FEATURE_DISABLED;
+ goto bad_sw_if_index;
+ }
+
+ if (mtu < hi->min_supported_packet_bytes)
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto bad_sw_if_index;
+ }
+
+ if (mtu > hi->max_supported_packet_bytes)
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto bad_sw_if_index;
+ }
+
+ if (hi->max_packet_bytes != mtu)
+ {
+ hi->max_packet_bytes = mtu;
+ ethernet_set_flags (vnm, sw_if_index, flags);
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_MTU_REPLY);
+}
+
+static void
+send_sw_interface_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t * q,
+ vnet_sw_interface_t * swif,
+ u8 * interface_name, u32 context)
+{
+ vl_api_sw_interface_details_t *mp;
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_hw_interface_t *hi;
+ u8 *tag;
+
+ hi = vnet_get_sup_hw_interface (am->vnet_main, swif->sw_if_index);
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_DETAILS);
+ mp->sw_if_index = ntohl (swif->sw_if_index);
+ mp->sup_sw_if_index = ntohl (swif->sup_sw_if_index);
+ mp->admin_up_down = (swif->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? 1 : 0;
+ mp->link_up_down = (hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP) ? 1 : 0;
+ mp->link_duplex = ((hi->flags & VNET_HW_INTERFACE_FLAG_DUPLEX_MASK) >>
+ VNET_HW_INTERFACE_FLAG_DUPLEX_SHIFT);
+ mp->link_speed = ((hi->flags & VNET_HW_INTERFACE_FLAG_SPEED_MASK) >>
+ VNET_HW_INTERFACE_FLAG_SPEED_SHIFT);
+ mp->link_mtu = ntohs (hi->max_packet_bytes);
+ mp->context = context;
+
+ strncpy ((char *) mp->interface_name,
+ (char *) interface_name, ARRAY_LEN (mp->interface_name) - 1);
+
+ /* Send the L2 address for ethernet physical intfcs */
+ if (swif->sup_sw_if_index == swif->sw_if_index
+ && hi->hw_class_index == ethernet_hw_interface_class.index)
+ {
+ ethernet_main_t *em = ethernet_get_main (am->vlib_main);
+ ethernet_interface_t *ei;
+
+ ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
+ ASSERT (sizeof (mp->l2_address) >= sizeof (ei->address));
+ clib_memcpy (mp->l2_address, ei->address, sizeof (ei->address));
+ mp->l2_address_length = ntohl (sizeof (ei->address));
+ }
+ else if (swif->sup_sw_if_index != swif->sw_if_index)
+ {
+ vnet_sub_interface_t *sub = &swif->sub;
+ mp->sub_id = ntohl (sub->id);
+ mp->sub_dot1ad = sub->eth.flags.dot1ad;
+ mp->sub_number_of_tags =
+ sub->eth.flags.one_tag + sub->eth.flags.two_tags * 2;
+ mp->sub_outer_vlan_id = ntohs (sub->eth.outer_vlan_id);
+ mp->sub_inner_vlan_id = ntohs (sub->eth.inner_vlan_id);
+ mp->sub_exact_match = sub->eth.flags.exact_match;
+ mp->sub_default = sub->eth.flags.default_sub;
+ mp->sub_outer_vlan_id_any = sub->eth.flags.outer_vlan_id_any;
+ mp->sub_inner_vlan_id_any = sub->eth.flags.inner_vlan_id_any;
+
+ /* vlan tag rewrite data */
+ u32 vtr_op = L2_VTR_DISABLED;
+ u32 vtr_push_dot1q = 0, vtr_tag1 = 0, vtr_tag2 = 0;
+
+ if (l2vtr_get (am->vlib_main, am->vnet_main, swif->sw_if_index,
+ &vtr_op, &vtr_push_dot1q, &vtr_tag1, &vtr_tag2) != 0)
+ {
+	  /* error - default to disabled */
+ mp->vtr_op = ntohl (L2_VTR_DISABLED);
+ clib_warning ("cannot get vlan tag rewrite for sw_if_index %d",
+ swif->sw_if_index);
+ }
+ else
+ {
+ mp->vtr_op = ntohl (vtr_op);
+ mp->vtr_push_dot1q = ntohl (vtr_push_dot1q);
+ mp->vtr_tag1 = ntohl (vtr_tag1);
+ mp->vtr_tag2 = ntohl (vtr_tag2);
+ }
+ }
+
+ tag = vnet_get_sw_interface_tag (vnm, swif->sw_if_index);
+ if (tag)
+ strncpy ((char *) mp->tag, (char *) tag, ARRAY_LEN (mp->tag) - 1);
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_sw_interface_dump_t_handler (vl_api_sw_interface_dump_t * mp)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+ vnet_sw_interface_t *swif;
+ vnet_interface_main_t *im = &am->vnet_main->interface_main;
+ u8 *filter_string = 0, *name_string = 0;
+ unix_shared_memory_queue_t *q;
+ char *strcasestr (char *, char *); /* lnx hdr file botch */
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+
+ if (q == 0)
+ return;
+
+ if (mp->name_filter_valid)
+ {
+ mp->name_filter[ARRAY_LEN (mp->name_filter) - 1] = 0;
+ filter_string = format (0, "%s%c", mp->name_filter, 0);
+ }
+
+ /* *INDENT-OFF* */
+ pool_foreach (swif, im->sw_interfaces,
+ ({
+ name_string = format (name_string, "%U%c",
+ format_vnet_sw_interface_name,
+ am->vnet_main, swif, 0);
+
+ if (mp->name_filter_valid == 0 ||
+ strcasestr((char *) name_string, (char *) filter_string)) {
+
+ send_sw_interface_details (am, q, swif, name_string, mp->context);
+ }
+ _vec_len (name_string) = 0;
+ }));
+ /* *INDENT-ON* */
+
+ vec_free (name_string);
+ vec_free (filter_string);
+}
+
+static void
+ vl_api_sw_interface_add_del_address_t_handler
+ (vl_api_sw_interface_add_del_address_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_sw_interface_add_del_address_reply_t *rmp;
+ int rv = 0;
+ u32 is_del;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ is_del = mp->is_add == 0;
+
+ if (mp->del_all)
+ ip_del_all_interface_addresses (vm, ntohl (mp->sw_if_index));
+ else if (mp->is_ipv6)
+ ip6_add_del_interface_address (vm, ntohl (mp->sw_if_index),
+ (void *) mp->address,
+ mp->address_length, is_del);
+ else
+ ip4_add_del_interface_address (vm, ntohl (mp->sw_if_index),
+ (void *) mp->address,
+ mp->address_length, is_del);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_ADD_DEL_ADDRESS_REPLY);
+}
+
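+/* Weak no-op stubs; a stats component, if linked in, overrides these
+   with real lock/unlock implementations. */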
+void stats_dslock_with_hint (int hint, int tag) __attribute__ ((weak));
+void
+stats_dslock_with_hint (int hint, int tag)
+{
+}
+
+void stats_dsunlock (void) __attribute__ ((weak));
+void
+stats_dsunlock (void)
+{
+}
+
+static void
+vl_api_sw_interface_set_table_t_handler (vl_api_sw_interface_set_table_t * mp)
+{
+ int rv = 0;
+ u32 table_id = ntohl (mp->vrf_id);
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+ vl_api_sw_interface_set_table_reply_t *rmp;
+ u32 fib_index;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ stats_dslock_with_hint (1 /* release hint */ , 4 /* tag */ );
+
+ if (mp->is_ipv6)
+ {
+ fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6,
+ table_id);
+
+ vec_validate (ip6_main.fib_index_by_sw_if_index, sw_if_index);
+ ip6_main.fib_index_by_sw_if_index[sw_if_index] = fib_index;
+ }
+ else
+ {
+
+ fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4,
+ table_id);
+
+ vec_validate (ip4_main.fib_index_by_sw_if_index, sw_if_index);
+ ip4_main.fib_index_by_sw_if_index[sw_if_index] = fib_index;
+ }
+ stats_dsunlock ();
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_TABLE_REPLY);
+}
+
+static void
+send_sw_interface_get_table_reply (unix_shared_memory_queue_t * q,
+ u32 context, int retval, u32 vrf_id)
+{
+ vl_api_sw_interface_get_table_reply_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_GET_TABLE_REPLY);
+ mp->context = context;
+ mp->retval = htonl (retval);
+ mp->vrf_id = htonl (vrf_id);
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_sw_interface_get_table_t_handler (vl_api_sw_interface_get_table_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+ fib_table_t *fib_table = 0;
+ u32 sw_if_index = ~0;
+ u32 fib_index = ~0;
+ u32 table_id = ~0;
+ fib_protocol_t fib_proto = FIB_PROTOCOL_IP4;
+ int rv = 0;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ if (mp->is_ipv6)
+ fib_proto = FIB_PROTOCOL_IP6;
+
+ fib_index = fib_table_get_index_for_sw_if_index (fib_proto, sw_if_index);
+ if (fib_index != ~0)
+ {
+ fib_table = fib_table_get (fib_index, fib_proto);
+ table_id = fib_table->ft_table_id;
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ send_sw_interface_get_table_reply (q, mp->context, rv, table_id);
+}
+
+static void vl_api_sw_interface_set_unnumbered_t_handler
+ (vl_api_sw_interface_set_unnumbered_t * mp)
+{
+ vl_api_sw_interface_set_unnumbered_reply_t *rmp;
+ int rv = 0;
+ vnet_sw_interface_t *si;
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 sw_if_index, unnumbered_sw_if_index;
+
+ sw_if_index = ntohl (mp->sw_if_index);
+ unnumbered_sw_if_index = ntohl (mp->unnumbered_sw_if_index);
+
+ /*
+ * The API message field names are backwards from
+ * the underlying data structure names.
+ * It's not worth changing them now.
+ */
+ if (pool_is_free_index (vnm->interface_main.sw_interfaces,
+ unnumbered_sw_if_index))
+ {
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ goto done;
+ }
+
+ /* Only check the "use loop0" field when setting the binding */
+ if (mp->is_add &&
+ pool_is_free_index (vnm->interface_main.sw_interfaces, sw_if_index))
+ {
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX_2;
+ goto done;
+ }
+
+ si = vnet_get_sw_interface (vnm, unnumbered_sw_if_index);
+
+ if (mp->is_add)
+ {
+ si->flags |= VNET_SW_INTERFACE_FLAG_UNNUMBERED;
+ si->unnumbered_sw_if_index = sw_if_index;
+ ip4_sw_interface_enable_disable (unnumbered_sw_if_index, 1);
+ ip6_sw_interface_enable_disable (unnumbered_sw_if_index, 1);
+ }
+ else
+ {
+ si->flags &= ~(VNET_SW_INTERFACE_FLAG_UNNUMBERED);
+ si->unnumbered_sw_if_index = (u32) ~ 0;
+ ip4_sw_interface_enable_disable (unnumbered_sw_if_index, 0);
+ ip6_sw_interface_enable_disable (unnumbered_sw_if_index, 0);
+ }
+
+done:
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_UNNUMBERED_REPLY);
+}
+
+static void
+vl_api_sw_interface_clear_stats_t_handler (vl_api_sw_interface_clear_stats_t *
+ mp)
+{
+ vl_api_sw_interface_clear_stats_reply_t *rmp;
+
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vlib_simple_counter_main_t *sm;
+ vlib_combined_counter_main_t *cm;
+ static vnet_main_t **my_vnet_mains;
+ int i, j, n_counters;
+ int rv = 0;
+
+ if (mp->sw_if_index != ~0)
+ VALIDATE_SW_IF_INDEX (mp);
+
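+  /* Gather all registered vnet_main_t copies; fall back to the primary
+     instance when none are registered. */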
+ vec_reset_length (my_vnet_mains);
+
+ for (i = 0; i < vec_len (vnet_mains); i++)
+ {
+ if (vnet_mains[i])
+ vec_add1 (my_vnet_mains, vnet_mains[i]);
+ }
+
+ if (vec_len (vnet_mains) == 0)
+ vec_add1 (my_vnet_mains, vnm);
+
+ n_counters = vec_len (im->combined_sw_if_counters);
+
+ for (j = 0; j < n_counters; j++)
+ {
+ for (i = 0; i < vec_len (my_vnet_mains); i++)
+ {
+ im = &my_vnet_mains[i]->interface_main;
+ cm = im->combined_sw_if_counters + j;
+ if (mp->sw_if_index == (u32) ~ 0)
+ vlib_clear_combined_counters (cm);
+ else
+ vlib_zero_combined_counter (cm, ntohl (mp->sw_if_index));
+ }
+ }
+
+ n_counters = vec_len (im->sw_if_counters);
+
+ for (j = 0; j < n_counters; j++)
+ {
+ for (i = 0; i < vec_len (my_vnet_mains); i++)
+ {
+ im = &my_vnet_mains[i]->interface_main;
+ sm = im->sw_if_counters + j;
+ if (mp->sw_if_index == (u32) ~ 0)
+ vlib_clear_simple_counters (sm);
+ else
+ vlib_zero_simple_counter (sm, ntohl (mp->sw_if_index));
+ }
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_CLEAR_STATS_REPLY);
+}
+
+#define API_LINK_STATE_EVENT 1
+#define API_ADMIN_UP_DOWN_EVENT 2
+
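+/* qsort-style compare so duplicate sw_if_indices end up adjacent and
+   can be skipped in the event loop. */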
+static int
+event_data_cmp (void *a1, void *a2)
+{
+ uword *e1 = a1;
+ uword *e2 = a2;
+
+ return (word) e1[0] - (word) e2[0];
+}
+
+static void
+send_sw_interface_flags (vpe_api_main_t * am,
+ unix_shared_memory_queue_t * q,
+ vnet_sw_interface_t * swif)
+{
+ vl_api_sw_interface_set_flags_t *mp;
+ vnet_main_t *vnm = am->vnet_main;
+
+ vnet_hw_interface_t *hi = vnet_get_sup_hw_interface (vnm,
+ swif->sw_if_index);
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SET_FLAGS);
+ mp->sw_if_index = ntohl (swif->sw_if_index);
+
+ mp->admin_up_down = (swif->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? 1 : 0;
+ mp->link_up_down = (hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP) ? 1 : 0;
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static uword
+link_state_process (vlib_main_t * vm,
+ vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ vpe_api_main_t *vam = &vpe_api_main;
+ vnet_main_t *vnm = vam->vnet_main;
+ vnet_sw_interface_t *swif;
+ uword *event_data = 0;
+ vpe_client_registration_t *reg;
+ int i;
+ u32 prev_sw_if_index;
+ unix_shared_memory_queue_t *q;
+
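+  /* Mark the process as running so the up/down callbacks below start
+     signalling events to it. */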
+ vam->link_state_process_up = 1;
+
+ while (1)
+ {
+ vlib_process_wait_for_event (vm);
+
+ /* Unified list of changed link or admin state sw_if_indices */
+ vlib_process_get_events_with_type
+ (vm, &event_data, API_LINK_STATE_EVENT);
+ vlib_process_get_events_with_type
+ (vm, &event_data, API_ADMIN_UP_DOWN_EVENT);
+
+ /* Sort, so we can eliminate duplicates */
+ vec_sort_with_function (event_data, event_data_cmp);
+
+ prev_sw_if_index = ~0;
+
+ for (i = 0; i < vec_len (event_data); i++)
+ {
+ /* Only one message per swif */
+ if (prev_sw_if_index == event_data[i])
+ continue;
+ prev_sw_if_index = event_data[i];
+
+ /* *INDENT-OFF* */
+ pool_foreach(reg, vam->interface_events_registrations,
+ ({
+ q = vl_api_client_index_to_input_queue (reg->client_index);
+ if (q)
+ {
+ /* sw_interface may be deleted already */
+ if (!pool_is_free_index (vnm->interface_main.sw_interfaces,
+ event_data[i]))
+ {
+ swif = vnet_get_sw_interface (vnm, event_data[i]);
+ send_sw_interface_flags (vam, q, swif);
+ }
+ }
+ }));
+ /* *INDENT-ON* */
+ }
+ vec_reset_length (event_data);
+ }
+
+ return 0;
+}
+
+static clib_error_t *link_up_down_function (vnet_main_t * vm, u32 hw_if_index,
+ u32 flags);
+static clib_error_t *admin_up_down_function (vnet_main_t * vm,
+ u32 hw_if_index, u32 flags);
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (link_state_process_node,static) = {
+ .function = link_state_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "vpe-link-state-process",
+};
+/* *INDENT-ON* */
+
+VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (admin_up_down_function);
+VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION (link_up_down_function);
+
+static clib_error_t *
+link_up_down_function (vnet_main_t * vm, u32 hw_if_index, u32 flags)
+{
+ vpe_api_main_t *vam = &vpe_api_main;
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vm, hw_if_index);
+
+ if (vam->link_state_process_up)
+ vlib_process_signal_event (vam->vlib_main,
+ link_state_process_node.index,
+ API_LINK_STATE_EVENT, hi->sw_if_index);
+ return 0;
+}
+
+static clib_error_t *
+admin_up_down_function (vnet_main_t * vm, u32 sw_if_index, u32 flags)
+{
+ vpe_api_main_t *vam = &vpe_api_main;
+
+  /*
+   * Note: it's perfectly fair to set a subif admin up / admin down.
+   * Unlike the previous routine, which receives a hw_if_index and
+   * signals the corresponding sw_if_index, this one is handed a
+   * sw_if_index directly.
+   */
+ if (vam->link_state_process_up)
+ vlib_process_signal_event (vam->vlib_main,
+ link_state_process_node.index,
+ API_ADMIN_UP_DOWN_EVENT, sw_if_index);
+ return 0;
+}
+
+static void vl_api_sw_interface_tag_add_del_t_handler
+ (vl_api_sw_interface_tag_add_del_t * mp)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vl_api_sw_interface_tag_add_del_reply_t *rmp;
+ int rv = 0;
+ u8 *tag;
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ if (mp->is_add)
+ {
+ if (mp->tag[0] == 0)
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto out;
+ }
+
+ mp->tag[ARRAY_LEN (mp->tag) - 1] = 0;
+ tag = format (0, "%s%c", mp->tag, 0);
+ vnet_set_sw_interface_tag (vnm, tag, sw_if_index);
+ }
+ else
+ vnet_clear_sw_interface_tag (vnm, sw_if_index);
+
+ BAD_SW_IF_INDEX_LABEL;
+out:
+ REPLY_MACRO (VL_API_SW_INTERFACE_TAG_ADD_DEL_REPLY);
+}
+
+static void
+vl_api_sw_interface_details_t_handler (vl_api_sw_interface_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+/*
+ * vpe_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/interface.api.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_interface;
+#undef _
+}
+
+pub_sub_handler (interface_events, INTERFACE_EVENTS);
+
+static clib_error_t *
+interface_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (interface_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/interface_cli.c b/src/vnet/interface_cli.c
new file mode 100644
index 00000000000..7dbee867ded
--- /dev/null
+++ b/src/vnet/interface_cli.c
@@ -0,0 +1,1165 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * interface_cli.c: interface CLI
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * Interface CLI.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vppinfra/bitmap.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/ip6_fib.h>
+
+static int
+compare_interface_names (void *a1, void *a2)
+{
+ u32 *hi1 = a1;
+ u32 *hi2 = a2;
+
+ return vnet_hw_interface_compare (vnet_get_main (), *hi1, *hi2);
+}
+
+static clib_error_t *
+show_or_clear_hw_interfaces (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *error = 0;
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_hw_interface_t *hi;
+ u32 hw_if_index, *hw_if_indices = 0;
+ int i, verbose = -1, is_show, show_bond = 0;
+
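+  /* One handler backs both "show hardware-interfaces" and
+     "clear hardware-interfaces"; dispatch on the command path. */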
+ is_show = strstr (cmd->path, "show") != 0;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ /* See if user wants to show a specific interface. */
+ if (unformat
+ (input, "%U", unformat_vnet_hw_interface, vnm, &hw_if_index))
+ vec_add1 (hw_if_indices, hw_if_index);
+
+ /* See if user wants to show an interface with a specific hw_if_index. */
+ else if (unformat (input, "%u", &hw_if_index))
+ vec_add1 (hw_if_indices, hw_if_index);
+
+ else if (unformat (input, "verbose"))
+ verbose = 1; /* this is also the default */
+
+ else if (unformat (input, "detail"))
+ verbose = 2;
+
+ else if (unformat (input, "brief"))
+ verbose = 0;
+
+ else if (unformat (input, "bond"))
+ {
+ show_bond = 1;
+ if (verbose < 0)
+ verbose = 0; /* default to brief for link bonding */
+ }
+
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+
+ /* Gather interfaces. */
+ if (vec_len (hw_if_indices) == 0)
+ pool_foreach (hi, im->hw_interfaces,
+ vec_add1 (hw_if_indices, hi - im->hw_interfaces));
+
+ if (verbose < 0)
+ verbose = 1; /* default to verbose (except bond) */
+
+ if (is_show)
+ {
+ /* Sort by name. */
+ vec_sort_with_function (hw_if_indices, compare_interface_names);
+
+ vlib_cli_output (vm, "%U\n", format_vnet_hw_interface, vnm, 0, verbose);
+ for (i = 0; i < vec_len (hw_if_indices); i++)
+ {
+ hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
+ if (show_bond == 0) /* show all interfaces */
+ vlib_cli_output (vm, "%U\n", format_vnet_hw_interface, vnm,
+ hi, verbose);
+ else if ((hi->bond_info) &&
+ (hi->bond_info != VNET_HW_INTERFACE_BOND_INFO_SLAVE))
+ { /* show only bonded interface and all its slave interfaces */
+ int hw_idx;
+ vnet_hw_interface_t *shi;
+ vlib_cli_output (vm, "%U\n", format_vnet_hw_interface, vnm,
+ hi, verbose);
+
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (hw_idx, hi->bond_info,
+ ({
+ shi = vnet_get_hw_interface(vnm, hw_idx);
+ vlib_cli_output (vm, "%U\n",
+ format_vnet_hw_interface, vnm, shi, verbose);
+ }));
+ /* *INDENT-ON* */
+ }
+ }
+ }
+ else
+ {
+ for (i = 0; i < vec_len (hw_if_indices); i++)
+ {
+ vnet_device_class_t *dc;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
+ dc = vec_elt_at_index (im->device_classes, hi->dev_class_index);
+
+ if (dc->clear_counters)
+ dc->clear_counters (hi->dev_instance);
+ }
+ }
+
+done:
+ vec_free (hw_if_indices);
+ return error;
+}
+
+/* *INDENT-OFF* */
+/*?
+ * Displays various information about the state of the system's
+ * hardware interfaces.
+ *
+ * @cliexpar
+ * @cliexstart{show hardware}
+ * Name Link Hardware
+ * GigabitEthernet2/0/0 up GigabitEthernet2/0/0
+ * Ethernet address 00:50:56:b7:7c:83
+ * Intel 82545em_copper
+ * link up, media 1000T full-duplex, master,
+ * 0 unprocessed, 384 total buffers on rx queue 0 ring
+ * 237 buffers in driver rx cache
+ * rx total packets 1816
+ * rx total bytes 181084
+ * rx good packets 1816
+ * rx good bytes 181084
+ * rx 65 127 byte packets 1586
+ * rx 256 511 byte packets 230
+ * tx total packets 346
+ * tx total bytes 90224
+ * tx good packets 346
+ * tx good bytes 88840
+ * tx 64 byte packets 1
+ * tx 65 127 byte packets 115
+ * tx 256 511 byte packets 230
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND (show_hw_interfaces_command, static) = {
+ .path = "show hardware-interfaces",
+ .short_help = "show hardware-interfaces [brief|verbose|detail] [bond] [<if-name1> <if-name2> ...]",
+ .function = show_or_clear_hw_interfaces,
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (clear_hw_interface_counters_command, static) = {
+ .path = "clear hardware-interfaces",
+ .short_help = "Clear hardware interfaces statistics",
+ .function = show_or_clear_hw_interfaces,
+};
+/* *INDENT-ON* */
+
+static int
+sw_interface_name_compare (void *a1, void *a2)
+{
+ vnet_sw_interface_t *si1 = a1;
+ vnet_sw_interface_t *si2 = a2;
+
+ return vnet_sw_interface_compare (vnet_get_main (),
+ si1->sw_if_index, si2->sw_if_index);
+}
+
+static clib_error_t *
+show_sw_interfaces (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ clib_error_t *error = 0;
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_sw_interface_t *si, *sorted_sis = 0;
+ u32 sw_if_index = ~(u32) 0;
+ u8 show_addresses = 0;
+ u8 show_features = 0;
+ u8 show_tag = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ /* See if user wants to show specific interface */
+ if (unformat
+ (input, "%U", unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ si = pool_elt_at_index (im->sw_interfaces, sw_if_index);
+ vec_add1 (sorted_sis, si[0]);
+ }
+ else if (unformat (input, "address") || unformat (input, "addr"))
+ show_addresses = 1;
+ else if (unformat (input, "features") || unformat (input, "feat"))
+ show_features = 1;
+ else if (unformat (input, "tag"))
+ show_tag = 1;
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+
+ if (show_features || show_tag)
+ {
+ if (sw_if_index == ~(u32) 0)
+ return clib_error_return (0, "Interface not specified...");
+ }
+
+ if (show_features)
+ {
+ vnet_interface_features_show (vm, sw_if_index);
+ return 0;
+ }
+ if (show_tag)
+ {
+ u8 *tag;
+ tag = vnet_get_sw_interface_tag (vnm, sw_if_index);
+ vlib_cli_output (vm, "%U: %s",
+ format_vnet_sw_if_index_name, vnm, sw_if_index,
+ tag ? (char *) tag : "(none)");
+ return 0;
+ }
+
+ if (!show_addresses)
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_interface, vnm, 0);
+
+ if (vec_len (sorted_sis) == 0) /* Get all interfaces */
+ {
+ /* Gather interfaces. */
+ sorted_sis =
+ vec_new (vnet_sw_interface_t, pool_elts (im->sw_interfaces));
+ _vec_len (sorted_sis) = 0;
+ pool_foreach (si, im->sw_interfaces, (
+ {
+ vec_add1 (sorted_sis, si[0]);
+ }
+ ));
+
+ /* Sort by name. */
+ vec_sort_with_function (sorted_sis, sw_interface_name_compare);
+ }
+
+ if (show_addresses)
+ {
+ vec_foreach (si, sorted_sis)
+ {
+ l2input_main_t *l2m = &l2input_main;
+ ip4_main_t *im4 = &ip4_main;
+ ip6_main_t *im6 = &ip6_main;
+ ip_lookup_main_t *lm4 = &im4->lookup_main;
+ ip_lookup_main_t *lm6 = &im6->lookup_main;
+ ip_interface_address_t *ia = 0;
+ ip4_address_t *r4;
+ ip6_address_t *r6;
+ u32 fib_index4 = 0, fib_index6 = 0;
+ ip4_fib_t *fib4;
+ ip6_fib_t *fib6;
+ l2_input_config_t *config;
+
+ if (vec_len (im4->fib_index_by_sw_if_index) > si->sw_if_index)
+ fib_index4 = vec_elt (im4->fib_index_by_sw_if_index,
+ si->sw_if_index);
+
+ if (vec_len (im6->fib_index_by_sw_if_index) > si->sw_if_index)
+ fib_index6 = vec_elt (im6->fib_index_by_sw_if_index,
+ si->sw_if_index);
+
+ fib4 = ip4_fib_get (fib_index4);
+ fib6 = ip6_fib_get (fib_index6);
+
+ if (si->flags & VNET_SW_INTERFACE_FLAG_UNNUMBERED)
+ vlib_cli_output
+ (vm, "%U (%s): \n unnumbered, use %U",
+ format_vnet_sw_if_index_name,
+ vnm, si->sw_if_index,
+ (si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? "up" : "dn",
+ format_vnet_sw_if_index_name, vnm, si->unnumbered_sw_if_index);
+
+ else
+ {
+ vlib_cli_output (vm, "%U (%s):",
+ format_vnet_sw_if_index_name,
+ vnm, si->sw_if_index,
+ (si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ ? "up" : "dn");
+ }
+
+ /* Display any L2 addressing info */
+ vec_validate (l2m->configs, si->sw_if_index);
+ config = vec_elt_at_index (l2m->configs, si->sw_if_index);
+ if (config->bridge)
+ {
+ u32 bd_id = l2input_main.bd_configs[config->bd_index].bd_id;
+ vlib_cli_output (vm, " l2 bridge bd_id %d%s%d", bd_id,
+ config->bvi ? " bvi shg " : " shg ",
+ config->shg);
+ }
+ else if (config->xconnect)
+ {
+ vlib_cli_output (vm, " l2 xconnect %U",
+ format_vnet_sw_if_index_name,
+ vnm, config->output_sw_if_index);
+ }
+
+ /* Display any IP4 addressing info */
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (lm4, ia, si->sw_if_index,
+ 1 /* honor unnumbered */,
+ ({
+ r4 = ip_interface_address_get_address (lm4, ia);
+ if (fib4->table_id)
+ {
+ vlib_cli_output (vm, " %U/%d table %d",
+ format_ip4_address, r4,
+ ia->address_length,
+ fib4->table_id);
+ }
+ else
+ {
+ vlib_cli_output (vm, " %U/%d",
+ format_ip4_address, r4,
+ ia->address_length);
+ }
+ }));
+ /* *INDENT-ON* */
+
+ /* Display any IP6 addressing info */
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (lm6, ia, si->sw_if_index,
+ 1 /* honor unnumbered */,
+ ({
+ r6 = ip_interface_address_get_address (lm6, ia);
+ if (fib6->table_id)
+ {
+ vlib_cli_output (vm, " %U/%d table %d",
+ format_ip6_address, r6,
+ ia->address_length,
+ fib6->table_id);
+ }
+ else
+ {
+ vlib_cli_output (vm, " %U/%d",
+ format_ip6_address, r6,
+ ia->address_length);
+ }
+ }));
+ /* *INDENT-ON* */
+ }
+ }
+ else
+ {
+ vec_foreach (si, sorted_sis)
+ {
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_interface, vnm, si);
+ }
+ }
+
+done:
+ vec_free (sorted_sis);
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_sw_interfaces_command, static) = {
+ .path = "show interfaces",
+  .short_help = "show interfaces [address|addr|features|feat|tag] [<if-name1> <if-name2> ...]",
+ .function = show_sw_interfaces,
+};
+/* *INDENT-ON* */
+
+/* Root of all interface commands. */
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vnet_cli_interface_command, static) = {
+ .path = "interface",
+ .short_help = "Interface commands",
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vnet_cli_set_interface_command, static) = {
+ .path = "set interface",
+ .short_help = "Interface commands",
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+clear_interface_counters (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vlib_simple_counter_main_t *sm;
+ vlib_combined_counter_main_t *cm;
+ static vnet_main_t **my_vnet_mains;
+ int i, j, n_counters;
+
+ vec_reset_length (my_vnet_mains);
+
+ for (i = 0; i < vec_len (vnet_mains); i++)
+ {
+ if (vnet_mains[i])
+ vec_add1 (my_vnet_mains, vnet_mains[i]);
+ }
+
+ if (vec_len (vnet_mains) == 0)
+ vec_add1 (my_vnet_mains, vnm);
+
+ n_counters = vec_len (im->combined_sw_if_counters);
+
+ for (j = 0; j < n_counters; j++)
+ {
+ for (i = 0; i < vec_len (my_vnet_mains); i++)
+ {
+ im = &my_vnet_mains[i]->interface_main;
+ cm = im->combined_sw_if_counters + j;
+ vlib_clear_combined_counters (cm);
+ }
+ }
+
+ n_counters = vec_len (im->sw_if_counters);
+
+ for (j = 0; j < n_counters; j++)
+ {
+ for (i = 0; i < vec_len (my_vnet_mains); i++)
+ {
+ im = &my_vnet_mains[i]->interface_main;
+ sm = im->sw_if_counters + j;
+ vlib_clear_simple_counters (sm);
+ }
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (clear_interface_counters_command, static) = {
+ .path = "clear interfaces",
+ .short_help = "Clear interfaces statistics",
+ .function = clear_interface_counters,
+};
+/* *INDENT-ON* */
+
+/**
+ * Parse subinterface names.
+ *
+ * The following subinterface syntax is supported. The first two are for
+ * backward compatibility:
+ *
+ * <intf-name> <id>
+ * - a subinterface with the name <intf-name>.<id>. The subinterface
+ * is a single dot1q vlan with vlan id <id> and exact-match semantics.
+ *
+ * <intf-name> <min_id>-<max_id>
+ * - a set of the above subinterfaces, repeating for each id
+ * in the range <min_id> to <max_id>
+ *
+ * In the following, exact-match semantics (i.e. the number of vlan tags on the
+ * packet must match the number of tags in the configuration) are used only if
+ * the keyword exact-match is present. Non-exact match is the default.
+ *
+ * <intf-name> <id> dot1q <outer_id> [exact-match]
+ * - a subinterface with the name <intf-name>.<id>. The subinterface
+ * is a single dot1q vlan with vlan id <outer_id>.
+ *
+ * <intf-name> <id> dot1q any [exact-match]
+ * - a subinterface with the name <intf-name>.<id>. The subinterface
+ * is a single dot1q vlan with any vlan id.
+ *
+ * <intf-name> <id> dot1q <outer_id> inner-dot1q <inner_id> [exact-match]
+ * - a subinterface with the name <intf-name>.<id>. The subinterface
+ * is a double dot1q vlan with outer vlan id <outer_id> and inner vlan id
+ * <inner_id>.
+ *
+ * <intf-name> <id> dot1q <outer_id> inner-dot1q any [exact-match]
+ * - a subinterface with the name <intf-name>.<id>. The subinterface
+ *     is a double dot1q vlan with outer vlan id <outer_id> and any inner vlan id.
+ *
+ * <intf-name> <id> dot1q any inner-dot1q any [exact-match]
+ *
+ * - a subinterface with the name <intf-name>.<id>. The subinterface
+ * is a double dot1q vlan with any outer vlan id and any inner vlan id.
+ *
+ * For each of the above CLI, there is a duplicate that uses the keyword
+ * "dot1ad" in place of the first "dot1q". These interfaces use ethertype
+ * 0x88a8 in place of 0x8100 for the outer ethertype. Note that for double-
+ * tagged packets the inner ethertype is always 0x8100. Also note that
+ * the dot1q and dot1ad naming spaces are independent, so it is legal to
+ * have both "Gig3/0/0.1 dot1q 100" and "Gig3/0/0.2 dot1ad 100". For example:
+ *
+ * <intf-name> <id> dot1ad <outer_id> inner-dot1q <inner_id> [exact-match]
+ * - a subinterface with the name <intf-name>.<id>. The subinterface
+ * is a double dot1ad vlan with outer vlan id <outer_id> and inner vlan
+ * id <inner_id>.
+ *
+ * <intf-name> <id> untagged
+ * - a subinterface with the name <intf-name>.<id>. The subinterface
+ * has no vlan tags. Only one can be specified per interface.
+ *
+ * <intf-name> <id> default
+ * - a subinterface with the name <intf-name>.<id>. This is associated
+ * with a packet that did not match any other configured subinterface
+ * on this interface. Only one can be specified per interface.
+ */
+
+static clib_error_t *
+parse_vlan_sub_interfaces (unformat_input_t * input,
+ vnet_sw_interface_t * template)
+{
+ clib_error_t *error = 0;
+ u32 inner_vlan, outer_vlan;
+
+ if (unformat (input, "any inner-dot1q any"))
+ {
+ template->sub.eth.flags.two_tags = 1;
+ template->sub.eth.flags.outer_vlan_id_any = 1;
+ template->sub.eth.flags.inner_vlan_id_any = 1;
+ }
+ else if (unformat (input, "any"))
+ {
+ template->sub.eth.flags.one_tag = 1;
+ template->sub.eth.flags.outer_vlan_id_any = 1;
+ }
+ else if (unformat (input, "%d inner-dot1q any", &outer_vlan))
+ {
+ template->sub.eth.flags.two_tags = 1;
+ template->sub.eth.flags.inner_vlan_id_any = 1;
+ template->sub.eth.outer_vlan_id = outer_vlan;
+ }
+ else if (unformat (input, "%d inner-dot1q %d", &outer_vlan, &inner_vlan))
+ {
+ template->sub.eth.flags.two_tags = 1;
+ template->sub.eth.outer_vlan_id = outer_vlan;
+ template->sub.eth.inner_vlan_id = inner_vlan;
+ }
+ else if (unformat (input, "%d", &outer_vlan))
+ {
+ template->sub.eth.flags.one_tag = 1;
+ template->sub.eth.outer_vlan_id = outer_vlan;
+ }
+ else
+ {
+ error = clib_error_return (0, "expected dot1q config, got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ if (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "exact-match"))
+ {
+ template->sub.eth.flags.exact_match = 1;
+ }
+ }
+
+done:
+ return error;
+}
+
+static clib_error_t *
+create_sub_interfaces (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 hw_if_index, sw_if_index;
+ vnet_hw_interface_t *hi;
+ u32 id, id_min, id_max;
+ vnet_sw_interface_t template;
+
+ hw_if_index = ~0;
+ if (!unformat_user (input, unformat_vnet_hw_interface, vnm, &hw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ memset (&template, 0, sizeof (template));
+ template.sub.eth.raw_flags = 0;
+
+ if (unformat (input, "%d default", &id_min))
+ {
+ id_max = id_min;
+ template.sub.eth.flags.default_sub = 1;
+ }
+ else if (unformat (input, "%d untagged", &id_min))
+ {
+ id_max = id_min;
+ template.sub.eth.flags.no_tags = 1;
+ template.sub.eth.flags.exact_match = 1;
+ }
+ else if (unformat (input, "%d dot1q", &id_min))
+ {
+ /* parse dot1q config */
+ id_max = id_min;
+ error = parse_vlan_sub_interfaces (input, &template);
+ if (error)
+ goto done;
+ }
+ else if (unformat (input, "%d dot1ad", &id_min))
+ {
+ /* parse dot1ad config */
+ id_max = id_min;
+ template.sub.eth.flags.dot1ad = 1;
+ error = parse_vlan_sub_interfaces (input, &template);
+ if (error)
+ goto done;
+ }
+ else if (unformat (input, "%d-%d", &id_min, &id_max))
+ {
+ template.sub.eth.flags.one_tag = 1;
+ template.sub.eth.flags.exact_match = 1;
+ if (id_min > id_max)
+ goto id_error;
+ }
+ else if (unformat (input, "%d", &id_min))
+ {
+ id_max = id_min;
+ template.sub.eth.flags.one_tag = 1;
+ template.sub.eth.outer_vlan_id = id_min;
+ template.sub.eth.flags.exact_match = 1;
+ }
+ else
+ {
+ id_error:
+ error = clib_error_return (0, "expected ID or ID MIN-MAX, got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ if (hi->bond_info == VNET_HW_INTERFACE_BOND_INFO_SLAVE)
+ {
+ error =
+ clib_error_return (0,
+			   "not allowed as %v belongs to a BondEthernet interface",
+ hi->name);
+ goto done;
+ }
+
+ for (id = id_min; id <= id_max; id++)
+ {
+ uword *p;
+ vnet_interface_main_t *im = &vnm->interface_main;
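+      /* Hash key: parent sw_if_index in the upper 32 bits, sub id in
+         the lower 32 bits. */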
+ u64 sup_and_sub_key = ((u64) (hi->sw_if_index) << 32) | (u64) id;
+ u64 *kp;
+
+ p = hash_get_mem (im->sw_if_index_by_sup_and_sub, &sup_and_sub_key);
+ if (p)
+ {
+ if (CLIB_DEBUG > 0)
+ clib_warning ("sup sw_if_index %d, sub id %d already exists\n",
+ hi->sw_if_index, id);
+ continue;
+ }
+
+ kp = clib_mem_alloc (sizeof (*kp));
+ *kp = sup_and_sub_key;
+
+ template.type = VNET_SW_INTERFACE_TYPE_SUB;
+ template.flood_class = VNET_FLOOD_CLASS_NORMAL;
+ template.sup_sw_if_index = hi->sw_if_index;
+ template.sub.id = id;
+ if (id_min < id_max)
+ template.sub.eth.outer_vlan_id = id;
+
+ error = vnet_create_sw_interface (vnm, &template, &sw_if_index);
+ if (error)
+ goto done;
+
+ hash_set (hi->sub_interface_sw_if_index_by_id, id, sw_if_index);
+ hash_set_mem (im->sw_if_index_by_sup_and_sub, kp, sw_if_index);
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name,
+ vnet_get_main (), sw_if_index);
+ }
+
+done:
+ return error;
+}
+
+/* *INDENT-OFF* */
+/*?
+ * Create vlan subinterfaces
+ *
+ * @cliexpar
+ * @cliexstart{create sub-interfaces}
+ *
+ * To create a vlan subinterface 11 to process packets on 802.1q VLAN id 11, use:
+ *
+ * vpp# create sub GigabitEthernet2/0/0 11
+ *
+ * This shorthand is equivalent to:
+ * vpp# create sub GigabitEthernet2/0/0 11 dot1q 11 exact-match
+ *
+ * You can specify a subinterface number that is different from the vlan id:
+ * vpp# create sub GigabitEthernet2/0/0 11 dot1q 100
+ *
+ * You can create qinq and q-in-any interfaces:
+ * vpp# create sub GigabitEthernet2/0/0 11 dot1q 100 inner-dot1q 200
+ * vpp# create sub GigabitEthernet2/0/0 12 dot1q 100 inner-dot1q any
+ *
+ * You can also create dot1ad interfaces:
+ * vpp# create sub GigabitEthernet2/0/0 11 dot1ad 11
+ * vpp# create sub GigabitEthernet2/0/0 12 dot1ad 100 inner-dot1q 200
+ *
+ * Subinterfaces can be configured as either exact-match or non-exact match.
+ * Non-exact match is the CLI default. If exact-match is specified,
+ * packets must have the same number of vlan tags as the configuration.
+ * For non-exact-match, packets must have at least that number of tags.
+ * L3 (routed) interfaces must be configured as exact-match.
+ * L2 interfaces are typically configured as non-exact-match.
+ *
+ * For example, a packet with outer vlan 100 and inner 200 would match this interface:
+ * vpp# create sub GigabitEthernet2/0/0 5 dot1q 100
+ *
+ * but would not match this interface:
+ * vpp# create sub GigabitEthernet2/0/0 5 dot1q 100 exact-match
+ *
+ * There are two special subinterfaces that can be configured. Subinterface untagged has no vlan tags:
+ * vpp# create sub GigabitEthernet2/0/0 5 untagged
+ *
+ * The subinterface default matches any packet that does not match any other subinterface:
+ * vpp# create sub GigabitEthernet2/0/0 7 default
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND (create_sub_interfaces_command, static) = {
+ .path = "create sub-interfaces",
+ .short_help = "create sub-interfaces <nn>[-<nn>] [dot1q|dot1ad|default|untagged]",
+ .function = create_sub_interfaces,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_state (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error;
+ u32 sw_if_index, flags;
+
+ sw_if_index = ~0;
+ if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ if (!unformat (input, "%U", unformat_vnet_sw_interface_flags, &flags))
+ {
+ error = clib_error_return (0, "unknown flags `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ error = vnet_sw_interface_set_flags (vnm, sw_if_index, flags);
+ if (error)
+ goto done;
+
+done:
+ return error;
+}
+
+
+/* *INDENT-OFF* */
+/*?
+ * Interface admin up/down
+ *
+ * @cliexpar
+ * @cliexstart{set interface state}
+ * vpp# set interface state GigabitEthernet2/0/0 up
+ * vpp# set interface state GigabitEthernet2/0/0 down
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND (set_state_command, static) = {
+ .path = "set interface state",
+ .short_help = "Set interface state",
+ .function = set_state,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_unnumbered (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 unnumbered_sw_if_index;
+ u32 inherit_from_sw_if_index;
+ vnet_sw_interface_t *si;
+ int is_set = 0;
+ int is_del = 0;
+
+ if (unformat (input, "%U use %U",
+ unformat_vnet_sw_interface, vnm, &unnumbered_sw_if_index,
+ unformat_vnet_sw_interface, vnm, &inherit_from_sw_if_index))
+ is_set = 1;
+ else if (unformat (input, "del %U",
+ unformat_vnet_sw_interface, vnm,
+ &unnumbered_sw_if_index))
+ is_del = 1;
+ else
+ return clib_error_return (0, "parse error '%U'",
+ format_unformat_error, input);
+
+ si = vnet_get_sw_interface (vnm, unnumbered_sw_if_index);
+ if (is_del)
+ {
+ si->flags &= ~(VNET_SW_INTERFACE_FLAG_UNNUMBERED);
+ si->unnumbered_sw_if_index = (u32) ~ 0;
+ ip4_sw_interface_enable_disable (unnumbered_sw_if_index, 0);
+ ip6_sw_interface_enable_disable (unnumbered_sw_if_index, 0);
+ }
+ else if (is_set)
+ {
+ si->flags |= VNET_SW_INTERFACE_FLAG_UNNUMBERED;
+ si->unnumbered_sw_if_index = inherit_from_sw_if_index;
+ ip4_sw_interface_enable_disable (unnumbered_sw_if_index, 1);
+ ip6_sw_interface_enable_disable (unnumbered_sw_if_index, 1);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_unnumbered_command, static) = {
+ .path = "set interface unnumbered",
+ .short_help = "set interface unnumbered [<intfc> use <intfc> | del <intfc>]",
+ .function = set_unnumbered,
+};
+/* *INDENT-ON* */
+
+
+
+static clib_error_t *
+set_hw_class (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ clib_error_t *error;
+ u32 hw_if_index, hw_class_index;
+
+ hw_if_index = ~0;
+ if (!unformat_user (input, unformat_vnet_hw_interface, vnm, &hw_if_index))
+ {
+ error = clib_error_return (0, "unknown hardware interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ if (!unformat_user (input, unformat_hash_string,
+ im->hw_interface_class_by_name, &hw_class_index))
+ {
+ error = clib_error_return (0, "unknown hardware class `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ error = vnet_hw_interface_set_class (vnm, hw_if_index, hw_class_index);
+ if (error)
+ goto done;
+
+done:
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_hw_class_command, static) = {
+ .path = "set interface hw-class",
+ .short_help = "Set interface hardware class",
+ .function = set_hw_class,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+vnet_interface_cli_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (vnet_interface_cli_init);
+
+static clib_error_t *
+renumber_interface_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ u32 hw_if_index;
+ u32 new_dev_instance;
+ vnet_main_t *vnm = vnet_get_main ();
+ int rv;
+
+ if (!unformat_user (input, unformat_vnet_hw_interface, vnm, &hw_if_index))
+ return clib_error_return (0, "unknown hardware interface `%U'",
+ format_unformat_error, input);
+
+ if (!unformat (input, "%d", &new_dev_instance))
+ return clib_error_return (0, "new dev instance missing");
+
+ rv = vnet_interface_name_renumber (hw_if_index, new_dev_instance);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ default:
+ return clib_error_return (0, "vnet_interface_name_renumber returned %d",
+ rv);
+
+ }
+
+ return 0;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (renumber_interface_command, static) = {
+ .path = "renumber interface",
+ .short_help = "renumber interface <if-name> <new-dev-instance>",
+ .function = renumber_interface_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+promiscuous_cmd (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 hw_if_index;
+ u32 flags = ETHERNET_INTERFACE_FLAG_ACCEPT_ALL;
+ ethernet_main_t *em = &ethernet_main;
+ ethernet_interface_t *eif;
+
+ if (unformat (input, "on %U",
+ unformat_vnet_hw_interface, vnm, &hw_if_index))
+ ;
+ else if (unformat (input, "off %U",
+ unformat_ethernet_interface, vnm, &hw_if_index))
+ flags = 0;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+
+ eif = ethernet_get_interface (em, hw_if_index);
+ if (!eif)
+ return clib_error_return (0, "not supported");
+
+ ethernet_set_flags (vnm, hw_if_index, flags);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_promiscuous_cmd, static) = {
+ .path = "set interface promiscuous",
+ .short_help = "set interface promiscuous [on | off] <intfc>",
+ .function = promiscuous_cmd,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+mtu_cmd (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 hw_if_index, mtu;
+ u32 flags = ETHERNET_INTERFACE_FLAG_MTU;
+ ethernet_main_t *em = &ethernet_main;
+
+ if (unformat (input, "%d %U", &mtu,
+ unformat_vnet_hw_interface, vnm, &hw_if_index))
+ {
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+ ethernet_interface_t *eif = ethernet_get_interface (em, hw_if_index);
+
+ if (!eif)
+ return clib_error_return (0, "not supported");
+
+ if (mtu < hi->min_supported_packet_bytes)
+ return clib_error_return (0, "Invalid mtu (%d): "
+ "must be >= min pkt bytes (%d)", mtu,
+ hi->min_supported_packet_bytes);
+
+ if (mtu > hi->max_supported_packet_bytes)
+ return clib_error_return (0, "Invalid mtu (%d): must be <= (%d)", mtu,
+ hi->max_supported_packet_bytes);
+
+ if (hi->max_packet_bytes != mtu)
+ {
+ hi->max_packet_bytes = mtu;
+ ethernet_set_flags (vnm, hw_if_index, flags);
+ }
+ }
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_mtu_cmd, static) = {
+ .path = "set interface mtu",
+ .short_help = "set interface mtu <value> <intfc>",
+ .function = mtu_cmd,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_interface_mac_address (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 sw_if_index = ~0;
+ u64 mac = 0;
+
+ if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+ if (!unformat_user (input, unformat_ethernet_address, &mac))
+ {
+ error = clib_error_return (0, "expected mac address `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+ error = vnet_hw_interface_change_mac_address (vnm, sw_if_index, mac);
+done:
+ return error;
+}
+
+/*?
+ * The '<em>set interface mac address</em>' command sets the MAC address of the given interface.
+ * For NIC interfaces, the hardware must support MAC address changes. As a side effect, the
+ * corresponding MAC addresses in the FIB tables (IPv4 and IPv6) are updated.
+ *
+ * @cliexpar
+ * @parblock
+ * Example of how to change MAC Address of interface:
+ * @cliexcmd{set interface mac address GigabitEthernet0/8/0 aa:bb:cc:dd:ee:01}
+ * @cliexcmd{set interface mac address host-vpp0 aa:bb:cc:dd:ee:02}
+ * @cliexcmd{set interface mac address tap-0 aa:bb:cc:dd:ee:03}
+ * @cliexcmd{set interface mac address pg0 aa:bb:cc:dd:ee:04}
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_mac_address_cmd, static) = {
+ .path = "set interface mac address",
+ .short_help = "set interface mac address <intfc> <mac-address>",
+ .function = set_interface_mac_address,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_tag (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 sw_if_index = ~0;
+ u8 *tag = 0;
+
+ if (!unformat (input, "%U %s", unformat_vnet_sw_interface,
+ vnm, &sw_if_index, &tag))
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+
+ vnet_set_sw_interface_tag (vnm, tag, sw_if_index);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_tag_command, static) = {
+ .path = "set interface tag",
+ .short_help = "set interface tag <intfc> <tag>",
+ .function = set_tag,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+clear_tag (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 sw_if_index = ~0;
+
+ if (!unformat (input, "%U", unformat_vnet_sw_interface, vnm, &sw_if_index))
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+
+ vnet_clear_sw_interface_tag (vnm, sw_if_index);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (clear_tag_command, static) = {
+ .path = "clear interface tag",
+ .short_help = "clear interface tag <intfc>",
+ .function = clear_tag,
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/interface_format.c b/src/vnet/interface_format.c
new file mode 100644
index 00000000000..b3a30622031
--- /dev/null
+++ b/src/vnet/interface_format.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * interface_format.c: interface formatting
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/vnet.h>
+#include <vppinfra/bitmap.h>
+
+u8 *
+format_vnet_sw_interface_flags (u8 * s, va_list * args)
+{
+ u32 flags = va_arg (*args, u32);
+
+ if (flags & VNET_SW_INTERFACE_FLAG_BOND_SLAVE)
+ s = format (s, "bond-slave");
+ else
+ {
+ s = format (s, "%s",
+ (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? "up" : "down");
+ if (flags & VNET_SW_INTERFACE_FLAG_PUNT)
+ s = format (s, "/punt");
+ }
+
+ return s;
+}
+
+u8 *
+format_vnet_hw_interface (u8 * s, va_list * args)
+{
+ vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+ vnet_hw_interface_t *hi = va_arg (*args, vnet_hw_interface_t *);
+ vnet_hw_interface_class_t *hw_class;
+ vnet_device_class_t *dev_class;
+ int verbose = va_arg (*args, int);
+ uword indent;
+
+ if (!hi)
+ return format (s, "%=32s%=6s%=8s%s", "Name", "Idx", "Link", "Hardware");
+
+ indent = format_get_indent (s);
+
+ s = format (s, "%-32v%=6d", hi->name, hi->hw_if_index);
+
+ if (hi->bond_info == VNET_HW_INTERFACE_BOND_INFO_SLAVE)
+ s = format (s, "%=8s", "slave");
+ else
+ s = format (s, "%=8s",
+ hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP ? "up" : "down");
+
+ hw_class = vnet_get_hw_interface_class (vnm, hi->hw_class_index);
+ dev_class = vnet_get_device_class (vnm, hi->dev_class_index);
+
+ if (hi->bond_info && (hi->bond_info != VNET_HW_INTERFACE_BOND_INFO_SLAVE))
+ {
+ int hw_idx;
+ s = format (s, "Slave-Idx:");
+ clib_bitmap_foreach (hw_idx, hi->bond_info, s =
+ format (s, " %d", hw_idx));
+ }
+ else if (dev_class->format_device_name)
+ s = format (s, "%U", dev_class->format_device_name, hi->dev_instance);
+ else
+ s = format (s, "%s%d", dev_class->name, hi->dev_instance);
+
+ if (verbose)
+ {
+ if (hw_class->format_device)
+ s = format (s, "\n%U%U",
+ format_white_space, indent + 2,
+ hw_class->format_device, hi->hw_if_index, verbose);
+ else
+ {
+ s = format (s, "\n%U%s",
+ format_white_space, indent + 2, hw_class->name);
+ if (hw_class->format_address && vec_len (hi->hw_address) > 0)
+ s = format (s, " address %U",
+ hw_class->format_address, hi->hw_address);
+ }
+
+ if (dev_class->format_device)
+ s = format (s, "\n%U%U",
+ format_white_space, indent + 2,
+ dev_class->format_device, hi->dev_instance, verbose);
+ }
+
+ return s;
+}
+
+u8 *
+format_vnet_sw_interface_name (u8 * s, va_list * args)
+{
+ vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+ vnet_sw_interface_t *si = va_arg (*args, vnet_sw_interface_t *);
+ vnet_sw_interface_t *si_sup =
+ vnet_get_sup_sw_interface (vnm, si->sw_if_index);
+ vnet_hw_interface_t *hi_sup;
+
+ ASSERT (si_sup->type == VNET_SW_INTERFACE_TYPE_HARDWARE);
+ hi_sup = vnet_get_hw_interface (vnm, si_sup->hw_if_index);
+
+ s = format (s, "%v", hi_sup->name);
+
+ if (si->type != VNET_SW_INTERFACE_TYPE_HARDWARE)
+ s = format (s, ".%d", si->sub.id);
+
+ return s;
+}
+
+u8 *
+format_vnet_sw_if_index_name (u8 * s, va_list * args)
+{
+ vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+ u32 sw_if_index = va_arg (*args, u32);
+ return format (s, "%U",
+ format_vnet_sw_interface_name, vnm,
+ vnet_get_sw_interface (vnm, sw_if_index));
+}
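+
+/*
+ * Illustrative use, e.g. printing a name when only the index is at hand
+ * (the sw_if_index value is hypothetical):
+ *
+ *   vlib_cli_output (vm, "%U", format_vnet_sw_if_index_name,
+ *                    vnet_get_main (), sw_if_index);
+ */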
+
+u8 *
+format_vnet_sw_interface_cntrs (u8 * s, vnet_interface_main_t * im,
+ vnet_sw_interface_t * si)
+{
+ uword indent, n_printed;
+ int i, j, n_counters;
+ static vnet_main_t **my_vnet_mains;
+
+ vec_reset_length (my_vnet_mains);
+
+ indent = format_get_indent (s);
+ n_printed = 0;
+
+ {
+ vlib_combined_counter_main_t *cm;
+ vlib_counter_t v, vtotal;
+ u8 *n = 0;
+
+ for (i = 0; i < vec_len (vnet_mains); i++)
+ {
+ if (vnet_mains[i])
+ vec_add1 (my_vnet_mains, vnet_mains[i]);
+ }
+
+ if (vec_len (my_vnet_mains) == 0)
+ vec_add1 (my_vnet_mains, &vnet_main);
+
+ /* Each vnet_main_t has its own copy of the interface counters */
+ n_counters = vec_len (im->combined_sw_if_counters);
+
+ /* rx, tx counters... */
+ for (j = 0; j < n_counters; j++)
+ {
+ vtotal.packets = 0;
+ vtotal.bytes = 0;
+
+ for (i = 0; i < vec_len (my_vnet_mains); i++)
+ {
+ im = &my_vnet_mains[i]->interface_main;
+ cm = im->combined_sw_if_counters + j;
+ vlib_get_combined_counter (cm, si->sw_if_index, &v);
+ vtotal.packets += v.packets;
+ vtotal.bytes += v.bytes;
+ }
+
+ /* Only display non-zero counters. */
+ if (vtotal.packets == 0)
+ continue;
+
+ if (n_printed > 0)
+ s = format (s, "\n%U", format_white_space, indent);
+ n_printed += 2;
+
+ if (n)
+ _vec_len (n) = 0;
+ n = format (n, "%s packets", cm->name);
+ s = format (s, "%-16v%16Ld", n, vtotal.packets);
+
+ _vec_len (n) = 0;
+ n = format (n, "%s bytes", cm->name);
+ s = format (s, "\n%U%-16v%16Ld",
+ format_white_space, indent, n, vtotal.bytes);
+ }
+ vec_free (n);
+ }
+
+ {
+ vlib_simple_counter_main_t *cm;
+ u64 v, vtotal;
+
+ n_counters = vec_len (im->sw_if_counters);
+
+ for (j = 0; j < n_counters; j++)
+ {
+ vtotal = 0;
+
+ for (i = 0; i < vec_len (my_vnet_mains); i++)
+ {
+ im = &my_vnet_mains[i]->interface_main;
+ cm = im->sw_if_counters + j;
+
+ v = vlib_get_simple_counter (cm, si->sw_if_index);
+ vtotal += v;
+ }
+
+ /* Only display non-zero counters. */
+ if (vtotal == 0)
+ continue;
+
+ if (n_printed > 0)
+ s = format (s, "\n%U", format_white_space, indent);
+ n_printed += 1;
+
+ s = format (s, "%-16s%16Ld", cm->name, vtotal);
+ }
+ }
+
+ return s;
+}
+
+u8 *
+format_vnet_sw_interface (u8 * s, va_list * args)
+{
+ vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+ vnet_sw_interface_t *si = va_arg (*args, vnet_sw_interface_t *);
+ vnet_interface_main_t *im = &vnm->interface_main;
+
+ if (!si)
+ return format (s, "%=32s%=5s%=16s%=16s%=16s",
+ "Name", "Idx", "State", "Counter", "Count");
+
+ s = format (s, "%-32U%=5d%=16U",
+ format_vnet_sw_interface_name, vnm, si, si->sw_if_index,
+ format_vnet_sw_interface_flags, si->flags);
+
+ s = format_vnet_sw_interface_cntrs (s, im, si);
+
+ return s;
+}
+
+u8 *
+format_vnet_sw_interface_name_override (u8 * s, va_list * args)
+{
+ vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+ vnet_sw_interface_t *si = va_arg (*args, vnet_sw_interface_t *);
+ /* caller supplied display name for this interface */
+ u8 *name = va_arg (*args, u8 *);
+ vnet_interface_main_t *im = &vnm->interface_main;
+
+ if (!si)
+ return format (s, "%=32s%=5s%=16s%=16s%=16s",
+ "Name", "Idx", "State", "Counter", "Count");
+
+ s = format (s, "%-32v%=5d%=16U",
+ name, si->sw_if_index,
+ format_vnet_sw_interface_flags, si->flags);
+
+ s = format_vnet_sw_interface_cntrs (s, im, si);
+
+ return s;
+}
+
+uword
+unformat_vnet_hw_interface (unformat_input_t * input, va_list * args)
+{
+ vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+ u32 *hw_if_index = va_arg (*args, u32 *);
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_device_class_t *c;
+
+ /* Try per device class functions first. */
+ vec_foreach (c, im->device_classes)
+ {
+ if (c->unformat_device_name
+ && unformat_user (input, c->unformat_device_name, hw_if_index))
+ return 1;
+ }
+
+ return unformat_user (input, unformat_hash_vec_string,
+ im->hw_interface_by_name, hw_if_index);
+}
+
+uword
+unformat_vnet_sw_interface (unformat_input_t * input, va_list * args)
+{
+ vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+ u32 *result = va_arg (*args, u32 *);
+ vnet_hw_interface_t *hi;
+ u32 hw_if_index, id, id_specified;
+ u8 *if_name = 0;
+ uword *p, error = 0;
+
+ id = ~0;
+ if (unformat (input, "%_%v.%d%_", &if_name, &id)
+ && ((p = hash_get (vnm->interface_main.hw_interface_by_name, if_name))))
+ {
+ hw_if_index = p[0];
+ id_specified = 1;
+ }
+ else
+ if (unformat (input, "%U", unformat_vnet_hw_interface, vnm, &hw_if_index))
+ id_specified = 0;
+ else
+ goto done;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ if (!id_specified)
+ {
+ *result = hi->sw_if_index;
+ }
+ else
+ {
+ if (!(p = hash_get (hi->sub_interface_sw_if_index_by_id, id)))
+ goto done;
+ *result = p[0];
+ }
+ error = 1;
+done:
+ vec_free (if_name);
+ return error;
+}
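+
+/*
+ * Parsing sketch (illustrative; both names are hypothetical): accepts a
+ * hardware interface name ("GigabitEthernet0/8/0") or a sub-interface
+ * name with a dotted id ("GigabitEthernet0/8/0.100"):
+ *
+ *   u32 sw_if_index;
+ *   if (unformat (input, "%U", unformat_vnet_sw_interface,
+ *                 vnet_get_main (), &sw_if_index))
+ *     ; // sw_if_index is now valid
+ */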
+
+uword
+unformat_vnet_sw_interface_flags (unformat_input_t * input, va_list * args)
+{
+ u32 *result = va_arg (*args, u32 *);
+ u32 flags = 0;
+
+ if (unformat (input, "up"))
+ flags |= VNET_SW_INTERFACE_FLAG_ADMIN_UP;
+ else if (unformat (input, "down"))
+ flags &= ~VNET_SW_INTERFACE_FLAG_ADMIN_UP;
+ else if (unformat (input, "punt"))
+ flags |= VNET_SW_INTERFACE_FLAG_PUNT;
+ else if (unformat (input, "enable"))
+ flags &= ~VNET_SW_INTERFACE_FLAG_PUNT;
+ else
+ return 0;
+
+ *result = flags;
+ return 1;
+}
+
+uword
+unformat_vnet_hw_interface_flags (unformat_input_t * input, va_list * args)
+{
+ u32 *result = va_arg (*args, u32 *);
+ u32 flags = 0;
+
+ if (unformat (input, "up"))
+ flags |= VNET_HW_INTERFACE_FLAG_LINK_UP;
+ else if (unformat (input, "down"))
+ flags &= ~VNET_HW_INTERFACE_FLAG_LINK_UP;
+ else
+ return 0;
+
+ *result = flags;
+ return 1;
+}
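+
+/*
+ * Parsing sketch (illustrative): both flag parsers map keywords onto a
+ * flags word, e.g. in a CLI handler for "up" / "down":
+ *
+ *   u32 flags;
+ *   if (unformat (input, "%U", unformat_vnet_sw_interface_flags, &flags))
+ *     vnet_sw_interface_set_flags (vnm, sw_if_index, flags);
+ *
+ * Note "down" yields 0 rather than a distinct bit; the admin-up bit is
+ * simply left clear.
+ */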
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/interface_funcs.h b/src/vnet/interface_funcs.h
new file mode 100644
index 00000000000..b84d151c86d
--- /dev/null
+++ b/src/vnet/interface_funcs.h
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * interface_funcs.h: VNET interfaces/sub-interfaces exported functions
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_vnet_interface_funcs_h
+#define included_vnet_interface_funcs_h
+
+always_inline vnet_hw_interface_t *
+vnet_get_hw_interface (vnet_main_t * vnm, u32 hw_if_index)
+{
+ return pool_elt_at_index (vnm->interface_main.hw_interfaces, hw_if_index);
+}
+
+always_inline vnet_sw_interface_t *
+vnet_get_sw_interface (vnet_main_t * vnm, u32 sw_if_index)
+{
+ return pool_elt_at_index (vnm->interface_main.sw_interfaces, sw_if_index);
+}
+
+always_inline vnet_sw_interface_t *
+vnet_get_hw_sw_interface (vnet_main_t * vnm, u32 hw_if_index)
+{
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, hw->sw_if_index);
+ ASSERT (sw->type == VNET_SW_INTERFACE_TYPE_HARDWARE);
+ return sw;
+}
+
+always_inline vnet_sw_interface_t *
+vnet_get_sup_sw_interface (vnet_main_t * vnm, u32 sw_if_index)
+{
+ vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
+ if (sw->type == VNET_SW_INTERFACE_TYPE_SUB)
+ sw = vnet_get_sw_interface (vnm, sw->sup_sw_if_index);
+ return sw;
+}
+
+always_inline vnet_hw_interface_t *
+vnet_get_sup_hw_interface (vnet_main_t * vnm, u32 sw_if_index)
+{
+ vnet_sw_interface_t *sw = vnet_get_sup_sw_interface (vnm, sw_if_index);
+ ASSERT (sw->type == VNET_SW_INTERFACE_TYPE_HARDWARE);
+ return vnet_get_hw_interface (vnm, sw->hw_if_index);
+}
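+
+/*
+ * Accessor sketch (illustrative): resolving the hardware interface that
+ * carries an arbitrary software interface, e.g. a VLAN sub-interface:
+ *
+ *   vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ *
+ * For a sub-interface this hops to the super software interface first,
+ * then to its hardware interface.
+ */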
+
+always_inline vnet_hw_interface_class_t *
+vnet_get_hw_interface_class (vnet_main_t * vnm, u32 hw_class_index)
+{
+ return vec_elt_at_index (vnm->interface_main.hw_interface_classes,
+ hw_class_index);
+}
+
+always_inline vnet_device_class_t *
+vnet_get_device_class (vnet_main_t * vnm, u32 dev_class_index)
+{
+ return vec_elt_at_index (vnm->interface_main.device_classes,
+ dev_class_index);
+}
+
+static inline u8 *
+vnet_get_sw_interface_tag (vnet_main_t * vnm, u32 sw_if_index)
+{
+ uword *p;
+ p = hash_get (vnm->interface_tag_by_sw_if_index, sw_if_index);
+ if (p)
+ return ((u8 *) p[0]);
+ return 0;
+}
+
+static inline void
+vnet_set_sw_interface_tag (vnet_main_t * vnm, u8 * tag, u32 sw_if_index)
+{
+ uword *p;
+ p = hash_get (vnm->interface_tag_by_sw_if_index, sw_if_index);
+ if (p)
+ {
+ u8 *oldtag = (u8 *) p[0];
+ hash_unset (vnm->interface_tag_by_sw_if_index, sw_if_index);
+ vec_free (oldtag);
+ }
+
+ hash_set (vnm->interface_tag_by_sw_if_index, sw_if_index, tag);
+}
+
+static inline void
+vnet_clear_sw_interface_tag (vnet_main_t * vnm, u32 sw_if_index)
+{
+ uword *p;
+ p = hash_get (vnm->interface_tag_by_sw_if_index, sw_if_index);
+ if (p)
+ {
+ u8 *oldtag = (u8 *) p[0];
+ hash_unset (vnm->interface_tag_by_sw_if_index, sw_if_index);
+ vec_free (oldtag);
+ }
+}
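+
+/*
+ * Tag lifecycle sketch (illustrative; the tag text is hypothetical).
+ * vnet_set_sw_interface_tag() takes ownership of the vector passed in
+ * and frees any previously stored tag:
+ *
+ *   u8 *tag = format (0, "uplink-to-core%c", 0);
+ *   vnet_set_sw_interface_tag (vnm, tag, sw_if_index);
+ *   u8 *t = vnet_get_sw_interface_tag (vnm, sw_if_index); // == tag
+ *   vnet_clear_sw_interface_tag (vnm, sw_if_index);       // frees it
+ */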
+
+/**
+ * Callback type for walking the SW interface indices on a HW interface
+ */
+typedef void (*vnet_hw_sw_interface_walk_t) (vnet_main_t * vnm,
+ u32 sw_if_index, void *ctx);
+
+/**
+ * @brief
+ * Walk the SW interfaces on a HW interface - this is the super
+ * interface and any sub-interfaces.
+ */
+void vnet_hw_interface_walk_sw (vnet_main_t * vnm,
+ u32 hw_if_index,
+ vnet_hw_sw_interface_walk_t fn, void *ctx);
+
+/* Register a hardware interface instance. */
+u32 vnet_register_interface (vnet_main_t * vnm,
+ u32 dev_class_index,
+ u32 dev_instance,
+ u32 hw_class_index, u32 hw_instance);
+
+/* Create a software interface given a template. */
+clib_error_t *vnet_create_sw_interface (vnet_main_t * vnm,
+ vnet_sw_interface_t * template,
+ u32 * sw_if_index);
+
+void vnet_delete_hw_interface (vnet_main_t * vnm, u32 hw_if_index);
+void vnet_delete_sw_interface (vnet_main_t * vnm, u32 sw_if_index);
+int vnet_sw_interface_is_p2p (vnet_main_t * vnm, u32 sw_if_index);
+
+always_inline uword
+vnet_sw_interface_get_flags (vnet_main_t * vnm, u32 sw_if_index)
+{
+ vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
+ return sw->flags;
+}
+
+always_inline uword
+vnet_sw_interface_is_admin_up (vnet_main_t * vnm, u32 sw_if_index)
+{
+ return (vnet_sw_interface_get_flags (vnm, sw_if_index) &
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+}
+
+always_inline uword
+vnet_hw_interface_get_flags (vnet_main_t * vnm, u32 hw_if_index)
+{
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ return hw->flags;
+}
+
+always_inline uword
+vnet_hw_interface_get_mtu (vnet_main_t * vnm, u32 hw_if_index,
+ vlib_rx_or_tx_t dir)
+{
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ return hw->max_l3_packet_bytes[dir];
+}
+
+always_inline uword
+vnet_sw_interface_get_mtu (vnet_main_t * vnm, u32 sw_if_index,
+ vlib_rx_or_tx_t dir)
+{
+ vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ return (hw->max_l3_packet_bytes[dir]);
+}
+
+always_inline uword
+vnet_hw_interface_is_link_up (vnet_main_t * vnm, u32 hw_if_index)
+{
+ return (vnet_hw_interface_get_flags (vnm, hw_if_index) &
+ VNET_HW_INTERFACE_FLAG_LINK_UP) != 0;
+}
+
+always_inline vlib_frame_t *
+vnet_get_frame_to_sw_interface (vnet_main_t * vnm, u32 sw_if_index)
+{
+ vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ return vlib_get_frame_to_node (vnm->vlib_main, hw->output_node_index);
+}
+
+always_inline void
+vnet_put_frame_to_sw_interface (vnet_main_t * vnm, u32 sw_if_index,
+ vlib_frame_t * f)
+{
+ vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ return vlib_put_frame_to_node (vnm->vlib_main, hw->output_node_index, f);
+}
+
+/* Change interface flags (e.g. up, down, enable, disable). */
+clib_error_t *vnet_hw_interface_set_flags (vnet_main_t * vnm, u32 hw_if_index,
+ u32 flags);
+
+/* Change interface flags (e.g. up, down, enable, disable). */
+clib_error_t *vnet_sw_interface_set_flags (vnet_main_t * vnm, u32 sw_if_index,
+ u32 flags);
+
+/* Change interface class. */
+clib_error_t *vnet_hw_interface_set_class (vnet_main_t * vnm, u32 hw_if_index,
+ u32 new_hw_class_index);
+
+/* Redirect rx pkts to node */
+int vnet_hw_interface_rx_redirect_to_node (vnet_main_t * vnm, u32 hw_if_index,
+ u32 node_index);
+
+void vnet_hw_interface_init_for_class (vnet_main_t * vnm, u32 hw_if_index,
+ u32 hw_class_index, u32 hw_instance);
+
+/* Rename interface */
+clib_error_t *vnet_rename_interface (vnet_main_t * vnm, u32 hw_if_index,
+ char *new_name);
+
+/* Change interface MAC address. */
+clib_error_t *vnet_hw_interface_change_mac_address (vnet_main_t * vnm,
+ u32 hw_if_index,
+ u64 mac_address);
+
+/* Formats sw/hw interface. */
+format_function_t format_vnet_hw_interface;
+format_function_t format_vnet_sw_interface;
+format_function_t format_vnet_sw_interface_name;
+format_function_t format_vnet_sw_interface_name_override;
+format_function_t format_vnet_sw_if_index_name;
+format_function_t format_vnet_sw_interface_flags;
+
+/* Parses sw/hw interface name -> index. */
+unformat_function_t unformat_vnet_sw_interface;
+unformat_function_t unformat_vnet_hw_interface;
+
+/* Parses interface flags (up, down, enable, disable, etc.) */
+unformat_function_t unformat_vnet_hw_interface_flags;
+unformat_function_t unformat_vnet_sw_interface_flags;
+
+/* Node runtime for interface output function. */
+typedef struct
+{
+ u32 hw_if_index;
+ u32 sw_if_index;
+ u32 dev_instance;
+ u32 is_deleted;
+} vnet_interface_output_runtime_t;
+
+/* Interface output functions. */
+void *vnet_interface_output_node_multiarch_select (void);
+void *vnet_interface_output_node_flatten_multiarch_select (void);
+
+word vnet_sw_interface_compare (vnet_main_t * vnm, uword sw_if_index0,
+ uword sw_if_index1);
+word vnet_hw_interface_compare (vnet_main_t * vnm, uword hw_if_index0,
+ uword hw_if_index1);
+
+typedef enum
+{
+ VNET_INTERFACE_OUTPUT_NEXT_DROP,
+ VNET_INTERFACE_OUTPUT_NEXT_TX,
+} vnet_interface_output_next_t;
+
+typedef enum
+{
+ VNET_INTERFACE_TX_NEXT_DROP,
+ VNET_INTERFACE_TX_N_NEXT,
+} vnet_interface_tx_next_t;
+
+#define VNET_SIMULATED_ETHERNET_TX_NEXT_ETHERNET_INPUT VNET_INTERFACE_TX_N_NEXT
+
+typedef enum
+{
+ VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DOWN,
+ VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DELETED,
+} vnet_interface_output_error_t;
+
+/* Format for interface output traces. */
+u8 *format_vnet_interface_output_trace (u8 * s, va_list * va);
+
+serialize_function_t serialize_vnet_interface_state,
+ unserialize_vnet_interface_state;
+
+#endif /* included_vnet_interface_funcs_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/interface_output.c b/src/vnet/interface_output.c
new file mode 100644
index 00000000000..475b0b935af
--- /dev/null
+++ b/src/vnet/interface_output.c
@@ -0,0 +1,1404 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * interface_output.c: interface output node
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/feature/feature.h>
+
+typedef struct
+{
+ u32 sw_if_index;
+ u8 data[128 - sizeof (u32)];
+}
+interface_output_trace_t;
+
+u8 *
+format_vnet_interface_output_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ vlib_node_t *node = va_arg (*va, vlib_node_t *);
+ interface_output_trace_t *t = va_arg (*va, interface_output_trace_t *);
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_sw_interface_t *si;
+ uword indent;
+
+ if (t->sw_if_index != (u32) ~ 0)
+ {
+ indent = format_get_indent (s);
+
+ if (pool_is_free_index
+ (vnm->interface_main.sw_interfaces, t->sw_if_index))
+ {
+ /* the interface may have been deleted by the time the trace is printed */
+ s = format (s, "sw_if_index: %d\n%U%U",
+ t->sw_if_index,
+ format_white_space, indent,
+ node->format_buffer ? node->format_buffer : format_hex_bytes,
+ t->data, sizeof (t->data));
+ }
+ else
+ {
+ si = vnet_get_sw_interface (vnm, t->sw_if_index);
+
+ s = format (s, "%U\n%U%U",
+ format_vnet_sw_interface_name, vnm, si,
+ format_white_space, indent,
+ node->format_buffer ? node->format_buffer : format_hex_bytes,
+ t->data, sizeof (t->data));
+ }
+ }
+ return s;
+}
+
+static void
+vnet_interface_output_trace (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, uword n_buffers)
+{
+ u32 n_left, *from;
+
+ n_left = n_buffers;
+ from = vlib_frame_args (frame);
+
+ while (n_left >= 4)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ interface_output_trace_t *t0, *t1;
+
+ /* Prefetch next iteration. */
+ vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
+ vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
+
+ bi0 = from[0];
+ bi1 = from[1];
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ clib_memcpy (t0->data, vlib_buffer_get_current (b0),
+ sizeof (t0->data));
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
+ t1->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_TX];
+ clib_memcpy (t1->data, vlib_buffer_get_current (b1),
+ sizeof (t1->data));
+ }
+ from += 2;
+ n_left -= 2;
+ }
+
+ while (n_left >= 1)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ interface_output_trace_t *t0;
+
+ bi0 = from[0];
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ clib_memcpy (t0->data, vlib_buffer_get_current (b0),
+ sizeof (t0->data));
+ }
+ from += 1;
+ n_left -= 1;
+ }
+}
+
+static never_inline u32
+slow_path (vlib_main_t * vm,
+ u32 bi,
+ vlib_buffer_t * b,
+ u32 n_left_to_tx, u32 * to_tx, u32 * n_slow_bytes_result)
+{
+ /* We've already enqueued a single buffer. */
+ u32 n_buffers = 0;
+ u32 n_slow_bytes = 0;
+
+ while (n_left_to_tx > 0)
+ {
+ to_tx[0] = bi;
+ to_tx += 1;
+ n_left_to_tx -= 1;
+ n_buffers += 1;
+ n_slow_bytes += vlib_buffer_length_in_chain (vm, b);
+
+ /* Be grumpy about zero length buffers for benefit of
+ driver tx function. */
+ ASSERT (b->current_length > 0);
+
+ if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
+ break;
+
+ bi = b->next_buffer;
+ b = vlib_get_buffer (vm, bi);
+ }
+
+ /* Ran out of space in next frame trying to enqueue buffers? */
+ if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
+ return 0;
+
+ *n_slow_bytes_result = n_slow_bytes;
+ return n_buffers;
+}
+
+/*
+ * Increment TX stats. Roll up consecutive increments to the same sw_if_index
+ * into one increment.
+ */
+static_always_inline void
+incr_output_stats (vnet_main_t * vnm,
+ u32 cpu_index,
+ u32 length,
+ u32 sw_if_index,
+ u32 * last_sw_if_index, u32 * n_packets, u32 * n_bytes)
+{
+ vnet_interface_main_t *im;
+
+ if (PREDICT_TRUE (sw_if_index == *last_sw_if_index))
+ {
+ *n_packets += 1;
+ *n_bytes += length;
+ }
+ else
+ {
+ if (PREDICT_TRUE (*last_sw_if_index != ~0))
+ {
+ im = &vnm->interface_main;
+
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_TX,
+ cpu_index,
+ *last_sw_if_index,
+ *n_packets, *n_bytes);
+ }
+ *last_sw_if_index = sw_if_index;
+ *n_packets = 1;
+ *n_bytes = length;
+ }
+}
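+
+/*
+ * Roll-up sketch (illustrative): three packets on sw_if_index 5
+ * followed by one on 6 produce exactly two combined-counter updates:
+ * (3 packets, summed bytes) for 5 when the index changes, and one for
+ * 6 when the caller flushes with sw_if_index ~0.
+ */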
+
+
+/* Interface output functions. */
+uword
+vnet_interface_output_node_flatten (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
+ vnet_sw_interface_t *si;
+ vnet_hw_interface_t *hi;
+ u32 n_left_to_tx, *from, *from_end, *to_tx;
+ u32 n_bytes, n_buffers, n_packets;
+ u32 last_sw_if_index;
+ u32 cpu_index = vm->cpu_index;
+
+ n_buffers = frame->n_vectors;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vnet_interface_output_trace (vm, node, frame, n_buffers);
+
+ from = vlib_frame_args (frame);
+
+ if (rt->is_deleted)
+ return vlib_error_drop_buffers (vm, node, from,
+ /* buffer stride */ 1,
+ n_buffers,
+ VNET_INTERFACE_OUTPUT_NEXT_DROP,
+ node->node_index,
+ VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DELETED);
+
+ si = vnet_get_sw_interface (vnm, rt->sw_if_index);
+ hi = vnet_get_sup_hw_interface (vnm, rt->sw_if_index);
+ if (!(si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ||
+ !(hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
+ {
+ vlib_simple_counter_main_t *cm;
+
+ cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
+ VNET_INTERFACE_COUNTER_TX_ERROR);
+ vlib_increment_simple_counter (cm, cpu_index,
+ rt->sw_if_index, n_buffers);
+ return vlib_error_drop_buffers (vm, node, from,
+ /* buffer stride */ 1,
+ n_buffers,
+ VNET_INTERFACE_OUTPUT_NEXT_DROP,
+ node->node_index,
+ VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DOWN);
+ }
+
+ from_end = from + n_buffers;
+
+ /* Total byte count of all buffers. */
+ n_bytes = 0;
+ n_packets = 0;
+ last_sw_if_index = ~0;
+
+ while (from < from_end)
+ {
+ /* Get a new next frame since the previous incomplete frame may have
+ fewer than VNET_FRAME_SIZE vectors in it. */
+ vlib_get_new_next_frame (vm, node, VNET_INTERFACE_OUTPUT_NEXT_TX,
+ to_tx, n_left_to_tx);
+
+ while (from + 4 <= from_end && n_left_to_tx >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+
+ /* Prefetch next iteration. */
+ vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
+ vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_tx[0] = bi0;
+ to_tx[1] = bi1;
+ from += 2;
+ to_tx += 2;
+ n_left_to_tx -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* Be grumpy about zero length buffers for benefit of
+ driver tx function. */
+ ASSERT (b0->current_length > 0);
+ ASSERT (b1->current_length > 0);
+
+ if (PREDICT_FALSE
+ ((b0->flags | b1->flags) & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ u32 n_buffers, n_slow_bytes, i;
+
+ /* Undo. */
+ from -= 2;
+ to_tx -= 2;
+ n_left_to_tx += 2;
+
+ /* Do slow path two times. */
+ for (i = 0; i < 2; i++)
+ {
+ u32 bi = i ? bi1 : bi0;
+ vlib_buffer_t *b = i ? b1 : b0;
+
+ n_buffers = slow_path (vm, bi, b,
+ n_left_to_tx, to_tx, &n_slow_bytes);
+
+ /* Not enough room for single packet? */
+ if (n_buffers == 0)
+ goto put;
+
+ from += 1;
+ to_tx += n_buffers;
+ n_left_to_tx -= n_buffers;
+ incr_output_stats (vnm, cpu_index, n_slow_bytes,
+ vnet_buffer (b)->sw_if_index[VLIB_TX],
+ &last_sw_if_index, &n_packets, &n_bytes);
+ }
+ }
+ else
+ {
+ incr_output_stats (vnm, cpu_index,
+ vlib_buffer_length_in_chain (vm, b0),
+ vnet_buffer (b0)->sw_if_index[VLIB_TX],
+ &last_sw_if_index, &n_packets, &n_bytes);
+ incr_output_stats (vnm, cpu_index,
+ vlib_buffer_length_in_chain (vm, b1),
+ vnet_buffer (b1)->sw_if_index[VLIB_TX],
+ &last_sw_if_index, &n_packets, &n_bytes);
+ }
+ }
+
+ while (from + 1 <= from_end && n_left_to_tx >= 1)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+
+ bi0 = from[0];
+ to_tx[0] = bi0;
+ from += 1;
+ to_tx += 1;
+ n_left_to_tx -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /* Be grumpy about zero length buffers for benefit of
+ driver tx function. */
+ ASSERT (b0->current_length > 0);
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ u32 n_buffers, n_slow_bytes;
+
+ /* Undo. */
+ from -= 1;
+ to_tx -= 1;
+ n_left_to_tx += 1;
+
+ n_buffers = slow_path (vm, bi0, b0,
+ n_left_to_tx, to_tx, &n_slow_bytes);
+
+ /* Not enough room for single packet? */
+ if (n_buffers == 0)
+ goto put;
+
+ from += 1;
+ to_tx += n_buffers;
+ n_left_to_tx -= n_buffers;
+ }
+ incr_output_stats (vnm, cpu_index,
+ vlib_buffer_length_in_chain (vm, b0),
+ vnet_buffer (b0)->sw_if_index[VLIB_TX],
+ &last_sw_if_index, &n_packets, &n_bytes);
+ }
+
+ put:
+ vlib_put_next_frame (vm, node, VNET_INTERFACE_OUTPUT_NEXT_TX,
+ n_left_to_tx);
+ }
+
+ /* Final update of interface stats. */
+ incr_output_stats (vnm, cpu_index, 0, ~0, /* ~0 will flush stats */
+ &last_sw_if_index, &n_packets, &n_bytes);
+
+ return n_buffers;
+}
+
+VLIB_NODE_FUNCTION_MULTIARCH_CLONE (vnet_interface_output_node_flatten);
+CLIB_MULTIARCH_SELECT_FN (vnet_interface_output_node_flatten);
+
+uword
+vnet_interface_output_node (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
+ vnet_sw_interface_t *si;
+ vnet_hw_interface_t *hi;
+ u32 n_left_to_tx, *from, *from_end, *to_tx;
+ u32 n_bytes, n_buffers, n_packets;
+ u32 n_bytes_b0, n_bytes_b1;
+ u32 cpu_index = vm->cpu_index;
+ vnet_interface_main_t *im = &vnm->interface_main;
+ u32 next_index = VNET_INTERFACE_OUTPUT_NEXT_TX;
+ u32 current_config_index = ~0;
+ u8 arc = im->output_feature_arc_index;
+
+ n_buffers = frame->n_vectors;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vnet_interface_output_trace (vm, node, frame, n_buffers);
+
+ from = vlib_frame_args (frame);
+
+ if (rt->is_deleted)
+ return vlib_error_drop_buffers (vm, node, from,
+ /* buffer stride */ 1,
+ n_buffers,
+ VNET_INTERFACE_OUTPUT_NEXT_DROP,
+ node->node_index,
+ VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DELETED);
+
+ si = vnet_get_sw_interface (vnm, rt->sw_if_index);
+ hi = vnet_get_sup_hw_interface (vnm, rt->sw_if_index);
+ if (!(si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ||
+ !(hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
+ {
+ vlib_simple_counter_main_t *cm;
+
+ cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
+ VNET_INTERFACE_COUNTER_TX_ERROR);
+ vlib_increment_simple_counter (cm, cpu_index,
+ rt->sw_if_index, n_buffers);
+
+ return vlib_error_drop_buffers (vm, node, from,
+ /* buffer stride */ 1,
+ n_buffers,
+ VNET_INTERFACE_OUTPUT_NEXT_DROP,
+ node->node_index,
+ VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DOWN);
+ }
+
+ from_end = from + n_buffers;
+
+ /* Total byte count of all buffers. */
+ n_bytes = 0;
+ n_packets = 0;
+
+ /* interface-output feature arc handling */
+ if (PREDICT_FALSE (vnet_have_features (arc, rt->sw_if_index)))
+ {
+ vnet_feature_config_main_t *fcm;
+ fcm = vnet_feature_get_config_main (arc);
+ current_config_index = vnet_get_feature_config_index (arc,
+ rt->sw_if_index);
+ vnet_get_config_data (&fcm->config_main, &current_config_index,
+ &next_index, 0);
+ }
+
+ while (from < from_end)
+ {
+ /* Get a new next frame since the previous incomplete frame may have
+ fewer than VNET_FRAME_SIZE vectors in it. */
+ vlib_get_new_next_frame (vm, node, next_index, to_tx, n_left_to_tx);
+
+ while (from + 4 <= from_end && n_left_to_tx >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u32 tx_swif0, tx_swif1;
+
+ /* Prefetch next iteration. */
+ vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
+ vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_tx[0] = bi0;
+ to_tx[1] = bi1;
+ from += 2;
+ to_tx += 2;
+ n_left_to_tx -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* Be grumpy about zero length buffers for benefit of
+ driver tx function. */
+ ASSERT (b0->current_length > 0);
+ ASSERT (b1->current_length > 0);
+
+ n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
+ n_bytes_b1 = vlib_buffer_length_in_chain (vm, b1);
+ tx_swif0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ tx_swif1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
+
+ n_bytes += n_bytes_b0 + n_bytes_b1;
+ n_packets += 2;
+
+ if (PREDICT_FALSE (current_config_index != ~0))
+ {
+ b0->feature_arc_index = arc;
+ b1->feature_arc_index = arc;
+ b0->current_config_index = current_config_index;
+ b1->current_config_index = current_config_index;
+ }
+
+ if (PREDICT_FALSE (tx_swif0 != rt->sw_if_index))
+ {
+ /* update vlan subif tx counts, if required */
+ vlib_increment_combined_counter (im->combined_sw_if_counters +
+ VNET_INTERFACE_COUNTER_TX,
+ cpu_index, tx_swif0, 1,
+ n_bytes_b0);
+ }
+
+ /* update vlan subif tx counts, if required */
+ if (PREDICT_FALSE (tx_swif1 != rt->sw_if_index))
+ {
+ vlib_increment_combined_counter (im->combined_sw_if_counters +
+ VNET_INTERFACE_COUNTER_TX,
+ cpu_index, tx_swif1, 1,
+ n_bytes_b1);
+ }
+ }
+
+ while (from + 1 <= from_end && n_left_to_tx >= 1)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 tx_swif0;
+
+ bi0 = from[0];
+ to_tx[0] = bi0;
+ from += 1;
+ to_tx += 1;
+ n_left_to_tx -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /* Be grumpy about zero length buffers for benefit of
+ driver tx function. */
+ ASSERT (b0->current_length > 0);
+
+ n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
+ tx_swif0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ n_bytes += n_bytes_b0;
+ n_packets += 1;
+
+ if (PREDICT_FALSE (current_config_index != ~0))
+ {
+ b0->feature_arc_index = arc;
+ b0->current_config_index = current_config_index;
+ }
+
+ if (PREDICT_FALSE (tx_swif0 != rt->sw_if_index))
+ {
+ vlib_increment_combined_counter (im->combined_sw_if_counters +
+ VNET_INTERFACE_COUNTER_TX,
+ cpu_index, tx_swif0, 1,
+ n_bytes_b0);
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_tx);
+ }
+
+ /* Update main interface stats. */
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_TX,
+ cpu_index,
+ rt->sw_if_index, n_packets, n_bytes);
+ return n_buffers;
+}
+
+VLIB_NODE_FUNCTION_MULTIARCH_CLONE (vnet_interface_output_node);
+CLIB_MULTIARCH_SELECT_FN (vnet_interface_output_node);
+
+/* Use the buffer's sw_if_index[VLIB_TX] to choose the output interface. */
+static uword
+vnet_per_buffer_interface_output (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 n_left_to_next, *from, *to_next;
+ u32 n_left_from, next_index;
+
+ n_left_from = frame->n_vectors;
+
+ from = vlib_frame_args (frame);
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1, next0, next1;
+ vlib_buffer_t *b0, *b1;
+ vnet_hw_interface_t *hi0, *hi1;
+
+ /* Prefetch next iteration. */
+ vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
+ vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ hi0 = vnet_get_sup_hw_interface
+ (vnm, vnet_buffer (b0)->sw_if_index[VLIB_TX]);
+ hi1 = vnet_get_sup_hw_interface
+ (vnm, vnet_buffer (b1)->sw_if_index[VLIB_TX]);
+
+ next0 = hi0->hw_if_index;
+ next1 = hi1->hw_if_index;
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, next0,
+ next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, next0;
+ vlib_buffer_t *b0;
+ vnet_hw_interface_t *hi0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ n_left_from -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ hi0 = vnet_get_sup_hw_interface
+ (vnm, vnet_buffer (b0)->sw_if_index[VLIB_TX]);
+
+ next0 = hi0->hw_if_index;
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+always_inline u32
+counter_index (vlib_main_t * vm, vlib_error_t e)
+{
+ vlib_node_t *n;
+ u32 ci, ni;
+
+ ni = vlib_error_get_node (e);
+ n = vlib_get_node (vm, ni);
+
+ ci = vlib_error_get_code (e);
+ ASSERT (ci < n->n_errors);
+
+ ci += n->error_heap_index;
+
+ return ci;
+}
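+
+/*
+ * Sketch (illustrative): a vlib_error_t packs a node index and a
+ * per-node error code; counter_index() maps that pair to an offset in
+ * the global error counter heap, so counting a packet's error is:
+ *
+ *   u32 ci = counter_index (vm, b0->error);
+ *   vm->error_main.counters[ci] += 1;
+ */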
+
+static u8 *
+format_vnet_error_trace (u8 * s, va_list * va)
+{
+ vlib_main_t *vm = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ vlib_error_t *e = va_arg (*va, vlib_error_t *);
+ vlib_node_t *error_node;
+ vlib_error_main_t *em = &vm->error_main;
+ u32 i;
+
+ error_node = vlib_get_node (vm, vlib_error_get_node (e[0]));
+ i = counter_index (vm, e[0]);
+ s = format (s, "%v: %s", error_node->name, em->error_strings_heap[i]);
+
+ return s;
+}
+
+static void
+trace_errors_with_buffers (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left, *buffers;
+
+ buffers = vlib_frame_vector_args (frame);
+ n_left = frame->n_vectors;
+
+ while (n_left >= 4)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ vlib_error_t *t0, *t1;
+
+ /* Prefetch next iteration. */
+ vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
+ vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);
+
+ bi0 = buffers[0];
+ bi1 = buffers[1];
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t0[0] = b0->error;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
+ t1[0] = b1->error;
+ }
+ buffers += 2;
+ n_left -= 2;
+ }
+
+ while (n_left >= 1)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ vlib_error_t *t0;
+
+ bi0 = buffers[0];
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t0[0] = b0->error;
+ }
+ buffers += 1;
+ n_left -= 1;
+ }
+}
+
+static u8 *
+validate_error (vlib_main_t * vm, vlib_error_t * e, u32 index)
+{
+ uword node_index = vlib_error_get_node (e[0]);
+ uword code = vlib_error_get_code (e[0]);
+ vlib_node_t *n;
+
+ if (node_index >= vec_len (vm->node_main.nodes))
+ return format (0, "[%d], node index out of range 0x%x, error 0x%x",
+ index, node_index, e[0]);
+
+ n = vlib_get_node (vm, node_index);
+ if (code >= n->n_errors)
+ return format (0, "[%d], code %d out of range for node %v",
+ index, code, n->name);
+
+ return 0;
+}
+
+static u8 *
+validate_error_frame (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * f)
+{
+ u32 *buffers = vlib_frame_args (f);
+ vlib_buffer_t *b;
+ u8 *msg = 0;
+ uword i;
+
+ for (i = 0; i < f->n_vectors; i++)
+ {
+ b = vlib_get_buffer (vm, buffers[i]);
+ msg = validate_error (vm, &b->error, i);
+ if (msg)
+ return msg;
+ }
+
+ return msg;
+}
+
+typedef enum
+{
+ VNET_ERROR_DISPOSITION_DROP,
+ VNET_ERROR_DISPOSITION_PUNT,
+ VNET_ERROR_N_DISPOSITION,
+} vnet_error_disposition_t;
+
+always_inline void
+do_packet (vlib_main_t * vm, vlib_error_t a)
+{
+ vlib_error_main_t *em = &vm->error_main;
+ u32 i = counter_index (vm, a);
+ em->counters[i] += 1;
+ vlib_error_elog_count (vm, i, 1);
+}
+
+static_always_inline uword
+process_drop_punt (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, vnet_error_disposition_t disposition)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_error_main_t *em = &vm->error_main;
+ u32 *buffers, *first_buffer;
+ vlib_error_t current_error;
+ u32 current_counter_index, n_errors_left;
+ u32 current_sw_if_index, n_errors_current_sw_if_index;
+ u64 current_counter;
+ vlib_simple_counter_main_t *cm;
+ u32 cpu_index = vm->cpu_index;
+
+ static vlib_error_t memory[VNET_ERROR_N_DISPOSITION];
+ static char memory_init[VNET_ERROR_N_DISPOSITION];
+
+ buffers = vlib_frame_args (frame);
+ first_buffer = buffers;
+
+ {
+ vlib_buffer_t *b = vlib_get_buffer (vm, first_buffer[0]);
+
+ if (!memory_init[disposition])
+ {
+ memory_init[disposition] = 1;
+ memory[disposition] = b->error;
+ }
+
+ current_sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
+ n_errors_current_sw_if_index = 0;
+ }
+
+ current_error = memory[disposition];
+ current_counter_index = counter_index (vm, memory[disposition]);
+ current_counter = em->counters[current_counter_index];
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ trace_errors_with_buffers (vm, node, frame);
+
+ n_errors_left = frame->n_vectors;
+ cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
+ (disposition == VNET_ERROR_DISPOSITION_PUNT
+ ? VNET_INTERFACE_COUNTER_PUNT
+ : VNET_INTERFACE_COUNTER_DROP));
+
+ while (n_errors_left >= 2)
+ {
+ vlib_buffer_t *b0, *b1;
+ vnet_sw_interface_t *sw_if0, *sw_if1;
+ vlib_error_t e0, e1;
+ u32 bi0, bi1;
+ u32 sw_if_index0, sw_if_index1;
+
+ bi0 = buffers[0];
+ bi1 = buffers[1];
+
+ buffers += 2;
+ n_errors_left -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ e0 = b0->error;
+ e1 = b1->error;
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+ /* Speculate that sw_if_index[01] == current_sw_if_index. */
+ n_errors_current_sw_if_index += 2;
+
+ /* Speculatively assume both (node, code) pairs are equal
+ to the current (node, code). */
+ current_counter += 2;
+
+ if (PREDICT_FALSE (e0 != current_error
+ || e1 != current_error
+ || sw_if_index0 != current_sw_if_index
+ || sw_if_index1 != current_sw_if_index))
+ {
+ current_counter -= 2;
+ n_errors_current_sw_if_index -= 2;
+
+ vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1);
+ vlib_increment_simple_counter (cm, cpu_index, sw_if_index1, 1);
+
+ /* Increment super-interface drop/punt counters for
+ sub-interfaces. */
+ sw_if0 = vnet_get_sw_interface (vnm, sw_if_index0);
+ vlib_increment_simple_counter
+ (cm, cpu_index, sw_if0->sup_sw_if_index,
+ sw_if0->sup_sw_if_index != sw_if_index0);
+
+ sw_if1 = vnet_get_sw_interface (vnm, sw_if_index1);
+ vlib_increment_simple_counter
+ (cm, cpu_index, sw_if1->sup_sw_if_index,
+ sw_if1->sup_sw_if_index != sw_if_index1);
+
+ em->counters[current_counter_index] = current_counter;
+ do_packet (vm, e0);
+ do_packet (vm, e1);
+
+ /* For 2 repeated errors, change current error. */
+ if (e0 == e1 && e1 != current_error)
+ {
+ current_error = e0;
+ current_counter_index = counter_index (vm, e0);
+ }
+ current_counter = em->counters[current_counter_index];
+ }
+ }
+
+ while (n_errors_left >= 1)
+ {
+ vlib_buffer_t *b0;
+ vnet_sw_interface_t *sw_if0;
+ vlib_error_t e0;
+ u32 bi0, sw_if_index0;
+
+ bi0 = buffers[0];
+
+ buffers += 1;
+ n_errors_left -= 1;
+ current_counter += 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ e0 = b0->error;
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ /* Increment drop/punt counters. */
+ vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1);
+
+ /* Increment super-interface drop/punt counters for sub-interfaces. */
+ sw_if0 = vnet_get_sw_interface (vnm, sw_if_index0);
+ vlib_increment_simple_counter (cm, cpu_index, sw_if0->sup_sw_if_index,
+ sw_if0->sup_sw_if_index != sw_if_index0);
+
+ if (PREDICT_FALSE (e0 != current_error))
+ {
+ current_counter -= 1;
+
+ vlib_error_elog_count (vm, current_counter_index,
+ (current_counter
+ - em->counters[current_counter_index]));
+
+ em->counters[current_counter_index] = current_counter;
+
+ do_packet (vm, e0);
+ current_error = e0;
+ current_counter_index = counter_index (vm, e0);
+ current_counter = em->counters[current_counter_index];
+ }
+ }
+
+ if (n_errors_current_sw_if_index > 0)
+ {
+ vnet_sw_interface_t *si;
+
+ vlib_increment_simple_counter (cm, cpu_index, current_sw_if_index,
+ n_errors_current_sw_if_index);
+
+ si = vnet_get_sw_interface (vnm, current_sw_if_index);
+ if (si->sup_sw_if_index != current_sw_if_index)
+ vlib_increment_simple_counter (cm, cpu_index, si->sup_sw_if_index,
+ n_errors_current_sw_if_index);
+ }
+
+ vlib_error_elog_count (vm, current_counter_index,
+ (current_counter
+ - em->counters[current_counter_index]));
+
+ /* Return cached counter. */
+ em->counters[current_counter_index] = current_counter;
+
+ /* Save memory for next iteration. */
+ memory[disposition] = current_error;
+
+ if (disposition == VNET_ERROR_DISPOSITION_DROP || !vm->os_punt_frame)
+ {
+ vlib_buffer_free (vm, first_buffer, frame->n_vectors);
+
+ /* If there is no punt function, free the frame as well. */
+ if (disposition == VNET_ERROR_DISPOSITION_PUNT && !vm->os_punt_frame)
+ vlib_frame_free (vm, node, frame);
+ }
+ else
+ vm->os_punt_frame (vm, node, frame);
+
+ return frame->n_vectors;
+}
+
+static inline void
+pcap_drop_trace (vlib_main_t * vm,
+ vnet_interface_main_t * im, vlib_frame_t * f)
+{
+ u32 *from;
+ u32 n_left = f->n_vectors;
+ vlib_buffer_t *b0, *p1;
+ u32 bi0;
+ i16 save_current_data;
+ u16 save_current_length;
+
+ from = vlib_frame_vector_args (f);
+
+ while (n_left > 0)
+ {
+ if (PREDICT_TRUE (n_left > 1))
+ {
+ p1 = vlib_get_buffer (vm, from[1]);
+ vlib_prefetch_buffer_header (p1, LOAD);
+ }
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ from++;
+ n_left--;
+
+ /* See if we're pointedly ignoring this specific error */
+ if (im->pcap_drop_filter_hash
+ && hash_get (im->pcap_drop_filter_hash, b0->error))
+ continue;
+
+ /* Trace all drops, or drops received on a specific interface */
+ if (im->pcap_sw_if_index == 0 ||
+ im->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_RX])
+ {
+ save_current_data = b0->current_data;
+ save_current_length = b0->current_length;
+
+ /*
+ * Typically, we'll need to rewind the buffer
+ */
+ if (b0->current_data > 0)
+ vlib_buffer_advance (b0, (word) - b0->current_data);
+
+ pcap_add_buffer (&im->pcap_main, vm, bi0, 512);
+
+ b0->current_data = save_current_data;
+ b0->current_length = save_current_length;
+ }
+ }
+}
+
+void
+vnet_pcap_drop_trace_filter_add_del (u32 error_index, int is_add)
+{
+ vnet_interface_main_t *im = &vnet_get_main ()->interface_main;
+
+ if (im->pcap_drop_filter_hash == 0)
+ im->pcap_drop_filter_hash = hash_create (0, sizeof (uword));
+
+ if (is_add)
+ hash_set (im->pcap_drop_filter_hash, error_index, 1);
+ else
+ hash_unset (im->pcap_drop_filter_hash, error_index);
+}
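+
+/*
+ * Usage sketch (illustrative; my_error_index is hypothetical): a
+ * subsystem can keep a well-understood drop out of the capture by
+ * adding its error index to the filter hash:
+ *
+ *   vnet_pcap_drop_trace_filter_add_del (my_error_index, 1); // add
+ *   vnet_pcap_drop_trace_filter_add_del (my_error_index, 0); // del
+ */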
+
+static uword
+process_drop (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ vnet_interface_main_t *im = &vnet_get_main ()->interface_main;
+
+ if (PREDICT_FALSE (im->drop_pcap_enable))
+ pcap_drop_trace (vm, im, frame);
+
+ return process_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_DROP);
+}
+
+static uword
+process_punt (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return process_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_PUNT);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (drop_buffers,static) = {
+ .function = process_drop,
+ .name = "error-drop",
+ .flags = VLIB_NODE_FLAG_IS_DROP,
+ .vector_size = sizeof (u32),
+ .format_trace = format_vnet_error_trace,
+ .validate_frame = validate_error_frame,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (drop_buffers, process_drop);
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (punt_buffers,static) = {
+ .function = process_punt,
+ .flags = (VLIB_NODE_FLAG_FRAME_NO_FREE_AFTER_DISPATCH
+ | VLIB_NODE_FLAG_IS_PUNT),
+ .name = "error-punt",
+ .vector_size = sizeof (u32),
+ .format_trace = format_vnet_error_trace,
+ .validate_frame = validate_error_frame,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (punt_buffers, process_punt);
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (vnet_per_buffer_interface_output_node,static) = {
+ .function = vnet_per_buffer_interface_output,
+ .name = "interface-output",
+ .vector_size = sizeof (u32),
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (vnet_per_buffer_interface_output_node,
+ vnet_per_buffer_interface_output);
+
+static uword
+interface_tx_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 last_sw_if_index = ~0;
+ vlib_frame_t *to_frame = 0;
+ vnet_hw_interface_t *hw = 0;
+ u32 *from, *to_next = 0;
+ u32 n_left_from;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+ while (n_left_from > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 sw_if_index0;
+
+ bi0 = from[0];
+ from++;
+ n_left_from--;
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+
+ if (PREDICT_FALSE ((last_sw_if_index != sw_if_index0) || to_frame == 0))
+ {
+ if (to_frame)
+ {
+ hw = vnet_get_sup_hw_interface (vnm, last_sw_if_index);
+ vlib_put_frame_to_node (vm, hw->tx_node_index, to_frame);
+ }
+ last_sw_if_index = sw_if_index0;
+ hw = vnet_get_sup_hw_interface (vnm, sw_if_index0);
+ to_frame = vlib_get_frame_to_node (vm, hw->tx_node_index);
+ to_next = vlib_frame_vector_args (to_frame);
+ }
+
+ to_next[0] = bi0;
+ to_next++;
+ to_frame->n_vectors++;
+ }
+ if (to_frame)
+ vlib_put_frame_to_node (vm, hw->tx_node_index, to_frame);
+ return from_frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (interface_tx, static) = {
+ .function = interface_tx_node_fn,
+ .name = "interface-tx",
+ .vector_size = sizeof (u32),
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ },
+};
+
+VNET_FEATURE_ARC_INIT (interface_output, static) =
+{
+ .arc_name = "interface-output",
+ .start_nodes = VNET_FEATURES (0),
+ .end_node = "interface-tx",
+ .arc_index_ptr = &vnet_main.interface_main.output_feature_arc_index,
+};
+
+VNET_FEATURE_INIT (span_tx, static) = {
+ .arc_name = "interface-output",
+ .node_name = "span-output",
+ .runs_before = VNET_FEATURES ("interface-tx"),
+};
+
+VNET_FEATURE_INIT (interface_tx, static) = {
+ .arc_name = "interface-output",
+ .node_name = "interface-tx",
+ .runs_before = 0,
+};
+/* *INDENT-ON* */
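+
+/*
+ * Ordering note (illustrative): on the "interface-output" arc the
+ * constraints above linearize to
+ *
+ *   span-output -> interface-tx
+ *
+ * with "interface-tx" pinned last (.runs_before = 0), so any feature
+ * enabled on this arc runs before packets reach the per-interface tx
+ * node.
+ */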
+
+clib_error_t *
+vnet_per_buffer_interface_output_hw_interface_add_del (vnet_main_t * vnm,
+ u32 hw_if_index,
+ u32 is_create)
+{
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+ u32 next_index;
+
+ next_index = vlib_node_add_next_with_slot
+ (vnm->vlib_main, vnet_per_buffer_interface_output_node.index,
+ hi->output_node_index,
+ /* next_index */ hw_if_index);
+
+ ASSERT (next_index == hw_if_index);
+
+ return 0;
+}
+
+VNET_HW_INTERFACE_ADD_DEL_FUNCTION
+ (vnet_per_buffer_interface_output_hw_interface_add_del);
+
+static clib_error_t *
+pcap_drop_trace_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ u8 *filename;
+ u32 max;
+ int matched = 0;
+ clib_error_t *error = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "on"))
+ {
+ if (im->drop_pcap_enable == 0)
+ {
+ if (im->pcap_filename == 0)
+ im->pcap_filename = format (0, "/tmp/drop.pcap%c", 0);
+
+ memset (&im->pcap_main, 0, sizeof (im->pcap_main));
+ im->pcap_main.file_name = (char *) im->pcap_filename;
+ im->pcap_main.n_packets_to_capture = 100;
+ if (im->pcap_pkts_to_capture)
+ im->pcap_main.n_packets_to_capture = im->pcap_pkts_to_capture;
+
+ im->pcap_main.packet_type = PCAP_PACKET_TYPE_ethernet;
+ im->drop_pcap_enable = 1;
+ vlib_cli_output (vm, "pcap drop capture on...");
+ }
+ else
+ {
+ vlib_cli_output (vm, "pcap drop capture already on...");
+ }
+ matched = 1;
+ }
+ else if (unformat (input, "off"))
+ {
+ matched = 1;
+
+ if (im->drop_pcap_enable)
+ {
+ vlib_cli_output (vm, "captured %d pkts...",
+ im->pcap_main.n_packets_captured);
+ if (im->pcap_main.n_packets_captured)
+ {
+ im->pcap_main.n_packets_to_capture =
+ im->pcap_main.n_packets_captured;
+ error = pcap_write (&im->pcap_main);
+ if (error)
+ clib_error_report (error);
+ else
+ vlib_cli_output (vm, "saved to %s...", im->pcap_filename);
+ }
+ }
+ else
+ {
+ vlib_cli_output (vm, "pcap drop capture already off...");
+ }
+
+ im->drop_pcap_enable = 0;
+ }
+ else if (unformat (input, "max %d", &max))
+ {
+ im->pcap_pkts_to_capture = max;
+ matched = 1;
+ }
+
+ else if (unformat (input, "intfc %U",
+ unformat_vnet_sw_interface, vnm,
+ &im->pcap_sw_if_index))
+ matched = 1;
+ else if (unformat (input, "intfc any"))
+ {
+ im->pcap_sw_if_index = 0;
+ matched = 1;
+ }
+ else if (unformat (input, "file %s", &filename))
+ {
+ u8 *chroot_filename;
+ /* Brain-police user path input */
+ if (strstr ((char *) filename, "..")
+ || index ((char *) filename, '/'))
+ {
+ vlib_cli_output (vm, "illegal characters in filename '%s'",
+ filename);
+ continue;
+ }
+
+ chroot_filename = format (0, "/tmp/%s%c", filename, 0);
+ vec_free (filename);
+
+ if (im->pcap_filename)
+ vec_free (im->pcap_filename);
+ im->pcap_filename = chroot_filename;
+ matched = 1;
+ }
+ else if (unformat (input, "status"))
+ {
+ if (im->drop_pcap_enable == 0)
+ {
+ vlib_cli_output (vm, "pcap drop capture is off...");
+ continue;
+ }
+
+ vlib_cli_output (vm, "pcap drop capture: %d of %d pkts...",
+ im->pcap_main.n_packets_captured,
+ im->pcap_main.n_packets_to_capture);
+ matched = 1;
+ }
+
+ else
+ break;
+ }
+
+ if (matched == 0)
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (pcap_trace_command, static) = {
+ .path = "pcap drop trace",
+ .short_help =
+ "pcap drop trace on off max <nn> intfc <intfc> file <name> status",
+ .function = pcap_drop_trace_command_fn,
+};
+/* *INDENT-ON* */
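+
+/*
+ * Illustrative CLI session (interface and file names are hypothetical):
+ *
+ *   vpp# pcap drop trace on max 1000 intfc GigabitEthernet0/8/0 file drops.pcap
+ *   vpp# pcap drop trace status
+ *   vpp# pcap drop trace off
+ *
+ * "off" writes whatever was captured to /tmp/drops.pcap.
+ */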
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/dir.dox b/src/vnet/ip/dir.dox
new file mode 100644
index 00000000000..a4eb733774a
--- /dev/null
+++ b/src/vnet/ip/dir.dox
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Doxygen directory documentation */
+
+/**
+@dir
+@brief Layer 3 IP Code.
+
+This directory contains the source code for IP routing.
+
+*/
+/*? %%clicmd:group_label Layer 3 IP CLI %% ?*/
diff --git a/src/vnet/ip/format.c b/src/vnet/ip/format.c
new file mode 100644
index 00000000000..be1c4fd32fb
--- /dev/null
+++ b/src/vnet/ip/format.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip_format.c: ip generic (4 or 6) formatting
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/ip/ip.h>
+
+/* Format IP protocol. */
+u8 *
+format_ip_protocol (u8 * s, va_list * args)
+{
+ ip_protocol_t protocol = va_arg (*args, ip_protocol_t);
+ ip_main_t *im = &ip_main;
+ ip_protocol_info_t *pi = ip_get_protocol_info (im, protocol);
+
+ if (pi)
+ return format (s, "%s", pi->name);
+ else
+ return format (s, "unknown %d", protocol);
+}
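+
+/* Editor's note: format_ip_protocol follows the standard vppinfra
+ * format-callback convention, so callers reach it through the %U
+ * conversion specifier rather than calling it directly. A minimal
+ * usage sketch (the TCP protocol value is just an example):
+ *
+ *   #include <vnet/ip/ip.h>
+ *
+ *   static u8 *
+ *   example_format_protocol (void)
+ *   {
+ *     u8 *s = 0;
+ *     // format() hands its remaining arguments to format_ip_protocol
+ *     s = format (s, "protocol: %U", format_ip_protocol, IP_PROTOCOL_TCP);
+ *     return s;  // caller frees with vec_free (s)
+ *   }
+ */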
+
+uword
+unformat_ip_protocol (unformat_input_t * input, va_list * args)
+{
+ u8 *result = va_arg (*args, u8 *);
+ ip_main_t *im = &ip_main;
+ ip_protocol_info_t *pi;
+ int i;
+
+ if (!unformat_user (input, unformat_vlib_number_by_name,
+ im->protocol_info_by_name, &i))
+ return 0;
+
+ pi = vec_elt_at_index (im->protocol_infos, i);
+ *result = pi->protocol;
+ return 1;
+}
+
+u8 *
+format_tcp_udp_port (u8 * s, va_list * args)
+{
+ int port = va_arg (*args, int);
+ ip_main_t *im = &ip_main;
+ tcp_udp_port_info_t *pi;
+
+ pi = ip_get_tcp_udp_port_info (im, port);
+ if (pi)
+ s = format (s, "%s", pi->name);
+ else
+ s = format (s, "%d", clib_net_to_host_u16 (port));
+
+ return s;
+}
+
+uword
+unformat_tcp_udp_port (unformat_input_t * input, va_list * args)
+{
+ u16 *result = va_arg (*args, u16 *);
+ ip_main_t *im = &ip_main;
+ tcp_udp_port_info_t *pi;
+ u32 i, port;
+
+ if (unformat_user (input, unformat_vlib_number_by_name,
+ im->port_info_by_name, &i))
+ {
+ pi = vec_elt_at_index (im->port_infos, i);
+ port = pi->port;
+ }
+ else if (unformat_user (input, unformat_vlib_number, &port)
+ && port < (1 << 16))
+ port = clib_host_to_net_u16 (port);
+ else
+ return 0;
+
+ *result = port;
+ return 1;
+}
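+
+/* Editor's note: parsing goes the other way through the same %U
+ * convention with unformat; whichever branch matches above, the
+ * parsed port lands in network byte order. A small sketch, assuming
+ * `input' is an already-initialized unformat_input_t:
+ *
+ *   static int
+ *   example_parse_port (unformat_input_t * input, u16 * port)
+ *   {
+ *     // accepts a service name ("http") or a number ("80");
+ *     // *port ends up in network byte order either way
+ *     if (unformat (input, "%U", unformat_tcp_udp_port, port))
+ *       return 0;
+ *     return -1;
+ *   }
+ */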
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/format.h b/src/vnet/ip/format.h
new file mode 100644
index 00000000000..c35f0f4bb74
--- /dev/null
+++ b/src/vnet/ip/format.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/format.h: ip 4 and/or 6 formatting
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_ip_format_h
+#define included_ip_format_h
+
+/* IP4 or IP6. */
+
+format_function_t format_ip_protocol;
+unformat_function_t unformat_ip_protocol;
+
+format_function_t format_tcp_udp_port;
+unformat_function_t unformat_tcp_udp_port;
+
+typedef enum format_ip_adjacency_flags_t_
+{
+ FORMAT_IP_ADJACENCY_NONE,
+ FORMAT_IP_ADJACENCY_BRIEF = FORMAT_IP_ADJACENCY_NONE,
+ FORMAT_IP_ADJACENCY_DETAIL = (1 << 0),
+} format_ip_adjacency_flags_t;
+
+format_function_t format_ip_adjacency;
+format_function_t format_ip_adjacency_packet_data;
+
+format_function_t format_ip46_address;
+
+typedef enum
+{
+ IP46_TYPE_ANY,
+ IP46_TYPE_IP4,
+ IP46_TYPE_IP6
+} ip46_type_t;
+/* unformat_ip46_address expects arguments (ip46_address_t *, ip46_type_t)
+ * The type argument is used to enforce a particular IP version. */
+unformat_function_t unformat_ip46_address;
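+
+/* Editor's note: a brief usage sketch for the declaration above; the
+ * second argument selects which address family the parser accepts:
+ *
+ *   static int
+ *   example_parse_ip46 (unformat_input_t * input, ip46_address_t * a)
+ *   {
+ *     // pass IP46_TYPE_IP4 or IP46_TYPE_IP6 to restrict the family
+ *     if (unformat (input, "%U", unformat_ip46_address, a, IP46_TYPE_ANY))
+ *       return 0;
+ *     return -1;
+ *   }
+ */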
+
+/* IP4 */
+
+/* Parse an IP4 address %d.%d.%d.%d. */
+unformat_function_t unformat_ip4_address;
+
+/* Format an IP4 address. */
+format_function_t format_ip4_address;
+format_function_t format_ip4_address_and_length;
+
+/* Parse an IP4 header. */
+unformat_function_t unformat_ip4_header;
+
+/* Format an IP4 header. */
+format_function_t format_ip4_header;
+
+/* Parse an IP packet matching pattern. */
+unformat_function_t unformat_ip4_match;
+
+unformat_function_t unformat_pg_ip4_header;
+
+/* IP6 */
+unformat_function_t unformat_ip6_address;
+format_function_t format_ip6_address;
+format_function_t format_ip6_address_and_length;
+unformat_function_t unformat_ip6_header;
+format_function_t format_ip6_header;
+unformat_function_t unformat_pg_ip6_header;
+
+/* Format TCP/UDP headers. */
+format_function_t format_tcp_header, format_udp_header;
+
+unformat_function_t unformat_pg_tcp_header, unformat_pg_udp_header;
+
+#endif /* included_ip_format_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/icmp4.c b/src/vnet/ip/icmp4.c
new file mode 100644
index 00000000000..c3afff72f26
--- /dev/null
+++ b/src/vnet/ip/icmp4.c
@@ -0,0 +1,784 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/icmp4.c: ipv4 icmp
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/ip/ip.h>
+#include <vnet/pg/pg.h>
+
+static char *icmp_error_strings[] = {
+#define _(f,s) s,
+ foreach_icmp4_error
+#undef _
+};
+
+static u8 *
+format_ip4_icmp_type_and_code (u8 * s, va_list * args)
+{
+ icmp4_type_t type = va_arg (*args, int);
+ u8 code = va_arg (*args, int);
+ char *t = 0;
+
+#define _(n,f) case n: t = #f; break;
+
+ switch (type)
+ {
+ foreach_icmp4_type;
+
+ default:
+ break;
+ }
+
+#undef _
+
+ if (!t)
+ return format (s, "unknown 0x%x", type);
+
+ s = format (s, "%s", t);
+
+ t = 0;
+ switch ((type << 8) | code)
+ {
+#define _(a,n,f) case (ICMP4_##a << 8) | (n): t = #f; break;
+
+ foreach_icmp4_code;
+
+#undef _
+ }
+
+ if (t)
+ s = format (s, " %s", t);
+
+ return s;
+}
+
+static u8 *
+format_ip4_icmp_header (u8 * s, va_list * args)
+{
+ icmp46_header_t *icmp = va_arg (*args, icmp46_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+
+ /* Nothing to do. */
+ if (max_header_bytes < sizeof (icmp[0]))
+ return format (s, "ICMP header truncated");
+
+ s = format (s, "ICMP %U checksum 0x%x",
+ format_ip4_icmp_type_and_code, icmp->type, icmp->code,
+ clib_net_to_host_u16 (icmp->checksum));
+
+ return s;
+}
+
+static u8 *
+format_icmp_input_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ icmp_input_trace_t *t = va_arg (*va, icmp_input_trace_t *);
+
+ s = format (s, "%U",
+ format_ip4_header, t->packet_data, sizeof (t->packet_data));
+
+ return s;
+}
+
+typedef enum
+{
+ ICMP_INPUT_NEXT_ERROR,
+ ICMP_INPUT_N_NEXT,
+} icmp_input_next_t;
+
+typedef struct
+{
+ uword *type_and_code_by_name;
+
+ uword *type_by_name;
+
+ /* Vector dispatch table indexed by [icmp type]. */
+ u8 ip4_input_next_index_by_type[256];
+} icmp4_main_t;
+
+icmp4_main_t icmp4_main;
+
+static uword
+ip4_icmp_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ icmp4_main_t *im = &icmp4_main;
+ uword n_packets = frame->n_vectors;
+ u32 *from, *to_next;
+ u32 n_left_from, n_left_to_next, next;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = n_packets;
+ next = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
+ /* stride */ 1,
+ sizeof (icmp_input_trace_t));
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ ip4_header_t *ip0;
+ icmp46_header_t *icmp0;
+ icmp4_type_t type0;
+ u32 bi0, next0;
+
+ if (PREDICT_TRUE (n_left_from > 2))
+ {
+ vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
+ p0 = vlib_get_buffer (vm, from[1]);
+ ip0 = vlib_buffer_get_current (p0);
+ CLIB_PREFETCH (ip0, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = to_next[0] = from[0];
+
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (p0);
+ icmp0 = ip4_next_header (ip0);
+ type0 = icmp0->type;
+ next0 = im->ip4_input_next_index_by_type[type0];
+
+ p0->error = node->errors[ICMP4_ERROR_UNKNOWN_TYPE];
+ if (PREDICT_FALSE (next0 != next))
+ {
+ vlib_put_next_frame (vm, node, next, n_left_to_next + 1);
+ next = next0;
+ vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
+ to_next[0] = bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_icmp_input_node,static) = {
+ .function = ip4_icmp_input,
+ .name = "ip4-icmp-input",
+
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_icmp_input_trace,
+
+ .n_errors = ARRAY_LEN (icmp_error_strings),
+ .error_strings = icmp_error_strings,
+
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [ICMP_INPUT_NEXT_ERROR] = "error-punt",
+ },
+};
+/* *INDENT-ON* */
+
+static uword
+ip4_icmp_echo_request (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ uword n_packets = frame->n_vectors;
+ u32 *from, *to_next;
+ u32 n_left_from, n_left_to_next, next;
+ ip4_main_t *i4m = &ip4_main;
+ u16 *fragment_ids, *fid;
+ u8 host_config_ttl = i4m->host_config.ttl;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = n_packets;
+ next = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
+ /* stride */ 1,
+ sizeof (icmp_input_trace_t));
+
+ /* Get random fragment IDs for replies. */
+ fid = fragment_ids = clib_random_buffer_get_data (&vm->random_buffer,
+ n_packets *
+ sizeof (fragment_ids[0]));
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
+
+ while (n_left_from > 2 && n_left_to_next > 2)
+ {
+ vlib_buffer_t *p0, *p1;
+ ip4_header_t *ip0, *ip1;
+ icmp46_header_t *icmp0, *icmp1;
+ u32 bi0, src0, dst0;
+ u32 bi1, src1, dst1;
+ ip_csum_t sum0, sum1;
+
+ bi0 = to_next[0] = from[0];
+ bi1 = to_next[1] = from[1];
+
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ p0 = vlib_get_buffer (vm, bi0);
+ p1 = vlib_get_buffer (vm, bi1);
+ ip0 = vlib_buffer_get_current (p0);
+ ip1 = vlib_buffer_get_current (p1);
+ icmp0 = ip4_next_header (ip0);
+ icmp1 = ip4_next_header (ip1);
+
+ vnet_buffer (p0)->sw_if_index[VLIB_RX] =
+ vnet_main.local_interface_sw_if_index;
+ vnet_buffer (p1)->sw_if_index[VLIB_RX] =
+ vnet_main.local_interface_sw_if_index;
+
+ /* Update ICMP checksum. */
+ sum0 = icmp0->checksum;
+ sum1 = icmp1->checksum;
+
+ ASSERT (icmp0->type == ICMP4_echo_request);
+ ASSERT (icmp1->type == ICMP4_echo_request);
+ sum0 = ip_csum_update (sum0, ICMP4_echo_request, ICMP4_echo_reply,
+ icmp46_header_t, type);
+ sum1 = ip_csum_update (sum1, ICMP4_echo_request, ICMP4_echo_reply,
+ icmp46_header_t, type);
+ icmp0->type = ICMP4_echo_reply;
+ icmp1->type = ICMP4_echo_reply;
+
+ icmp0->checksum = ip_csum_fold (sum0);
+ icmp1->checksum = ip_csum_fold (sum1);
+
+ src0 = ip0->src_address.data_u32;
+ src1 = ip1->src_address.data_u32;
+ dst0 = ip0->dst_address.data_u32;
+ dst1 = ip1->dst_address.data_u32;
+
+ /* Swap source and destination address.
+ Does not change checksum. */
+ ip0->src_address.data_u32 = dst0;
+ ip1->src_address.data_u32 = dst1;
+ ip0->dst_address.data_u32 = src0;
+ ip1->dst_address.data_u32 = src1;
+
+ /* Update IP checksum. */
+ sum0 = ip0->checksum;
+ sum1 = ip1->checksum;
+
+ sum0 = ip_csum_update (sum0, ip0->ttl, host_config_ttl,
+ ip4_header_t, ttl);
+ sum1 = ip_csum_update (sum1, ip1->ttl, host_config_ttl,
+ ip4_header_t, ttl);
+ ip0->ttl = host_config_ttl;
+ ip1->ttl = host_config_ttl;
+
+ /* New fragment id. */
+ sum0 = ip_csum_update (sum0, ip0->fragment_id, fid[0],
+ ip4_header_t, fragment_id);
+ sum1 = ip_csum_update (sum1, ip1->fragment_id, fid[1],
+ ip4_header_t, fragment_id);
+ ip0->fragment_id = fid[0];
+ ip1->fragment_id = fid[1];
+ fid += 2;
+
+ ip0->checksum = ip_csum_fold (sum0);
+ ip1->checksum = ip_csum_fold (sum1);
+
+ ASSERT (ip0->checksum == ip4_header_checksum (ip0));
+ ASSERT (ip1->checksum == ip4_header_checksum (ip1));
+
+ p0->flags |= VNET_BUFFER_LOCALLY_ORIGINATED;
+ p1->flags |= VNET_BUFFER_LOCALLY_ORIGINATED;
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ ip4_header_t *ip0;
+ icmp46_header_t *icmp0;
+ u32 bi0, src0, dst0;
+ ip_csum_t sum0;
+
+ bi0 = to_next[0] = from[0];
+
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (p0);
+ icmp0 = ip4_next_header (ip0);
+
+ vnet_buffer (p0)->sw_if_index[VLIB_RX] =
+ vnet_main.local_interface_sw_if_index;
+
+ /* Update ICMP checksum. */
+ sum0 = icmp0->checksum;
+
+ ASSERT (icmp0->type == ICMP4_echo_request);
+ sum0 = ip_csum_update (sum0, ICMP4_echo_request, ICMP4_echo_reply,
+ icmp46_header_t, type);
+ icmp0->type = ICMP4_echo_reply;
+ icmp0->checksum = ip_csum_fold (sum0);
+
+ src0 = ip0->src_address.data_u32;
+ dst0 = ip0->dst_address.data_u32;
+ ip0->src_address.data_u32 = dst0;
+ ip0->dst_address.data_u32 = src0;
+
+ /* Update IP checksum. */
+ sum0 = ip0->checksum;
+
+ sum0 = ip_csum_update (sum0, ip0->ttl, host_config_ttl,
+ ip4_header_t, ttl);
+ ip0->ttl = host_config_ttl;
+
+ sum0 = ip_csum_update (sum0, ip0->fragment_id, fid[0],
+ ip4_header_t, fragment_id);
+ ip0->fragment_id = fid[0];
+ fid += 1;
+
+ ip0->checksum = ip_csum_fold (sum0);
+
+ ASSERT (ip0->checksum == ip4_header_checksum (ip0));
+
+ p0->flags |= VNET_BUFFER_LOCALLY_ORIGINATED;
+ }
+
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ }
+
+ vlib_error_count (vm, ip4_icmp_input_node.index,
+ ICMP4_ERROR_ECHO_REPLIES_SENT, frame->n_vectors);
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_icmp_echo_request_node,static) = {
+ .function = ip4_icmp_echo_request,
+ .name = "ip4-icmp-echo-request",
+
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_icmp_input_trace,
+
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "ip4-load-balance",
+ },
+};
+/* *INDENT-ON* */
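+
+/* Editor's note: the echo-request path above never recomputes a
+ * checksum from scratch. Each ip_csum_update call applies the
+ * incremental update of RFC 1624, HC' = ~(~HC + ~m + m'), for the
+ * one field it rewrites, and ip_csum_fold folds the carries back
+ * into 16 bits. A self-contained sketch of that arithmetic,
+ * independent of the vppinfra helpers:
+ *
+ *   #include <stdint.h>
+ *
+ *   static uint16_t
+ *   example_csum_update (uint16_t hc, uint16_t old, uint16_t new)
+ *   {
+ *     // hc is the checksum as stored in the header; old/new are one
+ *     // 16-bit field before and after rewriting
+ *     uint32_t sum = (uint16_t) ~hc + (uint16_t) ~old + new;
+ *     sum = (sum & 0xffff) + (sum >> 16);  // fold end-around carry
+ *     sum = (sum & 0xffff) + (sum >> 16);  // second fold may be needed
+ *     return (uint16_t) ~sum;
+ *   }
+ */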
+
+typedef enum
+{
+ IP4_ICMP_ERROR_NEXT_DROP,
+ IP4_ICMP_ERROR_NEXT_LOOKUP,
+ IP4_ICMP_ERROR_N_NEXT,
+} ip4_icmp_error_next_t;
+
+void
+icmp4_error_set_vnet_buffer (vlib_buffer_t * b, u8 type, u8 code, u32 data)
+{
+ vnet_buffer (b)->ip.icmp.type = type;
+ vnet_buffer (b)->ip.icmp.code = code;
+ vnet_buffer (b)->ip.icmp.data = data;
+}
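+
+/* Editor's note: nodes that want to originate an ICMP error use this
+ * helper to stash the type, code and data word in the buffer metadata
+ * before enqueueing the packet to ip4-icmp-error, which reads them
+ * back when building the reply. A hedged sketch of that pattern (the
+ * enqueue itself is left to the caller):
+ *
+ *   static void
+ *   example_flag_ttl_expired (vlib_buffer_t * b0)
+ *   {
+ *     // mark b0 so ip4-icmp-error emits a time-exceeded reply
+ *     icmp4_error_set_vnet_buffer
+ *       (b0, ICMP4_time_exceeded,
+ *        ICMP4_time_exceeded_ttl_exceeded_in_transit, 0 /- data -/);
+ *   }
+ */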
+
+static u8
+icmp4_icmp_type_to_error (u8 type)
+{
+ switch (type)
+ {
+ case ICMP4_destination_unreachable:
+ return ICMP4_ERROR_DEST_UNREACH_SENT;
+ case ICMP4_time_exceeded:
+ return ICMP4_ERROR_TTL_EXPIRE_SENT;
+ case ICMP4_parameter_problem:
+ return ICMP4_ERROR_PARAM_PROBLEM_SENT;
+ default:
+ return ICMP4_ERROR_DROP;
+ }
+}
+
+static uword
+ip4_icmp_error (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 *from, *to_next;
+ uword n_left_from, n_left_to_next;
+ ip4_icmp_error_next_t next_index;
+ ip4_main_t *im = &ip4_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
+ /* stride */ 1,
+ sizeof (icmp_input_trace_t));
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0 = from[0];
+ u32 next0 = IP4_ICMP_ERROR_NEXT_LOOKUP;
+ u8 error0 = ICMP4_ERROR_NONE;
+ vlib_buffer_t *p0;
+ ip4_header_t *ip0, *out_ip0;
+ icmp46_header_t *icmp0;
+ u32 sw_if_index0, if_add_index0;
+ ip_csum_t sum;
+
+ /* Speculatively enqueue p0 to the current next frame */
+ to_next[0] = pi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip0 = vlib_buffer_get_current (p0);
+ sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
+
+ /*
+ * RFC1812 says to keep as much of the original packet as
+ * possible within the minimum MTU (576). We cheat "a little"
+ * here by keeping whatever fits in the first buffer, to be more
+ * efficient
+ */
+ if (PREDICT_FALSE (p0->total_length_not_including_first_buffer))
+ {
+ /* clear current_length of all other buffers in chain */
+ vlib_buffer_t *b = p0;
+ p0->total_length_not_including_first_buffer = 0;
+ while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ b = vlib_get_buffer (vm, b->next_buffer);
+ b->current_length = 0;
+ }
+ }
+ p0->current_length =
+ p0->current_length > 576 ? 576 : p0->current_length;
+
+ /* Add IP header and ICMPv4 header including a 4 byte data field */
+ vlib_buffer_advance (p0,
+ -sizeof (ip4_header_t) -
+ sizeof (icmp46_header_t) - 4);
+ out_ip0 = vlib_buffer_get_current (p0);
+ icmp0 = (icmp46_header_t *) & out_ip0[1];
+
+ /* Fill ip header fields */
+ out_ip0->ip_version_and_header_length = 0x45;
+ out_ip0->tos = 0;
+ out_ip0->length = clib_host_to_net_u16 (p0->current_length);
+ out_ip0->fragment_id = 0;
+ out_ip0->flags_and_fragment_offset = 0;
+ out_ip0->ttl = 0xff;
+ out_ip0->protocol = IP_PROTOCOL_ICMP;
+ out_ip0->dst_address = ip0->src_address;
+ if_add_index0 = ~0;
+ if (PREDICT_TRUE (vec_len (lm->if_address_pool_index_by_sw_if_index)
+ > sw_if_index0))
+ if_add_index0 =
+ lm->if_address_pool_index_by_sw_if_index[sw_if_index0];
+ if (PREDICT_TRUE (if_add_index0 != ~0))
+ {
+ ip_interface_address_t *if_add =
+ pool_elt_at_index (lm->if_address_pool, if_add_index0);
+ ip4_address_t *if_ip =
+ ip_interface_address_get_address (lm, if_add);
+ out_ip0->src_address = *if_ip;
+ }
+ else
+ {
+ /* interface has no IP4 address - should not happen */
+ next0 = IP4_ICMP_ERROR_NEXT_DROP;
+ error0 = ICMP4_ERROR_DROP;
+ }
+ out_ip0->checksum = ip4_header_checksum (out_ip0);
+
+ /* Fill icmp header fields */
+ icmp0->type = vnet_buffer (p0)->ip.icmp.type;
+ icmp0->code = vnet_buffer (p0)->ip.icmp.code;
+ *((u32 *) (icmp0 + 1)) =
+ clib_host_to_net_u32 (vnet_buffer (p0)->ip.icmp.data);
+ icmp0->checksum = 0;
+ sum =
+ ip_incremental_checksum (0, icmp0,
+ p0->current_length -
+ sizeof (ip4_header_t));
+ icmp0->checksum = ~ip_csum_fold (sum);
+
+ /* Update error status */
+ if (error0 == ICMP4_ERROR_NONE)
+ error0 = icmp4_icmp_type_to_error (icmp0->type);
+ vlib_error_count (vm, node->node_index, error0, 1);
+
+ /* Verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_icmp_error_node) = {
+ .function = ip4_icmp_error,
+ .name = "ip4-icmp-error",
+ .vector_size = sizeof (u32),
+
+ .n_errors = ARRAY_LEN (icmp_error_strings),
+ .error_strings = icmp_error_strings,
+
+ .n_next_nodes = IP4_ICMP_ERROR_N_NEXT,
+ .next_nodes = {
+ [IP4_ICMP_ERROR_NEXT_DROP] = "error-drop",
+ [IP4_ICMP_ERROR_NEXT_LOOKUP] = "ip4-lookup",
+ },
+
+ .format_trace = format_icmp_input_trace,
+};
+/* *INDENT-ON* */
+
+
+static uword
+unformat_icmp_type_and_code (unformat_input_t * input, va_list * args)
+{
+ icmp46_header_t *h = va_arg (*args, icmp46_header_t *);
+ icmp4_main_t *cm = &icmp4_main;
+ u32 i;
+
+ if (unformat_user (input, unformat_vlib_number_by_name,
+ cm->type_and_code_by_name, &i))
+ {
+ h->type = (i >> 8) & 0xff;
+ h->code = (i >> 0) & 0xff;
+ }
+ else if (unformat_user (input, unformat_vlib_number_by_name,
+ cm->type_by_name, &i))
+ {
+ h->type = i;
+ h->code = 0;
+ }
+ else
+ return 0;
+
+ return 1;
+}
+
+static void
+icmp4_pg_edit_function (pg_main_t * pg,
+ pg_stream_t * s,
+ pg_edit_group_t * g, u32 * packets, u32 n_packets)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ u32 ip_offset, icmp_offset;
+
+ icmp_offset = g->start_byte_offset;
+ ip_offset = (g - 1)->start_byte_offset;
+
+ while (n_packets >= 1)
+ {
+ vlib_buffer_t *p0;
+ ip4_header_t *ip0;
+ icmp46_header_t *icmp0;
+ u32 len0;
+
+ p0 = vlib_get_buffer (vm, packets[0]);
+ n_packets -= 1;
+ packets += 1;
+
+ ASSERT (p0->current_data == 0);
+ ip0 = (void *) (p0->data + ip_offset);
+ icmp0 = (void *) (p0->data + icmp_offset);
+ len0 = clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
+ icmp0->checksum =
+ ~ip_csum_fold (ip_incremental_checksum (0, icmp0, len0));
+ }
+}
+
+typedef struct
+{
+ pg_edit_t type, code;
+ pg_edit_t checksum;
+} pg_icmp46_header_t;
+
+always_inline void
+pg_icmp_header_init (pg_icmp46_header_t * p)
+{
+ /* Initialize fields that are not bit fields in the ICMP header. */
+#define _(f) pg_edit_init (&p->f, icmp46_header_t, f);
+ _(type);
+ _(code);
+ _(checksum);
+#undef _
+}
+
+static uword
+unformat_pg_icmp_header (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t *s = va_arg (*args, pg_stream_t *);
+ pg_icmp46_header_t *p;
+ u32 group_index;
+
+ p = pg_create_edit_group (s, sizeof (p[0]), sizeof (icmp46_header_t),
+ &group_index);
+ pg_icmp_header_init (p);
+
+ p->checksum.type = PG_EDIT_UNSPECIFIED;
+
+ {
+ icmp46_header_t tmp;
+
+ if (!unformat (input, "ICMP %U", unformat_icmp_type_and_code, &tmp))
+ goto error;
+
+ pg_edit_set_fixed (&p->type, tmp.type);
+ pg_edit_set_fixed (&p->code, tmp.code);
+ }
+
+ /* Parse options. */
+ while (1)
+ {
+ if (unformat (input, "checksum %U",
+ unformat_pg_edit, unformat_pg_number, &p->checksum))
+ ;
+
+ /* Can't parse input: try next protocol level. */
+ else
+ break;
+ }
+
+ if (!unformat_user (input, unformat_pg_payload, s))
+ goto error;
+
+ if (p->checksum.type == PG_EDIT_UNSPECIFIED)
+ {
+ pg_edit_group_t *g = pg_stream_get_group (s, group_index);
+ g->edit_function = icmp4_pg_edit_function;
+ g->edit_function_opaque = 0;
+ }
+
+ return 1;
+
+error:
+ /* Free up any edits we may have added. */
+ pg_free_edit_group (s);
+ return 0;
+}
+
+void
+ip4_icmp_register_type (vlib_main_t * vm, icmp4_type_t type, u32 node_index)
+{
+ icmp4_main_t *im = &icmp4_main;
+
+ ASSERT ((int) type < ARRAY_LEN (im->ip4_input_next_index_by_type));
+ im->ip4_input_next_index_by_type[type]
+ = vlib_node_add_next (vm, ip4_icmp_input_node.index, node_index);
+}
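+
+/* Editor's note: features that consume a specific ICMP type call this
+ * at init time; ip4-icmp-input then dispatches matching packets
+ * straight to the registered node instead of the error path. Sketch,
+ * assuming a hypothetical handler node my_echo_reply_node defined
+ * elsewhere (not part of this file):
+ *
+ *   extern vlib_node_registration_t my_echo_reply_node;
+ *
+ *   static clib_error_t *
+ *   example_register_echo_reply (vlib_main_t * vm)
+ *   {
+ *     ip4_icmp_register_type (vm, ICMP4_echo_reply,
+ *                             my_echo_reply_node.index);
+ *     return 0;
+ *   }
+ */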
+
+static clib_error_t *
+icmp4_init (vlib_main_t * vm)
+{
+ ip_main_t *im = &ip_main;
+ ip_protocol_info_t *pi;
+ icmp4_main_t *cm = &icmp4_main;
+ clib_error_t *error;
+
+ error = vlib_call_init_function (vm, ip_main_init);
+
+ if (error)
+ return error;
+
+ pi = ip_get_protocol_info (im, IP_PROTOCOL_ICMP);
+ pi->format_header = format_ip4_icmp_header;
+ pi->unformat_pg_edit = unformat_pg_icmp_header;
+
+ cm->type_by_name = hash_create_string (0, sizeof (uword));
+#define _(n,t) hash_set_mem (cm->type_by_name, #t, (n));
+ foreach_icmp4_type;
+#undef _
+
+ cm->type_and_code_by_name = hash_create_string (0, sizeof (uword));
+#define _(a,n,t) hash_set_mem (cm->type_and_code_by_name, #t, (n) | (ICMP4_##a << 8));
+ foreach_icmp4_code;
+#undef _
+
+ memset (cm->ip4_input_next_index_by_type,
+ ICMP_INPUT_NEXT_ERROR, sizeof (cm->ip4_input_next_index_by_type));
+
+ ip4_icmp_register_type (vm, ICMP4_echo_request,
+ ip4_icmp_echo_request_node.index);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (icmp4_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/icmp4.h b/src/vnet/ip/icmp4.h
new file mode 100644
index 00000000000..ae805148c89
--- /dev/null
+++ b/src/vnet/ip/icmp4.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vnet_icmp4_h
+#define included_vnet_icmp4_h
+
+#define foreach_icmp4_error \
+ _ (NONE, "valid packets") \
+ _ (UNKNOWN_TYPE, "unknown type") \
+ _ (INVALID_CODE_FOR_TYPE, "invalid code for type") \
+ _ (INVALID_HOP_LIMIT_FOR_TYPE, "hop_limit != 255") \
+ _ (LENGTH_TOO_SMALL_FOR_TYPE, "payload length too small for type") \
+ _ (OPTIONS_WITH_ODD_LENGTH, \
+ "total option length not multiple of 8 bytes") \
+ _ (OPTION_WITH_ZERO_LENGTH, "option has zero length") \
+ _ (ECHO_REPLIES_SENT, "echo replies sent") \
+ _ (DST_LOOKUP_MISS, "icmp4 dst address lookup misses") \
+ _ (DEST_UNREACH_SENT, "destination unreachable response sent") \
+ _ (TTL_EXPIRE_SENT, "hop limit exceeded response sent") \
+ _ (PARAM_PROBLEM_SENT, "parameter problem response sent") \
+ _ (DROP, "error message dropped")
+
+typedef enum
+{
+#define _(f,s) ICMP4_ERROR_##f,
+ foreach_icmp4_error
+#undef _
+} icmp4_error_t;
+
+typedef struct
+{
+ u8 packet_data[64];
+} icmp_input_trace_t;
+
+format_function_t format_icmp4_input_trace;
+void ip4_icmp_register_type (vlib_main_t * vm, icmp4_type_t type,
+ u32 node_index);
+void icmp4_error_set_vnet_buffer (vlib_buffer_t * b, u8 type, u8 code,
+ u32 data);
+
+#endif /* included_vnet_icmp4_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/icmp46_packet.h b/src/vnet/ip/icmp46_packet.h
new file mode 100644
index 00000000000..a86cbd57bdb
--- /dev/null
+++ b/src/vnet/ip/icmp46_packet.h
@@ -0,0 +1,398 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * icmp46_packet.h: ip4/ip6 icmp packet format
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_vnet_icmp46_packet_h
+#define included_vnet_icmp46_packet_h
+
+#include <vnet/ethernet/packet.h>
+#include <vnet/ip/ip6_packet.h>
+
+#define foreach_icmp4_type \
+ _ (0, echo_reply) \
+ _ (3, destination_unreachable) \
+ _ (4, source_quench) \
+ _ (5, redirect) \
+ _ (6, alternate_host_address) \
+ _ (8, echo_request) \
+ _ (9, router_advertisement) \
+ _ (10, router_solicitation) \
+ _ (11, time_exceeded) \
+ _ (12, parameter_problem) \
+ _ (13, timestamp_request) \
+ _ (14, timestamp_reply) \
+ _ (15, information_request) \
+ _ (16, information_reply) \
+ _ (17, address_mask_request) \
+ _ (18, address_mask_reply) \
+ _ (30, traceroute) \
+ _ (31, datagram_conversion_error) \
+ _ (32, mobile_host_redirect) \
+ _ (33, ip6_where_are_you) \
+ _ (34, ip6_i_am_here) \
+ _ (35, mobile_registration_request) \
+ _ (36, mobile_registration_reply) \
+ _ (37, domain_name_request) \
+ _ (38, domain_name_reply) \
+ _ (39, skip) \
+ _ (40, photuris)
+
+#define icmp_no_code 0
+
+#define foreach_icmp4_code \
+ _ (destination_unreachable, 0, destination_unreachable_net) \
+ _ (destination_unreachable, 1, destination_unreachable_host) \
+ _ (destination_unreachable, 2, protocol_unreachable) \
+ _ (destination_unreachable, 3, port_unreachable) \
+ _ (destination_unreachable, 4, fragmentation_needed_and_dont_fragment_set) \
+ _ (destination_unreachable, 5, source_route_failed) \
+ _ (destination_unreachable, 6, destination_network_unknown) \
+ _ (destination_unreachable, 7, destination_host_unknown) \
+ _ (destination_unreachable, 8, source_host_isolated) \
+ _ (destination_unreachable, 9, network_administratively_prohibited) \
+ _ (destination_unreachable, 10, host_administratively_prohibited) \
+ _ (destination_unreachable, 11, network_unreachable_for_type_of_service) \
+ _ (destination_unreachable, 12, host_unreachable_for_type_of_service) \
+ _ (destination_unreachable, 13, communication_administratively_prohibited) \
+ _ (destination_unreachable, 14, host_precedence_violation) \
+ _ (destination_unreachable, 15, precedence_cutoff_in_effect) \
+ _ (redirect, 0, network_redirect) \
+ _ (redirect, 1, host_redirect) \
+ _ (redirect, 2, type_of_service_and_network_redirect) \
+ _ (redirect, 3, type_of_service_and_host_redirect) \
+ _ (router_advertisement, 0, normal_router_advertisement) \
+ _ (router_advertisement, 16, does_not_route_common_traffic) \
+ _ (time_exceeded, 0, ttl_exceeded_in_transit) \
+ _ (time_exceeded, 1, fragment_reassembly_time_exceeded) \
+ _ (parameter_problem, 0, pointer_indicates_error) \
+ _ (parameter_problem, 1, missing_required_option) \
+ _ (parameter_problem, 2, bad_length)
+
+/* ICMPv6 */
+#define foreach_icmp6_type \
+ _ (1, destination_unreachable) \
+ _ (2, packet_too_big) \
+ _ (3, time_exceeded) \
+ _ (4, parameter_problem) \
+ _ (128, echo_request) \
+ _ (129, echo_reply) \
+ _ (130, multicast_listener_request) \
+ _ (131, multicast_listener_report) \
+ _ (132, multicast_listener_done) \
+ _ (133, router_solicitation) \
+ _ (134, router_advertisement) \
+ _ (135, neighbor_solicitation) \
+ _ (136, neighbor_advertisement) \
+ _ (137, redirect) \
+ _ (138, router_renumbering) \
+ _ (139, node_information_request) \
+ _ (140, node_information_response) \
+ _ (141, inverse_neighbor_solicitation) \
+ _ (142, inverse_neighbor_advertisement) \
+ _ (143, multicast_listener_report_v2) \
+ _ (144, home_agent_address_discovery_request) \
+ _ (145, home_agent_address_discovery_reply) \
+ _ (146, mobile_prefix_solicitation) \
+ _ (147, mobile_prefix_advertisement) \
+ _ (148, certification_path_solicitation) \
+ _ (149, certification_path_advertisement) \
+ _ (151, multicast_router_advertisement) \
+ _ (152, multicast_router_solicitation) \
+ _ (153, multicast_router_termination) \
+ _ (154, fmipv6_messages)
+
+#define foreach_icmp6_code \
+ _ (destination_unreachable, 0, no_route_to_destination) \
+ _ (destination_unreachable, 1, destination_administratively_prohibited) \
+ _ (destination_unreachable, 2, beyond_scope_of_source_address) \
+ _ (destination_unreachable, 3, address_unreachable) \
+ _ (destination_unreachable, 4, port_unreachable) \
+ _ (destination_unreachable, 5, source_address_failed_policy) \
+ _ (destination_unreachable, 6, reject_route_to_destination) \
+ _ (time_exceeded, 0, ttl_exceeded_in_transit) \
+ _ (time_exceeded, 1, fragment_reassembly_time_exceeded) \
+ _ (parameter_problem, 0, erroneous_header_field) \
+ _ (parameter_problem, 1, unrecognized_next_header) \
+ _ (parameter_problem, 2, unrecognized_option) \
+ _ (router_renumbering, 0, command) \
+ _ (router_renumbering, 1, result) \
+ _ (node_information_request, 0, data_contains_ip6_address) \
+ _ (node_information_request, 1, data_contains_name) \
+ _ (node_information_request, 2, data_contains_ip4_address) \
+ _ (node_information_response, 0, success) \
+ _ (node_information_response, 1, failed) \
+ _ (node_information_response, 2, unknown_request)
+
+typedef enum
+{
+#define _(n,f) ICMP4_##f = n,
+ foreach_icmp4_type
+#undef _
+} icmp4_type_t;
+
+typedef enum
+{
+#define _(t,n,f) ICMP4_##t##_##f = n,
+ foreach_icmp4_code
+#undef _
+} icmp4_code_t;
+
+typedef enum
+{
+#define _(n,f) ICMP6_##f = n,
+ foreach_icmp6_type
+#undef _
+} icmp6_type_t;
+
+typedef enum
+{
+#define _(t,n,f) ICMP6_##t##_##f = n,
+ foreach_icmp6_code
+#undef _
+} icmp6_code_t;
+
+typedef CLIB_PACKED (struct
+ {
+ u8 type;
+ u8 code;
+ /* IP checksum of icmp header plus data which follows. */
+ u16 checksum;
+ }) icmp46_header_t;
+
+/* ip6 neighbor discovery */
+#define foreach_icmp6_neighbor_discovery_option \
+ _ (1, source_link_layer_address) \
+ _ (2, target_link_layer_address) \
+ _ (3, prefix_information) \
+ _ (4, redirected_header) \
+ _ (5, mtu) \
+ _ (6, nbma_shortcut_limit) \
+ _ (7, advertisement_interval) \
+ _ (8, home_agent_information) \
+ _ (9, source_address_list) \
+ _ (10, target_address_list) \
+ _ (11, cryptographically_generated_address) \
+ _ (12, rsa_signature) \
+ _ (13, timestamp) \
+ _ (14, nonce) \
+ _ (15, trust_anchor) \
+ _ (16, certificate) \
+ _ (17, ip_address_and_prefix) \
+ _ (18, new_router_prefix_information) \
+ _ (19, mobile_link_layer_address) \
+ _ (20, neighbor_advertisement_acknowledgment) \
+ _ (23, map) \
+ _ (24, route_information) \
+ _ (25, recursive_dns_server) \
+ _ (26, ra_flags_extension) \
+ _ (27, handover_key_request) \
+ _ (28, handover_key_reply) \
+ _ (29, handover_assist_information) \
+ _ (30, mobile_node_identifier) \
+ _ (31, dns_search_list) \
+ _ (138, card_request) \
+ _ (139, card_reply)
+
+typedef enum icmp6_neighbor_discovery_option_type
+{
+#define _(n,f) ICMP6_NEIGHBOR_DISCOVERY_OPTION_##f = n,
+ foreach_icmp6_neighbor_discovery_option
+#undef _
+} icmp6_neighbor_discovery_option_type_t;
+
+typedef CLIB_PACKED (struct
+ {
+ /* Option type. */
+ u8 type;
+ /* Length of this header plus option data in 8 byte units. */
+ u8 n_data_u64s;
+ /* Option data follows. */
+ u8 data[0];
+ }) icmp6_neighbor_discovery_option_header_t;
+
+typedef CLIB_PACKED (struct
+ {
+ icmp6_neighbor_discovery_option_header_t header;
+ u8 dst_address_length;
+ u8 flags;
+#define ICMP6_NEIGHBOR_DISCOVERY_PREFIX_INFORMATION_FLAG_ON_LINK (1 << 7)
+#define ICMP6_NEIGHBOR_DISCOVERY_PREFIX_INFORMATION_AUTO (1 << 6)
+ u32 valid_time;
+ u32 preferred_time;
+ u32 unused; ip6_address_t dst_address;
+ }) icmp6_neighbor_discovery_prefix_information_option_t;
+
+typedef CLIB_PACKED (struct
+ {
+ u8 type;
+ u8 aux_data_len_u32s;
+ u16 num_sources;
+ ip6_address_t mcast_addr; ip6_address_t source_addr[0];
+ }) icmp6_multicast_address_record_t;
+
+typedef CLIB_PACKED (struct
+ {
+ ip6_hop_by_hop_ext_t ext_hdr;
+ ip6_router_alert_option_t alert;
+ ip6_padN_option_t pad;
+ icmp46_header_t icmp;
+ u16 rsvd;
+ u16 num_addr_records;
+ icmp6_multicast_address_record_t records[0];
+ }) icmp6_multicast_listener_report_header_t;
+
+typedef CLIB_PACKED (struct
+ {
+ icmp6_neighbor_discovery_option_header_t header;
+ u8 reserved[6];
+ /* IP6 header plus payload follows. */
+ u8 data[0];
+ }) icmp6_neighbor_discovery_redirected_header_option_t;
+
+typedef CLIB_PACKED (struct
+ {
+ icmp6_neighbor_discovery_option_header_t header;
+ u16 unused; u32 mtu;
+ }) icmp6_neighbor_discovery_mtu_option_t;
+
+typedef CLIB_PACKED (struct
+ {
+ icmp6_neighbor_discovery_option_header_t header;
+ u8 ethernet_address[6];
+ })
+ icmp6_neighbor_discovery_ethernet_link_layer_address_option_t;
+
+typedef CLIB_PACKED (struct
+ {
+ icmp6_neighbor_discovery_option_header_t header;
+ u8 max_l2_address[6 + 8];
+ })
+ icmp6_neighbor_discovery_max_link_layer_address_option_t;
+
+/* Generic neighbor discover header. Used for router solicitations,
+ etc. */
+typedef CLIB_PACKED (struct
+ {
+ icmp46_header_t icmp; u32 reserved_must_be_zero;
+ }) icmp6_neighbor_discovery_header_t;
+
+/* Router advertisement packet formats. */
+typedef CLIB_PACKED (struct
+ {
+ icmp46_header_t icmp;
+ /* Current hop limit to use for outgoing packets. */
+ u8 current_hop_limit;
+ u8 flags;
+#define ICMP6_ROUTER_DISCOVERY_FLAG_ADDRESS_CONFIG_VIA_DHCP (1 << 7)
+#define ICMP6_ROUTER_DISCOVERY_FLAG_OTHER_CONFIG_VIA_DHCP (1 << 6)
+ /* Zero means unspecified. */
+ u16 router_lifetime_in_sec;
+ /* Zero means unspecified. */
+ u32 neighbor_reachable_time_in_msec;
+ /* Zero means unspecified. */
+ u32
+ time_in_msec_between_retransmitted_neighbor_solicitations;
+ /* Options that may follow: source_link_layer_address, mtu, prefix_information. */
+ }) icmp6_router_advertisement_header_t;
+
+/* Neighbor solicitation/advertisement header. */
+typedef CLIB_PACKED (struct
+ {
+ icmp46_header_t icmp;
+ /* Zero for solicitation; flags for advertisement. */
+ u32 advertisement_flags;
+ /* Set when sent by a router. */
+#define ICMP6_NEIGHBOR_ADVERTISEMENT_FLAG_ROUTER (1 << 31)
+ /* Set when response to solicitation. */
+#define ICMP6_NEIGHBOR_ADVERTISEMENT_FLAG_SOLICITED (1 << 30)
+#define ICMP6_NEIGHBOR_ADVERTISEMENT_FLAG_OVERRIDE (1 << 29)
+ ip6_address_t target_address;
+ /* Options that may follow: source_link_layer_address
+ (for solicitation) target_link_layer_address (for advertisement). */
+ }) icmp6_neighbor_solicitation_or_advertisement_header_t;
+
+typedef CLIB_PACKED (struct
+ {
+ icmp46_header_t icmp;
+ u32 reserved_must_be_zero;
+ /* Better next hop to use for given destination. */
+ ip6_address_t better_next_hop_address;
+ ip6_address_t dst_address;
+ /* Options that may follow: target_link_layer_address,
+ redirected_header. */
+ }) icmp6_redirect_header_t;
+
+/* Solicitation/advertisement packet format for ethernet. */
+typedef CLIB_PACKED (struct
+ {
+ ip6_header_t ip;
+ icmp6_neighbor_solicitation_or_advertisement_header_t
+ neighbor;
+ icmp6_neighbor_discovery_ethernet_link_layer_address_option_t
+ link_layer_option;
+ }) icmp6_neighbor_solicitation_header_t;
+
+/* Router solicitation packet format for ethernet. */
+typedef CLIB_PACKED (struct
+ {
+ ip6_header_t ip;
+ icmp6_neighbor_discovery_header_t neighbor;
+ icmp6_neighbor_discovery_ethernet_link_layer_address_option_t
+ link_layer_option;
+ }) icmp6_router_solicitation_header_t;
+
+/* Router advertisement packet format for ethernet. */
+typedef CLIB_PACKED (struct
+ {
+ ip6_header_t ip;
+ icmp6_router_advertisement_header_t router;
+ icmp6_neighbor_discovery_ethernet_link_layer_address_option_t
+ link_layer_option;
+ icmp6_neighbor_discovery_mtu_option_t mtu_option;
+ icmp6_neighbor_discovery_prefix_information_option_t
+ prefix[0];
+ }) icmp6_router_advertisement_packet_t;
+
+/* Multicast listener report packet format for ethernet. */
+typedef CLIB_PACKED (struct
+ {
+ ip6_header_t ip;
+ icmp6_multicast_listener_report_header_t report_hdr;
+ }) icmp6_multicast_listener_report_packet_t;
+
+#endif /* included_vnet_icmp46_packet_h */
diff --git a/src/vnet/ip/icmp6.c b/src/vnet/ip/icmp6.c
new file mode 100644
index 00000000000..70696d0c6cb
--- /dev/null
+++ b/src/vnet/ip/icmp6.c
@@ -0,0 +1,882 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/icmp6.c: ip6 icmp
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/ip/ip.h>
+#include <vnet/pg/pg.h>
+
+static u8 *
+format_ip6_icmp_type_and_code (u8 * s, va_list * args)
+{
+ icmp6_type_t type = va_arg (*args, int);
+ u8 code = va_arg (*args, int);
+ char *t = 0;
+
+#define _(n,f) case n: t = #f; break;
+
+ switch (type)
+ {
+ foreach_icmp6_type;
+
+ default:
+ break;
+ }
+
+#undef _
+
+ if (!t)
+ return format (s, "unknown 0x%x", type);
+
+ s = format (s, "%s", t);
+
+ t = 0;
+ switch ((type << 8) | code)
+ {
+#define _(a,n,f) case (ICMP6_##a << 8) | (n): t = #f; break;
+
+ foreach_icmp6_code;
+
+#undef _
+ }
+
+ if (t)
+ s = format (s, " %s", t);
+
+ return s;
+}
+
+static u8 *
+format_icmp6_header (u8 * s, va_list * args)
+{
+ icmp46_header_t *icmp = va_arg (*args, icmp46_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+
+ /* Nothing to do. */
+ if (max_header_bytes < sizeof (icmp[0]))
+ return format (s, "ICMP header truncated");
+
+ s = format (s, "ICMP %U checksum 0x%x",
+ format_ip6_icmp_type_and_code, icmp->type, icmp->code,
+ clib_net_to_host_u16 (icmp->checksum));
+
+ if (max_header_bytes >=
+ sizeof (icmp6_neighbor_solicitation_or_advertisement_header_t) &&
+ (icmp->type == ICMP6_neighbor_solicitation ||
+ icmp->type == ICMP6_neighbor_advertisement))
+ {
+ icmp6_neighbor_solicitation_or_advertisement_header_t *icmp6_nd =
+ (icmp6_neighbor_solicitation_or_advertisement_header_t *) icmp;
+ s = format (s, "\n target address %U",
+ format_ip6_address, &icmp6_nd->target_address);
+ }
+
+ return s;
+}
+
+u8 *
+format_icmp6_input_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ icmp6_input_trace_t *t = va_arg (*va, icmp6_input_trace_t *);
+
+ s = format (s, "%U",
+ format_ip6_header, t->packet_data, sizeof (t->packet_data));
+
+ return s;
+}
+
+static char *icmp_error_strings[] = {
+#define _(f,s) s,
+ foreach_icmp6_error
+#undef _
+};
+
+typedef enum
+{
+ ICMP_INPUT_NEXT_DROP,
+ ICMP_INPUT_N_NEXT,
+} icmp_input_next_t;
+
+typedef struct
+{
+ uword *type_and_code_by_name;
+
+ uword *type_by_name;
+
+ /* Vector dispatch table indexed by [icmp type]. */
+ u8 input_next_index_by_type[256];
+
+ /* Max valid code indexed by icmp type. */
+ u8 max_valid_code_by_type[256];
+
+ /* hop_limit must be >= this value for this icmp type. */
+ u8 min_valid_hop_limit_by_type[256];
+
+ u8 min_valid_length_by_type[256];
+} icmp6_main_t;
+
+icmp6_main_t icmp6_main;
+
+static uword
+ip6_icmp_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ icmp6_main_t *im = &icmp6_main;
+ u32 *from, *to_next;
+ u32 n_left_from, n_left_to_next, next_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
+ /* stride */ 1,
+ sizeof (icmp6_input_trace_t));
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *b0;
+ ip6_header_t *ip0;
+ icmp46_header_t *icmp0;
+ icmp6_type_t type0;
+ u32 bi0, next0, error0, len0;
+
+ bi0 = to_next[0] = from[0];
+
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (b0);
+ icmp0 = ip6_next_header (ip0);
+ type0 = icmp0->type;
+
+ error0 = ICMP6_ERROR_NONE;
+
+ next0 = im->input_next_index_by_type[type0];
+ error0 =
+ next0 == ICMP_INPUT_NEXT_DROP ? ICMP6_ERROR_UNKNOWN_TYPE : error0;
+
+ /* Check code is valid for type. */
+ error0 =
+ icmp0->code >
+ im->max_valid_code_by_type[type0] ?
+ ICMP6_ERROR_INVALID_CODE_FOR_TYPE : error0;
+
+ /* Checksum is already validated by ip6_local node so we don't need to check that. */
+
+ /* Check that hop limit == 255 for certain types. */
+ error0 =
+ ip0->hop_limit <
+ im->min_valid_hop_limit_by_type[type0] ?
+ ICMP6_ERROR_INVALID_HOP_LIMIT_FOR_TYPE : error0;
+
+ len0 = clib_net_to_host_u16 (ip0->payload_length);
+ error0 =
+ len0 <
+ im->min_valid_length_by_type[type0] ?
+ ICMP6_ERROR_LENGTH_TOO_SMALL_FOR_TYPE : error0;
+
+ b0->error = node->errors[error0];
+
+ next0 = error0 != ICMP6_ERROR_NONE ? ICMP_INPUT_NEXT_DROP : next0;
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_icmp_input_node) = {
+ .function = ip6_icmp_input,
+ .name = "ip6-icmp-input",
+
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_icmp6_input_trace,
+
+ .n_errors = ARRAY_LEN (icmp_error_strings),
+ .error_strings = icmp_error_strings,
+
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [ICMP_INPUT_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+typedef enum
+{
+ ICMP6_ECHO_REQUEST_NEXT_LOOKUP,
+ ICMP6_ECHO_REQUEST_NEXT_OUTPUT,
+ ICMP6_ECHO_REQUEST_N_NEXT,
+} icmp6_echo_request_next_t;
+
+static uword
+ip6_icmp_echo_request (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 *from, *to_next;
+ u32 n_left_from, n_left_to_next, next_index;
+ ip6_main_t *im = &ip6_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
+ /* stride */ 1,
+ sizeof (icmp6_input_trace_t));
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 2 && n_left_to_next > 2)
+ {
+ vlib_buffer_t *p0, *p1;
+ ip6_header_t *ip0, *ip1;
+ icmp46_header_t *icmp0, *icmp1;
+ ip6_address_t tmp0, tmp1;
+ ip_csum_t sum0, sum1;
+ u32 bi0, bi1;
+ u32 fib_index0, fib_index1;
+ u32 next0 = ICMP6_ECHO_REQUEST_NEXT_LOOKUP;
+ u32 next1 = ICMP6_ECHO_REQUEST_NEXT_LOOKUP;
+
+ bi0 = to_next[0] = from[0];
+ bi1 = to_next[1] = from[1];
+
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ p0 = vlib_get_buffer (vm, bi0);
+ p1 = vlib_get_buffer (vm, bi1);
+ ip0 = vlib_buffer_get_current (p0);
+ ip1 = vlib_buffer_get_current (p1);
+ icmp0 = ip6_next_header (ip0);
+ icmp1 = ip6_next_header (ip1);
+
+ /* Change icmp type to echo reply and update icmp checksum. */
+ sum0 = icmp0->checksum;
+ sum1 = icmp1->checksum;
+
+ ASSERT (icmp0->type == ICMP6_echo_request);
+ ASSERT (icmp1->type == ICMP6_echo_request);
+ sum0 = ip_csum_update (sum0, ICMP6_echo_request, ICMP6_echo_reply,
+ icmp46_header_t, type);
+ sum1 = ip_csum_update (sum1, ICMP6_echo_request, ICMP6_echo_reply,
+ icmp46_header_t, type);
+
+ icmp0->checksum = ip_csum_fold (sum0);
+ icmp1->checksum = ip_csum_fold (sum1);
+
+ icmp0->type = ICMP6_echo_reply;
+ icmp1->type = ICMP6_echo_reply;
+
+ /* Swap source and destination address. */
+ tmp0 = ip0->src_address;
+ tmp1 = ip1->src_address;
+
+ ip0->src_address = ip0->dst_address;
+ ip1->src_address = ip1->dst_address;
+
+ ip0->dst_address = tmp0;
+ ip1->dst_address = tmp1;
+
+ /* New hop count. */
+ ip0->hop_limit = im->host_config.ttl;
+ ip1->hop_limit = im->host_config.ttl;
+
+ if (ip6_address_is_link_local_unicast (&ip0->dst_address))
+ {
+ ethernet_header_t *eth0;
+ u8 tmp_mac[6];
+ /* For link local, reuse current MAC header by swapping
+ * SMAC to DMAC instead of IP6 lookup since link local
+ * is not in the IP6 FIB */
+ vlib_buffer_reset (p0);
+ eth0 = vlib_buffer_get_current (p0);
+ clib_memcpy (tmp_mac, eth0->dst_address, 6);
+ clib_memcpy (eth0->dst_address, eth0->src_address, 6);
+ clib_memcpy (eth0->src_address, tmp_mac, 6);
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] =
+ vnet_buffer (p0)->sw_if_index[VLIB_RX];
+ next0 = ICMP6_ECHO_REQUEST_NEXT_OUTPUT;
+ }
+ else
+ {
+ /* Determine the correct lookup fib indices... */
+ fib_index0 = vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p0)->sw_if_index[VLIB_RX]);
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = fib_index0;
+ }
+
+ if (ip6_address_is_link_local_unicast (&ip1->dst_address))
+ {
+ ethernet_header_t *eth1;
+ u8 tmp_mac[6];
+ /* For link local, reuse current MAC header by swapping
+ * SMAC to DMAC instead of IP6 lookup since link local
+ * is not in the IP6 FIB */
+ vlib_buffer_reset (p1);
+ eth1 = vlib_buffer_get_current (p1);
+ clib_memcpy (tmp_mac, eth1->dst_address, 6);
+ clib_memcpy (eth1->dst_address, eth1->src_address, 6);
+ clib_memcpy (eth1->src_address, tmp_mac, 6);
+ vnet_buffer (p1)->sw_if_index[VLIB_TX] =
+ vnet_buffer (p1)->sw_if_index[VLIB_RX];
+ next1 = ICMP6_ECHO_REQUEST_NEXT_OUTPUT;
+ }
+ else
+ {
+ /* Determine the correct lookup fib indices... */
+ fib_index1 = vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p1)->sw_if_index[VLIB_RX]);
+ vnet_buffer (p1)->sw_if_index[VLIB_TX] = fib_index1;
+ }
+
+ vnet_buffer (p0)->sw_if_index[VLIB_RX]
+ = vnet_main.local_interface_sw_if_index;
+ vnet_buffer (p1)->sw_if_index[VLIB_RX]
+ = vnet_main.local_interface_sw_if_index;
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ /* if next0==next1==next_index then nothing special needs to be done */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ ip6_header_t *ip0;
+ icmp46_header_t *icmp0;
+ u32 bi0;
+ ip6_address_t tmp0;
+ ip_csum_t sum0;
+ u32 fib_index0;
+ u32 next0 = ICMP6_ECHO_REQUEST_NEXT_LOOKUP;
+
+ bi0 = to_next[0] = from[0];
+
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (p0);
+ icmp0 = ip6_next_header (ip0);
+
+ /* Change icmp type to echo reply and update icmp checksum. */
+ sum0 = icmp0->checksum;
+
+ ASSERT (icmp0->type == ICMP6_echo_request);
+ sum0 = ip_csum_update (sum0, ICMP6_echo_request, ICMP6_echo_reply,
+ icmp46_header_t, type);
+
+ icmp0->checksum = ip_csum_fold (sum0);
+
+ icmp0->type = ICMP6_echo_reply;
+
+ /* Swap source and destination address. */
+ tmp0 = ip0->src_address;
+ ip0->src_address = ip0->dst_address;
+ ip0->dst_address = tmp0;
+
+ ip0->hop_limit = im->host_config.ttl;
+
+ if (ip6_address_is_link_local_unicast (&ip0->dst_address))
+ {
+ ethernet_header_t *eth0;
+ u8 tmp_mac[6];
+ /* For link local, reuse current MAC header by swapping
+ * SMAC to DMAC instead of IP6 lookup since link local
+ * is not in the IP6 FIB */
+ vlib_buffer_reset (p0);
+ eth0 = vlib_buffer_get_current (p0);
+ clib_memcpy (tmp_mac, eth0->dst_address, 6);
+ clib_memcpy (eth0->dst_address, eth0->src_address, 6);
+ clib_memcpy (eth0->src_address, tmp_mac, 6);
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] =
+ vnet_buffer (p0)->sw_if_index[VLIB_RX];
+ next0 = ICMP6_ECHO_REQUEST_NEXT_OUTPUT;
+ }
+ else
+ {
+ fib_index0 = vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p0)->sw_if_index[VLIB_RX]);
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = fib_index0;
+ }
+ vnet_buffer (p0)->sw_if_index[VLIB_RX]
+ = vnet_main.local_interface_sw_if_index;
+
+ /* Verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_error_count (vm, ip6_icmp_input_node.index,
+ ICMP6_ERROR_ECHO_REPLIES_SENT, frame->n_vectors);
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_icmp_echo_request_node,static) = {
+ .function = ip6_icmp_echo_request,
+ .name = "ip6-icmp-echo-request",
+
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_icmp6_input_trace,
+
+ .n_next_nodes = ICMP6_ECHO_REQUEST_N_NEXT,
+ .next_nodes = {
+ [ICMP6_ECHO_REQUEST_NEXT_LOOKUP] = "ip6-lookup",
+ [ICMP6_ECHO_REQUEST_NEXT_OUTPUT] = "interface-output",
+ },
+};
+/* *INDENT-ON* */
+
+typedef enum
+{
+ IP6_ICMP_ERROR_NEXT_DROP,
+ IP6_ICMP_ERROR_NEXT_LOOKUP,
+ IP6_ICMP_ERROR_N_NEXT,
+} ip6_icmp_error_next_t;
+
+void
+icmp6_error_set_vnet_buffer (vlib_buffer_t * b, u8 type, u8 code, u32 data)
+{
+ vnet_buffer (b)->ip.icmp.type = type;
+ vnet_buffer (b)->ip.icmp.code = code;
+ vnet_buffer (b)->ip.icmp.data = data;
+}
+
+static u8
+icmp6_icmp_type_to_error (u8 type)
+{
+ switch (type)
+ {
+ case ICMP6_destination_unreachable:
+ return ICMP6_ERROR_DEST_UNREACH_SENT;
+ case ICMP6_packet_too_big:
+ return ICMP6_ERROR_PACKET_TOO_BIG_SENT;
+ case ICMP6_time_exceeded:
+ return ICMP6_ERROR_TTL_EXPIRE_SENT;
+ case ICMP6_parameter_problem:
+ return ICMP6_ERROR_PARAM_PROBLEM_SENT;
+ default:
+ return ICMP6_ERROR_DROP;
+ }
+}
+
+static uword
+ip6_icmp_error (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 *from, *to_next;
+ uword n_left_from, n_left_to_next;
+ ip6_icmp_error_next_t next_index;
+ ip6_main_t *im = &ip6_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
+ /* stride */ 1,
+ sizeof (icmp6_input_trace_t));
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0 = from[0];
+ u32 next0 = IP6_ICMP_ERROR_NEXT_LOOKUP;
+ u8 error0 = ICMP6_ERROR_NONE;
+ vlib_buffer_t *p0;
+ ip6_header_t *ip0, *out_ip0;
+ icmp46_header_t *icmp0;
+ u32 sw_if_index0, if_add_index0;
+ int bogus_length;
+
+ /* Speculatively enqueue p0 to the current next frame */
+ to_next[0] = pi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip0 = vlib_buffer_get_current (p0);
+ sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
+
+ /* RFC4443 says to keep as much of the original packet as possible
+ * within the minimum MTU. We cheat "a little" here by keeping whatever fits
+ * in the first buffer, to be more efficient */
+ if (PREDICT_FALSE (p0->total_length_not_including_first_buffer))
+ { /* clear current_length of all other buffers in chain */
+ vlib_buffer_t *b = p0;
+ p0->total_length_not_including_first_buffer = 0;
+ while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ b = vlib_get_buffer (vm, b->next_buffer);
+ b->current_length = 0;
+ }
+ }
+ p0->current_length =
+ p0->current_length > 1280 ? 1280 : p0->current_length;
+
+ /* Add IP header and ICMPv6 header including a 4 byte data field */
+ vlib_buffer_advance (p0,
+ -sizeof (ip6_header_t) -
+ sizeof (icmp46_header_t) - 4);
+ out_ip0 = vlib_buffer_get_current (p0);
+ icmp0 = (icmp46_header_t *) & out_ip0[1];
+
+ /* Fill ip header fields */
+ out_ip0->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 (0x6 << 28);
+
+ out_ip0->payload_length =
+ clib_host_to_net_u16 (p0->current_length - sizeof (ip6_header_t));
+ out_ip0->protocol = IP_PROTOCOL_ICMP6;
+ out_ip0->hop_limit = 0xff;
+ out_ip0->dst_address = ip0->src_address;
+ if_add_index0 =
+ lm->if_address_pool_index_by_sw_if_index[sw_if_index0];
+ if (PREDICT_TRUE (if_add_index0 != ~0))
+ {
+ ip_interface_address_t *if_add =
+ pool_elt_at_index (lm->if_address_pool, if_add_index0);
+ ip6_address_t *if_ip =
+ ip_interface_address_get_address (lm, if_add);
+ out_ip0->src_address = *if_ip;
+ }
+ else /* interface has no IP6 address - should not happen */
+ {
+ next0 = IP6_ICMP_ERROR_NEXT_DROP;
+ error0 = ICMP6_ERROR_DROP;
+ }
+
+ /* Fill icmp header fields */
+ icmp0->type = vnet_buffer (p0)->ip.icmp.type;
+ icmp0->code = vnet_buffer (p0)->ip.icmp.code;
+ *((u32 *) (icmp0 + 1)) =
+ clib_host_to_net_u32 (vnet_buffer (p0)->ip.icmp.data);
+ icmp0->checksum = 0;
+ icmp0->checksum =
+ ip6_tcp_udp_icmp_compute_checksum (vm, p0, out_ip0,
+ &bogus_length);
+
+ /* Update error status */
+ if (error0 == ICMP6_ERROR_NONE)
+ error0 = icmp6_icmp_type_to_error (icmp0->type);
+ vlib_error_count (vm, node->node_index, error0, 1);
+
+ /* Verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_icmp_error_node) = {
+ .function = ip6_icmp_error,
+ .name = "ip6-icmp-error",
+ .vector_size = sizeof (u32),
+
+ .n_errors = ARRAY_LEN (icmp_error_strings),
+ .error_strings = icmp_error_strings,
+
+ .n_next_nodes = IP6_ICMP_ERROR_N_NEXT,
+ .next_nodes = {
+ [IP6_ICMP_ERROR_NEXT_DROP] = "error-drop",
+ [IP6_ICMP_ERROR_NEXT_LOOKUP] = "ip6-lookup",
+ },
+
+ .format_trace = format_icmp6_input_trace,
+};
+/* *INDENT-ON* */
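+
+/* Editorial note: a hedged usage sketch, not part of the original patch.
+ * A forwarding node that wants to originate an ICMP6 error stashes the
+ * type/code/data words in the buffer opaque and then hands the buffer to
+ * the "ip6-icmp-error" node above, which prepends the IP6/ICMP6 headers
+ * and routes the result. The literal code 0 below stands for "hop limit
+ * exceeded in transit" (RFC 4443); the exact enum name is left out here. */
+static inline void
+ip6_hop_limit_exceeded_sketch (vlib_buffer_t * b)
+{
+  icmp6_error_set_vnet_buffer (b, ICMP6_time_exceeded,
+                               0 /* hop limit exceeded in transit */,
+                               0 /* unused data word */);
+  /* ... then enqueue b on the arc towards "ip6-icmp-error" ... */
+}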
+
+
+static uword
+unformat_icmp_type_and_code (unformat_input_t * input, va_list * args)
+{
+ icmp46_header_t *h = va_arg (*args, icmp46_header_t *);
+ icmp6_main_t *cm = &icmp6_main;
+ u32 i;
+
+ if (unformat_user (input, unformat_vlib_number_by_name,
+ cm->type_and_code_by_name, &i))
+ {
+ h->type = (i >> 8) & 0xff;
+ h->code = (i >> 0) & 0xff;
+ }
+ else if (unformat_user (input, unformat_vlib_number_by_name,
+ cm->type_by_name, &i))
+ {
+ h->type = i;
+ h->code = 0;
+ }
+ else
+ return 0;
+
+ return 1;
+}
+
+static void
+icmp6_pg_edit_function (pg_main_t * pg,
+ pg_stream_t * s,
+ pg_edit_group_t * g, u32 * packets, u32 n_packets)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ u32 ip_offset, icmp_offset;
+ int bogus_length;
+
+ icmp_offset = g->start_byte_offset;
+ ip_offset = (g - 1)->start_byte_offset;
+
+ while (n_packets >= 1)
+ {
+ vlib_buffer_t *p0;
+ ip6_header_t *ip0;
+ icmp46_header_t *icmp0;
+
+ p0 = vlib_get_buffer (vm, packets[0]);
+ n_packets -= 1;
+ packets += 1;
+
+ ASSERT (p0->current_data == 0);
+ ip0 = (void *) (p0->data + ip_offset);
+ icmp0 = (void *) (p0->data + icmp_offset);
+
+ icmp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, p0, ip0,
+ &bogus_length);
+ ASSERT (bogus_length == 0);
+ }
+}
+
+typedef struct
+{
+ pg_edit_t type, code;
+ pg_edit_t checksum;
+} pg_icmp46_header_t;
+
+always_inline void
+pg_icmp_header_init (pg_icmp46_header_t * p)
+{
+ /* Initialize fields that are not bit fields in the ICMP header. */
+#define _(f) pg_edit_init (&p->f, icmp46_header_t, f);
+ _(type);
+ _(code);
+ _(checksum);
+#undef _
+}
+
+static uword
+unformat_pg_icmp_header (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t *s = va_arg (*args, pg_stream_t *);
+ pg_icmp46_header_t *p;
+ u32 group_index;
+
+ p = pg_create_edit_group (s, sizeof (p[0]), sizeof (icmp46_header_t),
+ &group_index);
+ pg_icmp_header_init (p);
+
+ p->checksum.type = PG_EDIT_UNSPECIFIED;
+
+ {
+ icmp46_header_t tmp;
+
+ if (!unformat (input, "ICMP %U", unformat_icmp_type_and_code, &tmp))
+ goto error;
+
+ pg_edit_set_fixed (&p->type, tmp.type);
+ pg_edit_set_fixed (&p->code, tmp.code);
+ }
+
+ /* Parse options. */
+ while (1)
+ {
+ if (unformat (input, "checksum %U",
+ unformat_pg_edit, unformat_pg_number, &p->checksum))
+ ;
+
+ /* Can't parse input: try next protocol level. */
+ else
+ break;
+ }
+
+ if (!unformat_user (input, unformat_pg_payload, s))
+ goto error;
+
+ if (p->checksum.type == PG_EDIT_UNSPECIFIED)
+ {
+ pg_edit_group_t *g = pg_stream_get_group (s, group_index);
+ g->edit_function = icmp6_pg_edit_function;
+ g->edit_function_opaque = 0;
+ }
+
+ return 1;
+
+error:
+ /* Free up any edits we may have added. */
+ pg_free_edit_group (s);
+ return 0;
+}
+
+void
+icmp6_register_type (vlib_main_t * vm, icmp6_type_t type, u32 node_index)
+{
+ icmp6_main_t *im = &icmp6_main;
+
+ ASSERT ((int) type < ARRAY_LEN (im->input_next_index_by_type));
+ im->input_next_index_by_type[type]
+ = vlib_node_add_next (vm, ip6_icmp_input_node.index, node_index);
+}
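+
+/* Editorial note: a minimal usage sketch, not part of the original patch.
+ * A feature that terminates a particular ICMP6 type registers its own graph
+ * node so ip6-icmp-input dispatches matching packets straight to it; the
+ * node `my_nd_node' is hypothetical. */
+#if 0
+static clib_error_t *
+my_feature_init (vlib_main_t * vm)
+{
+  icmp6_register_type (vm, ICMP6_neighbor_solicitation, my_nd_node.index);
+  return 0;
+}
+VLIB_INIT_FUNCTION (my_feature_init);
+#endif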
+
+static clib_error_t *
+icmp6_init (vlib_main_t * vm)
+{
+ ip_main_t *im = &ip_main;
+ ip_protocol_info_t *pi;
+ icmp6_main_t *cm = &icmp6_main;
+ clib_error_t *error;
+
+ error = vlib_call_init_function (vm, ip_main_init);
+
+ if (error)
+ return error;
+
+ pi = ip_get_protocol_info (im, IP_PROTOCOL_ICMP6);
+ pi->format_header = format_icmp6_header;
+ pi->unformat_pg_edit = unformat_pg_icmp_header;
+
+ cm->type_by_name = hash_create_string (0, sizeof (uword));
+#define _(n,t) hash_set_mem (cm->type_by_name, #t, (n));
+ foreach_icmp6_type;
+#undef _
+
+ cm->type_and_code_by_name = hash_create_string (0, sizeof (uword));
+#define _(a,n,t) hash_set_mem (cm->type_and_code_by_name, #t, (n) | (ICMP6_##a << 8));
+ foreach_icmp6_code;
+#undef _
+
+ memset (cm->input_next_index_by_type,
+ ICMP_INPUT_NEXT_DROP, sizeof (cm->input_next_index_by_type));
+ memset (cm->max_valid_code_by_type, 0, sizeof (cm->max_valid_code_by_type));
+
+#define _(a,n,t) cm->max_valid_code_by_type[ICMP6_##a] = clib_max (cm->max_valid_code_by_type[ICMP6_##a], n);
+ foreach_icmp6_code;
+#undef _
+
+ memset (cm->min_valid_hop_limit_by_type, 0,
+ sizeof (cm->min_valid_hop_limit_by_type));
+ cm->min_valid_hop_limit_by_type[ICMP6_router_solicitation] = 255;
+ cm->min_valid_hop_limit_by_type[ICMP6_router_advertisement] = 255;
+ cm->min_valid_hop_limit_by_type[ICMP6_neighbor_solicitation] = 255;
+ cm->min_valid_hop_limit_by_type[ICMP6_neighbor_advertisement] = 255;
+ cm->min_valid_hop_limit_by_type[ICMP6_redirect] = 255;
+
+ memset (cm->min_valid_length_by_type, sizeof (icmp46_header_t),
+ sizeof (cm->min_valid_length_by_type));
+ cm->min_valid_length_by_type[ICMP6_router_solicitation] =
+ sizeof (icmp6_neighbor_discovery_header_t);
+ cm->min_valid_length_by_type[ICMP6_router_advertisement] =
+ sizeof (icmp6_router_advertisement_header_t);
+ cm->min_valid_length_by_type[ICMP6_neighbor_solicitation] =
+ sizeof (icmp6_neighbor_solicitation_or_advertisement_header_t);
+ cm->min_valid_length_by_type[ICMP6_neighbor_advertisement] =
+ sizeof (icmp6_neighbor_solicitation_or_advertisement_header_t);
+ cm->min_valid_length_by_type[ICMP6_redirect] =
+ sizeof (icmp6_redirect_header_t);
+
+ icmp6_register_type (vm, ICMP6_echo_request,
+ ip6_icmp_echo_request_node.index);
+
+ return vlib_call_init_function (vm, ip6_neighbor_init);
+}
+
+VLIB_INIT_FUNCTION (icmp6_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/icmp6.h b/src/vnet/ip/icmp6.h
new file mode 100644
index 00000000000..a426512ea2f
--- /dev/null
+++ b/src/vnet/ip/icmp6.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vnet_icmp6_h
+#define included_vnet_icmp6_h
+
+#define foreach_icmp6_error \
+ _ (NONE, "valid packets") \
+ _ (UNKNOWN_TYPE, "unknown type") \
+ _ (INVALID_CODE_FOR_TYPE, "invalid code for type") \
+ _ (INVALID_HOP_LIMIT_FOR_TYPE, "hop_limit != 255") \
+ _ (LENGTH_TOO_SMALL_FOR_TYPE, "payload length too small for type") \
+ _ (OPTIONS_WITH_ODD_LENGTH, \
+ "total option length not multiple of 8 bytes") \
+ _ (OPTION_WITH_ZERO_LENGTH, "option has zero length") \
+ _ (ECHO_REPLIES_SENT, "echo replies sent") \
+ _ (NEIGHBOR_SOLICITATION_SOURCE_NOT_ON_LINK, \
+ "neighbor solicitations from source not on link") \
+ _ (NEIGHBOR_SOLICITATION_SOURCE_UNKNOWN, \
+ "neighbor solicitations for unknown targets") \
+ _ (NEIGHBOR_ADVERTISEMENTS_TX, "neighbor advertisements sent") \
+ _ (NEIGHBOR_ADVERTISEMENTS_RX, "neighbor advertisements received") \
+ _ (ROUTER_SOLICITATION_SOURCE_NOT_ON_LINK, \
+ "router solicitations from source not on link") \
+ _ (ROUTER_SOLICITATION_UNSUPPORTED_INTF, \
+ "neighbor discovery unsupported interface") \
+ _ (ROUTER_SOLICITATION_RADV_NOT_CONFIG, \
+ "neighbor discovery not configured") \
+ _ (ROUTER_SOLICITATION_DEST_UNKNOWN, \
+ "router solicitations for unknown destination") \
+ _ (ROUTER_SOLICITATION_SOURCE_UNKNOWN, \
+ "router solicitations for unknown source") \
+ _ (ROUTER_ADVERTISEMENT_SOURCE_NOT_LINK_LOCAL, \
+ "router advertisement source not link local") \
+ _ (ROUTER_ADVERTISEMENTS_TX, "router advertisements sent") \
+ _ (ROUTER_ADVERTISEMENTS_RX, "router advertisements received") \
+ _ (DST_LOOKUP_MISS, "icmp6 dst address lookup misses") \
+ _ (DEST_UNREACH_SENT, "destination unreachable response sent") \
+ _ (PACKET_TOO_BIG_SENT, "packet too big response sent") \
+ _ (TTL_EXPIRE_SENT, "hop limit exceeded response sent") \
+ _ (PARAM_PROBLEM_SENT, "parameter problem response sent") \
+ _ (DROP, "error message dropped")
+
+
+typedef enum
+{
+#define _(f,s) ICMP6_ERROR_##f,
+ foreach_icmp6_error
+#undef _
+} icmp6_error_t;
+
+typedef struct
+{
+ u8 packet_data[64];
+} icmp6_input_trace_t;
+
+format_function_t format_icmp6_input_trace;
+void icmp6_register_type (vlib_main_t * vm, icmp6_type_t type,
+ u32 node_index);
+void icmp6_error_set_vnet_buffer (vlib_buffer_t * b, u8 type, u8 code,
+ u32 data);
+
+extern vlib_node_registration_t ip6_icmp_input_node;
+
+#endif /* included_vnet_icmp6_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/igmp_packet.h b/src/vnet/ip/igmp_packet.h
new file mode 100644
index 00000000000..503259ece7c
--- /dev/null
+++ b/src/vnet/ip/igmp_packet.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * igmp_packet.h: igmp packet format
+ *
+ * Copyright (c) 2011 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_vnet_igmp_packet_h
+#define included_vnet_igmp_packet_h
+
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+
+#define foreach_igmp_type \
+ _ (0x11, membership_query) \
+ _ (0x12, membership_report_v1) \
+ _ (0x13, dvmrp) \
+ _ (0x14, pim_v1) \
+ _ (0x15, cisco_trace) \
+ _ (0x16, membership_report_v2) \
+ _ (0x17, leave_group_v2) \
+ _ (0x1e, traceroute_response) \
+ _ (0x1f, traceroute_request) \
+ _ (0x22, membership_report_v3) \
+ _ (0x30, router_advertisement) \
+ _ (0x31, router_solicitation) \
+ _ (0x32, router_termination)
+
+typedef enum
+{
+#define _(n,f) IGMP_TYPE_##f = n,
+ foreach_igmp_type
+#undef _
+} igmp_type_t;
+
+typedef struct
+{
+ igmp_type_t type:8;
+
+ u8 code;
+
+ u16 checksum;
+} igmp_header_t;
+
+typedef struct
+{
+ /* membership_query, version <= 2 reports. */
+ igmp_header_t header;
+
+ /* Multicast destination address. */
+ ip4_address_t dst;
+} igmp_message_t;
+
+#define foreach_igmp_membership_group_v3_type \
+ _ (1, mode_is_filter_include) \
+ _ (2, mode_is_filter_exclude) \
+ _ (3, change_to_filter_include) \
+ _ (4, change_to_filter_exclude) \
+ _ (5, allow_new_sources) \
+ _ (6, block_old_sources)
+
+typedef enum
+{
+#define _(n,f) IGMP_MEMBERSHIP_GROUP_##f = n,
+ foreach_igmp_membership_group_v3_type
+#undef _
+} igmp_membership_group_v3_type_t;
+
+typedef struct
+{
+ igmp_membership_group_v3_type_t type:8;
+
+ /* Number of 32 bit words of aux data after source addresses. */
+ u8 n_aux_u32s;
+
+ /* Number of source addresses that follow. */
+ u16 n_src_addresses;
+
+ /* Destination multicast address. */
+ ip4_address_t dst_address;
+
+ ip4_address_t src_addresses[0];
+} igmp_membership_group_v3_t;
+
+always_inline igmp_membership_group_v3_t *
+igmp_membership_group_v3_next (igmp_membership_group_v3_t * g)
+{
+ /* Skip the fixed header, the source address list (the count is
+ * carried in network byte order) and any auxiliary data. */
+ return ((void *) g + sizeof (g[0])
+ + clib_net_to_host_u16 (g->n_src_addresses)
+ * sizeof (g->src_addresses[0])
+ + g->n_aux_u32s * sizeof (u32));
+}
+
+typedef struct
+{
+ /* Type 0x22. */
+ igmp_header_t header;
+
+ u16 unused;
+
+ /* Number of groups which follow. */
+ u16 n_groups;
+
+ igmp_membership_group_v3_t groups[0];
+} igmp_membership_report_v3_t;
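+
+/* Editorial note: a minimal traversal sketch, not part of the original
+ * patch. Group records are variable length, so a v3 report is walked with
+ * igmp_membership_group_v3_next() rather than by array indexing. */
+static inline void
+igmp_report_v3_walk_sketch (igmp_membership_report_v3_t * r)
+{
+  igmp_membership_group_v3_t *g = r->groups;
+  u16 i, n_groups = clib_net_to_host_u16 (r->n_groups);
+
+  for (i = 0; i < n_groups; i++)
+    {
+      /* g->type, g->dst_address and g->src_addresses[] describe one group. */
+      g = igmp_membership_group_v3_next (g);
+    }
+}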
+
+/* IP6 flavor of IGMP is called MLD which is embedded in ICMP6. */
+typedef struct
+{
+ /* Preceded by ICMP v6 header. */
+ u16 max_response_delay_in_milliseconds;
+ u16 reserved;
+ ip6_address_t dst;
+} mld_header_t;
+
+#endif /* included_vnet_igmp_packet_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip.api b/src/vnet/ip/ip.api
new file mode 100644
index 00000000000..c811e465ea9
--- /dev/null
+++ b/src/vnet/ip/ip.api
@@ -0,0 +1,434 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \file
+
+ This file defines vpp IP control-plane API messages which are generally
+ called through a shared memory interface.
+*/
+
+/** \brief Dump IP fib table
+ @param client_index - opaque cookie to identify the sender
+*/
+define ip_fib_dump
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief FIB path
+ @param sw_if_index - index of the interface
+ @param weight - The weight, for UCMP
+ @param is_local - local if non-zero, else remote
+ @param is_drop - Drop the packet
+ @param is_unreach - Drop the packet and rate limit send ICMP unreachable
+ @param is_prohibit - Drop the packet and rate limit send ICMP prohibited
+ @param afi - the afi of the next hop, IP46_TYPE_IP4=1, IP46_TYPE_IP6=2
+ @param next_hop[16] - the next hop address
+
+ WARNING: this type is replicated, pending cleanup completion
+*/
+typeonly manual_print manual_endian define fib_path
+{
+ u32 sw_if_index;
+ u32 weight;
+ u8 is_local;
+ u8 is_drop;
+ u8 is_unreach;
+ u8 is_prohibit;
+ u8 afi;
+ u8 next_hop[16];
+};
+
+/** \brief IP FIB table response
+ @param table_id - IP fib table id
+ @param address_length - mask length
+ @param address - ip4 prefix
+ @param count - the number of fib_path entries in path
+ @param path - array of fib_path structures
+*/
+manual_endian manual_print define ip_fib_details
+{
+ u32 context;
+ u32 table_id;
+ u8 address_length;
+ u8 address[4];
+ u32 count;
+ vl_api_fib_path_t path[count];
+};
+
+/** \brief Dump IP6 fib table
+ @param client_index - opaque cookie to identify the sender
+*/
+define ip6_fib_dump
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief IP6 FIB table response
+ @param table_id - IP6 fib table id
+ @param address_length - mask length
+ @param address - ip6 prefix
+ @param count - the number of fib_path entries in path
+ @param path - array of fib_path structures
+*/
+manual_endian manual_print define ip6_fib_details
+{
+ u32 context;
+ u32 table_id;
+ u8 address_length;
+ u8 address[16];
+ u32 count;
+ vl_api_fib_path_t path[count];
+};
+
+/** \brief Dump IP neighbors
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - the interface to dump neighbors
+ @param is_ipv6 - [1|0] to indicate if address family is ipv[6|4]
+*/
+define ip_neighbor_dump
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 is_ipv6;
+};
+
+/** \brief IP neighbor dump response
+ @param context - sender context which was passed in the request
+ @param is_static - [1|0] to indicate if neighbor is statically configured
+ @param is_ipv6 - [1|0] to indicate if address family is ipv[6|4]
+*/
+define ip_neighbor_details {
+ u32 context;
+ u32 is_static;
+ u8 is_ipv6;
+ u8 mac_address[6];
+ u8 ip_address[16];
+};
+
+/** \brief IP neighbor add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param vrf_id - vrf_id, only for IP4
+ @param sw_if_index - interface used to reach neighbor
+ @param is_add - 1 to add neighbor, 0 to delete
+ @param is_ipv6 - 1 for IPv6 neighbor, 0 for IPv4
+ @param is_static - [1|0] to indicate if neighbor is statically configured
+ @param mac_address - l2 address of the neighbor
+ @param dst_address - ip4 or ip6 address of the neighbor
+*/
+define ip_neighbor_add_del
+{
+ u32 client_index;
+ u32 context;
+ u32 vrf_id; /* only makes sense for ip4 */
+ u32 sw_if_index;
+ /* 1 = add, 0 = delete */
+ u8 is_add;
+ u8 is_ipv6;
+ u8 is_static;
+ u8 mac_address[6];
+ u8 dst_address[16];
+};
+
+/** \brief Reply for IP Neighbor add / delete request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define ip_neighbor_add_del_reply
+{
+ u32 context;
+ i32 retval;
+};
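+
+/** Editorial note: a client-side sketch, not part of the original patch.
+    It assumes the generated vl_api_ip_neighbor_add_del_t type and the usual
+    shared-memory client helpers (vl_msg_api_alloc / vl_msg_api_send_shmem):
+
+      vl_api_ip_neighbor_add_del_t *mp = vl_msg_api_alloc (sizeof (*mp));
+      memset (mp, 0, sizeof (*mp));
+      mp->_vl_msg_id = ntohs (VL_API_IP_NEIGHBOR_ADD_DEL);
+      mp->sw_if_index = ntohl (sw_if_index);
+      mp->is_add = 1;
+      clib_memcpy (mp->mac_address, mac, 6);
+      clib_memcpy (mp->dst_address, ip4, 4);
+      vl_msg_api_send_shmem (q, (u8 *) &mp);
+*/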
+
+/** \brief Set the ip flow hash config for a fib request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param vrf_id - vrf/fib id
+ @param is_ipv6 - if non-zero the fib is ip6, else ip4
+ @param src - if non-zero include src in flow hash
+ @param dst - if non-zero include dst in flow hash
+ @param sport - if non-zero include sport in flow hash
+ @param dport - if non-zero include dport in flow hash
+ @param proto - if non-zero include proto in flow hash
+ @param reverse - if non-zero reverse src and dst addresses/ports in flow hash
+*/
+define set_ip_flow_hash
+{
+ u32 client_index;
+ u32 context;
+ u32 vrf_id;
+ u8 is_ipv6;
+ u8 src;
+ u8 dst;
+ u8 sport;
+ u8 dport;
+ u8 proto;
+ u8 reverse;
+};
+
+/** \brief Set the ip flow hash config for a fib response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define set_ip_flow_hash_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPv6 router advertisement config request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param suppress -
+ @param managed -
+ @param other -
+ @param ll_option -
+ @param send_unicast -
+ @param cease -
+ @param is_no -
+ @param default_router -
+ @param max_interval -
+ @param min_interval -
+ @param lifetime -
+ @param initial_count -
+ @param initial_interval -
+*/
+define sw_interface_ip6nd_ra_config
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 suppress;
+ u8 managed;
+ u8 other;
+ u8 ll_option;
+ u8 send_unicast;
+ u8 cease;
+ u8 is_no;
+ u8 default_router;
+ u32 max_interval;
+ u32 min_interval;
+ u32 lifetime;
+ u32 initial_count;
+ u32 initial_interval;
+};
+
+/** \brief IPv6 router advertisement config response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define sw_interface_ip6nd_ra_config_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPv6 router advertisement prefix config request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index -
+ @param address[] -
+ @param address_length -
+ @param use_default -
+ @param no_advertise -
+ @param off_link -
+ @param no_autoconfig -
+ @param no_onlink -
+ @param is_no -
+ @param val_lifetime -
+ @param pref_lifetime -
+*/
+define sw_interface_ip6nd_ra_prefix
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 address[16];
+ u8 address_length;
+ u8 use_default;
+ u8 no_advertise;
+ u8 off_link;
+ u8 no_autoconfig;
+ u8 no_onlink;
+ u8 is_no;
+ u32 val_lifetime;
+ u32 pref_lifetime;
+};
+
+/** \brief IPv6 router advertisement prefix config response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define sw_interface_ip6nd_ra_prefix_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPv6 interface enable / disable request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface used to reach neighbor
+ @param enable - if non-zero enable ip6 on interface, else disable
+*/
+define sw_interface_ip6_enable_disable
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 enable; /* set to true if enable */
+};
+
+/** \brief IPv6 interface enable / disable response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define sw_interface_ip6_enable_disable_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPv6 set link local address on interface request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface to set link local on
+ @param address[] - the new link local address
+ @param address_length - link local address length
+*/
+define sw_interface_ip6_set_link_local_address
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 address[16];
+ u8 address_length;
+};
+
+/** \brief IPv6 set link local address on interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - error code for the request
+*/
+define sw_interface_ip6_set_link_local_address_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Add / del route request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param next_hop_sw_if_index - interface used to reach the next hop
+ @param table_id - fib table / vrf id associated with the route
+ @param next_hop_table_id - fib table in which to resolve the next hop
+ @param classify_table_index -
+ @param create_vrf_if_needed -
+ @param is_add - 1 if adding the route, 0 if deleting
+ @param is_drop - Drop the packet
+ @param is_unreach - Drop the packet and rate limit send ICMP unreachable
+ @param is_prohibit - Drop the packet and rate limit send ICMP prohibited
+ @param is_ipv6 - 0 if an ip4 route, else ip6
+ @param is_local -
+ @param is_classify -
+ @param is_multipath - Set to 1 if this is a multipath route, else 0
+ @param not_last - set when this is not the last msg in a group of add/del msgs
+ @param next_hop_weight -
+ @param dst_address_length -
+ @param dst_address[16] -
+ @param next_hop_address[16] -
+ @param next_hop_n_out_labels - the number of labels in the label stack
+ @param next_hop_out_label_stack - the next-hop output label stack, outer most first
+ @param next_hop_via_label - The next-hop is a resolved via a local label
+*/
+define ip_add_del_route
+{
+ u32 client_index;
+ u32 context;
+ u32 next_hop_sw_if_index;
+ u32 table_id;
+ u32 classify_table_index;
+ u32 next_hop_table_id;
+ u8 create_vrf_if_needed;
+ u8 is_add;
+ u8 is_drop;
+ u8 is_unreach;
+ u8 is_prohibit;
+ u8 is_ipv6;
+ u8 is_local;
+ u8 is_classify;
+ u8 is_multipath;
+ u8 is_resolve_host;
+ u8 is_resolve_attached;
+ /* Is last/not-last message in group of multiple add/del messages. */
+ u8 not_last;
+ u8 next_hop_weight;
+ u8 dst_address_length;
+ u8 dst_address[16];
+ u8 next_hop_address[16];
+ u8 next_hop_n_out_labels;
+ u32 next_hop_via_label;
+ u32 next_hop_out_label_stack[next_hop_n_out_labels];
+};
+
+/** \brief Reply for add / del route request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define ip_add_del_route_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+define ip_address_details
+{
+ u32 client_index;
+ u32 context;
+ u8 ip[16];
+ u8 prefix_length;
+};
+
+define ip_address_dump
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 is_ipv6;
+};
+
+define ip_details
+{
+ u32 sw_if_index;
+ u32 context;
+};
+
+define ip_dump
+{
+ u32 client_index;
+ u32 context;
+ u8 is_ipv6;
+};
+
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip.h b/src/vnet/ip/ip.h
new file mode 100644
index 00000000000..02a1a9636ee
--- /dev/null
+++ b/src/vnet/ip/ip.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip.h: ip generic (4 or 6) main
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_ip_main_h
+#define included_ip_main_h
+
+#include <vppinfra/hash.h>
+#include <vppinfra/heap.h> /* adjacency heap */
+#include <vppinfra/ptclosure.h>
+
+#include <vnet/vnet.h>
+
+#include <vnet/ip/format.h>
+#include <vnet/ip/ip_packet.h>
+#include <vnet/ip/lookup.h>
+
+#include <vnet/ip/tcp_packet.h>
+#include <vnet/ip/udp_packet.h>
+#include <vnet/ip/icmp46_packet.h>
+
+#include <vnet/ip/ip4.h>
+#include <vnet/ip/ip4_error.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/icmp4.h>
+
+#include <vnet/ip/ip6.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/ip6_error.h>
+#include <vnet/ip/icmp6.h>
+#include <vnet/classify/vnet_classify.h>
+
+/* Per protocol info. */
+typedef struct
+{
+ /* Protocol name (also used as hash key). */
+ u8 *name;
+
+ /* Protocol number. */
+ ip_protocol_t protocol;
+
+ /* Format function for this IP protocol. */
+ format_function_t *format_header;
+
+ /* Parser for header. */
+ unformat_function_t *unformat_header;
+
+ /* Parser for per-protocol matches. */
+ unformat_function_t *unformat_match;
+
+ /* Parser for packet generator edits for this protocol. */
+ unformat_function_t *unformat_pg_edit;
+} ip_protocol_info_t;
+
+/* Per TCP/UDP port info. */
+typedef struct
+{
+ /* Port name (used as hash key). */
+ u8 *name;
+
+ /* UDP/TCP port number in network byte order. */
+ u16 port;
+
+ /* Port specific format function. */
+ format_function_t *format_header;
+
+ /* Parser for packet generator edits for this protocol. */
+ unformat_function_t *unformat_pg_edit;
+} tcp_udp_port_info_t;
+
+typedef struct
+{
+ /* Per IP protocol info. */
+ ip_protocol_info_t *protocol_infos;
+
+ /* Protocol info index hashed by 8 bit IP protocol. */
+ uword *protocol_info_by_protocol;
+
+ /* Hash table mapping IP protocol name (see protocols.def)
+ to protocol number. */
+ uword *protocol_info_by_name;
+
+ /* Per TCP/UDP port info. */
+ tcp_udp_port_info_t *port_infos;
+
+ /* Hash table from network-byte-order port to port info index. */
+ uword *port_info_by_port;
+
+ /* Hash table mapping TCP/UDP name to port info index. */
+ uword *port_info_by_name;
+} ip_main_t;
+
+extern ip_main_t ip_main;
+
+clib_error_t *ip_main_init (vlib_main_t * vm);
+
+static inline ip_protocol_info_t *
+ip_get_protocol_info (ip_main_t * im, u32 protocol)
+{
+ uword *p;
+
+ p = hash_get (im->protocol_info_by_protocol, protocol);
+ return p ? vec_elt_at_index (im->protocol_infos, p[0]) : 0;
+}
+
+static inline tcp_udp_port_info_t *
+ip_get_tcp_udp_port_info (ip_main_t * im, u32 port)
+{
+ uword *p;
+
+ p = hash_get (im->port_info_by_port, port);
+ return p ? vec_elt_at_index (im->port_infos, p[0]) : 0;
+}
+
+always_inline ip_csum_t
+ip_incremental_checksum_buffer (vlib_main_t * vm,
+ vlib_buffer_t * first_buffer,
+ u32 first_buffer_offset,
+ u32 n_bytes_to_checksum, ip_csum_t sum)
+{
+ vlib_buffer_t *b = first_buffer;
+ u32 n_bytes_left = n_bytes_to_checksum;
+ ASSERT (b->current_length >= first_buffer_offset);
+ void *h;
+ u32 n;
+
+ n = clib_min (n_bytes_left, b->current_length);
+ h = vlib_buffer_get_current (b) + first_buffer_offset;
+ sum = ip_incremental_checksum (sum, h, n);
+ if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ while (1)
+ {
+ n_bytes_left -= n;
+ if (n_bytes_left == 0)
+ break;
+ b = vlib_get_buffer (vm, b->next_buffer);
+ n = clib_min (n_bytes_left, b->current_length);
+ h = vlib_buffer_get_current (b);
+ sum = ip_incremental_checksum (sum, h, n);
+ }
+ }
+
+ return sum;
+}
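+
+/* Editorial note: a minimal usage sketch, not part of the original patch.
+ * To checksum a payload that may span chained buffers, seed the sum with
+ * any pseudo-header contribution, run the chain through the helper above,
+ * then fold and complement. */
+static inline u16
+ip_payload_checksum_sketch (vlib_main_t * vm, vlib_buffer_t * b,
+                            u32 offset, u32 len, ip_csum_t pseudo_sum)
+{
+  ip_csum_t sum =
+    ip_incremental_checksum_buffer (vm, b, offset, len, pseudo_sum);
+  return (u16) ~ ip_csum_fold (sum);
+}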
+
+void ip_del_all_interface_addresses (vlib_main_t * vm, u32 sw_if_index);
+
+extern vlib_node_registration_t ip4_inacl_node;
+extern vlib_node_registration_t ip6_inacl_node;
+
+#endif /* included_ip_main_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip4.h b/src/vnet/ip/ip4.h
new file mode 100644
index 00000000000..0331c44563e
--- /dev/null
+++ b/src/vnet/ip/ip4.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip4.h: ip4 main include file
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_ip_ip4_h
+#define included_ip_ip4_h
+
+#include <vnet/ip/ip4_mtrie.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/lookup.h>
+#include <vnet/feature/feature.h>
+
+typedef struct ip4_fib_t
+{
+ /* Hash table for each prefix length mapping. */
+ uword *fib_entry_by_dst_address[33];
+
+ /* Mtrie for fast lookups. Hash is used to maintain overlapping prefixes. */
+ ip4_fib_mtrie_t mtrie;
+
+ /* Table ID (hash key) for this FIB. */
+ u32 table_id;
+
+ /* Index into FIB vector. */
+ u32 index;
+
+ /* flow hash configuration */
+ flow_hash_config_t flow_hash_config;
+
+ /* N-tuple classifier indices */
+ u32 fwd_classify_table_index;
+ u32 rev_classify_table_index;
+
+} ip4_fib_t;
+
+struct ip4_main_t;
+
+typedef void (ip4_add_del_interface_address_function_t)
+ (struct ip4_main_t * im,
+ uword opaque,
+ u32 sw_if_index,
+ ip4_address_t * address,
+ u32 address_length, u32 if_address_index, u32 is_del);
+
+typedef struct
+{
+ ip4_add_del_interface_address_function_t *function;
+ uword function_opaque;
+} ip4_add_del_interface_address_callback_t;
+
+/**
+ * @brief IPv4 main type.
+ *
+ * State of IPv4 VPP processing including:
+ * - FIBs
+ * - Feature indices used in feature topological sort
+ * - Feature node run time references
+ */
+
+typedef struct ip4_main_t
+{
+ ip_lookup_main_t lookup_main;
+
+ /** Vector of FIBs. */
+ struct fib_table_t_ *fibs;
+
+ u32 fib_masks[33];
+
+ /** Table index indexed by software interface. */
+ u32 *fib_index_by_sw_if_index;
+
+ /* IP4 enabled count by software interface */
+ u8 *ip_enabled_by_sw_if_index;
+
+ /** Hash table mapping table id to fib index.
+ ID space is not necessarily dense; index space is dense. */
+ uword *fib_index_by_table_id;
+
+ /** Functions to call when interface address changes. */
+ ip4_add_del_interface_address_callback_t
+ * add_del_interface_address_callbacks;
+
+ /** Template used to generate IP4 ARP packets. */
+ vlib_packet_template_t ip4_arp_request_packet_template;
+
+ /** Seed for Jenkins hash used to compute ip4 flow hash. */
+ u32 flow_hash_seed;
+
+ /** @brief Template information for VPP generated packets */
+ struct
+ {
+ /** TTL to use for host generated packets. */
+ u8 ttl;
+
+ /** TOS byte to use for host generated packets. */
+ u8 tos;
+
+ u8 pad[2];
+ } host_config;
+} ip4_main_t;
+
+/** Global ip4 main structure. */
+extern ip4_main_t ip4_main;
+
+/** Global ip4 input node. Errors get attached to ip4 input node. */
+extern vlib_node_registration_t ip4_input_node;
+extern vlib_node_registration_t ip4_lookup_node;
+extern vlib_node_registration_t ip4_rewrite_node;
+extern vlib_node_registration_t ip4_rewrite_local_node;
+extern vlib_node_registration_t ip4_arp_node;
+extern vlib_node_registration_t ip4_glean_node;
+extern vlib_node_registration_t ip4_midchain_node;
+
+always_inline uword
+ip4_destination_matches_route (const ip4_main_t * im,
+ const ip4_address_t * key,
+ const ip4_address_t * dest, uword dest_length)
+{
+ return 0 == ((key->data_u32 ^ dest->data_u32) & im->fib_masks[dest_length]);
+}
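+
+/* Editorial note: a small illustration, not part of the original patch.
+ * fib_masks[len] keeps the top `len' bits of an address in network byte
+ * order, so with dest 10.1.0.0 and dest_length 16 any key of the form
+ * 10.1.x.x matches:
+ *
+ * ip4_address_t key = {.as_u8 = {10, 1, 2, 3}};
+ * ip4_address_t dest = {.as_u8 = {10, 1, 0, 0}};
+ * ASSERT (ip4_destination_matches_route (im, &key, &dest, 16));
+ */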
+
+always_inline uword
+ip4_destination_matches_interface (ip4_main_t * im,
+ ip4_address_t * key,
+ ip_interface_address_t * ia)
+{
+ ip4_address_t *a = ip_interface_address_get_address (&im->lookup_main, ia);
+ return ip4_destination_matches_route (im, key, a, ia->address_length);
+}
+
+/* As above but allows for unaligned destinations (e.g. works right from IP header of packet). */
+always_inline uword
+ip4_unaligned_destination_matches_route (ip4_main_t * im,
+ ip4_address_t * key,
+ ip4_address_t * dest,
+ uword dest_length)
+{
+ return 0 ==
+ ((clib_mem_unaligned (&key->data_u32, u32) ^ dest->
+ data_u32) & im->fib_masks[dest_length]);
+}
+
+always_inline int
+ip4_src_address_for_packet (ip_lookup_main_t * lm,
+ u32 sw_if_index, ip4_address_t * src)
+{
+ u32 if_add_index = lm->if_address_pool_index_by_sw_if_index[sw_if_index];
+ if (PREDICT_TRUE (if_add_index != ~0))
+ {
+ ip_interface_address_t *if_add =
+ pool_elt_at_index (lm->if_address_pool, if_add_index);
+ ip4_address_t *if_ip = ip_interface_address_get_address (lm, if_add);
+ *src = *if_ip;
+ return 0;
+ }
+ else
+ {
+ ASSERT (0);
+ src->as_u32 = 0;
+ }
+ return (!0);
+}
+
+/* Find interface address which matches destination. */
+always_inline ip4_address_t *
+ip4_interface_address_matching_destination (ip4_main_t * im,
+ ip4_address_t * dst,
+ u32 sw_if_index,
+ ip_interface_address_t **
+ result_ia)
+{
+ ip_lookup_main_t *lm = &im->lookup_main;
+ ip_interface_address_t *ia;
+ ip4_address_t *result = 0;
+
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (lm, ia, sw_if_index,
+ 1 /* honor unnumbered */,
+ ({
+ ip4_address_t * a = ip_interface_address_get_address (lm, ia);
+ if (ip4_destination_matches_route (im, dst, a, ia->address_length))
+ {
+ result = a;
+ break;
+ }
+ }));
+ /* *INDENT-ON* */
+ if (result_ia)
+ *result_ia = result ? ia : 0;
+ return result;
+}
+
+ip4_address_t *ip4_interface_first_address (ip4_main_t * im, u32 sw_if_index,
+ ip_interface_address_t **
+ result_ia);
+
+clib_error_t *ip4_add_del_interface_address (vlib_main_t * vm,
+ u32 sw_if_index,
+ ip4_address_t * address,
+ u32 address_length, u32 is_del);
+
+void ip4_sw_interface_enable_disable (u32 sw_if_index, u32 is_enable);
+
+int ip4_address_compare (ip4_address_t * a1, ip4_address_t * a2);
+
+/* Send an ARP request to see if given destination is reachable on given interface. */
+clib_error_t *ip4_probe_neighbor (vlib_main_t * vm, ip4_address_t * dst,
+ u32 sw_if_index);
+
+clib_error_t *ip4_set_arp_limit (u32 arp_limit);
+
+uword
+ip4_udp_register_listener (vlib_main_t * vm,
+ u16 dst_port, u32 next_node_index);
+
+void
+ip4_icmp_register_type (vlib_main_t * vm, icmp4_type_t type, u32 node_index);
+
+u16 ip4_tcp_udp_compute_checksum (vlib_main_t * vm, vlib_buffer_t * p0,
+ ip4_header_t * ip0);
+
+void ip4_register_protocol (u32 protocol, u32 node_index);
+
+serialize_function_t serialize_vnet_ip4_main, unserialize_vnet_ip4_main;
+
+int vnet_set_ip4_flow_hash (u32 table_id,
+ flow_hash_config_t flow_hash_config);
+
+void ip4_mtrie_init (ip4_fib_mtrie_t * m);
+
+int vnet_set_ip4_classify_intfc (vlib_main_t * vm, u32 sw_if_index,
+ u32 table_index);
+
+/* Compute flow hash. We'll use it to select which adjacency to use for this
+ flow. And other things. */
+always_inline u32
+ip4_compute_flow_hash (const ip4_header_t * ip,
+ flow_hash_config_t flow_hash_config)
+{
+ tcp_header_t *tcp = (void *) (ip + 1);
+ u32 a, b, c, t1, t2;
+ uword is_tcp_udp = (ip->protocol == IP_PROTOCOL_TCP
+ || ip->protocol == IP_PROTOCOL_UDP);
+
+ t1 = (flow_hash_config & IP_FLOW_HASH_SRC_ADDR)
+ ? ip->src_address.data_u32 : 0;
+ t2 = (flow_hash_config & IP_FLOW_HASH_DST_ADDR)
+ ? ip->dst_address.data_u32 : 0;
+
+ a = (flow_hash_config & IP_FLOW_HASH_REVERSE_SRC_DST) ? t2 : t1;
+ b = (flow_hash_config & IP_FLOW_HASH_REVERSE_SRC_DST) ? t1 : t2;
+ b ^= (flow_hash_config & IP_FLOW_HASH_PROTO) ? ip->protocol : 0;
+
+ t1 = is_tcp_udp ? tcp->ports.src : 0;
+ t2 = is_tcp_udp ? tcp->ports.dst : 0;
+
+ t1 = (flow_hash_config & IP_FLOW_HASH_SRC_PORT) ? t1 : 0;
+ t2 = (flow_hash_config & IP_FLOW_HASH_DST_PORT) ? t2 : 0;
+
+ c = (flow_hash_config & IP_FLOW_HASH_REVERSE_SRC_DST) ?
+ (t1 << 16) | t2 : (t2 << 16) | t1;
+
+ hash_v3_mix32 (a, b, c);
+ hash_v3_finalize32 (a, b, c);
+
+ return c;
+}
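+
+/* Editorial note: a usage sketch, not part of the original patch. The flow
+ * hash is typically used to pick one of several equal-cost paths;
+ * IP_FLOW_HASH_DEFAULT (assumed to be the stock 5-tuple configuration from
+ * lookup.h) stands in for a per-FIB flow_hash_config. */
+static inline u32
+ip4_ecmp_bucket_sketch (const ip4_header_t * ip, u32 n_buckets)
+{
+  u32 hash = ip4_compute_flow_hash (ip, IP_FLOW_HASH_DEFAULT);
+  return hash % n_buckets;
+}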
+
+void
+ip4_forward_next_trace (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ vlib_rx_or_tx_t which_adj_index);
+
+u8 *format_ip4_forward_next_trace (u8 * s, va_list * args);
+
+u32 ip4_tcp_udp_validate_checksum (vlib_main_t * vm, vlib_buffer_t * p0);
+
+#endif /* included_ip_ip4_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip46_cli.c b/src/vnet/ip/ip46_cli.c
new file mode 100644
index 00000000000..ce1ffa6242b
--- /dev/null
+++ b/src/vnet/ip/ip46_cli.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip4_cli.c: ip4 commands
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/ip/ip.h>
+
+/**
+ * @file
+ * @brief Set IP Address.
+ *
+ * Configure an IPv4 or IPv6 address on an interface.
+ */
+
+
+int
+ip4_address_compare (ip4_address_t * a1, ip4_address_t * a2)
+{
+ return clib_net_to_host_u32 (a1->data_u32) -
+ clib_net_to_host_u32 (a2->data_u32);
+}
+
+int
+ip6_address_compare (ip6_address_t * a1, ip6_address_t * a2)
+{
+ int i;
+ for (i = 0; i < ARRAY_LEN (a1->as_u16); i++)
+ {
+ int cmp =
+ clib_net_to_host_u16 (a1->as_u16[i]) -
+ clib_net_to_host_u16 (a2->as_u16[i]);
+ if (cmp != 0)
+ return cmp;
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_ip_command, static) = {
+ .path = "set interface ip",
+ .short_help = "IP4/IP6 commands",
+};
+/* *INDENT-ON* */
+
+void
+ip_del_all_interface_addresses (vlib_main_t * vm, u32 sw_if_index)
+{
+ ip4_main_t *im4 = &ip4_main;
+ ip4_address_t *ip4_addrs = 0;
+ u32 *ip4_masks = 0;
+ ip6_main_t *im6 = &ip6_main;
+ ip6_address_t *ip6_addrs = 0;
+ u32 *ip6_masks = 0;
+ ip_interface_address_t *ia;
+ int i;
+
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (&im4->lookup_main, ia, sw_if_index,
+ 0 /* honor unnumbered */,
+ ({
+ ip4_address_t * x = (ip4_address_t *)
+ ip_interface_address_get_address (&im4->lookup_main, ia);
+ vec_add1 (ip4_addrs, x[0]);
+ vec_add1 (ip4_masks, ia->address_length);
+ }));
+ /* *INDENT-ON* */
+
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (&im6->lookup_main, ia, sw_if_index,
+ 0 /* honor unnumbered */,
+ ({
+ ip6_address_t * x = (ip6_address_t *)
+ ip_interface_address_get_address (&im6->lookup_main, ia);
+ vec_add1 (ip6_addrs, x[0]);
+ vec_add1 (ip6_masks, ia->address_length);
+ }));
+ /* *INDENT-ON* */
+
+ for (i = 0; i < vec_len (ip4_addrs); i++)
+ ip4_add_del_interface_address (vm, sw_if_index, &ip4_addrs[i],
+ ip4_masks[i], 1 /* is_del */ );
+ for (i = 0; i < vec_len (ip6_addrs); i++)
+ ip6_add_del_interface_address (vm, sw_if_index, &ip6_addrs[i],
+ ip6_masks[i], 1 /* is_del */ );
+
+ vec_free (ip4_addrs);
+ vec_free (ip4_masks);
+ vec_free (ip6_addrs);
+ vec_free (ip6_masks);
+}
+
+static clib_error_t *
+ip_address_delete_cleanup (vnet_main_t * vnm, u32 hw_if_index, u32 is_create)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vnet_hw_interface_t *hw;
+
+ if (is_create)
+ return 0;
+
+ hw = vnet_get_hw_interface (vnm, hw_if_index);
+
+ ip_del_all_interface_addresses (vm, hw->sw_if_index);
+ return 0;
+}
+
+VNET_HW_INTERFACE_ADD_DEL_FUNCTION (ip_address_delete_cleanup);
+
+static clib_error_t *
+add_del_ip_address (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip4_address_t a4;
+ ip6_address_t a6;
+ clib_error_t *error = 0;
+ u32 sw_if_index, length, is_del;
+
+ sw_if_index = ~0;
+ is_del = 0;
+
+ if (unformat (input, "del"))
+ is_del = 1;
+
+ if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ if (is_del && unformat (input, "all"))
+ ip_del_all_interface_addresses (vm, sw_if_index);
+ else if (unformat (input, "%U/%d", unformat_ip4_address, &a4, &length))
+ error = ip4_add_del_interface_address (vm, sw_if_index, &a4, length,
+ is_del);
+ else if (unformat (input, "%U/%d", unformat_ip6_address, &a6, &length))
+ error = ip6_add_del_interface_address (vm, sw_if_index, &a6, length,
+ is_del);
+ else
+ {
+ error = clib_error_return (0, "expected IP4/IP6 address/length `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+
+done:
+ return error;
+}
+
+/*?
+ * Add an IP Address to an interface or remove an IP Address from an interface.
+ * The IP Address can be an IPv4 or an IPv6 address. Interfaces may have multiple
+ * IPv4 and IPv6 addresses. There is no concept of primary vs. secondary
+ * interface addresses; they're just addresses.
+ *
+ * To display the addresses associated with a given interface, use the command
+ * '<em>show interface address <interface></em>'.
+ *
+ * Note that the debug CLI does not enforce classful mask-width / addressing
+ * constraints.
+ *
+ * @cliexpar
+ * @parblock
+ * An example of how to add an IPv4 address to an interface:
+ * @cliexcmd{set interface ip address GigabitEthernet2/0/0 172.16.2.12/24}
+ *
+ * An example of how to add an IPv6 address to an interface:
+ * @cliexcmd{set interface ip address GigabitEthernet2/0/0 @::a:1:1:0:7/126}
+ *
+ * To delete a specific interface ip address:
+ * @cliexcmd{set interface ip address GigabitEthernet2/0/0 172.16.2.12/24 del}
+ *
+ * To delete all interfaces addresses (IPv4 and IPv6):
+ * @cliexcmd{set interface ip address GigabitEthernet2/0/0 del all}
+ * @endparblock
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_ip_address_command, static) = {
+ .path = "set interface ip address",
+ .function = add_del_ip_address,
+ .short_help = "set interface ip address <interface> [<ip-addr>/<mask> [del]] | [del all]",
+};
+/* *INDENT-ON* */
+
+/* Dummy init function to get us linked in. */
+static clib_error_t *
+ip4_cli_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ip4_cli_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip4_error.h b/src/vnet/ip/ip4_error.h
new file mode 100644
index 00000000000..95d12ec22d5
--- /dev/null
+++ b/src/vnet/ip/ip4_error.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip4_error.h: ip4 fast path errors
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_ip_ip4_error_h
+#define included_ip_ip4_error_h
+
+#define foreach_ip4_error \
+ /* Must be first. */ \
+ _ (NONE, "valid ip4 packets") \
+ \
+ /* Errors signalled by ip4-input */ \
+ _ (TOO_SHORT, "ip4 length < 20 bytes") \
+ _ (BAD_LENGTH, "ip4 length > l2 length") \
+ _ (BAD_CHECKSUM, "bad ip4 checksum") \
+ _ (VERSION, "ip4 version != 4") \
+ _ (OPTIONS, "ip4 options present") \
+ _ (FRAGMENT_OFFSET_ONE, "ip4 fragment offset == 1") \
+ _ (TIME_EXPIRED, "ip4 ttl <= 1") \
+ \
+ /* Errors signalled by ip4-rewrite. */ \
+ _ (MTU_EXCEEDED, "ip4 MTU exceeded and DF set") \
+ _ (DST_LOOKUP_MISS, "ip4 destination lookup miss") \
+ _ (SRC_LOOKUP_MISS, "ip4 source lookup miss") \
+ _ (ADJACENCY_DROP, "ip4 adjacency drop") \
+ _ (ADJACENCY_PUNT, "ip4 adjacency punt") \
+ \
+ /* Errors signalled by ip4-local. */ \
+ _ (UNKNOWN_PROTOCOL, "unknown ip protocol") \
+ _ (TCP_CHECKSUM, "bad tcp checksum") \
+ _ (UDP_CHECKSUM, "bad udp checksum") \
+ _ (UDP_LENGTH, "inconsistent udp/ip lengths") \
+ \
+ /* Errors signalled by ip4-source-check. */ \
+ _ (UNICAST_SOURCE_CHECK_FAILS, "ip4 unicast source check fails") \
+ \
+ /* Spoofed packets in ip4-rewrite-local */ \
+ _(SPOOFED_LOCAL_PACKETS, "ip4 spoofed local-address packet drops") \
+ \
+ /* Errors signalled by ip4-inacl */ \
+ _ (INACL_TABLE_MISS, "input ACL table-miss drops") \
+ _ (INACL_SESSION_DENY, "input ACL session deny drops")
+
+typedef enum
+{
+#define _(sym,str) IP4_ERROR_##sym,
+ foreach_ip4_error
+#undef _
+ IP4_N_ERROR,
+} ip4_error_t;
+
+#endif /* included_ip_ip4_error_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip4_format.c b/src/vnet/ip/ip4_format.c
new file mode 100644
index 00000000000..c803e0656db
--- /dev/null
+++ b/src/vnet/ip/ip4_format.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip4_format.c: ip4 formatting
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/ip/ip.h>
+
+/* Format an IP4 address. */
+u8 *
+format_ip4_address (u8 * s, va_list * args)
+{
+ u8 *a = va_arg (*args, u8 *);
+ return format (s, "%d.%d.%d.%d", a[0], a[1], a[2], a[3]);
+}
+
+/* Format an IP4 route destination and length. */
+u8 *
+format_ip4_address_and_length (u8 * s, va_list * args)
+{
+ u8 *a = va_arg (*args, u8 *);
+ u8 l = va_arg (*args, u32);
+ return format (s, "%U/%d", format_ip4_address, a, l);
+}
+
+/* Parse an IP4 address %d.%d.%d.%d. */
+uword
+unformat_ip4_address (unformat_input_t * input, va_list * args)
+{
+ u8 *result = va_arg (*args, u8 *);
+ unsigned a[4];
+
+ if (!unformat (input, "%d.%d.%d.%d", &a[0], &a[1], &a[2], &a[3]))
+ return 0;
+
+ if (a[0] >= 256 || a[1] >= 256 || a[2] >= 256 || a[3] >= 256)
+ return 0;
+
+ result[0] = a[0];
+ result[1] = a[1];
+ result[2] = a[2];
+ result[3] = a[3];
+
+ return 1;
+}
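These format_*/unformat_* pairs plug into vppinfra's %U mechanism: a format function and its arguments are passed to format (), and a parse function to unformat (). A hedged usage sketch, assuming a CLI handler with vlib_main_t *vm and unformat_input_t *input in scope:

    ip4_address_t addr;
    u32 len = 32;

    /* Parse input such as "192.168.1.1/24"... */
    if (unformat (input, "%U/%d", unformat_ip4_address, &addr, &len))
      /* ...and echo it back through the formatter. */
      vlib_cli_output (vm, "parsed %U",
                       format_ip4_address_and_length, &addr, len);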
+
+/* Format an IP4 header. */
+u8 *
+format_ip4_header (u8 * s, va_list * args)
+{
+ ip4_header_t *ip = va_arg (*args, ip4_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+ u32 ip_version, header_bytes;
+ uword indent;
+
+  /* Header is truncated; nothing more to format. */
+ if (max_header_bytes < sizeof (ip[0]))
+ return format (s, "IP header truncated");
+
+ indent = format_get_indent (s);
+ indent += 2;
+
+ ip_version = (ip->ip_version_and_header_length >> 4);
+ header_bytes = (ip->ip_version_and_header_length & 0xf) * sizeof (u32);
+
+ s = format (s, "%U: %U -> %U",
+ format_ip_protocol, ip->protocol,
+ format_ip4_address, ip->src_address.data,
+ format_ip4_address, ip->dst_address.data);
+
+ /* Show IP version and header length only with unexpected values. */
+ if (ip_version != 4 || header_bytes != sizeof (ip4_header_t))
+ s = format (s, "\n%Uversion %d, header length %d",
+ format_white_space, indent, ip_version, header_bytes);
+
+ s = format (s, "\n%Utos 0x%02x, ttl %d, length %d, checksum 0x%04x",
+ format_white_space, indent,
+ ip->tos, ip->ttl,
+ clib_net_to_host_u16 (ip->length),
+ clib_net_to_host_u16 (ip->checksum));
+
+ /* Check and report invalid checksums. */
+ {
+ u16 c = ip4_header_checksum (ip);
+ if (c != ip->checksum)
+ s = format (s, " (should be 0x%04x)", clib_net_to_host_u16 (c));
+ }
+
+ {
+ u32 f = clib_net_to_host_u16 (ip->flags_and_fragment_offset);
+ u32 o;
+
+ s = format (s, "\n%Ufragment id 0x%04x",
+ format_white_space, indent,
+ clib_net_to_host_u16 (ip->fragment_id));
+
+ /* Fragment offset. */
+    o = 8 * (f & 0x1fff);
+    f ^= f & 0x1fff;
+ if (o != 0)
+ s = format (s, " offset %d", o);
+
+ if (f != 0)
+ {
+ s = format (s, ", flags ");
+#define _(l) if (f & IP4_HEADER_FLAG_##l) s = format (s, #l);
+ _(MORE_FRAGMENTS);
+ _(DONT_FRAGMENT);
+ _(CONGESTION);
+#undef _
+ }
+ }
+
+ /* Recurse into next protocol layer. */
+ if (max_header_bytes != 0 && header_bytes < max_header_bytes)
+ {
+ ip_main_t *im = &ip_main;
+ ip_protocol_info_t *pi = ip_get_protocol_info (im, ip->protocol);
+
+ if (pi && pi->format_header)
+ s = format (s, "\n%U%U",
+ format_white_space, indent - 2, pi->format_header,
+ /* next protocol header */ (void *) ip + header_bytes,
+ max_header_bytes - header_bytes);
+ }
+
+ return s;
+}
+
+/* Parse an IP4 header. */
+uword
+unformat_ip4_header (unformat_input_t * input, va_list * args)
+{
+ u8 **result = va_arg (*args, u8 **);
+ ip4_header_t *ip;
+ int old_length;
+
+ /* Allocate space for IP header. */
+ {
+ void *p;
+
+ old_length = vec_len (*result);
+ vec_add2 (*result, p, sizeof (ip4_header_t));
+ ip = p;
+ }
+
+ memset (ip, 0, sizeof (ip[0]));
+ ip->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+
+ if (!unformat (input, "%U: %U -> %U",
+ unformat_ip_protocol, &ip->protocol,
+ unformat_ip4_address, &ip->src_address,
+ unformat_ip4_address, &ip->dst_address))
+ return 0;
+
+ /* Parse options. */
+ while (1)
+ {
+ int i, j;
+
+ if (unformat (input, "tos %U", unformat_vlib_number, &i))
+ ip->tos = i;
+
+ else if (unformat (input, "ttl %U", unformat_vlib_number, &i))
+ ip->ttl = i;
+
+ else if (unformat (input, "fragment id %U offset %U",
+ unformat_vlib_number, &i, unformat_vlib_number, &j))
+ {
+ ip->fragment_id = clib_host_to_net_u16 (i);
+ ip->flags_and_fragment_offset |=
+	    clib_host_to_net_u16 ((j / 8) & 0x1fff);
+ }
+
+ /* Flags. */
+ else if (unformat (input, "mf") || unformat (input, "MF"))
+ ip->flags_and_fragment_offset |=
+ clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
+
+ else if (unformat (input, "df") || unformat (input, "DF"))
+ ip->flags_and_fragment_offset |=
+ clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
+
+ else if (unformat (input, "ce") || unformat (input, "CE"))
+ ip->flags_and_fragment_offset |=
+ clib_host_to_net_u16 (IP4_HEADER_FLAG_CONGESTION);
+
+ /* Can't parse input: try next protocol level. */
+ else
+ break;
+ }
+
+ /* Fill in checksum. */
+ ip->checksum = ip4_header_checksum (ip);
+
+ /* Recurse into next protocol layer. */
+ {
+ ip_main_t *im = &ip_main;
+ ip_protocol_info_t *pi = ip_get_protocol_info (im, ip->protocol);
+
+ if (pi && pi->unformat_header)
+ {
+ if (!unformat_user (input, pi->unformat_header, result))
+ return 0;
+
+ /* Result may have moved. */
+ ip = (void *) *result + old_length;
+ }
+ }
+
+ /* Fill in IP length. */
+ ip->length = clib_host_to_net_u16 (vec_len (*result) - old_length);
+
+ return 1;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
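A detail worth keeping in mind when reading both the formatter and the parser above: the IPv4 header stores the fragment offset in units of 8 octets in the low 13 bits of flags_and_fragment_offset, hence the multiply-by-8 on display and divide-by-8 on parse. A tiny self-contained round-trip check:

    #include <assert.h>

    int
    main (void)
    {
      /* A fragment starting at byte 1480 of the original datagram. */
      unsigned offset_bytes = 1480;
      unsigned field = (offset_bytes / 8) & 0x1fff; /* 185, fits in 13 bits */
      assert (8 * field == offset_bytes);           /* round-trips exactly */
      return 0;
    }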
diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c
new file mode 100644
index 00000000000..6e91b9e91e1
--- /dev/null
+++ b/src/vnet/ip/ip4_forward.c
@@ -0,0 +1,3345 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip4_forward.c: IP v4 forwarding
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h> /* for ethernet_header_t */
+#include <vnet/ethernet/arp_packet.h> /* for ethernet_arp_header_t */
+#include <vnet/ppp/ppp.h>
+#include <vnet/srp/srp.h> /* for srp_hw_interface_class */
+#include <vnet/api_errno.h> /* for API error numbers */
+#include <vnet/fib/fib_table.h> /* for FIB table and entry creation */
+#include <vnet/fib/fib_entry.h> /* for FIB table and entry creation */
+#include <vnet/fib/fib_urpf_list.h> /* for FIB uRPF check */
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/classify_dpo.h>
+
+/**
+ * @file
+ * @brief IPv4 Forwarding.
+ *
+ * This file contains the source code for IPv4 forwarding.
+ */
+
+void
+ip4_forward_next_trace (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ vlib_rx_or_tx_t which_adj_index);
+
+always_inline uword
+ip4_lookup_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ int lookup_for_responses_to_locally_received_packets)
+{
+ ip4_main_t *im = &ip4_main;
+ vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
+ u32 n_left_from, n_left_to_next, *from, *to_next;
+ ip_lookup_next_t next;
+ u32 cpu_index = os_get_cpu_number ();
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
+
+ while (n_left_from >= 8 && n_left_to_next >= 4)
+ {
+ vlib_buffer_t *p0, *p1, *p2, *p3;
+ ip4_header_t *ip0, *ip1, *ip2, *ip3;
+ __attribute__ ((unused)) tcp_header_t *tcp0, *tcp1, *tcp2, *tcp3;
+ ip_lookup_next_t next0, next1, next2, next3;
+ const load_balance_t *lb0, *lb1, *lb2, *lb3;
+ ip4_fib_mtrie_t *mtrie0, *mtrie1, *mtrie2, *mtrie3;
+ ip4_fib_mtrie_leaf_t leaf0, leaf1, leaf2, leaf3;
+ ip4_address_t *dst_addr0, *dst_addr1, *dst_addr2, *dst_addr3;
+ __attribute__ ((unused)) u32 pi0, fib_index0, lb_index0,
+ is_tcp_udp0;
+ __attribute__ ((unused)) u32 pi1, fib_index1, lb_index1,
+ is_tcp_udp1;
+ __attribute__ ((unused)) u32 pi2, fib_index2, lb_index2,
+ is_tcp_udp2;
+ __attribute__ ((unused)) u32 pi3, fib_index3, lb_index3,
+ is_tcp_udp3;
+ flow_hash_config_t flow_hash_config0, flow_hash_config1;
+ flow_hash_config_t flow_hash_config2, flow_hash_config3;
+ u32 hash_c0, hash_c1, hash_c2, hash_c3;
+ const dpo_id_t *dpo0, *dpo1, *dpo2, *dpo3;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p4, *p5, *p6, *p7;
+
+ p4 = vlib_get_buffer (vm, from[4]);
+ p5 = vlib_get_buffer (vm, from[5]);
+ p6 = vlib_get_buffer (vm, from[6]);
+ p7 = vlib_get_buffer (vm, from[7]);
+
+ vlib_prefetch_buffer_header (p4, LOAD);
+ vlib_prefetch_buffer_header (p5, LOAD);
+ vlib_prefetch_buffer_header (p6, LOAD);
+ vlib_prefetch_buffer_header (p7, LOAD);
+
+ CLIB_PREFETCH (p4->data, sizeof (ip0[0]), LOAD);
+ CLIB_PREFETCH (p5->data, sizeof (ip0[0]), LOAD);
+ CLIB_PREFETCH (p6->data, sizeof (ip0[0]), LOAD);
+ CLIB_PREFETCH (p7->data, sizeof (ip0[0]), LOAD);
+ }
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+ pi2 = to_next[2] = from[2];
+ pi3 = to_next[3] = from[3];
+
+ from += 4;
+ to_next += 4;
+ n_left_to_next -= 4;
+ n_left_from -= 4;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+ p2 = vlib_get_buffer (vm, pi2);
+ p3 = vlib_get_buffer (vm, pi3);
+
+ ip0 = vlib_buffer_get_current (p0);
+ ip1 = vlib_buffer_get_current (p1);
+ ip2 = vlib_buffer_get_current (p2);
+ ip3 = vlib_buffer_get_current (p3);
+
+ dst_addr0 = &ip0->dst_address;
+ dst_addr1 = &ip1->dst_address;
+ dst_addr2 = &ip2->dst_address;
+ dst_addr3 = &ip3->dst_address;
+
+ fib_index0 =
+ vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p0)->sw_if_index[VLIB_RX]);
+ fib_index1 =
+ vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p1)->sw_if_index[VLIB_RX]);
+ fib_index2 =
+ vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p2)->sw_if_index[VLIB_RX]);
+ fib_index3 =
+ vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p3)->sw_if_index[VLIB_RX]);
+ fib_index0 =
+ (vnet_buffer (p0)->sw_if_index[VLIB_TX] ==
+ (u32) ~ 0) ? fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX];
+ fib_index1 =
+ (vnet_buffer (p1)->sw_if_index[VLIB_TX] ==
+ (u32) ~ 0) ? fib_index1 : vnet_buffer (p1)->sw_if_index[VLIB_TX];
+ fib_index2 =
+ (vnet_buffer (p2)->sw_if_index[VLIB_TX] ==
+ (u32) ~ 0) ? fib_index2 : vnet_buffer (p2)->sw_if_index[VLIB_TX];
+ fib_index3 =
+ (vnet_buffer (p3)->sw_if_index[VLIB_TX] ==
+ (u32) ~ 0) ? fib_index3 : vnet_buffer (p3)->sw_if_index[VLIB_TX];
+
+ if (!lookup_for_responses_to_locally_received_packets)
+ {
+ mtrie0 = &ip4_fib_get (fib_index0)->mtrie;
+ mtrie1 = &ip4_fib_get (fib_index1)->mtrie;
+ mtrie2 = &ip4_fib_get (fib_index2)->mtrie;
+ mtrie3 = &ip4_fib_get (fib_index3)->mtrie;
+
+ leaf0 = leaf1 = leaf2 = leaf3 = IP4_FIB_MTRIE_LEAF_ROOT;
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 0);
+ leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 0);
+ leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 0);
+ leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 0);
+ }
+
+ tcp0 = (void *) (ip0 + 1);
+ tcp1 = (void *) (ip1 + 1);
+ tcp2 = (void *) (ip2 + 1);
+ tcp3 = (void *) (ip3 + 1);
+
+ is_tcp_udp0 = (ip0->protocol == IP_PROTOCOL_TCP
+ || ip0->protocol == IP_PROTOCOL_UDP);
+ is_tcp_udp1 = (ip1->protocol == IP_PROTOCOL_TCP
+ || ip1->protocol == IP_PROTOCOL_UDP);
+ is_tcp_udp2 = (ip2->protocol == IP_PROTOCOL_TCP
+ || ip2->protocol == IP_PROTOCOL_UDP);
+	  is_tcp_udp3 = (ip3->protocol == IP_PROTOCOL_TCP
+			 || ip3->protocol == IP_PROTOCOL_UDP);
+
+ if (!lookup_for_responses_to_locally_received_packets)
+ {
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 1);
+ leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 1);
+ leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 1);
+ leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 1);
+ }
+
+ if (!lookup_for_responses_to_locally_received_packets)
+ {
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
+ leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 2);
+ leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 2);
+ leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 2);
+ }
+
+ if (!lookup_for_responses_to_locally_received_packets)
+ {
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);
+ leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 3);
+ leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 3);
+ leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 3);
+ }
+
+ if (lookup_for_responses_to_locally_received_packets)
+ {
+ lb_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_RX];
+ lb_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_RX];
+ lb_index2 = vnet_buffer (p2)->ip.adj_index[VLIB_RX];
+ lb_index3 = vnet_buffer (p3)->ip.adj_index[VLIB_RX];
+ }
+ else
+ {
+ /* Handle default route. */
+ leaf0 =
+ (leaf0 ==
+ IP4_FIB_MTRIE_LEAF_EMPTY ? mtrie0->default_leaf : leaf0);
+ leaf1 =
+ (leaf1 ==
+ IP4_FIB_MTRIE_LEAF_EMPTY ? mtrie1->default_leaf : leaf1);
+ leaf2 =
+ (leaf2 ==
+ IP4_FIB_MTRIE_LEAF_EMPTY ? mtrie2->default_leaf : leaf2);
+ leaf3 =
+ (leaf3 ==
+ IP4_FIB_MTRIE_LEAF_EMPTY ? mtrie3->default_leaf : leaf3);
+ lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+ lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
+ lb_index2 = ip4_fib_mtrie_leaf_get_adj_index (leaf2);
+ lb_index3 = ip4_fib_mtrie_leaf_get_adj_index (leaf3);
+ }
+
+ lb0 = load_balance_get (lb_index0);
+ lb1 = load_balance_get (lb_index1);
+ lb2 = load_balance_get (lb_index2);
+ lb3 = load_balance_get (lb_index3);
+
+ /* Use flow hash to compute multipath adjacency. */
+ hash_c0 = vnet_buffer (p0)->ip.flow_hash = 0;
+ hash_c1 = vnet_buffer (p1)->ip.flow_hash = 0;
+ hash_c2 = vnet_buffer (p2)->ip.flow_hash = 0;
+ hash_c3 = vnet_buffer (p3)->ip.flow_hash = 0;
+ if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+ {
+ flow_hash_config0 = lb0->lb_hash_config;
+ hash_c0 = vnet_buffer (p0)->ip.flow_hash =
+ ip4_compute_flow_hash (ip0, flow_hash_config0);
+ }
+ if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
+ {
+ flow_hash_config1 = lb1->lb_hash_config;
+ hash_c1 = vnet_buffer (p1)->ip.flow_hash =
+ ip4_compute_flow_hash (ip1, flow_hash_config1);
+ }
+ if (PREDICT_FALSE (lb2->lb_n_buckets > 1))
+ {
+ flow_hash_config2 = lb2->lb_hash_config;
+ hash_c2 = vnet_buffer (p2)->ip.flow_hash =
+ ip4_compute_flow_hash (ip2, flow_hash_config2);
+ }
+ if (PREDICT_FALSE (lb3->lb_n_buckets > 1))
+ {
+ flow_hash_config3 = lb3->lb_hash_config;
+ hash_c3 = vnet_buffer (p3)->ip.flow_hash =
+ ip4_compute_flow_hash (ip3, flow_hash_config3);
+ }
+
+ ASSERT (lb0->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb0->lb_n_buckets));
+ ASSERT (lb1->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb1->lb_n_buckets));
+ ASSERT (lb2->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb2->lb_n_buckets));
+ ASSERT (lb3->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb3->lb_n_buckets));
+
+ dpo0 = load_balance_get_bucket_i (lb0,
+ (hash_c0 &
+ (lb0->lb_n_buckets_minus_1)));
+ dpo1 = load_balance_get_bucket_i (lb1,
+ (hash_c1 &
+ (lb1->lb_n_buckets_minus_1)));
+ dpo2 = load_balance_get_bucket_i (lb2,
+ (hash_c2 &
+ (lb2->lb_n_buckets_minus_1)));
+ dpo3 = load_balance_get_bucket_i (lb3,
+ (hash_c3 &
+ (lb3->lb_n_buckets_minus_1)));
+
+ next0 = dpo0->dpoi_next_node;
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+ next1 = dpo1->dpoi_next_node;
+ vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
+ next2 = dpo2->dpoi_next_node;
+ vnet_buffer (p2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;
+ next3 = dpo3->dpoi_next_node;
+ vnet_buffer (p3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lb_index0, 1,
+ vlib_buffer_length_in_chain (vm, p0)
+ + sizeof (ethernet_header_t));
+ vlib_increment_combined_counter
+ (cm, cpu_index, lb_index1, 1,
+ vlib_buffer_length_in_chain (vm, p1)
+ + sizeof (ethernet_header_t));
+ vlib_increment_combined_counter
+ (cm, cpu_index, lb_index2, 1,
+ vlib_buffer_length_in_chain (vm, p2)
+ + sizeof (ethernet_header_t));
+ vlib_increment_combined_counter
+ (cm, cpu_index, lb_index3, 1,
+ vlib_buffer_length_in_chain (vm, p3)
+ + sizeof (ethernet_header_t));
+
+ vlib_validate_buffer_enqueue_x4 (vm, node, next,
+ to_next, n_left_to_next,
+ pi0, pi1, pi2, pi3,
+ next0, next1, next2, next3);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ ip4_header_t *ip0;
+ __attribute__ ((unused)) tcp_header_t *tcp0;
+ ip_lookup_next_t next0;
+ const load_balance_t *lb0;
+ ip4_fib_mtrie_t *mtrie0;
+ ip4_fib_mtrie_leaf_t leaf0;
+ ip4_address_t *dst_addr0;
+ __attribute__ ((unused)) u32 pi0, fib_index0, is_tcp_udp0, lbi0;
+ flow_hash_config_t flow_hash_config0;
+ const dpo_id_t *dpo0;
+ u32 hash_c0;
+
+ pi0 = from[0];
+ to_next[0] = pi0;
+
+ p0 = vlib_get_buffer (vm, pi0);
+
+ ip0 = vlib_buffer_get_current (p0);
+
+ dst_addr0 = &ip0->dst_address;
+
+ fib_index0 =
+ vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p0)->sw_if_index[VLIB_RX]);
+ fib_index0 =
+ (vnet_buffer (p0)->sw_if_index[VLIB_TX] ==
+ (u32) ~ 0) ? fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX];
+
+ if (!lookup_for_responses_to_locally_received_packets)
+ {
+ mtrie0 = &ip4_fib_get (fib_index0)->mtrie;
+
+ leaf0 = IP4_FIB_MTRIE_LEAF_ROOT;
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 0);
+ }
+
+ tcp0 = (void *) (ip0 + 1);
+
+ is_tcp_udp0 = (ip0->protocol == IP_PROTOCOL_TCP
+ || ip0->protocol == IP_PROTOCOL_UDP);
+
+ if (!lookup_for_responses_to_locally_received_packets)
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 1);
+
+ if (!lookup_for_responses_to_locally_received_packets)
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
+
+ if (!lookup_for_responses_to_locally_received_packets)
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);
+
+ if (lookup_for_responses_to_locally_received_packets)
+ lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_RX];
+ else
+ {
+ /* Handle default route. */
+ leaf0 =
+ (leaf0 ==
+ IP4_FIB_MTRIE_LEAF_EMPTY ? mtrie0->default_leaf : leaf0);
+ lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+ }
+
+ lb0 = load_balance_get (lbi0);
+
+ /* Use flow hash to compute multipath adjacency. */
+ hash_c0 = vnet_buffer (p0)->ip.flow_hash = 0;
+ if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+ {
+ flow_hash_config0 = lb0->lb_hash_config;
+
+ hash_c0 = vnet_buffer (p0)->ip.flow_hash =
+ ip4_compute_flow_hash (ip0, flow_hash_config0);
+ }
+
+ ASSERT (lb0->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb0->lb_n_buckets));
+
+ dpo0 = load_balance_get_bucket_i (lb0,
+ (hash_c0 &
+ (lb0->lb_n_buckets_minus_1)));
+
+ next0 = dpo0->dpoi_next_node;
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
+
+ from += 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ n_left_from -= 1;
+
+ if (PREDICT_FALSE (next0 != next))
+ {
+ n_left_to_next += 1;
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ next = next0;
+ vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
+ to_next[0] = pi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ }
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip4_forward_next_trace (vm, node, frame, VLIB_TX);
+
+ return frame->n_vectors;
+}
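The four interleaved ip4_fib_mtrie_lookup_step calls above walk the mtrie one destination-address byte per step, for all four packets of the quad at once, so the memory latencies of the independent walks overlap. A simplified sketch of the same byte-stride idea over a toy trie (illustrative only; the real ip4_fib_mtrie_t packs leaves and plies far more compactly):

    /* Toy byte-stride trie: each level is a 256-entry table indexed by
     * one address byte; a negative entry terminates with a leaf. */
    typedef struct
    {
      int next[256]; /* >= 0: index of next ply; < 0: ~leaf_index */
    } demo_ply_t;

    static int
    demo_trie_lookup (const demo_ply_t * plies, const unsigned char addr[4])
    {
      int ply = 0, i;
      for (i = 0; i < 4; i++) /* one step per address byte, as above */
        {
          int e = plies[ply].next[addr[i]];
          if (e < 0)
            return ~e; /* longest match found */
          ply = e;
        }
      return 0; /* fell through: default leaf */
    }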
+
+/** @brief IPv4 lookup node.
+ @node ip4-lookup
+
+ This is the main IPv4 lookup dispatch node.
+
+ @param vm vlib_main_t corresponding to the current thread
+ @param node vlib_node_runtime_t
+ @param frame vlib_frame_t whose contents should be dispatched
+
+ @par Graph mechanics: buffer metadata, next index usage
+
+ @em Uses:
+ - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
+ - Indicates the @c sw_if_index value of the interface that the
+ packet was received on.
+ - <code>vnet_buffer(b)->sw_if_index[VLIB_TX]</code>
+ - When the value is @c ~0 then the node performs a longest prefix
+ match (LPM) for the packet destination address in the FIB attached
+ to the receive interface.
+ - Otherwise perform LPM for the packet destination address in the
+ indicated FIB. In this case <code>[VLIB_TX]</code> is a FIB index
+ value (0, 1, ...) and not a VRF id.
+
+ @em Sets:
+ - <code>vnet_buffer(b)->ip.adj_index[VLIB_TX]</code>
+ - The lookup result adjacency index.
+
+ <em>Next Index:</em>
+ - Dispatches the packet to the node index found in
+ ip_adjacency_t @c adj->lookup_next_index
+ (where @c adj is the lookup result adjacency).
+*/
+static uword
+ip4_lookup (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return ip4_lookup_inline (vm, node, frame,
+ /* lookup_for_responses_to_locally_received_packets */
+ 0);
+}
+
+static u8 *format_ip4_lookup_trace (u8 * s, va_list * args);
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_lookup_node) = {
+  .function = ip4_lookup,
+  .name = "ip4-lookup",
+  .vector_size = sizeof (u32),
+  .format_trace = format_ip4_lookup_trace,
+  .n_next_nodes = IP_LOOKUP_N_NEXT,
+  .next_nodes = IP4_LOOKUP_NEXT_NODES,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_lookup_node, ip4_lookup);
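As the node documentation above spells out, a feature that runs before ip4-lookup can steer a packet into a specific FIB by writing a FIB index into the buffer metadata; ~0 restores the default per-RX-interface behaviour. A hedged sketch, assuming a vlib_buffer_t *b and a resolved fib_index in scope:

    /* Look the destination up in an explicit FIB (an index, not a VRF id). */
    vnet_buffer (b)->sw_if_index[VLIB_TX] = fib_index;

    /* Default: LPM in the FIB attached to the receive interface. */
    vnet_buffer (b)->sw_if_index[VLIB_TX] = ~0;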
+
+always_inline uword
+ip4_load_balance (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ vlib_combined_counter_main_t *cm = &load_balance_main.lbm_via_counters;
+ u32 n_left_from, n_left_to_next, *from, *to_next;
+ ip_lookup_next_t next;
+ u32 cpu_index = os_get_cpu_number ();
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip4_forward_next_trace (vm, node, frame, VLIB_TX);
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ ip_lookup_next_t next0, next1;
+ const load_balance_t *lb0, *lb1;
+ vlib_buffer_t *p0, *p1;
+ u32 pi0, lbi0, hc0, pi1, lbi1, hc1;
+ const ip4_header_t *ip0, *ip1;
+ const dpo_id_t *dpo0, *dpo1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, STORE);
+ vlib_prefetch_buffer_header (p3, STORE);
+
+ CLIB_PREFETCH (p2->data, sizeof (ip0[0]), STORE);
+ CLIB_PREFETCH (p3->data, sizeof (ip0[0]), STORE);
+ }
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+
+ ip0 = vlib_buffer_get_current (p0);
+ ip1 = vlib_buffer_get_current (p1);
+ lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+ lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];
+
+ lb0 = load_balance_get (lbi0);
+ lb1 = load_balance_get (lbi1);
+
+	  /*
+	   * This node services via-FIBs, so we can re-use a flow hash
+	   * computed by an earlier node in the graph, if present. We must
+	   * not use the same hash value at each level of the recursion
+	   * graph, as that would lead to polarisation.
+	   */
+	  hc0 = hc1 = 0;
+
+	  if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+	    {
+	      if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash))
+		{
+		  hc0 = vnet_buffer (p0)->ip.flow_hash =
+		    vnet_buffer (p0)->ip.flow_hash >> 1;
+		}
+	      else
+		{
+		  hc0 = vnet_buffer (p0)->ip.flow_hash =
+		    ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
+		}
+	    }
+	  if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
+	    {
+	      if (PREDICT_TRUE (vnet_buffer (p1)->ip.flow_hash))
+		{
+		  hc1 = vnet_buffer (p1)->ip.flow_hash =
+		    vnet_buffer (p1)->ip.flow_hash >> 1;
+		}
+	      else
+		{
+		  hc1 = vnet_buffer (p1)->ip.flow_hash =
+		    ip4_compute_flow_hash (ip1, lb1->lb_hash_config);
+		}
+	    }
+
+ dpo0 =
+ load_balance_get_bucket_i (lb0,
+ hc0 & (lb0->lb_n_buckets_minus_1));
+ dpo1 =
+ load_balance_get_bucket_i (lb1,
+ hc1 & (lb1->lb_n_buckets_minus_1));
+
+ next0 = dpo0->dpoi_next_node;
+ next1 = dpo1->dpoi_next_node;
+
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+ vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1));
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next,
+ to_next, n_left_to_next,
+ pi0, pi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ ip_lookup_next_t next0;
+ const load_balance_t *lb0;
+ vlib_buffer_t *p0;
+ u32 pi0, lbi0, hc0;
+ const ip4_header_t *ip0;
+ const dpo_id_t *dpo0;
+
+ pi0 = from[0];
+ to_next[0] = pi0;
+ from += 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ n_left_from -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+
+ ip0 = vlib_buffer_get_current (p0);
+ lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+
+ lb0 = load_balance_get (lbi0);
+
+	  hc0 = 0;
+	  if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+	    {
+	      if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash))
+		{
+		  hc0 = vnet_buffer (p0)->ip.flow_hash =
+		    vnet_buffer (p0)->ip.flow_hash >> 1;
+		}
+	      else
+		{
+		  hc0 = vnet_buffer (p0)->ip.flow_hash =
+		    ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
+		}
+	    }
+
+ dpo0 =
+ load_balance_get_bucket_i (lb0,
+ hc0 & (lb0->lb_n_buckets_minus_1));
+
+ next0 = dpo0->dpoi_next_node;
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next,
+ to_next, n_left_to_next,
+ pi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_load_balance_node) = {
+  .function = ip4_load_balance,
+  .name = "ip4-load-balance",
+  .vector_size = sizeof (u32),
+  .sibling_of = "ip4-lookup",
+  .format_trace = format_ip4_lookup_trace,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_load_balance_node, ip4_load_balance);
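The shift-by-one reuse of the flow hash is the anti-polarisation trick named in the comment above: if every recursion level masked the identical hash with its bucket count, traffic that picked bucket k at one level would deterministically pick the corresponding bucket at the next, leaving some paths idle. Consuming one bit per level decorrelates the choices. A self-contained illustration:

    #include <stdio.h>

    int
    main (void)
    {
      unsigned hash = 0x2b6e;   /* flow hash, computed once per packet */
      unsigned mask = 3;        /* two levels of 4 buckets each */

      unsigned level0 = hash & mask;
      hash >>= 1;               /* consume one bit per recursion level */
      unsigned level1 = hash & mask;

      /* Without the shift, level1 would always equal level0. */
      printf ("level0=%u level1=%u\n", level0, level1);
      return 0;
    }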
+
+/* get first interface address */
+ip4_address_t *
+ip4_interface_first_address (ip4_main_t * im, u32 sw_if_index,
+ ip_interface_address_t ** result_ia)
+{
+ ip_lookup_main_t *lm = &im->lookup_main;
+ ip_interface_address_t *ia = 0;
+ ip4_address_t *result = 0;
+
+ foreach_ip_interface_address (lm, ia, sw_if_index,
+ 1 /* honor unnumbered */ ,
+ (
+ {
+ ip4_address_t * a =
+ ip_interface_address_get_address (lm, ia);
+ result = a;
+ break;
+ }
+ ));
+ if (result_ia)
+ *result_ia = result ? ia : 0;
+ return result;
+}
+
+static void
+ip4_add_interface_routes (u32 sw_if_index,
+ ip4_main_t * im, u32 fib_index,
+ ip_interface_address_t * a)
+{
+ ip_lookup_main_t *lm = &im->lookup_main;
+ ip4_address_t *address = ip_interface_address_get_address (lm, a);
+ fib_prefix_t pfx = {
+ .fp_len = a->address_length,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr.ip4 = *address,
+ };
+
+ a->neighbor_probe_adj_index = ~0;
+
+ if (pfx.fp_len < 32)
+ {
+ fib_node_index_t fei;
+
+      fei = fib_table_entry_update_one_path (fib_index, &pfx,
+					     FIB_SOURCE_INTERFACE,
+					     (FIB_ENTRY_FLAG_CONNECTED |
+					      FIB_ENTRY_FLAG_ATTACHED),
+					     FIB_PROTOCOL_IP4,
+					     NULL, /* no next-hop address */
+					     sw_if_index,
+					     ~0, /* invalid FIB index */
+					     1,
+					     NULL, /* no out-label stack */
+					     FIB_ROUTE_PATH_FLAG_NONE);
+ a->neighbor_probe_adj_index = fib_entry_get_adj (fei);
+ }
+
+ pfx.fp_len = 32;
+
+ if (sw_if_index < vec_len (lm->classify_table_index_by_sw_if_index))
+ {
+ u32 classify_table_index =
+ lm->classify_table_index_by_sw_if_index[sw_if_index];
+ if (classify_table_index != (u32) ~ 0)
+ {
+ dpo_id_t dpo = DPO_INVALID;
+
+ dpo_set (&dpo,
+ DPO_CLASSIFY,
+ DPO_PROTO_IP4,
+ classify_dpo_create (DPO_PROTO_IP4, classify_table_index));
+
+ fib_table_entry_special_dpo_add (fib_index,
+ &pfx,
+ FIB_SOURCE_CLASSIFY,
+ FIB_ENTRY_FLAG_NONE, &dpo);
+ dpo_reset (&dpo);
+ }
+ }
+
+  fib_table_entry_update_one_path (fib_index, &pfx,
+				   FIB_SOURCE_INTERFACE,
+				   (FIB_ENTRY_FLAG_CONNECTED |
+				    FIB_ENTRY_FLAG_LOCAL),
+				   FIB_PROTOCOL_IP4,
+				   &pfx.fp_addr,
+				   sw_if_index,
+				   ~0, /* invalid FIB index */
+				   1,
+				   NULL, /* no out-label stack */
+				   FIB_ROUTE_PATH_FLAG_NONE);
+}
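Concretely, adding 10.0.0.1/24 to an interface through this function installs two FIB entries: a connected, attached 10.0.0.0/24 covering the on-link subnet (whose adjacency is also recorded for neighbour probing), and a connected, local 10.0.0.1/32 that delivers traffic for the address itself to ip4-local. A hedged sketch of the two prefixes involved (the addresses are examples only):

    /* Subnet route, fp_len < 32: resolves on-link neighbours. */
    fib_prefix_t subnet = {
      .fp_len = 24,                  /* a->address_length */
      .fp_proto = FIB_PROTOCOL_IP4,  /* .fp_addr.ip4 = 10.0.0.0 */
    };

    /* Receive route, fp_len forced to 32: "this address is us". */
    fib_prefix_t local = {
      .fp_len = 32,
      .fp_proto = FIB_PROTOCOL_IP4,  /* .fp_addr.ip4 = 10.0.0.1 */
    };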
+
+static void
+ip4_del_interface_routes (ip4_main_t * im,
+ u32 fib_index,
+ ip4_address_t * address, u32 address_length)
+{
+ fib_prefix_t pfx = {
+ .fp_len = address_length,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr.ip4 = *address,
+ };
+
+ if (pfx.fp_len < 32)
+ {
+ fib_table_entry_delete (fib_index, &pfx, FIB_SOURCE_INTERFACE);
+ }
+
+ pfx.fp_len = 32;
+ fib_table_entry_delete (fib_index, &pfx, FIB_SOURCE_INTERFACE);
+}
+
+void
+ip4_sw_interface_enable_disable (u32 sw_if_index, u32 is_enable)
+{
+ ip4_main_t *im = &ip4_main;
+
+ vec_validate_init_empty (im->ip_enabled_by_sw_if_index, sw_if_index, 0);
+
+ /*
+ * enable/disable only on the 1<->0 transition
+ */
+ if (is_enable)
+ {
+ if (1 != ++im->ip_enabled_by_sw_if_index[sw_if_index])
+ return;
+ }
+ else
+ {
+ ASSERT (im->ip_enabled_by_sw_if_index[sw_if_index] > 0);
+ if (0 != --im->ip_enabled_by_sw_if_index[sw_if_index])
+ return;
+ }
+ vnet_feature_enable_disable ("ip4-unicast", "ip4-drop", sw_if_index,
+ !is_enable, 0, 0);
+
+ vnet_feature_enable_disable ("ip4-multicast", "ip4-drop", sw_if_index,
+ !is_enable, 0, 0);
+}
+
+static clib_error_t *
+ip4_add_del_interface_address_internal (vlib_main_t * vm,
+ u32 sw_if_index,
+ ip4_address_t * address,
+ u32 address_length, u32 is_del)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip4_main_t *im = &ip4_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+ clib_error_t *error = 0;
+ u32 if_address_index, elts_before;
+ ip4_address_fib_t ip4_af, *addr_fib = 0;
+
+ vec_validate (im->fib_index_by_sw_if_index, sw_if_index);
+ ip4_addr_fib_init (&ip4_af, address,
+ vec_elt (im->fib_index_by_sw_if_index, sw_if_index));
+ vec_add1 (addr_fib, ip4_af);
+
+ /* FIXME-LATER
+ * there is no support for adj-fib handling in the presence of overlapping
+ * subnets on interfaces. Easy fix - disallow overlapping subnets, like
+ * most routers do.
+ */
+ if (!is_del)
+ {
+ /* When adding an address check that it does not conflict
+ with an existing address. */
+ ip_interface_address_t *ia;
+ foreach_ip_interface_address (&im->lookup_main, ia, sw_if_index,
+ 0 /* honor unnumbered */ ,
+ (
+ {
+ ip4_address_t * x =
+ ip_interface_address_get_address
+ (&im->lookup_main, ia);
+ if (ip4_destination_matches_route
+ (im, address, x, ia->address_length)
+ ||
+ ip4_destination_matches_route (im,
+ x,
+ address,
+ address_length))
+ return
+ clib_error_create
+ ("failed to add %U which conflicts with %U for interface %U",
+ format_ip4_address_and_length, address,
+ address_length,
+ format_ip4_address_and_length, x,
+ ia->address_length,
+ format_vnet_sw_if_index_name, vnm,
+ sw_if_index);}
+ ));
+ }
+
+ elts_before = pool_elts (lm->if_address_pool);
+
+ error = ip_interface_address_add_del
+ (lm, sw_if_index, addr_fib, address_length, is_del, &if_address_index);
+ if (error)
+ goto done;
+
+ ip4_sw_interface_enable_disable (sw_if_index, !is_del);
+
+ if (is_del)
+ ip4_del_interface_routes (im, ip4_af.fib_index, address, address_length);
+ else
+ ip4_add_interface_routes (sw_if_index,
+ im, ip4_af.fib_index,
+ pool_elt_at_index
+ (lm->if_address_pool, if_address_index));
+
+  /* Notify registered callbacks only if the address pool actually changed
+   * size, i.e. this was not a duplicate add or a failed delete. */
+ if (elts_before != pool_elts (lm->if_address_pool))
+ {
+ ip4_add_del_interface_address_callback_t *cb;
+ vec_foreach (cb, im->add_del_interface_address_callbacks)
+ cb->function (im, cb->function_opaque, sw_if_index,
+ address, address_length, if_address_index, is_del);
+ }
+
+done:
+ vec_free (addr_fib);
+ return error;
+}
+
+clib_error_t *
+ip4_add_del_interface_address (vlib_main_t * vm, u32 sw_if_index,
+ ip4_address_t * address, u32 address_length,
+ u32 is_del)
+{
+ return ip4_add_del_interface_address_internal
+ (vm, sw_if_index, address, address_length, is_del);
+}
+
+/* Built-in ip4 unicast rx feature path definition */
+/* *INDENT-OFF* */
+VNET_FEATURE_ARC_INIT (ip4_unicast, static) =
+{
+ .arc_name = "ip4-unicast",
+ .start_nodes = VNET_FEATURES ("ip4-input", "ip4-input-no-checksum"),
+ .end_node = "ip4-lookup",
+ .arc_index_ptr = &ip4_main.lookup_main.ucast_feature_arc_index,
+};
+
+VNET_FEATURE_INIT (ip4_flow_classify, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-flow-classify",
+ .runs_before = VNET_FEATURES ("ip4-inacl"),
+};
+
+VNET_FEATURE_INIT (ip4_inacl, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-inacl",
+ .runs_before = VNET_FEATURES ("ip4-source-check-via-rx"),
+};
+
+VNET_FEATURE_INIT (ip4_source_check_1, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-source-check-via-rx",
+ .runs_before = VNET_FEATURES ("ip4-source-check-via-any"),
+};
+
+VNET_FEATURE_INIT (ip4_source_check_2, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-source-check-via-any",
+ .runs_before = VNET_FEATURES ("ip4-policer-classify"),
+};
+
+VNET_FEATURE_INIT (ip4_source_and_port_range_check_rx, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-source-and-port-range-check-rx",
+ .runs_before = VNET_FEATURES ("ip4-policer-classify"),
+};
+
+VNET_FEATURE_INIT (ip4_policer_classify, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-policer-classify",
+ .runs_before = VNET_FEATURES ("ipsec-input-ip4"),
+};
+
+VNET_FEATURE_INIT (ip4_ipsec, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ipsec-input-ip4",
+ .runs_before = VNET_FEATURES ("vpath-input-ip4"),
+};
+
+VNET_FEATURE_INIT (ip4_vpath, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "vpath-input-ip4",
+ .runs_before = VNET_FEATURES ("ip4-vxlan-bypass"),
+};
+
+VNET_FEATURE_INIT (ip4_vxlan_bypass, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-vxlan-bypass",
+ .runs_before = VNET_FEATURES ("ip4-lookup"),
+};
+
+VNET_FEATURE_INIT (ip4_lookup, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-lookup",
+ .runs_before = VNET_FEATURES ("ip4-drop"),
+};
+
+VNET_FEATURE_INIT (ip4_drop, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-drop",
+ .runs_before = 0, /* not before any other features */
+};
+
+/* Built-in ip4 multicast rx feature path definition */
+VNET_FEATURE_ARC_INIT (ip4_multicast, static) =
+{
+ .arc_name = "ip4-multicast",
+ .start_nodes = VNET_FEATURES ("ip4-input", "ip4-input-no-checksum"),
+ .end_node = "ip4-lookup-multicast",
+ .arc_index_ptr = &ip4_main.lookup_main.mcast_feature_arc_index,
+};
+
+VNET_FEATURE_INIT (ip4_vpath_mc, static) =
+{
+ .arc_name = "ip4-multicast",
+ .node_name = "vpath-input-ip4",
+ .runs_before = VNET_FEATURES ("ip4-lookup-multicast"),
+};
+
+VNET_FEATURE_INIT (ip4_lookup_mc, static) =
+{
+ .arc_name = "ip4-multicast",
+ .node_name = "ip4-lookup-multicast",
+ .runs_before = VNET_FEATURES ("ip4-drop"),
+};
+
+VNET_FEATURE_INIT (ip4_mc_drop, static) =
+{
+ .arc_name = "ip4-multicast",
+ .node_name = "ip4-drop",
+ .runs_before = 0, /* last feature */
+};
+
+/* Source and port-range check ip4 tx feature path definition */
+VNET_FEATURE_ARC_INIT (ip4_output, static) =
+{
+ .arc_name = "ip4-output",
+ .start_nodes = VNET_FEATURES ("ip4-rewrite", "ip4-midchain"),
+ .end_node = "interface-output",
+ .arc_index_ptr = &ip4_main.lookup_main.output_feature_arc_index,
+};
+
+VNET_FEATURE_INIT (ip4_source_and_port_range_check_tx, static) =
+{
+ .arc_name = "ip4-output",
+ .node_name = "ip4-source-and-port-range-check-tx",
+ .runs_before = VNET_FEATURES ("ipsec-output-ip4"),
+};
+
+VNET_FEATURE_INIT (ip4_ipsec_output, static) =
+{
+ .arc_name = "ip4-output",
+ .node_name = "ipsec-output-ip4",
+ .runs_before = VNET_FEATURES ("interface-output"),
+};
+
+/* Built-in ip4 tx feature path definition */
+VNET_FEATURE_INIT (ip4_interface_output, static) =
+{
+ .arc_name = "ip4-output",
+ .node_name = "interface-output",
+ .runs_before = 0, /* not before any other features */
+};
+/* *INDENT-ON* */
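Each VNET_FEATURE_INIT above contributes one node plus runs_before ordering constraints; at init time the constraints are topologically sorted into the arc's fixed feature order, and per-interface bitmaps then select which features actually run. Enabling a feature at runtime uses the same call this file already makes for ip4-drop; a hedged sketch (sw_if_index 1 is an example):

    /* Turn the input ACL on for sw_if_index 1 on the ip4-unicast arc;
     * the trailing 0, 0 are the opaque feature config and its length. */
    vnet_feature_enable_disable ("ip4-unicast", "ip4-inacl",
                                 1 /* sw_if_index */, 1 /* enable */,
                                 0, 0);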
+
+static clib_error_t *
+ip4_sw_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add)
+{
+ ip4_main_t *im = &ip4_main;
+
+ /* Fill in lookup tables with default table (0). */
+ vec_validate (im->fib_index_by_sw_if_index, sw_if_index);
+
+ vnet_feature_enable_disable ("ip4-unicast", "ip4-drop", sw_if_index,
+ is_add, 0, 0);
+
+ vnet_feature_enable_disable ("ip4-multicast", "ip4-drop", sw_if_index,
+ is_add, 0, 0);
+
+ return /* no error */ 0;
+}
+
+VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ip4_sw_interface_add_del);
+
+/* Global IP4 main. */
+ip4_main_t ip4_main;
+
+clib_error_t *
+ip4_lookup_init (vlib_main_t * vm)
+{
+ ip4_main_t *im = &ip4_main;
+ clib_error_t *error;
+ uword i;
+
+ if ((error = vlib_call_init_function (vm, vnet_feature_init)))
+ return error;
+
+ for (i = 0; i < ARRAY_LEN (im->fib_masks); i++)
+ {
+ u32 m;
+
+ if (i < 32)
+ m = pow2_mask (i) << (32 - i);
+ else
+ m = ~0;
+ im->fib_masks[i] = clib_host_to_net_u32 (m);
+ }
+
+ ip_lookup_init (&im->lookup_main, /* is_ip6 */ 0);
+
+ /* Create FIB with index 0 and table id of 0. */
+ fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, 0);
+
+ {
+ pg_node_t *pn;
+ pn = pg_get_node (ip4_lookup_node.index);
+ pn->unformat_edit = unformat_pg_ip4_header;
+ }
+
+ {
+ ethernet_arp_header_t h;
+
+ memset (&h, 0, sizeof (h));
+
+ /* Set target ethernet address to all zeros. */
+ memset (h.ip4_over_ethernet[1].ethernet, 0,
+ sizeof (h.ip4_over_ethernet[1].ethernet));
+
+#define _16(f,v) h.f = clib_host_to_net_u16 (v);
+#define _8(f,v) h.f = v;
+ _16 (l2_type, ETHERNET_ARP_HARDWARE_TYPE_ethernet);
+ _16 (l3_type, ETHERNET_TYPE_IP4);
+ _8 (n_l2_address_bytes, 6);
+ _8 (n_l3_address_bytes, 4);
+ _16 (opcode, ETHERNET_ARP_OPCODE_request);
+#undef _16
+#undef _8
+
+ vlib_packet_template_init (vm, &im->ip4_arp_request_packet_template,
+ /* data */ &h,
+ sizeof (h),
+ /* alloc chunk size */ 8,
+ "ip4 arp");
+ }
+
+ return error;
+}
+
+VLIB_INIT_FUNCTION (ip4_lookup_init);
+
+typedef struct
+{
+ /* Adjacency taken. */
+ u32 dpo_index;
+ u32 flow_hash;
+ u32 fib_index;
+
+ /* Packet data, possibly *after* rewrite. */
+ u8 packet_data[64 - 1 * sizeof (u32)];
+}
+ip4_forward_next_trace_t;
+
+u8 *
+format_ip4_forward_next_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ip4_forward_next_trace_t *t = va_arg (*args, ip4_forward_next_trace_t *);
+ uword indent = format_get_indent (s);
+ s = format (s, "%U%U",
+ format_white_space, indent,
+ format_ip4_header, t->packet_data, sizeof (t->packet_data));
+ return s;
+}
+
+static u8 *
+format_ip4_lookup_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ip4_forward_next_trace_t *t = va_arg (*args, ip4_forward_next_trace_t *);
+ uword indent = format_get_indent (s);
+
+ s = format (s, "fib %d dpo-idx %d flow hash: 0x%08x",
+ t->fib_index, t->dpo_index, t->flow_hash);
+ s = format (s, "\n%U%U",
+ format_white_space, indent,
+ format_ip4_header, t->packet_data, sizeof (t->packet_data));
+ return s;
+}
+
+static u8 *
+format_ip4_rewrite_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ip4_forward_next_trace_t *t = va_arg (*args, ip4_forward_next_trace_t *);
+ vnet_main_t *vnm = vnet_get_main ();
+ uword indent = format_get_indent (s);
+
+ s = format (s, "tx_sw_if_index %d dpo-idx %d : %U flow hash: 0x%08x",
+ t->fib_index, t->dpo_index, format_ip_adjacency,
+ t->dpo_index, FORMAT_IP_ADJACENCY_NONE, t->flow_hash);
+ s = format (s, "\n%U%U",
+ format_white_space, indent,
+ format_ip_adjacency_packet_data,
+ vnm, t->dpo_index, t->packet_data, sizeof (t->packet_data));
+ return s;
+}
+
+/* Common trace function for all ip4-forward next nodes. */
+void
+ip4_forward_next_trace (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, vlib_rx_or_tx_t which_adj_index)
+{
+ u32 *from, n_left;
+ ip4_main_t *im = &ip4_main;
+
+ n_left = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+
+ while (n_left >= 4)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ ip4_forward_next_trace_t *t0, *t1;
+
+ /* Prefetch next iteration. */
+ vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
+ vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
+
+ bi0 = from[0];
+ bi1 = from[1];
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t0->dpo_index = vnet_buffer (b0)->ip.adj_index[which_adj_index];
+ t0->flow_hash = vnet_buffer (b0)->ip.flow_hash;
+ t0->fib_index =
+ (vnet_buffer (b0)->sw_if_index[VLIB_TX] !=
+ (u32) ~ 0) ? vnet_buffer (b0)->sw_if_index[VLIB_TX] :
+ vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (b0)->sw_if_index[VLIB_RX]);
+
+ clib_memcpy (t0->packet_data,
+ vlib_buffer_get_current (b0),
+ sizeof (t0->packet_data));
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
+ t1->dpo_index = vnet_buffer (b1)->ip.adj_index[which_adj_index];
+ t1->flow_hash = vnet_buffer (b1)->ip.flow_hash;
+ t1->fib_index =
+ (vnet_buffer (b1)->sw_if_index[VLIB_TX] !=
+ (u32) ~ 0) ? vnet_buffer (b1)->sw_if_index[VLIB_TX] :
+ vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (b1)->sw_if_index[VLIB_RX]);
+ clib_memcpy (t1->packet_data, vlib_buffer_get_current (b1),
+ sizeof (t1->packet_data));
+ }
+ from += 2;
+ n_left -= 2;
+ }
+
+ while (n_left >= 1)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ ip4_forward_next_trace_t *t0;
+
+ bi0 = from[0];
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t0->dpo_index = vnet_buffer (b0)->ip.adj_index[which_adj_index];
+ t0->flow_hash = vnet_buffer (b0)->ip.flow_hash;
+ t0->fib_index =
+ (vnet_buffer (b0)->sw_if_index[VLIB_TX] !=
+ (u32) ~ 0) ? vnet_buffer (b0)->sw_if_index[VLIB_TX] :
+ vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (b0)->sw_if_index[VLIB_RX]);
+ clib_memcpy (t0->packet_data, vlib_buffer_get_current (b0),
+ sizeof (t0->packet_data));
+ }
+ from += 1;
+ n_left -= 1;
+ }
+}
+
+static uword
+ip4_drop_or_punt (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, ip4_error_t error_code)
+{
+ u32 *buffers = vlib_frame_vector_args (frame);
+ uword n_packets = frame->n_vectors;
+
+ vlib_error_drop_buffers (vm, node, buffers,
+ /* stride */ 1,
+ n_packets,
+ /* next */ 0,
+ ip4_input_node.index, error_code);
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip4_forward_next_trace (vm, node, frame, VLIB_TX);
+
+ return n_packets;
+}
+
+static uword
+ip4_drop (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return ip4_drop_or_punt (vm, node, frame, IP4_ERROR_ADJACENCY_DROP);
+}
+
+static uword
+ip4_punt (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return ip4_drop_or_punt (vm, node, frame, IP4_ERROR_ADJACENCY_PUNT);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_drop_node, static) = {
+  .function = ip4_drop,
+  .name = "ip4-drop",
+  .vector_size = sizeof (u32),
+  .format_trace = format_ip4_forward_next_trace,
+  .n_next_nodes = 1,
+  .next_nodes = {
+    [0] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_drop_node, ip4_drop);
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_punt_node, static) = {
+  .function = ip4_punt,
+  .name = "ip4-punt",
+  .vector_size = sizeof (u32),
+  .format_trace = format_ip4_forward_next_trace,
+  .n_next_nodes = 1,
+  .next_nodes = {
+    [0] = "error-punt",
+  },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_punt_node, ip4_punt);
+
+/* Compute TCP/UDP/ICMP4 checksum in software. */
+u16
+ip4_tcp_udp_compute_checksum (vlib_main_t * vm, vlib_buffer_t * p0,
+ ip4_header_t * ip0)
+{
+ ip_csum_t sum0;
+ u32 ip_header_length, payload_length_host_byte_order;
+ u32 n_this_buffer, n_bytes_left;
+ u16 sum16;
+ void *data_this_buffer;
+
+ /* Initialize checksum with ip header. */
+ ip_header_length = ip4_header_bytes (ip0);
+ payload_length_host_byte_order =
+ clib_net_to_host_u16 (ip0->length) - ip_header_length;
+ sum0 =
+ clib_host_to_net_u32 (payload_length_host_byte_order +
+ (ip0->protocol << 16));
+
+ if (BITS (uword) == 32)
+ {
+ sum0 =
+ ip_csum_with_carry (sum0,
+ clib_mem_unaligned (&ip0->src_address, u32));
+ sum0 =
+ ip_csum_with_carry (sum0,
+ clib_mem_unaligned (&ip0->dst_address, u32));
+ }
+ else
+ sum0 =
+ ip_csum_with_carry (sum0, clib_mem_unaligned (&ip0->src_address, u64));
+
+ n_bytes_left = n_this_buffer = payload_length_host_byte_order;
+ data_this_buffer = (void *) ip0 + ip_header_length;
+ if (n_this_buffer + ip_header_length > p0->current_length)
+ n_this_buffer =
+ p0->current_length >
+ ip_header_length ? p0->current_length - ip_header_length : 0;
+ while (1)
+ {
+ sum0 = ip_incremental_checksum (sum0, data_this_buffer, n_this_buffer);
+ n_bytes_left -= n_this_buffer;
+ if (n_bytes_left == 0)
+ break;
+
+ ASSERT (p0->flags & VLIB_BUFFER_NEXT_PRESENT);
+ p0 = vlib_get_buffer (vm, p0->next_buffer);
+ data_this_buffer = vlib_buffer_get_current (p0);
+ n_this_buffer = p0->current_length;
+ }
+
+ sum16 = ~ip_csum_fold (sum0);
+
+ return sum16;
+}
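The sum above is seeded with the TCP/UDP pseudo-header (source/destination address, protocol, L4 length) and then accumulated over the payload, following buffer chains as needed. A self-contained sketch of the core ones'-complement fold over a flat buffer (the VPP version differs in that it carries a wider ip_csum_t and walks chained buffers):

    #include <stdint.h>
    #include <stddef.h>

    /* Ones'-complement checksum over a flat buffer, big-endian word order;
     * 'sum' carries any pseudo-header seed already accumulated. */
    static uint16_t
    demo_csum_fold (const void *data, size_t len, uint32_t sum)
    {
      const uint8_t *p = data;
      while (len > 1)
        {
          sum += (uint32_t) ((p[0] << 8) | p[1]);
          p += 2;
          len -= 2;
        }
      if (len) /* an odd trailing byte is padded with zero */
        sum += (uint32_t) (p[0] << 8);
      while (sum >> 16) /* fold carries back into 16 bits */
        sum = (sum & 0xffff) + (sum >> 16);
      return (uint16_t) ~sum;
    }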
+
+u32
+ip4_tcp_udp_validate_checksum (vlib_main_t * vm, vlib_buffer_t * p0)
+{
+ ip4_header_t *ip0 = vlib_buffer_get_current (p0);
+ udp_header_t *udp0;
+ u16 sum16;
+
+ ASSERT (ip0->protocol == IP_PROTOCOL_TCP
+ || ip0->protocol == IP_PROTOCOL_UDP);
+
+ udp0 = (void *) (ip0 + 1);
+ if (ip0->protocol == IP_PROTOCOL_UDP && udp0->checksum == 0)
+ {
+ p0->flags |= (IP_BUFFER_L4_CHECKSUM_COMPUTED
+ | IP_BUFFER_L4_CHECKSUM_CORRECT);
+ return p0->flags;
+ }
+
+ sum16 = ip4_tcp_udp_compute_checksum (vm, p0, ip0);
+
+ p0->flags |= (IP_BUFFER_L4_CHECKSUM_COMPUTED
+ | ((sum16 == 0) << LOG2_IP_BUFFER_L4_CHECKSUM_CORRECT));
+
+ return p0->flags;
+}
+
+static uword
+ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ ip4_main_t *im = &ip4_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+ ip_local_next_t next_index;
+ u32 *from, *to_next, n_left_from, n_left_to_next;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip4_input_node.index);
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip4_forward_next_trace (vm, node, frame, VLIB_TX);
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ vlib_buffer_t *p0, *p1;
+ ip4_header_t *ip0, *ip1;
+ udp_header_t *udp0, *udp1;
+ ip4_fib_mtrie_t *mtrie0, *mtrie1;
+ ip4_fib_mtrie_leaf_t leaf0, leaf1;
+ const dpo_id_t *dpo0, *dpo1;
+ const load_balance_t *lb0, *lb1;
+ u32 pi0, ip_len0, udp_len0, flags0, next0, fib_index0, lbi0;
+ u32 pi1, ip_len1, udp_len1, flags1, next1, fib_index1, lbi1;
+ i32 len_diff0, len_diff1;
+ u8 error0, is_udp0, is_tcp_udp0, good_tcp_udp0, proto0;
+ u8 error1, is_udp1, is_tcp_udp1, good_tcp_udp1, proto1;
+ u8 enqueue_code;
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+
+ ip0 = vlib_buffer_get_current (p0);
+ ip1 = vlib_buffer_get_current (p1);
+
+ vnet_buffer (p0)->ip.start_of_ip_header = p0->current_data;
+ vnet_buffer (p1)->ip.start_of_ip_header = p1->current_data;
+
+ fib_index0 = vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p0)->sw_if_index[VLIB_RX]);
+ fib_index0 = (vnet_buffer (p0)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
+ fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX];
+
+ fib_index1 = vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p1)->sw_if_index[VLIB_RX]);
+ fib_index1 = (vnet_buffer (p1)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
+ fib_index1 : vnet_buffer (p1)->sw_if_index[VLIB_TX];
+
+ mtrie0 = &ip4_fib_get (fib_index0)->mtrie;
+ mtrie1 = &ip4_fib_get (fib_index1)->mtrie;
+
+ leaf0 = leaf1 = IP4_FIB_MTRIE_LEAF_ROOT;
+
+ leaf0 =
+ ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 0);
+ leaf1 =
+ ip4_fib_mtrie_lookup_step (mtrie1, leaf1, &ip1->src_address, 0);
+
+	  /* Treat IP fragments as an "experimental" protocol for now,
+	     until IP fragment reassembly is implemented. */
+ proto0 = ip4_is_fragment (ip0) ? 0xfe : ip0->protocol;
+ proto1 = ip4_is_fragment (ip1) ? 0xfe : ip1->protocol;
+ is_udp0 = proto0 == IP_PROTOCOL_UDP;
+ is_udp1 = proto1 == IP_PROTOCOL_UDP;
+ is_tcp_udp0 = is_udp0 || proto0 == IP_PROTOCOL_TCP;
+ is_tcp_udp1 = is_udp1 || proto1 == IP_PROTOCOL_TCP;
+
+ flags0 = p0->flags;
+ flags1 = p1->flags;
+
+ good_tcp_udp0 = (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ good_tcp_udp1 = (flags1 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+
+ udp0 = ip4_next_header (ip0);
+ udp1 = ip4_next_header (ip1);
+
+ /* Don't verify UDP checksum for packets with explicit zero checksum. */
+ good_tcp_udp0 |= is_udp0 && udp0->checksum == 0;
+ good_tcp_udp1 |= is_udp1 && udp1->checksum == 0;
+
+ leaf0 =
+ ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 1);
+ leaf1 =
+ ip4_fib_mtrie_lookup_step (mtrie1, leaf1, &ip1->src_address, 1);
+
+ /* Verify UDP length. */
+ ip_len0 = clib_net_to_host_u16 (ip0->length);
+ ip_len1 = clib_net_to_host_u16 (ip1->length);
+ udp_len0 = clib_net_to_host_u16 (udp0->length);
+ udp_len1 = clib_net_to_host_u16 (udp1->length);
+
+ len_diff0 = ip_len0 - udp_len0;
+ len_diff1 = ip_len1 - udp_len1;
+
+ len_diff0 = is_udp0 ? len_diff0 : 0;
+ len_diff1 = is_udp1 ? len_diff1 : 0;
+
+ if (PREDICT_FALSE (!(is_tcp_udp0 & is_tcp_udp1
+ & good_tcp_udp0 & good_tcp_udp1)))
+ {
+ if (is_tcp_udp0)
+ {
+ if (is_tcp_udp0
+ && !(flags0 & IP_BUFFER_L4_CHECKSUM_COMPUTED))
+ flags0 = ip4_tcp_udp_validate_checksum (vm, p0);
+ good_tcp_udp0 =
+ (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ good_tcp_udp0 |= is_udp0 && udp0->checksum == 0;
+ }
+ if (is_tcp_udp1)
+ {
+ if (is_tcp_udp1
+ && !(flags1 & IP_BUFFER_L4_CHECKSUM_COMPUTED))
+ flags1 = ip4_tcp_udp_validate_checksum (vm, p1);
+ good_tcp_udp1 =
+ (flags1 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ good_tcp_udp1 |= is_udp1 && udp1->checksum == 0;
+ }
+ }
+
+ good_tcp_udp0 &= len_diff0 >= 0;
+ good_tcp_udp1 &= len_diff1 >= 0;
+
+ leaf0 =
+ ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 2);
+ leaf1 =
+ ip4_fib_mtrie_lookup_step (mtrie1, leaf1, &ip1->src_address, 2);
+
+ error0 = error1 = IP4_ERROR_UNKNOWN_PROTOCOL;
+
+ error0 = len_diff0 < 0 ? IP4_ERROR_UDP_LENGTH : error0;
+ error1 = len_diff1 < 0 ? IP4_ERROR_UDP_LENGTH : error1;
+
+ ASSERT (IP4_ERROR_TCP_CHECKSUM + 1 == IP4_ERROR_UDP_CHECKSUM);
+ error0 = (is_tcp_udp0 && !good_tcp_udp0
+ ? IP4_ERROR_TCP_CHECKSUM + is_udp0 : error0);
+ error1 = (is_tcp_udp1 && !good_tcp_udp1
+ ? IP4_ERROR_TCP_CHECKSUM + is_udp1 : error1);
+
+ leaf0 =
+ ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 3);
+ leaf1 =
+ ip4_fib_mtrie_lookup_step (mtrie1, leaf1, &ip1->src_address, 3);
+ leaf0 =
+ (leaf0 ==
+ IP4_FIB_MTRIE_LEAF_EMPTY ? mtrie0->default_leaf : leaf0);
+ leaf1 =
+ (leaf1 ==
+ IP4_FIB_MTRIE_LEAF_EMPTY ? mtrie1->default_leaf : leaf1);
+
+ vnet_buffer (p0)->ip.adj_index[VLIB_RX] = lbi0 =
+ ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] = lbi0;
+
+ vnet_buffer (p1)->ip.adj_index[VLIB_RX] = lbi1 =
+ ip4_fib_mtrie_leaf_get_adj_index (leaf1);
+ vnet_buffer (p1)->ip.adj_index[VLIB_TX] = lbi1;
+
+ lb0 = load_balance_get (lbi0);
+ lb1 = load_balance_get (lbi1);
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
+ dpo1 = load_balance_get_bucket_i (lb1, 0);
+
+	  /*
+	   * There must be a route to the source, otherwise we drop the
+	   * packet. ip4 broadcasts are accepted, e.g. so the DHCP client
+	   * works.
+	   *
+	   * The checks are:
+	   *  - the source is a receive => it claims to be from us => bogus;
+	   *    do this first since it sets a different error code.
+	   *  - uRPF check for any route to the source - accept if it passes.
+	   *  - allow packets destined to the broadcast address from unknown
+	   *    sources.
+	   */
+ error0 = ((error0 == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ dpo0->dpoi_type == DPO_RECEIVE) ?
+ IP4_ERROR_SPOOFED_LOCAL_PACKETS : error0);
+ error0 = ((error0 == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ !fib_urpf_check_size (lb0->lb_urpf) &&
+ ip0->dst_address.as_u32 != 0xFFFFFFFF)
+ ? IP4_ERROR_SRC_LOOKUP_MISS : error0);
+ error1 = ((error1 == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ dpo1->dpoi_type == DPO_RECEIVE) ?
+ IP4_ERROR_SPOOFED_LOCAL_PACKETS : error1);
+ error1 = ((error1 == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ !fib_urpf_check_size (lb1->lb_urpf) &&
+ ip1->dst_address.as_u32 != 0xFFFFFFFF)
+ ? IP4_ERROR_SRC_LOOKUP_MISS : error1);
+
+ next0 = lm->local_next_by_ip_protocol[proto0];
+ next1 = lm->local_next_by_ip_protocol[proto1];
+
+ next0 =
+ error0 != IP4_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next0;
+ next1 =
+ error1 != IP4_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next1;
+
+ p0->error = error0 ? error_node->errors[error0] : 0;
+ p1->error = error1 ? error_node->errors[error1] : 0;
+
+ enqueue_code = (next0 != next_index) + 2 * (next1 != next_index);
+
+ if (PREDICT_FALSE (enqueue_code != 0))
+ {
+ switch (enqueue_code)
+ {
+ case 1:
+ /* A B A */
+ to_next[-2] = pi1;
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node, next0, pi0);
+ break;
+
+ case 2:
+ /* A A B */
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node, next1, pi1);
+ break;
+
+ case 3:
+ /* A B B or A B C */
+ to_next -= 2;
+ n_left_to_next += 2;
+ vlib_set_next_frame_buffer (vm, node, next0, pi0);
+ vlib_set_next_frame_buffer (vm, node, next1, pi1);
+ if (next0 == next1)
+ {
+ vlib_put_next_frame (vm, node, next_index,
+ n_left_to_next);
+ next_index = next1;
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+ break;
+ }
+ }
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ ip4_header_t *ip0;
+ udp_header_t *udp0;
+ ip4_fib_mtrie_t *mtrie0;
+ ip4_fib_mtrie_leaf_t leaf0;
+ u32 pi0, next0, ip_len0, udp_len0, flags0, fib_index0, lbi0;
+ i32 len_diff0;
+ u8 error0, is_udp0, is_tcp_udp0, good_tcp_udp0, proto0;
+ load_balance_t *lb0;
+ const dpo_id_t *dpo0;
+
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+
+ ip0 = vlib_buffer_get_current (p0);
+
+ vnet_buffer (p0)->ip.start_of_ip_header = p0->current_data;
+
+ fib_index0 = vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p0)->sw_if_index[VLIB_RX]);
+ fib_index0 = (vnet_buffer (p0)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
+ fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX];
+
+ mtrie0 = &ip4_fib_get (fib_index0)->mtrie;
+
+ leaf0 = IP4_FIB_MTRIE_LEAF_ROOT;
+
+ leaf0 =
+ ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 0);
+
+	  /* Treat IP fragments as an "experimental" protocol for now,
+	     until IP fragment reassembly is implemented. */
+ proto0 = ip4_is_fragment (ip0) ? 0xfe : ip0->protocol;
+ is_udp0 = proto0 == IP_PROTOCOL_UDP;
+ is_tcp_udp0 = is_udp0 || proto0 == IP_PROTOCOL_TCP;
+
+ flags0 = p0->flags;
+
+ good_tcp_udp0 = (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+
+ udp0 = ip4_next_header (ip0);
+
+ /* Don't verify UDP checksum for packets with explicit zero checksum. */
+ good_tcp_udp0 |= is_udp0 && udp0->checksum == 0;
+
+ leaf0 =
+ ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 1);
+
+ /* Verify UDP length. */
+ ip_len0 = clib_net_to_host_u16 (ip0->length);
+ udp_len0 = clib_net_to_host_u16 (udp0->length);
+
+ len_diff0 = ip_len0 - udp_len0;
+
+ len_diff0 = is_udp0 ? len_diff0 : 0;
+
+ if (PREDICT_FALSE (!(is_tcp_udp0 & good_tcp_udp0)))
+ {
+ if (is_tcp_udp0)
+ {
+ if (!(flags0 & IP_BUFFER_L4_CHECKSUM_COMPUTED))
+ flags0 = ip4_tcp_udp_validate_checksum (vm, p0);
+ good_tcp_udp0 =
+ (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ good_tcp_udp0 |= is_udp0 && udp0->checksum == 0;
+ }
+ }
+
+ good_tcp_udp0 &= len_diff0 >= 0;
+
+ leaf0 =
+ ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 2);
+
+ error0 = IP4_ERROR_UNKNOWN_PROTOCOL;
+
+ error0 = len_diff0 < 0 ? IP4_ERROR_UDP_LENGTH : error0;
+
+ ASSERT (IP4_ERROR_TCP_CHECKSUM + 1 == IP4_ERROR_UDP_CHECKSUM);
+ error0 = (is_tcp_udp0 && !good_tcp_udp0
+ ? IP4_ERROR_TCP_CHECKSUM + is_udp0 : error0);
+
+ leaf0 =
+ ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 3);
+ leaf0 =
+ (leaf0 ==
+ IP4_FIB_MTRIE_LEAF_EMPTY ? mtrie0->default_leaf : leaf0);
+
+ lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] = lbi0;
+
+ lb0 = load_balance_get (lbi0);
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
+
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
+ vnet_buffer (p0)->ip.adj_index[VLIB_RX] = lbi0;
+
+ error0 = ((error0 == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ dpo0->dpoi_type == DPO_RECEIVE) ?
+ IP4_ERROR_SPOOFED_LOCAL_PACKETS : error0);
+ error0 = ((error0 == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ !fib_urpf_check_size (lb0->lb_urpf) &&
+ ip0->dst_address.as_u32 != 0xFFFFFFFF)
+ ? IP4_ERROR_SRC_LOOKUP_MISS : error0);
+
+ next0 = lm->local_next_by_ip_protocol[proto0];
+
+ next0 =
+ error0 != IP4_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next0;
+
+ p0->error = error0 ? error_node->errors[error0] : 0;
+
+ if (PREDICT_FALSE (next0 != next_index))
+ {
+ n_left_to_next += 1;
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+
+ next_index = next0;
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ to_next[0] = pi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_local_node, static) =
+{
+  .function = ip4_local,
+  .name = "ip4-local",
+  .vector_size = sizeof (u32),
+  .format_trace = format_ip4_forward_next_trace,
+  .n_next_nodes = IP_LOCAL_N_NEXT,
+  .next_nodes =
+  {
+    [IP_LOCAL_NEXT_DROP] = "error-drop",
+    [IP_LOCAL_NEXT_PUNT] = "error-punt",
+    [IP_LOCAL_NEXT_UDP_LOOKUP] = "ip4-udp-lookup",
+    [IP_LOCAL_NEXT_ICMP] = "ip4-icmp-input",
+  },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_local_node, ip4_local);
+
+void
+ip4_register_protocol (u32 protocol, u32 node_index)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ ip4_main_t *im = &ip4_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+
+ ASSERT (protocol < ARRAY_LEN (lm->local_next_by_ip_protocol));
+ lm->local_next_by_ip_protocol[protocol] =
+ vlib_node_add_next (vm, ip4_local_node.index, node_index);
+}
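+
+/*
+ * Illustrative sketch, not part of this patch: a node that terminates an
+ * IP protocol registers itself once at init time. "ospf_input_node" and
+ * "ospf_input_init" are hypothetical names used only for the example.
+ *
+ *   static clib_error_t *
+ *   ospf_input_init (vlib_main_t * vm)
+ *   {
+ *     ip4_register_protocol (IP_PROTOCOL_OSPF, ospf_input_node.index);
+ *     return 0;
+ *   }
+ *   VLIB_INIT_FUNCTION (ospf_input_init);
+ *
+ * Thereafter ip4-local dispatches protocol-89 packets to that node rather
+ * than taking the default IP_LOCAL_NEXT_PUNT arc.
+ */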
+
+static clib_error_t *
+show_ip_local_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ ip4_main_t *im = &ip4_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+ int i;
+
+ vlib_cli_output (vm, "Protocols handled by ip4_local");
+ for (i = 0; i < ARRAY_LEN (lm->local_next_by_ip_protocol); i++)
+ {
+ if (lm->local_next_by_ip_protocol[i] != IP_LOCAL_NEXT_PUNT)
+ vlib_cli_output (vm, "%d", i);
+ }
+ return 0;
+}
+
+
+
+/*?
+ * Display the set of protocols handled by the local IPv4 stack.
+ *
+ * @cliexpar
+ * Example of how to display local protocol table:
+ * @cliexstart{show ip local}
+ * Protocols handled by ip4_local
+ * 1
+ * 17
+ * 47
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_ip_local, static) =
+{
+ .path = "show ip local",
+ .function = show_ip_local_command_fn,
+ .short_help = "show ip local",
+};
+/* *INDENT-ON* */
+
+always_inline uword
+ip4_arp_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, int is_glean)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip4_main_t *im = &ip4_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+ u32 *from, *to_next_drop;
+ uword n_left_from, n_left_to_next_drop, next_index;
+ static f64 time_last_seed_change = -1e100;
+ static u32 hash_seeds[3];
+ static uword hash_bitmap[256 / BITS (uword)];
+ f64 time_now;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip4_forward_next_trace (vm, node, frame, VLIB_TX);
+
+ time_now = vlib_time_now (vm);
+ if (time_now - time_last_seed_change > 1e-3)
+ {
+ uword i;
+ u32 *r = clib_random_buffer_get_data (&vm->random_buffer,
+ sizeof (hash_seeds));
+ for (i = 0; i < ARRAY_LEN (hash_seeds); i++)
+ hash_seeds[i] = r[i];
+
+ /* Mark all hash keys as not seen before. */
+ for (i = 0; i < ARRAY_LEN (hash_bitmap); i++)
+ hash_bitmap[i] = 0;
+
+ time_last_seed_change = time_now;
+ }
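+
+  /*
+   * The seeds and bitmap above implement a simple ARP-request throttle:
+   * each (next-hop, tx-interface) pair hashes to one bit in hash_bitmap,
+   * and only the first packet to set its bit within a seed interval
+   * triggers an ARP request; later hits are dropped. A hash collision
+   * may suppress a request for an unrelated next-hop, which is harmless
+   * since the seeds are refreshed and the bitmap cleared every 1ms.
+   */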
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ if (next_index == IP4_ARP_NEXT_DROP)
+ next_index = IP4_ARP_N_NEXT; /* point to first interface */
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, IP4_ARP_NEXT_DROP,
+ to_next_drop, n_left_to_next_drop);
+
+ while (n_left_from > 0 && n_left_to_next_drop > 0)
+ {
+ u32 pi0, adj_index0, a0, b0, c0, m0, sw_if_index0, drop0;
+ ip_adjacency_t *adj0;
+ vlib_buffer_t *p0;
+ ip4_header_t *ip0;
+ uword bm0;
+
+ pi0 = from[0];
+
+ p0 = vlib_get_buffer (vm, pi0);
+
+ adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+ adj0 = ip_get_adjacency (lm, adj_index0);
+ ip0 = vlib_buffer_get_current (p0);
+
+ a0 = hash_seeds[0];
+ b0 = hash_seeds[1];
+ c0 = hash_seeds[2];
+
+ sw_if_index0 = adj0->rewrite_header.sw_if_index;
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = sw_if_index0;
+
+ if (is_glean)
+ {
+ /*
+ * this is the Glean case, so we are ARPing for the
+ * packet's destination
+ */
+ a0 ^= ip0->dst_address.data_u32;
+ }
+ else
+ {
+ a0 ^= adj0->sub_type.nbr.next_hop.ip4.data_u32;
+ }
+ b0 ^= sw_if_index0;
+
+ hash_v3_finalize32 (a0, b0, c0);
+
+ c0 &= BITS (hash_bitmap) - 1;
+ c0 = c0 / BITS (uword);
+ m0 = (uword) 1 << (c0 % BITS (uword));
+
+ bm0 = hash_bitmap[c0];
+ drop0 = (bm0 & m0) != 0;
+
+ /* Mark it as seen. */
+ hash_bitmap[c0] = bm0 | m0;
+
+ from += 1;
+ n_left_from -= 1;
+ to_next_drop[0] = pi0;
+ to_next_drop += 1;
+ n_left_to_next_drop -= 1;
+
+ p0->error =
+ node->errors[drop0 ? IP4_ARP_ERROR_DROP :
+ IP4_ARP_ERROR_REQUEST_SENT];
+
+ /*
+ * The adj has been updated to a rewrite, but the DPO that got us
+ * here hasn't been updated yet. No big deal; we'll drop while we wait.
+ */
+ if (IP_LOOKUP_NEXT_REWRITE == adj0->lookup_next_index)
+ continue;
+
+ if (drop0)
+ continue;
+
+ /*
+ * Can happen if the control-plane is programming tables
+ * with traffic flowing; at least that's today's lame excuse.
+ */
+ if ((is_glean && adj0->lookup_next_index != IP_LOOKUP_NEXT_GLEAN) ||
+ (!is_glean && adj0->lookup_next_index != IP_LOOKUP_NEXT_ARP))
+ {
+ p0->error = node->errors[IP4_ARP_ERROR_NON_ARP_ADJ];
+ }
+ else
+ /* Send ARP request. */
+ {
+ u32 bi0 = 0;
+ vlib_buffer_t *b0;
+ ethernet_arp_header_t *h0;
+ vnet_hw_interface_t *hw_if0;
+
+ h0 =
+ vlib_packet_template_get_packet (vm,
+ &im->ip4_arp_request_packet_template,
+ &bi0);
+
+ /* Add rewrite/encap string for ARP packet. */
+ vnet_rewrite_one_header (adj0[0], h0,
+ sizeof (ethernet_header_t));
+
+ hw_if0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
+
+ /* Src ethernet address in ARP header. */
+ clib_memcpy (h0->ip4_over_ethernet[0].ethernet,
+ hw_if0->hw_address,
+ sizeof (h0->ip4_over_ethernet[0].ethernet));
+
+ if (is_glean)
+ {
+ /* The interface's source address is stashed in the Glean Adj */
+ h0->ip4_over_ethernet[0].ip4 =
+ adj0->sub_type.glean.receive_addr.ip4;
+
+ /* Copy in destination address we are requesting. This is the
+ * glean case, so it's the packet's destination.*/
+ h0->ip4_over_ethernet[1].ip4.data_u32 =
+ ip0->dst_address.data_u32;
+ }
+ else
+ {
+ /* Src IP address in ARP header. */
+ if (ip4_src_address_for_packet
+ (lm, sw_if_index0, &h0->ip4_over_ethernet[0].ip4))
+ {
+ /* No source address available */
+ p0->error =
+ node->errors[IP4_ARP_ERROR_NO_SOURCE_ADDRESS];
+ vlib_buffer_free (vm, &bi0, 1);
+ continue;
+ }
+
+ /* Copy in destination address we are requesting from the
+ incomplete adj */
+ h0->ip4_over_ethernet[1].ip4.data_u32 =
+ adj0->sub_type.nbr.next_hop.ip4.as_u32;
+ }
+
+ vlib_buffer_copy_trace_flag (vm, p0, bi0);
+ b0 = vlib_get_buffer (vm, bi0);
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = sw_if_index0;
+
+ vlib_buffer_advance (b0, -adj0->rewrite_header.data_bytes);
+
+ vlib_set_next_frame_buffer (vm, node,
+ adj0->rewrite_header.next_index,
+ bi0);
+ }
+ }
+
+ vlib_put_next_frame (vm, node, IP4_ARP_NEXT_DROP, n_left_to_next_drop);
+ }
+
+ return frame->n_vectors;
+}
+
+static uword
+ip4_arp (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return (ip4_arp_inline (vm, node, frame, 0));
+}
+
+static uword
+ip4_glean (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return (ip4_arp_inline (vm, node, frame, 1));
+}
+
+static char *ip4_arp_error_strings[] = {
+ [IP4_ARP_ERROR_DROP] = "address overflow drops",
+ [IP4_ARP_ERROR_REQUEST_SENT] = "ARP requests sent",
+ [IP4_ARP_ERROR_NON_ARP_ADJ] = "ARPs to non-ARP adjacencies",
+ [IP4_ARP_ERROR_REPLICATE_DROP] = "ARP replication completed",
+ [IP4_ARP_ERROR_REPLICATE_FAIL] = "ARP replication failed",
+ [IP4_ARP_ERROR_NO_SOURCE_ADDRESS] = "no source address for ARP request",
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_arp_node) =
+{
+  .function = ip4_arp,
+  .name = "ip4-arp",
+  .vector_size = sizeof (u32),
+  .format_trace = format_ip4_forward_next_trace,
+  .n_errors = ARRAY_LEN (ip4_arp_error_strings),
+  .error_strings = ip4_arp_error_strings,
+  .n_next_nodes = IP4_ARP_N_NEXT,
+  .next_nodes =
+  {
+    [IP4_ARP_NEXT_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_glean_node) =
+{
+  .function = ip4_glean,
+  .name = "ip4-glean",
+  .vector_size = sizeof (u32),
+  .format_trace = format_ip4_forward_next_trace,
+  .n_errors = ARRAY_LEN (ip4_arp_error_strings),
+  .error_strings = ip4_arp_error_strings,
+  .n_next_nodes = IP4_ARP_N_NEXT,
+  .next_nodes =
+  {
+    [IP4_ARP_NEXT_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+#define foreach_notrace_ip4_arp_error \
+_(DROP) \
+_(REQUEST_SENT) \
+_(REPLICATE_DROP) \
+_(REPLICATE_FAIL)
+
+clib_error_t *
+arp_notrace_init (vlib_main_t * vm)
+{
+ vlib_node_runtime_t *rt = vlib_node_get_runtime (vm, ip4_arp_node.index);
+
+ /* don't trace ARP request packets */
+#define _(a) \
+ vnet_pcap_drop_trace_filter_add_del \
+ (rt->errors[IP4_ARP_ERROR_##a], \
+ 1 /* is_add */);
+ foreach_notrace_ip4_arp_error;
+#undef _
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (arp_notrace_init);
+
+
+/* Send an ARP request to see if a given destination is reachable on a given interface. */
+clib_error_t *
+ip4_probe_neighbor (vlib_main_t * vm, ip4_address_t * dst, u32 sw_if_index)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip4_main_t *im = &ip4_main;
+ ethernet_arp_header_t *h;
+ ip4_address_t *src;
+ ip_interface_address_t *ia;
+ ip_adjacency_t *adj;
+ vnet_hw_interface_t *hi;
+ vnet_sw_interface_t *si;
+ vlib_buffer_t *b;
+ u32 bi = 0;
+
+ si = vnet_get_sw_interface (vnm, sw_if_index);
+
+ if (!(si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP))
+ {
+ return clib_error_return (0, "%U: interface %U down",
+ format_ip4_address, dst,
+ format_vnet_sw_if_index_name, vnm,
+ sw_if_index);
+ }
+
+ src =
+ ip4_interface_address_matching_destination (im, dst, sw_if_index, &ia);
+ if (!src)
+ {
+ vnm->api_errno = VNET_API_ERROR_NO_MATCHING_INTERFACE;
+ return clib_error_return
+ (0, "no matching interface address for destination %U (interface %U)",
+ format_ip4_address, dst,
+ format_vnet_sw_if_index_name, vnm, sw_if_index);
+ }
+
+ adj = ip_get_adjacency (&im->lookup_main, ia->neighbor_probe_adj_index);
+
+ h =
+ vlib_packet_template_get_packet (vm, &im->ip4_arp_request_packet_template,
+ &bi);
+
+ hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+
+ clib_memcpy (h->ip4_over_ethernet[0].ethernet, hi->hw_address,
+ sizeof (h->ip4_over_ethernet[0].ethernet));
+
+ h->ip4_over_ethernet[0].ip4 = src[0];
+ h->ip4_over_ethernet[1].ip4 = dst[0];
+
+ b = vlib_get_buffer (vm, bi);
+ vnet_buffer (b)->sw_if_index[VLIB_RX] =
+ vnet_buffer (b)->sw_if_index[VLIB_TX] = sw_if_index;
+
+ /* Add encapsulation string for software interface (e.g. ethernet header). */
+ vnet_rewrite_one_header (adj[0], h, sizeof (ethernet_header_t));
+ vlib_buffer_advance (b, -adj->rewrite_header.data_bytes);
+
+ {
+ vlib_frame_t *f = vlib_get_frame_to_node (vm, hi->output_node_index);
+ u32 *to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi;
+ f->n_vectors = 1;
+ vlib_put_frame_to_node (vm, hi->output_node_index, f);
+ }
+
+ return /* no error */ 0;
+}
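+
+/*
+ * Illustrative sketch, not part of this patch: a debug handler could drive
+ * ip4_probe_neighbor directly. The address 10.0.0.1 and "sw_if_index" are
+ * example values; a real caller would obtain them from user input.
+ *
+ *   ip4_address_t dst;
+ *   dst.as_u32 = clib_host_to_net_u32 (0x0a000001); // 10.0.0.1
+ *   clib_error_t *e = ip4_probe_neighbor (vm, &dst, sw_if_index);
+ *   if (e)
+ *     clib_error_report (e);
+ */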
+
+typedef enum
+{
+ IP4_REWRITE_NEXT_DROP,
+ IP4_REWRITE_NEXT_ICMP_ERROR,
+} ip4_rewrite_next_t;
+
+always_inline uword
+ip4_rewrite_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, int is_midchain)
+{
+ ip_lookup_main_t *lm = &ip4_main.lookup_main;
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left_from, n_left_to_next, *to_next, next_index;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip4_input_node.index);
+
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ u32 cpu_index = os_get_cpu_number ();
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ ip_adjacency_t *adj0, *adj1;
+ vlib_buffer_t *p0, *p1;
+ ip4_header_t *ip0, *ip1;
+ u32 pi0, rw_len0, next0, error0, checksum0, adj_index0;
+ u32 pi1, rw_len1, next1, error1, checksum1, adj_index1;
+ u32 tx_sw_if_index0, tx_sw_if_index1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, STORE);
+ vlib_prefetch_buffer_header (p3, STORE);
+
+ CLIB_PREFETCH (p2->data, sizeof (ip0[0]), STORE);
+ CLIB_PREFETCH (p3->data, sizeof (ip0[0]), STORE);
+ }
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+
+ adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+ adj_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];
+
+ /* We should never rewrite a pkt using the MISS adjacency */
+ ASSERT (adj_index0 && adj_index1);
+
+ ip0 = vlib_buffer_get_current (p0);
+ ip1 = vlib_buffer_get_current (p1);
+
+ error0 = error1 = IP4_ERROR_NONE;
+ next0 = next1 = IP4_REWRITE_NEXT_DROP;
+
+ /* Decrement TTL & update checksum.
+ Works either endian, so no need for byte swap. */
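+ /* Incremental update per RFC 1624: decrementing TTL decreases the
+ big-endian 16-bit word holding it by 0x0100, so the one's-complement
+ checksum field must increase by 0x0100; the second line below folds
+ the end-around carry. */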
+ if (PREDICT_TRUE (!(p0->flags & VNET_BUFFER_LOCALLY_ORIGINATED)))
+ {
+ i32 ttl0 = ip0->ttl;
+
+ /* Input node should have rejected packets with ttl 0. */
+ ASSERT (ip0->ttl > 0);
+
+ checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
+ checksum0 += checksum0 >= 0xffff;
+
+ ip0->checksum = checksum0;
+ ttl0 -= 1;
+ ip0->ttl = ttl0;
+
+ /*
+ * If the ttl drops below 1 when forwarding, generate
+ * an ICMP response.
+ */
+ if (PREDICT_FALSE (ttl0 <= 0))
+ {
+ error0 = IP4_ERROR_TIME_EXPIRED;
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ icmp4_error_set_vnet_buffer (p0, ICMP4_time_exceeded,
+ ICMP4_time_exceeded_ttl_exceeded_in_transit,
+ 0);
+ next0 = IP4_REWRITE_NEXT_ICMP_ERROR;
+ }
+
+ /* Verify checksum. */
+ ASSERT (ip0->checksum == ip4_header_checksum (ip0));
+ }
+ else
+ {
+ p0->flags &= ~VNET_BUFFER_LOCALLY_ORIGINATED;
+ }
+ if (PREDICT_TRUE (!(p1->flags & VNET_BUFFER_LOCALLY_ORIGINATED)))
+ {
+ i32 ttl1 = ip1->ttl;
+
+ /* Input node should have rejected packets with ttl 0. */
+ ASSERT (ip1->ttl > 0);
+
+ checksum1 = ip1->checksum + clib_host_to_net_u16 (0x0100);
+ checksum1 += checksum1 >= 0xffff;
+
+ ip1->checksum = checksum1;
+ ttl1 -= 1;
+ ip1->ttl = ttl1;
+
+ /*
+ * If the ttl drops below 1 when forwarding, generate
+ * an ICMP response.
+ */
+ if (PREDICT_FALSE (ttl1 <= 0))
+ {
+ error1 = IP4_ERROR_TIME_EXPIRED;
+ vnet_buffer (p1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ icmp4_error_set_vnet_buffer (p1, ICMP4_time_exceeded,
+ ICMP4_time_exceeded_ttl_exceeded_in_transit,
+ 0);
+ next1 = IP4_REWRITE_NEXT_ICMP_ERROR;
+ }
+
+ /* Verify checksum. */
+ ASSERT (ip1->checksum == ip4_header_checksum (ip1));
+ }
+ else
+ {
+ p1->flags &= ~VNET_BUFFER_LOCALLY_ORIGINATED;
+ }
+
+ /* Rewrite packet header and update lengths. */
+ adj0 = ip_get_adjacency (lm, adj_index0);
+ adj1 = ip_get_adjacency (lm, adj_index1);
+
+ /* Worth pipelining. No guarantee that adj0,1 are hot... */
+ rw_len0 = adj0[0].rewrite_header.data_bytes;
+ rw_len1 = adj1[0].rewrite_header.data_bytes;
+ vnet_buffer (p0)->ip.save_rewrite_length = rw_len0;
+ vnet_buffer (p1)->ip.save_rewrite_length = rw_len1;
+
+ /* Check MTU of outgoing interface. */
+ error0 = (vlib_buffer_length_in_chain (vm, p0)
+ > adj0[0].rewrite_header.max_l3_packet_bytes
+ ? IP4_ERROR_MTU_EXCEEDED : error0);
+ error1 = (vlib_buffer_length_in_chain (vm, p1)
+ > adj1[0].rewrite_header.max_l3_packet_bytes
+ ? IP4_ERROR_MTU_EXCEEDED : error1);
+
+ /*
+ * We've already accounted for an ethernet_header_t elsewhere
+ */
+ if (PREDICT_FALSE (rw_len0 > sizeof (ethernet_header_t)))
+ vlib_increment_combined_counter
+ (&adjacency_counters, cpu_index, adj_index0,
+ /* packet increment */ 0,
+ /* byte increment */ rw_len0 - sizeof (ethernet_header_t));
+
+ if (PREDICT_FALSE (rw_len1 > sizeof (ethernet_header_t)))
+ vlib_increment_combined_counter
+ (&adjacency_counters, cpu_index, adj_index1,
+ /* packet increment */ 0,
+ /* byte increment */ rw_len1 - sizeof (ethernet_header_t));
+
+ /* Don't adjust the buffer for the ttl issue; the icmp-error node wants
+ * to see the IP header */
+ if (PREDICT_TRUE (error0 == IP4_ERROR_NONE))
+ {
+ next0 = adj0[0].rewrite_header.next_index;
+ p0->current_data -= rw_len0;
+ p0->current_length += rw_len0;
+ tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index;
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = tx_sw_if_index0;
+
+ vnet_feature_arc_start (lm->output_feature_arc_index,
+ tx_sw_if_index0, &next0, p0);
+ }
+ if (PREDICT_TRUE (error1 == IP4_ERROR_NONE))
+ {
+ next1 = adj1[0].rewrite_header.next_index;
+ p1->current_data -= rw_len1;
+ p1->current_length += rw_len1;
+
+ tx_sw_if_index1 = adj1[0].rewrite_header.sw_if_index;
+ vnet_buffer (p1)->sw_if_index[VLIB_TX] = tx_sw_if_index1;
+
+ vnet_feature_arc_start (lm->output_feature_arc_index,
+ tx_sw_if_index1, &next1, p1);
+ }
+
+ /* Assume we are only writing a simple Ethernet header. */
+ vnet_rewrite_two_headers (adj0[0], adj1[0],
+ ip0, ip1, sizeof (ethernet_header_t));
+
+ if (is_midchain)
+ {
+ adj0->sub_type.midchain.fixup_func (vm, adj0, p0);
+ adj1->sub_type.midchain.fixup_func (vm, adj1, p1);
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, pi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ ip_adjacency_t *adj0;
+ vlib_buffer_t *p0;
+ ip4_header_t *ip0;
+ u32 pi0, rw_len0, adj_index0, next0, error0, checksum0;
+ u32 tx_sw_if_index0;
+
+ pi0 = to_next[0] = from[0];
+
+ p0 = vlib_get_buffer (vm, pi0);
+
+ adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+
+ /* We should never rewrite a pkt using the MISS adjacency */
+ ASSERT (adj_index0);
+
+ adj0 = ip_get_adjacency (lm, adj_index0);
+
+ ip0 = vlib_buffer_get_current (p0);
+
+ error0 = IP4_ERROR_NONE;
+ next0 = IP4_REWRITE_NEXT_DROP; /* drop on error */
+
+ /* Decrement TTL & update checksum. */
+ if (PREDICT_TRUE (!(p0->flags & VNET_BUFFER_LOCALLY_ORIGINATED)))
+ {
+ i32 ttl0 = ip0->ttl;
+
+ checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
+
+ checksum0 += checksum0 >= 0xffff;
+
+ ip0->checksum = checksum0;
+
+ ASSERT (ip0->ttl > 0);
+
+ ttl0 -= 1;
+
+ ip0->ttl = ttl0;
+
+ ASSERT (ip0->checksum == ip4_header_checksum (ip0));
+
+ if (PREDICT_FALSE (ttl0 <= 0))
+ {
+ /*
+ * If the ttl drops below 1 when forwarding, generate
+ * an ICMP response.
+ */
+ error0 = IP4_ERROR_TIME_EXPIRED;
+ next0 = IP4_REWRITE_NEXT_ICMP_ERROR;
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ icmp4_error_set_vnet_buffer (p0, ICMP4_time_exceeded,
+ ICMP4_time_exceeded_ttl_exceeded_in_transit,
+ 0);
+ }
+ }
+ else
+ {
+ p0->flags &= ~VNET_BUFFER_LOCALLY_ORIGINATED;
+ }
+
+ /* Assume we are only writing a simple Ethernet header. */
+ vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t));
+
+ /* Update packet buffer attributes/set output interface. */
+ rw_len0 = adj0[0].rewrite_header.data_bytes;
+ vnet_buffer (p0)->ip.save_rewrite_length = rw_len0;
+
+ if (PREDICT_FALSE (rw_len0 > sizeof (ethernet_header_t)))
+ vlib_increment_combined_counter
+ (&adjacency_counters, cpu_index, adj_index0,
+ /* packet increment */ 0,
+ /* byte increment */ rw_len0 - sizeof (ethernet_header_t));
+
+ /* Check MTU of outgoing interface. */
+ error0 = (vlib_buffer_length_in_chain (vm, p0)
+ > adj0[0].rewrite_header.max_l3_packet_bytes
+ ? IP4_ERROR_MTU_EXCEEDED : error0);
+
+ p0->error = error_node->errors[error0];
+
+ /* Don't adjust the buffer for the ttl issue; the icmp-error node wants
+ * to see the IP header */
+ if (PREDICT_TRUE (error0 == IP4_ERROR_NONE))
+ {
+ p0->current_data -= rw_len0;
+ p0->current_length += rw_len0;
+ tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index;
+
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = tx_sw_if_index0;
+ next0 = adj0[0].rewrite_header.next_index;
+
+ if (is_midchain)
+ {
+ adj0->sub_type.midchain.fixup_func (vm, adj0, p0);
+ }
+
+ vnet_feature_arc_start (lm->output_feature_arc_index,
+ tx_sw_if_index0, &next0, p0);
+
+ }
+
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ /* Need to do trace after rewrites to pick up new packet data. */
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip4_forward_next_trace (vm, node, frame, VLIB_TX);
+
+ return frame->n_vectors;
+}
+
+
+/** @brief IPv4 rewrite node.
+ @node ip4-rewrite
+
+ This is the IPv4 transit-rewrite node: decrement TTL, fix the ipv4
+ header checksum, fetch the ip adjacency, check the outbound mtu,
+ apply the adjacency rewrite, and send pkts to the adjacency
+ rewrite header's next_index.
+
+ @param vm vlib_main_t corresponding to the current thread
+ @param node vlib_node_runtime_t
+ @param frame vlib_frame_t whose contents should be dispatched
+
+ @par Graph mechanics: buffer metadata, next index usage
+
+ @em Uses:
+ - <code>vnet_buffer(b)->ip.adj_index[VLIB_TX]</code>
+ - the rewrite adjacency index
+ - <code>adj->lookup_next_index</code>
+ - Must be IP_LOOKUP_NEXT_REWRITE or IP_LOOKUP_NEXT_ARP, otherwise
+ the packet will be dropped.
+ - <code>adj->rewrite_header</code>
+ - Rewrite string length, rewrite string, next_index
+
+ @em Sets:
+ - <code>b->current_data, b->current_length</code>
+ - Updated net of applying the rewrite string
+
+ <em>Next Indices:</em>
+ - <code> adj->rewrite_header.next_index </code>
+ or @c error-drop
+*/
+static uword
+ip4_rewrite (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return ip4_rewrite_inline (vm, node, frame, 0);
+}
+
+static uword
+ip4_midchain (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return ip4_rewrite_inline (vm, node, frame, 1);
+}
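+
+/*
+ * Note: ip4-midchain differs from ip4-rewrite only in the is_midchain
+ * argument to ip4_rewrite_inline, which invokes the adjacency's
+ * sub_type.midchain.fixup_func after the rewrite string is applied,
+ * giving e.g. a tunnel a chance to patch length/checksum fields in the
+ * encapsulation header it just prepended.
+ */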
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_rewrite_node) =
+{
+  .function = ip4_rewrite,
+  .name = "ip4-rewrite",
+  .vector_size = sizeof (u32),
+  .format_trace = format_ip4_rewrite_trace,
+  .n_next_nodes = 2,
+  .next_nodes =
+  {
+    [IP4_REWRITE_NEXT_DROP] = "error-drop",
+    [IP4_REWRITE_NEXT_ICMP_ERROR] = "ip4-icmp-error",
+  },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_rewrite_node, ip4_rewrite);
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_midchain_node) =
+{
+  .function = ip4_midchain,
+  .name = "ip4-midchain",
+  .vector_size = sizeof (u32),
+  .format_trace = format_ip4_forward_next_trace,
+  .sibling_of = "ip4-rewrite",
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_midchain_node, ip4_midchain);
+
+static clib_error_t *
+add_del_interface_table (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 sw_if_index, table_id;
+
+ sw_if_index = ~0;
+
+ if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ if (unformat (input, "%d", &table_id))
+ ;
+ else
+ {
+ error = clib_error_return (0, "expected table id `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ {
+ ip4_main_t *im = &ip4_main;
+ u32 fib_index;
+
+ fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4,
+ table_id);
+
+ //
+ // FIXME-LATER
+  // changing an interface's table has consequences for any connected
+  // routes and adj-fibs already installed.
+ //
+ vec_validate (im->fib_index_by_sw_if_index, sw_if_index);
+ im->fib_index_by_sw_if_index[sw_if_index] = fib_index;
+ }
+
+done:
+ return error;
+}
+
+/*?
+ * Place the indicated interface into the supplied IPv4 FIB table (also known
+ * as a VRF). If the FIB table does not exist, this command creates it. To
+ * display the current IPv4 FIB tables, use the command '<em>show ip fib</em>'.
+ * A FIB table is only displayed once a route has been added to it, or an IP
+ * address has been assigned to an interface in it (which adds a route
+ * automatically).
+ *
+ * @note IP addresses added after setting the interface IP table end up in
+ * the indicated FIB table. If the IP address is added prior to adding the
+ * interface to the FIB table, it will NOT be part of the FIB table.
+ * Predictable but potentially counter-intuitive results occur if you
+ * provision interface addresses in multiple FIBs: upon RX, packets are
+ * processed in the last IP table ID provisioned. Putting an interface
+ * address into multiple FIBs is only marginally useful, e.g. to evade
+ * source-RPF drops.
+ *
+ * @cliexpar
+ * Example of how to add an interface to an IPv4 FIB table (where 2 is the table-id):
+ * @cliexcmd{set interface ip table GigabitEthernet2/0/0 2}
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_ip_table_command, static) =
+{
+ .path = "set interface ip table",
+ .function = add_del_interface_table,
+ .short_help = "set interface ip table <interface> <table-id>",
+};
+/* *INDENT-ON* */
+
+
+static uword
+ip4_lookup_multicast (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ ip4_main_t *im = &ip4_main;
+ vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
+ u32 n_left_from, n_left_to_next, *from, *to_next;
+ ip_lookup_next_t next;
+ u32 cpu_index = os_get_cpu_number ();
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ vlib_buffer_t *p0, *p1;
+ u32 pi0, pi1, lb_index0, lb_index1, wrong_next;
+ ip_lookup_next_t next0, next1;
+ ip4_header_t *ip0, *ip1;
+ u32 fib_index0, fib_index1;
+ const dpo_id_t *dpo0, *dpo1;
+ const load_balance_t *lb0, *lb1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, sizeof (ip0[0]), LOAD);
+ CLIB_PREFETCH (p3->data, sizeof (ip0[0]), LOAD);
+ }
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+
+ ip0 = vlib_buffer_get_current (p0);
+ ip1 = vlib_buffer_get_current (p1);
+
+ fib_index0 =
+ vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p0)->sw_if_index[VLIB_RX]);
+ fib_index1 =
+ vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p1)->sw_if_index[VLIB_RX]);
+ fib_index0 =
+ (vnet_buffer (p0)->sw_if_index[VLIB_TX] ==
+ (u32) ~ 0) ? fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX];
+ fib_index1 =
+ (vnet_buffer (p1)->sw_if_index[VLIB_TX] ==
+ (u32) ~ 0) ? fib_index1 : vnet_buffer (p1)->sw_if_index[VLIB_TX];
+
+ lb_index0 = ip4_fib_table_lookup_lb (ip4_fib_get (fib_index0),
+ &ip0->dst_address);
+ lb_index1 = ip4_fib_table_lookup_lb (ip4_fib_get (fib_index1),
+ &ip1->dst_address);
+
+ lb0 = load_balance_get (lb_index0);
+ lb1 = load_balance_get (lb_index1);
+
+ ASSERT (lb0->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb0->lb_n_buckets));
+ ASSERT (lb1->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb1->lb_n_buckets));
+
+ vnet_buffer (p0)->ip.flow_hash = ip4_compute_flow_hash
+ (ip0, lb0->lb_hash_config);
+
+ vnet_buffer (p1)->ip.flow_hash = ip4_compute_flow_hash
+ (ip1, lb1->lb_hash_config);
+
+ dpo0 = load_balance_get_bucket_i (lb0,
+ (vnet_buffer (p0)->ip.flow_hash &
+ (lb0->lb_n_buckets_minus_1)));
+ dpo1 = load_balance_get_bucket_i (lb1,
+ (vnet_buffer (p1)->ip.flow_hash &
+ (lb1->lb_n_buckets_minus_1)));
+
+ next0 = dpo0->dpoi_next_node;
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+ next1 = dpo1->dpoi_next_node;
+ vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
+
+ if (1) /* $$$$$$ HACK FIXME */
+ vlib_increment_combined_counter
+ (cm, cpu_index, lb_index0, 1,
+ vlib_buffer_length_in_chain (vm, p0));
+ if (1) /* $$$$$$ HACK FIXME */
+ vlib_increment_combined_counter
+ (cm, cpu_index, lb_index1, 1,
+ vlib_buffer_length_in_chain (vm, p1));
+
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ wrong_next = (next0 != next) + 2 * (next1 != next);
+ if (PREDICT_FALSE (wrong_next != 0))
+ {
+ switch (wrong_next)
+ {
+ case 1:
+ /* A B A */
+ to_next[-2] = pi1;
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node, next0, pi0);
+ break;
+
+ case 2:
+ /* A A B */
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node, next1, pi1);
+ break;
+
+ case 3:
+ /* A B C */
+ to_next -= 2;
+ n_left_to_next += 2;
+ vlib_set_next_frame_buffer (vm, node, next0, pi0);
+ vlib_set_next_frame_buffer (vm, node, next1, pi1);
+ if (next0 == next1)
+ {
+ /* A B B */
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ next = next1;
+ vlib_get_next_frame (vm, node, next, to_next,
+ n_left_to_next);
+ }
+ }
+ }
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ ip4_header_t *ip0;
+ u32 pi0, lb_index0;
+ ip_lookup_next_t next0;
+ u32 fib_index0;
+ const dpo_id_t *dpo0;
+ const load_balance_t *lb0;
+
+ pi0 = from[0];
+ to_next[0] = pi0;
+
+ p0 = vlib_get_buffer (vm, pi0);
+
+ ip0 = vlib_buffer_get_current (p0);
+
+ fib_index0 = vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p0)->sw_if_index[VLIB_RX]);
+ fib_index0 = (vnet_buffer (p0)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
+ fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX];
+
+ lb_index0 = ip4_fib_table_lookup_lb (ip4_fib_get (fib_index0),
+ &ip0->dst_address);
+
+ lb0 = load_balance_get (lb_index0);
+
+ ASSERT (lb0->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb0->lb_n_buckets));
+
+ vnet_buffer (p0)->ip.flow_hash = ip4_compute_flow_hash
+ (ip0, lb0->lb_hash_config);
+
+ dpo0 = load_balance_get_bucket_i (lb0,
+ (vnet_buffer (p0)->ip.flow_hash &
+ (lb0->lb_n_buckets_minus_1)));
+
+ next0 = dpo0->dpoi_next_node;
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+ if (1) /* $$$$$$ HACK FIXME */
+ vlib_increment_combined_counter
+ (cm, cpu_index, lb_index0, 1,
+ vlib_buffer_length_in_chain (vm, p0));
+
+ from += 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ n_left_from -= 1;
+
+ if (PREDICT_FALSE (next0 != next))
+ {
+ n_left_to_next += 1;
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ next = next0;
+ vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
+ to_next[0] = pi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ }
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip4_forward_next_trace (vm, node, frame, VLIB_TX);
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_lookup_multicast_node, static) =
+{
+  .function = ip4_lookup_multicast,
+  .name = "ip4-lookup-multicast",
+  .vector_size = sizeof (u32),
+  .sibling_of = "ip4-lookup",
+  .format_trace = format_ip4_lookup_trace,
+  .n_next_nodes = 0,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_lookup_multicast_node,
+ ip4_lookup_multicast);
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_multicast_node, static) =
+{
+  .function = ip4_drop,
+  .name = "ip4-multicast",
+  .vector_size = sizeof (u32),
+  .format_trace = format_ip4_forward_next_trace,
+  .n_next_nodes = 1,
+  .next_nodes =
+  {
+    [0] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+int
+ip4_lookup_validate (ip4_address_t * a, u32 fib_index0)
+{
+ ip4_fib_mtrie_t *mtrie0;
+ ip4_fib_mtrie_leaf_t leaf0;
+ u32 lbi0;
+
+ mtrie0 = &ip4_fib_get (fib_index0)->mtrie;
+
+ leaf0 = IP4_FIB_MTRIE_LEAF_ROOT;
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, a, 0);
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, a, 1);
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, a, 2);
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, a, 3);
+
+ /* Handle default route. */
+ leaf0 = (leaf0 == IP4_FIB_MTRIE_LEAF_EMPTY ? mtrie0->default_leaf : leaf0);
+
+ lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+
+ return lbi0 == ip4_fib_table_lookup_lb (ip4_fib_get (fib_index0), a);
+}
+
+static clib_error_t *
+test_lookup_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ ip4_fib_t *fib;
+ u32 table_id = 0;
+ f64 count = 1;
+ u32 n;
+ int i;
+ ip4_address_t ip4_base_address;
+ u64 errors = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "table %d", &table_id))
+ {
+ /* Make sure the entry exists. */
+ fib = ip4_fib_get (table_id);
+ if ((fib) && (fib->index != table_id))
+ return clib_error_return (0, "<fib-index> %d does not exist",
+ table_id);
+ }
+ else if (unformat (input, "count %f", &count))
+ ;
+
+ else if (unformat (input, "%U",
+ unformat_ip4_address, &ip4_base_address))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ n = count;
+
+ for (i = 0; i < n; i++)
+ {
+ if (!ip4_lookup_validate (&ip4_base_address, table_id))
+ errors++;
+
+ ip4_base_address.as_u32 =
+ clib_host_to_net_u32 (1 +
+ clib_net_to_host_u32 (ip4_base_address.as_u32));
+ }
+
+ if (errors)
+ vlib_cli_output (vm, "%llu errors out of %d lookups\n", errors, n);
+ else
+ vlib_cli_output (vm, "No errors in %d lookups\n", n);
+
+ return 0;
+}
+
+/*?
+ * Perform a lookup of an IPv4 Address (or range of addresses) in the
+ * given FIB table to determine if there is a conflict with the
+ * adjacency table. The fib-id can be determined by using the
+ * '<em>show ip fib</em>' command. If fib-id is not entered, the default
+ * value of 0 is used.
+ *
+ * @todo This command uses fib-id, other commands use table-id (not
+ * just a name, they are different indexes). Would like to change this
+ * to table-id for consistency.
+ *
+ * @cliexpar
+ * Example of how to run the test lookup command:
+ * @cliexstart{test lookup 172.16.1.1 table 1 count 2}
+ * No errors in 2 lookups
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lookup_test_command, static) =
+{
+ .path = "test lookup",
+ .short_help = "test lookup <ipv4-addr> [table <fib-id>] [count <nn>]",
+ .function = test_lookup_command_fn,
+};
+/* *INDENT-ON* */
+
+int
+vnet_set_ip4_flow_hash (u32 table_id, u32 flow_hash_config)
+{
+ ip4_main_t *im4 = &ip4_main;
+ ip4_fib_t *fib;
+ uword *p = hash_get (im4->fib_index_by_table_id, table_id);
+
+ if (p == 0)
+ return VNET_API_ERROR_NO_SUCH_FIB;
+
+ fib = ip4_fib_get (p[0]);
+
+ fib->flow_hash_config = flow_hash_config;
+ return 0;
+}
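+
+/*
+ * Illustrative sketch, not part of this patch: enable the default 5-tuple
+ * hashing on table 0 using the IP_FLOW_HASH_DEFAULT bit-mask from
+ * ip/lookup.h.
+ *
+ *   int rv = vnet_set_ip4_flow_hash (0, IP_FLOW_HASH_DEFAULT);
+ *   ASSERT (rv == 0);
+ */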
+
+static clib_error_t *
+set_ip_flow_hash_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ int matched = 0;
+ u32 table_id = 0;
+ u32 flow_hash_config = 0;
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "table %d", &table_id))
+ matched = 1;
+#define _(a,v) \
+ else if (unformat (input, #a)) { flow_hash_config |= v; matched=1;}
+ foreach_flow_hash_bit
+#undef _
+ else
+ break;
+ }
+
+ if (matched == 0)
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+
+ rv = vnet_set_ip4_flow_hash (table_id, flow_hash_config);
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_NO_SUCH_FIB:
+ return clib_error_return (0, "no such FIB table %d", table_id);
+
+ default:
+ clib_warning ("BUG: illegal flow hash config 0x%x", flow_hash_config);
+ break;
+ }
+
+ return 0;
+}
+
+/*?
+ * Configure the set of IPv4 fields used by the flow hash.
+ *
+ * @cliexpar
+ * Example of how to set the flow hash on a given table:
+ * @cliexcmd{set ip flow-hash table 7 dst sport dport proto}
+ * Example of display the configured flow hash:
+ * @cliexstart{show ip fib}
+ * ipv4-VRF:0, fib_index 0, flow hash: src dst sport dport proto
+ * 0.0.0.0/0
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:0 buckets:1 uRPF:0 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 0.0.0.0/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:1 buckets:1 uRPF:1 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 224.0.0.0/8
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:3 buckets:1 uRPF:3 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 6.0.1.2/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:30 buckets:1 uRPF:29 to:[0:0]]
+ * [0] [@3]: arp-ipv4: via 6.0.0.1 af_packet0
+ * 7.0.0.1/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:31 buckets:4 uRPF:30 to:[0:0]]
+ * [0] [@3]: arp-ipv4: via 6.0.0.2 af_packet0
+ * [1] [@3]: arp-ipv4: via 6.0.0.2 af_packet0
+ * [2] [@3]: arp-ipv4: via 6.0.0.2 af_packet0
+ * [3] [@3]: arp-ipv4: via 6.0.0.1 af_packet0
+ * 240.0.0.0/8
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:2 buckets:1 uRPF:2 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 255.255.255.255/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:4 buckets:1 uRPF:4 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * ipv4-VRF:7, fib_index 1, flow hash: dst sport dport proto
+ * 0.0.0.0/0
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:12 buckets:1 uRPF:11 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 0.0.0.0/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:13 buckets:1 uRPF:12 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 172.16.1.0/24
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:17 buckets:1 uRPF:16 to:[0:0]]
+ * [0] [@4]: ipv4-glean: af_packet0
+ * 172.16.1.1/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:18 buckets:1 uRPF:17 to:[1:84]]
+ * [0] [@2]: dpo-receive: 172.16.1.1 on af_packet0
+ * 172.16.1.2/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:21 buckets:1 uRPF:20 to:[0:0]]
+ * [0] [@5]: ipv4 via 172.16.1.2 af_packet0: IP4: 02:fe:9e:70:7a:2b -> 26:a5:f6:9c:3a:36
+ * 172.16.2.0/24
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:19 buckets:1 uRPF:18 to:[0:0]]
+ * [0] [@4]: ipv4-glean: af_packet1
+ * 172.16.2.1/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:20 buckets:1 uRPF:19 to:[0:0]]
+ * [0] [@2]: dpo-receive: 172.16.2.1 on af_packet1
+ * 224.0.0.0/8
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:15 buckets:1 uRPF:14 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 240.0.0.0/8
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:14 buckets:1 uRPF:13 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * 255.255.255.255/32
+ * unicast-ip4-chain
+ * [@0]: dpo-load-balance: [index:16 buckets:1 uRPF:15 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_ip_flow_hash_command, static) =
+{
+ .path = "set ip flow-hash",
+ .short_help =
+ "set ip flow-hash table <table-id> [src] [dst] [sport] [dport] [proto] [reverse]",
+ .function = set_ip_flow_hash_command_fn,
+};
+/* *INDENT-ON* */
+
+int
+vnet_set_ip4_classify_intfc (vlib_main_t * vm, u32 sw_if_index,
+ u32 table_index)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ ip4_main_t *ipm = &ip4_main;
+ ip_lookup_main_t *lm = &ipm->lookup_main;
+ vnet_classify_main_t *cm = &vnet_classify_main;
+ ip4_address_t *if_addr;
+
+ if (pool_is_free_index (im->sw_interfaces, sw_if_index))
+ return VNET_API_ERROR_NO_MATCHING_INTERFACE;
+
+ if (table_index != ~0 && pool_is_free_index (cm->tables, table_index))
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ vec_validate (lm->classify_table_index_by_sw_if_index, sw_if_index);
+ lm->classify_table_index_by_sw_if_index[sw_if_index] = table_index;
+
+ if_addr = ip4_interface_first_address (ipm, sw_if_index, NULL);
+
+ if (NULL != if_addr)
+ {
+ fib_prefix_t pfx = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr.ip4 = *if_addr,
+ };
+ u32 fib_index;
+
+ fib_index = fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
+ sw_if_index);
+
+
+ if (table_index != (u32) ~ 0)
+ {
+ dpo_id_t dpo = DPO_INVALID;
+
+ dpo_set (&dpo,
+ DPO_CLASSIFY,
+ DPO_PROTO_IP4,
+ classify_dpo_create (DPO_PROTO_IP4, table_index));
+
+ fib_table_entry_special_dpo_add (fib_index,
+ &pfx,
+ FIB_SOURCE_CLASSIFY,
+ FIB_ENTRY_FLAG_NONE, &dpo);
+ dpo_reset (&dpo);
+ }
+ else
+ {
+ fib_table_entry_special_remove (fib_index,
+ &pfx, FIB_SOURCE_CLASSIFY);
+ }
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+set_ip_classify_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ u32 table_index = ~0;
+ int table_index_set = 0;
+ u32 sw_if_index = ~0;
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "table-index %d", &table_index))
+ table_index_set = 1;
+ else if (unformat (input, "intfc %U", unformat_vnet_sw_interface,
+ vnet_get_main (), &sw_if_index))
+ ;
+ else
+ break;
+ }
+
+ if (table_index_set == 0)
+ return clib_error_return (0, "classify table-index must be specified");
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "interface / subif must be specified");
+
+ rv = vnet_set_ip4_classify_intfc (vm, sw_if_index, table_index);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_NO_MATCHING_INTERFACE:
+ return clib_error_return (0, "No such interface");
+
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ return clib_error_return (0, "No such classifier table");
+ }
+ return 0;
+}
+
+/*?
+ * Assign a classification table to an interface. The classification
+ * table is created using the '<em>classify table</em>' and '<em>classify session</em>'
+ * commands. Once the table is created, use this command to filter packets
+ * on an interface.
+ *
+ * @cliexpar
+ * Example of how to assign a classification table to an interface:
+ * @cliexcmd{set ip classify intfc GigabitEthernet2/0/0 table-index 1}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_ip_classify_command, static) =
+{
+ .path = "set ip classify",
+ .short_help =
+ "set ip classify intfc <interface> table-index <classify-idx>",
+ .function = set_ip_classify_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip4_input.c b/src/vnet/ip/ip4_input.c
new file mode 100644
index 00000000000..1cf5e0b8517
--- /dev/null
+++ b/src/vnet/ip/ip4_input.c
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip4_input.c: IP v4 input node
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ppp/ppp.h>
+#include <vnet/hdlc/hdlc.h>
+
+typedef struct
+{
+ u8 packet_data[64];
+} ip4_input_trace_t;
+
+static u8 *
+format_ip4_input_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ ip4_input_trace_t *t = va_arg (*va, ip4_input_trace_t *);
+
+ s = format (s, "%U",
+ format_ip4_header, t->packet_data, sizeof (t->packet_data));
+
+ return s;
+}
+
+typedef enum
+{
+ IP4_INPUT_NEXT_DROP,
+ IP4_INPUT_NEXT_PUNT,
+ IP4_INPUT_NEXT_LOOKUP,
+ IP4_INPUT_NEXT_LOOKUP_MULTICAST,
+ IP4_INPUT_NEXT_ICMP_ERROR,
+ IP4_INPUT_N_NEXT,
+} ip4_input_next_t;
+
+/* Validate IPv4 packets and pass them on to the forwarding code,
+   or drop/punt exception packets. */
+always_inline uword
+ip4_input_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, int verify_checksum)
+{
+ ip4_main_t *im = &ip4_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ ip_lookup_main_t *lm = &im->lookup_main;
+ u32 n_left_from, *from, *to_next;
+ ip4_input_next_t next_index;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip4_input_node.index);
+ vlib_simple_counter_main_t *cm;
+ u32 cpu_index = os_get_cpu_number ();
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
+ /* stride */ 1,
+ sizeof (ip4_input_trace_t));
+
+ cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
+ VNET_INTERFACE_COUNTER_IP4);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ vlib_buffer_t *p0, *p1;
+ ip4_header_t *ip0, *ip1;
+ u32 sw_if_index0, pi0, ip_len0, cur_len0, next0;
+ u32 sw_if_index1, pi1, ip_len1, cur_len1, next1;
+ i32 len_diff0, len_diff1;
+ u8 error0, error1, arc0, arc1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, sizeof (ip0[0]), LOAD);
+ CLIB_PREFETCH (p3->data, sizeof (ip1[0]), LOAD);
+ }
+
+ to_next[0] = pi0 = from[0];
+ to_next[1] = pi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+
+ ip0 = vlib_buffer_get_current (p0);
+ ip1 = vlib_buffer_get_current (p1);
+
+ sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (p1)->sw_if_index[VLIB_RX];
+
+ error0 = error1 = IP4_ERROR_NONE;
+
+ if (PREDICT_FALSE (ip4_address_is_multicast (&ip0->dst_address)))
+ {
+ arc0 = lm->mcast_feature_arc_index;
+ next0 = IP4_INPUT_NEXT_LOOKUP_MULTICAST;
+ }
+ else
+ {
+ arc0 = lm->ucast_feature_arc_index;
+ next0 = IP4_INPUT_NEXT_LOOKUP;
+ if (PREDICT_FALSE (ip0->ttl < 1))
+ error0 = IP4_ERROR_TIME_EXPIRED;
+ }
+
+ if (PREDICT_FALSE (ip4_address_is_multicast (&ip1->dst_address)))
+ {
+ arc1 = lm->mcast_feature_arc_index;
+ next1 = IP4_INPUT_NEXT_LOOKUP_MULTICAST;
+ }
+ else
+ {
+ arc1 = lm->ucast_feature_arc_index;
+ next1 = IP4_INPUT_NEXT_LOOKUP;
+ if (PREDICT_FALSE (ip1->ttl < 1))
+ error1 = IP4_ERROR_TIME_EXPIRED;
+ }
+
+ vnet_buffer (p0)->ip.adj_index[VLIB_RX] = ~0;
+ vnet_buffer (p1)->ip.adj_index[VLIB_RX] = ~0;
+
+ vnet_feature_arc_start (arc0, sw_if_index0, &next0, p0);
+ vnet_feature_arc_start (arc1, sw_if_index1, &next1, p1);
+
+ vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1);
+ vlib_increment_simple_counter (cm, cpu_index, sw_if_index1, 1);
+
+ /* Punt packets with options or wrong version. */
+ if (PREDICT_FALSE (ip0->ip_version_and_header_length != 0x45))
+ error0 = (ip0->ip_version_and_header_length & 0xf) != 5 ?
+ IP4_ERROR_OPTIONS : IP4_ERROR_VERSION;
+
+ if (PREDICT_FALSE (ip1->ip_version_and_header_length != 0x45))
+ error1 = (ip1->ip_version_and_header_length & 0xf) != 5 ?
+ IP4_ERROR_OPTIONS : IP4_ERROR_VERSION;
+
+ /* Verify header checksum. */
+ if (verify_checksum)
+ {
+ ip_csum_t sum0, sum1;
+
+ ip4_partial_header_checksum_x1 (ip0, sum0);
+ ip4_partial_header_checksum_x1 (ip1, sum1);
+
+ error0 = 0xffff != ip_csum_fold (sum0) ?
+ IP4_ERROR_BAD_CHECKSUM : error0;
+ error1 = 0xffff != ip_csum_fold (sum1) ?
+ IP4_ERROR_BAD_CHECKSUM : error1;
+ }
+
+ /* Drop fragmentation offset 1 packets. */
+ error0 = ip4_get_fragment_offset (ip0) == 1 ?
+ IP4_ERROR_FRAGMENT_OFFSET_ONE : error0;
+ error1 = ip4_get_fragment_offset (ip1) == 1 ?
+ IP4_ERROR_FRAGMENT_OFFSET_ONE : error1;
+
+ /* Verify lengths. */
+ ip_len0 = clib_net_to_host_u16 (ip0->length);
+ ip_len1 = clib_net_to_host_u16 (ip1->length);
+
+ /* IP length must be at least minimal IP header. */
+ error0 = ip_len0 < sizeof (ip0[0]) ? IP4_ERROR_TOO_SHORT : error0;
+ error1 = ip_len1 < sizeof (ip1[0]) ? IP4_ERROR_TOO_SHORT : error1;
+
+ cur_len0 = vlib_buffer_length_in_chain (vm, p0);
+ cur_len1 = vlib_buffer_length_in_chain (vm, p1);
+
+ len_diff0 = cur_len0 - ip_len0;
+ len_diff1 = cur_len1 - ip_len1;
+
+ error0 = len_diff0 < 0 ? IP4_ERROR_BAD_LENGTH : error0;
+ error1 = len_diff1 < 0 ? IP4_ERROR_BAD_LENGTH : error1;
+
+ p0->error = error_node->errors[error0];
+ p1->error = error_node->errors[error1];
+
+ if (PREDICT_FALSE (error0 != IP4_ERROR_NONE))
+ {
+ if (error0 == IP4_ERROR_TIME_EXPIRED)
+ {
+ icmp4_error_set_vnet_buffer (p0, ICMP4_time_exceeded,
+ ICMP4_time_exceeded_ttl_exceeded_in_transit,
+ 0);
+ next0 = IP4_INPUT_NEXT_ICMP_ERROR;
+ }
+ else
+ next0 = error0 != IP4_ERROR_OPTIONS ?
+ IP4_INPUT_NEXT_DROP : IP4_INPUT_NEXT_PUNT;
+ }
+ if (PREDICT_FALSE (error1 != IP4_ERROR_NONE))
+ {
+ if (error1 == IP4_ERROR_TIME_EXPIRED)
+ {
+ icmp4_error_set_vnet_buffer (p1, ICMP4_time_exceeded,
+ ICMP4_time_exceeded_ttl_exceeded_in_transit,
+ 0);
+ next1 = IP4_INPUT_NEXT_ICMP_ERROR;
+ }
+ else
+ next1 = error1 != IP4_ERROR_OPTIONS ?
+ IP4_INPUT_NEXT_DROP : IP4_INPUT_NEXT_PUNT;
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, pi1, next0, next1);
+ }
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ ip4_header_t *ip0;
+ u32 sw_if_index0, pi0, ip_len0, cur_len0, next0;
+ i32 len_diff0;
+ u8 error0, arc0;
+
+ pi0 = from[0];
+ to_next[0] = pi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip0 = vlib_buffer_get_current (p0);
+
+ sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
+
+ error0 = IP4_ERROR_NONE;
+
+ if (PREDICT_FALSE (ip4_address_is_multicast (&ip0->dst_address)))
+ {
+ arc0 = lm->mcast_feature_arc_index;
+ next0 = IP4_INPUT_NEXT_LOOKUP_MULTICAST;
+ }
+ else
+ {
+ arc0 = lm->ucast_feature_arc_index;
+ next0 = IP4_INPUT_NEXT_LOOKUP;
+ if (PREDICT_FALSE (ip0->ttl < 1))
+ error0 = IP4_ERROR_TIME_EXPIRED;
+ }
+
+ vnet_buffer (p0)->ip.adj_index[VLIB_RX] = ~0;
+ vnet_feature_arc_start (arc0, sw_if_index0, &next0, p0);
+
+ vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1);
+
+ /* Punt packets with options or wrong version. */
+ if (PREDICT_FALSE (ip0->ip_version_and_header_length != 0x45))
+ error0 = (ip0->ip_version_and_header_length & 0xf) != 5 ?
+ IP4_ERROR_OPTIONS : IP4_ERROR_VERSION;
+
+ /* Verify header checksum. */
+ if (verify_checksum)
+ {
+ ip_csum_t sum0;
+
+ ip4_partial_header_checksum_x1 (ip0, sum0);
+ error0 = (0xffff != ip_csum_fold (sum0)
+ ? IP4_ERROR_BAD_CHECKSUM : error0);
+ }
+
+ /* Drop fragmentation offset 1 packets. */
+ error0 = (ip4_get_fragment_offset (ip0) == 1
+ ? IP4_ERROR_FRAGMENT_OFFSET_ONE : error0);
+
+ /* Verify lengths. */
+ ip_len0 = clib_net_to_host_u16 (ip0->length);
+
+ /* IP length must be at least minimal IP header. */
+ error0 = ip_len0 < sizeof (ip0[0]) ? IP4_ERROR_TOO_SHORT : error0;
+
+ cur_len0 = vlib_buffer_length_in_chain (vm, p0);
+ len_diff0 = cur_len0 - ip_len0;
+ error0 = len_diff0 < 0 ? IP4_ERROR_BAD_LENGTH : error0;
+
+ p0->error = error_node->errors[error0];
+ if (PREDICT_FALSE (error0 != IP4_ERROR_NONE))
+ {
+ if (error0 == IP4_ERROR_TIME_EXPIRED)
+ {
+ icmp4_error_set_vnet_buffer (p0, ICMP4_time_exceeded,
+ ICMP4_time_exceeded_ttl_exceeded_in_transit,
+ 0);
+ next0 = IP4_INPUT_NEXT_ICMP_ERROR;
+ }
+ else
+ next0 = error0 != IP4_ERROR_OPTIONS ?
+ IP4_INPUT_NEXT_DROP : IP4_INPUT_NEXT_PUNT;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/** \brief IPv4 input node.
+ @node ip4-input
+
+ This is the IPv4 input node: validates ip4 header checksums,
+ verifies ip header lengths, discards pkts with expired TTLs,
+ and sends pkts to the set of ip feature nodes configured on
+ the rx interface.
+
+ @param vm vlib_main_t corresponding to the current thread
+ @param node vlib_node_runtime_t
+ @param frame vlib_frame_t whose contents should be dispatched
+
+ @par Graph mechanics: buffer metadata, next index usage
+
+ @em Uses:
+ - vnet_feature_config_main_t cm corresponding to each pkt's dst address unicast /
+ multicast status.
+ - <code>b->current_config_index</code> corresponding to each pkt's
+ rx sw_if_index.
+ - This sets the per-packet graph trajectory, ensuring that
+ each packet visits the per-interface features in order.
+
+ - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
+ - Indicates the @c sw_if_index value of the interface that the
+ packet was received on.
+
+ @em Sets:
+ - <code>vnet_buffer(b)->ip.adj_index[VLIB_RX]</code>
+ - Initialized to ~0; the lookup result adjacency index is filled
+ in later by ip4-lookup.
+
+ <em>Next Indices:</em>
+ - Dispatches pkts to the (first) feature node:
+ <code> vnet_get_config_data (... &next0 ...); </code>
+ or @c error-drop
+*/
+static uword
+ip4_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return ip4_input_inline (vm, node, frame, /* verify_checksum */ 1);
+}
+
+static uword
+ip4_input_no_checksum (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return ip4_input_inline (vm, node, frame, /* verify_checksum */ 0);
+}
+
+static char *ip4_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ip4_error
+#undef _
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_input_node) = {
+ .function = ip4_input,
+ .name = "ip4-input",
+ .vector_size = sizeof (u32),
+
+ .n_errors = IP4_N_ERROR,
+ .error_strings = ip4_error_strings,
+
+ .n_next_nodes = IP4_INPUT_N_NEXT,
+ .next_nodes = {
+ [IP4_INPUT_NEXT_DROP] = "error-drop",
+ [IP4_INPUT_NEXT_PUNT] = "error-punt",
+ [IP4_INPUT_NEXT_LOOKUP] = "ip4-lookup",
+ [IP4_INPUT_NEXT_LOOKUP_MULTICAST] = "ip4-lookup-multicast",
+ [IP4_INPUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
+ },
+
+ .format_buffer = format_ip4_header,
+ .format_trace = format_ip4_input_trace,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_input_node, ip4_input);
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_input_no_checksum_node,static) = {
+ .function = ip4_input_no_checksum,
+ .name = "ip4-input-no-checksum",
+ .vector_size = sizeof (u32),
+
+ .n_next_nodes = IP4_INPUT_N_NEXT,
+ .next_nodes = {
+ [IP4_INPUT_NEXT_DROP] = "error-drop",
+ [IP4_INPUT_NEXT_PUNT] = "error-punt",
+ [IP4_INPUT_NEXT_LOOKUP] = "ip4-lookup",
+ [IP4_INPUT_NEXT_LOOKUP_MULTICAST] = "ip4-lookup-multicast",
+ [IP4_INPUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
+ },
+
+ .format_buffer = format_ip4_header,
+ .format_trace = format_ip4_input_trace,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_input_no_checksum_node,
+ ip4_input_no_checksum);
+
+static clib_error_t *
+ip4_init (vlib_main_t * vm)
+{
+ clib_error_t *error;
+
+ ethernet_register_input_type (vm, ETHERNET_TYPE_IP4, ip4_input_node.index);
+ ppp_register_input_protocol (vm, PPP_PROTOCOL_ip4, ip4_input_node.index);
+ hdlc_register_input_protocol (vm, HDLC_PROTOCOL_ip4, ip4_input_node.index);
+
+ {
+ pg_node_t *pn;
+ pn = pg_get_node (ip4_input_node.index);
+ pn->unformat_edit = unformat_pg_ip4_header;
+ pn = pg_get_node (ip4_input_no_checksum_node.index);
+ pn->unformat_edit = unformat_pg_ip4_header;
+ }
+
+ if ((error = vlib_call_init_function (vm, ip4_cli_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, ip4_source_check_init)))
+ return error;
+
+ if ((error = vlib_call_init_function
+ (vm, ip4_source_and_port_range_check_init)))
+ return error;
+
+ /* Set flow hash to something non-zero. */
+ ip4_main.flow_hash_seed = 0xdeadbeef;
+
+ /* Default TTL for packets we generate. */
+ ip4_main.host_config.ttl = 64;
+
+ return error;
+}
+
+VLIB_INIT_FUNCTION (ip4_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip4_mtrie.c b/src/vnet/ip/ip4_mtrie.c
new file mode 100644
index 00000000000..6e3d0e8068b
--- /dev/null
+++ b/src/vnet/ip/ip4_mtrie.c
@@ -0,0 +1,568 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip4_fib.h: ip4 mtrie fib
+ *
+ * Copyright (c) 2012 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/ip/ip.h>
+#include <vnet/fib/fib_entry.h>
+
+static void
+ply_init (ip4_fib_mtrie_ply_t * p, ip4_fib_mtrie_leaf_t init,
+ uword prefix_len)
+{
+ p->n_non_empty_leafs =
+ ip4_fib_mtrie_leaf_is_empty (init) ? 0 : ARRAY_LEN (p->leaves);
+ memset (p->dst_address_bits_of_leaves, prefix_len,
+ sizeof (p->dst_address_bits_of_leaves));
+
+ /* Initialize leaves. */
+#ifdef CLIB_HAVE_VEC128
+ {
+ u32x4 *l, init_x4;
+
+#ifndef __ALTIVEC__
+ init_x4 = u32x4_splat (init);
+#else
+ {
+ u32x4_union_t y;
+ y.as_u32[0] = init;
+ y.as_u32[1] = init;
+ y.as_u32[2] = init;
+ y.as_u32[3] = init;
+ init_x4 = y.as_u32x4;
+ }
+#endif
+
+ for (l = p->leaves_as_u32x4;
+ l < p->leaves_as_u32x4 + ARRAY_LEN (p->leaves_as_u32x4); l += 4)
+ {
+ l[0] = init_x4;
+ l[1] = init_x4;
+ l[2] = init_x4;
+ l[3] = init_x4;
+ }
+ }
+#else
+ {
+ u32 *l;
+
+ for (l = p->leaves; l < p->leaves + ARRAY_LEN (p->leaves); l += 4)
+ {
+ l[0] = init;
+ l[1] = init;
+ l[2] = init;
+ l[3] = init;
+ }
+ }
+#endif
+}
+
+static ip4_fib_mtrie_leaf_t
+ply_create (ip4_fib_mtrie_t * m, ip4_fib_mtrie_leaf_t init_leaf,
+ uword prefix_len)
+{
+ ip4_fib_mtrie_ply_t *p;
+
+ /* Get cache aligned ply. */
+ pool_get_aligned (m->ply_pool, p, sizeof (p[0]));
+
+ ply_init (p, init_leaf, prefix_len);
+ return ip4_fib_mtrie_leaf_set_next_ply_index (p - m->ply_pool);
+}
+
+always_inline ip4_fib_mtrie_ply_t *
+get_next_ply_for_leaf (ip4_fib_mtrie_t * m, ip4_fib_mtrie_leaf_t l)
+{
+ uword n = ip4_fib_mtrie_leaf_get_next_ply_index (l);
+ /* It better not be the root ply. */
+ ASSERT (n != 0);
+ return pool_elt_at_index (m->ply_pool, n);
+}
+
+static void
+ply_free (ip4_fib_mtrie_t * m, ip4_fib_mtrie_ply_t * p)
+{
+ uword i, is_root;
+
+ is_root = p - m->ply_pool == 0;
+
+ for (i = 0; i < ARRAY_LEN (p->leaves); i++)
+ {
+ ip4_fib_mtrie_leaf_t l = p->leaves[i];
+ if (ip4_fib_mtrie_leaf_is_next_ply (l))
+ ply_free (m, get_next_ply_for_leaf (m, l));
+ }
+
+ if (is_root)
+ ply_init (p, IP4_FIB_MTRIE_LEAF_EMPTY, /* prefix_len */ 0);
+ else
+ pool_put (m->ply_pool, p);
+}
+
+void
+ip4_fib_free (ip4_fib_mtrie_t * m)
+{
+ ip4_fib_mtrie_ply_t *root_ply = pool_elt_at_index (m->ply_pool, 0);
+ ply_free (m, root_ply);
+}
+
+u32
+ip4_mtrie_lookup_address (ip4_fib_mtrie_t * m, ip4_address_t dst)
+{
+ ip4_fib_mtrie_ply_t *p = pool_elt_at_index (m->ply_pool, 0);
+ ip4_fib_mtrie_leaf_t l;
+
+ l = p->leaves[dst.as_u8[0]];
+ if (ip4_fib_mtrie_leaf_is_terminal (l))
+ return ip4_fib_mtrie_leaf_get_adj_index (l);
+
+ p = get_next_ply_for_leaf (m, l);
+ l = p->leaves[dst.as_u8[1]];
+ if (ip4_fib_mtrie_leaf_is_terminal (l))
+ return ip4_fib_mtrie_leaf_get_adj_index (l);
+
+ p = get_next_ply_for_leaf (m, l);
+ l = p->leaves[dst.as_u8[2]];
+ if (ip4_fib_mtrie_leaf_is_terminal (l))
+ return ip4_fib_mtrie_leaf_get_adj_index (l);
+
+ p = get_next_ply_for_leaf (m, l);
+ l = p->leaves[dst.as_u8[3]];
+
+ ASSERT (ip4_fib_mtrie_leaf_is_terminal (l));
+ return ip4_fib_mtrie_leaf_get_adj_index (l);
+}
+
+typedef struct
+{
+ ip4_address_t dst_address;
+ u32 dst_address_length;
+ u32 adj_index;
+} ip4_fib_mtrie_set_unset_leaf_args_t;
+
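+/* Recursively overwrite, with new_leaf, every slot in this ply (and
+   in any sub-plies) whose current contents are no more specific than
+   new_leaf_dst_address_bits; more specific terminal leaves are left
+   alone. */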
+static void
+set_ply_with_more_specific_leaf (ip4_fib_mtrie_t * m,
+ ip4_fib_mtrie_ply_t * ply,
+ ip4_fib_mtrie_leaf_t new_leaf,
+ uword new_leaf_dst_address_bits)
+{
+ ip4_fib_mtrie_leaf_t old_leaf;
+ uword i;
+
+ ASSERT (ip4_fib_mtrie_leaf_is_terminal (new_leaf));
+ ASSERT (!ip4_fib_mtrie_leaf_is_empty (new_leaf));
+
+ for (i = 0; i < ARRAY_LEN (ply->leaves); i++)
+ {
+ old_leaf = ply->leaves[i];
+
+ /* Recurse into sub plies. */
+ if (!ip4_fib_mtrie_leaf_is_terminal (old_leaf))
+ {
+ ip4_fib_mtrie_ply_t *sub_ply = get_next_ply_for_leaf (m, old_leaf);
+ set_ply_with_more_specific_leaf (m, sub_ply, new_leaf,
+ new_leaf_dst_address_bits);
+ }
+
+ /* Replace less specific terminal leaves with new leaf. */
+ else if (new_leaf_dst_address_bits >=
+ ply->dst_address_bits_of_leaves[i])
+ {
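+	  /* Atomic swap: a concurrent data-plane lookup must observe
+	     either the old or the new leaf, never a torn value. */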
+ __sync_val_compare_and_swap (&ply->leaves[i], old_leaf, new_leaf);
+ ASSERT (ply->leaves[i] == new_leaf);
+ ply->dst_address_bits_of_leaves[i] = new_leaf_dst_address_bits;
+ ply->n_non_empty_leafs += ip4_fib_mtrie_leaf_is_empty (old_leaf);
+ }
+ }
+}
+
+static void
+set_leaf (ip4_fib_mtrie_t * m,
+ ip4_fib_mtrie_set_unset_leaf_args_t * a,
+ u32 old_ply_index, u32 dst_address_byte_index)
+{
+ ip4_fib_mtrie_leaf_t old_leaf, new_leaf;
+ i32 n_dst_bits_next_plies;
+ u8 dst_byte;
+
+ ASSERT (a->dst_address_length > 0 && a->dst_address_length <= 32);
+ ASSERT (dst_address_byte_index < ARRAY_LEN (a->dst_address.as_u8));
+
+ n_dst_bits_next_plies =
+ a->dst_address_length - BITS (u8) * (dst_address_byte_index + 1);
+
+ dst_byte = a->dst_address.as_u8[dst_address_byte_index];
+
+ /* Number of bits next plies <= 0 => insert leaves this ply. */
+ if (n_dst_bits_next_plies <= 0)
+ {
+ uword i, n_dst_bits_this_ply, old_leaf_is_terminal;
+
+ n_dst_bits_this_ply = -n_dst_bits_next_plies;
+ ASSERT ((a->dst_address.as_u8[dst_address_byte_index] &
+ pow2_mask (n_dst_bits_this_ply)) == 0);
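+      /* A prefix shorter than this ply's 8 bits expands into a run of
+	 2^n_dst_bits_this_ply consecutive slots.  For example, inserting
+	 4.0.0.0/6 at the root ply gives n_dst_bits_this_ply = 2 and
+	 fills slots 4..7, which together cover exactly 4.0.0.0/6. */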
+
+ for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++)
+ {
+ ip4_fib_mtrie_ply_t *old_ply, *new_ply;
+
+ old_ply = pool_elt_at_index (m->ply_pool, old_ply_index);
+
+ old_leaf = old_ply->leaves[i];
+ old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf);
+
+ /* Is leaf to be inserted more specific? */
+ if (a->dst_address_length >= old_ply->dst_address_bits_of_leaves[i])
+ {
+ new_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index);
+
+ if (old_leaf_is_terminal)
+ {
+ old_ply->dst_address_bits_of_leaves[i] =
+ a->dst_address_length;
+ __sync_val_compare_and_swap (&old_ply->leaves[i], old_leaf,
+ new_leaf);
+ ASSERT (old_ply->leaves[i] == new_leaf);
+ old_ply->n_non_empty_leafs +=
+ ip4_fib_mtrie_leaf_is_empty (old_leaf);
+ ASSERT (old_ply->n_non_empty_leafs <=
+ ARRAY_LEN (old_ply->leaves));
+ }
+ else
+ {
+ /* Existing leaf points to another ply. We need to place new_leaf into all
+ more specific slots. */
+ new_ply = get_next_ply_for_leaf (m, old_leaf);
+ set_ply_with_more_specific_leaf (m, new_ply, new_leaf,
+ a->dst_address_length);
+ }
+ }
+
+ else if (!old_leaf_is_terminal)
+ {
+ new_ply = get_next_ply_for_leaf (m, old_leaf);
+ set_leaf (m, a, new_ply - m->ply_pool,
+ dst_address_byte_index + 1);
+ }
+ }
+ }
+ else
+ {
+ ip4_fib_mtrie_ply_t *old_ply, *new_ply;
+
+ old_ply = pool_elt_at_index (m->ply_pool, old_ply_index);
+ old_leaf = old_ply->leaves[dst_byte];
+ if (ip4_fib_mtrie_leaf_is_terminal (old_leaf))
+ {
+ new_leaf =
+ ply_create (m, old_leaf,
+ old_ply->dst_address_bits_of_leaves[dst_byte]);
+ new_ply = get_next_ply_for_leaf (m, new_leaf);
+
+ /* Refetch since ply_create may move pool. */
+ old_ply = pool_elt_at_index (m->ply_pool, old_ply_index);
+
+ __sync_val_compare_and_swap (&old_ply->leaves[dst_byte], old_leaf,
+ new_leaf);
+ ASSERT (old_ply->leaves[dst_byte] == new_leaf);
+ old_ply->dst_address_bits_of_leaves[dst_byte] = 0;
+
+ old_ply->n_non_empty_leafs -=
+ ip4_fib_mtrie_leaf_is_non_empty (old_leaf);
+ ASSERT (old_ply->n_non_empty_leafs >= 0);
+
+ /* Account for the ply we just created. */
+ old_ply->n_non_empty_leafs += 1;
+ }
+ else
+ new_ply = get_next_ply_for_leaf (m, old_leaf);
+
+ set_leaf (m, a, new_ply - m->ply_pool, dst_address_byte_index + 1);
+ }
+}
+
+static uword
+unset_leaf (ip4_fib_mtrie_t * m,
+ ip4_fib_mtrie_set_unset_leaf_args_t * a,
+ ip4_fib_mtrie_ply_t * old_ply, u32 dst_address_byte_index)
+{
+ ip4_fib_mtrie_leaf_t old_leaf, del_leaf;
+ i32 n_dst_bits_next_plies;
+ i32 i, n_dst_bits_this_ply, old_leaf_is_terminal;
+ u8 dst_byte;
+
+ ASSERT (a->dst_address_length > 0 && a->dst_address_length <= 32);
+ ASSERT (dst_address_byte_index < ARRAY_LEN (a->dst_address.as_u8));
+
+ n_dst_bits_next_plies =
+ a->dst_address_length - BITS (u8) * (dst_address_byte_index + 1);
+
+ dst_byte = a->dst_address.as_u8[dst_address_byte_index];
+ if (n_dst_bits_next_plies < 0)
+ dst_byte &= ~pow2_mask (-n_dst_bits_next_plies);
+
+ n_dst_bits_this_ply =
+ n_dst_bits_next_plies <= 0 ? -n_dst_bits_next_plies : 0;
+ n_dst_bits_this_ply = clib_min (8, n_dst_bits_this_ply);
+
+ del_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index);
+
+ for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++)
+ {
+ old_leaf = old_ply->leaves[i];
+ old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf);
+
+ if (old_leaf == del_leaf
+ || (!old_leaf_is_terminal
+ && unset_leaf (m, a, get_next_ply_for_leaf (m, old_leaf),
+ dst_address_byte_index + 1)))
+ {
+ old_ply->leaves[i] = IP4_FIB_MTRIE_LEAF_EMPTY;
+ old_ply->dst_address_bits_of_leaves[i] = 0;
+
+	  /* No matter what, we just deleted a non-empty leaf. */
+ ASSERT (!ip4_fib_mtrie_leaf_is_empty (old_leaf));
+ old_ply->n_non_empty_leafs -= 1;
+
+ ASSERT (old_ply->n_non_empty_leafs >= 0);
+ if (old_ply->n_non_empty_leafs == 0 && dst_address_byte_index > 0)
+ {
+ pool_put (m->ply_pool, old_ply);
+ /* Old ply was deleted. */
+ return 1;
+ }
+ }
+ }
+
+ /* Old ply was not deleted. */
+ return 0;
+}
+
+void
+ip4_mtrie_init (ip4_fib_mtrie_t * m)
+{
+ ip4_fib_mtrie_leaf_t root;
+ memset (m, 0, sizeof (m[0]));
+ m->default_leaf = IP4_FIB_MTRIE_LEAF_EMPTY;
+ root = ply_create (m, IP4_FIB_MTRIE_LEAF_EMPTY, /* dst_address_bits_of_leaves */
+ 0);
+ ASSERT (ip4_fib_mtrie_leaf_get_next_ply_index (root) == 0);
+}
+
+void
+ip4_fib_mtrie_add_del_route (ip4_fib_t * fib,
+ ip4_address_t dst_address,
+ u32 dst_address_length,
+ u32 adj_index, u32 is_del)
+{
+ ip4_fib_mtrie_t *m = &fib->mtrie;
+ ip4_fib_mtrie_ply_t *root_ply;
+ ip4_fib_mtrie_set_unset_leaf_args_t a;
+ ip4_main_t *im = &ip4_main;
+
+ ASSERT (m->ply_pool != 0);
+
+ root_ply = pool_elt_at_index (m->ply_pool, 0);
+
+  /* Honor dst_address_length. FIB masks are in network byte order. */
+ dst_address.as_u32 &= im->fib_masks[dst_address_length];
+ a.dst_address = dst_address;
+ a.dst_address_length = dst_address_length;
+ a.adj_index = adj_index;
+
+ if (!is_del)
+ {
+ if (dst_address_length == 0)
+ m->default_leaf = ip4_fib_mtrie_leaf_set_adj_index (adj_index);
+ else
+ set_leaf (m, &a, /* ply_index */ 0, /* dst_address_byte_index */ 0);
+ }
+ else
+ {
+ if (dst_address_length == 0)
+ m->default_leaf = IP4_FIB_MTRIE_LEAF_EMPTY;
+
+ else
+ {
+ ip4_main_t *im = &ip4_main;
+ uword i;
+
+ unset_leaf (m, &a, root_ply, 0);
+
+ /* Find next less specific route and insert into mtrie. */
+ for (i = dst_address_length - 1; i >= 1; i--)
+ {
+ uword *p;
+ index_t lbi;
+ ip4_address_t key;
+
+ if (!fib->fib_entry_by_dst_address[i])
+ continue;
+
+ key.as_u32 = dst_address.as_u32 & im->fib_masks[i];
+ p = hash_get (fib->fib_entry_by_dst_address[i], key.as_u32);
+ if (p)
+ {
+ lbi = fib_entry_contribute_ip_forwarding (p[0])->dpoi_index;
+ if (INDEX_INVALID == lbi)
+ continue;
+
+ a.dst_address = key;
+ a.adj_index = lbi;
+ a.dst_address_length = i;
+
+ set_leaf (m, &a, /* ply_index */ 0,
+ /* dst_address_byte_index */ 0);
+ break;
+ }
+ }
+ }
+ }
+}
+
+/* Returns number of bytes of memory used by mtrie. */
+static uword
+mtrie_memory_usage (ip4_fib_mtrie_t * m, ip4_fib_mtrie_ply_t * p)
+{
+ uword bytes, i;
+
+ if (!p)
+ {
+ if (pool_is_free_index (m->ply_pool, 0))
+ return 0;
+ p = pool_elt_at_index (m->ply_pool, 0);
+ }
+
+ bytes = sizeof (p[0]);
+ for (i = 0; i < ARRAY_LEN (p->leaves); i++)
+ {
+ ip4_fib_mtrie_leaf_t l = p->leaves[i];
+ if (ip4_fib_mtrie_leaf_is_next_ply (l))
+ bytes += mtrie_memory_usage (m, get_next_ply_for_leaf (m, l));
+ }
+
+ return bytes;
+}
+
+static u8 *
+format_ip4_fib_mtrie_leaf (u8 * s, va_list * va)
+{
+ ip4_fib_mtrie_leaf_t l = va_arg (*va, ip4_fib_mtrie_leaf_t);
+
+ if (ip4_fib_mtrie_leaf_is_empty (l))
+ s = format (s, "miss");
+ else if (ip4_fib_mtrie_leaf_is_terminal (l))
+ s = format (s, "adj %d", ip4_fib_mtrie_leaf_get_adj_index (l));
+ else
+ s = format (s, "next ply %d", ip4_fib_mtrie_leaf_get_next_ply_index (l));
+ return s;
+}
+
+static u8 *
+format_ip4_fib_mtrie_ply (u8 * s, va_list * va)
+{
+ ip4_fib_mtrie_t *m = va_arg (*va, ip4_fib_mtrie_t *);
+ u32 base_address = va_arg (*va, u32);
+ u32 ply_index = va_arg (*va, u32);
+ u32 dst_address_byte_index = va_arg (*va, u32);
+ ip4_fib_mtrie_ply_t *p;
+ uword i, indent;
+
+ p = pool_elt_at_index (m->ply_pool, ply_index);
+ indent = format_get_indent (s);
+ s =
+ format (s, "ply index %d, %d non-empty leaves", ply_index,
+ p->n_non_empty_leafs);
+ for (i = 0; i < ARRAY_LEN (p->leaves); i++)
+ {
+ ip4_fib_mtrie_leaf_t l = p->leaves[i];
+
+ if (!ip4_fib_mtrie_leaf_is_empty (l))
+ {
+ u32 a, ia_length;
+ ip4_address_t ia;
+
+ a = base_address + (i << (24 - 8 * dst_address_byte_index));
+ ia.as_u32 = clib_host_to_net_u32 (a);
+ if (ip4_fib_mtrie_leaf_is_terminal (l))
+ ia_length = p->dst_address_bits_of_leaves[i];
+ else
+ ia_length = 8 * (1 + dst_address_byte_index);
+ s = format (s, "\n%U%20U %U",
+ format_white_space, indent + 2,
+ format_ip4_address_and_length, &ia, ia_length,
+ format_ip4_fib_mtrie_leaf, l);
+
+ if (ip4_fib_mtrie_leaf_is_next_ply (l))
+ s = format (s, "\n%U%U",
+ format_white_space, indent + 2,
+ format_ip4_fib_mtrie_ply, m, a,
+ ip4_fib_mtrie_leaf_get_next_ply_index (l),
+ dst_address_byte_index + 1);
+ }
+ }
+
+ return s;
+}
+
+u8 *
+format_ip4_fib_mtrie (u8 * s, va_list * va)
+{
+ ip4_fib_mtrie_t *m = va_arg (*va, ip4_fib_mtrie_t *);
+
+ s = format (s, "%d plies, memory usage %U",
+ pool_elts (m->ply_pool),
+ format_memory_size, mtrie_memory_usage (m, 0));
+
+ if (pool_elts (m->ply_pool) > 0)
+ {
+ ip4_address_t base_address;
+ base_address.as_u32 = 0;
+ s =
+ format (s, "\n %U", format_ip4_fib_mtrie_ply, m, base_address, 0, 0);
+ }
+
+ return s;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip4_mtrie.h b/src/vnet/ip/ip4_mtrie.h
new file mode 100644
index 00000000000..c0afc2cf842
--- /dev/null
+++ b/src/vnet/ip/ip4_mtrie.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip4_fib.h: ip4 mtrie fib
+ *
+ * Copyright (c) 2012 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_ip_ip4_fib_h
+#define included_ip_ip4_fib_h
+
+#include <vppinfra/cache.h>
+#include <vppinfra/vector.h>
+#include <vnet/ip/lookup.h>
+#include <vnet/ip/ip4_packet.h> /* for ip4_address_t */
+
+/* ip4 fib leafs: 4 ply 8-8-8-8 mtrie.
+ 1 + 2*adj_index for terminal leaves.
+ 0 + 2*next_ply_index for non-terminals.
+ 1 => empty (adjacency index of zero is special miss adjacency). */
+typedef u32 ip4_fib_mtrie_leaf_t;
+
+#define IP4_FIB_MTRIE_LEAF_EMPTY (1 + 2*0)
+#define IP4_FIB_MTRIE_LEAF_ROOT (0 + 2*0)
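+
+/* Example encodings: a terminal leaf for adjacency 5 is 1 + 2*5 = 11
+   (odd => terminal); a leaf pointing at ply 3 is 0 + 2*3 = 6
+   (even => next ply).  The empty leaf is simply the terminal
+   encoding of the special miss adjacency, index 0. */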
+
+always_inline u32
+ip4_fib_mtrie_leaf_is_empty (ip4_fib_mtrie_leaf_t n)
+{
+ return n == IP4_FIB_MTRIE_LEAF_EMPTY;
+}
+
+always_inline u32
+ip4_fib_mtrie_leaf_is_non_empty (ip4_fib_mtrie_leaf_t n)
+{
+ return n != IP4_FIB_MTRIE_LEAF_EMPTY;
+}
+
+always_inline u32
+ip4_fib_mtrie_leaf_is_terminal (ip4_fib_mtrie_leaf_t n)
+{
+ return n & 1;
+}
+
+always_inline u32
+ip4_fib_mtrie_leaf_get_adj_index (ip4_fib_mtrie_leaf_t n)
+{
+ ASSERT (ip4_fib_mtrie_leaf_is_terminal (n));
+ return n >> 1;
+}
+
+always_inline ip4_fib_mtrie_leaf_t
+ip4_fib_mtrie_leaf_set_adj_index (u32 adj_index)
+{
+ ip4_fib_mtrie_leaf_t l;
+ l = 1 + 2 * adj_index;
+ ASSERT (ip4_fib_mtrie_leaf_get_adj_index (l) == adj_index);
+ return l;
+}
+
+always_inline u32
+ip4_fib_mtrie_leaf_is_next_ply (ip4_fib_mtrie_leaf_t n)
+{
+ return (n & 1) == 0;
+}
+
+always_inline u32
+ip4_fib_mtrie_leaf_get_next_ply_index (ip4_fib_mtrie_leaf_t n)
+{
+ ASSERT (ip4_fib_mtrie_leaf_is_next_ply (n));
+ return n >> 1;
+}
+
+always_inline ip4_fib_mtrie_leaf_t
+ip4_fib_mtrie_leaf_set_next_ply_index (u32 i)
+{
+ ip4_fib_mtrie_leaf_t l;
+ l = 0 + 2 * i;
+ ASSERT (ip4_fib_mtrie_leaf_get_next_ply_index (l) == i);
+ return l;
+}
+
+/* One ply of the 4 ply mtrie fib. */
+typedef struct
+{
+ union
+ {
+ ip4_fib_mtrie_leaf_t leaves[256];
+
+#ifdef CLIB_HAVE_VEC128
+ u32x4 leaves_as_u32x4[256 / 4];
+#endif
+ };
+
+ /* Prefix length for terminal leaves. */
+ u8 dst_address_bits_of_leaves[256];
+
+ /* Number of non-empty leafs (whether terminal or not). */
+ i32 n_non_empty_leafs;
+
+ /* Pad to cache line boundary. */
+ u8 pad[CLIB_CACHE_LINE_BYTES - 1 * sizeof (i32)];
+}
+ip4_fib_mtrie_ply_t;
+
+STATIC_ASSERT (0 == sizeof (ip4_fib_mtrie_ply_t) % CLIB_CACHE_LINE_BYTES,
+ "IP4 Mtrie ply cache line");
+
+typedef struct
+{
+ /* Pool of plies. Index zero is root ply. */
+ ip4_fib_mtrie_ply_t *ply_pool;
+
+ /* Special case leaf for default route 0.0.0.0/0. */
+ ip4_fib_mtrie_leaf_t default_leaf;
+} ip4_fib_mtrie_t;
+
+void ip4_mtrie_init (ip4_fib_mtrie_t * m);
+
+struct ip4_fib_t;
+
+void ip4_fib_mtrie_add_del_route (struct ip4_fib_t *f,
+ ip4_address_t dst_address,
+ u32 dst_address_length,
+ u32 adj_index, u32 is_del);
+
+/* Returns adjacency index. */
+u32 ip4_mtrie_lookup_address (ip4_fib_mtrie_t * m, ip4_address_t dst);
+
+format_function_t format_ip4_fib_mtrie;
+
+/* Lookup step. Processes 1 byte of 4 byte ip4 address. */
+always_inline ip4_fib_mtrie_leaf_t
+ip4_fib_mtrie_lookup_step (ip4_fib_mtrie_t * m,
+ ip4_fib_mtrie_leaf_t current_leaf,
+ const ip4_address_t * dst_address,
+ u32 dst_address_byte_index)
+{
+ ip4_fib_mtrie_leaf_t next_leaf;
+ ip4_fib_mtrie_ply_t *ply;
+ uword current_is_terminal = ip4_fib_mtrie_leaf_is_terminal (current_leaf);
+
+ ply = m->ply_pool + (current_is_terminal ? 0 : (current_leaf >> 1));
+ next_leaf = ply->leaves[dst_address->as_u8[dst_address_byte_index]];
+ next_leaf = current_is_terminal ? current_leaf : next_leaf;
+
+ return next_leaf;
+}
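+
+/* A sketch of the typical unrolled lookup, for illustration:
+
+     leaf = IP4_FIB_MTRIE_LEAF_ROOT;
+     leaf = ip4_fib_mtrie_lookup_step (m, leaf, &dst, 0);
+     leaf = ip4_fib_mtrie_lookup_step (m, leaf, &dst, 1);
+     leaf = ip4_fib_mtrie_lookup_step (m, leaf, &dst, 2);
+     leaf = ip4_fib_mtrie_lookup_step (m, leaf, &dst, 3);
+     adj_index = ip4_fib_mtrie_leaf_get_adj_index (leaf);
+
+   Once a step returns a terminal leaf, later steps pass it through
+   unchanged, so all four steps can be issued unconditionally. */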
+
+#endif /* included_ip_ip4_fib_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip4_packet.h b/src/vnet/ip/ip4_packet.h
new file mode 100644
index 00000000000..8da788b411f
--- /dev/null
+++ b/src/vnet/ip/ip4_packet.h
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip4/packet.h: ip4 packet format
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_ip4_packet_h
+#define included_ip4_packet_h
+
+#include <vnet/ip/ip_packet.h> /* for ip_csum_t */
+#include <vnet/ip/tcp_packet.h> /* for tcp_header_t */
+#include <vppinfra/byte_order.h> /* for clib_net_to_host_u16 */
+
+/* IP4 address which can be accessed either as 4 bytes
+ or as a 32-bit number. */
+typedef union
+{
+ u8 data[4];
+ u32 data_u32;
+ /* Aliases. */
+ u8 as_u8[4];
+ u32 as_u32;
+} ip4_address_t;
+
+typedef struct
+{
+ /* IP address must be first for ip_interface_address_get_address() to work */
+ ip4_address_t ip4_addr;
+ u32 fib_index;
+} ip4_address_fib_t;
+
+always_inline void
+ip4_addr_fib_init (ip4_address_fib_t * addr_fib, ip4_address_t * address,
+ u32 fib_index)
+{
+ clib_memcpy (&addr_fib->ip4_addr, address, sizeof (addr_fib->ip4_addr));
+ addr_fib->fib_index = fib_index;
+}
+
+/* (src,dst) pair of addresses as found in packet header. */
+typedef struct
+{
+ ip4_address_t src, dst;
+} ip4_address_pair_t;
+
+/* If address is a valid netmask, return length of mask. */
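+/* E.g. 255.255.255.0 -> 24 and 255.255.240.0 -> 20; a byte that is
+   not a valid mask byte (say 255.255.255.253) yields ~0. */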
+always_inline uword
+ip4_address_netmask_length (ip4_address_t * a)
+{
+ uword result = 0;
+ uword i;
+ for (i = 0; i < ARRAY_LEN (a->as_u8); i++)
+ {
+ switch (a->as_u8[i])
+ {
+ case 0xff:
+ result += 8;
+ break;
+ case 0xfe:
+ result += 7;
+ goto done;
+ case 0xfc:
+ result += 6;
+ goto done;
+ case 0xf8:
+ result += 5;
+ goto done;
+ case 0xf0:
+ result += 4;
+ goto done;
+ case 0xe0:
+ result += 3;
+ goto done;
+ case 0xc0:
+ result += 2;
+ goto done;
+ case 0x80:
+ result += 1;
+ goto done;
+ case 0x00:
+ result += 0;
+ goto done;
+ default:
+	  /* Not a valid netmask. */
+ return ~0;
+ }
+ }
+done:
+ return result;
+}
+
+typedef union
+{
+ struct
+ {
+ /* 4 bit packet length (in 32bit units) and version VVVVLLLL.
+ e.g. for packets w/ no options ip_version_and_header_length == 0x45. */
+ u8 ip_version_and_header_length;
+
+ /* Type of service. */
+ u8 tos;
+
+ /* Total layer 3 packet length including this header. */
+ u16 length;
+
+ /* Fragmentation ID. */
+ u16 fragment_id;
+
+ /* 3 bits of flags and 13 bits of fragment offset (in units
+ of 8 byte quantities). */
+ u16 flags_and_fragment_offset;
+#define IP4_HEADER_FLAG_MORE_FRAGMENTS (1 << 13)
+#define IP4_HEADER_FLAG_DONT_FRAGMENT (1 << 14)
+#define IP4_HEADER_FLAG_CONGESTION (1 << 15)
+
+ /* Time to live decremented by router at each hop. */
+ u8 ttl;
+
+ /* Next level protocol packet. */
+ u8 protocol;
+
+ /* Checksum. */
+ u16 checksum;
+
+ /* Source and destination address. */
+ union
+ {
+ struct
+ {
+ ip4_address_t src_address, dst_address;
+ };
+ ip4_address_pair_t address_pair;
+ };
+ };
+
+ /* For checksumming we'll want to access IP header in word sized chunks. */
+ /* For 64 bit machines. */
+ /* *INDENT-OFF* */
+ CLIB_PACKED (struct {
+ u64 checksum_data_64[2];
+ u32 checksum_data_64_32[1];
+ });
+ /* *INDENT-ON* */
+
+ /* For 32 bit machines. */
+ /* *INDENT-OFF* */
+ CLIB_PACKED (struct {
+ u32 checksum_data_32[5];
+ });
+ /* *INDENT-ON* */
+} ip4_header_t;
+
+/* Value of ip_version_and_header_length for packets w/o options. */
+#define IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS \
+ ((4 << 4) | (sizeof (ip4_header_t) / sizeof (u32)))
+
+always_inline int
+ip4_get_fragment_offset (ip4_header_t * i)
+{
+ return clib_net_to_host_u16 (i->flags_and_fragment_offset) & 0x1fff;
+}
+
+always_inline int
+ip4_get_fragment_more (ip4_header_t * i)
+{
+ return clib_net_to_host_u16 (i->flags_and_fragment_offset) &
+ IP4_HEADER_FLAG_MORE_FRAGMENTS;
+}
+
+always_inline int
+ip4_is_fragment (ip4_header_t * i)
+{
+ return (i->flags_and_fragment_offset &
+ clib_net_to_host_u16 (0x1fff | IP4_HEADER_FLAG_MORE_FRAGMENTS));
+}
+
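+/* A packet is the first fragment iff its fragment offset is zero and
+   more-fragments is set, hence the comparison below against the MF
+   flag alone. */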
+always_inline int
+ip4_is_first_fragment (ip4_header_t * i)
+{
+ return (i->flags_and_fragment_offset &
+ clib_net_to_host_u16 (0x1fff | IP4_HEADER_FLAG_MORE_FRAGMENTS)) ==
+ clib_net_to_host_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
+}
+
+/* Fragment offset in bytes. */
+always_inline int
+ip4_get_fragment_offset_bytes (ip4_header_t * i)
+{
+ return 8 * ip4_get_fragment_offset (i);
+}
+
+always_inline int
+ip4_header_bytes (ip4_header_t * i)
+{
+ return sizeof (u32) * (i->ip_version_and_header_length & 0xf);
+}
+
+always_inline void *
+ip4_next_header (ip4_header_t * i)
+{
+ return (void *) i + ip4_header_bytes (i);
+}
+
+always_inline u16
+ip4_header_checksum (ip4_header_t * i)
+{
+ u16 save, csum;
+ ip_csum_t sum;
+
+ save = i->checksum;
+ i->checksum = 0;
+ sum = ip_incremental_checksum (0, i, ip4_header_bytes (i));
+ csum = ~ip_csum_fold (sum);
+
+ i->checksum = save;
+
+ /* Make checksum agree for special case where either
+ 0 or 0xffff would give same 1s complement sum. */
+ if (csum == 0 && save == 0xffff)
+ csum = save;
+
+ return csum;
+}
+
+static inline uword
+ip4_header_checksum_is_valid (ip4_header_t * i)
+{
+ return i->checksum == ip4_header_checksum (i);
+}
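+
+/* Typical usage when building a header, sketched for illustration:
+
+     ip->checksum = ip4_header_checksum (ip);
+     ASSERT (ip4_header_checksum_is_valid (ip));
+
+   ip4_header_checksum() computes over the header with the checksum
+   field treated as zero, so its result may be stamped back in. */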
+
+#define ip4_partial_header_checksum_x1(ip0,sum0) \
+do { \
+ if (BITS (ip_csum_t) > 32) \
+ { \
+ sum0 = ip0->checksum_data_64[0]; \
+ sum0 = ip_csum_with_carry (sum0, ip0->checksum_data_64[1]); \
+ sum0 = ip_csum_with_carry (sum0, ip0->checksum_data_64_32[0]); \
+ } \
+ else \
+ { \
+ sum0 = ip0->checksum_data_32[0]; \
+ sum0 = ip_csum_with_carry (sum0, ip0->checksum_data_32[1]); \
+ sum0 = ip_csum_with_carry (sum0, ip0->checksum_data_32[2]); \
+ sum0 = ip_csum_with_carry (sum0, ip0->checksum_data_32[3]); \
+ sum0 = ip_csum_with_carry (sum0, ip0->checksum_data_32[4]); \
+ } \
+} while (0)
+
+#define ip4_partial_header_checksum_x2(ip0,ip1,sum0,sum1) \
+do { \
+ if (BITS (ip_csum_t) > 32) \
+ { \
+ sum0 = ip0->checksum_data_64[0]; \
+ sum1 = ip1->checksum_data_64[0]; \
+ sum0 = ip_csum_with_carry (sum0, ip0->checksum_data_64[1]); \
+ sum1 = ip_csum_with_carry (sum1, ip1->checksum_data_64[1]); \
+ sum0 = ip_csum_with_carry (sum0, ip0->checksum_data_64_32[0]); \
+ sum1 = ip_csum_with_carry (sum1, ip1->checksum_data_64_32[0]); \
+ } \
+ else \
+ { \
+ sum0 = ip0->checksum_data_32[0]; \
+ sum1 = ip1->checksum_data_32[0]; \
+ sum0 = ip_csum_with_carry (sum0, ip0->checksum_data_32[1]); \
+ sum1 = ip_csum_with_carry (sum1, ip1->checksum_data_32[1]); \
+ sum0 = ip_csum_with_carry (sum0, ip0->checksum_data_32[2]); \
+ sum1 = ip_csum_with_carry (sum1, ip1->checksum_data_32[2]); \
+ sum0 = ip_csum_with_carry (sum0, ip0->checksum_data_32[3]); \
+ sum1 = ip_csum_with_carry (sum1, ip1->checksum_data_32[3]); \
+ sum0 = ip_csum_with_carry (sum0, ip0->checksum_data_32[4]); \
+ sum1 = ip_csum_with_carry (sum1, ip1->checksum_data_32[4]); \
+ } \
+} while (0)
+
+always_inline uword
+ip4_address_is_multicast (ip4_address_t * a)
+{
+ return (a->data[0] & 0xf0) == 0xe0;
+}
+
+always_inline void
+ip4_multicast_address_set_for_group (ip4_address_t * a,
+ ip_multicast_group_t g)
+{
+ ASSERT ((u32) g < (1 << 28));
+ a->as_u32 = clib_host_to_net_u32 ((0xe << 28) + g);
+}
+
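+/* Per RFC 1112: the 01:00:5e prefix plus the low 23 bits of the
+   group address; e.g. 224.1.2.3 maps to 01:00:5e:01:02:03. */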
+always_inline void
+ip4_multicast_ethernet_address (u8 * ethernet_address, ip4_address_t * a)
+{
+ u8 *d = a->as_u8;
+
+ ethernet_address[0] = 0x01;
+ ethernet_address[1] = 0x00;
+ ethernet_address[2] = 0x5e;
+ ethernet_address[3] = d[1] & 0x7f;
+ ethernet_address[4] = d[2];
+ ethernet_address[5] = d[3];
+}
+
+always_inline void
+ip4_tcp_reply_x1 (ip4_header_t * ip0, tcp_header_t * tcp0)
+{
+ u32 src0, dst0;
+
+ src0 = ip0->src_address.data_u32;
+ dst0 = ip0->dst_address.data_u32;
+ ip0->src_address.data_u32 = dst0;
+ ip0->dst_address.data_u32 = src0;
+
+ src0 = tcp0->ports.src;
+ dst0 = tcp0->ports.dst;
+ tcp0->ports.src = dst0;
+ tcp0->ports.dst = src0;
+}
+
+always_inline void
+ip4_tcp_reply_x2 (ip4_header_t * ip0, ip4_header_t * ip1,
+ tcp_header_t * tcp0, tcp_header_t * tcp1)
+{
+ u32 src0, dst0, src1, dst1;
+
+ src0 = ip0->src_address.data_u32;
+ src1 = ip1->src_address.data_u32;
+ dst0 = ip0->dst_address.data_u32;
+ dst1 = ip1->dst_address.data_u32;
+ ip0->src_address.data_u32 = dst0;
+ ip1->src_address.data_u32 = dst1;
+ ip0->dst_address.data_u32 = src0;
+ ip1->dst_address.data_u32 = src1;
+
+ src0 = tcp0->ports.src;
+ src1 = tcp1->ports.src;
+ dst0 = tcp0->ports.dst;
+ dst1 = tcp1->ports.dst;
+ tcp0->ports.src = dst0;
+ tcp1->ports.src = dst1;
+ tcp0->ports.dst = src0;
+ tcp1->ports.dst = src1;
+}
+
+#endif /* included_ip4_packet_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip4_pg.c b/src/vnet/ip/ip4_pg.c
new file mode 100644
index 00000000000..9697a3b9c89
--- /dev/null
+++ b/src/vnet/ip/ip4_pg.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip4_pg: IP v4 packet-generator interface
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/ip/ip.h>
+#include <vnet/pg/pg.h>
+
+#define IP4_PG_EDIT_CHECKSUM (1 << 0)
+#define IP4_PG_EDIT_LENGTH (1 << 1)
+
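+/* Fix up the IP length and/or header checksum fields of generated
+   packets, two at a time where possible, then any remainder singly. */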
+static_always_inline void
+compute_length_and_or_checksum (vlib_main_t * vm,
+ u32 * packets,
+ u32 n_packets,
+ u32 ip_header_offset, u32 flags)
+{
+ ASSERT (flags != 0);
+
+ while (n_packets >= 2)
+ {
+ u32 pi0, pi1;
+ vlib_buffer_t *p0, *p1;
+ ip4_header_t *ip0, *ip1;
+ ip_csum_t sum0, sum1;
+
+ pi0 = packets[0];
+ pi1 = packets[1];
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+ n_packets -= 2;
+ packets += 2;
+
+ ip0 = (void *) (p0->data + ip_header_offset);
+ ip1 = (void *) (p1->data + ip_header_offset);
+
+ if (flags & IP4_PG_EDIT_LENGTH)
+ {
+ ip0->length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, p0) -
+ ip_header_offset);
+ ip1->length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, p1) -
+ ip_header_offset);
+ }
+
+ if (flags & IP4_PG_EDIT_CHECKSUM)
+ {
+ ASSERT (ip4_header_bytes (ip0) == sizeof (ip0[0]));
+ ASSERT (ip4_header_bytes (ip1) == sizeof (ip1[0]));
+
+ ip0->checksum = 0;
+ ip1->checksum = 0;
+
+ ip4_partial_header_checksum_x2 (ip0, ip1, sum0, sum1);
+ ip0->checksum = ~ip_csum_fold (sum0);
+ ip1->checksum = ~ip_csum_fold (sum1);
+
+ ASSERT (ip0->checksum == ip4_header_checksum (ip0));
+ ASSERT (ip1->checksum == ip4_header_checksum (ip1));
+ }
+ }
+
+ while (n_packets >= 1)
+ {
+ u32 pi0;
+ vlib_buffer_t *p0;
+ ip4_header_t *ip0;
+ ip_csum_t sum0;
+
+ pi0 = packets[0];
+ p0 = vlib_get_buffer (vm, pi0);
+ n_packets -= 1;
+ packets += 1;
+
+ ip0 = (void *) (p0->data + ip_header_offset);
+
+ if (flags & IP4_PG_EDIT_LENGTH)
+ ip0->length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, p0) -
+ ip_header_offset);
+
+ if (flags & IP4_PG_EDIT_CHECKSUM)
+ {
+ ASSERT (ip4_header_bytes (ip0) == sizeof (ip0[0]));
+
+ ip0->checksum = 0;
+
+ ip4_partial_header_checksum_x1 (ip0, sum0);
+ ip0->checksum = ~ip_csum_fold (sum0);
+
+ ASSERT (ip0->checksum == ip4_header_checksum (ip0));
+ }
+ }
+}
+
+static void
+ip4_pg_edit_function (pg_main_t * pg,
+ pg_stream_t * s,
+ pg_edit_group_t * g, u32 * packets, u32 n_packets)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ u32 ip_offset;
+
+ ip_offset = g->start_byte_offset;
+
+ switch (g->edit_function_opaque)
+ {
+ case IP4_PG_EDIT_LENGTH:
+ compute_length_and_or_checksum (vm, packets, n_packets, ip_offset,
+ IP4_PG_EDIT_LENGTH);
+ break;
+
+ case IP4_PG_EDIT_CHECKSUM:
+ compute_length_and_or_checksum (vm, packets, n_packets, ip_offset,
+ IP4_PG_EDIT_CHECKSUM);
+ break;
+
+ case IP4_PG_EDIT_LENGTH | IP4_PG_EDIT_CHECKSUM:
+ compute_length_and_or_checksum (vm, packets, n_packets, ip_offset,
+ IP4_PG_EDIT_LENGTH
+ | IP4_PG_EDIT_CHECKSUM);
+ break;
+
+ default:
+ ASSERT (0);
+ break;
+ }
+}
+
+typedef struct
+{
+ pg_edit_t ip_version, header_length;
+ pg_edit_t tos;
+ pg_edit_t length;
+
+ pg_edit_t fragment_id, fragment_offset;
+
+ /* Flags together with fragment offset. */
+ pg_edit_t mf_flag, df_flag, ce_flag;
+
+ pg_edit_t ttl;
+
+ pg_edit_t protocol;
+
+ pg_edit_t checksum;
+
+ pg_edit_t src_address, dst_address;
+} pg_ip4_header_t;
+
+static inline void
+pg_ip4_header_init (pg_ip4_header_t * p)
+{
+ /* Initialize fields that are not bit fields in the IP header. */
+#define _(f) pg_edit_init (&p->f, ip4_header_t, f);
+ _(tos);
+ _(length);
+ _(fragment_id);
+ _(ttl);
+ _(protocol);
+ _(checksum);
+ _(src_address);
+ _(dst_address);
+#undef _
+
+ /* Initialize bit fields. */
+ pg_edit_init_bitfield (&p->header_length, ip4_header_t,
+ ip_version_and_header_length, 0, 4);
+ pg_edit_init_bitfield (&p->ip_version, ip4_header_t,
+ ip_version_and_header_length, 4, 4);
+
+ pg_edit_init_bitfield (&p->fragment_offset, ip4_header_t,
+ flags_and_fragment_offset, 0, 13);
+ pg_edit_init_bitfield (&p->mf_flag, ip4_header_t,
+ flags_and_fragment_offset, 13, 1);
+ pg_edit_init_bitfield (&p->df_flag, ip4_header_t,
+ flags_and_fragment_offset, 14, 1);
+ pg_edit_init_bitfield (&p->ce_flag, ip4_header_t,
+ flags_and_fragment_offset, 15, 1);
+}
+
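+/* Parse a packet-generator IP4 header spec.  A sketch of accepted
+   input (the exact grammar is driven by the unformat calls below):
+
+     udp: 1.2.3.4 -> 5.6.7.8 ttl 64 df
+
+   i.e. protocol, then source and destination addresses, then optional
+   fields; length and checksum are computed when left unspecified. */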
+uword
+unformat_pg_ip4_header (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t *s = va_arg (*args, pg_stream_t *);
+ pg_ip4_header_t *p;
+ u32 group_index;
+
+ p = pg_create_edit_group (s, sizeof (p[0]), sizeof (ip4_header_t),
+ &group_index);
+ pg_ip4_header_init (p);
+
+ /* Defaults. */
+ pg_edit_set_fixed (&p->ip_version, 4);
+ pg_edit_set_fixed (&p->header_length, sizeof (ip4_header_t) / sizeof (u32));
+
+ pg_edit_set_fixed (&p->tos, 0);
+ pg_edit_set_fixed (&p->ttl, 64);
+
+ pg_edit_set_fixed (&p->fragment_id, 0);
+ pg_edit_set_fixed (&p->fragment_offset, 0);
+ pg_edit_set_fixed (&p->mf_flag, 0);
+ pg_edit_set_fixed (&p->df_flag, 0);
+ pg_edit_set_fixed (&p->ce_flag, 0);
+
+ p->length.type = PG_EDIT_UNSPECIFIED;
+ p->checksum.type = PG_EDIT_UNSPECIFIED;
+
+ if (unformat (input, "%U: %U -> %U",
+ unformat_pg_edit,
+ unformat_ip_protocol, &p->protocol,
+ unformat_pg_edit,
+ unformat_ip4_address, &p->src_address,
+ unformat_pg_edit, unformat_ip4_address, &p->dst_address))
+ goto found;
+
+ if (!unformat (input, "%U:",
+ unformat_pg_edit, unformat_ip_protocol, &p->protocol))
+ goto error;
+
+found:
+ /* Parse options. */
+ while (1)
+ {
+ if (unformat (input, "version %U",
+ unformat_pg_edit, unformat_pg_number, &p->ip_version))
+ ;
+
+ else if (unformat (input, "header-length %U",
+ unformat_pg_edit,
+ unformat_pg_number, &p->header_length))
+ ;
+
+ else if (unformat (input, "tos %U",
+ unformat_pg_edit, unformat_pg_number, &p->tos))
+ ;
+
+ else if (unformat (input, "length %U",
+ unformat_pg_edit, unformat_pg_number, &p->length))
+ ;
+
+ else if (unformat (input, "checksum %U",
+ unformat_pg_edit, unformat_pg_number, &p->checksum))
+ ;
+
+ else if (unformat (input, "ttl %U",
+ unformat_pg_edit, unformat_pg_number, &p->ttl))
+ ;
+
+ else if (unformat (input, "fragment id %U offset %U",
+ unformat_pg_edit,
+ unformat_pg_number, &p->fragment_id,
+ unformat_pg_edit,
+ unformat_pg_number, &p->fragment_offset))
+ {
+ int i;
+ for (i = 0; i < ARRAY_LEN (p->fragment_offset.values); i++)
+ pg_edit_set_value (&p->fragment_offset, i,
+ pg_edit_get_value (&p->fragment_offset,
+ i) / 8);
+
+ }
+
+ /* Flags. */
+ else if (unformat (input, "mf") || unformat (input, "MF"))
+ pg_edit_set_fixed (&p->mf_flag, 1);
+
+ else if (unformat (input, "df") || unformat (input, "DF"))
+ pg_edit_set_fixed (&p->df_flag, 1);
+
+ else if (unformat (input, "ce") || unformat (input, "CE"))
+ pg_edit_set_fixed (&p->ce_flag, 1);
+
+ /* Can't parse input: try next protocol level. */
+ else
+ break;
+ }
+
+ {
+ ip_main_t *im = &ip_main;
+ ip_protocol_t protocol;
+ ip_protocol_info_t *pi;
+
+ pi = 0;
+ if (p->protocol.type == PG_EDIT_FIXED)
+ {
+ protocol = pg_edit_get_value (&p->protocol, PG_EDIT_LO);
+ pi = ip_get_protocol_info (im, protocol);
+ }
+
+ if (pi && pi->unformat_pg_edit
+ && unformat_user (input, pi->unformat_pg_edit, s))
+ ;
+
+ else if (!unformat_user (input, unformat_pg_payload, s))
+ goto error;
+
+ if (p->length.type == PG_EDIT_UNSPECIFIED
+ && s->min_packet_bytes == s->max_packet_bytes
+ && group_index + 1 < vec_len (s->edit_groups))
+ {
+ pg_edit_set_fixed (&p->length,
+ pg_edit_group_n_bytes (s, group_index));
+ }
+
+ /* Compute IP header checksum if all edits are fixed. */
+ if (p->checksum.type == PG_EDIT_UNSPECIFIED)
+ {
+ ip4_header_t fixed_header, fixed_mask, cmp_mask;
+
+ /* See if header is all fixed and specified except for
+ checksum field. */
+ memset (&cmp_mask, ~0, sizeof (cmp_mask));
+ cmp_mask.checksum = 0;
+
+ pg_edit_group_get_fixed_packet_data (s, group_index,
+ &fixed_header, &fixed_mask);
+ if (!memcmp (&fixed_mask, &cmp_mask, sizeof (cmp_mask)))
+ pg_edit_set_fixed (&p->checksum,
+ clib_net_to_host_u16 (ip4_header_checksum
+ (&fixed_header)));
+ }
+
+ p = pg_get_edit_group (s, group_index);
+ if (p->length.type == PG_EDIT_UNSPECIFIED
+ || p->checksum.type == PG_EDIT_UNSPECIFIED)
+ {
+ pg_edit_group_t *g = pg_stream_get_group (s, group_index);
+ g->edit_function = ip4_pg_edit_function;
+ g->edit_function_opaque = 0;
+ if (p->length.type == PG_EDIT_UNSPECIFIED)
+ g->edit_function_opaque |= IP4_PG_EDIT_LENGTH;
+ if (p->checksum.type == PG_EDIT_UNSPECIFIED)
+ g->edit_function_opaque |= IP4_PG_EDIT_CHECKSUM;
+ }
+
+ return 1;
+ }
+
+error:
+ /* Free up any edits we may have added. */
+ pg_free_edit_group (s);
+ return 0;
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip4_source_and_port_range_check.c b/src/vnet/ip/ip4_source_and_port_range_check.c
new file mode 100644
index 00000000000..ae836a113a5
--- /dev/null
+++ b/src/vnet/ip/ip4_source_and_port_range_check.c
@@ -0,0 +1,1415 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/ip/ip.h>
+#include <vnet/ip/ip_source_and_port_range_check.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/ip4_fib.h>
+
+/**
+ * @file
+ * @brief IPv4 Source and Port Range Checking.
+ *
+ * This file contains the source code for IPv4 source and port range
+ * checking.
+ */
+
+
+/**
+ * @brief The pool of range check DPOs
+ */
+static protocol_port_range_dpo_t *ppr_dpo_pool;
+
+/**
+ * @brief Dynamically registered DPO type
+ */
+static dpo_type_t ppr_dpo_type;
+
+vlib_node_registration_t ip4_source_port_and_range_check_rx;
+vlib_node_registration_t ip4_source_port_and_range_check_tx;
+
+#define foreach_ip4_source_and_port_range_check_error \
+ _(CHECK_FAIL, "ip4 source and port range check bad packets") \
+ _(CHECK_OK, "ip4 source and port range check good packets")
+
+typedef enum
+{
+#define _(sym,str) IP4_SOURCE_AND_PORT_RANGE_CHECK_ERROR_##sym,
+ foreach_ip4_source_and_port_range_check_error
+#undef _
+ IP4_SOURCE_AND_PORT_RANGE_CHECK_N_ERROR,
+} ip4_source_and_port_range_check_error_t;
+
+static char *ip4_source_and_port_range_check_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ip4_source_and_port_range_check_error
+#undef _
+};
+
+typedef struct
+{
+ u32 pass;
+ u32 bypass;
+ u32 is_tcp;
+ ip4_address_t src_addr;
+ u16 port;
+ u32 fib_index;
+} ip4_source_and_port_range_check_trace_t;
+
+static u8 *
+format_ip4_source_and_port_range_check_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ ip4_source_and_port_range_check_trace_t *t =
+ va_arg (*va, ip4_source_and_port_range_check_trace_t *);
+
+ if (t->bypass)
+ s = format (s, "PASS (bypass case)");
+ else
+ s = format (s, "fib %d src ip %U %s dst port %d: %s",
+ t->fib_index, format_ip4_address, &t->src_addr,
+ t->is_tcp ? "TCP" : "UDP", (u32) t->port,
+ (t->pass == 1) ? "PASS" : "FAIL");
+ return s;
+}
+
+typedef enum
+{
+ IP4_SOURCE_AND_PORT_RANGE_CHECK_NEXT_DROP,
+ IP4_SOURCE_AND_PORT_RANGE_CHECK_N_NEXT,
+} ip4_source_and_port_range_check_next_t;
+
+
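+/* SIMD range test used below: for a lane holding a range [low, hi),
+   diff1 = sat_sub (low, key) is zero iff key >= low, and
+   diff2 = sat_sub (hi, key) is hi - key when key <= hi, else zero.
+   With sum = diff1 + diff2, a lane matches iff sum == diff2
+   (so key >= low) and sum != 0 (so key < hi). */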
+static inline u32
+check_adj_port_range_x1 (const protocol_port_range_dpo_t * ppr_dpo,
+ u16 dst_port, u32 next)
+{
+ u16x8vec_t key;
+ u16x8vec_t diff1;
+ u16x8vec_t diff2;
+ u16x8vec_t sum, sum_equal_diff2;
+ u16 sum_nonzero, sum_equal, winner_mask;
+ int i;
+
+ if (NULL == ppr_dpo || dst_port == 0)
+ return IP4_SOURCE_AND_PORT_RANGE_CHECK_NEXT_DROP;
+
+ /* Make the obvious screw-case work. A variant also works w/ no MMX */
+ if (PREDICT_FALSE (dst_port == 65535))
+ {
+ int j;
+
+ for (i = 0;
+ i < VLIB_BUFFER_PRE_DATA_SIZE / sizeof (protocol_port_range_t);
+ i++)
+ {
+ for (j = 0; j < 8; j++)
+ if (ppr_dpo->blocks[i].low.as_u16[j] == 65535)
+ return next;
+ }
+ return IP4_SOURCE_AND_PORT_RANGE_CHECK_NEXT_DROP;
+ }
+
+ key.as_u16x8 = u16x8_splat (dst_port);
+
+ for (i = 0; i < ppr_dpo->n_used_blocks; i++)
+ {
+ diff1.as_u16x8 =
+ u16x8_sub_saturate (ppr_dpo->blocks[i].low.as_u16x8, key.as_u16x8);
+ diff2.as_u16x8 =
+ u16x8_sub_saturate (ppr_dpo->blocks[i].hi.as_u16x8, key.as_u16x8);
+ sum.as_u16x8 = u16x8_add (diff1.as_u16x8, diff2.as_u16x8);
+ sum_equal_diff2.as_u16x8 =
+ u16x8_is_equal (sum.as_u16x8, diff2.as_u16x8);
+ sum_nonzero = ~u16x8_zero_byte_mask (sum.as_u16x8);
+ sum_equal = ~u16x8_zero_byte_mask (sum_equal_diff2.as_u16x8);
+ winner_mask = sum_nonzero & sum_equal;
+ if (winner_mask)
+ return next;
+ }
+ return IP4_SOURCE_AND_PORT_RANGE_CHECK_NEXT_DROP;
+}
+
+always_inline protocol_port_range_dpo_t *
+protocol_port_range_dpo_get (index_t index)
+{
+ return (pool_elt_at_index (ppr_dpo_pool, index));
+}
+
+always_inline uword
+ip4_source_and_port_range_check_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, int is_tx)
+{
+ ip4_main_t *im = &ip4_main;
+ u32 n_left_from, *from, *to_next;
+ u32 next_index;
+ vlib_node_runtime_t *error_node = node;
+ u32 good_packets = 0;
+ int i;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+
+ /* while (n_left_from >= 4 && n_left_to_next >= 2) */
+ /* { */
+ /* vlib_buffer_t *b0, *b1; */
+ /* ip4_header_t *ip0, *ip1; */
+ /* ip4_fib_mtrie_t *mtrie0, *mtrie1; */
+ /* ip4_fib_mtrie_leaf_t leaf0, leaf1; */
+ /* ip_source_and_port_range_check_config_t *c0, *c1; */
+ /* ip_adjacency_t *adj0 = 0, *adj1 = 0; */
+ /* u32 bi0, next0, adj_index0, pass0, save_next0, fib_index0; */
+ /* u32 bi1, next1, adj_index1, pass1, save_next1, fib_index1; */
+ /* udp_header_t *udp0, *udp1; */
+
+ /* /\* Prefetch next iteration. *\/ */
+ /* { */
+ /* vlib_buffer_t *p2, *p3; */
+
+ /* p2 = vlib_get_buffer (vm, from[2]); */
+ /* p3 = vlib_get_buffer (vm, from[3]); */
+
+ /* vlib_prefetch_buffer_header (p2, LOAD); */
+ /* vlib_prefetch_buffer_header (p3, LOAD); */
+
+ /* CLIB_PREFETCH (p2->data, sizeof (ip0[0]), LOAD); */
+ /* CLIB_PREFETCH (p3->data, sizeof (ip1[0]), LOAD); */
+ /* } */
+
+ /* bi0 = to_next[0] = from[0]; */
+ /* bi1 = to_next[1] = from[1]; */
+ /* from += 2; */
+ /* to_next += 2; */
+ /* n_left_from -= 2; */
+ /* n_left_to_next -= 2; */
+
+ /* b0 = vlib_get_buffer (vm, bi0); */
+ /* b1 = vlib_get_buffer (vm, bi1); */
+
+ /* fib_index0 = */
+ /* vec_elt (im->fib_index_by_sw_if_index, */
+ /* vnet_buffer (b0)->sw_if_index[VLIB_RX]); */
+ /* fib_index1 = */
+ /* vec_elt (im->fib_index_by_sw_if_index, */
+ /* vnet_buffer (b1)->sw_if_index[VLIB_RX]); */
+
+ /* ip0 = vlib_buffer_get_current (b0); */
+ /* ip1 = vlib_buffer_get_current (b1); */
+
+ /* if (is_tx) */
+ /* { */
+ /* c0 = vnet_get_config_data (&tx_cm->config_main, */
+ /* &b0->current_config_index, */
+ /* &next0, sizeof (c0[0])); */
+ /* c1 = vnet_get_config_data (&tx_cm->config_main, */
+ /* &b1->current_config_index, */
+ /* &next1, sizeof (c1[0])); */
+ /* } */
+ /* else */
+ /* { */
+ /* c0 = vnet_get_config_data (&rx_cm->config_main, */
+ /* &b0->current_config_index, */
+ /* &next0, sizeof (c0[0])); */
+ /* c1 = vnet_get_config_data (&rx_cm->config_main, */
+ /* &b1->current_config_index, */
+ /* &next1, sizeof (c1[0])); */
+ /* } */
+
+ /* /\* we can't use the default VRF here... *\/ */
+ /* for (i = 0; i < IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS; i++) */
+ /* { */
+ /* ASSERT (c0->fib_index[i] && c1->fib_index[i]); */
+ /* } */
+
+
+ /* if (is_tx) */
+ /* { */
+ /* if (ip0->protocol == IP_PROTOCOL_UDP) */
+ /* fib_index0 = */
+ /* c0->fib_index */
+ /* [IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_IN]; */
+ /* if (ip0->protocol == IP_PROTOCOL_TCP) */
+ /* fib_index0 = */
+ /* c0->fib_index */
+ /* [IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_IN]; */
+ /* } */
+ /* else */
+ /* { */
+ /* if (ip0->protocol == IP_PROTOCOL_UDP) */
+ /* fib_index0 = */
+ /* c0->fib_index */
+ /* [IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_OUT]; */
+ /* if (ip0->protocol == IP_PROTOCOL_TCP) */
+ /* fib_index0 = */
+ /* c0->fib_index */
+ /* [IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_OUT]; */
+ /* } */
+
+ /* if (PREDICT_TRUE (fib_index0 != ~0)) */
+ /* { */
+
+ /* mtrie0 = &vec_elt_at_index (im->fibs, fib_index0)->mtrie; */
+
+ /* leaf0 = IP4_FIB_MTRIE_LEAF_ROOT; */
+
+ /* leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, */
+ /* &ip0->src_address, 0); */
+
+ /* leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, */
+ /* &ip0->src_address, 1); */
+
+ /* leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, */
+ /* &ip0->src_address, 2); */
+
+ /* leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, */
+ /* &ip0->src_address, 3); */
+
+ /* adj_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0); */
+
+ /* ASSERT (adj_index0 == ip4_fib_lookup_with_table (im, fib_index0, */
+ /* &ip0->src_address, */
+ /* 0 */
+ /* /\* use dflt rt *\/ */
+ /* )); */
+ /* adj0 = ip_get_adjacency (lm, adj_index0); */
+ /* } */
+
+ /* if (is_tx) */
+ /* { */
+ /* if (ip1->protocol == IP_PROTOCOL_UDP) */
+ /* fib_index1 = */
+ /* c1->fib_index */
+ /* [IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_IN]; */
+ /* if (ip1->protocol == IP_PROTOCOL_TCP) */
+ /* fib_index1 = */
+ /* c1->fib_index */
+ /* [IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_IN]; */
+ /* } */
+ /* else */
+ /* { */
+ /* if (ip1->protocol == IP_PROTOCOL_UDP) */
+ /* fib_index1 = */
+ /* c1->fib_index */
+ /* [IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_OUT]; */
+ /* if (ip1->protocol == IP_PROTOCOL_TCP) */
+ /* fib_index1 = */
+ /* c1->fib_index */
+ /* [IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_OUT]; */
+ /* } */
+
+ /* if (PREDICT_TRUE (fib_index1 != ~0)) */
+ /* { */
+
+ /* mtrie1 = &vec_elt_at_index (im->fibs, fib_index1)->mtrie; */
+
+ /* leaf1 = IP4_FIB_MTRIE_LEAF_ROOT; */
+
+ /* leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, */
+ /* &ip1->src_address, 0); */
+
+ /* leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, */
+ /* &ip1->src_address, 1); */
+
+ /* leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, */
+ /* &ip1->src_address, 2); */
+
+ /* leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, */
+ /* &ip1->src_address, 3); */
+
+ /* adj_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1); */
+
+ /* ASSERT (adj_index1 == ip4_fib_lookup_with_table (im, fib_index1, */
+ /* &ip1->src_address, */
+ /* 0)); */
+ /* adj1 = ip_get_adjacency (lm, adj_index1); */
+ /* } */
+
+ /* pass0 = 0; */
+ /* pass0 |= adj0 == 0; */
+ /* pass0 |= ip4_address_is_multicast (&ip0->src_address); */
+ /* pass0 |= */
+ /* ip0->src_address.as_u32 == clib_host_to_net_u32 (0xFFFFFFFF); */
+ /* pass0 |= (ip0->protocol != IP_PROTOCOL_UDP) */
+ /* && (ip0->protocol != IP_PROTOCOL_TCP); */
+
+ /* pass1 = 0; */
+ /* pass1 |= adj1 == 0; */
+ /* pass1 |= ip4_address_is_multicast (&ip1->src_address); */
+ /* pass1 |= */
+ /* ip1->src_address.as_u32 == clib_host_to_net_u32 (0xFFFFFFFF); */
+ /* pass1 |= (ip1->protocol != IP_PROTOCOL_UDP) */
+ /* && (ip1->protocol != IP_PROTOCOL_TCP); */
+
+ /* save_next0 = next0; */
+ /* udp0 = ip4_next_header (ip0); */
+ /* save_next1 = next1; */
+ /* udp1 = ip4_next_header (ip1); */
+
+ /* if (PREDICT_TRUE (pass0 == 0)) */
+ /* { */
+ /* good_packets++; */
+ /* next0 = check_adj_port_range_x1 */
+ /* (adj0, clib_net_to_host_u16 (udp0->dst_port), next0); */
+ /* good_packets -= (save_next0 != next0); */
+ /* b0->error = error_node->errors */
+ /* [IP4_SOURCE_AND_PORT_RANGE_CHECK_ERROR_CHECK_FAIL]; */
+ /* } */
+
+ /* if (PREDICT_TRUE (pass1 == 0)) */
+ /* { */
+ /* good_packets++; */
+ /* next1 = check_adj_port_range_x1 */
+ /* (adj1, clib_net_to_host_u16 (udp1->dst_port), next1); */
+ /* good_packets -= (save_next1 != next1); */
+ /* b1->error = error_node->errors */
+ /* [IP4_SOURCE_AND_PORT_RANGE_CHECK_ERROR_CHECK_FAIL]; */
+ /* } */
+
+ /* if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) */
+ /* && (b0->flags & VLIB_BUFFER_IS_TRACED))) */
+ /* { */
+ /* ip4_source_and_port_range_check_trace_t *t = */
+ /* vlib_add_trace (vm, node, b0, sizeof (*t)); */
+ /* t->pass = next0 == save_next0; */
+ /* t->bypass = pass0; */
+ /* t->fib_index = fib_index0; */
+ /* t->src_addr.as_u32 = ip0->src_address.as_u32; */
+ /* t->port = (pass0 == 0) ? */
+ /* clib_net_to_host_u16 (udp0->dst_port) : 0; */
+ /* t->is_tcp = ip0->protocol == IP_PROTOCOL_TCP; */
+ /* } */
+
+ /* if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) */
+ /* && (b1->flags & VLIB_BUFFER_IS_TRACED))) */
+ /* { */
+ /* ip4_source_and_port_range_check_trace_t *t = */
+ /* vlib_add_trace (vm, node, b1, sizeof (*t)); */
+ /* t->pass = next1 == save_next1; */
+ /* t->bypass = pass1; */
+ /* t->fib_index = fib_index1; */
+ /* t->src_addr.as_u32 = ip1->src_address.as_u32; */
+ /* t->port = (pass1 == 0) ? */
+ /* clib_net_to_host_u16 (udp1->dst_port) : 0; */
+ /* t->is_tcp = ip1->protocol == IP_PROTOCOL_TCP; */
+ /* } */
+
+ /* vlib_validate_buffer_enqueue_x2 (vm, node, next_index, */
+ /* to_next, n_left_to_next, */
+ /* bi0, bi1, next0, next1); */
+ /* } */
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *b0;
+ ip4_header_t *ip0;
+ ip_source_and_port_range_check_config_t *c0;
+ u32 bi0, next0, lb_index0, pass0, save_next0, fib_index0;
+ udp_header_t *udp0;
+ const protocol_port_range_dpo_t *ppr_dpo0 = NULL;
+ const dpo_id_t *dpo;
+ u32 sw_if_index0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ fib_index0 = vec_elt (im->fib_index_by_sw_if_index, sw_if_index0);
+
+ if (is_tx)
+ vlib_buffer_advance (b0, sizeof (ethernet_header_t));
+
+ ip0 = vlib_buffer_get_current (b0);
+
+ c0 = vnet_feature_next_with_data (sw_if_index0, &next0,
+ b0, sizeof (c0[0]));
+
+ /* we can't use the default VRF here... */
+ for (i = 0; i < IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS; i++)
+ {
+ ASSERT (c0->fib_index[i]);
+ }
+
+
+ if (is_tx)
+ {
+ if (ip0->protocol == IP_PROTOCOL_UDP)
+ fib_index0 =
+ c0->fib_index
+ [IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_IN];
+ if (ip0->protocol == IP_PROTOCOL_TCP)
+ fib_index0 =
+ c0->fib_index
+ [IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_IN];
+ }
+ else
+ {
+ if (ip0->protocol == IP_PROTOCOL_UDP)
+ fib_index0 =
+ c0->fib_index
+ [IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_OUT];
+ if (ip0->protocol == IP_PROTOCOL_TCP)
+ fib_index0 =
+ c0->fib_index
+ [IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_OUT];
+ }
+
+ if (fib_index0 != ~0)
+ {
+ lb_index0 = ip4_fib_forwarding_lookup (fib_index0,
+ &ip0->src_address);
+
+ dpo =
+ load_balance_get_bucket_i (load_balance_get (lb_index0), 0);
+
+ if (ppr_dpo_type == dpo->dpoi_type)
+ {
+ ppr_dpo0 = protocol_port_range_dpo_get (dpo->dpoi_index);
+ }
+ /*
+	       * else the lookup hit an entry that was not inserted
+	       * by this range checker, e.g. the default route
+ */
+ }
+ /*
+ * $$$ which (src,dst) categories should we always pass?
+ */
+ pass0 = 0;
+ pass0 |= ip4_address_is_multicast (&ip0->src_address);
+ pass0 |=
+ ip0->src_address.as_u32 == clib_host_to_net_u32 (0xFFFFFFFF);
+ pass0 |= (ip0->protocol != IP_PROTOCOL_UDP)
+ && (ip0->protocol != IP_PROTOCOL_TCP);
+
+ save_next0 = next0;
+ udp0 = ip4_next_header (ip0);
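+	  /* The udp_header_t overlay is valid for TCP as well: both
+	   * protocols carry the destination port at offset 2, which is
+	   * all this node reads. */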
+
+ if (PREDICT_TRUE (pass0 == 0))
+ {
+ good_packets++;
+ next0 = check_adj_port_range_x1
+ (ppr_dpo0, clib_net_to_host_u16 (udp0->dst_port), next0);
+ good_packets -= (save_next0 != next0);
+ b0->error = error_node->errors
+ [IP4_SOURCE_AND_PORT_RANGE_CHECK_ERROR_CHECK_FAIL];
+ }
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ ip4_source_and_port_range_check_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pass = next0 == save_next0;
+ t->bypass = pass0;
+ t->fib_index = fib_index0;
+ t->src_addr.as_u32 = ip0->src_address.as_u32;
+ t->port = (pass0 == 0) ?
+ clib_net_to_host_u16 (udp0->dst_port) : 0;
+ t->is_tcp = ip0->protocol == IP_PROTOCOL_TCP;
+ }
+
+ if (is_tx)
+ vlib_buffer_advance (b0, -sizeof (ethernet_header_t));
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ if (is_tx)
+ vlib_node_increment_counter (vm, ip4_source_port_and_range_check_tx.index,
+ IP4_SOURCE_AND_PORT_RANGE_CHECK_ERROR_CHECK_OK,
+ good_packets);
+ else
+ vlib_node_increment_counter (vm, ip4_source_port_and_range_check_rx.index,
+ IP4_SOURCE_AND_PORT_RANGE_CHECK_ERROR_CHECK_OK,
+ good_packets);
+ return frame->n_vectors;
+}
+
+static uword
+ip4_source_and_port_range_check_rx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return ip4_source_and_port_range_check_inline (vm, node, frame,
+ 0 /* !is_tx */ );
+}
+
+static uword
+ip4_source_and_port_range_check_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return ip4_source_and_port_range_check_inline (vm, node, frame,
+ 1 /* is_tx */ );
+}
+
+/* Note: the same function is used for both the RX and TX nodes,
+   since both always check dst_port; if that ever changes, a
+   separate function is easy to add.
+*/
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_source_port_and_range_check_rx) = {
+ .function = ip4_source_and_port_range_check_rx,
+ .name = "ip4-source-and-port-range-check-rx",
+ .vector_size = sizeof (u32),
+
+ .n_errors = ARRAY_LEN(ip4_source_and_port_range_check_error_strings),
+ .error_strings = ip4_source_and_port_range_check_error_strings,
+
+ .n_next_nodes = IP4_SOURCE_AND_PORT_RANGE_CHECK_N_NEXT,
+ .next_nodes = {
+ [IP4_SOURCE_AND_PORT_RANGE_CHECK_NEXT_DROP] = "error-drop",
+ },
+
+ .format_buffer = format_ip4_header,
+ .format_trace = format_ip4_source_and_port_range_check_trace,
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_source_port_and_range_check_tx) = {
+ .function = ip4_source_and_port_range_check_tx,
+ .name = "ip4-source-and-port-range-check-tx",
+ .vector_size = sizeof (u32),
+
+ .n_errors = ARRAY_LEN(ip4_source_and_port_range_check_error_strings),
+ .error_strings = ip4_source_and_port_range_check_error_strings,
+
+ .n_next_nodes = IP4_SOURCE_AND_PORT_RANGE_CHECK_N_NEXT,
+ .next_nodes = {
+ [IP4_SOURCE_AND_PORT_RANGE_CHECK_NEXT_DROP] = "error-drop",
+ },
+
+ .format_buffer = format_ip4_header,
+ .format_trace = format_ip4_source_and_port_range_check_trace,
+};
+/* *INDENT-ON* */
+
+int
+set_ip_source_and_port_range_check (vlib_main_t * vm,
+ u32 * fib_index,
+ u32 sw_if_index, u32 is_add)
+{
+ ip_source_and_port_range_check_config_t config;
+ int rv = 0;
+ int i;
+
+ for (i = 0; i < IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS; i++)
+ {
+ config.fib_index[i] = fib_index[i];
+ }
+
+ /* For OUT we are in the RX path */
+ if ((fib_index[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_OUT] != ~0) ||
+ (fib_index[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_OUT] != ~0))
+ {
+ vnet_feature_enable_disable ("ip4-unicast",
+ "ip4-source-and-port-range-check-rx",
+ sw_if_index, is_add, &config,
+ sizeof (config));
+ }
+
+ /* For IN we are in the TX path */
+ if ((fib_index[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_IN] != ~0) ||
+ (fib_index[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_IN] != ~0))
+ {
+ vnet_feature_enable_disable ("ip4-output",
+ "ip4-source-and-port-range-check-tx",
+ sw_if_index, is_add, &config,
+ sizeof (config));
+ }
+ return rv;
+}
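+
+/*
+ * A minimal usage sketch, not part of the original code: enable only the
+ * RX-path UDP check. 'my_fib_index' is a hypothetical valid FIB index;
+ * unused protocol slots stay ~0 so they are ignored above. The final
+ * argument is is_add.
+ *
+ *   u32 fib_index[IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS];
+ *   int i;
+ *   for (i = 0; i < IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS; i++)
+ *     fib_index[i] = ~0;
+ *   fib_index[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_OUT] = my_fib_index;
+ *   set_ip_source_and_port_range_check (vm, fib_index, sw_if_index, 1);
+ */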
+
+static clib_error_t *
+set_ip_source_and_port_range_check_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip4_main_t *im = &ip4_main;
+ clib_error_t *error = 0;
+ u8 is_add = 1;
+ u32 sw_if_index = ~0;
+ u32 vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS];
+ u32 fib_index[IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS];
+ int vrf_set = 0;
+ uword *p;
+ int rv = 0;
+ int i;
+
+ sw_if_index = ~0;
+ for (i = 0; i < IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS; i++)
+ {
+ fib_index[i] = ~0;
+ vrf_id[i] = ~0;
+ }
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%U", unformat_vnet_sw_interface, vnm,
+ &sw_if_index))
+ ;
+ else
+ if (unformat
+ (input, "tcp-out-vrf %d",
+ &vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_OUT]))
+ vrf_set = 1;
+ else
+ if (unformat
+ (input, "udp-out-vrf %d",
+ &vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_OUT]))
+ vrf_set = 1;
+ else
+ if (unformat
+ (input, "tcp-in-vrf %d",
+ &vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_IN]))
+ vrf_set = 1;
+ else
+ if (unformat
+ (input, "udp-in-vrf %d",
+ &vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_IN]))
+ vrf_set = 1;
+ else if (unformat (input, "del"))
+ is_add = 0;
+ else
+ break;
+ }
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "Interface required but not specified");
+
+ if (!vrf_set)
+ return clib_error_return (0,
+ "TCP or UDP VRF ID required but not specified");
+
+ for (i = 0; i < IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS; i++)
+ {
+
+ if (vrf_id[i] == 0)
+ return clib_error_return (0,
+				  "TCP/UDP VRF ID must not be 0 (the default); use a distinct VRF for this purpose");
+
+ if (vrf_id[i] != ~0)
+ {
+ p = hash_get (im->fib_index_by_table_id, vrf_id[i]);
+
+ if (p == 0)
+ return clib_error_return (0, "Invalid VRF ID %d", vrf_id[i]);
+
+ fib_index[i] = p[0];
+ }
+ }
+ rv =
+ set_ip_source_and_port_range_check (vm, fib_index, sw_if_index, is_add);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ default:
+ return clib_error_return
+ (0,
+ "set source and port-range on interface returned an unexpected value: %d",
+ rv);
+ }
+ return error;
+}
+
+/*?
+ * Add the 'ip4-source-and-port-range-check-rx' or
+ * 'ip4-source-and-port-range-check-tx' graph node for a given
+ * interface. 'tcp-out-vrf' and 'udp-out-vrf' will add to
+ * the RX path. 'tcp-in-vrf' and 'udp-in-vrf' will add to
+ * the TX path. A graph node will be inserted into the chain when
+ * the range check is added to the first interface. It will not
+ * be removed when the range check is removed from the last
+ * interface.
+ *
+ * By adding the range check graph node to the interface, incoming
+ * or outgoing TCP/UDP packets will be validated using the
+ * provided IPv4 FIB table (VRF).
+ *
+ * @note 'ip4-source-and-port-range-check-rx' and
+ * 'ip4-source-and-port-range-check-tx' strings are too long, so
+ * they are truncated on the 'show vlib graph' output.
+ *
+ * @todo This content needs to be validated and potentially more detail added.
+ *
+ * @cliexpar
+ * @parblock
+ * Example of graph node before range checking is enabled:
+ * @cliexstart{show vlib graph ip4-source-and-port-range-check-tx}
+ * Name Next Previous
+ * ip4-source-and-port-range- error-drop [0]
+ * @cliexend
+ *
+ * Example of how to enable range checking on TX:
+ * @cliexcmd{set interface ip source-and-port-range-check GigabitEthernet2/0/0 udp-in-vrf 7}
+ *
+ * Example of graph node after range checking is enabled:
+ * @cliexstart{show vlib graph ip4-source-and-port-range-check-tx}
+ * Name Next Previous
+ * ip4-source-and-port-range- error-drop [0] ip4-rewrite
+ * interface-output [1]
+ * @cliexend
+ *
+ * Example of how to display the features enabled on an interface:
+ * @cliexstart{show ip interface features GigabitEthernet2/0/0}
+ * IP feature paths configured on GigabitEthernet2/0/0...
+ *
+ * ipv4 unicast:
+ * ip4-source-and-port-range-check-rx
+ * ip4-lookup
+ *
+ * ipv4 multicast:
+ * ip4-lookup-multicast
+ *
+ * ipv4 multicast:
+ * interface-output
+ *
+ * ipv6 unicast:
+ * ip6-lookup
+ *
+ * ipv6 multicast:
+ * ip6-lookup
+ *
+ * ipv6 multicast:
+ * interface-output
+ * @cliexend
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_ip_source_and_port_range_check_command, static) = {
+ .path = "set interface ip source-and-port-range-check",
+ .function = set_ip_source_and_port_range_check_fn,
+ .short_help = "set interface ip source-and-port-range-check <interface> [tcp-out-vrf <table-id>] [udp-out-vrf <table-id>] [tcp-in-vrf <table-id>] [udp-in-vrf <table-id>] [del]",
+};
+/* *INDENT-ON* */
+
+static u8 *
+format_ppr_dpo (u8 * s, va_list * args)
+{
+ index_t index = va_arg (*args, index_t);
+ CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
+
+ protocol_port_range_dpo_t *ppr_dpo;
+ int i, j;
+ int printed = 0;
+
+ ppr_dpo = protocol_port_range_dpo_get (index);
+
+ s = format (s, "allow ");
+
+ for (i = 0; i < ppr_dpo->n_used_blocks; i++)
+ {
+ for (j = 0; j < 8; j++)
+ {
+ if (ppr_dpo->blocks[i].low.as_u16[j])
+ {
+ if (printed)
+ s = format (s, ", ");
+ if (ppr_dpo->blocks[i].hi.as_u16[j] >
+ (ppr_dpo->blocks[i].low.as_u16[j] + 1))
+ s =
+ format (s, "%d-%d", (u32) ppr_dpo->blocks[i].low.as_u16[j],
+ (u32) ppr_dpo->blocks[i].hi.as_u16[j] - 1);
+ else
+ s = format (s, "%d", ppr_dpo->blocks[i].low.as_u16[j]);
+ printed = 1;
+ }
+ }
+ }
+ return s;
+}
+
+static void
+ppr_dpo_lock (dpo_id_t * dpo)
+{
+}
+
+static void
+ppr_dpo_unlock (dpo_id_t * dpo)
+{
+}
+
+const static dpo_vft_t ppr_vft = {
+ .dv_lock = ppr_dpo_lock,
+ .dv_unlock = ppr_dpo_unlock,
+ .dv_format = format_ppr_dpo,
+};
+
+const static char *const ppr_ip4_nodes[] = {
+ "ip4-source-and-port-range-check-rx",
+ NULL,
+};
+
+const static char *const *const ppr_nodes[DPO_PROTO_NUM] = {
+ [DPO_PROTO_IP4] = ppr_ip4_nodes,
+};
+
+clib_error_t *
+ip4_source_and_port_range_check_init (vlib_main_t * vm)
+{
+ source_range_check_main_t *srm = &source_range_check_main;
+
+ srm->vlib_main = vm;
+ srm->vnet_main = vnet_get_main ();
+
+ ppr_dpo_type = dpo_register_new_type (&ppr_vft, ppr_nodes);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ip4_source_and_port_range_check_init);
+
+protocol_port_range_dpo_t *
+protocol_port_range_dpo_alloc (void)
+{
+ protocol_port_range_dpo_t *ppr_dpo;
+
+ pool_get_aligned (ppr_dpo_pool, ppr_dpo, CLIB_CACHE_LINE_BYTES);
+ memset (ppr_dpo, 0, sizeof (*ppr_dpo));
+
+ ppr_dpo->n_free_ranges = N_PORT_RANGES_PER_DPO;
+
+ return (ppr_dpo);
+}
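+
+/*
+ * Layout note (summarizing the structures used above): each
+ * protocol_port_range_dpo_t holds N_BLOCKS_PER_DPO blocks, and each
+ * block packs 8 parallel (low, hi) u16 pairs describing half-open
+ * port ranges [low, hi). A zero 'low' slot marks an unused range,
+ * and the side-by-side layout lets the port check compare one
+ * dst_port against 8 ranges at a time.
+ */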
+
+
+static int
+add_port_range_adjacency (u32 fib_index,
+ ip4_address_t * address,
+ u32 length, u16 * low_ports, u16 * high_ports)
+{
+ protocol_port_range_dpo_t *ppr_dpo;
+ dpo_id_t dpop = DPO_INVALID;
+ int i, j, k;
+
+ fib_node_index_t fei;
+ fib_prefix_t pfx = {
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_len = length,
+ .fp_addr = {
+ .ip4 = *address,
+ },
+ };
+
+ /*
+ * check to see if we have already sourced this prefix
+ */
+ fei = fib_table_lookup_exact_match (fib_index, &pfx);
+
+ if (FIB_NODE_INDEX_INVALID == fei)
+ {
+ /*
+ * this is a first time add for this prefix.
+ */
+ ppr_dpo = protocol_port_range_dpo_alloc ();
+ }
+ else
+ {
+ /*
+ * the prefix is already there.
+       * check it was sourced by us, and if so get the range DPO from it.
+ */
+ dpo_id_t dpo = DPO_INVALID;
+ const dpo_id_t *bucket;
+
+ if (fib_entry_get_dpo_for_source (fei, FIB_SOURCE_SPECIAL, &dpo))
+ {
+ /*
+ * there is existing state. we'll want to add the new ranges to it
+ */
+ bucket =
+ load_balance_get_bucket_i (load_balance_get (dpo.dpoi_index), 0);
+ ppr_dpo = protocol_port_range_dpo_get (bucket->dpoi_index);
+ dpo_reset (&dpo);
+ }
+ else
+ {
+ /*
+ * there is no PPR state associated with this prefix,
+ * so we'll need a new DPO
+ */
+ ppr_dpo = protocol_port_range_dpo_alloc ();
+ }
+ }
+
+ if (vec_len (low_ports) > ppr_dpo->n_free_ranges)
+ return VNET_API_ERROR_EXCEEDED_NUMBER_OF_RANGES_CAPACITY;
+
+ j = k = 0;
+
+ for (i = 0; i < vec_len (low_ports); i++)
+ {
+ for (; j < N_BLOCKS_PER_DPO; j++)
+ {
+ for (; k < 8; k++)
+ {
+ if (ppr_dpo->blocks[j].low.as_u16[k] == 0)
+ {
+ ppr_dpo->blocks[j].low.as_u16[k] = low_ports[i];
+ ppr_dpo->blocks[j].hi.as_u16[k] = high_ports[i];
+ goto doublebreak;
+ }
+ }
+ }
+ doublebreak:;
+ }
+ ppr_dpo->n_used_blocks = j + 1;
+
+ /*
+ * add or update the entry in the FIB
+ */
+ dpo_set (&dpop, ppr_dpo_type, DPO_PROTO_IP4, (ppr_dpo - ppr_dpo_pool));
+
+ if (FIB_NODE_INDEX_INVALID == fei)
+ {
+ fib_table_entry_special_dpo_add (fib_index,
+ &pfx,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_NONE, &dpop);
+ }
+ else
+ {
+ fib_entry_special_update (fei,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_NONE, &dpop);
+ }
+
+ return 0;
+}
+
+static int
+remove_port_range_adjacency (u32 fib_index,
+ ip4_address_t * address,
+ u32 length, u16 * low_ports, u16 * high_ports)
+{
+ protocol_port_range_dpo_t *ppr_dpo;
+ fib_node_index_t fei;
+ int i, j, k;
+
+ fib_prefix_t pfx = {
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_len = length,
+ .fp_addr = {
+ .ip4 = *address,
+ },
+ };
+
+ /*
+ * check to see if we have sourced this prefix
+ */
+ fei = fib_table_lookup_exact_match (fib_index, &pfx);
+
+ if (FIB_NODE_INDEX_INVALID == fei)
+ {
+ /*
+ * not one of ours
+ */
+ return VNET_API_ERROR_INCORRECT_ADJACENCY_TYPE;
+ }
+ else
+ {
+ /*
+ * the prefix is already there.
+ * check it was sourced by us
+ */
+ dpo_id_t dpo = DPO_INVALID;
+ const dpo_id_t *bucket;
+
+ if (fib_entry_get_dpo_for_source (fei, FIB_SOURCE_SPECIAL, &dpo))
+ {
+ /*
+	   * there is existing state; remove the requested ranges from it
+ */
+ bucket =
+ load_balance_get_bucket_i (load_balance_get (dpo.dpoi_index), 0);
+ ppr_dpo = protocol_port_range_dpo_get (bucket->dpoi_index);
+ dpo_reset (&dpo);
+ }
+ else
+ {
+ /*
+ * not one of ours
+ */
+ return VNET_API_ERROR_INCORRECT_ADJACENCY_TYPE;
+ }
+ }
+
+ for (i = 0; i < vec_len (low_ports); i++)
+ {
+ for (j = 0; j < N_BLOCKS_PER_DPO; j++)
+ {
+ for (k = 0; k < 8; k++)
+ {
+ if (low_ports[i] == ppr_dpo->blocks[j].low.as_u16[k] &&
+ high_ports[i] == ppr_dpo->blocks[j].hi.as_u16[k])
+ {
+ ppr_dpo->blocks[j].low.as_u16[k] =
+ ppr_dpo->blocks[j].hi.as_u16[k] = 0;
+ goto doublebreak;
+ }
+ }
+ }
+ doublebreak:;
+ }
+
+ ppr_dpo->n_free_ranges = 0;
+
+ /* Have we deleted all ranges yet? */
+ for (i = 0; i < N_BLOCKS_PER_DPO; i++)
+ {
+ for (j = 0; j < 8; j++)
+ {
+	  if (ppr_dpo->blocks[i].low.as_u16[j] == 0)
+ ppr_dpo->n_free_ranges++;
+ }
+ }
+
+ if (N_PORT_RANGES_PER_DPO == ppr_dpo->n_free_ranges)
+ {
+ /* Yes, lose the adjacency... */
+ fib_table_entry_special_remove (fib_index, &pfx, FIB_SOURCE_SPECIAL);
+ }
+ else
+ {
+	  /*
+	   * compact the ranges down to a contiguous block
+	   * FIXME: not yet implemented
+	   */
+ }
+
+ return 0;
+}
+
+// This will be moved to another file and implemented post API freeze.
+int
+ip6_source_and_port_range_check_add_del (ip6_address_t * address,
+ u32 length,
+ u32 vrf_id,
+ u16 * low_ports,
+ u16 * high_ports, int is_add)
+{
+ return 0;
+}
+
+int
+ip4_source_and_port_range_check_add_del (ip4_address_t * address,
+ u32 length,
+ u32 vrf_id,
+ u16 * low_ports,
+ u16 * high_ports, int is_add)
+{
+ u32 fib_index;
+
+ fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, vrf_id);
+
+ if (is_add == 0)
+ {
+ remove_port_range_adjacency (fib_index, address, length,
+ low_ports, high_ports);
+ }
+ else
+ {
+ add_port_range_adjacency (fib_index, address, length,
+ low_ports, high_ports);
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+ip_source_and_port_range_check_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ u16 *low_ports = 0;
+ u16 *high_ports = 0;
+ u16 this_low;
+ u16 this_hi;
+ ip4_address_t ip4_addr;
+  ip6_address_t ip6_addr;	/* to move to a generic impl. once IPv6 is done */
+ u32 length;
+ u32 tmp, tmp2;
+ u32 vrf_id = ~0;
+ int is_add = 1, ip_ver = ~0;
+ int rv;
+
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%U/%d", unformat_ip4_address, &ip4_addr, &length))
+ ip_ver = 4;
+ else
+ if (unformat
+ (input, "%U/%d", unformat_ip6_address, &ip6_addr, &length))
+ ip_ver = 6;
+ else if (unformat (input, "vrf %d", &vrf_id))
+ ;
+ else if (unformat (input, "del"))
+ is_add = 0;
+ else if (unformat (input, "port %d", &tmp))
+ {
+ if (tmp == 0 || tmp > 65535)
+ return clib_error_return (0, "port %d out of range", tmp);
+ this_low = tmp;
+ this_hi = this_low + 1;
+ vec_add1 (low_ports, this_low);
+ vec_add1 (high_ports, this_hi);
+ }
+ else if (unformat (input, "range %d - %d", &tmp, &tmp2))
+ {
+ if (tmp > tmp2)
+ return clib_error_return (0, "ports %d and %d out of order",
+ tmp, tmp2);
+ if (tmp == 0 || tmp > 65535)
+ return clib_error_return (0, "low port %d out of range", tmp);
+ if (tmp2 == 0 || tmp2 > 65535)
+ return clib_error_return (0, "high port %d out of range", tmp2);
+ this_low = tmp;
+ this_hi = tmp2 + 1;
+ vec_add1 (low_ports, this_low);
+ vec_add1 (high_ports, this_hi);
+ }
+ else
+ break;
+ }
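+
+  /* Note: high_ports entries are stored exclusive (last port + 1),
+   * matching the half-open [low, hi) convention used when the ranges
+   * are written into the DPO blocks. */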
+
+ if (ip_ver == ~0)
+ return clib_error_return (0, " <address>/<mask> not specified");
+
+ if (vrf_id == ~0)
+ return clib_error_return (0, " VRF ID required, not specified");
+
+ if (vec_len (low_ports) == 0)
+ return clib_error_return (0,
+ " Both VRF ID and range/port must be set for a protocol.");
+
+ if (vrf_id == 0)
+ return clib_error_return (0, " VRF ID can not be 0 (default).");
+
+
+ if (ip_ver == 4)
+ rv = ip4_source_and_port_range_check_add_del
+ (&ip4_addr, length, vrf_id, low_ports, high_ports, is_add);
+ else
+ return clib_error_return (0, " IPv6 in subsequent patch");
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_INCORRECT_ADJACENCY_TYPE:
+ return clib_error_return
+ (0, " Incorrect adjacency for add/del operation");
+
+ case VNET_API_ERROR_EXCEEDED_NUMBER_OF_PORTS_CAPACITY:
+ return clib_error_return (0, " Too many ports in add/del operation");
+
+ case VNET_API_ERROR_EXCEEDED_NUMBER_OF_RANGES_CAPACITY:
+ return clib_error_return
+ (0, " Too many ranges requested for add operation");
+
+ default:
+ return clib_error_return (0, " returned an unexpected value: %d", rv);
+ }
+
+ return 0;
+}
+
+/*?
+ * This command adds an IP Subnet and range of ports to be validated
+ * by an IP FIB table (VRF).
+ *
+ * @todo This is incomplete. This needs a detailed description and a
+ * practical example.
+ *
+ * @cliexpar
+ * Example of how to add an IPv4 subnet and single port to an IPv4 FIB table:
+ * @cliexcmd{set ip source-and-port-range-check vrf 7 172.16.1.0/24 port 23}
+ * Example of how to add an IPv4 subnet and range of ports to an IPv4 FIB table:
+ * @cliexcmd{set ip source-and-port-range-check vrf 7 172.16.1.0/24 range 23 - 100}
+ * Example of how to delete an IPv4 subnet and single port from an IPv4 FIB table:
+ * @cliexcmd{set ip source-and-port-range-check vrf 7 172.16.1.0/24 port 23 del}
+ * Example of how to delete an IPv4 subnet and range of ports from an IPv4 FIB table:
+ * @cliexcmd{set ip source-and-port-range-check vrf 7 172.16.1.0/24 range 23 - 100 del}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ip_source_and_port_range_check_command, static) = {
+ .path = "set ip source-and-port-range-check",
+ .function = ip_source_and_port_range_check_command_fn,
+ .short_help =
+ "set ip source-and-port-range-check vrf <table-id> <ip-addr>/<mask> {port nn | range <nn> - <nn>} [del]",
+};
+/* *INDENT-ON* */
+
+
+static clib_error_t *
+show_source_and_port_range_check_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ protocol_port_range_dpo_t *ppr_dpo;
+ u32 fib_index;
+ u8 addr_set = 0;
+ u32 vrf_id = ~0;
+ int rv, i, j;
+ u32 port = 0;
+ fib_prefix_t pfx = {
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_len = 32,
+ };
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%U", unformat_ip4_address, &pfx.fp_addr.ip4))
+ addr_set = 1;
+ else if (unformat (input, "vrf %d", &vrf_id))
+ ;
+ else if (unformat (input, "port %d", &port))
+ ;
+ else
+ break;
+ }
+
+ if (addr_set == 0)
+ return clib_error_return (0, "<address> not specified");
+
+ if (vrf_id == ~0)
+ return clib_error_return (0, "VRF ID required, not specified");
+
+ fib_index = fib_table_find (FIB_PROTOCOL_IP4, vrf_id);
+ if (~0 == fib_index)
+ return clib_error_return (0, "VRF %d not found", vrf_id);
+
+ /*
+ * find the longest prefix match on the address requested,
+ * check it was sourced by us
+ */
+ dpo_id_t dpo = DPO_INVALID;
+ const dpo_id_t *bucket;
+
+ if (!fib_entry_get_dpo_for_source (fib_table_lookup (fib_index, &pfx),
+ FIB_SOURCE_SPECIAL, &dpo))
+ {
+ /*
+ * not one of ours
+ */
+ vlib_cli_output (vm, "%U: src address drop", format_ip4_address,
+ &pfx.fp_addr.ip4);
+ return 0;
+ }
+
+ bucket = load_balance_get_bucket_i (load_balance_get (dpo.dpoi_index), 0);
+ ppr_dpo = protocol_port_range_dpo_get (bucket->dpoi_index);
+ dpo_reset (&dpo);
+
+ if (port)
+ {
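+      /* probe with an arbitrary sentinel next-index; getting 1234 back
+       * means the port range check passed */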
+ rv = check_adj_port_range_x1 (ppr_dpo, (u16) port, 1234);
+ if (rv == 1234)
+ vlib_cli_output (vm, "%U port %d PASS", format_ip4_address,
+ &pfx.fp_addr.ip4, port);
+ else
+ vlib_cli_output (vm, "%U port %d FAIL", format_ip4_address,
+ &pfx.fp_addr.ip4, port);
+ return 0;
+ }
+ else
+ {
+ u8 *s;
+
+ s = format (0, "%U: ", format_ip4_address, &pfx.fp_addr.ip4);
+
+ for (i = 0; i < N_BLOCKS_PER_DPO; i++)
+ {
+ for (j = 0; j < 8; j++)
+ {
+ if (ppr_dpo->blocks[i].low.as_u16[j])
+ s = format (s, "%d - %d ",
+ (u32) ppr_dpo->blocks[i].low.as_u16[j],
+ (u32) ppr_dpo->blocks[i].hi.as_u16[j]);
+ }
+ }
+ vlib_cli_output (vm, "%s", s);
+ vec_free (s);
+ }
+
+ return 0;
+}
+
+/*?
+ * Display the range of ports being validated by an IPv4 FIB for a given
+ * IP or subnet, or test if a given IP and port are being validated.
+ *
+ * @todo This is incomplete. This needs a detailed description and a
+ * practical example.
+ *
+ * @cliexpar
+ * Example of how to display the set of ports being validated for a given
+ * IPv4 subnet:
+ * @cliexstart{show ip source-and-port-range-check vrf 7 172.16.2.0}
+ * 172.16.2.0: 23 - 101
+ * @cliexend
+ * Example of how to test whether a given IPv4 address and port
+ * are being validated:
+ * @cliexstart{show ip source-and-port-range-check vrf 7 172.16.2.2 port 23}
+ * 172.16.2.2 port 23 PASS
+ * @cliexend
+ * @cliexstart{show ip source-and-port-range-check vrf 7 172.16.2.2 port 250}
+ * 172.16.2.2 port 250 FAIL
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_source_and_port_range_check, static) = {
+ .path = "show ip source-and-port-range-check",
+ .function = show_source_and_port_range_check_fn,
+ .short_help =
+ "show ip source-and-port-range-check vrf <table-id> <ip-addr> [port <n>]",
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip4_source_check.c b/src/vnet/ip/ip4_source_check.c
new file mode 100644
index 00000000000..d461cc885d6
--- /dev/null
+++ b/src/vnet/ip/ip4_source_check.c
@@ -0,0 +1,573 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip4_source_check.c: IP v4 check source address (unicast RPF check)
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/ip/ip.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/fib_urpf_list.h>
+#include <vnet/dpo/load_balance.h>
+
+/**
+ * @file
+ * @brief IPv4 Unicast Source Check.
+ *
+ * This file contains the IPv4 interface unicast source check.
+ */
+
+
+typedef struct
+{
+ u8 packet_data[64];
+ index_t urpf;
+} ip4_source_check_trace_t;
+
+static u8 *
+format_ip4_source_check_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ ip4_source_check_trace_t *t = va_arg (*va, ip4_source_check_trace_t *);
+
+ s = format (s, "%U",
+ format_ip4_header, t->packet_data, sizeof (t->packet_data));
+
+ return s;
+}
+
+typedef enum
+{
+ IP4_SOURCE_CHECK_NEXT_DROP,
+ IP4_SOURCE_CHECK_N_NEXT,
+} ip4_source_check_next_t;
+
+typedef enum
+{
+ IP4_SOURCE_CHECK_REACHABLE_VIA_RX,
+ IP4_SOURCE_CHECK_REACHABLE_VIA_ANY,
+} ip4_source_check_type_t;
+
+typedef union
+{
+ u32 fib_index;
+} ip4_source_check_config_t;
+
+always_inline uword
+ip4_source_check_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ ip4_source_check_type_t source_check_type)
+{
+ u32 n_left_from, *from, *to_next;
+ u32 next_index;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip4_input_node.index);
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
+ /* stride */ 1,
+ sizeof (ip4_source_check_trace_t));
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ vlib_buffer_t *p0, *p1;
+ ip4_header_t *ip0, *ip1;
+ ip4_fib_mtrie_t *mtrie0, *mtrie1;
+ ip4_fib_mtrie_leaf_t leaf0, leaf1;
+ ip4_source_check_config_t *c0, *c1;
+ const load_balance_t *lb0, *lb1;
+ u32 pi0, next0, pass0, lb_index0;
+ u32 pi1, next1, pass1, lb_index1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, sizeof (ip0[0]), LOAD);
+ CLIB_PREFETCH (p3->data, sizeof (ip1[0]), LOAD);
+ }
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+
+ ip0 = vlib_buffer_get_current (p0);
+ ip1 = vlib_buffer_get_current (p1);
+
+ c0 =
+ vnet_feature_next_with_data (vnet_buffer (p0)->sw_if_index
+ [VLIB_RX], &next0, p0,
+ sizeof (c0[0]));
+ c1 =
+ vnet_feature_next_with_data (vnet_buffer (p1)->sw_if_index
+ [VLIB_RX], &next1, p1,
+ sizeof (c1[0]));
+
+ mtrie0 = &ip4_fib_get (c0->fib_index)->mtrie;
+ mtrie1 = &ip4_fib_get (c1->fib_index)->mtrie;
+
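+	  /* Walk the mtrie one byte of the source address per step: after
+	   * the four lookup steps below, the leaf holds the load-balance
+	   * index for the longest-prefix match on the source address. */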
+ leaf0 = leaf1 = IP4_FIB_MTRIE_LEAF_ROOT;
+
+ leaf0 =
+ ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 0);
+ leaf1 =
+ ip4_fib_mtrie_lookup_step (mtrie1, leaf1, &ip1->src_address, 0);
+
+ leaf0 =
+ ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 1);
+ leaf1 =
+ ip4_fib_mtrie_lookup_step (mtrie1, leaf1, &ip1->src_address, 1);
+
+ leaf0 =
+ ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 2);
+ leaf1 =
+ ip4_fib_mtrie_lookup_step (mtrie1, leaf1, &ip1->src_address, 2);
+
+ leaf0 =
+ ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 3);
+ leaf1 =
+ ip4_fib_mtrie_lookup_step (mtrie1, leaf1, &ip1->src_address, 3);
+
+ lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+ lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
+
+ lb0 = load_balance_get (lb_index0);
+ lb1 = load_balance_get (lb_index1);
+
+ /* Pass multicast. */
+ pass0 = ip4_address_is_multicast (&ip0->src_address)
+ || ip0->src_address.as_u32 == clib_host_to_net_u32 (0xFFFFFFFF);
+ pass1 = ip4_address_is_multicast (&ip1->src_address)
+ || ip1->src_address.as_u32 == clib_host_to_net_u32 (0xFFFFFFFF);
+
+ if (IP4_SOURCE_CHECK_REACHABLE_VIA_RX == source_check_type)
+ {
+ pass0 |= fib_urpf_check (lb0->lb_urpf,
+ vnet_buffer (p0)->sw_if_index
+ [VLIB_RX]);
+ pass1 |=
+ fib_urpf_check (lb1->lb_urpf,
+ vnet_buffer (p1)->sw_if_index[VLIB_RX]);
+ }
+ else
+ {
+ pass0 |= fib_urpf_check_size (lb0->lb_urpf);
+ pass1 |= fib_urpf_check_size (lb1->lb_urpf);
+ }
+ next0 = (pass0 ? next0 : IP4_SOURCE_CHECK_NEXT_DROP);
+ next1 = (pass1 ? next1 : IP4_SOURCE_CHECK_NEXT_DROP);
+
+ p0->error =
+ error_node->errors[IP4_ERROR_UNICAST_SOURCE_CHECK_FAILS];
+ p1->error =
+ error_node->errors[IP4_ERROR_UNICAST_SOURCE_CHECK_FAILS];
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, pi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ ip4_header_t *ip0;
+ ip4_fib_mtrie_t *mtrie0;
+ ip4_fib_mtrie_leaf_t leaf0;
+ ip4_source_check_config_t *c0;
+ u32 pi0, next0, pass0, lb_index0;
+ const load_balance_t *lb0;
+
+ pi0 = from[0];
+ to_next[0] = pi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip0 = vlib_buffer_get_current (p0);
+
+ c0 =
+ vnet_feature_next_with_data (vnet_buffer (p0)->sw_if_index
+ [VLIB_RX], &next0, p0,
+ sizeof (c0[0]));
+
+ mtrie0 = &ip4_fib_get (c0->fib_index)->mtrie;
+
+ leaf0 = IP4_FIB_MTRIE_LEAF_ROOT;
+
+ leaf0 =
+ ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 0);
+
+ leaf0 =
+ ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 1);
+
+ leaf0 =
+ ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 2);
+
+ leaf0 =
+ ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 3);
+
+ lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+
+ lb0 = load_balance_get (lb_index0);
+
+ /* Pass multicast. */
+ pass0 = ip4_address_is_multicast (&ip0->src_address)
+ || ip0->src_address.as_u32 == clib_host_to_net_u32 (0xFFFFFFFF);
+
+ if (IP4_SOURCE_CHECK_REACHABLE_VIA_RX == source_check_type)
+ {
+ pass0 |= fib_urpf_check (lb0->lb_urpf,
+ vnet_buffer (p0)->sw_if_index
+ [VLIB_RX]);
+ }
+ else
+ {
+ pass0 |= fib_urpf_check_size (lb0->lb_urpf);
+ }
+
+ next0 = (pass0 ? next0 : IP4_SOURCE_CHECK_NEXT_DROP);
+ p0->error =
+ error_node->errors[IP4_ERROR_UNICAST_SOURCE_CHECK_FAILS];
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+static uword
+ip4_source_check_reachable_via_any (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return ip4_source_check_inline (vm, node, frame,
+ IP4_SOURCE_CHECK_REACHABLE_VIA_ANY);
+}
+
+static uword
+ip4_source_check_reachable_via_rx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return ip4_source_check_inline (vm, node, frame,
+ IP4_SOURCE_CHECK_REACHABLE_VIA_RX);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_check_source_reachable_via_any) = {
+ .function = ip4_source_check_reachable_via_any,
+ .name = "ip4-source-check-via-any",
+ .vector_size = sizeof (u32),
+
+ .n_next_nodes = IP4_SOURCE_CHECK_N_NEXT,
+ .next_nodes = {
+ [IP4_SOURCE_CHECK_NEXT_DROP] = "error-drop",
+ },
+
+ .format_buffer = format_ip4_header,
+ .format_trace = format_ip4_source_check_trace,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_check_source_reachable_via_any,
+ ip4_source_check_reachable_via_any);
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_check_source_reachable_via_rx) = {
+ .function = ip4_source_check_reachable_via_rx,
+ .name = "ip4-source-check-via-rx",
+ .vector_size = sizeof (u32),
+
+ .n_next_nodes = IP4_SOURCE_CHECK_N_NEXT,
+ .next_nodes = {
+ [IP4_SOURCE_CHECK_NEXT_DROP] = "error-drop",
+ },
+
+ .format_buffer = format_ip4_header,
+ .format_trace = format_ip4_source_check_trace,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_check_source_reachable_via_rx,
+ ip4_source_check_reachable_via_rx);
+
+static clib_error_t *
+set_ip_source_check (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ vnet_main_t *vnm = vnet_get_main ();
+ ip4_main_t *im = &ip4_main;
+ clib_error_t *error = 0;
+ u32 sw_if_index, is_del;
+ ip4_source_check_config_t config;
+ char *feature_name = "ip4-source-check-via-rx";
+
+ sw_if_index = ~0;
+ is_del = 0;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat_user
+ (line_input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ ;
+ else if (unformat (line_input, "del"))
+ is_del = 1;
+ else if (unformat (line_input, "loose"))
+ feature_name = "ip4-source-check-via-any";
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+
+ if (~0 == sw_if_index)
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+
+ config.fib_index = im->fib_index_by_sw_if_index[sw_if_index];
+ vnet_feature_enable_disable ("ip4-unicast", feature_name, sw_if_index,
+ is_del == 0, &config, sizeof (config));
+done:
+ return error;
+}
+
+/*?
+ * This command adds the 'ip4-source-check-via-rx' graph node for
+ * a given interface. By adding the IPv4 source check graph node to
+ * an interface, the code verifies that the source address of incoming
+ * unicast packets is reachable over the incoming interface. Two flavours
+ * are supported (the default is strict):
+ * - loose: accept ingress packet if there is a route to reach the source
+ * - strict: accept ingress packet if it arrived on an interface which
+ *          the route to the source uses, i.e. an interface via which
+ *          the source is reachable.
+ *
+ * @cliexpar
+ * @parblock
+ * Example of graph node before range checking is enabled:
+ * @cliexstart{show vlib graph ip4-source-check-via-rx}
+ * Name Next Previous
+ * ip4-source-check-via-rx error-drop [0]
+ * @cliexend
+ *
+ * Example of how to enable unicast source checking on an interface:
+ * @cliexcmd{set interface ip source-check GigabitEthernet2/0/0 loose}
+ *
+ * Example of graph node after range checking is enabled:
+ * @cliexstart{show vlib graph ip4-source-check-via-rx}
+ * Name Next Previous
+ * ip4-source-check-via-rx error-drop [0] ip4-input-no-checksum
+ * ip4-source-and-port-range- ip4-input
+ * @cliexend
+ *
+ * Example of how to display the feature enabled on an interface:
+ * @cliexstart{show ip interface features GigabitEthernet2/0/0}
+ * IP feature paths configured on GigabitEthernet2/0/0...
+ *
+ * ipv4 unicast:
+ * ip4-source-check-via-rx
+ * ip4-lookup
+ *
+ * ipv4 multicast:
+ * ip4-lookup-multicast
+ *
+ * ipv4 multicast:
+ * interface-output
+ *
+ * ipv6 unicast:
+ * ip6-lookup
+ *
+ * ipv6 multicast:
+ * ip6-lookup
+ *
+ * ipv6 multicast:
+ * interface-output
+ * @cliexend
+ *
+ * Example of how to disable unicast source checking on an interface:
+ * @cliexcmd{set interface ip source-check GigabitEthernet2/0/0 del}
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_ip_source_check_command, static) = {
+ .path = "set interface ip source-check",
+ .function = set_ip_source_check,
+ .short_help = "set interface ip source-check <interface> [strict|loose] [del]",
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+ip_source_check_accept (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ fib_prefix_t pfx = {
+ .fp_proto = FIB_PROTOCOL_IP4,
+ };
+ clib_error_t *error = NULL;
+ u32 table_id, is_add, fib_index;
+
+ is_add = 1;
+ table_id = ~0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "table %d", &table_id))
+ ;
+ else if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "add"))
+ is_add = 1;
+ else if (unformat (line_input, "%U/%d",
+ unformat_ip4_address, &pfx.fp_addr.ip4, &pfx.fp_len))
+ pfx.fp_proto = FIB_PROTOCOL_IP4;
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+
+ if (~0 != table_id)
+ {
+ fib_index = fib_table_id_find_fib_index (pfx.fp_proto, table_id);
+ if (~0 == fib_index)
+ {
+ error = clib_error_return (0, "Nonexistent table id %d", table_id);
+ goto done;
+ }
+ }
+ else
+ {
+ fib_index = 0;
+ }
+
+ if (is_add)
+ {
+ fib_table_entry_special_add (fib_index,
+ &pfx,
+ FIB_SOURCE_URPF_EXEMPT,
+ FIB_ENTRY_FLAG_DROP, ADJ_INDEX_INVALID);
+ }
+ else
+ {
+ fib_table_entry_special_remove (fib_index,
+ &pfx, FIB_SOURCE_URPF_EXEMPT);
+ }
+
+done:
+ return (error);
+}
+
+/*?
+ * Add an exemption for a prefix to pass the Unicast Reverse Path
+ * Forwarding (uRPF) loose check. This is for testing purposes only.
+ * If the '<em>table</em>' is not entered, it defaults to 0. The default
+ * is to '<em>add</em>'. VPP always performs a loose uRPF check for
+ * for-us traffic.
+ *
+ * @cliexpar
+ * Example of how to add a uRPF exception to a FIB table to pass the
+ * loose RPF tests:
+ * @cliexcmd{ip urpf-accept table 7 add}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ip_source_check_accept_command, static) = {
+ .path = "ip urpf-accept",
+ .function = ip_source_check_accept,
+ .short_help = "ip urpf-accept [table <table-id>] [add|del]",
+};
+/* *INDENT-ON* */
+
+
+/* Dummy init function to get us linked in. */
+clib_error_t *
+ip4_source_check_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ip4_source_check_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip4_test.c b/src/vnet/ip/ip4_test.c
new file mode 100644
index 00000000000..45d171130df
--- /dev/null
+++ b/src/vnet/ip/ip4_test.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+/**
+ * @file
+ * @brief IPv4 FIB Tester.
+ *
+ * Not compiled in by default. Adds, probes, and deletes a bunch of
+ * random routes / masks and makes sure that the mtrie agrees with
+ * the hash-table FIB.
+ *
+ * Manipulate the FIB by means of the debug CLI commands, to minimize
+ * the chances of doing something idiotic.
+ */
+
+/*
+ * These routines need to be redeclared non-static elsewhere.
+ *
+ * Also: rename ip_route() -> vnet_ip_route_cmd() and add the usual
+ * test_route_init() call to main.c
+ */
+clib_error_t *vnet_ip_route_cmd (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd_arg);
+
+int ip4_lookup_validate (ip4_address_t * a, u32 fib_index0);
+
+ip4_fib_t *find_fib_by_table_index_or_id (ip4_main_t * im,
+ u32 table_index_or_id, u32 flags);
+
+/* Routes to insert/delete/probe in FIB */
+typedef struct
+{
+ ip4_address_t address;
+ u32 mask_width;
+ u32 interface_id; /* not an xx_if_index */
+} test_route_t;
+
+typedef struct
+{
+ /* Test routes in use */
+ test_route_t *route_pool;
+
+ /* Number of fake ethernets created */
+ u32 test_interfaces_created;
+} test_main_t;
+
+test_main_t test_main;
+
+/* fake ethernet device class, distinct from "fake-ethX" */
+static u8 *
+format_test_interface_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, "test-eth%d", dev_instance);
+}
+
+static uword
+dummy_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ clib_warning ("you shouldn't be here, leaking buffers...");
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (test_interface_device_class,static) = {
+ .name = "Test interface",
+ .format_device_name = format_test_interface_name,
+ .tx_function = dummy_interface_tx,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+thrash (vlib_main_t * vm,
+ unformat_input_t * main_input, vlib_cli_command_t * cmd_arg)
+{
+ u32 seed = 0xdeaddabe;
+ u32 niter = 10;
+ u32 nroutes = 10;
+ u32 ninterfaces = 4;
+ f64 min_mask_bits = 7.0;
+ f64 max_mask_bits = 32.0;
+ u32 table_id = 11; /* my amp goes to 11 (use fib 11) */
+ u32 table_index;
+ int iter, i;
+ u8 *cmd;
+ test_route_t *tr;
+ test_main_t *tm = &test_main;
+ ip4_main_t *im = &ip4_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ unformat_input_t cmd_input;
+ f64 rf;
+ u32 *masks = 0;
+ u32 tmp;
+ u32 hw_if_index;
+ clib_error_t *error = 0;
+ uword *p;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 hw_address[6];
+ ip4_fib_t *fib;
+ int verbose = 0;
+
+ /* Precompute mask width -> mask vector */
+ tmp = (u32) ~ 0;
+ vec_validate (masks, 32);
+ for (i = 32; i > 0; i--)
+ {
+ masks[i] = tmp;
+ tmp <<= 1;
+ }
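+  /* masks[w] now holds a host-byte-order /w netmask; masks[0] stays 0
+   * courtesy of vec_validate. */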
+
+ if (unformat_user (main_input, unformat_line_input, line_input))
+ {
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "seed %d", &seed))
+ ;
+ else if (unformat (line_input, "niter %d", &niter))
+ ;
+ else if (unformat (line_input, "nroutes %d", &nroutes))
+ ;
+ else if (unformat (line_input, "ninterfaces %d", &ninterfaces))
+ ;
+ else if (unformat (line_input, "min-mask-bits %d", &tmp))
+ min_mask_bits = (f64) tmp;
+ else if (unformat (line_input, "max-mask-bits %d", &tmp))
+ max_mask_bits = (f64) tmp;
+ else if (unformat (line_input, "verbose"))
+ verbose = 1;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, line_input);
+ }
+ }
+
+ /* Find or create FIB table 11 */
+ fib = ip4_fib_find_or_create_fib_by_table_id (table_id);
+
+ for (i = tm->test_interfaces_created; i < ninterfaces; i++)
+ {
+ vnet_hw_interface_t *hw;
+ memset (hw_address, 0, sizeof (hw_address));
+ hw_address[0] = 0xd0;
+ hw_address[1] = 0x0f;
+ hw_address[5] = i;
+
+ error = ethernet_register_interface
+ (vnm, test_interface_device_class.index, i /* instance */ ,
+ hw_address, &hw_if_index,
+ /* flag change */ 0);
+
+ /* Fake interfaces use FIB table 11 */
+ hw = vnet_get_hw_interface (vnm, hw_if_index);
+ vec_validate (im->fib_index_by_sw_if_index, hw->sw_if_index);
+ im->fib_index_by_sw_if_index[hw->sw_if_index] = fib->index;
+      ip4_sw_interface_enable_disable (hw->sw_if_index, 1);
+ }
+
+ tm->test_interfaces_created = ninterfaces;
+
+ /* Find fib index corresponding to FIB id 11 */
+ p = hash_get (im->fib_index_by_table_id, table_id);
+ if (p == 0)
+ {
+ vlib_cli_output (vm, "Couldn't map fib id %d to fib index\n", table_id);
+ return 0;
+ }
+ table_index = p[0];
+
+ for (iter = 0; iter < niter; iter++)
+ {
+ /* Pick random routes to install */
+ for (i = 0; i < nroutes; i++)
+ {
+ int j;
+
+ pool_get (tm->route_pool, tr);
+ memset (tr, 0, sizeof (*tr));
+
+ again:
+ rf = random_f64 (&seed);
+ tr->mask_width = (u32) (min_mask_bits
+ + rf * (max_mask_bits - min_mask_bits));
+ tmp = random_u32 (&seed);
+ tmp &= masks[tr->mask_width];
+ tr->address.as_u32 = clib_host_to_net_u32 (tmp);
+
+ /* We can't add the same address/mask twice... */
+ for (j = 0; j < i; j++)
+ {
+ test_route_t *prev;
+ prev = pool_elt_at_index (tm->route_pool, j);
+ if ((prev->address.as_u32 == tr->address.as_u32)
+ && (prev->mask_width == tr->mask_width))
+ goto again;
+ }
+
+ rf = random_f64 (&seed);
+ tr->interface_id = (u32) (rf * ninterfaces);
+ }
+
+ /* Add them */
+ for (i = 0; i < nroutes; i++)
+ {
+ tr = pool_elt_at_index (tm->route_pool, i);
+ cmd = format (0, "add table %d %U/%d via test-eth%d",
+ table_id,
+ format_ip4_address, &tr->address,
+ tr->mask_width, tr->interface_id);
+ vec_add1 (cmd, 0);
+ if (verbose)
+ fformat (stderr, "ip route %s\n", cmd);
+ unformat_init_string (&cmd_input, (char *) cmd, vec_len (cmd) - 1);
+ error = vnet_ip_route_cmd (vm, &cmd_input, cmd_arg);
+ if (error)
+ clib_error_report (error);
+ unformat_free (&cmd_input);
+ vec_free (cmd);
+ }
+ /* Probe them */
+ for (i = 0; i < nroutes; i++)
+ {
+ tr = pool_elt_at_index (tm->route_pool, i);
+ if (!ip4_lookup_validate (&tr->address, table_index))
+ {
+ if (verbose)
+ fformat (stderr, "test lookup table %d %U\n",
+ table_index, format_ip4_address, &tr->address);
+
+ fformat (stderr, "FAIL-after-insert: %U/%d\n",
+ format_ip4_address, &tr->address, tr->mask_width);
+ }
+ }
+
+ /* Delete them */
+ for (i = 0; i < nroutes; i++)
+ {
+ int j;
+ tr = pool_elt_at_index (tm->route_pool, i);
+ if (0)
+ cmd = format (0, "del table %d %U/%d via test-eth%d",
+ table_id,
+ format_ip4_address, &tr->address,
+ tr->mask_width, tr->interface_id);
+ else
+ cmd = format (0, "del table %d %U/%d",
+ table_id,
+ format_ip4_address, &tr->address, tr->mask_width);
+ vec_add1 (cmd, 0);
+ if (verbose)
+ fformat (stderr, "ip route %s\n", cmd);
+ unformat_init_string (&cmd_input, (char *) cmd, vec_len (cmd) - 1);
+ error = vnet_ip_route_cmd (vm, &cmd_input, cmd_arg);
+ if (error)
+ clib_error_report (error);
+ unformat_free (&cmd_input);
+ vec_free (cmd);
+
+ /* Make sure all undeleted routes still work */
+ for (j = i + 1; j < nroutes; j++)
+ {
+ test_route_t *rr; /* remaining route */
+ rr = pool_elt_at_index (tm->route_pool, j);
+ if (!ip4_lookup_validate (&rr->address, table_index))
+ {
+ if (verbose)
+ fformat (stderr, "test lookup table %d %U\n",
+ table_index, format_ip4_address, &rr->address);
+
+ fformat (stderr, "FAIL: %U/%d AWOL\n",
+ format_ip4_address, &rr->address, rr->mask_width);
+ fformat (stderr, " iter %d after %d of %d deletes\n",
+ iter, i, nroutes);
+ fformat (stderr, " last route deleted %U/%d\n",
+ format_ip4_address, &tr->address, tr->mask_width);
+ }
+ }
+ }
+
+ pool_free (tm->route_pool);
+ }
+ return 0;
+}
+
+/*?
+ * This command is not in the build by default. It is an internal
+ * command used to test the route functionality.
+ *
+ * Create test routes on IPv4 FIB table 11. Table will be created if it
+ * does not exist.
+ *
+ * There are several optional attributes:
+ * - If not provided, <seed> defaults to 0xdeaddabe.
+ * - If not provided, <num-iter> defaults to 10.
+ * - If not provided, <num-routes> defaults to 10.
+ * - If not provided, <num-iface> defaults to 4.
+ * - If not provided, <min-mask> defaults to 7.0.
+ * - If not provided, <max-mask> defaults to 32.0.
+ *
+ * @cliexpar
+ * Example of how to run:
+ * @cliexcmd{test route}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (test_route_command, static) = {
+ .path = "test route",
+  .short_help = "test route [seed <seed-num>] [niter <num-iter>] [nroutes <num-routes>] [ninterfaces <num-iface>] [min-mask-bits <min-mask>] [max-mask-bits <max-mask>] [verbose]",
+ .function = thrash,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+test_route_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (test_route_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip6.h b/src/vnet/ip/ip6.h
new file mode 100644
index 00000000000..586b7c1b7f2
--- /dev/null
+++ b/src/vnet/ip/ip6.h
@@ -0,0 +1,476 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip6.h: ip6 main include file
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_ip_ip6_h
+#define included_ip_ip6_h
+
+#include <vlib/mc.h>
+#include <vlib/buffer.h>
+#include <vnet/ethernet/packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/ip6_hop_by_hop_packet.h>
+#include <vnet/ip/lookup.h>
+#include <stdbool.h>
+#include <vppinfra/bihash_24_8.h>
+#include <vppinfra/bihash_template.h>
+
+/*
+ * Default size of the ip6 fib hash table
+ */
+#define IP6_FIB_DEFAULT_HASH_NUM_BUCKETS (64 * 1024)
+#define IP6_FIB_DEFAULT_HASH_MEMORY_SIZE (32<<20)
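+/* i.e. 64K buckets backed by a 32 MB heap per bihash instance;
+ * presumably tunable via the lookup_table_nbuckets /
+ * lookup_table_size parameters kept in ip6_main_t below. */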
+
+typedef struct
+{
+ ip6_address_t addr;
+ u32 dst_address_length;
+ u32 vrf_index;
+} ip6_fib_key_t;
+
+typedef struct
+{
+ /* Table ID (hash key) for this FIB. */
+ u32 table_id;
+
+ /* Index into FIB vector. */
+ u32 index;
+
+ /* flow hash configuration */
+ flow_hash_config_t flow_hash_config;
+} ip6_fib_t;
+
+struct ip6_main_t;
+
+typedef void (ip6_add_del_interface_address_function_t)
+ (struct ip6_main_t * im,
+ uword opaque,
+ u32 sw_if_index,
+ ip6_address_t * address,
+ u32 address_length, u32 if_address_index, u32 is_del);
+
+typedef struct
+{
+ ip6_add_del_interface_address_function_t *function;
+ uword function_opaque;
+} ip6_add_del_interface_address_callback_t;
+
+/**
+ * Enumeration of the FIB table instance types
+ */
+typedef enum ip6_fib_table_instance_type_t_
+{
+ /**
+ * This table stores the routes that are used to forward traffic.
+   * The key is the prefix, the result is the adjacency to forward on.
+ */
+ IP6_FIB_TABLE_FWDING,
+ /**
+ * The table that stores ALL routes learned by the DP.
+ * Some of these routes may not be ready to install in forwarding
+ * at a given time.
+ * The key in this table is the prefix, the result is the fib_entry_t
+ */
+ IP6_FIB_TABLE_NON_FWDING,
+} ip6_fib_table_instance_type_t;
+
+#define IP6_FIB_NUM_TABLES (IP6_FIB_TABLE_NON_FWDING+1)
+
+/**
+ * A representation of a single IP6 table
+ */
+typedef struct ip6_fib_table_instance_t_
+{
+ /* The hash table */
+ BVT (clib_bihash) ip6_hash;
+
+ /* bitmap / refcounts / vector of mask widths to search */
+ uword *non_empty_dst_address_length_bitmap;
+ u8 *prefix_lengths_in_search_order;
+ i32 dst_address_length_refcounts[129];
+} ip6_fib_table_instance_t;
+
+typedef struct ip6_main_t
+{
+ /**
+ * The two FIB tables; fwding and non-fwding
+ */
+ ip6_fib_table_instance_t ip6_table[IP6_FIB_NUM_TABLES];
+
+ ip_lookup_main_t lookup_main;
+
+ /* Pool of FIBs. */
+ struct fib_table_t_ *fibs;
+
+  /* Network byte order subnet masks, one per prefix length */
+ ip6_address_t fib_masks[129];
+
+ /* Table index indexed by software interface. */
+ u32 *fib_index_by_sw_if_index;
+
+ /* IP6 enabled count by software interface */
+ u8 *ip_enabled_by_sw_if_index;
+
+ /* Hash table mapping table id to fib index.
+ ID space is not necessarily dense; index space is dense. */
+ uword *fib_index_by_table_id;
+
+ /* Hash table mapping interface rewrite adjacency index by sw if index. */
+ uword *interface_route_adj_index_by_sw_if_index;
+
+ /* Functions to call when interface address changes. */
+ ip6_add_del_interface_address_callback_t
+ * add_del_interface_address_callbacks;
+
+ /* Template used to generate IP6 neighbor solicitation packets. */
+ vlib_packet_template_t discover_neighbor_packet_template;
+
+ /* ip6 lookup table config parameters */
+ u32 lookup_table_nbuckets;
+ uword lookup_table_size;
+
+ /* Seed for Jenkins hash used to compute ip6 flow hash. */
+ u32 flow_hash_seed;
+
+ struct
+ {
+ /* TTL to use for host generated packets. */
+ u8 ttl;
+
+ u8 pad[3];
+ } host_config;
+
+ /* HBH processing enabled? */
+ u8 hbh_enabled;
+} ip6_main_t;
+
+/* Global ip6 main structure. */
+extern ip6_main_t ip6_main;
+
+/* Global ip6 input node. Errors get attached to ip6 input node. */
+extern vlib_node_registration_t ip6_input_node;
+extern vlib_node_registration_t ip6_rewrite_node;
+extern vlib_node_registration_t ip6_rewrite_local_node;
+extern vlib_node_registration_t ip6_discover_neighbor_node;
+extern vlib_node_registration_t ip6_glean_node;
+extern vlib_node_registration_t ip6_midchain_node;
+
+extern vlib_node_registration_t ip6_icmp_neighbor_discovery_event_node;
+
+/* ipv6 neighbor discovery - timer/event types */
+typedef enum
+{
+ ICMP6_ND_EVENT_INIT,
+} ip6_icmp_neighbor_discovery_event_type_t;
+
+typedef union
+{
+ u32 add_del_swindex;
+ struct
+ {
+ u32 up_down_swindex;
+ u32 fib_index;
+ } up_down_event;
+} ip6_icmp_neighbor_discovery_event_data_t;
+
+always_inline uword
+ip6_destination_matches_route (const ip6_main_t * im,
+ const ip6_address_t * key,
+ const ip6_address_t * dest, uword dest_length)
+{
+ int i;
+ for (i = 0; i < ARRAY_LEN (key->as_uword); i++)
+ {
+ if ((key->as_uword[i] ^ dest->as_uword[i]) & im->
+ fib_masks[dest_length].as_uword[i])
+ return 0;
+ }
+ return 1;
+}
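+
+/*
+ * Illustrative example (not in the original): with dest = 2001:db8::/32,
+ * key 2001:db8::1 matches because the XOR of the two addresses is zero
+ * under fib_masks[32], while key 2001:db9::1 differs in a masked bit
+ * and the loop above returns 0.
+ */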
+
+always_inline uword
+ip6_destination_matches_interface (ip6_main_t * im,
+ ip6_address_t * key,
+ ip_interface_address_t * ia)
+{
+ ip6_address_t *a = ip_interface_address_get_address (&im->lookup_main, ia);
+ return ip6_destination_matches_route (im, key, a, ia->address_length);
+}
+
+/* As above, but allows for unaligned destinations (e.g. it works directly on the IP header of a packet). */
+always_inline uword
+ip6_unaligned_destination_matches_route (ip6_main_t * im,
+ ip6_address_t * key,
+ ip6_address_t * dest,
+ uword dest_length)
+{
+ int i;
+ for (i = 0; i < ARRAY_LEN (key->as_uword); i++)
+ {
+ if ((clib_mem_unaligned (&key->as_uword[i], uword) ^ dest->as_uword[i])
+ & im->fib_masks[dest_length].as_uword[i])
+ return 0;
+ }
+ return 1;
+}
+
+always_inline int
+ip6_src_address_for_packet (ip_lookup_main_t * lm,
+ u32 sw_if_index, ip6_address_t * src)
+{
+ u32 if_add_index = lm->if_address_pool_index_by_sw_if_index[sw_if_index];
+ if (PREDICT_TRUE (if_add_index != ~0))
+ {
+ ip_interface_address_t *if_add =
+ pool_elt_at_index (lm->if_address_pool, if_add_index);
+ ip6_address_t *if_ip = ip_interface_address_get_address (lm, if_add);
+ *src = *if_ip;
+ return (0);
+ }
+ else
+ {
+ src->as_u64[0] = 0;
+ src->as_u64[1] = 0;
+ }
+ return (!0);
+}
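+
+/*
+ * Usage sketch: pick a source address for a locally generated packet; a
+ * non-zero return means the interface has no address and src was zeroed:
+ *
+ *   ip6_address_t src;
+ *   if (ip6_src_address_for_packet (&ip6_main.lookup_main,
+ *                                   sw_if_index, &src))
+ *     ; // no usable source address on this interface
+ */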
+
+/* Find interface address which matches destination. */
+always_inline ip6_address_t *
+ip6_interface_address_matching_destination (ip6_main_t * im,
+ ip6_address_t * dst,
+ u32 sw_if_index,
+ ip_interface_address_t **
+ result_ia)
+{
+ ip_lookup_main_t *lm = &im->lookup_main;
+ ip_interface_address_t *ia;
+ ip6_address_t *result = 0;
+
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (lm, ia, sw_if_index,
+ 1 /* honor unnumbered */,
+ ({
+ ip6_address_t * a = ip_interface_address_get_address (lm, ia);
+ if (ip6_destination_matches_route (im, dst, a, ia->address_length))
+ {
+ result = a;
+ break;
+ }
+ }));
+ /* *INDENT-ON* */
+ if (result_ia)
+ *result_ia = result ? ia : 0;
+ return result;
+}
+
+clib_error_t *ip6_add_del_interface_address (vlib_main_t * vm,
+ u32 sw_if_index,
+ ip6_address_t * address,
+ u32 address_length, u32 is_del);
+void ip6_sw_interface_enable_disable (u32 sw_if_index, u32 is_enable);
+
+int ip6_address_compare (ip6_address_t * a1, ip6_address_t * a2);
+
+clib_error_t *ip6_probe_neighbor (vlib_main_t * vm, ip6_address_t * dst,
+ u32 sw_if_index);
+
+clib_error_t *ip6_set_neighbor_limit (u32 neighbor_limit);
+
+uword
+ip6_udp_register_listener (vlib_main_t * vm,
+ u16 dst_port, u32 next_node_index);
+
+u16 ip6_tcp_udp_icmp_compute_checksum (vlib_main_t * vm, vlib_buffer_t * p0,
+ ip6_header_t * ip0,
+ int *bogus_lengthp);
+
+void ip6_register_protocol (u32 protocol, u32 node_index);
+
+serialize_function_t serialize_vnet_ip6_main, unserialize_vnet_ip6_main;
+
+void ip6_ethernet_update_adjacency (vnet_main_t * vnm,
+ u32 sw_if_index, u32 ai);
+
+int
+vnet_set_ip6_ethernet_neighbor (vlib_main_t * vm,
+ u32 sw_if_index,
+ ip6_address_t * a,
+ u8 * link_layer_address,
+ uword n_bytes_link_layer_address,
+ int is_static);
+int
+vnet_unset_ip6_ethernet_neighbor (vlib_main_t * vm,
+ u32 sw_if_index,
+ ip6_address_t * a,
+ u8 * link_layer_address,
+ uword n_bytes_link_layer_address);
+
+void
+ip6_link_local_address_from_ethernet_mac_address (ip6_address_t * ip,
+ u8 * mac);
+
+void
+ip6_ethernet_mac_address_from_link_local_address (u8 * mac,
+ ip6_address_t * ip);
+
+int vnet_set_ip6_flow_hash (u32 table_id,
+ flow_hash_config_t flow_hash_config);
+
+int
+ip6_neighbor_ra_config (vlib_main_t * vm, u32 sw_if_index,
+ u8 suppress, u8 managed, u8 other,
+ u8 ll_option, u8 send_unicast, u8 cease,
+ u8 use_lifetime, u32 lifetime,
+ u32 initial_count, u32 initial_interval,
+ u32 max_interval, u32 min_interval, u8 is_no);
+
+int
+ip6_neighbor_ra_prefix (vlib_main_t * vm, u32 sw_if_index,
+ ip6_address_t * prefix_addr, u8 prefix_len,
+ u8 use_default, u32 val_lifetime, u32 pref_lifetime,
+ u8 no_advertise, u8 off_link, u8 no_autoconfig,
+ u8 no_onlink, u8 is_no);
+
+
+clib_error_t *enable_ip6_interface (vlib_main_t * vm, u32 sw_if_index);
+
+clib_error_t *disable_ip6_interface (vlib_main_t * vm, u32 sw_if_index);
+
+int ip6_interface_enabled (vlib_main_t * vm, u32 sw_if_index);
+
+clib_error_t *set_ip6_link_local_address (vlib_main_t * vm,
+ u32 sw_if_index,
+ ip6_address_t * address,
+ u8 address_length);
+
+void vnet_register_ip6_neighbor_resolution_event (vnet_main_t * vnm,
+ void *address_arg,
+ uword node_index,
+ uword type_opaque,
+ uword data);
+
+int vnet_add_del_ip6_nd_change_event (vnet_main_t * vnm,
+ void *data_callback,
+ u32 pid,
+ void *address_arg,
+ uword node_index,
+ uword type_opaque,
+ uword data, int is_add);
+
+int vnet_ip6_nd_term (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_buffer_t * p0,
+ ethernet_header_t * eth,
+ ip6_header_t * ip,
+ u32 sw_if_index, u16 bd_index, u8 shg);
+
+int vnet_set_ip6_classify_intfc (vlib_main_t * vm, u32 sw_if_index,
+ u32 table_index);
+extern vlib_node_registration_t ip6_lookup_node;
+
+/* Compute the flow hash. We'll use it to select which load-balance
+   bucket (adjacency) to use for this flow. And other things. */
+always_inline u32
+ip6_compute_flow_hash (const ip6_header_t * ip,
+ flow_hash_config_t flow_hash_config)
+{
+ tcp_header_t *tcp = (void *) (ip + 1);
+ u64 a, b, c;
+ u64 t1, t2;
+ uword is_tcp_udp = (ip->protocol == IP_PROTOCOL_TCP
+ || ip->protocol == IP_PROTOCOL_UDP);
+
+ t1 = (ip->src_address.as_u64[0] ^ ip->src_address.as_u64[1]);
+ t1 = (flow_hash_config & IP_FLOW_HASH_SRC_ADDR) ? t1 : 0;
+
+ t2 = (ip->dst_address.as_u64[0] ^ ip->dst_address.as_u64[1]);
+ t2 = (flow_hash_config & IP_FLOW_HASH_DST_ADDR) ? t2 : 0;
+
+ a = (flow_hash_config & IP_FLOW_HASH_REVERSE_SRC_DST) ? t2 : t1;
+ b = (flow_hash_config & IP_FLOW_HASH_REVERSE_SRC_DST) ? t1 : t2;
+ b ^= (flow_hash_config & IP_FLOW_HASH_PROTO) ? ip->protocol : 0;
+
+ t1 = is_tcp_udp ? tcp->ports.src : 0;
+ t2 = is_tcp_udp ? tcp->ports.dst : 0;
+
+ t1 = (flow_hash_config & IP_FLOW_HASH_SRC_PORT) ? t1 : 0;
+ t2 = (flow_hash_config & IP_FLOW_HASH_DST_PORT) ? t2 : 0;
+
+ c = (flow_hash_config & IP_FLOW_HASH_REVERSE_SRC_DST) ?
+ ((t1 << 16) | t2) : ((t2 << 16) | t1);
+
+ hash_mix64 (a, b, c);
+ return (u32) c;
+}
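+
+/*
+ * Usage sketch (assumes IP_FLOW_HASH_DEFAULT from ip/lookup.h and a
+ * load_balance_t *lb): hash the flow and pick a load-balance bucket,
+ * much as the ip6 lookup path does:
+ *
+ *   u32 hash = ip6_compute_flow_hash (ip, IP_FLOW_HASH_DEFAULT);
+ *   const dpo_id_t *dpo =
+ *     load_balance_get_bucket_i (lb, hash & lb->lb_n_buckets_minus_1);
+ */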
+
+/*
+ * Hop-by-Hop handling
+ */
+typedef struct
+{
+ /* Array of function pointers to HBH option handling routines */
+ int (*options[256]) (vlib_buffer_t * b, ip6_header_t * ip,
+ ip6_hop_by_hop_option_t * opt);
+ u8 *(*trace[256]) (u8 * s, ip6_hop_by_hop_option_t * opt);
+ uword next_override;
+} ip6_hop_by_hop_main_t;
+
+extern ip6_hop_by_hop_main_t ip6_hop_by_hop_main;
+
+int ip6_hbh_register_option (u8 option,
+ int options (vlib_buffer_t * b,
+ ip6_header_t * ip,
+ ip6_hop_by_hop_option_t * opt),
+ u8 * trace (u8 * s,
+ ip6_hop_by_hop_option_t * opt));
+int ip6_hbh_unregister_option (u8 option);
+void ip6_hbh_set_next_override (uword next);
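+
+/*
+ * Registration sketch (hypothetical option number and handler names):
+ *
+ *   static int
+ *   my_opt_handler (vlib_buffer_t * b, ip6_header_t * ip,
+ *                   ip6_hop_by_hop_option_t * opt)
+ *   { return 0; }
+ *
+ *   static u8 *
+ *   my_opt_trace (u8 * s, ip6_hop_by_hop_option_t * opt)
+ *   { return format (s, "my-opt"); }
+ *
+ *   ip6_hbh_register_option (0x3e, my_opt_handler, my_opt_trace);
+ */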
+
+/* Flag used by IOAM code. The classifier sets it; pop-hop-by-hop checks it. */
+#define OI_DECAP 0x80000000
+
+#endif /* included_ip_ip6_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip6_error.h b/src/vnet/ip/ip6_error.h
new file mode 100644
index 00000000000..a2807169123
--- /dev/null
+++ b/src/vnet/ip/ip6_error.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip6_error.h: ip6 fast path errors
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_ip_ip6_error_h
+#define included_ip_ip6_error_h
+
+#define foreach_ip6_error \
+ /* Must be first. */ \
+ _ (NONE, "valid ip6 packets") \
+ \
+ /* Errors signalled by ip6-input */ \
+ _ (TOO_SHORT, "ip6 length < 40 bytes") \
+ _ (BAD_LENGTH, "ip6 length > l2 length") \
+ _ (VERSION, "ip6 version != 6") \
+ _ (TIME_EXPIRED, "ip6 ttl <= 1") \
+ \
+ /* Errors signalled by ip6-rewrite. */ \
+ _ (MTU_EXCEEDED, "ip6 MTU exceeded") \
+ _ (DST_LOOKUP_MISS, "ip6 destination lookup miss") \
+ _ (SRC_LOOKUP_MISS, "ip6 source lookup miss") \
+ _ (ADJACENCY_DROP, "ip6 adjacency drop") \
+ _ (ADJACENCY_PUNT, "ip6 adjacency punt") \
+ \
+ /* Errors signalled by ip6-local. */ \
+ _ (UNKNOWN_PROTOCOL, "unknown ip protocol") \
+ _ (UDP_CHECKSUM, "bad udp checksum") \
+ _ (ICMP_CHECKSUM, "bad icmp checksum") \
+ _ (UDP_LENGTH, "inconsistent udp/ip lengths") \
+ \
+ /* Errors signalled by udp6-lookup. */ \
+ _ (UNKNOWN_UDP_PORT, "no listener for udp port") \
+ \
+ /* Spoofed packets in ip6-rewrite-local */ \
+  _ (SPOOFED_LOCAL_PACKETS, "ip6 spoofed local-address packet drops") \
+ \
+  /* Errors signalled by ip6-inacl */ \
+ _ (INACL_TABLE_MISS, "input ACL table-miss drops") \
+ _ (INACL_SESSION_DENY, "input ACL session deny drops")
+
+typedef enum
+{
+#define _(sym,str) IP6_ERROR_##sym,
+ foreach_ip6_error
+#undef _
+ IP6_N_ERROR,
+} ip6_error_t;
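+
+/*
+ * Expansion sketch: with _ defined as above, the enum contains
+ * IP6_ERROR_NONE, IP6_ERROR_TOO_SHORT, ... IP6_N_ERROR, so a node can
+ * count an error with, e.g.:
+ *
+ *   vlib_error_count (vm, node->node_index, IP6_ERROR_TOO_SHORT, 1);
+ */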
+
+#endif /* included_ip_ip6_error_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip6_format.c b/src/vnet/ip/ip6_format.c
new file mode 100644
index 00000000000..56899b73d8b
--- /dev/null
+++ b/src/vnet/ip/ip6_format.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip6_format.c: ip6 formatting
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/ip/ip.h>
+
+/* Format an IP6 address. */
+u8 *
+format_ip6_address (u8 * s, va_list * args)
+{
+ ip6_address_t *a = va_arg (*args, ip6_address_t *);
+ u32 max_zero_run = 0, this_zero_run = 0;
+ int max_zero_run_index = -1, this_zero_run_index = 0;
+ int in_zero_run = 0, i;
+ int last_double_colon = 0;
+
+ /* Ugh, this is a pain. Scan forward looking for runs of 0's */
+ for (i = 0; i < ARRAY_LEN (a->as_u16); i++)
+ {
+ if (a->as_u16[i] == 0)
+ {
+ if (in_zero_run)
+ this_zero_run++;
+ else
+ {
+ in_zero_run = 1;
+ this_zero_run = 1;
+ this_zero_run_index = i;
+ }
+ }
+ else
+ {
+ if (in_zero_run)
+ {
+	      /* track the biggest run of more than one zero quad; it becomes the "::" */
+ if (this_zero_run > max_zero_run && this_zero_run > 1)
+ {
+ max_zero_run_index = this_zero_run_index;
+ max_zero_run = this_zero_run;
+ }
+ }
+ in_zero_run = 0;
+ this_zero_run = 0;
+ }
+ }
+
+ if (in_zero_run)
+ {
+ if (this_zero_run > max_zero_run && this_zero_run > 1)
+ {
+ max_zero_run_index = this_zero_run_index;
+ max_zero_run = this_zero_run;
+ }
+ }
+
+ for (i = 0; i < ARRAY_LEN (a->as_u16); i++)
+ {
+ if (i == max_zero_run_index)
+ {
+ s = format (s, "::");
+ i += max_zero_run - 1;
+ last_double_colon = 1;
+ }
+ else
+ {
+ s = format (s, "%s%x",
+ (last_double_colon || i == 0) ? "" : ":",
+ clib_net_to_host_u16 (a->as_u16[i]));
+ last_double_colon = 0;
+ }
+ }
+
+ return s;
+}
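+
+/*
+ * Usage sketch: format into a fresh vector; with the zero-run compression
+ * above, ::1 prints as "::1" rather than "0:0:0:0:0:0:0:1":
+ *
+ *   u8 *s = format (0, "%U", format_ip6_address, &addr);
+ *   // ... use s, then vec_free (s);
+ */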
+
+/* Format an IP6 route destination and length. */
+u8 *
+format_ip6_address_and_length (u8 * s, va_list * args)
+{
+ ip6_address_t *a = va_arg (*args, ip6_address_t *);
+ u8 l = va_arg (*args, u32);
+ return format (s, "%U/%d", format_ip6_address, a, l);
+}
+
+/* Parse an IP6 address. */
+uword
+unformat_ip6_address (unformat_input_t * input, va_list * args)
+{
+ ip6_address_t *result = va_arg (*args, ip6_address_t *);
+ u16 hex_quads[8];
+ uword hex_quad, n_hex_quads, hex_digit, n_hex_digits;
+ uword c, n_colon, double_colon_index;
+
+ n_hex_quads = hex_quad = n_hex_digits = n_colon = 0;
+ double_colon_index = ARRAY_LEN (hex_quads);
+ while ((c = unformat_get_input (input)) != UNFORMAT_END_OF_INPUT)
+ {
+ hex_digit = 16;
+ if (c >= '0' && c <= '9')
+ hex_digit = c - '0';
+ else if (c >= 'a' && c <= 'f')
+ hex_digit = c + 10 - 'a';
+ else if (c >= 'A' && c <= 'F')
+ hex_digit = c + 10 - 'A';
+ else if (c == ':' && n_colon < 2)
+ n_colon++;
+ else
+ {
+ unformat_put_input (input);
+ break;
+ }
+
+ /* Too many hex quads. */
+ if (n_hex_quads >= ARRAY_LEN (hex_quads))
+ return 0;
+
+ if (hex_digit < 16)
+ {
+ hex_quad = (hex_quad << 4) | hex_digit;
+
+ /* Hex quad must fit in 16 bits. */
+ if (n_hex_digits >= 4)
+ return 0;
+
+ n_colon = 0;
+ n_hex_digits++;
+ }
+
+ /* Save position of :: */
+ if (n_colon == 2)
+ {
+ /* More than one :: ? */
+ if (double_colon_index < ARRAY_LEN (hex_quads))
+ return 0;
+ double_colon_index = n_hex_quads;
+ }
+
+ if (n_colon > 0 && n_hex_digits > 0)
+ {
+ hex_quads[n_hex_quads++] = hex_quad;
+ hex_quad = 0;
+ n_hex_digits = 0;
+ }
+ }
+
+ if (n_hex_digits > 0)
+ hex_quads[n_hex_quads++] = hex_quad;
+
+ {
+ word i;
+
+ /* Expand :: to appropriate number of zero hex quads. */
+ if (double_colon_index < ARRAY_LEN (hex_quads))
+ {
+ word n_zero = ARRAY_LEN (hex_quads) - n_hex_quads;
+
+ for (i = n_hex_quads - 1; i >= (signed) double_colon_index; i--)
+ hex_quads[n_zero + i] = hex_quads[i];
+
+ for (i = 0; i < n_zero; i++)
+ {
+ ASSERT ((double_colon_index + i) < ARRAY_LEN (hex_quads));
+ hex_quads[double_colon_index + i] = 0;
+ }
+
+ n_hex_quads = ARRAY_LEN (hex_quads);
+ }
+
+ /* Too few hex quads given. */
+ if (n_hex_quads < ARRAY_LEN (hex_quads))
+ return 0;
+
+ for (i = 0; i < ARRAY_LEN (hex_quads); i++)
+ result->as_u16[i] = clib_host_to_net_u16 (hex_quads[i]);
+
+ return 1;
+ }
+}
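+
+/*
+ * Usage sketch: parse "2001:db8::1" from an unformat input:
+ *
+ *   ip6_address_t a;
+ *   if (unformat (input, "%U", unformat_ip6_address, &a))
+ *     ; // a now holds the address in network byte order
+ */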
+
+/* Format an IP6 header. */
+u8 *
+format_ip6_header (u8 * s, va_list * args)
+{
+ ip6_header_t *ip = va_arg (*args, ip6_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+ u32 i, ip_version, traffic_class, flow_label;
+ uword indent;
+
+  /* Not enough data for a full header. */
+ if (max_header_bytes < sizeof (ip[0]))
+ return format (s, "IP header truncated");
+
+ indent = format_get_indent (s);
+ indent += 2;
+
+ s = format (s, "%U: %U -> %U",
+ format_ip_protocol, ip->protocol,
+ format_ip6_address, &ip->src_address,
+ format_ip6_address, &ip->dst_address);
+
+ i = clib_net_to_host_u32 (ip->ip_version_traffic_class_and_flow_label);
+ ip_version = (i >> 28);
+ traffic_class = (i >> 20) & 0xff;
+ flow_label = i & pow2_mask (20);
+
+ if (ip_version != 6)
+ s = format (s, "\n%Uversion %d", format_white_space, indent, ip_version);
+
+ s =
+ format (s,
+ "\n%Utos 0x%02x, flow label 0x%x, hop limit %d, payload length %d",
+ format_white_space, indent, traffic_class, flow_label,
+ ip->hop_limit, clib_net_to_host_u16 (ip->payload_length));
+
+ /* Recurse into next protocol layer. */
+ if (max_header_bytes != 0 && sizeof (ip[0]) < max_header_bytes)
+ {
+ ip_main_t *im = &ip_main;
+ ip_protocol_info_t *pi = ip_get_protocol_info (im, ip->protocol);
+
+ if (pi && pi->format_header)
+ s = format (s, "\n%U%U",
+ format_white_space, indent - 2, pi->format_header,
+ /* next protocol header */ (void *) (ip + 1),
+ max_header_bytes - sizeof (ip[0]));
+ }
+
+ return s;
+}
+
+/* Parse an IP6 header. */
+uword
+unformat_ip6_header (unformat_input_t * input, va_list * args)
+{
+ u8 **result = va_arg (*args, u8 **);
+ ip6_header_t *ip;
+ int old_length;
+
+ /* Allocate space for IP header. */
+ {
+ void *p;
+
+ old_length = vec_len (*result);
+ vec_add2 (*result, p, sizeof (ip[0]));
+ ip = p;
+ }
+
+ memset (ip, 0, sizeof (ip[0]));
+ ip->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 (6 << 28);
+
+ if (!unformat (input, "%U: %U -> %U",
+ unformat_ip_protocol, &ip->protocol,
+ unformat_ip6_address, &ip->src_address,
+ unformat_ip6_address, &ip->dst_address))
+ return 0;
+
+ /* Parse options. */
+ while (1)
+ {
+ int i;
+
+ if (unformat (input, "tos %U", unformat_vlib_number, &i))
+ ip->ip_version_traffic_class_and_flow_label |=
+ clib_host_to_net_u32 ((i & 0xff) << 20);
+
+ else if (unformat (input, "hop-limit %U", unformat_vlib_number, &i))
+ ip->hop_limit = i;
+
+ /* Can't parse input: try next protocol level. */
+ else
+ break;
+ }
+
+ /* Recurse into next protocol layer. */
+ {
+ ip_main_t *im = &ip_main;
+ ip_protocol_info_t *pi = ip_get_protocol_info (im, ip->protocol);
+
+ if (pi && pi->unformat_header)
+ {
+ if (!unformat_user (input, pi->unformat_header, result))
+ return 0;
+
+ /* Result may have moved. */
+ ip = (void *) *result + old_length;
+ }
+ }
+
+ ip->payload_length =
+ clib_host_to_net_u16 (vec_len (*result) - (old_length + sizeof (ip[0])));
+
+ return 1;
+}
+
+/* Parse an IP46 address. */
+uword
+unformat_ip46_address (unformat_input_t * input, va_list * args)
+{
+ ip46_address_t *ip46 = va_arg (*args, ip46_address_t *);
+ ip46_type_t type = va_arg (*args, ip46_type_t);
+ if ((type != IP46_TYPE_IP6) &&
+ unformat (input, "%U", unformat_ip4_address, &ip46->ip4))
+ {
+ ip46_address_mask_ip4 (ip46);
+ return 1;
+ }
+ else if ((type != IP46_TYPE_IP4) &&
+ unformat (input, "%U", unformat_ip6_address, &ip46->ip6))
+ {
+ return 1;
+ }
+ return 0;
+}
+
+/* Format an IP46 address. */
+u8 *
+format_ip46_address (u8 * s, va_list * args)
+{
+ ip46_address_t *ip46 = va_arg (*args, ip46_address_t *);
+ ip46_type_t type = va_arg (*args, ip46_type_t);
+ int is_ip4 = 1;
+
+ switch (type)
+ {
+ case IP46_TYPE_ANY:
+ is_ip4 = ip46_address_is_ip4 (ip46);
+ break;
+ case IP46_TYPE_IP4:
+ is_ip4 = 1;
+ break;
+ case IP46_TYPE_IP6:
+ is_ip4 = 0;
+ break;
+ }
+
+ return is_ip4 ?
+ format (s, "%U", format_ip4_address, &ip46->ip4) :
+ format (s, "%U", format_ip6_address, &ip46->ip6);
+}
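+
+/*
+ * Usage sketch: IP46_TYPE_ANY lets the formatter pick the family from
+ * the stored value:
+ *
+ *   s = format (s, "%U", format_ip46_address, &ip46, IP46_TYPE_ANY);
+ */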
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c
new file mode 100644
index 00000000000..b5c795523ca
--- /dev/null
+++ b/src/vnet/ip/ip6_forward.c
@@ -0,0 +1,3402 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip6_forward.c: IP v6 forwarding
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h> /* for ethernet_header_t */
+#include <vnet/srp/srp.h> /* for srp_hw_interface_class */
+#include <vppinfra/cache.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/classify_dpo.h>
+
+#include <vppinfra/bihash_template.c>
+
+/**
+ * @file
+ * @brief IPv6 Forwarding.
+ *
+ * This file contains the source code for IPv6 forwarding.
+ */
+
+void
+ip6_forward_next_trace (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ vlib_rx_or_tx_t which_adj_index);
+
+always_inline uword
+ip6_lookup_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ ip6_main_t *im = &ip6_main;
+ vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
+ u32 n_left_from, n_left_to_next, *from, *to_next;
+ ip_lookup_next_t next;
+ u32 cpu_index = os_get_cpu_number ();
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ vlib_buffer_t *p0, *p1;
+ u32 pi0, pi1, lbi0, lbi1, wrong_next;
+ ip_lookup_next_t next0, next1;
+ ip6_header_t *ip0, *ip1;
+ ip6_address_t *dst_addr0, *dst_addr1;
+ u32 fib_index0, fib_index1;
+ u32 flow_hash_config0, flow_hash_config1;
+ const dpo_id_t *dpo0, *dpo1;
+ const load_balance_t *lb0, *lb1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+ CLIB_PREFETCH (p2->data, sizeof (ip0[0]), LOAD);
+ CLIB_PREFETCH (p3->data, sizeof (ip0[0]), LOAD);
+ }
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+
+ ip0 = vlib_buffer_get_current (p0);
+ ip1 = vlib_buffer_get_current (p1);
+
+ dst_addr0 = &ip0->dst_address;
+ dst_addr1 = &ip1->dst_address;
+
+ fib_index0 =
+ vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p0)->sw_if_index[VLIB_RX]);
+ fib_index1 =
+ vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p1)->sw_if_index[VLIB_RX]);
+
+ fib_index0 = (vnet_buffer (p0)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
+ fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX];
+ fib_index1 = (vnet_buffer (p1)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
+ fib_index1 : vnet_buffer (p1)->sw_if_index[VLIB_TX];
+
+ lbi0 = ip6_fib_table_fwding_lookup (im, fib_index0, dst_addr0);
+ lbi1 = ip6_fib_table_fwding_lookup (im, fib_index1, dst_addr1);
+
+ lb0 = load_balance_get (lbi0);
+ lb1 = load_balance_get (lbi1);
+
+ vnet_buffer (p0)->ip.flow_hash = vnet_buffer (p1)->ip.flow_hash = 0;
+
+ if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+ {
+ flow_hash_config0 = lb0->lb_hash_config;
+ vnet_buffer (p0)->ip.flow_hash =
+ ip6_compute_flow_hash (ip0, flow_hash_config0);
+ }
+ if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
+ {
+ flow_hash_config1 = lb1->lb_hash_config;
+ vnet_buffer (p1)->ip.flow_hash =
+ ip6_compute_flow_hash (ip1, flow_hash_config1);
+ }
+
+ ASSERT (lb0->lb_n_buckets > 0);
+ ASSERT (lb1->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb0->lb_n_buckets));
+ ASSERT (is_pow2 (lb1->lb_n_buckets));
+ dpo0 = load_balance_get_bucket_i (lb0,
+ (vnet_buffer (p0)->ip.flow_hash &
+ lb0->lb_n_buckets_minus_1));
+ dpo1 = load_balance_get_bucket_i (lb1,
+ (vnet_buffer (p1)->ip.flow_hash &
+ lb1->lb_n_buckets_minus_1));
+
+ next0 = dpo0->dpoi_next_node;
+ next1 = dpo1->dpoi_next_node;
+
+ /* Only process the HBH Option Header if explicitly configured to do so */
+ if (PREDICT_FALSE
+ (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ next0 = (dpo_is_adj (dpo0) && im->hbh_enabled) ?
+ (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next0;
+ }
+ if (PREDICT_FALSE
+ (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ next1 = (dpo_is_adj (dpo1) && im->hbh_enabled) ?
+ (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next1;
+ }
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+ vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1));
+
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ wrong_next = (next0 != next) + 2 * (next1 != next);
+ if (PREDICT_FALSE (wrong_next != 0))
+ {
+ switch (wrong_next)
+ {
+ case 1:
+ /* A B A */
+ to_next[-2] = pi1;
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node, next0, pi0);
+ break;
+
+ case 2:
+ /* A A B */
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node, next1, pi1);
+ break;
+
+ case 3:
+ /* A B C */
+ to_next -= 2;
+ n_left_to_next += 2;
+ vlib_set_next_frame_buffer (vm, node, next0, pi0);
+ vlib_set_next_frame_buffer (vm, node, next1, pi1);
+ if (next0 == next1)
+ {
+ /* A B B */
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ next = next1;
+ vlib_get_next_frame (vm, node, next, to_next,
+ n_left_to_next);
+ }
+ }
+ }
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ ip6_header_t *ip0;
+ u32 pi0, lbi0;
+ ip_lookup_next_t next0;
+ load_balance_t *lb0;
+ ip6_address_t *dst_addr0;
+ u32 fib_index0, flow_hash_config0;
+ const dpo_id_t *dpo0;
+
+ pi0 = from[0];
+ to_next[0] = pi0;
+
+ p0 = vlib_get_buffer (vm, pi0);
+
+ ip0 = vlib_buffer_get_current (p0);
+
+ dst_addr0 = &ip0->dst_address;
+
+ fib_index0 =
+ vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (p0)->sw_if_index[VLIB_RX]);
+ fib_index0 =
+ (vnet_buffer (p0)->sw_if_index[VLIB_TX] ==
+ (u32) ~ 0) ? fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX];
+
+ flow_hash_config0 = ip6_fib_get (fib_index0)->flow_hash_config;
+
+ lbi0 = ip6_fib_table_fwding_lookup (im, fib_index0, dst_addr0);
+
+ lb0 = load_balance_get (lbi0);
+
+ vnet_buffer (p0)->ip.flow_hash = 0;
+
+ if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+ {
+ flow_hash_config0 = lb0->lb_hash_config;
+ vnet_buffer (p0)->ip.flow_hash =
+ ip6_compute_flow_hash (ip0, flow_hash_config0);
+ }
+
+ ASSERT (lb0->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb0->lb_n_buckets));
+ dpo0 = load_balance_get_bucket_i (lb0,
+ (vnet_buffer (p0)->ip.flow_hash &
+ lb0->lb_n_buckets_minus_1));
+ next0 = dpo0->dpoi_next_node;
+
+ /* Only process the HBH Option Header if explicitly configured to do so */
+ if (PREDICT_FALSE
+ (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ next0 = (dpo_is_adj (dpo0) && im->hbh_enabled) ?
+ (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next0;
+ }
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
+
+ from += 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ n_left_from -= 1;
+
+ if (PREDICT_FALSE (next0 != next))
+ {
+ n_left_to_next += 1;
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ next = next0;
+ vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
+ to_next[0] = pi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ }
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip6_forward_next_trace (vm, node, frame, VLIB_TX);
+
+ return frame->n_vectors;
+}
+
+static void
+ip6_add_interface_routes (vnet_main_t * vnm, u32 sw_if_index,
+ ip6_main_t * im, u32 fib_index,
+ ip_interface_address_t * a)
+{
+ ip_lookup_main_t *lm = &im->lookup_main;
+ ip6_address_t *address = ip_interface_address_get_address (lm, a);
+ fib_prefix_t pfx = {
+ .fp_len = a->address_length,
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_addr.ip6 = *address,
+ };
+
+ a->neighbor_probe_adj_index = ~0;
+ if (a->address_length < 128)
+ {
+ fib_node_index_t fei;
+
+      fei = fib_table_entry_update_one_path (fib_index, &pfx,
+					     FIB_SOURCE_INTERFACE,
+					     (FIB_ENTRY_FLAG_CONNECTED |
+					      FIB_ENTRY_FLAG_ATTACHED),
+					     FIB_PROTOCOL_IP6,
+					     NULL, /* no next-hop address */
+					     sw_if_index,
+					     ~0, /* invalid FIB index */
+					     1, /* weight */
+					     NULL, /* no label stack */
+					     FIB_ROUTE_PATH_FLAG_NONE);
+ a->neighbor_probe_adj_index = fib_entry_get_adj (fei);
+ }
+
+ pfx.fp_len = 128;
+ if (sw_if_index < vec_len (lm->classify_table_index_by_sw_if_index))
+ {
+ u32 classify_table_index =
+ lm->classify_table_index_by_sw_if_index[sw_if_index];
+ if (classify_table_index != (u32) ~ 0)
+ {
+ dpo_id_t dpo = DPO_INVALID;
+
+ dpo_set (&dpo,
+ DPO_CLASSIFY,
+ DPO_PROTO_IP6,
+ classify_dpo_create (DPO_PROTO_IP6, classify_table_index));
+
+ fib_table_entry_special_dpo_add (fib_index,
+ &pfx,
+ FIB_SOURCE_CLASSIFY,
+ FIB_ENTRY_FLAG_NONE, &dpo);
+ dpo_reset (&dpo);
+ }
+ }
+
+  fib_table_entry_update_one_path (fib_index, &pfx,
+				   FIB_SOURCE_INTERFACE,
+				   (FIB_ENTRY_FLAG_CONNECTED |
+				    FIB_ENTRY_FLAG_LOCAL),
+				   FIB_PROTOCOL_IP6,
+				   &pfx.fp_addr,
+				   sw_if_index,
+				   ~0, /* invalid FIB index */
+				   1, /* weight */
+				   NULL, /* no label stack */
+				   FIB_ROUTE_PATH_FLAG_NONE);
+}
+
+static void
+ip6_del_interface_routes (ip6_main_t * im,
+ u32 fib_index,
+ ip6_address_t * address, u32 address_length)
+{
+ fib_prefix_t pfx = {
+ .fp_len = address_length,
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_addr.ip6 = *address,
+ };
+
+  if (pfx.fp_len < 128)
+    {
+      fib_table_entry_delete (fib_index, &pfx, FIB_SOURCE_INTERFACE);
+    }
+
+ pfx.fp_len = 128;
+ fib_table_entry_delete (fib_index, &pfx, FIB_SOURCE_INTERFACE);
+}
+
+void
+ip6_sw_interface_enable_disable (u32 sw_if_index, u32 is_enable)
+{
+ ip6_main_t *im = &ip6_main;
+
+ vec_validate_init_empty (im->ip_enabled_by_sw_if_index, sw_if_index, 0);
+
+ /*
+ * enable/disable only on the 1<->0 transition
+ */
+ if (is_enable)
+ {
+ if (1 != ++im->ip_enabled_by_sw_if_index[sw_if_index])
+ return;
+ }
+ else
+ {
+ ASSERT (im->ip_enabled_by_sw_if_index[sw_if_index] > 0);
+ if (0 != --im->ip_enabled_by_sw_if_index[sw_if_index])
+ return;
+ }
+
+ vnet_feature_enable_disable ("ip6-unicast", "ip6-lookup", sw_if_index,
+ is_enable, 0, 0);
+
+ vnet_feature_enable_disable ("ip6-multicast", "ip6-lookup", sw_if_index,
+ is_enable, 0, 0);
+
+}
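+
+/*
+ * Usage sketch: enables are reference counted, so enabling twice and
+ * disabling once leaves ip6 enabled; only 1<->0 transitions touch the
+ * feature arcs:
+ *
+ *   ip6_sw_interface_enable_disable (sw_if_index, 1);  // enable
+ */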
+
+/* get first interface address */
+ip6_address_t *
+ip6_interface_first_address (ip6_main_t * im,
+ u32 sw_if_index,
+ ip_interface_address_t ** result_ia)
+{
+ ip_lookup_main_t *lm = &im->lookup_main;
+ ip_interface_address_t *ia = 0;
+ ip6_address_t *result = 0;
+
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (lm, ia, sw_if_index,
+ 1 /* honor unnumbered */,
+ ({
+ ip6_address_t * a = ip_interface_address_get_address (lm, ia);
+ result = a;
+ break;
+ }));
+ /* *INDENT-ON* */
+ if (result_ia)
+ *result_ia = result ? ia : 0;
+ return result;
+}
+
+clib_error_t *
+ip6_add_del_interface_address (vlib_main_t * vm,
+ u32 sw_if_index,
+ ip6_address_t * address,
+ u32 address_length, u32 is_del)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip6_main_t *im = &ip6_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+ clib_error_t *error;
+ u32 if_address_index;
+ ip6_address_fib_t ip6_af, *addr_fib = 0;
+
+ vec_validate (im->fib_index_by_sw_if_index, sw_if_index);
+ ip6_addr_fib_init (&ip6_af, address,
+ vec_elt (im->fib_index_by_sw_if_index, sw_if_index));
+ vec_add1 (addr_fib, ip6_af);
+
+ {
+ uword elts_before = pool_elts (lm->if_address_pool);
+
+ error = ip_interface_address_add_del
+ (lm, sw_if_index, addr_fib, address_length, is_del, &if_address_index);
+ if (error)
+ goto done;
+
+    /* Pool did not grow: this was a duplicate address, so nothing more to do. */
+ if (elts_before == pool_elts (lm->if_address_pool))
+ goto done;
+ }
+
+ ip6_sw_interface_enable_disable (sw_if_index, !is_del);
+
+ if (is_del)
+ ip6_del_interface_routes (im, ip6_af.fib_index, address, address_length);
+ else
+ ip6_add_interface_routes (vnm, sw_if_index,
+ im, ip6_af.fib_index,
+ pool_elt_at_index (lm->if_address_pool,
+ if_address_index));
+
+ {
+ ip6_add_del_interface_address_callback_t *cb;
+ vec_foreach (cb, im->add_del_interface_address_callbacks)
+ cb->function (im, cb->function_opaque, sw_if_index,
+ address, address_length, if_address_index, is_del);
+ }
+
+done:
+ vec_free (addr_fib);
+ return error;
+}
+
+clib_error_t *
+ip6_sw_interface_admin_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
+{
+ ip6_main_t *im = &ip6_main;
+ ip_interface_address_t *ia;
+ ip6_address_t *a;
+ u32 is_admin_up, fib_index;
+
+ /* Fill in lookup tables with default table (0). */
+ vec_validate (im->fib_index_by_sw_if_index, sw_if_index);
+
+ vec_validate_init_empty (im->
+ lookup_main.if_address_pool_index_by_sw_if_index,
+ sw_if_index, ~0);
+
+ is_admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+
+ fib_index = vec_elt (im->fib_index_by_sw_if_index, sw_if_index);
+
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (&im->lookup_main, ia, sw_if_index,
+ 0 /* honor unnumbered */,
+ ({
+ a = ip_interface_address_get_address (&im->lookup_main, ia);
+ if (is_admin_up)
+ ip6_add_interface_routes (vnm, sw_if_index,
+ im, fib_index,
+ ia);
+ else
+ ip6_del_interface_routes (im, fib_index,
+ a, ia->address_length);
+ }));
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
+VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ip6_sw_interface_admin_up_down);
+
+/* Built-in ip6 unicast rx feature path definition */
+/* *INDENT-OFF* */
+VNET_FEATURE_ARC_INIT (ip6_unicast, static) =
+{
+ .arc_name = "ip6-unicast",
+ .start_nodes = VNET_FEATURES ("ip6-input"),
+ .arc_index_ptr = &ip6_main.lookup_main.ucast_feature_arc_index,
+};
+
+VNET_FEATURE_INIT (ip6_flow_classify, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "ip6-flow-classify",
+ .runs_before = VNET_FEATURES ("ip6-inacl"),
+};
+
+VNET_FEATURE_INIT (ip6_inacl, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "ip6-inacl",
+ .runs_before = VNET_FEATURES ("ip6-policer-classify"),
+};
+
+VNET_FEATURE_INIT (ip6_policer_classify, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "ip6-policer-classify",
+ .runs_before = VNET_FEATURES ("ipsec-input-ip6"),
+};
+
+VNET_FEATURE_INIT (ip6_ipsec, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "ipsec-input-ip6",
+ .runs_before = VNET_FEATURES ("l2tp-decap"),
+};
+
+VNET_FEATURE_INIT (ip6_l2tp, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "l2tp-decap",
+ .runs_before = VNET_FEATURES ("vpath-input-ip6"),
+};
+
+VNET_FEATURE_INIT (ip6_vpath, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "vpath-input-ip6",
+ .runs_before = VNET_FEATURES ("ip6-lookup"),
+};
+
+VNET_FEATURE_INIT (ip6_lookup, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "ip6-lookup",
+ .runs_before = VNET_FEATURES ("ip6-drop"),
+};
+
+VNET_FEATURE_INIT (ip6_drop, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "ip6-drop",
+ .runs_before = 0, /*last feature*/
+};
+
+/* Built-in ip6 multicast rx feature path definition (none now) */
+VNET_FEATURE_ARC_INIT (ip6_multicast, static) =
+{
+ .arc_name = "ip6-multicast",
+ .start_nodes = VNET_FEATURES ("ip6-input"),
+ .arc_index_ptr = &ip6_main.lookup_main.mcast_feature_arc_index,
+};
+
+VNET_FEATURE_INIT (ip6_vpath_mc, static) = {
+ .arc_name = "ip6-multicast",
+ .node_name = "vpath-input-ip6",
+ .runs_before = VNET_FEATURES ("ip6-lookup"),
+};
+
+VNET_FEATURE_INIT (ip6_mc_lookup, static) = {
+ .arc_name = "ip6-multicast",
+ .node_name = "ip6-lookup",
+ .runs_before = VNET_FEATURES ("ip6-drop"),
+};
+
+VNET_FEATURE_INIT (ip6_drop_mc, static) = {
+ .arc_name = "ip6-multicast",
+ .node_name = "ip6-drop",
+ .runs_before = 0, /* last feature */
+};
+
+/* Built-in ip6 tx feature path definition */
+VNET_FEATURE_ARC_INIT (ip6_output, static) =
+{
+ .arc_name = "ip6-output",
+ .start_nodes = VNET_FEATURES ("ip6-rewrite", "ip6-midchain"),
+ .arc_index_ptr = &ip6_main.lookup_main.output_feature_arc_index,
+};
+
+VNET_FEATURE_INIT (ip6_ipsec_output, static) = {
+ .arc_name = "ip6-output",
+ .node_name = "ipsec-output-ip6",
+ .runs_before = VNET_FEATURES ("interface-output"),
+};
+
+VNET_FEATURE_INIT (ip6_interface_output, static) = {
+ .arc_name = "ip6-output",
+ .node_name = "interface-output",
+ .runs_before = 0, /* not before any other features */
+};
+/* *INDENT-ON* */
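+
+/*
+ * The runs_before constraints above yield this effective order on the
+ * ip6-unicast arc:
+ *
+ *   ip6-flow-classify -> ip6-inacl -> ip6-policer-classify ->
+ *   ipsec-input-ip6 -> l2tp-decap -> vpath-input-ip6 ->
+ *   ip6-lookup -> ip6-drop
+ */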
+
+clib_error_t *
+ip6_sw_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add)
+{
+ vnet_feature_enable_disable ("ip6-unicast", "ip6-drop", sw_if_index,
+ is_add, 0, 0);
+
+ vnet_feature_enable_disable ("ip6-multicast", "ip6-drop", sw_if_index,
+ is_add, 0, 0);
+
+ vnet_feature_enable_disable ("ip6-output", "interface-output", sw_if_index,
+ is_add, 0, 0);
+
+ return /* no error */ 0;
+}
+
+VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ip6_sw_interface_add_del);
+
+static uword
+ip6_lookup (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return ip6_lookup_inline (vm, node, frame);
+}
+
+static u8 *format_ip6_lookup_trace (u8 * s, va_list * args);
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_lookup_node) =
+{
+ .function = ip6_lookup,
+ .name = "ip6-lookup",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip6_lookup_trace,
+ .n_next_nodes = IP6_LOOKUP_N_NEXT,
+ .next_nodes = IP6_LOOKUP_NEXT_NODES,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_lookup_node, ip6_lookup);
+
+always_inline uword
+ip6_load_balance (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ vlib_combined_counter_main_t *cm = &load_balance_main.lbm_via_counters;
+ u32 n_left_from, n_left_to_next, *from, *to_next;
+ ip_lookup_next_t next;
+ u32 cpu_index = os_get_cpu_number ();
+ ip6_main_t *im = &ip6_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip6_forward_next_trace (vm, node, frame, VLIB_TX);
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
+
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ ip_lookup_next_t next0, next1;
+ const load_balance_t *lb0, *lb1;
+ vlib_buffer_t *p0, *p1;
+ u32 pi0, lbi0, hc0, pi1, lbi1, hc1;
+ const ip6_header_t *ip0, *ip1;
+ const dpo_id_t *dpo0, *dpo1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, STORE);
+ vlib_prefetch_buffer_header (p3, STORE);
+
+ CLIB_PREFETCH (p2->data, sizeof (ip0[0]), STORE);
+ CLIB_PREFETCH (p3->data, sizeof (ip0[0]), STORE);
+ }
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+
+ ip0 = vlib_buffer_get_current (p0);
+ ip1 = vlib_buffer_get_current (p1);
+ lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+ lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];
+
+ lb0 = load_balance_get (lbi0);
+ lb1 = load_balance_get (lbi1);
+
+	  /*
+	   * This node is for 'via' FIBs; we can re-use the flow hash value
+	   * from a previous node if it is present.
+	   * We don't want to use the same hash value at each level in the
+	   * recursion graph as that would lead to polarisation.
+	   */
+ hc0 = vnet_buffer (p0)->ip.flow_hash = 0;
+ hc1 = vnet_buffer (p1)->ip.flow_hash = 0;
+
+ if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+ {
+ if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash))
+ {
+ hc0 = vnet_buffer (p0)->ip.flow_hash =
+ vnet_buffer (p0)->ip.flow_hash >> 1;
+ }
+ else
+ {
+ hc0 = vnet_buffer (p0)->ip.flow_hash =
+ ip6_compute_flow_hash (ip0, hc0);
+ }
+ }
+ if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
+ {
+ if (PREDICT_TRUE (vnet_buffer (p1)->ip.flow_hash))
+ {
+ hc1 = vnet_buffer (p1)->ip.flow_hash =
+ vnet_buffer (p1)->ip.flow_hash >> 1;
+ }
+ else
+ {
+ hc1 = vnet_buffer (p1)->ip.flow_hash =
+ ip6_compute_flow_hash (ip1, hc1);
+ }
+ }
+
+ dpo0 =
+ load_balance_get_bucket_i (lb0,
+ hc0 & (lb0->lb_n_buckets_minus_1));
+ dpo1 =
+ load_balance_get_bucket_i (lb1,
+ hc1 & (lb1->lb_n_buckets_minus_1));
+
+ next0 = dpo0->dpoi_next_node;
+ next1 = dpo1->dpoi_next_node;
+
+ /* Only process the HBH Option Header if explicitly configured to do so */
+ if (PREDICT_FALSE
+ (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ next0 = (dpo_is_adj (dpo0) && im->hbh_enabled) ?
+ (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next0;
+ }
+ /* Only process the HBH Option Header if explicitly configured to do so */
+ if (PREDICT_FALSE
+ (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ next1 = (dpo_is_adj (dpo1) && im->hbh_enabled) ?
+ (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next1;
+ }
+
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+ vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1));
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next,
+ to_next, n_left_to_next,
+ pi0, pi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ ip_lookup_next_t next0;
+ const load_balance_t *lb0;
+ vlib_buffer_t *p0;
+ u32 pi0, lbi0, hc0;
+ const ip6_header_t *ip0;
+ const dpo_id_t *dpo0;
+
+ pi0 = from[0];
+ to_next[0] = pi0;
+ from += 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ n_left_from -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+
+ ip0 = vlib_buffer_get_current (p0);
+ lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+
+ lb0 = load_balance_get (lbi0);
+
+ hc0 = vnet_buffer (p0)->ip.flow_hash = 0;
+ if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+ {
+ if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash))
+ {
+ hc0 = vnet_buffer (p0)->ip.flow_hash =
+ vnet_buffer (p0)->ip.flow_hash >> 1;
+ }
+ else
+ {
+ hc0 = vnet_buffer (p0)->ip.flow_hash =
+ ip6_compute_flow_hash (ip0, hc0);
+ }
+ }
+ dpo0 =
+ load_balance_get_bucket_i (lb0,
+ hc0 & (lb0->lb_n_buckets_minus_1));
+
+ next0 = dpo0->dpoi_next_node;
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+ /* Only process the HBH Option Header if explicitly configured to do so */
+ if (PREDICT_FALSE
+ (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ next0 = (dpo_is_adj (dpo0) && im->hbh_enabled) ?
+ (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next0;
+ }
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next,
+ to_next, n_left_to_next,
+ pi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_load_balance_node) =
+{
+ .function = ip6_load_balance,
+ .name = "ip6-load-balance",
+ .vector_size = sizeof (u32),
+ .sibling_of = "ip6-lookup",
+ .format_trace = format_ip6_lookup_trace,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_load_balance_node, ip6_load_balance);
+
+typedef struct
+{
+ /* Adjacency taken. */
+ u32 adj_index;
+ u32 flow_hash;
+ u32 fib_index;
+
+ /* Packet data, possibly *after* rewrite. */
+ u8 packet_data[128 - 1 * sizeof (u32)];
+}
+ip6_forward_next_trace_t;
+
+static u8 *
+format_ip6_forward_next_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ip6_forward_next_trace_t *t = va_arg (*args, ip6_forward_next_trace_t *);
+ uword indent = format_get_indent (s);
+
+ s = format (s, "%U%U",
+ format_white_space, indent,
+ format_ip6_header, t->packet_data, sizeof (t->packet_data));
+ return s;
+}
+
+static u8 *
+format_ip6_lookup_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ip6_forward_next_trace_t *t = va_arg (*args, ip6_forward_next_trace_t *);
+ uword indent = format_get_indent (s);
+
+ s = format (s, "fib %d dpo-idx %d flow hash: 0x%08x",
+ t->fib_index, t->adj_index, t->flow_hash);
+ s = format (s, "\n%U%U",
+ format_white_space, indent,
+ format_ip6_header, t->packet_data, sizeof (t->packet_data));
+ return s;
+}
+
+
+static u8 *
+format_ip6_rewrite_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ip6_forward_next_trace_t *t = va_arg (*args, ip6_forward_next_trace_t *);
+ vnet_main_t *vnm = vnet_get_main ();
+ uword indent = format_get_indent (s);
+
+ s = format (s, "tx_sw_if_index %d adj-idx %d : %U flow hash: 0x%08x",
+ t->fib_index, t->adj_index, format_ip_adjacency,
+ t->adj_index, FORMAT_IP_ADJACENCY_NONE, t->flow_hash);
+ s = format (s, "\n%U%U",
+ format_white_space, indent,
+ format_ip_adjacency_packet_data,
+ vnm, t->adj_index, t->packet_data, sizeof (t->packet_data));
+ return s;
+}
+
+/* Common trace function for all ip6-forward next nodes. */
+void
+ip6_forward_next_trace (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, vlib_rx_or_tx_t which_adj_index)
+{
+ u32 *from, n_left;
+ ip6_main_t *im = &ip6_main;
+
+ n_left = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+
+ while (n_left >= 4)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ ip6_forward_next_trace_t *t0, *t1;
+
+ /* Prefetch next iteration. */
+ vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
+ vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
+
+ bi0 = from[0];
+ bi1 = from[1];
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t0->adj_index = vnet_buffer (b0)->ip.adj_index[which_adj_index];
+ t0->flow_hash = vnet_buffer (b0)->ip.flow_hash;
+ t0->fib_index =
+ (vnet_buffer (b0)->sw_if_index[VLIB_TX] !=
+ (u32) ~ 0) ? vnet_buffer (b0)->sw_if_index[VLIB_TX] :
+ vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (b0)->sw_if_index[VLIB_RX]);
+
+ clib_memcpy (t0->packet_data,
+ vlib_buffer_get_current (b0),
+ sizeof (t0->packet_data));
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
+ t1->adj_index = vnet_buffer (b1)->ip.adj_index[which_adj_index];
+ t1->flow_hash = vnet_buffer (b1)->ip.flow_hash;
+ t1->fib_index =
+ (vnet_buffer (b1)->sw_if_index[VLIB_TX] !=
+ (u32) ~ 0) ? vnet_buffer (b1)->sw_if_index[VLIB_TX] :
+ vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (b1)->sw_if_index[VLIB_RX]);
+
+ clib_memcpy (t1->packet_data,
+ vlib_buffer_get_current (b1),
+ sizeof (t1->packet_data));
+ }
+ from += 2;
+ n_left -= 2;
+ }
+
+ while (n_left >= 1)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ ip6_forward_next_trace_t *t0;
+
+ bi0 = from[0];
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t0->adj_index = vnet_buffer (b0)->ip.adj_index[which_adj_index];
+ t0->flow_hash = vnet_buffer (b0)->ip.flow_hash;
+ t0->fib_index =
+ (vnet_buffer (b0)->sw_if_index[VLIB_TX] !=
+ (u32) ~ 0) ? vnet_buffer (b0)->sw_if_index[VLIB_TX] :
+ vec_elt (im->fib_index_by_sw_if_index,
+ vnet_buffer (b0)->sw_if_index[VLIB_RX]);
+
+ clib_memcpy (t0->packet_data,
+ vlib_buffer_get_current (b0),
+ sizeof (t0->packet_data));
+ }
+ from += 1;
+ n_left -= 1;
+ }
+}
+
+static uword
+ip6_drop_or_punt (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, ip6_error_t error_code)
+{
+ u32 *buffers = vlib_frame_vector_args (frame);
+ uword n_packets = frame->n_vectors;
+
+ vlib_error_drop_buffers (vm, node, buffers,
+ /* stride */ 1,
+ n_packets,
+ /* next */ 0,
+ ip6_input_node.index, error_code);
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip6_forward_next_trace (vm, node, frame, VLIB_TX);
+
+ return n_packets;
+}
+
+static uword
+ip6_drop (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return ip6_drop_or_punt (vm, node, frame, IP6_ERROR_ADJACENCY_DROP);
+}
+
+static uword
+ip6_punt (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return ip6_drop_or_punt (vm, node, frame, IP6_ERROR_ADJACENCY_PUNT);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_drop_node, static) =
+{
+ .function = ip6_drop,
+ .name = "ip6-drop",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip6_forward_next_trace,
+ .n_next_nodes = 1,
+ .next_nodes =
+ {
+ [0] = "error-drop",},
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_drop_node, ip6_drop);
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_punt_node, static) =
+{
+ .function = ip6_punt,
+ .name = "ip6-punt",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip6_forward_next_trace,
+ .n_next_nodes = 1,
+ .next_nodes =
+ {
+ [0] = "error-punt",},
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_punt_node, ip6_punt);
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_multicast_node, static) =
+{
+ .function = ip6_drop,
+ .name = "ip6-multicast",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip6_forward_next_trace,
+ .n_next_nodes = 1,
+ .next_nodes =
+ {
+ [0] = "error-drop",
+ },
+};
+
+/* *INDENT-ON* */
+
+/* Compute TCP/UDP/ICMP6 checksum in software. */
+u16
+ip6_tcp_udp_icmp_compute_checksum (vlib_main_t * vm, vlib_buffer_t * p0,
+ ip6_header_t * ip0, int *bogus_lengthp)
+{
+ ip_csum_t sum0;
+ u16 sum16, payload_length_host_byte_order;
+ u32 i, n_this_buffer, n_bytes_left;
+ u32 headers_size = sizeof (ip0[0]);
+ void *data_this_buffer;
+
+ ASSERT (bogus_lengthp);
+ *bogus_lengthp = 0;
+
+ /* Initialize checksum with ip header. */
+ sum0 = ip0->payload_length + clib_host_to_net_u16 (ip0->protocol);
+ payload_length_host_byte_order = clib_net_to_host_u16 (ip0->payload_length);
+ data_this_buffer = (void *) (ip0 + 1);
+
+ for (i = 0; i < ARRAY_LEN (ip0->src_address.as_uword); i++)
+ {
+ sum0 = ip_csum_with_carry (sum0,
+ clib_mem_unaligned (&ip0->
+ src_address.as_uword[i],
+ uword));
+ sum0 =
+ ip_csum_with_carry (sum0,
+ clib_mem_unaligned (&ip0->dst_address.as_uword[i],
+ uword));
+ }
+
+ /* some icmp packets may come with a "router alert" hop-by-hop extension header (e.g., mldv2 packets) */
+ if (PREDICT_FALSE (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ u32 skip_bytes;
+ ip6_hop_by_hop_ext_t *ext_hdr =
+ (ip6_hop_by_hop_ext_t *) data_this_buffer;
+
+      /* validate that the next header really is ICMP6 */
+ ASSERT (ext_hdr->next_hdr == IP_PROTOCOL_ICMP6);
+
+ skip_bytes = 8 * (1 + ext_hdr->n_data_u64s);
+ data_this_buffer = (void *) ((u8 *) data_this_buffer + skip_bytes);
+
+ payload_length_host_byte_order -= skip_bytes;
+ headers_size += skip_bytes;
+ }
+
+ n_bytes_left = n_this_buffer = payload_length_host_byte_order;
+ if (p0 && n_this_buffer + headers_size > p0->current_length)
+ n_this_buffer =
+ p0->current_length >
+ headers_size ? p0->current_length - headers_size : 0;
+ while (1)
+ {
+ sum0 = ip_incremental_checksum (sum0, data_this_buffer, n_this_buffer);
+ n_bytes_left -= n_this_buffer;
+ if (n_bytes_left == 0)
+ break;
+
+ if (!(p0->flags & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ *bogus_lengthp = 1;
+ return 0xfefe;
+ }
+ p0 = vlib_get_buffer (vm, p0->next_buffer);
+ data_this_buffer = vlib_buffer_get_current (p0);
+ n_this_buffer = p0->current_length;
+ }
+
+ sum16 = ~ip_csum_fold (sum0);
+
+ return sum16;
+}
+
+u32
+ip6_tcp_udp_icmp_validate_checksum (vlib_main_t * vm, vlib_buffer_t * p0)
+{
+ ip6_header_t *ip0 = vlib_buffer_get_current (p0);
+ udp_header_t *udp0;
+ u16 sum16;
+ int bogus_length;
+
+ /* some icmp packets may come with a "router alert" hop-by-hop extension header (e.g., mldv2 packets) */
+ ASSERT (ip0->protocol == IP_PROTOCOL_TCP
+ || ip0->protocol == IP_PROTOCOL_ICMP6
+ || ip0->protocol == IP_PROTOCOL_UDP
+ || ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS);
+
+ udp0 = (void *) (ip0 + 1);
+ if (ip0->protocol == IP_PROTOCOL_UDP && udp0->checksum == 0)
+ {
+ p0->flags |= (IP_BUFFER_L4_CHECKSUM_COMPUTED
+ | IP_BUFFER_L4_CHECKSUM_CORRECT);
+ return p0->flags;
+ }
+
+ sum16 = ip6_tcp_udp_icmp_compute_checksum (vm, p0, ip0, &bogus_length);
+
+ p0->flags |= (IP_BUFFER_L4_CHECKSUM_COMPUTED
+ | ((sum16 == 0) << LOG2_IP_BUFFER_L4_CHECKSUM_CORRECT));
+
+ return p0->flags;
+}
+
+/* ip6_locate_header
+ *
+ * Searches the extension-header chain for the header specified by find_hdr:
+ * 1. If find_hdr < 0, it finds the transport or ESP header in the chain,
+ *    stores its offset in *offset and returns its protocol number.
+ * 2. If a header whose protocol number equals find_hdr > 0 is found, its
+ *    offset is stored in *offset and its protocol number is returned.
+ * 3. If find_hdr is not found, the packet is malformed, or this is a
+ *    non-first fragment, -1 is returned.
+ */
+always_inline int
+ip6_locate_header (vlib_buffer_t * p0,
+ ip6_header_t * ip0, int find_hdr, u32 * offset)
+{
+ u8 next_proto = ip0->protocol;
+ u8 *next_header;
+ u8 done = 0;
+ u32 cur_offset;
+ u8 *temp_nxthdr = 0;
+ u32 exthdr_len = 0;
+
+ next_header = ip6_next_header (ip0);
+ cur_offset = sizeof (ip6_header_t);
+ while (1)
+ {
+ done = (next_proto == find_hdr);
+ if (PREDICT_FALSE
+ (next_header >=
+ (u8 *) vlib_buffer_get_current (p0) + p0->current_length))
+ {
+	  /* A malicious packet could claim an extension-header size that runs past the buffer */
+ return (-1);
+ }
+ if (done)
+ break;
+ if ((!ip6_ext_hdr (next_proto)) || next_proto == IP_PROTOCOL_IP6_NONXT)
+ {
+ if (find_hdr < 0)
+ break;
+ return -1;
+ }
+ if (next_proto == IP_PROTOCOL_IPV6_FRAGMENTATION)
+ {
+ ip6_frag_hdr_t *frag_hdr = (ip6_frag_hdr_t *) next_header;
+ u16 frag_off = ip6_frag_hdr_offset (frag_hdr);
+ /* Non first fragment return -1 */
+ if (frag_off)
+ return (-1);
+ exthdr_len = sizeof (ip6_frag_hdr_t);
+ temp_nxthdr = next_header + exthdr_len;
+ }
+ else if (next_proto == IP_PROTOCOL_IPSEC_AH)
+ {
+ exthdr_len =
+ ip6_ext_authhdr_len (((ip6_ext_header_t *) next_header));
+ temp_nxthdr = next_header + exthdr_len;
+ }
+ else
+ {
+ exthdr_len =
+ ip6_ext_header_len (((ip6_ext_header_t *) next_header));
+ temp_nxthdr = next_header + exthdr_len;
+ }
+ next_proto = ((ip6_ext_header_t *) next_header)->next_hdr;
+ next_header = temp_nxthdr;
+ cur_offset += exthdr_len;
+ }
+
+ *offset = cur_offset;
+ return (next_proto);
+}
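+
+/*
+ * Usage sketch, mirroring ip6_local below: find the UDP header behind
+ * any extension headers:
+ *
+ *   u32 udp_offset;
+ *   if (IP_PROTOCOL_UDP == ip6_locate_header (p0, ip0, IP_PROTOCOL_UDP,
+ *                                             &udp_offset))
+ *     udp0 = (udp_header_t *) ((u8 *) ip0 + udp_offset);
+ */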
+
+static uword
+ip6_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ ip6_main_t *im = &ip6_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+ ip_local_next_t next_index;
+ u32 *from, *to_next, n_left_from, n_left_to_next;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip6_input_node.index);
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip6_forward_next_trace (vm, node, frame, VLIB_TX);
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ vlib_buffer_t *p0, *p1;
+ ip6_header_t *ip0, *ip1;
+ udp_header_t *udp0, *udp1;
+ u32 pi0, ip_len0, udp_len0, flags0, next0;
+ u32 pi1, ip_len1, udp_len1, flags1, next1;
+ i32 len_diff0, len_diff1;
+ u8 error0, type0, good_l4_checksum0;
+ u8 error1, type1, good_l4_checksum1;
+ u32 udp_offset0, udp_offset1;
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+
+ ip0 = vlib_buffer_get_current (p0);
+ ip1 = vlib_buffer_get_current (p1);
+
+ type0 = lm->builtin_protocol_by_ip_protocol[ip0->protocol];
+ type1 = lm->builtin_protocol_by_ip_protocol[ip1->protocol];
+
+ next0 = lm->local_next_by_ip_protocol[ip0->protocol];
+ next1 = lm->local_next_by_ip_protocol[ip1->protocol];
+
+ flags0 = p0->flags;
+ flags1 = p1->flags;
+
+ good_l4_checksum0 = (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ good_l4_checksum1 = (flags1 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ len_diff0 = 0;
+ len_diff1 = 0;
+
+ /* Skip HBH local processing */
+ if (PREDICT_FALSE
+ (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ ip6_hop_by_hop_ext_t *ext_hdr =
+ (ip6_hop_by_hop_ext_t *) ip6_next_header (ip0);
+ next0 = lm->local_next_by_ip_protocol[ext_hdr->next_hdr];
+ type0 = lm->builtin_protocol_by_ip_protocol[ext_hdr->next_hdr];
+ }
+ if (PREDICT_FALSE
+ (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ ip6_hop_by_hop_ext_t *ext_hdr =
+ (ip6_hop_by_hop_ext_t *) ip6_next_header (ip1);
+ next1 = lm->local_next_by_ip_protocol[ext_hdr->next_hdr];
+ type1 = lm->builtin_protocol_by_ip_protocol[ext_hdr->next_hdr];
+ }
+ if (PREDICT_TRUE (IP_PROTOCOL_UDP == ip6_locate_header (p0, ip0,
+ IP_PROTOCOL_UDP,
+ &udp_offset0)))
+ {
+ udp0 = (udp_header_t *) ((u8 *) ip0 + udp_offset0);
+ /* Don't verify UDP checksum for packets with explicit zero checksum. */
+ good_l4_checksum0 |= type0 == IP_BUILTIN_PROTOCOL_UDP
+ && udp0->checksum == 0;
+ /* Verify UDP length. */
+ ip_len0 = clib_net_to_host_u16 (ip0->payload_length);
+ udp_len0 = clib_net_to_host_u16 (udp0->length);
+ len_diff0 = ip_len0 - udp_len0;
+ }
+ if (PREDICT_TRUE (IP_PROTOCOL_UDP == ip6_locate_header (p1, ip1,
+ IP_PROTOCOL_UDP,
+ &udp_offset1)))
+ {
+ udp1 = (udp_header_t *) ((u8 *) ip1 + udp_offset1);
+ /* Don't verify UDP checksum for packets with explicit zero checksum. */
+ good_l4_checksum1 |= type1 == IP_BUILTIN_PROTOCOL_UDP
+ && udp1->checksum == 0;
+ /* Verify UDP length. */
+ ip_len1 = clib_net_to_host_u16 (ip1->payload_length);
+ udp_len1 = clib_net_to_host_u16 (udp1->length);
+ len_diff1 = ip_len1 - udp_len1;
+ }
+
+ good_l4_checksum0 |= type0 == IP_BUILTIN_PROTOCOL_UNKNOWN;
+ good_l4_checksum1 |= type1 == IP_BUILTIN_PROTOCOL_UNKNOWN;
+
+ len_diff0 = type0 == IP_BUILTIN_PROTOCOL_UDP ? len_diff0 : 0;
+ len_diff1 = type1 == IP_BUILTIN_PROTOCOL_UDP ? len_diff1 : 0;
+
+ if (PREDICT_FALSE (type0 != IP_BUILTIN_PROTOCOL_UNKNOWN
+ && !good_l4_checksum0
+ && !(flags0 & IP_BUFFER_L4_CHECKSUM_COMPUTED)))
+ {
+ flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, p0);
+ good_l4_checksum0 =
+ (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ }
+ if (PREDICT_FALSE (type1 != IP_BUILTIN_PROTOCOL_UNKNOWN
+ && !good_l4_checksum1
+ && !(flags1 & IP_BUFFER_L4_CHECKSUM_COMPUTED)))
+ {
+ flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, p1);
+ good_l4_checksum1 =
+ (flags1 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ }
+
+ error0 = error1 = IP6_ERROR_UNKNOWN_PROTOCOL;
+
+ error0 = len_diff0 < 0 ? IP6_ERROR_UDP_LENGTH : error0;
+ error1 = len_diff1 < 0 ? IP6_ERROR_UDP_LENGTH : error1;
+
+ ASSERT (IP6_ERROR_UDP_CHECKSUM + IP_BUILTIN_PROTOCOL_UDP ==
+ IP6_ERROR_UDP_CHECKSUM);
+ ASSERT (IP6_ERROR_UDP_CHECKSUM + IP_BUILTIN_PROTOCOL_ICMP ==
+ IP6_ERROR_ICMP_CHECKSUM);
+ error0 =
+ (!good_l4_checksum0 ? IP6_ERROR_UDP_CHECKSUM + type0 : error0);
+ error1 =
+ (!good_l4_checksum1 ? IP6_ERROR_UDP_CHECKSUM + type1 : error1);
+
+ /* Drop packets from unroutable hosts. */
+ /* If this is a neighbor solicitation (ICMP), skip source RPF check */
+ if (error0 == IP6_ERROR_UNKNOWN_PROTOCOL &&
+ type0 != IP_BUILTIN_PROTOCOL_ICMP &&
+ !ip6_address_is_link_local_unicast (&ip0->src_address))
+ {
+ u32 src_adj_index0 = ip6_src_lookup_for_packet (im, p0, ip0);
+ error0 = (ADJ_INDEX_INVALID == src_adj_index0
+ ? IP6_ERROR_SRC_LOOKUP_MISS : error0);
+ }
+ if (error1 == IP6_ERROR_UNKNOWN_PROTOCOL &&
+ type1 != IP_BUILTIN_PROTOCOL_ICMP &&
+ !ip6_address_is_link_local_unicast (&ip1->src_address))
+ {
+ u32 src_adj_index1 = ip6_src_lookup_for_packet (im, p1, ip1);
+ error1 = (ADJ_INDEX_INVALID == src_adj_index1
+ ? IP6_ERROR_SRC_LOOKUP_MISS : error1);
+ }
+
+ next0 =
+ error0 != IP6_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next0;
+ next1 =
+ error1 != IP6_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next1;
+
+ p0->error = error_node->errors[error0];
+ p1->error = error_node->errors[error1];
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, pi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ ip6_header_t *ip0;
+ udp_header_t *udp0;
+ u32 pi0, ip_len0, udp_len0, flags0, next0;
+ i32 len_diff0;
+ u8 error0, type0, good_l4_checksum0;
+ u32 udp_offset0;
+
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+
+ ip0 = vlib_buffer_get_current (p0);
+
+ type0 = lm->builtin_protocol_by_ip_protocol[ip0->protocol];
+ next0 = lm->local_next_by_ip_protocol[ip0->protocol];
+
+ flags0 = p0->flags;
+
+ good_l4_checksum0 = (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ len_diff0 = 0;
+
+ /* Skip HBH local processing */
+ if (PREDICT_FALSE
+ (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ ip6_hop_by_hop_ext_t *ext_hdr =
+ (ip6_hop_by_hop_ext_t *) ip6_next_header (ip0);
+ next0 = lm->local_next_by_ip_protocol[ext_hdr->next_hdr];
+ type0 = lm->builtin_protocol_by_ip_protocol[ext_hdr->next_hdr];
+ }
+ if (PREDICT_TRUE (IP_PROTOCOL_UDP == ip6_locate_header (p0, ip0,
+ IP_PROTOCOL_UDP,
+ &udp_offset0)))
+ {
+ udp0 = (udp_header_t *) ((u8 *) ip0 + udp_offset0);
+ /* Don't verify UDP checksum for packets with explicit zero checksum. */
+ good_l4_checksum0 |= type0 == IP_BUILTIN_PROTOCOL_UDP
+ && udp0->checksum == 0;
+ /* Verify UDP length. */
+ ip_len0 = clib_net_to_host_u16 (ip0->payload_length);
+ udp_len0 = clib_net_to_host_u16 (udp0->length);
+ len_diff0 = ip_len0 - udp_len0;
+ }
+
+ good_l4_checksum0 |= type0 == IP_BUILTIN_PROTOCOL_UNKNOWN;
+ len_diff0 = type0 == IP_BUILTIN_PROTOCOL_UDP ? len_diff0 : 0;
+
+ if (PREDICT_FALSE (type0 != IP_BUILTIN_PROTOCOL_UNKNOWN
+ && !good_l4_checksum0
+ && !(flags0 & IP_BUFFER_L4_CHECKSUM_COMPUTED)))
+ {
+ flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, p0);
+ good_l4_checksum0 =
+ (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ }
+
+ error0 = IP6_ERROR_UNKNOWN_PROTOCOL;
+
+ error0 = len_diff0 < 0 ? IP6_ERROR_UDP_LENGTH : error0;
+
+ ASSERT (IP6_ERROR_UDP_CHECKSUM + IP_BUILTIN_PROTOCOL_UDP ==
+ IP6_ERROR_UDP_CHECKSUM);
+ ASSERT (IP6_ERROR_UDP_CHECKSUM + IP_BUILTIN_PROTOCOL_ICMP ==
+ IP6_ERROR_ICMP_CHECKSUM);
+ error0 =
+ (!good_l4_checksum0 ? IP6_ERROR_UDP_CHECKSUM + type0 : error0);
+
+ /* If this is a neighbor solicitation (ICMP), skip source RPF check */
+ if (error0 == IP6_ERROR_UNKNOWN_PROTOCOL &&
+ type0 != IP_BUILTIN_PROTOCOL_ICMP &&
+ !ip6_address_is_link_local_unicast (&ip0->src_address))
+ {
+ u32 src_adj_index0 = ip6_src_lookup_for_packet (im, p0, ip0);
+ error0 = (ADJ_INDEX_INVALID == src_adj_index0
+ ? IP6_ERROR_SRC_LOOKUP_MISS : error0);
+ }
+
+ next0 =
+ error0 != IP6_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next0;
+
+ p0->error = error_node->errors[error0];
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_local_node, static) =
+{
+ .function = ip6_local,
+ .name = "ip6-local",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip6_forward_next_trace,
+ .n_next_nodes = IP_LOCAL_N_NEXT,
+ .next_nodes =
+ {
+ [IP_LOCAL_NEXT_DROP] = "error-drop",
+ [IP_LOCAL_NEXT_PUNT] = "error-punt",
+ [IP_LOCAL_NEXT_UDP_LOOKUP] = "ip6-udp-lookup",
+ [IP_LOCAL_NEXT_ICMP] = "ip6-icmp-input",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_local_node, ip6_local);
+
+void
+ip6_register_protocol (u32 protocol, u32 node_index)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ ip6_main_t *im = &ip6_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+
+ ASSERT (protocol < ARRAY_LEN (lm->local_next_by_ip_protocol));
+ lm->local_next_by_ip_protocol[protocol] =
+ vlib_node_add_next (vm, ip6_local_node.index, node_index);
+}
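+
+/*
+ * Typical use of ip6_register_protocol (sketch, kept under #if 0): a
+ * protocol node registers itself so that ip6-local dispatches matching
+ * packets to it. The node name and init function are hypothetical;
+ * protocol 89 is OSPF.
+ */
+#if 0
+static clib_error_t *
+my_proto_init (vlib_main_t * vm)
+{
+ ip6_register_protocol (89 /* OSPF */, my_proto_node.index);
+ return 0;
+}
+VLIB_INIT_FUNCTION (my_proto_init);
+#endif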
+
+typedef enum
+{
+ IP6_DISCOVER_NEIGHBOR_NEXT_DROP,
+ IP6_DISCOVER_NEIGHBOR_NEXT_REPLY_TX,
+ IP6_DISCOVER_NEIGHBOR_N_NEXT,
+} ip6_discover_neighbor_next_t;
+
+typedef enum
+{
+ IP6_DISCOVER_NEIGHBOR_ERROR_DROP,
+ IP6_DISCOVER_NEIGHBOR_ERROR_REQUEST_SENT,
+ IP6_DISCOVER_NEIGHBOR_ERROR_NO_SOURCE_ADDRESS,
+} ip6_discover_neighbor_error_t;
+
+static uword
+ip6_discover_neighbor_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, int is_glean)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip6_main_t *im = &ip6_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+ u32 *from, *to_next_drop;
+ uword n_left_from, n_left_to_next_drop;
+ static f64 time_last_seed_change = -1e100;
+ static u32 hash_seeds[3];
+ static uword hash_bitmap[256 / BITS (uword)];
+ f64 time_now;
+ int bogus_length;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip6_forward_next_trace (vm, node, frame, VLIB_TX);
+
+ time_now = vlib_time_now (vm);
+ if (time_now - time_last_seed_change > 1e-3)
+ {
+ uword i;
+ u32 *r = clib_random_buffer_get_data (&vm->random_buffer,
+ sizeof (hash_seeds));
+ for (i = 0; i < ARRAY_LEN (hash_seeds); i++)
+ hash_seeds[i] = r[i];
+
+ /* Mark all hash keys as not seen before. */
+ for (i = 0; i < ARRAY_LEN (hash_bitmap); i++)
+ hash_bitmap[i] = 0;
+
+ time_last_seed_change = time_now;
+ }
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, IP6_DISCOVER_NEIGHBOR_NEXT_DROP,
+ to_next_drop, n_left_to_next_drop);
+
+ while (n_left_from > 0 && n_left_to_next_drop > 0)
+ {
+ vlib_buffer_t *p0;
+ ip6_header_t *ip0;
+ u32 pi0, adj_index0, a0, b0, c0, m0, sw_if_index0, drop0;
+ uword bm0;
+ ip_adjacency_t *adj0;
+ vnet_hw_interface_t *hw_if0;
+ u32 next0;
+
+ pi0 = from[0];
+
+ p0 = vlib_get_buffer (vm, pi0);
+
+ adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+
+ ip0 = vlib_buffer_get_current (p0);
+
+ adj0 = ip_get_adjacency (lm, adj_index0);
+
+ if (!is_glean)
+ {
+ ip0->dst_address.as_u64[0] =
+ adj0->sub_type.nbr.next_hop.ip6.as_u64[0];
+ ip0->dst_address.as_u64[1] =
+ adj0->sub_type.nbr.next_hop.ip6.as_u64[1];
+ }
+
+ a0 = hash_seeds[0];
+ b0 = hash_seeds[1];
+ c0 = hash_seeds[2];
+
+ sw_if_index0 = adj0->rewrite_header.sw_if_index;
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = sw_if_index0;
+
+ a0 ^= sw_if_index0;
+ b0 ^= ip0->dst_address.as_u32[0];
+ c0 ^= ip0->dst_address.as_u32[1];
+
+ hash_v3_mix32 (a0, b0, c0);
+
+ b0 ^= ip0->dst_address.as_u32[2];
+ c0 ^= ip0->dst_address.as_u32[3];
+
+ hash_v3_finalize32 (a0, b0, c0);
+
+ c0 &= BITS (hash_bitmap) - 1;
+ c0 = c0 / BITS (uword);
+ m0 = (uword) 1 << (c0 % BITS (uword));
+
+ bm0 = hash_bitmap[c0];
+ drop0 = (bm0 & m0) != 0;
+
+ /* Mark it as seen. */
+ hash_bitmap[c0] = bm0 | m0;
+
+ from += 1;
+ n_left_from -= 1;
+ to_next_drop[0] = pi0;
+ to_next_drop += 1;
+ n_left_to_next_drop -= 1;
+
+ hw_if0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
+
+ /* If the interface is link-down, drop the pkt */
+ if (!(hw_if0->flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
+ drop0 = 1;
+
+ p0->error =
+ node->errors[drop0 ? IP6_DISCOVER_NEIGHBOR_ERROR_DROP
+ : IP6_DISCOVER_NEIGHBOR_ERROR_REQUEST_SENT];
+ if (drop0)
+ continue;
+
+ /*
+ * The adj has been updated to a rewrite, but the DPO that got us here
+ * hasn't been updated yet. No big deal; we'll drop while we wait.
+ */
+ if (IP_LOOKUP_NEXT_REWRITE == adj0->lookup_next_index)
+ continue;
+
+ {
+ u32 bi0 = 0;
+ icmp6_neighbor_solicitation_header_t *h0;
+ vlib_buffer_t *b0;
+
+ h0 = vlib_packet_template_get_packet
+ (vm, &im->discover_neighbor_packet_template, &bi0);
+
+ /*
+ * Build ethernet header.
+ * Choose source address based on destination lookup
+ * adjacency.
+ */
+ if (ip6_src_address_for_packet (lm,
+ sw_if_index0,
+ &h0->ip.src_address))
+ {
+ /* There is no address on the interface */
+ p0->error =
+ node->errors[IP6_DISCOVER_NEIGHBOR_ERROR_NO_SOURCE_ADDRESS];
+ vlib_buffer_free (vm, &bi0, 1);
+ continue;
+ }
+
+ /*
+ * Destination address is a solicited node multicast address.
+ * We need to fill in
+ * the low 24 bits with low 24 bits of target's address.
+ */
+ h0->ip.dst_address.as_u8[13] = ip0->dst_address.as_u8[13];
+ h0->ip.dst_address.as_u8[14] = ip0->dst_address.as_u8[14];
+ h0->ip.dst_address.as_u8[15] = ip0->dst_address.as_u8[15];
+
+ h0->neighbor.target_address = ip0->dst_address;
+
+ clib_memcpy (h0->link_layer_option.ethernet_address,
+ hw_if0->hw_address, vec_len (hw_if0->hw_address));
+
+ /* $$$$ it appears we need this; why is the checksum non-zero? */
+ h0->neighbor.icmp.checksum = 0;
+ h0->neighbor.icmp.checksum =
+ ip6_tcp_udp_icmp_compute_checksum (vm, 0, &h0->ip,
+ &bogus_length);
+
+ ASSERT (bogus_length == 0);
+
+ vlib_buffer_copy_trace_flag (vm, p0, bi0);
+ b0 = vlib_get_buffer (vm, bi0);
+ vnet_buffer (b0)->sw_if_index[VLIB_TX]
+ = vnet_buffer (p0)->sw_if_index[VLIB_TX];
+
+ /* Add rewrite/encap string. */
+ vnet_rewrite_one_header (adj0[0], h0, sizeof (ethernet_header_t));
+ vlib_buffer_advance (b0, -adj0->rewrite_header.data_bytes);
+
+ next0 = IP6_DISCOVER_NEIGHBOR_NEXT_REPLY_TX;
+
+ vlib_set_next_frame_buffer (vm, node, next0, bi0);
+ }
+ }
+
+ vlib_put_next_frame (vm, node, IP6_DISCOVER_NEIGHBOR_NEXT_DROP,
+ n_left_to_next_drop);
+ }
+
+ return frame->n_vectors;
+}
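+
+/*
+ * The solicitation throttle used above, restated in isolation (sketch,
+ * kept under #if 0): hash the (interface, destination) key into a 256-bit
+ * bitmap; a set bit means a solicitation was already sent since the seeds
+ * were last rotated, so the packet is dropped rather than generating
+ * another request. This is a common formulation of the idea; the node
+ * above folds the word and bit indices slightly differently.
+ */
+#if 0
+static int
+nd_throttle_check_and_set (u32 seeds[3], uword bitmap[256 / BITS (uword)],
+ u32 sw_if_index, ip6_address_t * dst)
+{
+ u32 a = seeds[0] ^ sw_if_index;
+ u32 b = seeds[1] ^ dst->as_u32[0];
+ u32 c = seeds[2] ^ dst->as_u32[1];
+
+ hash_v3_mix32 (a, b, c);
+ b ^= dst->as_u32[2];
+ c ^= dst->as_u32[3];
+ hash_v3_finalize32 (a, b, c);
+
+ c &= 255; /* bit index into the 256-bit bitmap */
+ u32 w = c / BITS (uword); /* word holding the bit */
+ uword m = (uword) 1 << (c % BITS (uword));
+
+ int seen = (bitmap[w] & m) != 0;
+ bitmap[w] |= m; /* mark as seen */
+ return seen;
+}
+#endif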
+
+static uword
+ip6_discover_neighbor (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return (ip6_discover_neighbor_inline (vm, node, frame, 0));
+}
+
+static uword
+ip6_glean (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return (ip6_discover_neighbor_inline (vm, node, frame, 1));
+}
+
+static char *ip6_discover_neighbor_error_strings[] = {
+ [IP6_DISCOVER_NEIGHBOR_ERROR_DROP] = "address overflow drops",
+ [IP6_DISCOVER_NEIGHBOR_ERROR_REQUEST_SENT] = "neighbor solicitations sent",
+ [IP6_DISCOVER_NEIGHBOR_ERROR_NO_SOURCE_ADDRESS]
+ = "no source address for ND solicitation",
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_discover_neighbor_node) =
+{
+ .function = ip6_discover_neighbor,
+ .name = "ip6-discover-neighbor",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip6_forward_next_trace,
+ .n_errors = ARRAY_LEN (ip6_discover_neighbor_error_strings),
+ .error_strings = ip6_discover_neighbor_error_strings,
+ .n_next_nodes = IP6_DISCOVER_NEIGHBOR_N_NEXT,
+ .next_nodes =
+ {
+ [IP6_DISCOVER_NEIGHBOR_NEXT_DROP] = "error-drop",
+ [IP6_DISCOVER_NEIGHBOR_NEXT_REPLY_TX] = "interface-output",
+ },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_glean_node) =
+{
+ .function = ip6_glean,
+ .name = "ip6-glean",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip6_forward_next_trace,
+ .n_errors = ARRAY_LEN (ip6_discover_neighbor_error_strings),
+ .error_strings = ip6_discover_neighbor_error_strings,
+ .n_next_nodes = IP6_DISCOVER_NEIGHBOR_N_NEXT,
+ .next_nodes =
+ {
+ [IP6_DISCOVER_NEIGHBOR_NEXT_DROP] = "error-drop",
+ [IP6_DISCOVER_NEIGHBOR_NEXT_REPLY_TX] = "interface-output",
+ },
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+ip6_probe_neighbor (vlib_main_t * vm, ip6_address_t * dst, u32 sw_if_index)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip6_main_t *im = &ip6_main;
+ icmp6_neighbor_solicitation_header_t *h;
+ ip6_address_t *src;
+ ip_interface_address_t *ia;
+ ip_adjacency_t *adj;
+ vnet_hw_interface_t *hi;
+ vnet_sw_interface_t *si;
+ vlib_buffer_t *b;
+ u32 bi = 0;
+ int bogus_length;
+
+ si = vnet_get_sw_interface (vnm, sw_if_index);
+
+ if (!(si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP))
+ {
+ return clib_error_return (0, "%U: interface %U down",
+ format_ip6_address, dst,
+ format_vnet_sw_if_index_name, vnm,
+ sw_if_index);
+ }
+
+ src =
+ ip6_interface_address_matching_destination (im, dst, sw_if_index, &ia);
+ if (!src)
+ {
+ vnm->api_errno = VNET_API_ERROR_NO_MATCHING_INTERFACE;
+ return clib_error_return
+ (0, "no matching interface address for destination %U (interface %U)",
+ format_ip6_address, dst,
+ format_vnet_sw_if_index_name, vnm, sw_if_index);
+ }
+
+ h =
+ vlib_packet_template_get_packet (vm,
+ &im->discover_neighbor_packet_template,
+ &bi);
+
+ hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+
+ /* Destination address is a solicited node multicast address. We need to fill in
+ the low 24 bits with low 24 bits of target's address. */
+ h->ip.dst_address.as_u8[13] = dst->as_u8[13];
+ h->ip.dst_address.as_u8[14] = dst->as_u8[14];
+ h->ip.dst_address.as_u8[15] = dst->as_u8[15];
+
+ h->ip.src_address = src[0];
+ h->neighbor.target_address = dst[0];
+
+ clib_memcpy (h->link_layer_option.ethernet_address, hi->hw_address,
+ vec_len (hi->hw_address));
+
+ h->neighbor.icmp.checksum =
+ ip6_tcp_udp_icmp_compute_checksum (vm, 0, &h->ip, &bogus_length);
+ ASSERT (bogus_length == 0);
+
+ b = vlib_get_buffer (vm, bi);
+ vnet_buffer (b)->sw_if_index[VLIB_RX] =
+ vnet_buffer (b)->sw_if_index[VLIB_TX] = sw_if_index;
+
+ /* Add encapsulation string for software interface (e.g. ethernet header). */
+ adj = ip_get_adjacency (&im->lookup_main, ia->neighbor_probe_adj_index);
+ vnet_rewrite_one_header (adj[0], h, sizeof (ethernet_header_t));
+ vlib_buffer_advance (b, -adj->rewrite_header.data_bytes);
+
+ {
+ vlib_frame_t *f = vlib_get_frame_to_node (vm, hi->output_node_index);
+ u32 *to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi;
+ f->n_vectors = 1;
+ vlib_put_frame_to_node (vm, hi->output_node_index, f);
+ }
+
+ return /* no error */ 0;
+}
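+
+/*
+ * The solicited-node multicast fill-in used above, as a worked example
+ * (sketch, kept under #if 0). The template destination is initialized to
+ * ff02::1:ff00:0 by ip6_set_solicited_node_multicast_address; only the low
+ * 24 bits come from the target. E.g. for target 2001:db8::1:2345:6789 the
+ * solicited-node group is ff02::1:ff45:6789. The helper name is invented.
+ */
+#if 0
+static void
+ip6_fill_solicited_node_dst (ip6_address_t * dst, const ip6_address_t * target)
+{
+ dst->as_u8[13] = target->as_u8[13];
+ dst->as_u8[14] = target->as_u8[14];
+ dst->as_u8[15] = target->as_u8[15];
+}
+#endif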
+
+typedef enum
+{
+ IP6_REWRITE_NEXT_DROP,
+ IP6_REWRITE_NEXT_ICMP_ERROR,
+} ip6_rewrite_next_t;
+
+always_inline uword
+ip6_rewrite_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, int is_midchain)
+{
+ ip_lookup_main_t *lm = &ip6_main.lookup_main;
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left_from, n_left_to_next, *to_next, next_index;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip6_input_node.index);
+
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ u32 cpu_index = os_get_cpu_number ();
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ ip_adjacency_t *adj0, *adj1;
+ vlib_buffer_t *p0, *p1;
+ ip6_header_t *ip0, *ip1;
+ u32 pi0, rw_len0, next0, error0, adj_index0;
+ u32 pi1, rw_len1, next1, error1, adj_index1;
+ u32 tx_sw_if_index0, tx_sw_if_index1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->pre_data, 32, STORE);
+ CLIB_PREFETCH (p3->pre_data, 32, STORE);
+
+ CLIB_PREFETCH (p2->data, sizeof (ip0[0]), STORE);
+ CLIB_PREFETCH (p3->data, sizeof (ip0[0]), STORE);
+ }
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+
+ adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+ adj_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];
+
+ /* We should never rewrite a pkt using the MISS adjacency */
+ ASSERT (adj_index0 && adj_index1);
+
+ ip0 = vlib_buffer_get_current (p0);
+ ip1 = vlib_buffer_get_current (p1);
+
+ error0 = error1 = IP6_ERROR_NONE;
+ next0 = next1 = IP6_REWRITE_NEXT_DROP;
+
+ if (PREDICT_TRUE (!(p0->flags & VNET_BUFFER_LOCALLY_ORIGINATED)))
+ {
+ i32 hop_limit0 = ip0->hop_limit;
+
+ /* Input node should have rejected packets with hop limit 0. */
+ ASSERT (ip0->hop_limit > 0);
+
+ hop_limit0 -= 1;
+
+ ip0->hop_limit = hop_limit0;
+
+ /*
+ * If the hop count drops below 1 when forwarding, generate
+ * an ICMP response.
+ */
+ if (PREDICT_FALSE (hop_limit0 <= 0))
+ {
+ error0 = IP6_ERROR_TIME_EXPIRED;
+ next0 = IP6_REWRITE_NEXT_ICMP_ERROR;
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ icmp6_error_set_vnet_buffer (p0, ICMP6_time_exceeded,
+ ICMP6_time_exceeded_ttl_exceeded_in_transit,
+ 0);
+ }
+ }
+ else
+ {
+ p0->flags &= ~VNET_BUFFER_LOCALLY_ORIGINATED;
+ }
+ if (PREDICT_TRUE (!(p1->flags & VNET_BUFFER_LOCALLY_ORIGINATED)))
+ {
+ i32 hop_limit1 = ip1->hop_limit;
+
+ /* Input node should have rejected packets with hop limit 0. */
+ ASSERT (ip1->hop_limit > 0);
+
+ hop_limit1 -= 1;
+
+ ip1->hop_limit = hop_limit1;
+
+ /*
+ * If the hop count drops below 1 when forwarding, generate
+ * an ICMP response.
+ */
+ if (PREDICT_FALSE (hop_limit1 <= 0))
+ {
+ error1 = IP6_ERROR_TIME_EXPIRED;
+ next1 = IP6_REWRITE_NEXT_ICMP_ERROR;
+ vnet_buffer (p1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ icmp6_error_set_vnet_buffer (p1, ICMP6_time_exceeded,
+ ICMP6_time_exceeded_ttl_exceeded_in_transit,
+ 0);
+ }
+ }
+ else
+ {
+ p1->flags &= ~VNET_BUFFER_LOCALLY_ORIGINATED;
+ }
+ adj0 = ip_get_adjacency (lm, adj_index0);
+ adj1 = ip_get_adjacency (lm, adj_index1);
+
+ rw_len0 = adj0[0].rewrite_header.data_bytes;
+ rw_len1 = adj1[0].rewrite_header.data_bytes;
+ vnet_buffer (p0)->ip.save_rewrite_length = rw_len0;
+ vnet_buffer (p1)->ip.save_rewrite_length = rw_len1;
+
+ vlib_increment_combined_counter (&adjacency_counters,
+ cpu_index, adj_index0,
+ /* packet increment */ 0,
+ /* byte increment */ rw_len0);
+ vlib_increment_combined_counter (&adjacency_counters,
+ cpu_index, adj_index1,
+ /* packet increment */ 0,
+ /* byte increment */ rw_len1);
+
+ /* Check MTU of outgoing interface. */
+ error0 =
+ (vlib_buffer_length_in_chain (vm, p0) >
+ adj0[0].
+ rewrite_header.max_l3_packet_bytes ? IP6_ERROR_MTU_EXCEEDED :
+ error0);
+ error1 =
+ (vlib_buffer_length_in_chain (vm, p1) >
+ adj1[0].
+ rewrite_header.max_l3_packet_bytes ? IP6_ERROR_MTU_EXCEEDED :
+ error1);
+
+ /* Don't adjust the buffer for the hop-limit error; the icmp-error node
+ * wants to see the IP header */
+ if (PREDICT_TRUE (error0 == IP6_ERROR_NONE))
+ {
+ p0->current_data -= rw_len0;
+ p0->current_length += rw_len0;
+
+ tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index;
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = tx_sw_if_index0;
+ next0 = adj0[0].rewrite_header.next_index;
+
+ vnet_feature_arc_start (lm->output_feature_arc_index,
+ tx_sw_if_index0, &next0, p0);
+ }
+ if (PREDICT_TRUE (error1 == IP6_ERROR_NONE))
+ {
+ p1->current_data -= rw_len1;
+ p1->current_length += rw_len1;
+
+ tx_sw_if_index1 = adj1[0].rewrite_header.sw_if_index;
+ vnet_buffer (p1)->sw_if_index[VLIB_TX] = tx_sw_if_index1;
+ next1 = adj1[0].rewrite_header.next_index;
+
+ vnet_feature_arc_start (lm->output_feature_arc_index,
+ tx_sw_if_index1, &next1, p1);
+ }
+
+ /* Guess we are only writing on simple Ethernet header. */
+ vnet_rewrite_two_headers (adj0[0], adj1[0],
+ ip0, ip1, sizeof (ethernet_header_t));
+
+ if (is_midchain)
+ {
+ adj0->sub_type.midchain.fixup_func (vm, adj0, p0);
+ adj1->sub_type.midchain.fixup_func (vm, adj1, p1);
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, pi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ ip_adjacency_t *adj0;
+ vlib_buffer_t *p0;
+ ip6_header_t *ip0;
+ u32 pi0, rw_len0;
+ u32 adj_index0, next0, error0;
+ u32 tx_sw_if_index0;
+
+ pi0 = to_next[0] = from[0];
+
+ p0 = vlib_get_buffer (vm, pi0);
+
+ adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+
+ /* We should never rewrite a pkt using the MISS adjacency */
+ ASSERT (adj_index0);
+
+ adj0 = ip_get_adjacency (lm, adj_index0);
+
+ ip0 = vlib_buffer_get_current (p0);
+
+ error0 = IP6_ERROR_NONE;
+ next0 = IP6_REWRITE_NEXT_DROP;
+
+ /* Check hop limit */
+ if (PREDICT_TRUE (!(p0->flags & VNET_BUFFER_LOCALLY_ORIGINATED)))
+ {
+ i32 hop_limit0 = ip0->hop_limit;
+
+ ASSERT (ip0->hop_limit > 0);
+
+ hop_limit0 -= 1;
+
+ ip0->hop_limit = hop_limit0;
+
+ if (PREDICT_FALSE (hop_limit0 <= 0))
+ {
+ /*
+ * If the hop count drops below 1 when forwarding, generate
+ * an ICMP response.
+ */
+ error0 = IP6_ERROR_TIME_EXPIRED;
+ next0 = IP6_REWRITE_NEXT_ICMP_ERROR;
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ icmp6_error_set_vnet_buffer (p0, ICMP6_time_exceeded,
+ ICMP6_time_exceeded_ttl_exceeded_in_transit,
+ 0);
+ }
+ }
+ else
+ {
+ p0->flags &= ~VNET_BUFFER_LOCALLY_ORIGINATED;
+ }
+
+ /* Guess we are only writing on simple Ethernet header. */
+ vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t));
+
+ /* Update packet buffer attributes/set output interface. */
+ rw_len0 = adj0[0].rewrite_header.data_bytes;
+ vnet_buffer (p0)->ip.save_rewrite_length = rw_len0;
+
+ vlib_increment_combined_counter (&adjacency_counters,
+ cpu_index, adj_index0,
+ /* packet increment */ 0,
+ /* byte increment */ rw_len0);
+
+ /* Check MTU of outgoing interface. */
+ error0 =
+ (vlib_buffer_length_in_chain (vm, p0) >
+ adj0[0].
+ rewrite_header.max_l3_packet_bytes ? IP6_ERROR_MTU_EXCEEDED :
+ error0);
+
+ /* Don't adjust the buffer for the hop-limit error; the icmp-error node
+ * wants to see the IP header */
+ if (PREDICT_TRUE (error0 == IP6_ERROR_NONE))
+ {
+ p0->current_data -= rw_len0;
+ p0->current_length += rw_len0;
+
+ tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index;
+
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = tx_sw_if_index0;
+ next0 = adj0[0].rewrite_header.next_index;
+
+ vnet_feature_arc_start (lm->output_feature_arc_index,
+ tx_sw_if_index0, &next0, p0);
+ }
+
+ if (is_midchain)
+ {
+ adj0->sub_type.midchain.fixup_func (vm, adj0, p0);
+ }
+
+ p0->error = error_node->errors[error0];
+
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ /* Need to do trace after rewrites to pick up new packet data. */
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip6_forward_next_trace (vm, node, frame, VLIB_TX);
+
+ return frame->n_vectors;
+}
+
+static uword
+ip6_rewrite (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return ip6_rewrite_inline (vm, node, frame,
+ /* midchain */ 0);
+}
+
+static uword
+ip6_midchain (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return ip6_rewrite_inline (vm, node, frame,
+ /* midchain */ 1);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_midchain_node) =
+{
+ .function = ip6_midchain,
+ .name = "ip6-midchain",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip6_forward_next_trace,
+ .sibling_of = "ip6-rewrite",
+ };
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_midchain_node, ip6_midchain);
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_rewrite_node) =
+{
+ .function = ip6_rewrite,
+ .name = "ip6-rewrite",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip6_rewrite_trace,
+ .n_next_nodes = 2,
+ .next_nodes =
+ {
+ [IP6_REWRITE_NEXT_DROP] = "error-drop",
+ [IP6_REWRITE_NEXT_ICMP_ERROR] = "ip6-icmp-error",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_rewrite_node, ip6_rewrite);
+
+/*
+ * Hop-by-Hop handling
+ */
+
+ip6_hop_by_hop_main_t ip6_hop_by_hop_main;
+
+#define foreach_ip6_hop_by_hop_error \
+_(PROCESSED, "pkts with ip6 hop-by-hop options") \
+_(FORMAT, "incorrectly formatted hop-by-hop options") \
+_(UNKNOWN_OPTION, "unknown ip6 hop-by-hop options")
+
+typedef enum
+{
+#define _(sym,str) IP6_HOP_BY_HOP_ERROR_##sym,
+ foreach_ip6_hop_by_hop_error
+#undef _
+ IP6_HOP_BY_HOP_N_ERROR,
+} ip6_hop_by_hop_error_t;
+
+/*
+ * Primary h-b-h handler trace support
+ * We work pretty hard on the problem for obvious reasons
+ */
+typedef struct
+{
+ u32 next_index;
+ u32 trace_len;
+ u8 option_data[256];
+} ip6_hop_by_hop_trace_t;
+
+vlib_node_registration_t ip6_hop_by_hop_node;
+
+static char *ip6_hop_by_hop_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ip6_hop_by_hop_error
+#undef _
+};
+
+static u8 *
+format_ip6_hop_by_hop_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ip6_hop_by_hop_trace_t *t = va_arg (*args, ip6_hop_by_hop_trace_t *);
+ ip6_hop_by_hop_header_t *hbh0;
+ ip6_hop_by_hop_option_t *opt0, *limit0;
+ ip6_hop_by_hop_main_t *hm = &ip6_hop_by_hop_main;
+
+ u8 type0;
+
+ hbh0 = (ip6_hop_by_hop_header_t *) t->option_data;
+
+ s = format (s, "IP6_HOP_BY_HOP: next index %d len %d traced %d",
+ t->next_index, (hbh0->length + 1) << 3, t->trace_len);
+
+ opt0 = (ip6_hop_by_hop_option_t *) (hbh0 + 1);
+ limit0 = (ip6_hop_by_hop_option_t *) ((u8 *) hbh0 + t->trace_len);
+
+ while (opt0 < limit0)
+ {
+ type0 = opt0->type;
+ switch (type0)
+ {
+ case 0: /* Pad, just stop */
+ opt0 = (ip6_hop_by_hop_option_t *) ((u8 *) opt0 + 1);
+ break;
+
+ default:
+ if (hm->trace[type0])
+ {
+ s = (*hm->trace[type0]) (s, opt0);
+ }
+ else
+ {
+ s =
+ format (s, "\n unrecognized option %d length %d", type0,
+ opt0->length);
+ }
+ opt0 =
+ (ip6_hop_by_hop_option_t *) (((u8 *) opt0) + opt0->length +
+ sizeof (ip6_hop_by_hop_option_t));
+ break;
+ }
+ }
+ return s;
+}
+
+always_inline u8
+ip6_scan_hbh_options (vlib_buffer_t * b0,
+ ip6_header_t * ip0,
+ ip6_hop_by_hop_header_t * hbh0,
+ ip6_hop_by_hop_option_t * opt0,
+ ip6_hop_by_hop_option_t * limit0, u32 * next0)
+{
+ ip6_hop_by_hop_main_t *hm = &ip6_hop_by_hop_main;
+ u8 type0;
+ u8 error0 = 0;
+
+ while (opt0 < limit0)
+ {
+ type0 = opt0->type;
+ switch (type0)
+ {
+ case 0: /* Pad1 */
+ opt0 = (ip6_hop_by_hop_option_t *) ((u8 *) opt0 + 1);
+ continue;
+ case 1: /* PadN */
+ break;
+ default:
+ if (hm->options[type0])
+ {
+ if ((*hm->options[type0]) (b0, ip0, opt0) < 0)
+ {
+ error0 = IP6_HOP_BY_HOP_ERROR_FORMAT;
+ return (error0);
+ }
+ }
+ else
+ {
+ /* Unrecognized option: the two high-order bits of the type select the action */
+ switch (opt0->type & HBH_OPTION_TYPE_HIGH_ORDER_BITS)
+ {
+ case HBH_OPTION_TYPE_SKIP_UNKNOWN:
+ break;
+ case HBH_OPTION_TYPE_DISCARD_UNKNOWN:
+ error0 = IP6_HOP_BY_HOP_ERROR_UNKNOWN_OPTION;
+ *next0 = IP_LOOKUP_NEXT_DROP;
+ break;
+ case HBH_OPTION_TYPE_DISCARD_UNKNOWN_ICMP:
+ error0 = IP6_HOP_BY_HOP_ERROR_UNKNOWN_OPTION;
+ *next0 = IP_LOOKUP_NEXT_ICMP_ERROR;
+ icmp6_error_set_vnet_buffer (b0, ICMP6_parameter_problem,
+ ICMP6_parameter_problem_unrecognized_option,
+ (u8 *) opt0 - (u8 *) ip0);
+ break;
+ case HBH_OPTION_TYPE_DISCARD_UNKNOWN_ICMP_NOT_MCAST:
+ error0 = IP6_HOP_BY_HOP_ERROR_UNKNOWN_OPTION;
+ if (!ip6_address_is_multicast (&ip0->dst_address))
+ {
+ *next0 = IP_LOOKUP_NEXT_ICMP_ERROR;
+ icmp6_error_set_vnet_buffer (b0,
+ ICMP6_parameter_problem,
+ ICMP6_parameter_problem_unrecognized_option,
+ (u8 *) opt0 - (u8 *) ip0);
+ }
+ else
+ {
+ *next0 = IP_LOOKUP_NEXT_DROP;
+ }
+ break;
+ }
+ return (error0);
+ }
+ }
+ opt0 =
+ (ip6_hop_by_hop_option_t *) (((u8 *) opt0) + opt0->length +
+ sizeof (ip6_hop_by_hop_option_t));
+ }
+ return (error0);
+}
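+
+/*
+ * The unknown-option dispatch above follows RFC 2460 section 4.2: the two
+ * high-order bits of the option type select the action. A compact decode
+ * of the four cases (sketch, kept under #if 0; helper name invented):
+ */
+#if 0
+static const char *
+hbh_unknown_option_action (u8 option_type)
+{
+ switch (option_type & HBH_OPTION_TYPE_HIGH_ORDER_BITS)
+ {
+ case HBH_OPTION_TYPE_SKIP_UNKNOWN: /* 00 */
+ return "skip option, continue processing";
+ case HBH_OPTION_TYPE_DISCARD_UNKNOWN: /* 01 */
+ return "discard the packet";
+ case HBH_OPTION_TYPE_DISCARD_UNKNOWN_ICMP: /* 10 */
+ return "discard, send ICMP parameter problem";
+ default: /* 11 */
+ return "discard, send ICMP unless destination is multicast";
+ }
+}
+#endif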
+
+/*
+ * Process the Hop-by-Hop Options header
+ */
+static uword
+ip6_hop_by_hop (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip6_hop_by_hop_node.index);
+ ip6_hop_by_hop_main_t *hm = &ip6_hop_by_hop_main;
+ u32 n_left_from, *from, *to_next;
+ ip_lookup_next_t next_index;
+ ip6_main_t *im = &ip6_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u32 next0, next1;
+ ip6_header_t *ip0, *ip1;
+ ip6_hop_by_hop_header_t *hbh0, *hbh1;
+ ip6_hop_by_hop_option_t *opt0, *limit0, *opt1, *limit1;
+ u8 error0 = 0, error1 = 0;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ /* Speculatively enqueue b0, b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* By default, use the next_index from the adjacency. An HBH option rarely redirects to a different node */
+ u32 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+ ip_adjacency_t *adj0 = ip_get_adjacency (lm, adj_index0);
+ u32 adj_index1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX];
+ ip_adjacency_t *adj1 = ip_get_adjacency (lm, adj_index1);
+
+ next0 = adj0->lookup_next_index;
+ next1 = adj1->lookup_next_index;
+
+ ip0 = vlib_buffer_get_current (b0);
+ ip1 = vlib_buffer_get_current (b1);
+ hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
+ hbh1 = (ip6_hop_by_hop_header_t *) (ip1 + 1);
+ opt0 = (ip6_hop_by_hop_option_t *) (hbh0 + 1);
+ opt1 = (ip6_hop_by_hop_option_t *) (hbh1 + 1);
+ limit0 =
+ (ip6_hop_by_hop_option_t *) ((u8 *) hbh0 +
+ ((hbh0->length + 1) << 3));
+ limit1 =
+ (ip6_hop_by_hop_option_t *) ((u8 *) hbh1 +
+ ((hbh1->length + 1) << 3));
+
+ /*
+ * Basic validity checks
+ */
+ if ((hbh0->length + 1) << 3 >
+ clib_net_to_host_u16 (ip0->payload_length))
+ {
+ error0 = IP6_HOP_BY_HOP_ERROR_FORMAT;
+ next0 = IP_LOOKUP_NEXT_DROP;
+ }
+ else
+ {
+ /* Scan the set of h-b-h options, process ones that we understand */
+ error0 = ip6_scan_hbh_options (b0, ip0, hbh0, opt0, limit0, &next0);
+ }
+
+ /* Check and scan b1 independently; an error on b0 must not skip b1 */
+ if ((hbh1->length + 1) << 3 >
+ clib_net_to_host_u16 (ip1->payload_length))
+ {
+ error1 = IP6_HOP_BY_HOP_ERROR_FORMAT;
+ next1 = IP_LOOKUP_NEXT_DROP;
+ }
+ else
+ {
+ /* Scan the set of h-b-h options, process ones that we understand */
+ error1 = ip6_scan_hbh_options (b1, ip1, hbh1, opt1, limit1, &next1);
+ }
+
+ /* Has the classifier flagged this buffer for special treatment? */
+ if (PREDICT_FALSE
+ ((error0 == 0)
+ && (vnet_buffer (b0)->l2_classify.opaque_index & OI_DECAP)))
+ next0 = hm->next_override;
+
+ /* Has the classifier flagged this buffer for special treatment? */
+ if (PREDICT_FALSE
+ ((error1 == 0)
+ && (vnet_buffer (b1)->l2_classify.opaque_index & OI_DECAP)))
+ next1 = hm->next_override;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ ip6_hop_by_hop_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ u32 trace_len = (hbh0->length + 1) << 3;
+ t->next_index = next0;
+ /* Capture the h-b-h option verbatim */
+ trace_len =
+ trace_len <
+ ARRAY_LEN (t->option_data) ? trace_len :
+ ARRAY_LEN (t->option_data);
+ t->trace_len = trace_len;
+ clib_memcpy (t->option_data, hbh0, trace_len);
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ ip6_hop_by_hop_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ u32 trace_len = (hbh1->length + 1) << 3;
+ t->next_index = next1;
+ /* Capture the h-b-h option verbatim */
+ trace_len =
+ trace_len <
+ ARRAY_LEN (t->option_data) ? trace_len :
+ ARRAY_LEN (t->option_data);
+ t->trace_len = trace_len;
+ clib_memcpy (t->option_data, hbh1, trace_len);
+ }
+
+ }
+
+ b0->error = error_node->errors[error0];
+ b1->error = error_node->errors[error1];
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, next0,
+ next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ ip6_header_t *ip0;
+ ip6_hop_by_hop_header_t *hbh0;
+ ip6_hop_by_hop_option_t *opt0, *limit0;
+ u8 error0 = 0;
+
+ /* Speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ /*
+ * By default, use the next_index from the adjacency.
+ * An HBH option rarely redirects to a different node.
+ */
+ u32 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+ ip_adjacency_t *adj0 = ip_get_adjacency (lm, adj_index0);
+ next0 = adj0->lookup_next_index;
+
+ ip0 = vlib_buffer_get_current (b0);
+ hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
+ opt0 = (ip6_hop_by_hop_option_t *) (hbh0 + 1);
+ limit0 =
+ (ip6_hop_by_hop_option_t *) ((u8 *) hbh0 +
+ ((hbh0->length + 1) << 3));
+
+ /*
+ * Basic validity checks
+ */
+ if ((hbh0->length + 1) << 3 >
+ clib_net_to_host_u16 (ip0->payload_length))
+ {
+ error0 = IP6_HOP_BY_HOP_ERROR_FORMAT;
+ next0 = IP_LOOKUP_NEXT_DROP;
+ goto out0;
+ }
+
+ /* Scan the set of h-b-h options, process ones that we understand */
+ error0 = ip6_scan_hbh_options (b0, ip0, hbh0, opt0, limit0, &next0);
+
+ out0:
+ /* Has the classifier flagged this buffer for special treatment? */
+ if (PREDICT_FALSE
+ ((error0 == 0)
+ && (vnet_buffer (b0)->l2_classify.opaque_index & OI_DECAP)))
+ next0 = hm->next_override;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ip6_hop_by_hop_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ u32 trace_len = (hbh0->length + 1) << 3;
+ t->next_index = next0;
+ /* Capture the h-b-h option verbatim */
+ trace_len =
+ trace_len <
+ ARRAY_LEN (t->option_data) ? trace_len :
+ ARRAY_LEN (t->option_data);
+ t->trace_len = trace_len;
+ clib_memcpy (t->option_data, hbh0, trace_len);
+ }
+
+ b0->error = error_node->errors[error0];
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_hop_by_hop_node) =
+{
+ .function = ip6_hop_by_hop,
+ .name = "ip6-hop-by-hop",
+ .sibling_of = "ip6-lookup",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip6_hop_by_hop_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (ip6_hop_by_hop_error_strings),
+ .error_strings = ip6_hop_by_hop_error_strings,
+ .n_next_nodes = 0,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_hop_by_hop_node, ip6_hop_by_hop);
+
+static clib_error_t *
+ip6_hop_by_hop_init (vlib_main_t * vm)
+{
+ ip6_hop_by_hop_main_t *hm = &ip6_hop_by_hop_main;
+ memset (hm->options, 0, sizeof (hm->options));
+ memset (hm->trace, 0, sizeof (hm->trace));
+ hm->next_override = IP6_LOOKUP_NEXT_POP_HOP_BY_HOP;
+ return (0);
+}
+
+VLIB_INIT_FUNCTION (ip6_hop_by_hop_init);
+
+void
+ip6_hbh_set_next_override (uword next)
+{
+ ip6_hop_by_hop_main_t *hm = &ip6_hop_by_hop_main;
+
+ hm->next_override = next;
+}
+
+int
+ip6_hbh_register_option (u8 option,
+ int options (vlib_buffer_t * b, ip6_header_t * ip,
+ ip6_hop_by_hop_option_t * opt),
+ u8 * trace (u8 * s, ip6_hop_by_hop_option_t * opt))
+{
+ ip6_main_t *im = &ip6_main;
+ ip6_hop_by_hop_main_t *hm = &ip6_hop_by_hop_main;
+
+ ASSERT (option < ARRAY_LEN (hm->options));
+
+ /* Already registered */
+ if (hm->options[option])
+ return (-1);
+
+ hm->options[option] = options;
+ hm->trace[option] = trace;
+
+ /* Set global variable */
+ im->hbh_enabled = 1;
+
+ return (0);
+}
+
+int
+ip6_hbh_unregister_option (u8 option)
+{
+ ip6_main_t *im = &ip6_main;
+ ip6_hop_by_hop_main_t *hm = &ip6_hop_by_hop_main;
+
+ ASSERT (option < ARRAY_LEN (hm->options));
+
+ /* Not registered */
+ if (!hm->options[option])
+ return (-1);
+
+ hm->options[option] = NULL;
+ hm->trace[option] = NULL;
+
+ /* Disable global knob if this was the last option configured */
+ int i;
+ bool found = false;
+ for (i = 0; i < 256; i++)
+ {
+ if (hm->options[i])
+ {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ im->hbh_enabled = 0;
+
+ return (0);
+}
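+
+/*
+ * Illustrative registration of an HBH option handler through the API above
+ * (sketch, kept under #if 0). The option value 0x3e and both callbacks are
+ * invented for the example.
+ */
+#if 0
+static int
+my_hbh_option_handler (vlib_buffer_t * b, ip6_header_t * ip,
+ ip6_hop_by_hop_option_t * opt)
+{
+ /* return 0 to accept the option, < 0 to flag a format error */
+ return 0;
+}
+
+static u8 *
+my_hbh_option_trace (u8 * s, ip6_hop_by_hop_option_t * opt)
+{
+ return format (s, "\n my-option length %d", opt->length);
+}
+
+static clib_error_t *
+my_hbh_init (vlib_main_t * vm)
+{
+ /* returns -1 if option 0x3e is already registered */
+ (void) ip6_hbh_register_option (0x3e, my_hbh_option_handler,
+ my_hbh_option_trace);
+ return 0;
+}
+#endif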
+
+/* Global IP6 main. */
+ip6_main_t ip6_main;
+
+static clib_error_t *
+ip6_lookup_init (vlib_main_t * vm)
+{
+ ip6_main_t *im = &ip6_main;
+ clib_error_t *error;
+ uword i;
+
+ if ((error = vlib_call_init_function (vm, vnet_feature_init)))
+ return error;
+
+ for (i = 0; i < ARRAY_LEN (im->fib_masks); i++)
+ {
+ u32 j, i0, i1;
+
+ i0 = i / 32;
+ i1 = i % 32;
+
+ for (j = 0; j < i0; j++)
+ im->fib_masks[i].as_u32[j] = ~0;
+
+ if (i1)
+ im->fib_masks[i].as_u32[i0] =
+ clib_host_to_net_u32 (pow2_mask (i1) << (32 - i1));
+ }
+
+ ip_lookup_init (&im->lookup_main, /* is_ip6 */ 1);
+
+ if (im->lookup_table_nbuckets == 0)
+ im->lookup_table_nbuckets = IP6_FIB_DEFAULT_HASH_NUM_BUCKETS;
+
+ im->lookup_table_nbuckets = 1 << max_log2 (im->lookup_table_nbuckets);
+
+ if (im->lookup_table_size == 0)
+ im->lookup_table_size = IP6_FIB_DEFAULT_HASH_MEMORY_SIZE;
+
+ BV (clib_bihash_init) (&(im->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash),
+ "ip6 FIB fwding table",
+ im->lookup_table_nbuckets, im->lookup_table_size);
+ BV (clib_bihash_init) (&im->ip6_table[IP6_FIB_TABLE_NON_FWDING].ip6_hash,
+ "ip6 FIB non-fwding table",
+ im->lookup_table_nbuckets, im->lookup_table_size);
+
+ /* Create FIB with index 0 and table id of 0. */
+ fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, 0);
+
+ {
+ pg_node_t *pn;
+ pn = pg_get_node (ip6_lookup_node.index);
+ pn->unformat_edit = unformat_pg_ip6_header;
+ }
+
+ /* Unless explicitly configured, don't process HBH options */
+ im->hbh_enabled = 0;
+
+ {
+ icmp6_neighbor_solicitation_header_t p;
+
+ memset (&p, 0, sizeof (p));
+
+ p.ip.ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 (0x6 << 28);
+ p.ip.payload_length =
+ clib_host_to_net_u16 (sizeof (p) -
+ STRUCT_OFFSET_OF
+ (icmp6_neighbor_solicitation_header_t, neighbor));
+ p.ip.protocol = IP_PROTOCOL_ICMP6;
+ p.ip.hop_limit = 255;
+ ip6_set_solicited_node_multicast_address (&p.ip.dst_address, 0);
+
+ p.neighbor.icmp.type = ICMP6_neighbor_solicitation;
+
+ p.link_layer_option.header.type =
+ ICMP6_NEIGHBOR_DISCOVERY_OPTION_source_link_layer_address;
+ p.link_layer_option.header.n_data_u64s =
+ sizeof (p.link_layer_option) / sizeof (u64);
+
+ vlib_packet_template_init (vm,
+ &im->discover_neighbor_packet_template,
+ &p, sizeof (p),
+ /* alloc chunk size */ 8,
+ "ip6 neighbor discovery");
+ }
+
+ return error;
+}
+
+VLIB_INIT_FUNCTION (ip6_lookup_init);
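+
+/*
+ * What the fib_masks loop in ip6_lookup_init computes, spelled out for one
+ * prefix length (sketch, kept under #if 0): for /33, i0 = 1 whole ~0 word
+ * and i1 = 1 leading bit of the next word, i.e. ffff:ffff:8000::.
+ */
+#if 0
+static void
+fib_mask_example (void)
+{
+ ip6_address_t *m = &ip6_main.fib_masks[33];
+
+ ASSERT (m->as_u32[0] == (u32) ~0);
+ ASSERT (m->as_u32[1] == clib_host_to_net_u32 (0x80000000));
+ ASSERT (m->as_u32[2] == 0 && m->as_u32[3] == 0);
+}
+#endif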
+
+static clib_error_t *
+add_del_ip6_interface_table (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 sw_if_index, table_id;
+
+ sw_if_index = ~0;
+
+ if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ if (unformat (input, "%d", &table_id))
+ ;
+ else
+ {
+ error = clib_error_return (0, "expected table id `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ {
+ u32 fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6,
+ table_id);
+
+ vec_validate (ip6_main.fib_index_by_sw_if_index, sw_if_index);
+ ip6_main.fib_index_by_sw_if_index[sw_if_index] = fib_index;
+ }
+
+done:
+ return error;
+}
+
+/*?
+ * Place the indicated interface into the supplied IPv6 FIB table (also known
+ * as a VRF). If the FIB table does not exist, this command creates it. To
+ * display the current IPv6 FIB table, use the command '<em>show ip6 fib</em>'.
+ * A FIB table will only be displayed if a route has been added to it, or
+ * an IP address is assigned to an interface in the table (which adds a route
+ * automatically).
+ *
+ * @note IP addresses added after setting the interface IP table end up in
+ * the indicated FIB table. If the IP address is added prior to adding the
+ * interface to the FIB table, it will NOT be part of the FIB table. Predictable
+ * but potentially counter-intuitive results occur if you provision interface
+ * addresses in multiple FIBs. Upon RX, packets will be processed in the last
+ * IP table ID provisioned. It might be marginally useful to put an interface
+ * address into multiple FIBs to evade source RPF drops.
+ *
+ * @cliexpar
+ * Example of how to add an interface to an IPv6 FIB table (where 2 is the table-id):
+ * @cliexcmd{set interface ip6 table GigabitEthernet2/0/0 2}
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_ip6_table_command, static) =
+{
+ .path = "set interface ip6 table",
+ .function = add_del_ip6_interface_table,
+ .short_help = "set interface ip6 table <interface> <table-id>"
+};
+/* *INDENT-ON* */
+
+void
+ip6_link_local_address_from_ethernet_mac_address (ip6_address_t * ip,
+ u8 * mac)
+{
+ ip->as_u64[0] = clib_host_to_net_u64 (0xFE80000000000000ULL);
+ /* Invert the "u" bit */
+ ip->as_u8[8] = mac[0] ^ (1 << 1);
+ ip->as_u8[9] = mac[1];
+ ip->as_u8[10] = mac[2];
+ ip->as_u8[11] = 0xFF;
+ ip->as_u8[12] = 0xFE;
+ ip->as_u8[13] = mac[3];
+ ip->as_u8[14] = mac[4];
+ ip->as_u8[15] = mac[5];
+}
+
+void
+ip6_ethernet_mac_address_from_link_local_address (u8 * mac,
+ ip6_address_t * ip)
+{
+ /* Invert the previously inverted "u" bit */
+ mac[0] = ip->as_u8[8] ^ (1 << 1);
+ mac[1] = ip->as_u8[9];
+ mac[2] = ip->as_u8[10];
+ mac[3] = ip->as_u8[13];
+ mac[4] = ip->as_u8[14];
+ mac[5] = ip->as_u8[15];
+}
+
+static clib_error_t *
+test_ip6_link_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ u8 mac[6];
+ ip6_address_t _a, *a = &_a;
+
+ if (unformat (input, "%U", unformat_ethernet_address, mac))
+ {
+ ip6_link_local_address_from_ethernet_mac_address (a, mac);
+ vlib_cli_output (vm, "Link local address: %U", format_ip6_address, a);
+ ip6_ethernet_mac_address_from_link_local_address (mac, a);
+ vlib_cli_output (vm, "Original MAC address: %U",
+ format_ethernet_address, mac);
+ }
+
+ return 0;
+}
+
+/*?
+ * This command converts the given MAC Address into an IPv6 link-local
+ * address.
+ *
+ * @cliexpar
+ * Example of how to create an IPv6 link-local address:
+ * @cliexstart{test ip6 link 16:d9:e0:91:79:86}
+ * Link local address: fe80::14d9:e0ff:fe91:7986
+ * Original MAC address: 16:d9:e0:91:79:86
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (test_link_command, static) =
+{
+ .path = "test ip6 link",
+ .function = test_ip6_link_command_fn,
+ .short_help = "test ip6 link <mac-address>",
+};
+/* *INDENT-ON* */
+
+int
+vnet_set_ip6_flow_hash (u32 table_id, u32 flow_hash_config)
+{
+ ip6_main_t *im6 = &ip6_main;
+ ip6_fib_t *fib;
+ uword *p = hash_get (im6->fib_index_by_table_id, table_id);
+
+ if (p == 0)
+ return -1;
+
+ fib = ip6_fib_get (p[0]);
+
+ fib->flow_hash_config = flow_hash_config;
+ return 1;
+}
+
+static clib_error_t *
+set_ip6_flow_hash_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ int matched = 0;
+ u32 table_id = 0;
+ u32 flow_hash_config = 0;
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "table %d", &table_id))
+ matched = 1;
+#define _(a,v) \
+ else if (unformat (input, #a)) { flow_hash_config |= v; matched=1;}
+ foreach_flow_hash_bit
+#undef _
+ else
+ break;
+ }
+
+ if (matched == 0)
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+
+ rv = vnet_set_ip6_flow_hash (table_id, flow_hash_config);
+ switch (rv)
+ {
+ case 1:
+ break;
+
+ case -1:
+ return clib_error_return (0, "no such FIB table %d", table_id);
+
+ default:
+ clib_warning ("BUG: illegal flow hash config 0x%x", flow_hash_config);
+ break;
+ }
+
+ return 0;
+}
+
+/*?
+ * Configure the set of IPv6 fields used by the flow hash.
+ *
+ * @cliexpar
+ * @parblock
+ * Example of how to set the flow hash on a given table:
+ * @cliexcmd{set ip6 flow-hash table 8 dst sport dport proto}
+ *
+ * Example of displaying the configured flow hash:
+ * @cliexstart{show ip6 fib}
+ * ipv6-VRF:0, fib_index 0, flow hash: src dst sport dport proto
+ * @::/0
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:5 buckets:1 uRPF:5 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * fe80::/10
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:10 buckets:1 uRPF:10 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * ff02::1/128
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:8 buckets:1 uRPF:8 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * ff02::2/128
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:7 buckets:1 uRPF:7 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * ff02::16/128
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:9 buckets:1 uRPF:9 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * ff02::1:ff00:0/104
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:6 buckets:1 uRPF:6 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * ipv6-VRF:8, fib_index 1, flow hash: dst sport dport proto
+ * @::/0
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:21 buckets:1 uRPF:20 to:[0:0]]
+ * [0] [@0]: dpo-drop ip6
+ * @::a:1:1:0:4/126
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:27 buckets:1 uRPF:26 to:[0:0]]
+ * [0] [@4]: ipv6-glean: af_packet0
+ * @::a:1:1:0:7/128
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:28 buckets:1 uRPF:27 to:[0:0]]
+ * [0] [@2]: dpo-receive: @::a:1:1:0:7 on af_packet0
+ * fe80::/10
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:26 buckets:1 uRPF:25 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * fe80::fe:3eff:fe3e:9222/128
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:29 buckets:1 uRPF:28 to:[0:0]]
+ * [0] [@2]: dpo-receive: fe80::fe:3eff:fe3e:9222 on af_packet0
+ * ff02::1/128
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:24 buckets:1 uRPF:23 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * ff02::2/128
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:23 buckets:1 uRPF:22 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * ff02::16/128
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:25 buckets:1 uRPF:24 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * ff02::1:ff00:0/104
+ * unicast-ip6-chain
+ * [@0]: dpo-load-balance: [index:22 buckets:1 uRPF:21 to:[0:0]]
+ * [0] [@2]: dpo-receive
+ * @cliexend
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_ip6_flow_hash_command, static) =
+{
+ .path = "set ip6 flow-hash",
+ .short_help =
+ "set ip6 flow-hash table <table-id> [src] [dst] [sport] [dport] [proto] [reverse]",
+ .function = set_ip6_flow_hash_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+show_ip6_local_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ ip6_main_t *im = &ip6_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+ int i;
+
+ vlib_cli_output (vm, "Protocols handled by ip6_local");
+ for (i = 0; i < ARRAY_LEN (lm->local_next_by_ip_protocol); i++)
+ {
+ if (lm->local_next_by_ip_protocol[i] != IP_LOCAL_NEXT_PUNT)
+ vlib_cli_output (vm, "%d", i);
+ }
+ return 0;
+}
+
+/*?
+ * Display the set of protocols handled by the local IPv6 stack.
+ *
+ * @cliexpar
+ * Example of how to display local protocol table:
+ * @cliexstart{show ip6 local}
+ * Protocols handled by ip6_local
+ * 17
+ * 43
+ * 58
+ * 115
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_ip6_local, static) =
+{
+ .path = "show ip6 local",
+ .function = show_ip6_local_command_fn,
+ .short_help = "show ip6 local",
+};
+/* *INDENT-ON* */
+
+int
+vnet_set_ip6_classify_intfc (vlib_main_t * vm, u32 sw_if_index,
+ u32 table_index)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ ip6_main_t *ipm = &ip6_main;
+ ip_lookup_main_t *lm = &ipm->lookup_main;
+ vnet_classify_main_t *cm = &vnet_classify_main;
+ ip6_address_t *if_addr;
+
+ if (pool_is_free_index (im->sw_interfaces, sw_if_index))
+ return VNET_API_ERROR_NO_MATCHING_INTERFACE;
+
+ if (table_index != ~0 && pool_is_free_index (cm->tables, table_index))
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ vec_validate (lm->classify_table_index_by_sw_if_index, sw_if_index);
+ lm->classify_table_index_by_sw_if_index[sw_if_index] = table_index;
+
+ if_addr = ip6_interface_first_address (ipm, sw_if_index, NULL);
+
+ if (NULL != if_addr)
+ {
+ fib_prefix_t pfx = {
+ .fp_len = 128,
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_addr.ip6 = *if_addr,
+ };
+ u32 fib_index;
+
+ fib_index = fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6,
+ sw_if_index);
+
+ if (table_index != (u32) ~ 0)
+ {
+ dpo_id_t dpo = DPO_INVALID;
+
+ dpo_set (&dpo,
+ DPO_CLASSIFY,
+ DPO_PROTO_IP6,
+ classify_dpo_create (DPO_PROTO_IP6, table_index));
+
+ fib_table_entry_special_dpo_add (fib_index,
+ &pfx,
+ FIB_SOURCE_CLASSIFY,
+ FIB_ENTRY_FLAG_NONE, &dpo);
+ dpo_reset (&dpo);
+ }
+ else
+ {
+ fib_table_entry_special_remove (fib_index,
+ &pfx, FIB_SOURCE_CLASSIFY);
+ }
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+set_ip6_classify_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ u32 table_index = ~0;
+ int table_index_set = 0;
+ u32 sw_if_index = ~0;
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "table-index %d", &table_index))
+ table_index_set = 1;
+ else if (unformat (input, "intfc %U", unformat_vnet_sw_interface,
+ vnet_get_main (), &sw_if_index))
+ ;
+ else
+ break;
+ }
+
+ if (table_index_set == 0)
+ return clib_error_return (0, "classify table-index must be specified");
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "interface / subif must be specified");
+
+ rv = vnet_set_ip6_classify_intfc (vm, sw_if_index, table_index);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_NO_MATCHING_INTERFACE:
+ return clib_error_return (0, "No such interface");
+
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ return clib_error_return (0, "No such classifier table");
+ }
+ return 0;
+}
+
+/*?
+ * Assign a classification table to an interface. The classification
+ * table is created using the '<em>classify table</em>' and '<em>classify session</em>'
+ * commands. Once the table is created, use this command to filter packets
+ * on an interface.
+ *
+ * @cliexpar
+ * Example of how to assign a classification table to an interface:
+ * @cliexcmd{set ip6 classify intfc GigabitEthernet2/0/0 table-index 1}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_ip6_classify_command, static) =
+{
+ .path = "set ip6 classify",
+ .short_help =
+ "set ip6 classify intfc <interface> table-index <classify-idx>",
+ .function = set_ip6_classify_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+ip6_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ ip6_main_t *im = &ip6_main;
+ uword heapsize = 0;
+ u32 tmp;
+ u32 nbuckets = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "hash-buckets %d", &tmp))
+ nbuckets = tmp;
+ else if (unformat (input, "heap-size %dm", &tmp))
+ heapsize = ((u64) tmp) << 20;
+ else if (unformat (input, "heap-size %dM", &tmp))
+ heapsize = ((u64) tmp) << 20;
+ else if (unformat (input, "heap-size %dg", &tmp))
+ heapsize = ((u64) tmp) << 30;
+ else if (unformat (input, "heap-size %dG", &tmp))
+ heapsize = ((u64) tmp) << 30;
+ else
+ return clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, input);
+ }
+
+ im->lookup_table_nbuckets = nbuckets;
+ im->lookup_table_size = heapsize;
+
+ return 0;
+}
+
+VLIB_EARLY_CONFIG_FUNCTION (ip6_config, "ip6");
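+
+/*
+ * The "ip6" stanza parsed by ip6_config above would look like this in the
+ * startup configuration (values are illustrative):
+ *
+ * ip6 {
+ * heap-size 32m
+ * hash-buckets 65536
+ * }
+ */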
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip6_hop_by_hop.c b/src/vnet/ip/ip6_hop_by_hop.c
new file mode 100644
index 00000000000..3a820b3cefc
--- /dev/null
+++ b/src/vnet/ip/ip6_hop_by_hop.c
@@ -0,0 +1,1194 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+
+#include <vnet/ip/ip.h>
+
+#include <vppinfra/hash.h>
+#include <vppinfra/elog.h>
+
+#include <vnet/ip/ip6_hop_by_hop.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/classify/vnet_classify.h>
+
+/**
+ * @file
+ * @brief In-band OAM (iOAM).
+ *
+ * In-band OAM (iOAM) is an implementation study to record operational
+ * information in the packet while the packet traverses a path between
+ * two points in the network.
+ *
+ * VPP can function as in-band OAM encapsulating, transit and
+ * decapsulating node. In this version of VPP in-band OAM data is
+ * transported as options in an IPv6 hop-by-hop extension header. Hence
+ * in-band OAM can be enabled for IPv6 traffic.
+ */
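+
+/*
+ * Datapath sketch (a summary of the nodes below, not additional code):
+ * an encap node runs "ip6-add-hop-by-hop" to insert the pre-computed
+ * rewrite (hop-by-hop header plus registered options) in front of the
+ * IPv6 payload; transit nodes update the options in place via the main
+ * hop-by-hop handling; a decap node runs "ip6-pop-hop-by-hop" to strip
+ * the extension header and restore the original protocol chain.
+ */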
+
+ip6_hop_by_hop_ioam_main_t ip6_hop_by_hop_ioam_main;
+
+#define foreach_ip6_hbyh_ioam_input_next \
+ _(IP6_REWRITE, "ip6-rewrite") \
+ _(IP6_LOOKUP, "ip6-lookup") \
+ _(DROP, "error-drop")
+
+typedef enum
+{
+#define _(s,n) IP6_HBYH_IOAM_INPUT_NEXT_##s,
+ foreach_ip6_hbyh_ioam_input_next
+#undef _
+ IP6_HBYH_IOAM_INPUT_N_NEXT,
+} ip6_hbyh_ioam_input_next_t;
+
+
+u32
+ioam_flow_add (u8 encap, u8 * flow_name)
+{
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+ flow_data_t *flow = 0;
+ u32 index = 0;
+ u8 i;
+
+ pool_get (hm->flows, flow);
+ memset (flow, 0, sizeof (flow_data_t));
+
+ index = flow - hm->flows;
+ strncpy ((char *) flow->flow_name, (char *) flow_name, 31);
+
+ if (!encap)
+ IOAM_SET_DECAP (index);
+
+ for (i = 0; i < 255; i++)
+ {
+ if (hm->flow_handler[i])
+ flow->ctx[i] = hm->flow_handler[i] (index, 1);
+ }
+ return (index);
+}
+
+static uword
+unformat_opaque_ioam (unformat_input_t * input, va_list * args)
+{
+ u64 *opaquep = va_arg (*args, u64 *);
+ u8 *flow_name = NULL;
+ uword ret = 0;
+
+ if (unformat (input, "ioam-encap %s", &flow_name))
+ {
+ *opaquep = ioam_flow_add (1, flow_name);
+ ret = 1;
+ }
+ else if (unformat (input, "ioam-decap %s", &flow_name))
+ {
+ *opaquep = ioam_flow_add (0, flow_name);
+ ret = 1;
+ }
+
+ vec_free (flow_name);
+ return ret;
+}
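+
+/*
+ * Example input accepted by unformat_opaque_ioam once it is registered
+ * with the classifier (see ip6_hop_by_hop_ioam_init below): the token
+ * "ioam-encap my-flow" allocates an encap flow named "my-flow" and
+ * returns its index as the classifier opaque value; "ioam-decap
+ * my-flow" does the same with the decap bit set. The flow name here is
+ * illustrative.
+ */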
+
+u8 *
+get_flow_name_from_flow_ctx (u32 flow_ctx)
+{
+ flow_data_t *flow = NULL;
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+ u32 index;
+
+ index = IOAM_MASK_DECAP_BIT (flow_ctx);
+
+ if (pool_is_free_index (hm->flows, index))
+ return NULL;
+
+ flow = pool_elt_at_index (hm->flows, index);
+ return (flow->flow_name);
+}
+
+/* The main h-b-h tracer will be invoked, no need to do much here */
+int
+ip6_hbh_add_register_option (u8 option,
+ u8 size,
+ int rewrite_options (u8 * rewrite_string,
+ u8 * rewrite_size))
+{
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+
+ ASSERT (option < ARRAY_LEN (hm->add_options));
+
+ /* Already registered */
+ if (hm->add_options[option])
+ return (-1);
+
+ hm->add_options[option] = rewrite_options;
+ hm->options_size[option] = size;
+
+ return (0);
+}
+
+int
+ip6_hbh_add_unregister_option (u8 option)
+{
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+
+ ASSERT (option < ARRAY_LEN (hm->add_options));
+
+ /* Not registered */
+ if (!hm->add_options[option])
+ return (-1);
+
+ hm->add_options[option] = NULL;
+ hm->options_size[option] = 0;
+ return (0);
+}
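+
+/*
+ * Usage sketch for the registration API above (hypothetical handler;
+ * the option type, struct and function names below are illustrative,
+ * not part of this file):
+ *
+ *   static int
+ *   my_opt_rewrite (u8 * rewrite_string, u8 * rewrite_size)
+ *   {
+ *     // fill in the option TLV at rewrite_string, report its size
+ *     return 0;
+ *   }
+ *
+ *   ip6_hbh_add_register_option (MY_HBH_OPTION_TYPE,
+ *                                sizeof (my_hbh_option_t),
+ *                                my_opt_rewrite);
+ */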
+
+/* Config handler registration */
+int
+ip6_hbh_config_handler_register (u8 option,
+ int config_handler (void *data, u8 disable))
+{
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+
+ ASSERT (option < ARRAY_LEN (hm->config_handler));
+
+ /* Already registered */
+ if (hm->config_handler[option])
+ return (VNET_API_ERROR_INVALID_REGISTRATION);
+
+ hm->config_handler[option] = config_handler;
+
+ return (0);
+}
+
+int
+ip6_hbh_config_handler_unregister (u8 option)
+{
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+
+ ASSERT (option < ARRAY_LEN (hm->config_handler));
+
+ /* Not registered */
+ if (!hm->config_handler[option])
+ return (VNET_API_ERROR_INVALID_REGISTRATION);
+
+ hm->config_handler[option] = NULL;
+ return (0);
+}
+
+/* Flow handler registration */
+int
+ip6_hbh_flow_handler_register (u8 option,
+ u32 ioam_flow_handler (u32 flow_ctx, u8 add))
+{
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+
+ ASSERT (option < ARRAY_LEN (hm->flow_handler));
+
+ /* Already registered */
+ if (hm->flow_handler[option])
+ return (VNET_API_ERROR_INVALID_REGISTRATION);
+
+ hm->flow_handler[option] = ioam_flow_handler;
+
+ return (0);
+}
+
+int
+ip6_hbh_flow_handler_unregister (u8 option)
+{
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+
+ ASSERT (option < ARRAY_LEN (hm->flow_handler));
+
+ /* Not registered */
+ if (!hm->flow_handler[option])
+ return (VNET_API_ERROR_INVALID_REGISTRATION);
+
+ hm->flow_handler[option] = NULL;
+ return (0);
+}
+
+typedef struct
+{
+ u32 next_index;
+} ip6_add_hop_by_hop_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_ip6_add_hop_by_hop_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ip6_add_hop_by_hop_trace_t *t = va_arg (*args,
+ ip6_add_hop_by_hop_trace_t *);
+
+ s = format (s, "IP6_ADD_HOP_BY_HOP: next index %d", t->next_index);
+ return s;
+}
+
+vlib_node_registration_t ip6_add_hop_by_hop_node;
+
+#define foreach_ip6_add_hop_by_hop_error \
+_(PROCESSED, "Pkts w/ added ip6 hop-by-hop options")
+
+typedef enum
+{
+#define _(sym,str) IP6_ADD_HOP_BY_HOP_ERROR_##sym,
+ foreach_ip6_add_hop_by_hop_error
+#undef _
+ IP6_ADD_HOP_BY_HOP_N_ERROR,
+} ip6_add_hop_by_hop_error_t;
+
+static char *ip6_add_hop_by_hop_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ip6_add_hop_by_hop_error
+#undef _
+};
+
+static uword
+ip6_add_hop_by_hop_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+ u32 n_left_from, *from, *to_next;
+ ip_lookup_next_t next_index;
+ u32 processed = 0;
+ u8 *rewrite = hm->rewrite;
+ u32 rewrite_length = vec_len (rewrite);
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u32 next0, next1;
+ ip6_header_t *ip0, *ip1;
+ ip6_hop_by_hop_header_t *hbh0, *hbh1;
+ u64 *copy_src0, *copy_dst0, *copy_src1, *copy_dst1;
+ u16 new_l0, new_l1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data - rewrite_length,
+ 2 * CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data - rewrite_length,
+ 2 * CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* $$$$$ Dual loop: process 2 x packets here $$$$$ */
+ ip0 = vlib_buffer_get_current (b0);
+ ip1 = vlib_buffer_get_current (b1);
+
+ /* Copy the ip header left by the required amount */
+ copy_dst0 = (u64 *) (((u8 *) ip0) - rewrite_length);
+ copy_dst1 = (u64 *) (((u8 *) ip1) - rewrite_length);
+ copy_src0 = (u64 *) ip0;
+ copy_src1 = (u64 *) ip1;
+
+ copy_dst0[0] = copy_src0[0];
+ copy_dst0[1] = copy_src0[1];
+ copy_dst0[2] = copy_src0[2];
+ copy_dst0[3] = copy_src0[3];
+ copy_dst0[4] = copy_src0[4];
+
+ copy_dst1[0] = copy_src1[0];
+ copy_dst1[1] = copy_src1[1];
+ copy_dst1[2] = copy_src1[2];
+ copy_dst1[3] = copy_src1[3];
+ copy_dst1[4] = copy_src1[4];
+
+ vlib_buffer_advance (b0, -(word) rewrite_length);
+ vlib_buffer_advance (b1, -(word) rewrite_length);
+ ip0 = vlib_buffer_get_current (b0);
+ ip1 = vlib_buffer_get_current (b1);
+
+ hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
+ hbh1 = (ip6_hop_by_hop_header_t *) (ip1 + 1);
+ /* $$$ tune, rewrite_length is a multiple of 8 */
+ clib_memcpy (hbh0, rewrite, rewrite_length);
+ clib_memcpy (hbh1, rewrite, rewrite_length);
+ /* Patch the protocol chain, insert the h-b-h (type 0) header */
+ hbh0->protocol = ip0->protocol;
+ hbh1->protocol = ip1->protocol;
+ ip0->protocol = 0;
+ ip1->protocol = 0;
+ new_l0 =
+ clib_net_to_host_u16 (ip0->payload_length) + rewrite_length;
+ new_l1 =
+ clib_net_to_host_u16 (ip1->payload_length) + rewrite_length;
+ ip0->payload_length = clib_host_to_net_u16 (new_l0);
+ ip1->payload_length = clib_host_to_net_u16 (new_l1);
+
+ /* Populate the (first) h-b-h list elt */
+ next0 = IP6_HBYH_IOAM_INPUT_NEXT_IP6_LOOKUP;
+ next1 = IP6_HBYH_IOAM_INPUT_NEXT_IP6_LOOKUP;
+
+
+ /* $$$$$ End of processing 2 x packets $$$$$ */
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ ip6_add_hop_by_hop_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->next_index = next0;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ ip6_add_hop_by_hop_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->next_index = next1;
+ }
+ }
+ processed += 2;
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ ip6_header_t *ip0;
+ ip6_hop_by_hop_header_t *hbh0;
+ u64 *copy_src0, *copy_dst0;
+ u16 new_l0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* Copy the ip header left by the required amount */
+ copy_dst0 = (u64 *) (((u8 *) ip0) - rewrite_length);
+ copy_src0 = (u64 *) ip0;
+
+ copy_dst0[0] = copy_src0[0];
+ copy_dst0[1] = copy_src0[1];
+ copy_dst0[2] = copy_src0[2];
+ copy_dst0[3] = copy_src0[3];
+ copy_dst0[4] = copy_src0[4];
+ vlib_buffer_advance (b0, -(word) rewrite_length);
+ ip0 = vlib_buffer_get_current (b0);
+
+ hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
+ /* $$$ tune, rewrite_length is a multiple of 8 */
+ clib_memcpy (hbh0, rewrite, rewrite_length);
+ /* Patch the protocol chain, insert the h-b-h (type 0) header */
+ hbh0->protocol = ip0->protocol;
+ ip0->protocol = 0;
+ new_l0 =
+ clib_net_to_host_u16 (ip0->payload_length) + rewrite_length;
+ ip0->payload_length = clib_host_to_net_u16 (new_l0);
+
+ /* Populate the (first) h-b-h list elt */
+ next0 = IP6_HBYH_IOAM_INPUT_NEXT_IP6_LOOKUP;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ ip6_add_hop_by_hop_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->next_index = next0;
+ }
+
+ processed++;
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, ip6_add_hop_by_hop_node.index,
+ IP6_ADD_HOP_BY_HOP_ERROR_PROCESSED, processed);
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_add_hop_by_hop_node) =
+{
+  .function = ip6_add_hop_by_hop_node_fn,
+  .name = "ip6-add-hop-by-hop",
+  .vector_size = sizeof (u32),
+  .format_trace = format_ip6_add_hop_by_hop_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (ip6_add_hop_by_hop_error_strings),
+  .error_strings = ip6_add_hop_by_hop_error_strings,
+  /* See ip/lookup.h */
+  .n_next_nodes = IP6_HBYH_IOAM_INPUT_N_NEXT,
+  .next_nodes = {
+#define _(s,n) [IP6_HBYH_IOAM_INPUT_NEXT_##s] = n,
+    foreach_ip6_hbyh_ioam_input_next
+#undef _
+  },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_add_hop_by_hop_node,
+ ip6_add_hop_by_hop_node_fn);
+/* The main h-b-h tracer was already invoked, no need to do much here */
+typedef struct
+{
+ u32 next_index;
+} ip6_pop_hop_by_hop_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_ip6_pop_hop_by_hop_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ip6_pop_hop_by_hop_trace_t *t =
+ va_arg (*args, ip6_pop_hop_by_hop_trace_t *);
+
+ s = format (s, "IP6_POP_HOP_BY_HOP: next index %d", t->next_index);
+ return s;
+}
+
+int
+ip6_hbh_pop_register_option (u8 option,
+ int options (vlib_buffer_t * b,
+ ip6_header_t * ip,
+ ip6_hop_by_hop_option_t * opt))
+{
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+
+ ASSERT (option < ARRAY_LEN (hm->pop_options));
+
+ /* Already registered */
+ if (hm->pop_options[option])
+ return (-1);
+
+ hm->pop_options[option] = options;
+
+ return (0);
+}
+
+int
+ip6_hbh_pop_unregister_option (u8 option)
+{
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+
+ ASSERT (option < ARRAY_LEN (hm->pop_options));
+
+ /* Not registered */
+ if (!hm->pop_options[option])
+ return (-1);
+
+ hm->pop_options[option] = NULL;
+ return (0);
+}
+
+vlib_node_registration_t ip6_pop_hop_by_hop_node;
+
+#define foreach_ip6_pop_hop_by_hop_error \
+_(PROCESSED, "Pkts w/ removed ip6 hop-by-hop options") \
+_(NO_HOHO, "Pkts w/ no ip6 hop-by-hop options") \
+_(OPTION_FAILED, "ip6 pop hop-by-hop failed to process")
+
+typedef enum
+{
+#define _(sym,str) IP6_POP_HOP_BY_HOP_ERROR_##sym,
+ foreach_ip6_pop_hop_by_hop_error
+#undef _
+ IP6_POP_HOP_BY_HOP_N_ERROR,
+} ip6_pop_hop_by_hop_error_t;
+
+static char *ip6_pop_hop_by_hop_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ip6_pop_hop_by_hop_error
+#undef _
+};
+
+static inline void
+ioam_pop_hop_by_hop_processing (vlib_main_t * vm,
+ ip6_header_t * ip0,
+ ip6_hop_by_hop_header_t * hbh0,
+ vlib_buffer_t * b)
+{
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+ ip6_hop_by_hop_option_t *opt0, *limit0;
+ u8 type0;
+
+ if (!hbh0 || !ip0)
+ return;
+
+ opt0 = (ip6_hop_by_hop_option_t *) (hbh0 + 1);
+ limit0 = (ip6_hop_by_hop_option_t *)
+ ((u8 *) hbh0 + ((hbh0->length + 1) << 3));
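+  /* The length field counts 8-octet units beyond the first 8 octets,
+   * so (length + 1) << 3 is the total extension header size in octets;
+   * e.g. length 2 => a 24-octet header. */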
+
+ /* Scan the set of h-b-h options, process ones that we understand */
+ while (opt0 < limit0)
+ {
+ type0 = opt0->type;
+ switch (type0)
+ {
+ case 0: /* Pad1 */
+	  /* Pad1 is a single octet; advance by exactly one byte */
+	  opt0 = (ip6_hop_by_hop_option_t *) ((u8 *) opt0 + 1);
+ continue;
+ case 1: /* PadN */
+ break;
+ default:
+ if (hm->pop_options[type0])
+ {
+ if ((*hm->pop_options[type0]) (b, ip0, opt0) < 0)
+ {
+ vlib_node_increment_counter (vm,
+ ip6_pop_hop_by_hop_node.index,
+ IP6_POP_HOP_BY_HOP_ERROR_OPTION_FAILED,
+ 1);
+ }
+ }
+ }
+ opt0 =
+ (ip6_hop_by_hop_option_t *) (((u8 *) opt0) + opt0->length +
+ sizeof (ip6_hop_by_hop_option_t));
+ }
+}
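+
+/*
+ * Walking example for the loop above: a Pad1 option (type 0) is a
+ * single octet, so the cursor advances by 1; a PadN option with
+ * length 4 advances by 6 octets (the 2-octet type/length header plus
+ * 4 data octets), as does any other TLV with a 4-octet data field.
+ */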
+
+static uword
+ip6_pop_hop_by_hop_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ ip6_main_t *im = &ip6_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+ u32 n_left_from, *from, *to_next;
+ ip_lookup_next_t next_index;
+ u32 processed = 0;
+ u32 no_header = 0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u32 next0, next1;
+ u32 adj_index0, adj_index1;
+ ip6_header_t *ip0, *ip1;
+ ip_adjacency_t *adj0, *adj1;
+ ip6_hop_by_hop_header_t *hbh0, *hbh1;
+ u64 *copy_dst0, *copy_src0, *copy_dst1, *copy_src1;
+ u16 new_l0, new_l1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* $$$$$ Dual loop: process 2 x packets here $$$$$ */
+ ip0 = vlib_buffer_get_current (b0);
+ ip1 = vlib_buffer_get_current (b1);
+ adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+ adj_index1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX];
+ adj0 = ip_get_adjacency (lm, adj_index0);
+ adj1 = ip_get_adjacency (lm, adj_index1);
+
+ next0 = adj0->lookup_next_index;
+ next1 = adj1->lookup_next_index;
+
+ hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
+ hbh1 = (ip6_hop_by_hop_header_t *) (ip1 + 1);
+
+ ioam_pop_hop_by_hop_processing (vm, ip0, hbh0, b0);
+ ioam_pop_hop_by_hop_processing (vm, ip1, hbh1, b1);
+
+ vlib_buffer_advance (b0, (hbh0->length + 1) << 3);
+ vlib_buffer_advance (b1, (hbh1->length + 1) << 3);
+
+ new_l0 = clib_net_to_host_u16 (ip0->payload_length) -
+ ((hbh0->length + 1) << 3);
+ new_l1 = clib_net_to_host_u16 (ip1->payload_length) -
+ ((hbh1->length + 1) << 3);
+
+ ip0->payload_length = clib_host_to_net_u16 (new_l0);
+ ip1->payload_length = clib_host_to_net_u16 (new_l1);
+
+ ip0->protocol = hbh0->protocol;
+ ip1->protocol = hbh1->protocol;
+
+ copy_src0 = (u64 *) ip0;
+ copy_src1 = (u64 *) ip1;
+ copy_dst0 = copy_src0 + (hbh0->length + 1);
+ copy_dst0[4] = copy_src0[4];
+ copy_dst0[3] = copy_src0[3];
+ copy_dst0[2] = copy_src0[2];
+ copy_dst0[1] = copy_src0[1];
+ copy_dst0[0] = copy_src0[0];
+ copy_dst1 = copy_src1 + (hbh1->length + 1);
+ copy_dst1[4] = copy_src1[4];
+ copy_dst1[3] = copy_src1[3];
+ copy_dst1[2] = copy_src1[2];
+ copy_dst1[1] = copy_src1[1];
+ copy_dst1[0] = copy_src1[0];
+ processed += 2;
+ /* $$$$$ End of processing 2 x packets $$$$$ */
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ ip6_pop_hop_by_hop_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->next_index = next0;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ ip6_pop_hop_by_hop_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->next_index = next1;
+ }
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u32 adj_index0;
+ ip6_header_t *ip0;
+ ip_adjacency_t *adj0;
+ ip6_hop_by_hop_header_t *hbh0;
+ u64 *copy_dst0, *copy_src0;
+ u16 new_l0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ ip0 = vlib_buffer_get_current (b0);
+ adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+ adj0 = ip_get_adjacency (lm, adj_index0);
+
+ /* Default use the next_index from the adjacency. */
+ next0 = adj0->lookup_next_index;
+
+	  /* Perfectly normal to end up here without an h-b-h header */
+ hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
+
+	  /* TODO: temporarily doing this here; move the validation into end_of_path_cb */
+ ioam_pop_hop_by_hop_processing (vm, ip0, hbh0, b0);
+ /* Pop the trace data */
+ vlib_buffer_advance (b0, (hbh0->length + 1) << 3);
+ new_l0 = clib_net_to_host_u16 (ip0->payload_length) -
+ ((hbh0->length + 1) << 3);
+ ip0->payload_length = clib_host_to_net_u16 (new_l0);
+ ip0->protocol = hbh0->protocol;
+ copy_src0 = (u64 *) ip0;
+ copy_dst0 = copy_src0 + (hbh0->length + 1);
+ copy_dst0[4] = copy_src0[4];
+ copy_dst0[3] = copy_src0[3];
+ copy_dst0[2] = copy_src0[2];
+ copy_dst0[1] = copy_src0[1];
+ copy_dst0[0] = copy_src0[0];
+ processed++;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ ip6_pop_hop_by_hop_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->next_index = next0;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, ip6_pop_hop_by_hop_node.index,
+ IP6_POP_HOP_BY_HOP_ERROR_PROCESSED, processed);
+ vlib_node_increment_counter (vm, ip6_pop_hop_by_hop_node.index,
+ IP6_POP_HOP_BY_HOP_ERROR_NO_HOHO, no_header);
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_pop_hop_by_hop_node) =
+{
+  .function = ip6_pop_hop_by_hop_node_fn,
+  .name = "ip6-pop-hop-by-hop",
+  .vector_size = sizeof (u32),
+  .format_trace = format_ip6_pop_hop_by_hop_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .sibling_of = "ip6-lookup",
+  .n_errors = ARRAY_LEN (ip6_pop_hop_by_hop_error_strings),
+  .error_strings = ip6_pop_hop_by_hop_error_strings,
+  /* See ip/lookup.h */
+  .n_next_nodes = 0,
+};
+
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_pop_hop_by_hop_node,
+ ip6_pop_hop_by_hop_node_fn);
+static clib_error_t *
+ip6_hop_by_hop_ioam_init (vlib_main_t * vm)
+{
+ clib_error_t *error;
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+
+ if ((error = vlib_call_init_function (vm, ip_main_init)))
+ return (error);
+
+ if ((error = vlib_call_init_function (vm, ip6_lookup_init)))
+ return error;
+
+ hm->vlib_main = vm;
+ hm->vnet_main = vnet_get_main ();
+ hm->unix_time_0 = (u32) time (0); /* Store starting time */
+ hm->vlib_time_0 = vlib_time_now (vm);
+ hm->ioam_flag = IOAM_HBYH_MOD;
+ memset (hm->add_options, 0, sizeof (hm->add_options));
+ memset (hm->pop_options, 0, sizeof (hm->pop_options));
+ memset (hm->options_size, 0, sizeof (hm->options_size));
+
+ vnet_classify_register_unformat_opaque_index_fn (unformat_opaque_ioam);
+
+ return (0);
+}
+
+VLIB_INIT_FUNCTION (ip6_hop_by_hop_ioam_init);
+
+int
+ip6_ioam_set_rewrite (u8 ** rwp, int has_trace_option,
+ int has_pot_option, int has_seqno_option)
+{
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+ u8 *rewrite = NULL;
+ u32 size, rnd_size;
+ ip6_hop_by_hop_header_t *hbh;
+ u8 *current;
+ u8 *trace_data_size = NULL;
+ u8 *pot_data_size = NULL;
+
+ vec_free (*rwp);
+
+ if (has_trace_option == 0 && has_pot_option == 0)
+ return -1;
+
+ /* Work out how much space we need */
+ size = sizeof (ip6_hop_by_hop_header_t);
+
+ //if (has_trace_option && hm->get_sizeof_options[HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST] != 0)
+ if (has_trace_option
+ && hm->options_size[HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST] != 0)
+ {
+ size += hm->options_size[HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST];
+ }
+ if (has_pot_option
+ && hm->add_options[HBH_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] != 0)
+ {
+ size += hm->options_size[HBH_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT];
+ }
+
+ if (has_seqno_option)
+ {
+ size += hm->options_size[HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE];
+ }
+
+ /* Round to a multiple of 8 octets */
+ rnd_size = (size + 7) & ~7;
+
+ /* allocate it, zero-fill / pad by construction */
+ vec_validate (rewrite, rnd_size - 1);
+
+ hbh = (ip6_hop_by_hop_header_t *) rewrite;
+ /* Length of header in 8 octet units, not incl first 8 octets */
+ hbh->length = (rnd_size >> 3) - 1;
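+  /* Worked example (assuming a hypothetical 16-octet trace option):
+   * size = 2 + 16 = 18, rnd_size = 24, hence
+   * hbh->length = (24 >> 3) - 1 = 2, i.e. three 8-octet units on the
+   * wire with 6 octets of zero padding. */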
+ current = (u8 *) (hbh + 1);
+
+ if (has_trace_option
+ && hm->add_options[HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST] != 0)
+ {
+ if (0 != (hm->options_size[HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST]))
+ {
+ trace_data_size =
+ &hm->options_size[HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST];
+ if (0 ==
+ hm->add_options[HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST] (current,
+ trace_data_size))
+ current += *trace_data_size;
+ }
+ }
+ if (has_pot_option
+ && hm->add_options[HBH_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] != 0)
+ {
+ pot_data_size =
+ &hm->options_size[HBH_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT];
+ if (0 ==
+ hm->add_options[HBH_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] (current,
+ pot_data_size))
+ current += *pot_data_size;
+ }
+
+ if (has_seqno_option &&
+ (hm->add_options[HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE] != 0))
+ {
+ if (0 == hm->add_options[HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE] (current,
+ &
+ (hm->options_size
+ [HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE])))
+ current += hm->options_size[HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE];
+ }
+
+ *rwp = rewrite;
+ return 0;
+}
+
+clib_error_t *
+clear_ioam_rewrite_fn (void)
+{
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+
+ vec_free (hm->rewrite);
+ hm->rewrite = 0;
+ hm->has_trace_option = 0;
+ hm->has_pot_option = 0;
+ hm->has_seqno_option = 0;
+ hm->has_analyse_option = 0;
+ if (hm->config_handler[HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST])
+ hm->config_handler[HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST] (NULL, 1);
+
+ if (hm->config_handler[HBH_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT])
+ hm->config_handler[HBH_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] (NULL, 1);
+
+ if (hm->config_handler[HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE])
+ {
+ hm->config_handler[HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE] ((void *)
+ &hm->has_analyse_option,
+ 1);
+ }
+
+ return 0;
+}
+
+clib_error_t *
+clear_ioam_rewrite_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ return (clear_ioam_rewrite_fn ());
+}
+
+/*?
+ * This command clears all the In-band OAM (iOAM) features enabled by
+ * the '<em>set ioam rewrite</em>' command. Use '<em>show ioam summary</em>' to
+ * verify that the configured settings have been cleared.
+ *
+ * @cliexpar
+ * Example of how to clear iOAM features:
+ * @cliexcmd{clear ioam rewrite}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ip6_clear_ioam_rewrite_cmd, static) = {
+ .path = "clear ioam rewrite",
+ .short_help = "clear ioam rewrite",
+ .function = clear_ioam_rewrite_command_fn,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+ip6_ioam_enable (int has_trace_option, int has_pot_option,
+ int has_seqno_option, int has_analyse_option)
+{
+ int rv;
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+ rv = ip6_ioam_set_rewrite (&hm->rewrite, has_trace_option,
+ has_pot_option, has_seqno_option);
+
+ switch (rv)
+ {
+ case 0:
+ if (has_trace_option)
+ {
+ hm->has_trace_option = has_trace_option;
+ if (hm->config_handler[HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST])
+ hm->config_handler[HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST] (NULL,
+ 0);
+ }
+
+ if (has_pot_option)
+ {
+ hm->has_pot_option = has_pot_option;
+ if (hm->config_handler[HBH_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT])
+ hm->config_handler[HBH_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] (NULL,
+ 0);
+ }
+ hm->has_analyse_option = has_analyse_option;
+ if (has_seqno_option)
+ {
+ hm->has_seqno_option = has_seqno_option;
+ if (hm->config_handler[HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE])
+ {
+ hm->config_handler[HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE] ((void *)
+ &has_analyse_option,
+ 0);
+ }
+ }
+ break;
+
+ default:
+ return clib_error_return_code (0, rv, 0,
+ "ip6_ioam_set_rewrite returned %d", rv);
+ }
+
+ return 0;
+}
+
+
+static clib_error_t *
+ip6_set_ioam_rewrite_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ int has_trace_option = 0;
+ int has_pot_option = 0;
+ int has_seqno_option = 0;
+ int has_analyse_option = 0;
+ clib_error_t *rv = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "trace"))
+ has_trace_option = 1;
+ else if (unformat (input, "pot"))
+ has_pot_option = 1;
+ else if (unformat (input, "seqno"))
+ has_seqno_option = 1;
+ else if (unformat (input, "analyse"))
+ has_analyse_option = 1;
+ else
+ break;
+ }
+
+
+ rv = ip6_ioam_enable (has_trace_option, has_pot_option,
+ has_seqno_option, has_analyse_option);
+
+ return rv;
+}
+
+/*?
+ * This command is used to enable In-band OAM (iOAM) features on IPv6.
+ * '<em>trace</em>' enables the iOAM trace feature and '<em>pot</em>' enables
+ * the Proof Of Transit feature. '<em>ppc</em>' selects the Per Packet
+ * Counter feature for Edge to Edge processing; its value indicates whether
+ * this node is an '<em>encap</em>' node (iOAM edge node where the packet
+ * enters the iOAM domain), a '<em>decap</em>' node (iOAM edge node where
+ * the packet leaves the iOAM domain) or '<em>none</em>' (iOAM node where
+ * the packet is in transit through the iOAM domain). '<em>ppc</em>' can
+ * only be set if '<em>trace</em>' or '<em>pot</em>' is enabled.
+ *
+ * Use '<em>clear ioam rewrite</em>' to disable all features enabled by this
+ * command. Use '<em>show ioam summary</em>' to verify the configured settings.
+ *
+ * @cliexpar
+ * Example of how to enable trace and pot with ppc set to encap:
+ * @cliexcmd{set ioam rewrite trace pot ppc encap}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ip6_set_ioam_rewrite_cmd, static) = {
+ .path = "set ioam rewrite",
+  .short_help = "set ioam rewrite [trace] [pot] [seqno] [analyse]",
+ .function = ip6_set_ioam_rewrite_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+ip6_show_ioam_summary_cmd_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+ u8 *s = 0;
+
+
+ if (!is_zero_ip6_address (&hm->adj))
+ {
+ s = format (s, " REWRITE FLOW CONFIGS - \n");
+ s = format (s, " Destination Address : %U\n",
+ format_ip6_address, &hm->adj, sizeof (ip6_address_t));
+ s =
+ format (s, " Flow operation : %d (%s)\n",
+ hm->ioam_flag,
+ (hm->ioam_flag ==
+ IOAM_HBYH_ADD) ? "Add" : ((hm->ioam_flag ==
+ IOAM_HBYH_MOD) ? "Mod" : "Pop"));
+ }
+ else
+ {
+ s = format (s, " REWRITE FLOW CONFIGS - Not configured\n");
+ }
+
+
+ s = format (s, " TRACE OPTION - %d (%s)\n",
+ hm->has_trace_option,
+ (hm->has_trace_option ? "Enabled" : "Disabled"));
+ if (hm->has_trace_option)
+    s =
+      format (s,
+	      "Try 'show ioam trace' and 'show ioam-trace profile' for more information\n");
+
+
+ s = format (s, " POT OPTION - %d (%s)\n",
+ hm->has_pot_option,
+ (hm->has_pot_option ? "Enabled" : "Disabled"));
+ if (hm->has_pot_option)
+    s =
+      format (s,
+	      "Try 'show ioam pot' and 'show pot profile' for more information\n");
+
+ s = format (s, " EDGE TO EDGE - SeqNo OPTION - %d (%s)\n",
+ hm->has_seqno_option,
+ hm->has_seqno_option ? "Enabled" : "Disabled");
+ if (hm->has_seqno_option)
+ s = format (s, "Try 'show ioam e2e' for more information\n");
+
+ s = format (s, " iOAM Analyse OPTION - %d (%s)\n",
+ hm->has_analyse_option,
+ hm->has_analyse_option ? "Enabled" : "Disabled");
+
+ vlib_cli_output (vm, "%v", s);
+ vec_free (s);
+ return 0;
+}
+
+/*?
+ * This command displays the current configuration data for In-band
+ * OAM (iOAM).
+ *
+ * @cliexpar
+ * Example to show the iOAM configuration:
+ * @cliexstart{show ioam summary}
+ * REWRITE FLOW CONFIGS -
+ * Destination Address : ff02::1
+ * Flow operation : 2 (Pop)
+ * TRACE OPTION - 1 (Enabled)
+ * Try 'show ioam trace and show ioam-trace profile' for more information
+ * POT OPTION - 1 (Enabled)
+ * Try 'show ioam pot and show pot profile' for more information
+ * EDGE TO EDGE - SeqNo OPTION - 1 (Enabled)
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ip6_show_ioam_run_cmd, static) = {
+ .path = "show ioam summary",
+ .short_help = "show ioam summary",
+ .function = ip6_show_ioam_summary_cmd_fn,
+};
+/* *INDENT-ON* */
+
+void
+vnet_register_ioam_end_of_path_callback (void *cb)
+{
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+
+ hm->ioam_end_of_path_cb = cb;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip6_hop_by_hop.h b/src/vnet/ip/ip6_hop_by_hop.h
new file mode 100644
index 00000000000..acfaa37ed2f
--- /dev/null
+++ b/src/vnet/ip/ip6_hop_by_hop.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_ip6_hop_by_hop_ioam_h__
+#define __included_ip6_hop_by_hop_ioam_h__
+
+#include <vnet/ip/ip6_hop_by_hop_packet.h>
+#include <vnet/ip/ip.h>
+
+
+#define MAX_IP6_HBH_OPTION 256
+
+/* The MS bit of the opaque data is set to mark a node as a decap node */
+#define IOAM_DECAP_BIT 0x80000000
+
+#define IOAM_DEAP_ENABLED(opaque_data) (opaque_data & IOAM_DECAP_BIT)
+
+#define IOAM_SET_DECAP(opaque_data) \
+ (opaque_data |= IOAM_DECAP_BIT)
+
+#define IOAM_MASK_DECAP_BIT(x) (x & ~IOAM_DECAP_BIT)
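+
+/*
+ * Example: setting the decap bit on flow index 5 with IOAM_SET_DECAP
+ * yields the opaque value 0x80000005; IOAM_MASK_DECAP_BIT (0x80000005)
+ * recovers the flow pool index 5.
+ */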
+
+/*
+ * Stores the run time flow data of hbh options
+ */
+typedef struct
+{
+ u32 ctx[MAX_IP6_HBH_OPTION];
+ u8 flow_name[64];
+} flow_data_t;
+
+typedef struct
+{
+ /* The current rewrite we're using */
+ u8 *rewrite;
+
+ /* Trace data processing callback */
+ void *ioam_end_of_path_cb;
+ /* Configuration data */
+ /* Adjacency */
+ ip6_address_t adj;
+#define IOAM_HBYH_ADD 0
+#define IOAM_HBYH_MOD 1
+#define IOAM_HBYH_POP 2
+ u8 ioam_flag;
+ /* time scale transform. Joy. */
+ u32 unix_time_0;
+ f64 vlib_time_0;
+
+
+ /* Trace option */
+ u8 has_trace_option;
+
+ /* Pot option */
+ u8 has_pot_option;
+
+  /* Edge-to-edge sequence number (Per Packet Counter) option */
+ u8 has_seqno_option;
+
+  /* Enable analysis of iOAM data on the decap node */
+ u8 has_analyse_option;
+
+ /* Array of function pointers to ADD and POP HBH option handling routines */
+ u8 options_size[MAX_IP6_HBH_OPTION];
+ int (*add_options[MAX_IP6_HBH_OPTION]) (u8 * rewrite_string,
+ u8 * rewrite_size);
+ int (*pop_options[MAX_IP6_HBH_OPTION]) (vlib_buffer_t * b,
+ ip6_header_t * ip,
+ ip6_hop_by_hop_option_t * opt);
+ int (*get_sizeof_options[MAX_IP6_HBH_OPTION]) (u32 * rewrite_size);
+ int (*config_handler[MAX_IP6_HBH_OPTION]) (void *data, u8 disable);
+
+ /* Array of function pointers to handle hbh options being used with classifier */
+ u32 (*flow_handler[MAX_IP6_HBH_OPTION]) (u32 flow_ctx, u8 add);
+ flow_data_t *flows;
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} ip6_hop_by_hop_ioam_main_t;
+
+extern ip6_hop_by_hop_ioam_main_t ip6_hop_by_hop_ioam_main;
+
+extern u8 *format_path_map (u8 * s, va_list * args);
+
+extern clib_error_t *ip6_ioam_enable (int has_trace_option,
+ int has_pot_option,
+ int has_seqno_option,
+ int has_analyse_option);
+
+extern int ip6_ioam_set_destination (ip6_address_t * addr, u32 mask_width,
+ u32 vrf_id, int is_add, int is_pop,
+ int is_none);
+
+extern clib_error_t *clear_ioam_rewrite_fn (void);
+
+static inline u8
+is_zero_ip4_address (ip4_address_t * a)
+{
+ return (a->as_u32 == 0);
+}
+
+static inline void
+copy_ip6_address (ip6_address_t * dst, ip6_address_t * src)
+{
+ dst->as_u64[0] = src->as_u64[0];
+ dst->as_u64[1] = src->as_u64[1];
+}
+
+static inline void
+set_zero_ip6_address (ip6_address_t * a)
+{
+ a->as_u64[0] = 0;
+ a->as_u64[1] = 0;
+}
+
+static inline u8
+cmp_ip6_address (ip6_address_t * a1, ip6_address_t * a2)
+{
+ return ((a1->as_u64[0] == a2->as_u64[0])
+ && (a1->as_u64[1] == a2->as_u64[1]));
+}
+
+static inline u8
+is_zero_ip6_address (ip6_address_t * a)
+{
+ return ((a->as_u64[0] == 0) && (a->as_u64[1] == 0));
+}
+
+int ip6_hbh_add_register_option (u8 option,
+ u8 size,
+ int rewrite_options (u8 * rewrite_string,
+ u8 * size));
+int ip6_hbh_add_unregister_option (u8 option);
+
+int ip6_hbh_pop_register_option (u8 option,
+ int options (vlib_buffer_t * b,
+ ip6_header_t * ip,
+ ip6_hop_by_hop_option_t * opt));
+int ip6_hbh_pop_unregister_option (u8 option);
+
+int
+ip6_hbh_get_sizeof_register_option (u8 option,
+ int get_sizeof_hdr_options (u32 *
+ rewrite_size));
+
+int
+ip6_ioam_set_rewrite (u8 ** rwp, int has_trace_option,
+ int has_pot_option, int has_seq_no);
+
+int
+ip6_hbh_config_handler_register (u8 option,
+ int config_handler (void *data, u8 disable));
+
+int ip6_hbh_config_handler_unregister (u8 option);
+
+int ip6_hbh_flow_handler_register (u8 option,
+ u32 ioam_flow_handler (u32 flow_ctx,
+ u8 add));
+
+int ip6_hbh_flow_handler_unregister (u8 option);
+
+u8 *get_flow_name_from_flow_ctx (u32 flow_ctx);
+
+static inline flow_data_t *
+get_flow (u32 index)
+{
+ flow_data_t *flow = NULL;
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+
+ if (pool_is_free_index (hm->flows, index))
+ return NULL;
+
+ flow = pool_elt_at_index (hm->flows, index);
+ return flow;
+}
+
+static inline u32
+get_flow_data_from_flow_ctx (u32 flow_ctx, u8 option)
+{
+ flow_data_t *flow = NULL;
+ ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+ u32 index;
+
+ index = IOAM_MASK_DECAP_BIT (flow_ctx);
+ //flow = pool_elt_at_index (hm->flows, index);
+ flow = &hm->flows[index];
+ return (flow->ctx[option]);
+}
+
+static inline u8
+is_seqno_enabled (void)
+{
+ return (ip6_hop_by_hop_ioam_main.has_seqno_option);
+}
+
+int ip6_trace_profile_setup ();
+#endif /* __included_ip6_hop_by_hop_ioam_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip6_hop_by_hop_packet.h b/src/vnet/ip/ip6_hop_by_hop_packet.h
new file mode 100644
index 00000000000..543ba8b0533
--- /dev/null
+++ b/src/vnet/ip/ip6_hop_by_hop_packet.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_ip6_hop_by_hop_packet_h__
+#define __included_ip6_hop_by_hop_packet_h__
+
+typedef struct
+{
+ /* Protocol for next header */
+ u8 protocol;
+ /*
+ * Length of hop_by_hop header in 8 octet units,
+ * not including the first 8 octets
+ */
+ u8 length;
+} ip6_hop_by_hop_header_t;
+
+typedef struct
+{
+ /* Option Type */
+#define HBH_OPTION_TYPE_SKIP_UNKNOWN (0x00)
+#define HBH_OPTION_TYPE_DISCARD_UNKNOWN (0x40)
+#define HBH_OPTION_TYPE_DISCARD_UNKNOWN_ICMP (0x80)
+#define HBH_OPTION_TYPE_DISCARD_UNKNOWN_ICMP_NOT_MCAST (0xc0)
+#define HBH_OPTION_TYPE_HIGH_ORDER_BITS (0xc0)
+#define HBH_OPTION_TYPE_DATA_CHANGE_ENROUTE (1<<5)
+ u8 type;
+ /* Length in octets of the option data field */
+ u8 length;
+} ip6_hop_by_hop_option_t;
+
+/* $$$$ IANA banana constants */
+#define HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST 59 /* Third highest bit set (change en-route) */
+#define HBH_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT 60 /* Third highest bit set (change en-route) */
+#define HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE 29
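+
+/*
+ * Worked example: HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST = 59 = 0x3b.
+ * The two high-order bits are 00 (skip this option if unknown) and the
+ * HBH_OPTION_TYPE_DATA_CHANGE_ENROUTE bit (1 << 5) is set, so a
+ * transit node may rewrite the option data.
+ */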
+
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED(struct {
+ ip6_hop_by_hop_option_t hdr;
+ u8 e2e_type;
+ u8 reserved;
+ u32 e2e_data;
+}) ioam_e2e_option_t;
+/* *INDENT-ON* */
+
+#endif /* __included_ip6_hop_by_hop_packet_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip6_input.c b/src/vnet/ip/ip6_input.c
new file mode 100644
index 00000000000..bbc2cebaa39
--- /dev/null
+++ b/src/vnet/ip/ip6_input.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip6_input.c: IP v6 input node
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ppp/ppp.h>
+#include <vnet/hdlc/hdlc.h>
+
+typedef struct
+{
+ u8 packet_data[64];
+} ip6_input_trace_t;
+
+static u8 *
+format_ip6_input_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ ip6_input_trace_t *t = va_arg (*va, ip6_input_trace_t *);
+
+ s = format (s, "%U",
+ format_ip6_header, t->packet_data, sizeof (t->packet_data));
+
+ return s;
+}
+
+typedef enum
+{
+ IP6_INPUT_NEXT_DROP,
+ IP6_INPUT_NEXT_LOOKUP,
+ IP6_INPUT_NEXT_ICMP_ERROR,
+ IP6_INPUT_N_NEXT,
+} ip6_input_next_t;
+
+/* Validate IP v6 packets and pass them either to forwarding code
+ or drop exception packets. */
+static uword
+ip6_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip6_main_t *im = &ip6_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+ u32 n_left_from, *from, *to_next;
+ ip6_input_next_t next_index;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip6_input_node.index);
+ vlib_simple_counter_main_t *cm;
+ u32 cpu_index = os_get_cpu_number ();
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
+ /* stride */ 1,
+ sizeof (ip6_input_trace_t));
+
+ cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
+ VNET_INTERFACE_COUNTER_IP6);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ vlib_buffer_t *p0, *p1;
+ ip6_header_t *ip0, *ip1;
+ u32 pi0, sw_if_index0, next0 = 0;
+ u32 pi1, sw_if_index1, next1 = 0;
+ u8 error0, error1, arc0, arc1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, sizeof (ip0[0]), LOAD);
+ CLIB_PREFETCH (p3->data, sizeof (ip1[0]), LOAD);
+ }
+
+ pi0 = from[0];
+ pi1 = from[1];
+
+ to_next[0] = pi0;
+ to_next[1] = pi1;
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+
+ ip0 = vlib_buffer_get_current (p0);
+ ip1 = vlib_buffer_get_current (p1);
+
+ sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (p1)->sw_if_index[VLIB_RX];
+
+ arc0 =
+ ip6_address_is_multicast (&ip0->dst_address) ?
+ lm->mcast_feature_arc_index : lm->ucast_feature_arc_index;
+ arc1 =
+ ip6_address_is_multicast (&ip1->dst_address) ?
+ lm->mcast_feature_arc_index : lm->ucast_feature_arc_index;
+
+ vnet_buffer (p0)->ip.adj_index[VLIB_RX] = ~0;
+ vnet_buffer (p1)->ip.adj_index[VLIB_RX] = ~0;
+
+ vnet_feature_arc_start (arc0, sw_if_index0, &next0, p0);
+ vnet_feature_arc_start (arc1, sw_if_index1, &next1, p1);
+
+ vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1);
+ vlib_increment_simple_counter (cm, cpu_index, sw_if_index1, 1);
+
+ error0 = error1 = IP6_ERROR_NONE;
+
+ /* Version != 6? Drop it. */
+ error0 =
+ (clib_net_to_host_u32
+ (ip0->ip_version_traffic_class_and_flow_label) >> 28) !=
+ 6 ? IP6_ERROR_VERSION : error0;
+ error1 =
+ (clib_net_to_host_u32
+ (ip1->ip_version_traffic_class_and_flow_label) >> 28) !=
+ 6 ? IP6_ERROR_VERSION : error1;
+
+	  /* Hop limit < 1? Drop it. Link-local packets, such as DHCPv6
+	   * packets from a client, arrive with hop-limit 1 and must not
+	   * be dropped, hence the check is < 1 rather than <= 1.
+	   */
+ error0 = ip0->hop_limit < 1 ? IP6_ERROR_TIME_EXPIRED : error0;
+ error1 = ip1->hop_limit < 1 ? IP6_ERROR_TIME_EXPIRED : error1;
+
+ /* L2 length must be at least minimal IP header. */
+ error0 =
+ p0->current_length <
+ sizeof (ip0[0]) ? IP6_ERROR_TOO_SHORT : error0;
+ error1 =
+ p1->current_length <
+ sizeof (ip1[0]) ? IP6_ERROR_TOO_SHORT : error1;
+
+ if (PREDICT_FALSE (error0 != IP6_ERROR_NONE))
+ {
+ if (error0 == IP6_ERROR_TIME_EXPIRED)
+ {
+ icmp6_error_set_vnet_buffer (p0, ICMP6_time_exceeded,
+ ICMP6_time_exceeded_ttl_exceeded_in_transit,
+ 0);
+ next0 = IP6_INPUT_NEXT_ICMP_ERROR;
+ }
+ else
+ {
+ next0 = IP6_INPUT_NEXT_DROP;
+ }
+ }
+ if (PREDICT_FALSE (error1 != IP6_ERROR_NONE))
+ {
+ if (error1 == IP6_ERROR_TIME_EXPIRED)
+ {
+ icmp6_error_set_vnet_buffer (p1, ICMP6_time_exceeded,
+ ICMP6_time_exceeded_ttl_exceeded_in_transit,
+ 0);
+ next1 = IP6_INPUT_NEXT_ICMP_ERROR;
+ }
+ else
+ {
+ next1 = IP6_INPUT_NEXT_DROP;
+ }
+ }
+
+ p0->error = error_node->errors[error0];
+ p1->error = error_node->errors[error1];
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, pi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ ip6_header_t *ip0;
+ u32 pi0, sw_if_index0, next0 = 0;
+ u8 error0, arc0;
+
+ pi0 = from[0];
+ to_next[0] = pi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip0 = vlib_buffer_get_current (p0);
+
+ sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
+ arc0 =
+ ip6_address_is_multicast (&ip0->dst_address) ?
+ lm->mcast_feature_arc_index : lm->ucast_feature_arc_index;
+ vnet_buffer (p0)->ip.adj_index[VLIB_RX] = ~0;
+ vnet_feature_arc_start (arc0, sw_if_index0, &next0, p0);
+
+ vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1);
+ error0 = IP6_ERROR_NONE;
+
+ /* Version != 6? Drop it. */
+ error0 =
+ (clib_net_to_host_u32
+ (ip0->ip_version_traffic_class_and_flow_label) >> 28) !=
+ 6 ? IP6_ERROR_VERSION : error0;
+
+	  /* Hop limit < 1? Drop it. Link-local packets, such as DHCPv6
+	   * packets from a client, arrive with hop-limit 1 and must not
+	   * be dropped, hence the check is < 1 rather than <= 1.
+	   */
+ error0 = ip0->hop_limit < 1 ? IP6_ERROR_TIME_EXPIRED : error0;
+
+ /* L2 length must be at least minimal IP header. */
+ error0 =
+ p0->current_length <
+ sizeof (ip0[0]) ? IP6_ERROR_TOO_SHORT : error0;
+
+ if (PREDICT_FALSE (error0 != IP6_ERROR_NONE))
+ {
+ if (error0 == IP6_ERROR_TIME_EXPIRED)
+ {
+ icmp6_error_set_vnet_buffer (p0, ICMP6_time_exceeded,
+ ICMP6_time_exceeded_ttl_exceeded_in_transit,
+ 0);
+ next0 = IP6_INPUT_NEXT_ICMP_ERROR;
+ }
+ else
+ {
+ next0 = IP6_INPUT_NEXT_DROP;
+ }
+ }
+ p0->error = error_node->errors[error0];
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+static char *ip6_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ip6_error
+#undef _
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_input_node) = {
+ .function = ip6_input,
+ .name = "ip6-input",
+ .vector_size = sizeof (u32),
+
+ .n_errors = IP6_N_ERROR,
+ .error_strings = ip6_error_strings,
+
+ .n_next_nodes = IP6_INPUT_N_NEXT,
+ .next_nodes = {
+ [IP6_INPUT_NEXT_DROP] = "error-drop",
+ [IP6_INPUT_NEXT_LOOKUP] = "ip6-lookup",
+ [IP6_INPUT_NEXT_ICMP_ERROR] = "ip6-icmp-error",
+ },
+
+ .format_buffer = format_ip6_header,
+ .format_trace = format_ip6_input_trace,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_input_node, ip6_input);
+
+static clib_error_t *
+ip6_init (vlib_main_t * vm)
+{
+ ethernet_register_input_type (vm, ETHERNET_TYPE_IP6, ip6_input_node.index);
+ ppp_register_input_protocol (vm, PPP_PROTOCOL_ip6, ip6_input_node.index);
+ hdlc_register_input_protocol (vm, HDLC_PROTOCOL_ip6, ip6_input_node.index);
+
+ {
+ pg_node_t *pn;
+ pn = pg_get_node (ip6_input_node.index);
+ pn->unformat_edit = unformat_pg_ip6_header;
+ }
+
+ /* Set flow hash to something non-zero. */
+ ip6_main.flow_hash_seed = 0xdeadbeef;
+
+ /* Default hop limit for packets we generate. */
+ ip6_main.host_config.ttl = 64;
+
+ return /* no error */ 0;
+}
+
+VLIB_INIT_FUNCTION (ip6_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip6_neighbor.c b/src/vnet/ip/ip6_neighbor.c
new file mode 100644
index 00000000000..5a1c9e86b4f
--- /dev/null
+++ b/src/vnet/ip/ip6_neighbor.c
@@ -0,0 +1,4088 @@
+/*
+ * ip/ip6_neighbor.c: IP6 neighbor handling
+ *
+ * Copyright (c) 2010 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip.h>
+#include <vnet/ip/ip6_neighbor.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vppinfra/mhash.h>
+#include <vppinfra/md5.h>
+#include <vnet/adj/adj.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/ip6_fib.h>
+
+/**
+ * @file
+ * @brief IPv6 Neighbor Adjacency and Neighbor Discovery.
+ *
+ * The files contains the API and CLI code for managing IPv6 neighbor
+ * adjacency tables and neighbor discovery logic.
+ */
+
+/* can't use sizeof link_layer_address, that's 8 */
+#define ETHER_MAC_ADDR_LEN 6
+
+/* advertised prefix option */
+typedef struct
+{
+ /* basic advertised information */
+ ip6_address_t prefix;
+ u8 prefix_len;
+ int adv_on_link_flag;
+ int adv_autonomous_flag;
+ u32 adv_valid_lifetime_in_secs;
+ u32 adv_pref_lifetime_in_secs;
+
+ /* advertised values are computed from these times if decrementing */
+ f64 valid_lifetime_expires;
+ f64 pref_lifetime_expires;
+
+ /* local information */
+ int enabled;
+ int deprecated_prefix_flag;
+ int decrement_lifetime_flag;
+
+#define MIN_ADV_VALID_LIFETIME 7203 /* seconds */
+#define DEF_ADV_VALID_LIFETIME 2592000
+#define DEF_ADV_PREF_LIFETIME 604800
+
+ /* extensions are added here, mobile, DNS etc.. */
+} ip6_radv_prefix_t;
+
+
+typedef struct
+{
+ /* group information */
+ u8 type;
+ ip6_address_t mcast_address;
+ u16 num_sources;
+ ip6_address_t *mcast_source_address_pool;
+} ip6_mldp_group_t;
+
+/* configured router advertisement information per ipv6 interface */
+typedef struct
+{
+
+ /* advertised config information, zero means unspecified */
+ u8 curr_hop_limit;
+ int adv_managed_flag;
+ int adv_other_flag;
+ u16 adv_router_lifetime_in_sec;
+ u32 adv_neighbor_reachable_time_in_msec;
+ u32 adv_time_in_msec_between_retransmitted_neighbor_solicitations;
+
+ /* mtu option */
+ u32 adv_link_mtu;
+
+ /* source link layer option */
+ u8 link_layer_address[8];
+ u8 link_layer_addr_len;
+
+ /* prefix option */
+ ip6_radv_prefix_t *adv_prefixes_pool;
+
+ /* Hash table mapping address to index in interface advertised prefix pool. */
+ mhash_t address_to_prefix_index;
+
+ /* MLDP group information */
+ ip6_mldp_group_t *mldp_group_pool;
+
+ /* Hash table mapping address to index in mldp address pool. */
+ mhash_t address_to_mldp_index;
+
+ /* local information */
+ u32 sw_if_index;
+ u32 fib_index;
+ int send_radv; /* radv on/off on this interface - set by config */
+  int cease_radv;		/* we are ceasing to send - set by config */
+ int send_unicast;
+ int adv_link_layer_address;
+ int prefix_option;
+ int failed_device_check;
+ int all_routers_mcast;
+ u32 seed;
+ u64 randomizer;
+ int ref_count;
+ adj_index_t all_nodes_adj_index;
+ adj_index_t all_routers_adj_index;
+ adj_index_t all_mldv2_routers_adj_index;
+
+ /* timing information */
+#define DEF_MAX_RADV_INTERVAL 200
+#define DEF_MIN_RADV_INTERVAL (.75 * DEF_MAX_RADV_INTERVAL)
+#define DEF_CURR_HOP_LIMIT 64
+#define DEF_DEF_RTR_LIFETIME (3 * DEF_MAX_RADV_INTERVAL)
+#define MAX_DEF_RTR_LIFETIME 9000
+
+#define MAX_INITIAL_RTR_ADVERT_INTERVAL 16 /* seconds */
+#define MAX_INITIAL_RTR_ADVERTISEMENTS 3	/* transmissions */
+#define MIN_DELAY_BETWEEN_RAS 3 /* seconds */
+#define MAX_DELAY_BETWEEN_RAS 1800 /* seconds */
+#define MAX_RA_DELAY_TIME .5 /* seconds */
+
+ f64 max_radv_interval;
+ f64 min_radv_interval;
+ f64 min_delay_between_radv;
+ f64 max_delay_between_radv;
+ f64 max_rtr_default_lifetime;
+
+ f64 last_radv_time;
+ f64 last_multicast_time;
+ f64 next_multicast_time;
+
+
+ u32 initial_adverts_count;
+ f64 initial_adverts_interval;
+ u32 initial_adverts_sent;
+
+ /* stats */
+ u32 n_advertisements_sent;
+ u32 n_solicitations_rcvd;
+ u32 n_solicitations_dropped;
+
+  /* Link local address to use (defaults to underlying physical for logical interfaces) */
+ ip6_address_t link_local_address;
+ u8 link_local_prefix_len;
+
+} ip6_radv_t;
+
+typedef struct
+{
+ u32 next_index;
+ uword node_index;
+ uword type_opaque;
+ uword data;
+ /* Used for nd event notification only */
+ void *data_callback;
+ u32 pid;
+} pending_resolution_t;
+
+
+typedef struct
+{
+ /* Hash tables mapping name to opcode. */
+ uword *opcode_by_name;
+
+ /* lite beer "glean" adjacency handling */
+ mhash_t pending_resolutions_by_address;
+ pending_resolution_t *pending_resolutions;
+
+ /* Mac address change notification */
+ mhash_t mac_changes_by_address;
+ pending_resolution_t *mac_changes;
+
+ u32 *neighbor_input_next_index_by_hw_if_index;
+
+ ip6_neighbor_t *neighbor_pool;
+
+ mhash_t neighbor_index_by_key;
+
+ u32 *if_radv_pool_index_by_sw_if_index;
+
+ ip6_radv_t *if_radv_pool;
+
+ /* Neighbor attack mitigation */
+ u32 limit_neighbor_cache_size;
+ u32 neighbor_delete_rotor;
+
+} ip6_neighbor_main_t;
+
+static ip6_neighbor_main_t ip6_neighbor_main;
+static ip6_address_t ip6a_zero; /* ip6 address 0 */
+
+static u8 *
+format_ip6_neighbor_ip6_entry (u8 * s, va_list * va)
+{
+ vlib_main_t *vm = va_arg (*va, vlib_main_t *);
+ ip6_neighbor_t *n = va_arg (*va, ip6_neighbor_t *);
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_sw_interface_t *si;
+ u8 *flags = 0;
+
+ if (!n)
+ return format (s, "%=12s%=20s%=6s%=20s%=40s", "Time", "Address", "Flags",
+ "Link layer", "Interface");
+
+ if (n->flags & IP6_NEIGHBOR_FLAG_DYNAMIC)
+ flags = format (flags, "D");
+
+ if (n->flags & IP6_NEIGHBOR_FLAG_STATIC)
+ flags = format (flags, "S");
+
+ si = vnet_get_sw_interface (vnm, n->key.sw_if_index);
+ s = format (s, "%=12U%=20U%=6s%=20U%=40U",
+ format_vlib_cpu_time, vm, n->cpu_time_last_updated,
+ format_ip6_address, &n->key.ip6_address,
+ flags ? (char *) flags : "",
+ format_ethernet_address, n->link_layer_address,
+ format_vnet_sw_interface_name, vnm, si);
+
+ vec_free (flags);
+ return s;
+}
+
+static clib_error_t *
+ip6_neighbor_sw_interface_up_down (vnet_main_t * vnm,
+ u32 sw_if_index, u32 flags)
+{
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ ip6_neighbor_t *n;
+
+ if (!(flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP))
+ {
+ u32 i, *to_delete = 0;
+
+ /* *INDENT-OFF* */
+ pool_foreach (n, nm->neighbor_pool,
+ ({
+ if (n->key.sw_if_index == sw_if_index)
+ vec_add1 (to_delete, n - nm->neighbor_pool);
+ }));
+ /* *INDENT-ON* */
+
+ for (i = 0; i < vec_len (to_delete); i++)
+ {
+ n = pool_elt_at_index (nm->neighbor_pool, to_delete[i]);
+ mhash_unset (&nm->neighbor_index_by_key, &n->key, 0);
+ fib_table_entry_delete_index (n->fib_entry_index, FIB_SOURCE_ADJ);
+ pool_put (nm->neighbor_pool, n);
+ }
+
+ vec_free (to_delete);
+ }
+
+ return 0;
+}
+
+VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ip6_neighbor_sw_interface_up_down);
+
+static void
+unset_random_neighbor_entry (void)
+{
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vnm->vlib_main;
+ ip6_neighbor_t *e;
+ u32 index;
+
+ index = pool_next_index (nm->neighbor_pool, nm->neighbor_delete_rotor);
+ nm->neighbor_delete_rotor = index;
+
+ /* Try again from elt 0, could happen if an intfc goes down */
+ if (index == ~0)
+ {
+ index = pool_next_index (nm->neighbor_pool, nm->neighbor_delete_rotor);
+ nm->neighbor_delete_rotor = index;
+ }
+
+ /* Nothing left in the pool */
+ if (index == ~0)
+ return;
+
+ e = pool_elt_at_index (nm->neighbor_pool, index);
+
+ vnet_unset_ip6_ethernet_neighbor (vm, e->key.sw_if_index,
+ &e->key.ip6_address,
+ e->link_layer_address,
+ ETHER_MAC_ADDR_LEN);
+}
+
+typedef struct
+{
+ u8 is_add;
+ u8 is_static;
+ u8 link_layer_address[6];
+ u32 sw_if_index;
+ ip6_address_t addr;
+} ip6_neighbor_set_unset_rpc_args_t;
+
+static void ip6_neighbor_set_unset_rpc_callback
+ (ip6_neighbor_set_unset_rpc_args_t * a);
+
+static void set_unset_ip6_neighbor_rpc
+ (vlib_main_t * vm,
+ u32 sw_if_index,
+ ip6_address_t * a, u8 * link_layer_address, int is_add, int is_static)
+{
+ ip6_neighbor_set_unset_rpc_args_t args;
+ void vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length);
+
+ args.sw_if_index = sw_if_index;
+ args.is_add = is_add;
+ args.is_static = is_static;
+ clib_memcpy (&args.addr, a, sizeof (*a));
+ clib_memcpy (args.link_layer_address, link_layer_address, 6);
+
+ vl_api_rpc_call_main_thread (ip6_neighbor_set_unset_rpc_callback,
+ (u8 *) & args, sizeof (args));
+}
+
+static void
+ip6_nbr_probe (ip_adjacency_t * adj)
+{
+ icmp6_neighbor_solicitation_header_t *h;
+ vnet_main_t *vnm = vnet_get_main ();
+ ip6_main_t *im = &ip6_main;
+ ip_interface_address_t *ia;
+ ip6_address_t *dst, *src;
+ vnet_hw_interface_t *hi;
+ vnet_sw_interface_t *si;
+ vlib_buffer_t *b;
+ int bogus_length;
+ vlib_main_t *vm;
+ u32 bi = 0;
+
+ vm = vlib_get_main ();
+
+ si = vnet_get_sw_interface (vnm, adj->rewrite_header.sw_if_index);
+ dst = &adj->sub_type.nbr.next_hop.ip6;
+
+ if (!(si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP))
+ {
+ return;
+ }
+ src = ip6_interface_address_matching_destination
+ (im, dst, adj->rewrite_header.sw_if_index, &ia);
+ if (!src)
+ {
+ return;
+ }
+
+ h = vlib_packet_template_get_packet (vm,
+ &im->discover_neighbor_packet_template,
+ &bi);
+
+ hi = vnet_get_sup_hw_interface (vnm, adj->rewrite_header.sw_if_index);
+
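+ /*
+ * the packet template is assumed to be pre-seeded with the
+ * solicited-node multicast prefix ff02::1:ff00:0/104; copy in the
+ * low 24 bits of the target address (RFC 4291 section 2.7.1)
+ */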
+ h->ip.dst_address.as_u8[13] = dst->as_u8[13];
+ h->ip.dst_address.as_u8[14] = dst->as_u8[14];
+ h->ip.dst_address.as_u8[15] = dst->as_u8[15];
+ h->ip.src_address = src[0];
+ h->neighbor.target_address = dst[0];
+
+ clib_memcpy (h->link_layer_option.ethernet_address,
+ hi->hw_address, vec_len (hi->hw_address));
+
+ h->neighbor.icmp.checksum =
+ ip6_tcp_udp_icmp_compute_checksum (vm, 0, &h->ip, &bogus_length);
+ ASSERT (bogus_length == 0);
+
+ b = vlib_get_buffer (vm, bi);
+ vnet_buffer (b)->sw_if_index[VLIB_RX] =
+ vnet_buffer (b)->sw_if_index[VLIB_TX] = adj->rewrite_header.sw_if_index;
+
+ /* Add encapsulation string for software interface (e.g. ethernet header). */
+ vnet_rewrite_one_header (adj[0], h, sizeof (ethernet_header_t));
+ vlib_buffer_advance (b, -adj->rewrite_header.data_bytes);
+
+ {
+ vlib_frame_t *f = vlib_get_frame_to_node (vm, hi->output_node_index);
+ u32 *to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi;
+ f->n_vectors = 1;
+ vlib_put_frame_to_node (vm, hi->output_node_index, f);
+ }
+}
+
+static void
+ip6_nd_mk_complete (adj_index_t ai, ip6_neighbor_t * nbr)
+{
+ adj_nbr_update_rewrite (ai, ADJ_NBR_REWRITE_FLAG_COMPLETE,
+ ethernet_build_rewrite (vnet_get_main (),
+ nbr->key.sw_if_index,
+ adj_get_link_type (ai),
+ nbr->link_layer_address));
+}
+
+static void
+ip6_nd_mk_incomplete (adj_index_t ai)
+{
+ ip_adjacency_t *adj = adj_get (ai);
+
+ adj_nbr_update_rewrite (ai,
+ ADJ_NBR_REWRITE_FLAG_INCOMPLETE,
+ ethernet_build_rewrite (vnet_get_main (),
+ adj->rewrite_header.sw_if_index,
+ adj_get_link_type (ai),
+ VNET_REWRITE_FOR_SW_INTERFACE_ADDRESS_BROADCAST));
+}
+
+#define IP6_NBR_MK_KEY(k, sw_if_index, addr) \
+{ \
+ k.sw_if_index = sw_if_index; \
+ k.ip6_address = *addr; \
+ k.pad = 0; \
+}
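+
+/*
+ * note: the pad field must be zeroed, since the key struct is hashed
+ * bytewise by the mhash; uninitialized padding would defeat lookups
+ */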
+
+static ip6_neighbor_t *
+ip6_nd_find (u32 sw_if_index, const ip6_address_t * addr)
+{
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ ip6_neighbor_t *n = NULL;
+ ip6_neighbor_key_t k;
+ uword *p;
+
+ IP6_NBR_MK_KEY (k, sw_if_index, addr);
+
+ p = mhash_get (&nm->neighbor_index_by_key, &k);
+ if (p)
+ {
+ n = pool_elt_at_index (nm->neighbor_pool, p[0]);
+ }
+
+ return (n);
+}
+
+static adj_walk_rc_t
+ip6_nd_mk_complete_walk (adj_index_t ai, void *ctx)
+{
+ ip6_neighbor_t *nbr = ctx;
+
+ ip6_nd_mk_complete (ai, nbr);
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+static adj_walk_rc_t
+ip6_nd_mk_incomplete_walk (adj_index_t ai, void *ctx)
+{
+ ip6_nd_mk_incomplete (ai);
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+void
+ip6_ethernet_update_adjacency (vnet_main_t * vnm, u32 sw_if_index, u32 ai)
+{
+ ip6_neighbor_t *nbr;
+ ip_adjacency_t *adj;
+
+ adj = adj_get (ai);
+
+ nbr = ip6_nd_find (sw_if_index, &adj->sub_type.nbr.next_hop.ip6);
+
+ if (NULL != nbr)
+ {
+ adj_nbr_walk_nh6 (sw_if_index, &nbr->key.ip6_address,
+ ip6_nd_mk_complete_walk, nbr);
+ }
+ else
+ {
+ /*
+ * no matching ND entry.
+ * construct the rewrite required for an ND packet, and stick
+ * that in the adj's pipe to smoke.
+ */
+ adj_nbr_update_rewrite (ai,
+ ADJ_NBR_REWRITE_FLAG_INCOMPLETE,
+ ethernet_build_rewrite (vnm,
+ sw_if_index,
+ VNET_LINK_IP6,
+ VNET_REWRITE_FOR_SW_INTERFACE_ADDRESS_BROADCAST));
+
+ /*
+ * since the FIB has added this adj for a route, it makes sense that
+ * it may want to forward traffic sometime soon. Let's send a
+ * speculative ND - just one. Probing periodically wouldn't be bad
+ * either, but that's more code than is warranted at this time for
+ * relatively little reward.
+ */
+ ip6_nbr_probe (adj);
+ }
+}
+
+int
+vnet_set_ip6_ethernet_neighbor (vlib_main_t * vm,
+ u32 sw_if_index,
+ ip6_address_t * a,
+ u8 * link_layer_address,
+ uword n_bytes_link_layer_address,
+ int is_static)
+{
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ ip6_neighbor_key_t k;
+ ip6_neighbor_t *n = 0;
+ int make_new_nd_cache_entry = 1;
+ uword *p;
+ u32 next_index;
+ pending_resolution_t *pr, *mc;
+
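+ /* not on the main thread: punt the update via RPC so that the
+ neighbor tables have a single writer */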
+ if (os_get_cpu_number ())
+ {
+ set_unset_ip6_neighbor_rpc (vm, sw_if_index, a, link_layer_address,
+ 1 /* set new neighbor */ , is_static);
+ return 0;
+ }
+
+ k.sw_if_index = sw_if_index;
+ k.ip6_address = a[0];
+ k.pad = 0;
+
+ p = mhash_get (&nm->neighbor_index_by_key, &k);
+ if (p)
+ {
+ n = pool_elt_at_index (nm->neighbor_pool, p[0]);
+ /* Refuse to over-write static neighbor entry. */
+ if (!is_static && (n->flags & IP6_NEIGHBOR_FLAG_STATIC))
+ return -2;
+ make_new_nd_cache_entry = 0;
+ }
+
+ if (make_new_nd_cache_entry)
+ {
+ fib_prefix_t pfx = {
+ .fp_len = 128,
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_addr = {
+ .ip6 = k.ip6_address,
+ },
+ };
+ u32 fib_index;
+
+ pool_get (nm->neighbor_pool, n);
+ mhash_set (&nm->neighbor_index_by_key, &k, n - nm->neighbor_pool,
+ /* old value */ 0);
+ n->key = k;
+
+ clib_memcpy (n->link_layer_address,
+ link_layer_address, n_bytes_link_layer_address);
+
+ /*
+ * create the adj-fib: the entry in the FIB table for, and via, the peer.
+ */
+ fib_index = ip6_main.fib_index_by_sw_if_index[n->key.sw_if_index];
+ n->fib_entry_index =
+ fib_table_entry_update_one_path (fib_index, &pfx,
+ FIB_SOURCE_ADJ,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP6,
+ &pfx.fp_addr,
+ n->key.sw_if_index,
+ ~0, 1, NULL, /* no label stack */
+ FIB_ROUTE_PATH_FLAG_NONE);
+ }
+ else
+ {
+ /*
+ * prevent a DoS attack from the data-plane that
+ * spams us with no-op updates to the MAC address
+ */
+ if (0 == memcmp (n->link_layer_address,
+ link_layer_address, n_bytes_link_layer_address))
+ return -1;
+
+ clib_memcpy (n->link_layer_address,
+ link_layer_address, n_bytes_link_layer_address);
+ }
+
+ /* Update time stamp and flags. */
+ n->cpu_time_last_updated = clib_cpu_time_now ();
+ if (is_static)
+ n->flags |= IP6_NEIGHBOR_FLAG_STATIC;
+ else
+ n->flags |= IP6_NEIGHBOR_FLAG_DYNAMIC;
+
+ adj_nbr_walk_nh6 (sw_if_index,
+ &n->key.ip6_address, ip6_nd_mk_complete_walk, n);
+
+ /* Customer(s) waiting for this address to be resolved? */
+ p = mhash_get (&nm->pending_resolutions_by_address, a);
+ if (p)
+ {
+ next_index = p[0];
+
+ while (next_index != (u32) ~ 0)
+ {
+ pr = pool_elt_at_index (nm->pending_resolutions, next_index);
+ vlib_process_signal_event (vm, pr->node_index,
+ pr->type_opaque, pr->data);
+ next_index = pr->next_index;
+ pool_put (nm->pending_resolutions, pr);
+ }
+
+ mhash_unset (&nm->pending_resolutions_by_address, a, 0);
+ }
+
+ /* Customer(s) requesting ND event for this address? */
+ p = mhash_get (&nm->mac_changes_by_address, a);
+ if (p)
+ {
+ next_index = p[0];
+
+ while (next_index != (u32) ~ 0)
+ {
+ int (*fp) (u32, u8 *, u32, ip6_address_t *);
+ int rv = 1;
+ mc = pool_elt_at_index (nm->mac_changes, next_index);
+ fp = mc->data_callback;
+
+ /* Call the user's data callback, return 1 to suppress dup events */
+ if (fp)
+ rv =
+ (*fp) (mc->data, link_layer_address, sw_if_index, &ip6a_zero);
+ /*
+ * Signal the resolver process, as long as the user
+ * says they want to be notified
+ */
+ if (rv == 0)
+ vlib_process_signal_event (vm, mc->node_index,
+ mc->type_opaque, mc->data);
+ next_index = mc->next_index;
+ }
+ }
+
+ return 0;
+}
+
+int
+vnet_unset_ip6_ethernet_neighbor (vlib_main_t * vm,
+ u32 sw_if_index,
+ ip6_address_t * a,
+ u8 * link_layer_address,
+ uword n_bytes_link_layer_address)
+{
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ ip6_neighbor_key_t k;
+ ip6_neighbor_t *n;
+ uword *p;
+ int rv = 0;
+
+ if (os_get_cpu_number ())
+ {
+ set_unset_ip6_neighbor_rpc (vm, sw_if_index, a, link_layer_address,
+ 0 /* unset */ , 0);
+ return 0;
+ }
+
+ k.sw_if_index = sw_if_index;
+ k.ip6_address = a[0];
+ k.pad = 0;
+
+ p = mhash_get (&nm->neighbor_index_by_key, &k);
+ if (p == 0)
+ {
+ rv = -1;
+ goto out;
+ }
+
+ n = pool_elt_at_index (nm->neighbor_pool, p[0]);
+ mhash_unset (&nm->neighbor_index_by_key, &n->key, 0);
+
+ adj_nbr_walk_nh6 (sw_if_index,
+ &n->key.ip6_address, ip6_nd_mk_incomplete_walk, NULL);
+
+ fib_table_entry_delete_index (n->fib_entry_index, FIB_SOURCE_ADJ);
+ pool_put (nm->neighbor_pool, n);
+
+out:
+ return rv;
+}
+
+static void ip6_neighbor_set_unset_rpc_callback
+ (ip6_neighbor_set_unset_rpc_args_t * a)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ if (a->is_add)
+ vnet_set_ip6_ethernet_neighbor (vm, a->sw_if_index, &a->addr,
+ a->link_layer_address, 6, a->is_static);
+ else
+ vnet_unset_ip6_ethernet_neighbor (vm, a->sw_if_index, &a->addr,
+ a->link_layer_address, 6);
+}
+
+static int
+ip6_neighbor_sort (void *a1, void *a2)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip6_neighbor_t *n1 = a1, *n2 = a2;
+ int cmp;
+ cmp = vnet_sw_interface_compare (vnm, n1->key.sw_if_index,
+ n2->key.sw_if_index);
+ if (!cmp)
+ cmp = ip6_address_compare (&n1->key.ip6_address, &n2->key.ip6_address);
+ return cmp;
+}
+
+ip6_neighbor_t *
+ip6_neighbors_entries (u32 sw_if_index)
+{
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ ip6_neighbor_t *n, *ns = 0;
+
+ /* *INDENT-OFF* */
+ pool_foreach (n, nm->neighbor_pool,
+ ({
+ if (sw_if_index != ~0 && n->key.sw_if_index != sw_if_index)
+ continue;
+ vec_add1 (ns, n[0]);
+ }));
+ /* *INDENT-ON* */
+
+ if (ns)
+ vec_sort_with_function (ns, ip6_neighbor_sort);
+ return ns;
+}
+
+static clib_error_t *
+show_ip6_neighbors (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip6_neighbor_t *n, *ns;
+ clib_error_t *error = 0;
+ u32 sw_if_index;
+
+ /* Filter entries by interface if given. */
+ sw_if_index = ~0;
+ (void) unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index);
+
+ ns = ip6_neighbors_entries (sw_if_index);
+ if (ns)
+ {
+ vlib_cli_output (vm, "%U", format_ip6_neighbor_ip6_entry, vm, 0);
+ vec_foreach (n, ns)
+ {
+ vlib_cli_output (vm, "%U", format_ip6_neighbor_ip6_entry, vm, n);
+ }
+ vec_free (ns);
+ }
+
+ return error;
+}
+
+/*?
+ * This command is used to display the adjacent IPv6 hosts found via
+ * neighbor discovery. Optionally, limit the output to the specified
+ * interface.
+ *
+ * @cliexpar
+ * Example of how to display the IPv6 neighbor adjacency table:
+ * @cliexstart{show ip6 neighbors}
+ * Time Address Flags Link layer Interface
+ * 34.0910 ::a:1:1:0:7 02:fe:6a:07:39:6f GigabitEthernet2/0/0
+ * 173.2916 ::b:5:1:c:2 02:fe:50:62:3a:94 GigabitEthernet2/0/0
+ * 886.6654 ::1:1:c:0:9 S 02:fe:e4:45:27:5b GigabitEthernet3/0/0
+ * @cliexend
+ * Example of how to display the IPv6 neighbor adjacency table for given interface:
+ * @cliexstart{show ip6 neighbors GigabitEthernet2/0/0}
+ * Time Address Flags Link layer Interface
+ * 34.0910 ::a:1:1:0:7 02:fe:6a:07:39:6f GigabitEthernet2/0/0
+ * 173.2916 ::b:5:1:c:2 02:fe:50:62:3a:94 GigabitEthernet2/0/0
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_ip6_neighbors_command, static) = {
+ .path = "show ip6 neighbors",
+ .function = show_ip6_neighbors,
+ .short_help = "show ip6 neighbors [<interface>]",
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_ip6_neighbor (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip6_address_t addr;
+ u8 mac_address[6];
+ int addr_valid = 0;
+ int is_del = 0;
+ int is_static = 0;
+ u32 sw_if_index;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ /* intfc, ip6-address, mac-address */
+ if (unformat (input, "%U %U %U",
+ unformat_vnet_sw_interface, vnm, &sw_if_index,
+ unformat_ip6_address, &addr,
+ unformat_ethernet_address, mac_address))
+ addr_valid = 1;
+
+ else if (unformat (input, "delete") || unformat (input, "del"))
+ is_del = 1;
+ else if (unformat (input, "static"))
+ is_static = 1;
+ else
+ break;
+ }
+
+ if (!addr_valid)
+ return clib_error_return (0, "Missing interface, ip6 or hw address");
+
+ if (!is_del)
+ vnet_set_ip6_ethernet_neighbor (vm, sw_if_index, &addr,
+ mac_address, sizeof (mac_address),
+ is_static);
+ else
+ vnet_unset_ip6_ethernet_neighbor (vm, sw_if_index, &addr,
+ mac_address, sizeof (mac_address));
+ return 0;
+}
+
+/*?
+ * This command is used to manually add an entry to the IPv6 neighbor
+ * adjacency table. Optionally, the entry can be added as static. It is
+ * also used to remove an entry from the table. Use the '<em>show ip6
+ * neighbors</em>' command to display all learned and manually entered entries.
+ *
+ * @cliexpar
+ * Example of how to add a static entry to the IPv6 neighbor adjacency table:
+ * @cliexcmd{set ip6 neighbor GigabitEthernet2/0/0 ::1:1:c:0:9 02:fe:e4:45:27:5b static}
+ * Example of how to delete an entry from the IPv6 neighbor adjacency table:
+ * @cliexcmd{set ip6 neighbor del GigabitEthernet2/0/0 ::1:1:c:0:9 02:fe:e4:45:27:5b}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_ip6_neighbor_command, static) =
+{
+ .path = "set ip6 neighbor",
+ .function = set_ip6_neighbor,
+ .short_help = "set ip6 neighbor [del] <interface> <ip6-address> <mac-address> [static]",
+};
+/* *INDENT-ON* */
+
+typedef enum
+{
+ ICMP6_NEIGHBOR_SOLICITATION_NEXT_DROP,
+ ICMP6_NEIGHBOR_SOLICITATION_NEXT_REPLY,
+ ICMP6_NEIGHBOR_SOLICITATION_N_NEXT,
+} icmp6_neighbor_solicitation_or_advertisement_next_t;
+
+static_always_inline uword
+icmp6_neighbor_solicitation_or_advertisement (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ uword is_solicitation)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip6_main_t *im = &ip6_main;
+ uword n_packets = frame->n_vectors;
+ u32 *from, *to_next;
+ u32 n_left_from, n_left_to_next, next_index, n_advertisements_sent;
+ icmp6_neighbor_discovery_option_type_t option_type;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip6_icmp_input_node.index);
+ int bogus_length;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = n_packets;
+ next_index = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
+ /* stride */ 1,
+ sizeof (icmp6_input_trace_t));
+
+ option_type =
+ (is_solicitation
+ ? ICMP6_NEIGHBOR_DISCOVERY_OPTION_source_link_layer_address
+ : ICMP6_NEIGHBOR_DISCOVERY_OPTION_target_link_layer_address);
+ n_advertisements_sent = 0;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ ip6_header_t *ip0;
+ icmp6_neighbor_solicitation_or_advertisement_header_t *h0;
+ icmp6_neighbor_discovery_ethernet_link_layer_address_option_t *o0;
+ u32 bi0, options_len0, sw_if_index0, next0, error0;
+ u32 ip6_sadd_link_local, ip6_sadd_unspecified;
+ int is_rewrite0;
+ u32 ni0;
+
+ bi0 = to_next[0] = from[0];
+
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (p0);
+ h0 = ip6_next_header (ip0);
+ options_len0 =
+ clib_net_to_host_u16 (ip0->payload_length) - sizeof (h0[0]);
+
+ error0 = ICMP6_ERROR_NONE;
+ sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
+ ip6_sadd_link_local =
+ ip6_address_is_link_local_unicast (&ip0->src_address);
+ ip6_sadd_unspecified =
+ ip6_address_is_unspecified (&ip0->src_address);
+
+ /* Check that source address is unspecified, link-local or else on-link. */
+ if (!ip6_sadd_unspecified && !ip6_sadd_link_local)
+ {
+ u32 src_adj_index0 = ip6_src_lookup_for_packet (im, p0, ip0);
+
+ if (ADJ_INDEX_INVALID != src_adj_index0)
+ {
+ ip_adjacency_t *adj0 =
+ ip_get_adjacency (&im->lookup_main, src_adj_index0);
+
+ /* Allow all realistic-looking rewrite adjacencies to pass */
+ ni0 = adj0->lookup_next_index;
+ is_rewrite0 = (ni0 >= IP_LOOKUP_NEXT_ARP) &&
+ (ni0 < IP6_LOOKUP_N_NEXT);
+
+ error0 = ((adj0->rewrite_header.sw_if_index != sw_if_index0
+ || !is_rewrite0)
+ ?
+ ICMP6_ERROR_NEIGHBOR_SOLICITATION_SOURCE_NOT_ON_LINK
+ : error0);
+ }
+ else
+ {
+ error0 =
+ ICMP6_ERROR_NEIGHBOR_SOLICITATION_SOURCE_NOT_ON_LINK;
+ }
+ }
+
+ o0 = (void *) (h0 + 1);
+ o0 = ((options_len0 == 8 && o0->header.type == option_type
+ && o0->header.n_data_u64s == 1) ? o0 : 0);
+
+ /* If src address unspecified or link local, do not learn neighbor MAC */
+ if (PREDICT_TRUE (error0 == ICMP6_ERROR_NONE && o0 != 0 &&
+ !ip6_sadd_unspecified && !ip6_sadd_link_local))
+ {
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ if (nm->limit_neighbor_cache_size &&
+ pool_elts (nm->neighbor_pool) >=
+ nm->limit_neighbor_cache_size)
+ unset_random_neighbor_entry ();
+ vnet_set_ip6_ethernet_neighbor (vm, sw_if_index0,
+ is_solicitation ?
+ &ip0->src_address :
+ &h0->target_address,
+ o0->ethernet_address,
+ sizeof (o0->ethernet_address),
+ 0);
+ }
+
+ if (is_solicitation && error0 == ICMP6_ERROR_NONE)
+ {
+ /* Check that target address is local to this router. */
+ fib_node_index_t fei;
+ u32 fib_index;
+
+ fib_index =
+ ip6_fib_table_get_index_for_sw_if_index (sw_if_index0);
+
+ if (~0 == fib_index)
+ {
+ error0 = ICMP6_ERROR_NEIGHBOR_SOLICITATION_SOURCE_UNKNOWN;
+ }
+ else
+ {
+ fei = ip6_fib_table_lookup_exact_match (fib_index,
+ &h0->target_address,
+ 128);
+
+ if (FIB_NODE_INDEX_INVALID == fei ||
+ !(FIB_ENTRY_FLAG_LOCAL &
+ fib_entry_get_flags_for_source (fei,
+ FIB_SOURCE_INTERFACE)))
+ {
+ error0 =
+ ICMP6_ERROR_NEIGHBOR_SOLICITATION_SOURCE_UNKNOWN;
+ }
+ }
+ }
+
+ if (is_solicitation)
+ next0 = (error0 != ICMP6_ERROR_NONE
+ ? ICMP6_NEIGHBOR_SOLICITATION_NEXT_DROP
+ : ICMP6_NEIGHBOR_SOLICITATION_NEXT_REPLY);
+ else
+ {
+ next0 = 0;
+ error0 = error0 == ICMP6_ERROR_NONE ?
+ ICMP6_ERROR_NEIGHBOR_ADVERTISEMENTS_RX : error0;
+ }
+
+ if (is_solicitation && error0 == ICMP6_ERROR_NONE)
+ {
+ vnet_sw_interface_t *sw_if0;
+ ethernet_interface_t *eth_if0;
+ ethernet_header_t *eth0;
+
+ /* dst address is either source address or the all-nodes mcast addr */
+ if (!ip6_sadd_unspecified)
+ ip0->dst_address = ip0->src_address;
+ else
+ ip6_set_reserved_multicast_address (&ip0->dst_address,
+ IP6_MULTICAST_SCOPE_link_local,
+ IP6_MULTICAST_GROUP_ID_all_hosts);
+
+ ip0->src_address = h0->target_address;
+ ip0->hop_limit = 255;
+ h0->icmp.type = ICMP6_neighbor_advertisement;
+
+ sw_if0 = vnet_get_sup_sw_interface (vnm, sw_if_index0);
+ ASSERT (sw_if0->type == VNET_SW_INTERFACE_TYPE_HARDWARE);
+ eth_if0 =
+ ethernet_get_interface (&ethernet_main, sw_if0->hw_if_index);
+ if (eth_if0 && o0)
+ {
+ clib_memcpy (o0->ethernet_address, eth_if0->address, 6);
+ o0->header.type =
+ ICMP6_NEIGHBOR_DISCOVERY_OPTION_target_link_layer_address;
+ }
+
+ h0->advertisement_flags = clib_host_to_net_u32
+ (ICMP6_NEIGHBOR_ADVERTISEMENT_FLAG_SOLICITED
+ | ICMP6_NEIGHBOR_ADVERTISEMENT_FLAG_OVERRIDE);
+
+ h0->icmp.checksum = 0;
+ h0->icmp.checksum =
+ ip6_tcp_udp_icmp_compute_checksum (vm, p0, ip0,
+ &bogus_length);
+ ASSERT (bogus_length == 0);
+
+ /* Reuse current MAC header, copy SMAC to DMAC and
+ * interface MAC to SMAC */
+ vlib_buffer_advance (p0, -ethernet_buffer_header_size (p0));
+ eth0 = vlib_buffer_get_current (p0);
+ clib_memcpy (eth0->dst_address, eth0->src_address, 6);
+ if (eth_if0)
+ clib_memcpy (eth0->src_address, eth_if0->address, 6);
+
+ /* Setup input and output sw_if_index for packet */
+ ASSERT (vnet_buffer (p0)->sw_if_index[VLIB_RX] == sw_if_index0);
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = sw_if_index0;
+ vnet_buffer (p0)->sw_if_index[VLIB_RX] =
+ vnet_main.local_interface_sw_if_index;
+
+ n_advertisements_sent++;
+ }
+
+ p0->error = error_node->errors[error0];
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ /* Account for advertisements sent. */
+ vlib_error_count (vm, error_node->node_index,
+ ICMP6_ERROR_NEIGHBOR_ADVERTISEMENTS_TX,
+ n_advertisements_sent);
+
+ return frame->n_vectors;
+}
+
+/* for "syslogging" - use elog for now */
+#define foreach_log_level \
+ _ (DEBUG, "DEBUG") \
+ _ (INFO, "INFORMATION") \
+ _ (NOTICE, "NOTICE") \
+ _ (WARNING, "WARNING") \
+ _ (ERR, "ERROR") \
+ _ (CRIT, "CRITICAL") \
+ _ (ALERT, "ALERT") \
+ _ (EMERG, "EMERGENCY")
+
+typedef enum
+{
+#define _(f,s) LOG_##f,
+ foreach_log_level
+#undef _
+} log_level_t;
+
+static char *log_level_strings[] = {
+#define _(f,s) s,
+ foreach_log_level
+#undef _
+};
+
+static int logmask = 1 << LOG_DEBUG;
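+
+/*
+ * logmask is a bitmask over log_level_t: bit (1 << level) enables that
+ * level. Only LOG_DEBUG (bit 0) is enabled by default, so e.g. the
+ * LOG_WARNING consistency checks below stay silent until the mask is
+ * widened (e.g. logmask |= 1 << LOG_WARNING).
+ */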
+
+static void
+ip6_neighbor_syslog (vlib_main_t * vm, int priority, char *fmt, ...)
+{
+ /* just use elog for now */
+ u8 *what;
+ va_list va;
+
+ if ((priority > LOG_EMERG) || !(logmask & (1 << priority)))
+ return;
+
+ va_start (va, fmt);
+ if (fmt)
+ {
+ what = va_format (0, fmt, &va);
+
+ ELOG_TYPE_DECLARE (e) = {
+ .format = "ip6 nd: (%s): %s",
+ .format_args = "T4T4",
+ };
+ struct
+ {
+ u32 s[2];
+ } *ed;
+ ed = ELOG_DATA (&vm->elog_main, e);
+ ed->s[0] = elog_string (&vm->elog_main, log_level_strings[priority]);
+ ed->s[1] = elog_string (&vm->elog_main, (char *) what);
+ }
+ va_end (va);
+ return;
+}
+
+/* ipv6 neighbor discovery - router advertisements */
+typedef enum
+{
+ ICMP6_ROUTER_SOLICITATION_NEXT_DROP,
+ ICMP6_ROUTER_SOLICITATION_NEXT_REPLY_RW,
+ ICMP6_ROUTER_SOLICITATION_NEXT_REPLY_TX,
+ ICMP6_ROUTER_SOLICITATION_N_NEXT,
+} icmp6_router_solicitation_or_advertisement_next_t;
+
+static_always_inline uword
+icmp6_router_solicitation (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip6_main_t *im = &ip6_main;
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ uword n_packets = frame->n_vectors;
+ u32 *from, *to_next;
+ u32 n_left_from, n_left_to_next, next_index;
+ u32 n_advertisements_sent = 0;
+ int bogus_length;
+
+ icmp6_neighbor_discovery_option_type_t option_type;
+
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip6_icmp_input_node.index);
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = n_packets;
+ next_index = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
+ /* stride */ 1,
+ sizeof (icmp6_input_trace_t));
+
+ /* source may append its LL address */
+ option_type = ICMP6_NEIGHBOR_DISCOVERY_OPTION_source_link_layer_address;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ ip6_header_t *ip0;
+ ip6_radv_t *radv_info = 0;
+
+ icmp6_neighbor_discovery_header_t *h0;
+ icmp6_neighbor_discovery_ethernet_link_layer_address_option_t *o0;
+
+ u32 bi0, options_len0, sw_if_index0, next0, error0;
+ u32 is_solicitation = 1, is_dropped = 0;
+ u32 is_unspecified, is_link_local;
+
+ bi0 = to_next[0] = from[0];
+
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (p0);
+ h0 = ip6_next_header (ip0);
+ options_len0 =
+ clib_net_to_host_u16 (ip0->payload_length) - sizeof (h0[0]);
+ is_unspecified = ip6_address_is_unspecified (&ip0->src_address);
+ is_link_local =
+ ip6_address_is_link_local_unicast (&ip0->src_address);
+
+ error0 = ICMP6_ERROR_NONE;
+ sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
+
+ /* check if solicitation (not from nd_timer node) */
+ if (ip6_address_is_unspecified (&ip0->dst_address))
+ is_solicitation = 0;
+
+ /* Check that source address is unspecified, link-local or else on-link. */
+ if (!is_unspecified && !is_link_local)
+ {
+ u32 src_adj_index0 = ip6_src_lookup_for_packet (im, p0, ip0);
+
+ if (ADJ_INDEX_INVALID != src_adj_index0)
+ {
+ ip_adjacency_t *adj0 = ip_get_adjacency (&im->lookup_main,
+ src_adj_index0);
+
+ error0 = (adj0->rewrite_header.sw_if_index != sw_if_index0
+ ?
+ ICMP6_ERROR_ROUTER_SOLICITATION_SOURCE_NOT_ON_LINK
+ : error0);
+ }
+ else
+ {
+ error0 = ICMP6_ERROR_ROUTER_SOLICITATION_SOURCE_NOT_ON_LINK;
+ }
+ }
+
+ /* check for source LL option and process */
+ o0 = (void *) (h0 + 1);
+ o0 = ((options_len0 == 8
+ && o0->header.type == option_type
+ && o0->header.n_data_u64s == 1) ? o0 : 0);
+
+ /* if src address unspecified IGNORE any options */
+ if (PREDICT_TRUE (error0 == ICMP6_ERROR_NONE && o0 != 0 &&
+ !is_unspecified && !is_link_local))
+ {
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ if (nm->limit_neighbor_cache_size &&
+ pool_elts (nm->neighbor_pool) >=
+ nm->limit_neighbor_cache_size)
+ unset_random_neighbor_entry ();
+
+ vnet_set_ip6_ethernet_neighbor (vm, sw_if_index0,
+ &ip0->src_address,
+ o0->ethernet_address,
+ sizeof (o0->ethernet_address),
+ 0);
+ }
+
+ /* default is to drop */
+ next0 = ICMP6_ROUTER_SOLICITATION_NEXT_DROP;
+
+ if (error0 == ICMP6_ERROR_NONE)
+ {
+ vnet_sw_interface_t *sw_if0;
+ ethernet_interface_t *eth_if0;
+ u32 adj_index0;
+
+ sw_if0 = vnet_get_sup_sw_interface (vnm, sw_if_index0);
+ ASSERT (sw_if0->type == VNET_SW_INTERFACE_TYPE_HARDWARE);
+ eth_if0 =
+ ethernet_get_interface (&ethernet_main, sw_if0->hw_if_index);
+
+ /* only support ethernet interface type for now */
+ error0 =
+ (!eth_if0) ? ICMP6_ERROR_ROUTER_SOLICITATION_UNSUPPORTED_INTF
+ : error0;
+
+ if (error0 == ICMP6_ERROR_NONE)
+ {
+ u32 ri;
+
+ /* adjust the size of the buffer to include just the ipv6 header */
+ p0->current_length -=
+ (options_len0 +
+ sizeof (icmp6_neighbor_discovery_header_t));
+
+ /* look up the radv_t information for this interface */
+ vec_validate_init_empty
+ (nm->if_radv_pool_index_by_sw_if_index, sw_if_index0, ~0);
+
+ ri = nm->if_radv_pool_index_by_sw_if_index[sw_if_index0];
+
+ if (ri != ~0)
+ radv_info = pool_elt_at_index (nm->if_radv_pool, ri);
+
+ error0 =
+ ((!radv_info) ?
+ ICMP6_ERROR_ROUTER_SOLICITATION_RADV_NOT_CONFIG :
+ error0);
+
+ if (error0 == ICMP6_ERROR_NONE)
+ {
+ f64 now = vlib_time_now (vm);
+
+ /* for solicited adverts - need to rate limit */
+ if (is_solicitation)
+ {
+ if ((now - radv_info->last_radv_time) <
+ MIN_DELAY_BETWEEN_RAS)
+ is_dropped = 1;
+ else
+ radv_info->last_radv_time = now;
+ }
+
+ /* send now */
+ icmp6_router_advertisement_header_t rh;
+
+ rh.icmp.type = ICMP6_router_advertisement;
+ rh.icmp.code = 0;
+ rh.icmp.checksum = 0;
+
+ rh.current_hop_limit = radv_info->curr_hop_limit;
+ rh.router_lifetime_in_sec =
+ clib_host_to_net_u16
+ (radv_info->adv_router_lifetime_in_sec);
+ rh.time_in_msec_between_retransmitted_neighbor_solicitations =
+ clib_host_to_net_u32
+ (radv_info->adv_time_in_msec_between_retransmitted_neighbor_solicitations);
+ rh.neighbor_reachable_time_in_msec =
+ clib_host_to_net_u32 (radv_info->
+ adv_neighbor_reachable_time_in_msec);
+
+ rh.flags =
+ (radv_info->adv_managed_flag) ?
+ ICMP6_ROUTER_DISCOVERY_FLAG_ADDRESS_CONFIG_VIA_DHCP :
+ 0;
+ rh.flags |=
+ ((radv_info->adv_other_flag) ?
+ ICMP6_ROUTER_DISCOVERY_FLAG_OTHER_CONFIG_VIA_DHCP :
+ 0);
+
+
+ u16 payload_length =
+ sizeof (icmp6_router_advertisement_header_t);
+
+ vlib_buffer_add_data (vm,
+ p0->free_list_index,
+ bi0,
+ (void *) &rh,
+ sizeof
+ (icmp6_router_advertisement_header_t));
+
+ if (radv_info->adv_link_layer_address)
+ {
+ icmp6_neighbor_discovery_ethernet_link_layer_address_option_t
+ h;
+
+ h.header.type =
+ ICMP6_NEIGHBOR_DISCOVERY_OPTION_source_link_layer_address;
+ h.header.n_data_u64s = 1;
+
+ /* copy ll address */
+ clib_memcpy (&h.ethernet_address[0],
+ eth_if0->address, 6);
+
+ vlib_buffer_add_data (vm,
+ p0->free_list_index,
+ bi0,
+ (void *) &h,
+ sizeof
+ (icmp6_neighbor_discovery_ethernet_link_layer_address_option_t));
+
+ payload_length +=
+ sizeof
+ (icmp6_neighbor_discovery_ethernet_link_layer_address_option_t);
+ }
+
+ /* add MTU option */
+ if (radv_info->adv_link_mtu)
+ {
+ icmp6_neighbor_discovery_mtu_option_t h;
+
+ h.unused = 0;
+ h.mtu =
+ clib_host_to_net_u32 (radv_info->adv_link_mtu);
+ h.header.type = ICMP6_NEIGHBOR_DISCOVERY_OPTION_mtu;
+ h.header.n_data_u64s = 1;
+
+ payload_length +=
+ sizeof (icmp6_neighbor_discovery_mtu_option_t);
+
+ vlib_buffer_add_data (vm,
+ p0->free_list_index,
+ bi0,
+ (void *) &h,
+ sizeof
+ (icmp6_neighbor_discovery_mtu_option_t));
+ }
+
+ /* add advertised prefix options */
+ ip6_radv_prefix_t *pr_info;
+
+ /* *INDENT-OFF* */
+ pool_foreach (pr_info, radv_info->adv_prefixes_pool,
+ ({
+ if(pr_info->enabled &&
+ (!pr_info->decrement_lifetime_flag
+ || (pr_info->pref_lifetime_expires >0)))
+ {
+ /* advertise this prefix */
+ icmp6_neighbor_discovery_prefix_information_option_t h;
+
+ h.header.type = ICMP6_NEIGHBOR_DISCOVERY_OPTION_prefix_information;
+ h.header.n_data_u64s = (sizeof(icmp6_neighbor_discovery_prefix_information_option_t) >> 3);
+
+ h.dst_address_length = pr_info->prefix_len;
+
+ h.flags = (pr_info->adv_on_link_flag) ? ICMP6_NEIGHBOR_DISCOVERY_PREFIX_INFORMATION_FLAG_ON_LINK : 0;
+ h.flags |= (pr_info->adv_autonomous_flag) ? ICMP6_NEIGHBOR_DISCOVERY_PREFIX_INFORMATION_AUTO : 0;
+
+ if(radv_info->cease_radv && pr_info->deprecated_prefix_flag)
+ {
+ h.valid_time = clib_host_to_net_u32(MIN_ADV_VALID_LIFETIME);
+ h.preferred_time = 0;
+ }
+ else
+ {
+ if(pr_info->decrement_lifetime_flag)
+ {
+ pr_info->adv_valid_lifetime_in_secs = ((pr_info->valid_lifetime_expires > now)) ?
+ (pr_info->valid_lifetime_expires - now) : 0;
+
+ pr_info->adv_pref_lifetime_in_secs = ((pr_info->pref_lifetime_expires > now)) ?
+ (pr_info->pref_lifetime_expires - now) : 0;
+ }
+
+ h.valid_time = clib_host_to_net_u32(pr_info->adv_valid_lifetime_in_secs);
+ h.preferred_time = clib_host_to_net_u32(pr_info->adv_pref_lifetime_in_secs) ;
+ }
+ h.unused = 0;
+
+ clib_memcpy(&h.dst_address, &pr_info->prefix, sizeof(ip6_address_t));
+
+ payload_length += sizeof( icmp6_neighbor_discovery_prefix_information_option_t);
+
+ vlib_buffer_add_data (vm,
+ p0->free_list_index,
+ bi0,
+ (void *)&h, sizeof(icmp6_neighbor_discovery_prefix_information_option_t));
+
+ }
+ }));
+ /* *INDENT-ON* */
+
+ /* add additional options before here */
+
+ /* finish building the router advertisement... */
+ if (!is_unspecified && radv_info->send_unicast)
+ {
+ ip0->dst_address = ip0->src_address;
+ }
+ else
+ {
+ /* target address is all-nodes mcast addr */
+ ip6_set_reserved_multicast_address
+ (&ip0->dst_address,
+ IP6_MULTICAST_SCOPE_link_local,
+ IP6_MULTICAST_GROUP_ID_all_hosts);
+ }
+
+ /* source address MUST be the link-local address */
+ ip0->src_address = radv_info->link_local_address;
+
+ ip0->hop_limit = 255;
+ ip0->payload_length =
+ clib_host_to_net_u16 (payload_length);
+
+ icmp6_router_advertisement_header_t *rh0 =
+ (icmp6_router_advertisement_header_t *) (ip0 + 1);
+ rh0->icmp.checksum =
+ ip6_tcp_udp_icmp_compute_checksum (vm, p0, ip0,
+ &bogus_length);
+ ASSERT (bogus_length == 0);
+
+ /* set up output interface and adjacency */
+ vnet_buffer (p0)->sw_if_index[VLIB_RX] =
+ vnet_main.local_interface_sw_if_index;
+
+ if (is_solicitation)
+ {
+ ethernet_header_t *eth0;
+ /* Reuse current MAC header, copy SMAC to DMAC and
+ * interface MAC to SMAC */
+ vlib_buffer_reset (p0);
+ eth0 = vlib_buffer_get_current (p0);
+ clib_memcpy (eth0->dst_address, eth0->src_address,
+ 6);
+ clib_memcpy (eth0->src_address, eth_if0->address,
+ 6);
+ next0 =
+ is_dropped ? next0 :
+ ICMP6_ROUTER_SOLICITATION_NEXT_REPLY_TX;
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] =
+ sw_if_index0;
+ }
+ else
+ {
+ adj_index0 = radv_info->all_nodes_adj_index;
+ if (adj_index0 == 0)
+ error0 = ICMP6_ERROR_DST_LOOKUP_MISS;
+ else
+ {
+ ip_adjacency_t *adj0 =
+ ip_get_adjacency (&im->lookup_main,
+ adj_index0);
+ error0 =
+ ((adj0->rewrite_header.sw_if_index !=
+ sw_if_index0
+ || adj0->lookup_next_index !=
+ IP_LOOKUP_NEXT_REWRITE) ?
+ ICMP6_ERROR_ROUTER_SOLICITATION_DEST_UNKNOWN
+ : error0);
+ next0 =
+ is_dropped ? next0 :
+ ICMP6_ROUTER_SOLICITATION_NEXT_REPLY_RW;
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
+ adj_index0;
+ }
+ }
+ p0->flags |= VNET_BUFFER_LOCALLY_ORIGINATED;
+
+ radv_info->n_solicitations_dropped += is_dropped;
+ radv_info->n_solicitations_rcvd += is_solicitation;
+
+ if ((error0 == ICMP6_ERROR_NONE) && !is_dropped)
+ {
+ radv_info->n_advertisements_sent++;
+ n_advertisements_sent++;
+ }
+ }
+ }
+ }
+
+ p0->error = error_node->errors[error0];
+
+ if (error0 != ICMP6_ERROR_NONE)
+ vlib_error_count (vm, error_node->node_index, error0, 1);
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ /* Account for router advertisements sent. */
+ vlib_error_count (vm, error_node->node_index,
+ ICMP6_ERROR_ROUTER_ADVERTISEMENTS_TX,
+ n_advertisements_sent);
+
+ return frame->n_vectors;
+}
+
+ /* validate advertised info for consistency (see RFC 4861 section 6.2.7) - log any inconsistencies; the packet is always dropped */
+static_always_inline uword
+icmp6_router_advertisement (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ uword n_packets = frame->n_vectors;
+ u32 *from, *to_next;
+ u32 n_left_from, n_left_to_next, next_index;
+ u32 n_advertisements_rcvd = 0;
+
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip6_icmp_input_node.index);
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = n_packets;
+ next_index = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
+ /* stride */ 1,
+ sizeof (icmp6_input_trace_t));
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ ip6_header_t *ip0;
+ ip6_radv_t *radv_info = 0;
+ icmp6_router_advertisement_header_t *h0;
+ u32 bi0, options_len0, sw_if_index0, next0, error0;
+
+ bi0 = to_next[0] = from[0];
+
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (p0);
+ h0 = ip6_next_header (ip0);
+ options_len0 =
+ clib_net_to_host_u16 (ip0->payload_length) - sizeof (h0[0]);
+
+ error0 = ICMP6_ERROR_NONE;
+ sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
+
+ /* Check that source address is link-local */
+ error0 = (!ip6_address_is_link_local_unicast (&ip0->src_address)) ?
+ ICMP6_ERROR_ROUTER_ADVERTISEMENT_SOURCE_NOT_LINK_LOCAL : error0;
+
+ /* default is to drop */
+ next0 = ICMP6_ROUTER_SOLICITATION_NEXT_DROP;
+
+ n_advertisements_rcvd++;
+
+ if (error0 == ICMP6_ERROR_NONE)
+ {
+ vnet_sw_interface_t *sw_if0;
+ ethernet_interface_t *eth_if0;
+
+ sw_if0 = vnet_get_sup_sw_interface (vnm, sw_if_index0);
+ ASSERT (sw_if0->type == VNET_SW_INTERFACE_TYPE_HARDWARE);
+ eth_if0 =
+ ethernet_get_interface (&ethernet_main, sw_if0->hw_if_index);
+
+ /* only support ethernet interface type for now */
+ error0 =
+ (!eth_if0) ? ICMP6_ERROR_ROUTER_SOLICITATION_UNSUPPORTED_INTF
+ : error0;
+
+ if (error0 == ICMP6_ERROR_NONE)
+ {
+ u32 ri;
+
+ /* look up the radv_t information for this interface */
+ vec_validate_init_empty
+ (nm->if_radv_pool_index_by_sw_if_index, sw_if_index0, ~0);
+
+ ri = nm->if_radv_pool_index_by_sw_if_index[sw_if_index0];
+
+ if (ri != ~0)
+ radv_info = pool_elt_at_index (nm->if_radv_pool, ri);
+
+ error0 =
+ ((!radv_info) ?
+ ICMP6_ERROR_ROUTER_SOLICITATION_RADV_NOT_CONFIG :
+ error0);
+
+ if (error0 == ICMP6_ERROR_NONE)
+ {
+ /* validate advertised information */
+ if ((h0->current_hop_limit && radv_info->curr_hop_limit)
+ && (h0->current_hop_limit !=
+ radv_info->curr_hop_limit))
+ {
+ ip6_neighbor_syslog (vm, LOG_WARNING,
+ "our AdvCurHopLimit on %U doesn't agree with %U",
+ format_vnet_sw_if_index_name,
+ vnm, sw_if_index0,
+ format_ip6_address,
+ &ip0->src_address);
+ }
+
+ if ((h0->flags &
+ ICMP6_ROUTER_DISCOVERY_FLAG_ADDRESS_CONFIG_VIA_DHCP)
+ != radv_info->adv_managed_flag)
+ {
+ ip6_neighbor_syslog (vm, LOG_WARNING,
+ "our AdvManagedFlag on %U doesn't agree with %U",
+ format_vnet_sw_if_index_name,
+ vnm, sw_if_index0,
+ format_ip6_address,
+ &ip0->src_address);
+ }
+
+ if ((h0->flags &
+ ICMP6_ROUTER_DISCOVERY_FLAG_OTHER_CONFIG_VIA_DHCP)
+ != radv_info->adv_other_flag)
+ {
+ ip6_neighbor_syslog (vm, LOG_WARNING,
+ "our AdvOtherConfigFlag on %U doesn't agree with %U",
+ format_vnet_sw_if_index_name,
+ vnm, sw_if_index0,
+ format_ip6_address,
+ &ip0->src_address);
+ }
+
+ if ((h0->time_in_msec_between_retransmitted_neighbor_solicitations
+ && radv_info->adv_time_in_msec_between_retransmitted_neighbor_solicitations)
+ && (h0->time_in_msec_between_retransmitted_neighbor_solicitations !=
+ clib_host_to_net_u32
+ (radv_info->adv_time_in_msec_between_retransmitted_neighbor_solicitations)))
+ {
+ ip6_neighbor_syslog (vm, LOG_WARNING,
+ "our AdvRetransTimer on %U doesn't agree with %U",
+ format_vnet_sw_if_index_name,
+ vnm, sw_if_index0,
+ format_ip6_address,
+ &ip0->src_address);
+ }
+
+ if ((h0->neighbor_reachable_time_in_msec &&
+ radv_info->adv_neighbor_reachable_time_in_msec) &&
+ (h0->neighbor_reachable_time_in_msec !=
+ clib_host_to_net_u32
+ (radv_info->adv_neighbor_reachable_time_in_msec)))
+ {
+ ip6_neighbor_syslog (vm, LOG_WARNING,
+ "our AdvReachableTime on %U doesn't agree with %U",
+ format_vnet_sw_if_index_name,
+ vnm, sw_if_index0,
+ format_ip6_address,
+ &ip0->src_address);
+ }
+
+ /* check for MTU or prefix options or .. */
+ u8 *opt_hdr = (u8 *) (h0 + 1);
+ while (options_len0 > 0)
+ {
+ icmp6_neighbor_discovery_option_header_t *o0 =
+ (icmp6_neighbor_discovery_option_header_t *)
+ opt_hdr;
+ int opt_len = o0->n_data_u64s << 3;
+ icmp6_neighbor_discovery_option_type_t option_type =
+ o0->type;
+
+ if (options_len0 < 2)
+ {
+ ip6_neighbor_syslog (vm, LOG_ERR,
+ "malformed RA packet on %U from %U",
+ format_vnet_sw_if_index_name,
+ vnm, sw_if_index0,
+ format_ip6_address,
+ &ip0->src_address);
+ break;
+ }
+
+ if (opt_len == 0)
+ {
+ ip6_neighbor_syslog (vm, LOG_ERR,
+ " zero length option in RA on %U from %U",
+ format_vnet_sw_if_index_name,
+ vnm, sw_if_index0,
+ format_ip6_address,
+ &ip0->src_address);
+ break;
+ }
+ else if (opt_len > options_len0)
+ {
+ ip6_neighbor_syslog (vm, LOG_ERR,
+ "option length in RA packet greater than total length on %U from %U",
+ format_vnet_sw_if_index_name,
+ vnm, sw_if_index0,
+ format_ip6_address,
+ &ip0->src_address);
+ break;
+ }
+
+ options_len0 -= opt_len;
+ opt_hdr += opt_len;
+
+ switch (option_type)
+ {
+ case ICMP6_NEIGHBOR_DISCOVERY_OPTION_mtu:
+ {
+ icmp6_neighbor_discovery_mtu_option_t *h =
+ (icmp6_neighbor_discovery_mtu_option_t
+ *) (o0);
+
+ if (opt_len < sizeof (*h))
+ break;
+
+ if ((h->mtu && radv_info->adv_link_mtu) &&
+ (h->mtu !=
+ clib_host_to_net_u32
+ (radv_info->adv_link_mtu)))
+ {
+ ip6_neighbor_syslog (vm, LOG_WARNING,
+ "our AdvLinkMTU on %U doesn't agree with %U",
+ format_vnet_sw_if_index_name,
+ vnm, sw_if_index0,
+ format_ip6_address,
+ &ip0->src_address);
+ }
+ }
+ break;
+
+ case ICMP6_NEIGHBOR_DISCOVERY_OPTION_prefix_information:
+ {
+ icmp6_neighbor_discovery_prefix_information_option_t
+ * h =
+ (icmp6_neighbor_discovery_prefix_information_option_t
+ *) (o0);
+
+ /* validate advertised prefix options */
+ ip6_radv_prefix_t *pr_info;
+ u32 preferred, valid;
+
+ if (opt_len < sizeof (*h))
+ break;
+
+ preferred =
+ clib_net_to_host_u32 (h->preferred_time);
+ valid = clib_net_to_host_u32 (h->valid_time);
+
+ /* look for matching prefix - if we are advertising it, it had better be consistent */
+ /* *INDENT-OFF* */
+ pool_foreach (pr_info, radv_info->adv_prefixes_pool,
+ ({
+
+ ip6_address_t mask;
+ ip6_address_mask_from_width(&mask, pr_info->prefix_len);
+
+ if(pr_info->enabled &&
+ (pr_info->prefix_len == h->dst_address_length) &&
+ ip6_address_is_equal_masked (&pr_info->prefix, &h->dst_address, &mask))
+ {
+ /* found it */
+ if(!pr_info->decrement_lifetime_flag &&
+ valid != pr_info->adv_valid_lifetime_in_secs)
+ {
+ ip6_neighbor_syslog(vm, LOG_WARNING,
+ "our ADV validlifetime on %U for %U does not agree with %U",
+ format_vnet_sw_if_index_name, vnm, sw_if_index0,format_ip6_address, &pr_info->prefix,
+ format_ip6_address, &h->dst_address);
+ }
+ if(!pr_info->decrement_lifetime_flag &&
+ preferred != pr_info->adv_pref_lifetime_in_secs)
+ {
+ ip6_neighbor_syslog(vm, LOG_WARNING,
+ "our ADV preferredlifetime on %U for %U does not agree with %U",
+ format_vnet_sw_if_index_name, vnm, sw_if_index0,format_ip6_address, &pr_info->prefix,
+ format_ip6_address, &h->dst_address);
+ }
+ }
+ break;
+ }));
+ /* *INDENT-ON* */
+ break;
+ }
+ default:
+ /* skip this one */
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ p0->error = error_node->errors[error0];
+
+ if (error0 != ICMP6_ERROR_NONE)
+ vlib_error_count (vm, error_node->node_index, error0, 1);
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ /* Account for router advertisements sent. */
+ vlib_error_count (vm, error_node->node_index,
+ ICMP6_ERROR_ROUTER_ADVERTISEMENTS_RX,
+ n_advertisements_rcvd);
+
+ return frame->n_vectors;
+}
+
+/* create and initialize router advertisement parameters with default values for this intfc */
+static u32
+ip6_neighbor_sw_interface_add_del (vnet_main_t * vnm,
+ u32 sw_if_index, u32 is_add)
+{
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ ip6_radv_t *a = 0;
+ u32 ri = ~0;
+ vnet_sw_interface_t *sw_if0;
+ ethernet_interface_t *eth_if0 = 0;
+
+ /* lookup radv container - ethernet interfaces only */
+ sw_if0 = vnet_get_sup_sw_interface (vnm, sw_if_index);
+ if (sw_if0->type == VNET_SW_INTERFACE_TYPE_HARDWARE)
+ eth_if0 = ethernet_get_interface (&ethernet_main, sw_if0->hw_if_index);
+
+ if (!eth_if0)
+ return ri;
+
+ vec_validate_init_empty (nm->if_radv_pool_index_by_sw_if_index, sw_if_index,
+ ~0);
+ ri = nm->if_radv_pool_index_by_sw_if_index[sw_if_index];
+
+ if (ri != ~0)
+ {
+ a = pool_elt_at_index (nm->if_radv_pool, ri);
+
+ if (!is_add)
+ {
+ u32 i, *to_delete = 0;
+ ip6_radv_prefix_t *p;
+ ip6_mldp_group_t *m;
+
+ /* remove adjacencies */
+ adj_unlock (a->all_nodes_adj_index);
+ adj_unlock (a->all_routers_adj_index);
+ adj_unlock (a->all_mldv2_routers_adj_index);
+
+ /* clean up prefix_pool */
+ /* *INDENT-OFF* */
+ pool_foreach (p, a->adv_prefixes_pool,
+ ({
+ vec_add1 (to_delete, p - a->adv_prefixes_pool);
+ }));
+ /* *INDENT-ON* */
+
+ for (i = 0; i < vec_len (to_delete); i++)
+ {
+ p = pool_elt_at_index (a->adv_prefixes_pool, to_delete[i]);
+ mhash_unset (&a->address_to_prefix_index, &p->prefix, 0);
+ pool_put (a->adv_prefixes_pool, p);
+ }
+
+ vec_free (to_delete);
+ to_delete = 0;
+
+ /* clean up mldp group pool */
+ /* *INDENT-OFF* */
+ pool_foreach (m, a->mldp_group_pool,
+ ({
+ vec_add1 (to_delete, m - a->mldp_group_pool);
+ }));
+ /* *INDENT-ON* */
+
+ for (i = 0; i < vec_len (to_delete); i++)
+ {
+ m = pool_elt_at_index (a->mldp_group_pool, to_delete[i]);
+ mhash_unset (&a->address_to_mldp_index, &m->mcast_address, 0);
+ pool_put (a->mldp_group_pool, m);
+ }
+
+ vec_free (to_delete);
+
+ pool_put (nm->if_radv_pool, a);
+ nm->if_radv_pool_index_by_sw_if_index[sw_if_index] = ~0;
+ ri = ~0;
+ }
+ }
+ else
+ {
+ if (is_add)
+ {
+ vnet_hw_interface_t *hw_if0;
+
+ hw_if0 = vnet_get_sup_hw_interface (vnm, sw_if_index);
+
+ pool_get (nm->if_radv_pool, a);
+
+ ri = a - nm->if_radv_pool;
+ nm->if_radv_pool_index_by_sw_if_index[sw_if_index] = ri;
+
+ /* initialize default values (most of which are zero) */
+ memset (a, 0, sizeof (a[0]));
+
+ a->sw_if_index = sw_if_index;
+ a->fib_index = ~0;
+ a->max_radv_interval = DEF_MAX_RADV_INTERVAL;
+ a->min_radv_interval = DEF_MIN_RADV_INTERVAL;
+ a->curr_hop_limit = DEF_CURR_HOP_LIMIT;
+ a->adv_router_lifetime_in_sec = DEF_DEF_RTR_LIFETIME;
+
+ a->adv_link_layer_address = 1; /* send ll address source address option */
+
+ a->min_delay_between_radv = MIN_DELAY_BETWEEN_RAS;
+ a->max_delay_between_radv = MAX_DELAY_BETWEEN_RAS;
+ a->max_rtr_default_lifetime = MAX_DEF_RTR_LIFETIME;
+ a->seed = (u32) clib_cpu_time_now ();
+ (void) random_u32 (&a->seed);
+ a->randomizer = clib_cpu_time_now ();
+ (void) random_u64 (&a->randomizer);
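+ /* seed the per-interface PRNGs from the cycle counter; the one
+ discarded draw just advances each generator off its raw seed */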
+
+ a->initial_adverts_count = MAX_INITIAL_RTR_ADVERTISEMENTS;
+ a->initial_adverts_sent = a->initial_adverts_count - 1;
+ a->initial_adverts_interval = MAX_INITIAL_RTR_ADVERT_INTERVAL;
+
+ /* default is to send */
+ a->send_radv = 1;
+
+ /* fill in radv_info for this interface that will be needed later */
+ a->adv_link_mtu = hw_if0->max_l3_packet_bytes[VLIB_RX];
+
+ clib_memcpy (a->link_layer_address, eth_if0->address, 6);
+
+ /* fill in default link-local address (this may be overridden) */
+ ip6_link_local_address_from_ethernet_address
+ (&a->link_local_address, eth_if0->address);
+ a->link_local_prefix_len = 64;
+
+ mhash_init (&a->address_to_prefix_index, sizeof (uword),
+ sizeof (ip6_address_t));
+ mhash_init (&a->address_to_mldp_index, sizeof (uword),
+ sizeof (ip6_address_t));
+
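+ /* IPv6 multicast maps onto ethernet as 33:33 plus the low 32 bits
+ of the group address (RFC 2464 section 7); the groups below are
+ ff02::1 (all-hosts), ff02::2 (all-routers) and ff02::16
+ (MLDv2-capable routers) */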
+ {
+ u8 link_layer_address[6] = { 0x33, 0x33, 0x00, 0x00, 0x00,
+ IP6_MULTICAST_GROUP_ID_all_hosts
+ };
+
+ a->all_nodes_adj_index =
+ adj_rewrite_add_and_lock (FIB_PROTOCOL_IP6, VNET_LINK_IP6,
+ sw_if_index, link_layer_address);
+ }
+
+ {
+ u8 link_layer_address[6] = { 0x33, 0x33, 0x00, 0x00, 0x00,
+ IP6_MULTICAST_GROUP_ID_all_routers
+ };
+
+ a->all_routers_adj_index =
+ adj_rewrite_add_and_lock (FIB_PROTOCOL_IP6, VNET_LINK_IP6,
+ sw_if_index, link_layer_address);
+ }
+
+ {
+ u8 link_layer_address[6] = { 0x33, 0x33, 0x00, 0x00, 0x00,
+ IP6_MULTICAST_GROUP_ID_mldv2_routers
+ };
+
+ a->all_mldv2_routers_adj_index =
+ adj_rewrite_add_and_lock (FIB_PROTOCOL_IP6,
+ VNET_LINK_IP6,
+ sw_if_index, link_layer_address);
+ }
+
+ /* add multicast groups we will always be reporting */
+ ip6_address_t addr;
+ ip6_mldp_group_t *mcast_group_info;
+
+ ip6_set_reserved_multicast_address (&addr,
+ IP6_MULTICAST_SCOPE_link_local,
+ IP6_MULTICAST_GROUP_ID_all_hosts);
+
+ /* lookup mldp info for this interface */
+
+ uword *p = mhash_get (&a->address_to_mldp_index, &addr);
+ mcast_group_info =
+ p ? pool_elt_at_index (a->mldp_group_pool, p[0]) : 0;
+
+ /* add address */
+ if (!mcast_group_info)
+ {
+ /* add */
+ u32 mi;
+ pool_get (a->mldp_group_pool, mcast_group_info);
+
+ mi = mcast_group_info - a->mldp_group_pool;
+ mhash_set (&a->address_to_mldp_index, &addr, mi, /* old_value */
+ 0);
+
+ mcast_group_info->type = 4;
+ mcast_group_info->mcast_source_address_pool = 0;
+ mcast_group_info->num_sources = 0;
+ clib_memcpy (&mcast_group_info->mcast_address, &addr,
+ sizeof (ip6_address_t));
+ }
+
+ ip6_set_reserved_multicast_address (&addr,
+ IP6_MULTICAST_SCOPE_link_local,
+ IP6_MULTICAST_GROUP_ID_all_routers);
+
+ p = mhash_get (&a->address_to_mldp_index, &addr);
+ mcast_group_info =
+ p ? pool_elt_at_index (a->mldp_group_pool, p[0]) : 0;
+
+ if (!mcast_group_info)
+ {
+ /* add */
+ u32 mi;
+ pool_get (a->mldp_group_pool, mcast_group_info);
+
+ mi = mcast_group_info - a->mldp_group_pool;
+ mhash_set (&a->address_to_mldp_index, &addr, mi, /* old_value */
+ 0);
+
+ mcast_group_info->type = 4;
+ mcast_group_info->mcast_source_address_pool = 0;
+ mcast_group_info->num_sources = 0;
+ clib_memcpy (&mcast_group_info->mcast_address, &addr,
+ sizeof (ip6_address_t));
+ }
+
+ ip6_set_reserved_multicast_address (&addr,
+ IP6_MULTICAST_SCOPE_link_local,
+ IP6_MULTICAST_GROUP_ID_mldv2_routers);
+
+ p = mhash_get (&a->address_to_mldp_index, &addr);
+ mcast_group_info =
+ p ? pool_elt_at_index (a->mldp_group_pool, p[0]) : 0;
+
+ if (!mcast_group_info)
+ {
+ /* add */
+ u32 mi;
+ pool_get (a->mldp_group_pool, mcast_group_info);
+
+ mi = mcast_group_info - a->mldp_group_pool;
+ mhash_set (&a->address_to_mldp_index, &addr, mi, /* old_value */
+ 0);
+
+ mcast_group_info->type = 4;
+ mcast_group_info->mcast_source_address_pool = 0;
+ mcast_group_info->num_sources = 0;
+ clib_memcpy (&mcast_group_info->mcast_address, &addr,
+ sizeof (ip6_address_t));
+ }
+ }
+ }
+ return ri;
+}
+
+/* send an mldpv2 report */
+static void
+ip6_neighbor_send_mldpv2_report (u32 sw_if_index)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vnm->vlib_main;
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ vnet_sw_interface_t *sw_if0;
+ ethernet_interface_t *eth_if0;
+ u32 ri;
+ int bogus_length;
+
+ ip6_radv_t *radv_info;
+ u16 payload_length;
+ vlib_buffer_t *b0;
+ ip6_header_t *ip0;
+ u32 *to_next;
+ vlib_frame_t *f;
+ u32 bo0;
+ u32 n_to_alloc = 1;
+ u32 n_allocated;
+
+ icmp6_multicast_listener_report_header_t *rh0;
+ icmp6_multicast_listener_report_packet_t *rp0;
+
+ sw_if0 = vnet_get_sup_sw_interface (vnm, sw_if_index);
+ ASSERT (sw_if0->type == VNET_SW_INTERFACE_TYPE_HARDWARE);
+ eth_if0 = ethernet_get_interface (&ethernet_main, sw_if0->hw_if_index);
+
+ if (!eth_if0 || !vnet_sw_interface_is_admin_up (vnm, sw_if_index))
+ return;
+
+ /* look up the radv_t information for this interface */
+ vec_validate_init_empty (nm->if_radv_pool_index_by_sw_if_index, sw_if_index,
+ ~0);
+
+ ri = nm->if_radv_pool_index_by_sw_if_index[sw_if_index];
+
+ if (ri == ~0)
+ return;
+
+ /* send report now - build an mldpv2 report packet */
+ n_allocated = vlib_buffer_alloc_from_free_list (vm,
+ &bo0,
+ n_to_alloc,
+ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+ if (PREDICT_FALSE (n_allocated == 0))
+ {
+ clib_warning ("buffer allocation failure");
+ return;
+ }
+
+ b0 = vlib_get_buffer (vm, bo0);
+
+ /* size the buffer for the fixed mldpv2 report packet; address records are appended below */
+ b0->current_length = sizeof (icmp6_multicast_listener_report_packet_t);
+
+ payload_length = sizeof (icmp6_multicast_listener_report_header_t);
+
+ b0->error = ICMP6_ERROR_NONE;
+
+ rp0 = vlib_buffer_get_current (b0);
+ ip0 = (ip6_header_t *) & rp0->ip;
+ rh0 = (icmp6_multicast_listener_report_header_t *) & rp0->report_hdr;
+
+ memset (rp0, 0x0, sizeof (icmp6_multicast_listener_report_packet_t));
+
+ ip0->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 (0x6 << 28);
+
+ ip0->protocol = IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS;
+ /* for DEBUG - the vnet driver does not seem to emit router alerts */
+ /* ip0->protocol = IP_PROTOCOL_ICMP6; */
+ ip0->hop_limit = 1;
+
+ rh0->icmp.type = ICMP6_multicast_listener_report_v2;
+
+ /* source address MUST be the link-local address */
+ radv_info = pool_elt_at_index (nm->if_radv_pool, ri);
+ ip0->src_address = radv_info->link_local_address;
+
+ /* destination is all mldpv2 routers */
+ ip6_set_reserved_multicast_address (&ip0->dst_address,
+ IP6_MULTICAST_SCOPE_link_local,
+ IP6_MULTICAST_GROUP_ID_mldv2_routers);
+
+ /* add reports here */
+ ip6_mldp_group_t *m;
+ int num_addr_records = 0;
+ icmp6_multicast_address_record_t rr;
+
+ /* fill in the hop-by-hop extension header (router alert) info */
+ rh0->ext_hdr.next_hdr = IP_PROTOCOL_ICMP6;
+ rh0->ext_hdr.n_data_u64s = 0;
+
+ rh0->alert.type = IP6_MLDP_ALERT_TYPE;
+ rh0->alert.len = 2;
+ rh0->alert.value = 0;
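+
+ /* hop-by-hop router alert option; value 0 means the datagram
+ contains an MLD message (RFC 2711) */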
+
+ rh0->pad.type = 1;
+ rh0->pad.len = 0;
+
+ rh0->icmp.checksum = 0;
+
+ /* *INDENT-OFF* */
+ pool_foreach (m, radv_info->mldp_group_pool,
+ ({
+ rr.type = m->type;
+ rr.aux_data_len_u32s = 0;
+ rr.num_sources = clib_host_to_net_u16 (m->num_sources);
+ clib_memcpy(&rr.mcast_addr, &m->mcast_address, sizeof(ip6_address_t));
+
+ num_addr_records++;
+
+ vlib_buffer_add_data
+ (vm, b0->free_list_index, bo0,
+ (void *)&rr, sizeof(icmp6_multicast_address_record_t));
+
+ payload_length += sizeof( icmp6_multicast_address_record_t);
+ }));
+ /* *INDENT-ON* */
+
+ rh0->rsvd = 0;
+ rh0->num_addr_records = clib_host_to_net_u16 (num_addr_records);
+
+ /* update lengths */
+ ip0->payload_length = clib_host_to_net_u16 (payload_length);
+
+ rh0->icmp.checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip0,
+ &bogus_length);
+ ASSERT (bogus_length == 0);
+
+ /*
+ * OK to override w/ no regard for actual FIB, because
+ * ip6-rewrite only looks at the adjacency.
+ */
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] =
+ vnet_main.local_interface_sw_if_index;
+
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
+ radv_info->all_mldv2_routers_adj_index;
+ b0->flags |= VNET_BUFFER_LOCALLY_ORIGINATED;
+
+ vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "ip6-rewrite");
+
+ f = vlib_get_frame_to_node (vm, node->index);
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bo0;
+ f->n_vectors = 1;
+
+ vlib_put_frame_to_node (vm, node->index, f);
+ return;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_icmp_router_solicitation_node,static) =
+{
+ .function = icmp6_router_solicitation,
+ .name = "icmp6-router-solicitation",
+
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_icmp6_input_trace,
+
+ .n_next_nodes = ICMP6_ROUTER_SOLICITATION_N_NEXT,
+ .next_nodes = {
+ [ICMP6_ROUTER_SOLICITATION_NEXT_DROP] = "error-drop",
+ [ICMP6_ROUTER_SOLICITATION_NEXT_REPLY_RW] = "ip6-rewrite",
+ [ICMP6_ROUTER_SOLICITATION_NEXT_REPLY_TX] = "interface-output",
+ },
+};
+/* *INDENT-ON* */
+
+/* send an RA or update the timer info, etc. */
+static uword
+ip6_neighbor_process_timer_event (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ ip6_radv_t *radv_info;
+ vlib_frame_t *f = 0;
+ u32 n_this_frame = 0;
+ u32 n_left_to_next = 0;
+ u32 *to_next = 0;
+ u32 bo0;
+ icmp6_router_solicitation_header_t *h0;
+ vlib_buffer_t *b0;
+ f64 now = vlib_time_now (vm);
+
+ /* Interface ip6 radv info list */
+ /* *INDENT-OFF* */
+ pool_foreach (radv_info, nm->if_radv_pool,
+ ({
+ if( !vnet_sw_interface_is_admin_up (vnm, radv_info->sw_if_index))
+ {
+ radv_info->initial_adverts_sent = radv_info->initial_adverts_count-1;
+ radv_info->next_multicast_time = now;
+ radv_info->last_multicast_time = now;
+ radv_info->last_radv_time = 0;
+ radv_info->all_routers_mcast = 0;
+ continue;
+ }
+
+ /* Make sure that we've joined the all-routers multicast group */
+ if(!radv_info->all_routers_mcast)
+ {
+ /* send an mldpv2 report */
+ ip6_neighbor_send_mldpv2_report(radv_info->sw_if_index);
+ radv_info->all_routers_mcast = 1;
+ }
+
+ /* is it time to send a multicast RA on this interface? */
+ if(radv_info->send_radv && (now >= radv_info->next_multicast_time))
+ {
+ u32 n_to_alloc = 1;
+ u32 n_allocated;
+
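+ /* the next unsolicited advert is scheduled uniformly at random in
+ [min_radv_interval, max_radv_interval]; with the defaults above
+ that is a delay of 150..200 seconds (RFC 4861 section 6.2.4) */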
+ f64 rfn = (radv_info->max_radv_interval - radv_info->min_radv_interval) *
+ random_f64 (&radv_info->seed) + radv_info->min_radv_interval;
+
+ /* multicast send - compute next multicast send time */
+ if( radv_info->initial_adverts_sent > 0)
+ {
+ radv_info->initial_adverts_sent--;
+ if(rfn > radv_info->initial_adverts_interval)
+ rfn = radv_info->initial_adverts_interval;
+
+ /* check to see if we are ceasing to send */
+ if( radv_info->initial_adverts_sent == 0)
+ if(radv_info->cease_radv)
+ radv_info->send_radv = 0;
+ }
+
+ radv_info->next_multicast_time = rfn + now;
+ radv_info->last_multicast_time = now;
+
+ /* send advert now - build a "solicited" router advert with unspecified source address */
+ n_allocated = vlib_buffer_alloc_from_free_list
+ (vm, &bo0, n_to_alloc, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+
+ if (PREDICT_FALSE(n_allocated == 0))
+ {
+ clib_warning ("buffer allocation failure");
+ continue;
+ }
+ b0 = vlib_get_buffer (vm, bo0);
+ b0->current_length = sizeof( icmp6_router_solicitation_header_t);
+ b0->error = ICMP6_ERROR_NONE;
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = radv_info->sw_if_index;
+
+ h0 = vlib_buffer_get_current (b0);
+
+ memset (h0, 0, sizeof (icmp6_router_solicitation_header_t));
+
+ h0->ip.ip_version_traffic_class_and_flow_label = clib_host_to_net_u32 (0x6 << 28);
+ h0->ip.payload_length = clib_host_to_net_u16 (sizeof (icmp6_router_solicitation_header_t)
+ - STRUCT_OFFSET_OF (icmp6_router_solicitation_header_t, neighbor));
+ h0->ip.protocol = IP_PROTOCOL_ICMP6;
+ h0->ip.hop_limit = 255;
+
+ /* set src/dst address as "unspecified"; this marks the packet as internally generated rather than received */
+ h0->ip.src_address.as_u64[0] = 0;
+ h0->ip.src_address.as_u64[1] = 0;
+
+ h0->ip.dst_address.as_u64[0] = 0;
+ h0->ip.dst_address.as_u64[1] = 0;
+
+ h0->neighbor.icmp.type = ICMP6_router_solicitation;
+
+ if (PREDICT_FALSE(f == 0))
+ {
+ f = vlib_get_frame_to_node (vm, ip6_icmp_router_solicitation_node.index);
+ to_next = vlib_frame_vector_args (f);
+ n_left_to_next = VLIB_FRAME_SIZE;
+ n_this_frame = 0;
+ }
+
+ n_this_frame++;
+ n_left_to_next--;
+ to_next[0] = bo0;
+ to_next += 1;
+
+ if (PREDICT_FALSE(n_left_to_next == 0))
+ {
+ f->n_vectors = n_this_frame;
+ vlib_put_frame_to_node (vm, ip6_icmp_router_solicitation_node.index, f);
+ f = 0;
+ }
+ }
+ }));
+ /* *INDENT-ON* */
+
+ if (f)
+ {
+ ASSERT (n_this_frame);
+ f->n_vectors = n_this_frame;
+ vlib_put_frame_to_node (vm, ip6_icmp_router_solicitation_node.index, f);
+ }
+ return 0;
+}
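+
+/*
+ * Worked example of the jittered interval computed above, assuming
+ * min_radv_interval = 150s, max_radv_interval = 200s, and random_f64 ()
+ * returning 0.5:
+ *
+ *   rfn = (200 - 150) * 0.5 + 150 = 175 seconds
+ *
+ * so the next unsolicited RA is scheduled 175 seconds from now, giving
+ * the randomized spacing RFC 4861 calls for.
+ */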
+
+static uword
+ip6_icmp_neighbor_discovery_event_process (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ uword event_type;
+ ip6_icmp_neighbor_discovery_event_data_t *event_data;
+
+ /* init code here */
+
+ while (1)
+ {
+ vlib_process_wait_for_event_or_clock (vm, 1. /* seconds */ );
+
+ event_data = vlib_process_get_event_data (vm, &event_type);
+
+ if (!event_data)
+ {
+ /* No events found: timer expired. */
+ /* process interface list and send RAs as appropriate, update timer info */
+ ip6_neighbor_process_timer_event (vm, node, frame);
+ }
+ else
+ {
+ switch (event_type)
+ {
+
+ case ICMP6_ND_EVENT_INIT:
+ break;
+
+ case ~0:
+ break;
+
+ default:
+ ASSERT (0);
+ }
+
+ if (event_data)
+ _vec_len (event_data) = 0;
+ }
+ }
+ return frame->n_vectors;
+}
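+
+/*
+ * The process above wakes once per second (the timer path) or when
+ * another node signals it. A minimal sketch of signalling it from
+ * elsewhere in the graph:
+ *
+ *   vlib_process_signal_event (vm,
+ *                              ip6_icmp_neighbor_discovery_event_node.index,
+ *                              ICMP6_ND_EVENT_INIT, 0);
+ */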
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_icmp_router_advertisement_node,static) =
+{
+ .function = icmp6_router_advertisement,
+ .name = "icmp6-router-advertisement",
+
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_icmp6_input_trace,
+
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+vlib_node_registration_t ip6_icmp_neighbor_discovery_event_node = {
+
+ .function = ip6_icmp_neighbor_discovery_event_process,
+ .name = "ip6-icmp-neighbor-discovery-event-process",
+ .type = VLIB_NODE_TYPE_PROCESS,
+};
+
+static uword
+icmp6_neighbor_solicitation (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return icmp6_neighbor_solicitation_or_advertisement (vm, node, frame,
+ /* is_solicitation */
+ 1);
+}
+
+static uword
+icmp6_neighbor_advertisement (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return icmp6_neighbor_solicitation_or_advertisement (vm, node, frame,
+ /* is_solicitation */
+ 0);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_icmp_neighbor_solicitation_node,static) =
+{
+ .function = icmp6_neighbor_solicitation,
+ .name = "icmp6-neighbor-solicitation",
+
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_icmp6_input_trace,
+
+ .n_next_nodes = ICMP6_NEIGHBOR_SOLICITATION_N_NEXT,
+ .next_nodes = {
+ [ICMP6_NEIGHBOR_SOLICITATION_NEXT_DROP] = "error-drop",
+ [ICMP6_NEIGHBOR_SOLICITATION_NEXT_REPLY] = "interface-output",
+ },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_icmp_neighbor_advertisement_node,static) =
+{
+ .function = icmp6_neighbor_advertisement,
+ .name = "icmp6-neighbor-advertisement",
+
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_icmp6_input_trace,
+
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/* API support functions */
+int
+ip6_neighbor_ra_config (vlib_main_t * vm, u32 sw_if_index,
+ u8 suppress, u8 managed, u8 other,
+ u8 ll_option, u8 send_unicast, u8 cease,
+ u8 use_lifetime, u32 lifetime,
+ u32 initial_count, u32 initial_interval,
+ u32 max_interval, u32 min_interval, u8 is_no)
+{
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ int error;
+ u32 ri;
+
+ /* look up the radv_t information for this interface */
+ vec_validate_init_empty (nm->if_radv_pool_index_by_sw_if_index, sw_if_index,
+ ~0);
+ ri = nm->if_radv_pool_index_by_sw_if_index[sw_if_index];
+ error = (ri != ~0) ? 0 : VNET_API_ERROR_INVALID_SW_IF_INDEX;
+
+ if (!error)
+ {
+
+ ip6_radv_t *radv_info;
+ radv_info = pool_elt_at_index (nm->if_radv_pool, ri);
+
+ if ((max_interval != 0) && (min_interval == 0))
+ min_interval = .75 * max_interval;
+
+ max_interval =
+ (max_interval !=
+ 0) ? ((is_no) ? DEF_MAX_RADV_INTERVAL : max_interval) :
+ radv_info->max_radv_interval;
+ min_interval =
+ (min_interval !=
+ 0) ? ((is_no) ? DEF_MIN_RADV_INTERVAL : min_interval) :
+ radv_info->min_radv_interval;
+ lifetime =
+ (use_lifetime !=
+ 0) ? ((is_no) ? DEF_DEF_RTR_LIFETIME : lifetime) :
+ radv_info->adv_router_lifetime_in_sec;
+
+ if (lifetime)
+ {
+ if (lifetime > MAX_DEF_RTR_LIFETIME)
+ lifetime = MAX_DEF_RTR_LIFETIME;
+
+ if (lifetime <= max_interval)
+ return VNET_API_ERROR_INVALID_VALUE;
+ }
+
+ if (min_interval != 0)
+ {
+ if ((min_interval > .75 * max_interval) || (min_interval < 3))
+ return VNET_API_ERROR_INVALID_VALUE;
+ }
+
+ if ((initial_count > MAX_INITIAL_RTR_ADVERTISEMENTS) ||
+ (initial_interval > MAX_INITIAL_RTR_ADVERT_INTERVAL))
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ /*
+ If "flag" is set: restore the default value when is_no is true,
+ otherwise set the value corresponding to "flag".
+ If "flag" is clear: leave the corresponding value unchanged.
+ */
+ radv_info->send_radv =
+ (suppress != 0) ? ((is_no != 0) ? 1 : 0) : radv_info->send_radv;
+ radv_info->adv_managed_flag =
+ (managed != 0) ? ((is_no) ? 0 : 1) : radv_info->adv_managed_flag;
+ radv_info->adv_other_flag =
+ (other != 0) ? ((is_no) ? 0 : 1) : radv_info->adv_other_flag;
+ radv_info->adv_link_layer_address =
+ (ll_option !=
+ 0) ? ((is_no) ? 1 : 0) : radv_info->adv_link_layer_address;
+ radv_info->send_unicast =
+ (send_unicast != 0) ? ((is_no) ? 0 : 1) : radv_info->send_unicast;
+ radv_info->cease_radv =
+ (cease != 0) ? ((is_no) ? 0 : 1) : radv_info->cease_radv;
+
+ radv_info->min_radv_interval = min_interval;
+ radv_info->max_radv_interval = max_interval;
+ radv_info->adv_router_lifetime_in_sec = lifetime;
+
+ radv_info->initial_adverts_count =
+ (initial_count !=
+ 0) ? ((is_no) ? MAX_INITIAL_RTR_ADVERTISEMENTS : initial_count) :
+ radv_info->initial_adverts_count;
+ radv_info->initial_adverts_interval =
+ (initial_interval !=
+ 0) ? ((is_no) ? MAX_INITIAL_RTR_ADVERT_INTERVAL : initial_interval) :
+ radv_info->initial_adverts_interval;
+
+ /* restart */
+ if ((cease != 0) && (is_no))
+ radv_info->send_radv = 1;
+
+ radv_info->initial_adverts_sent = radv_info->initial_adverts_count - 1;
+ radv_info->next_multicast_time = vlib_time_now (vm);
+ radv_info->last_multicast_time = vlib_time_now (vm);
+ radv_info->last_radv_time = 0;
+ }
+ return (error);
+}
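+
+/*
+ * Example call (hypothetical values): suppress RA transmission on an
+ * interface while leaving every other parameter unchanged (a zero "flag"
+ * argument means "do not modify"):
+ *
+ *   ip6_neighbor_ra_config (vm, sw_if_index, 1 (suppress),
+ *                           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 (is_no));
+ */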
+
+int
+ip6_neighbor_ra_prefix (vlib_main_t * vm, u32 sw_if_index,
+ ip6_address_t * prefix_addr, u8 prefix_len,
+ u8 use_default, u32 val_lifetime, u32 pref_lifetime,
+ u8 no_advertise, u8 off_link, u8 no_autoconfig,
+ u8 no_onlink, u8 is_no)
+{
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ int error;
+
+ u32 ri;
+
+ /* look up the radv_t information for this interface */
+ vec_validate_init_empty (nm->if_radv_pool_index_by_sw_if_index, sw_if_index,
+ ~0);
+
+ ri = nm->if_radv_pool_index_by_sw_if_index[sw_if_index];
+
+ error = (ri != ~0) ? 0 : VNET_API_ERROR_INVALID_SW_IF_INDEX;
+
+ if (!error)
+ {
+ f64 now = vlib_time_now (vm);
+ ip6_radv_t *radv_info;
+ radv_info = pool_elt_at_index (nm->if_radv_pool, ri);
+
+ /* prefix info add, delete or update */
+ ip6_radv_prefix_t *prefix;
+
+ /* lookup prefix info for this address on this interface */
+ uword *p = mhash_get (&radv_info->address_to_prefix_index, prefix_addr);
+
+ prefix = p ? pool_elt_at_index (radv_info->adv_prefixes_pool, p[0]) : 0;
+
+ if (is_no)
+ {
+ /* delete */
+ if (!prefix)
+ return VNET_API_ERROR_INVALID_VALUE; /* invalid prefix */
+
+ if (prefix->prefix_len != prefix_len)
+ return VNET_API_ERROR_INVALID_VALUE_2;
+
+ /* FIXME - Should the DP do this or the CP ? */
+ /* do specific delete processing here before returning */
+ /* try to remove from routing table */
+
+ mhash_unset (&radv_info->address_to_prefix_index, prefix_addr,
+ /* old_value */ 0);
+ pool_put (radv_info->adv_prefixes_pool, prefix);
+
+ radv_info->initial_adverts_sent =
+ radv_info->initial_adverts_count - 1;
+ radv_info->next_multicast_time = vlib_time_now (vm);
+ radv_info->last_multicast_time = vlib_time_now (vm);
+ radv_info->last_radv_time = 0;
+ return (error);
+ }
+
+ /* adding or changing */
+ if (!prefix)
+ {
+ /* add */
+ u32 pi;
+ pool_get (radv_info->adv_prefixes_pool, prefix);
+ pi = prefix - radv_info->adv_prefixes_pool;
+ mhash_set (&radv_info->address_to_prefix_index, prefix_addr, pi,
+ /* old_value */ 0);
+
+ memset (prefix, 0x0, sizeof (ip6_radv_prefix_t));
+
+ prefix->prefix_len = prefix_len;
+ clib_memcpy (&prefix->prefix, prefix_addr, sizeof (ip6_address_t));
+
+ /* initialize default values */
+ prefix->adv_on_link_flag = 1; /* L bit set */
+ prefix->adv_autonomous_flag = 1; /* A bit set */
+ prefix->adv_valid_lifetime_in_secs = DEF_ADV_VALID_LIFETIME;
+ prefix->adv_pref_lifetime_in_secs = DEF_ADV_PREF_LIFETIME;
+ prefix->enabled = 1;
+ prefix->decrement_lifetime_flag = 1;
+ prefix->deprecated_prefix_flag = 1;
+
+ if (off_link == 0)
+ {
+ /* FIXME - Should the DP do this or the CP ? */
+ /* insert prefix into routing table as a connected prefix */
+ }
+
+ if (use_default)
+ goto restart;
+ }
+ else
+ {
+
+ if (prefix->prefix_len != prefix_len)
+ return VNET_API_ERROR_INVALID_VALUE_2;
+
+ if (off_link != 0)
+ {
+ /* FIXME - Should the DP do this or the CP ? */
+ /* remove from routing table if already there */
+ }
+ }
+
+ if ((val_lifetime == ~0) || (pref_lifetime == ~0))
+ {
+ prefix->adv_valid_lifetime_in_secs = ~0;
+ prefix->adv_pref_lifetime_in_secs = ~0;
+ prefix->decrement_lifetime_flag = 0;
+ }
+ else
+ {
+ prefix->adv_valid_lifetime_in_secs = val_lifetime;
+ prefix->adv_pref_lifetime_in_secs = pref_lifetime;
+ }
+
+ /* copy remaining */
+ prefix->enabled = !(no_advertise != 0);
+ prefix->adv_on_link_flag = !((off_link != 0) || (no_onlink != 0));
+ prefix->adv_autonomous_flag = !(no_autoconfig != 0);
+
+ restart:
+ /* restart */
+ /* fill in the expiration times */
+ prefix->valid_lifetime_expires =
+ now + prefix->adv_valid_lifetime_in_secs;
+ prefix->pref_lifetime_expires = now + prefix->adv_pref_lifetime_in_secs;
+
+ radv_info->initial_adverts_sent = radv_info->initial_adverts_count - 1;
+ radv_info->next_multicast_time = vlib_time_now (vm);
+ radv_info->last_multicast_time = vlib_time_now (vm);
+ radv_info->last_radv_time = 0;
+ }
+ return (error);
+}
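+
+/*
+ * Example call (hypothetical prefix): advertise 2001:db8::/64 with the
+ * default lifetimes and flags:
+ *
+ *   ip6_neighbor_ra_prefix (vm, sw_if_index, &prefix, 64,
+ *                           1 (use_default), 0, 0, 0, 0, 0, 0,
+ *                           0 (is_no));
+ */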
+
+clib_error_t *
+ip6_neighbor_cmd (vlib_main_t * vm, unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ clib_error_t *error = 0;
+ u8 is_no = 0;
+ u8 suppress = 0, managed = 0, other = 0;
+ u8 suppress_ll_option = 0, send_unicast = 0, cease = 0;
+ u8 use_lifetime = 0;
+ u32 sw_if_index, ra_lifetime = 0, ra_initial_count =
+ 0, ra_initial_interval = 0;
+ u32 ra_max_interval = 0, ra_min_interval = 0;
+
+ unformat_input_t _line_input, *line_input = &_line_input;
+ vnet_sw_interface_t *sw_if0;
+
+ int add_radv_info = 1;
+ __attribute__ ((unused)) ip6_radv_t *radv_info = 0;
+ ip6_address_t ip6_addr;
+ u32 addr_len;
+
+
+ /* Get a line of input. */
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ return 0;
+
+ /* get basic radv info for this interface */
+ if (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+
+ if (unformat_user (line_input,
+ unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ u32 ri;
+ ethernet_interface_t *eth_if0 = 0;
+
+ sw_if0 = vnet_get_sup_sw_interface (vnm, sw_if_index);
+ if (sw_if0->type == VNET_SW_INTERFACE_TYPE_HARDWARE)
+ eth_if0 =
+ ethernet_get_interface (&ethernet_main, sw_if0->hw_if_index);
+
+ if (!eth_if0)
+ {
+ error =
+ clib_error_return (0, "Interface must be of ethernet type");
+ goto done;
+ }
+
+ /* look up the radv_t information for this interface */
+ vec_validate_init_empty (nm->if_radv_pool_index_by_sw_if_index,
+ sw_if_index, ~0);
+
+ ri = nm->if_radv_pool_index_by_sw_if_index[sw_if_index];
+
+ if (ri != ~0)
+ {
+ radv_info = pool_elt_at_index (nm->if_radv_pool, ri);
+ }
+ else
+ {
+ error = clib_error_return (0, "unknown interface %U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+ }
+ else
+ {
+ error = clib_error_return (0, "invalid interface name %U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+ }
+
+ /* get the rest of the command */
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "no"))
+ is_no = 1;
+ else if (unformat (line_input, "prefix %U/%d",
+ unformat_ip6_address, &ip6_addr, &addr_len))
+ {
+ add_radv_info = 0;
+ break;
+ }
+ else if (unformat (line_input, "ra-managed-config-flag"))
+ {
+ managed = 1;
+ break;
+ }
+ else if (unformat (line_input, "ra-other-config-flag"))
+ {
+ other = 1;
+ break;
+ }
+ else if (unformat (line_input, "ra-suppress") ||
+ unformat (line_input, "ra-surpress"))
+ {
+ suppress = 1;
+ break;
+ }
+ else if (unformat (line_input, "ra-suppress-link-layer") ||
+ unformat (line_input, "ra-surpress-link-layer"))
+ {
+ suppress_ll_option = 1;
+ break;
+ }
+ else if (unformat (line_input, "ra-send-unicast"))
+ {
+ send_unicast = 1;
+ break;
+ }
+ else if (unformat (line_input, "ra-lifetime"))
+ {
+ if (!unformat (line_input, "%d", &ra_lifetime))
+ return (error = unformat_parse_error (line_input));
+ use_lifetime = 1;
+ break;
+ }
+ else if (unformat (line_input, "ra-initial"))
+ {
+ if (!unformat
+ (line_input, "%d %d", &ra_initial_count, &ra_initial_interval))
+ return (error = unformat_parse_error (line_input));
+ break;
+ }
+ else if (unformat (line_input, "ra-interval"))
+ {
+ if (!unformat (line_input, "%d", &ra_max_interval))
+ return (error = unformat_parse_error (line_input));
+
+ if (!unformat (line_input, "%d", &ra_min_interval))
+ ra_min_interval = 0;
+ break;
+ }
+ else if (unformat (line_input, "ra-cease"))
+ {
+ cease = 1;
+ break;
+ }
+ else
+ return (unformat_parse_error (line_input));
+ }
+
+ if (add_radv_info)
+ {
+ ip6_neighbor_ra_config (vm, sw_if_index,
+ suppress, managed, other,
+ suppress_ll_option, send_unicast, cease,
+ use_lifetime, ra_lifetime,
+ ra_initial_count, ra_initial_interval,
+ ra_max_interval, ra_min_interval, is_no);
+ }
+ else
+ {
+ u32 valid_lifetime_in_secs = 0;
+ u32 pref_lifetime_in_secs = 0;
+ u8 use_prefix_default_values = 0;
+ u8 no_advertise = 0;
+ u8 off_link = 0;
+ u8 no_autoconfig = 0;
+ u8 no_onlink = 0;
+
+ /* get the rest of the command */
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "default"))
+ {
+ use_prefix_default_values = 1;
+ break;
+ }
+ else if (unformat (line_input, "infinite"))
+ {
+ valid_lifetime_in_secs = ~0;
+ pref_lifetime_in_secs = ~0;
+ break;
+ }
+ else if (unformat (line_input, "%d %d", &valid_lifetime_in_secs,
+ &pref_lifetime_in_secs))
+ break;
+ else
+ break;
+ }
+
+
+ /* get the rest of the command */
+ while (!use_prefix_default_values &&
+ unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "no-advertise"))
+ no_advertise = 1;
+ else if (unformat (line_input, "off-link"))
+ off_link = 1;
+ else if (unformat (line_input, "no-autoconfig"))
+ no_autoconfig = 1;
+ else if (unformat (line_input, "no-onlink"))
+ no_onlink = 1;
+ else
+ return (unformat_parse_error (line_input));
+ }
+
+ ip6_neighbor_ra_prefix (vm, sw_if_index,
+ &ip6_addr, addr_len,
+ use_prefix_default_values,
+ valid_lifetime_in_secs,
+ pref_lifetime_in_secs,
+ no_advertise,
+ off_link, no_autoconfig, no_onlink, is_no);
+ }
+
+ unformat_free (line_input);
+
+done:
+ return error;
+}
+
+static void
+ip6_print_addrs (vlib_main_t * vm, u32 * addrs)
+{
+ ip_lookup_main_t *lm = &ip6_main.lookup_main;
+ u32 i;
+
+ for (i = 0; i < vec_len (addrs); i++)
+ {
+ ip_interface_address_t *a =
+ pool_elt_at_index (lm->if_address_pool, addrs[i]);
+ ip6_address_t *address = ip_interface_address_get_address (lm, a);
+
+ vlib_cli_output (vm, "\t\t%U/%d",
+ format_ip6_address, address, a->address_length);
+ }
+}
+
+static clib_error_t *
+show_ip6_interface_cmd (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ clib_error_t *error = 0;
+ u32 sw_if_index;
+
+ sw_if_index = ~0;
+
+ if (unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ u32 ri;
+
+ /* look up the radv_t information for this interface */
+ vec_validate_init_empty (nm->if_radv_pool_index_by_sw_if_index,
+ sw_if_index, ~0);
+
+ ri = nm->if_radv_pool_index_by_sw_if_index[sw_if_index];
+
+ if (ri != ~0)
+ {
+ ip_lookup_main_t *lm = &ip6_main.lookup_main;
+ ip6_radv_t *radv_info;
+ radv_info = pool_elt_at_index (nm->if_radv_pool, ri);
+
+ vlib_cli_output (vm, "%U is admin %s\n",
+ format_vnet_sw_interface_name, vnm,
+ vnet_get_sw_interface (vnm, sw_if_index),
+ (vnet_sw_interface_is_admin_up (vnm, sw_if_index) ?
+ "up" : "down"));
+
+ u32 ai;
+ u32 *link_scope = 0, *global_scope = 0;
+ u32 *local_scope = 0, *unknown_scope = 0;
+ ip_interface_address_t *a;
+
+ vec_validate_init_empty (lm->if_address_pool_index_by_sw_if_index,
+ sw_if_index, ~0);
+ ai = lm->if_address_pool_index_by_sw_if_index[sw_if_index];
+
+ while (ai != (u32) ~ 0)
+ {
+ a = pool_elt_at_index (lm->if_address_pool, ai);
+ ip6_address_t *address =
+ ip_interface_address_get_address (lm, a);
+
+ if (ip6_address_is_link_local_unicast (address))
+ vec_add1 (link_scope, ai);
+ else if (ip6_address_is_global_unicast (address))
+ vec_add1 (global_scope, ai);
+ else if (ip6_address_is_local_unicast (address))
+ vec_add1 (local_scope, ai);
+ else
+ vec_add1 (unknown_scope, ai);
+
+ ai = a->next_this_sw_interface;
+ }
+
+ if (vec_len (link_scope))
+ {
+ vlib_cli_output (vm, "\tLink-local address(es):\n");
+ ip6_print_addrs (vm, link_scope);
+ vec_free (link_scope);
+ }
+
+ if (vec_len (local_scope))
+ {
+ vlib_cli_output (vm, "\tLocal unicast address(es):\n");
+ ip6_print_addrs (vm, local_scope);
+ vec_free (local_scope);
+ }
+
+ if (vec_len (global_scope))
+ {
+ vlib_cli_output (vm, "\tGlobal unicast address(es):\n");
+ ip6_print_addrs (vm, global_scope);
+ vec_free (global_scope);
+ }
+
+ if (vec_len (unknown_scope))
+ {
+ vlib_cli_output (vm, "\tOther-scope address(es):\n");
+ ip6_print_addrs (vm, unknown_scope);
+ vec_free (unknown_scope);
+ }
+
+ vlib_cli_output (vm, "\tJoined group address(es):\n");
+ ip6_mldp_group_t *m;
+ /* *INDENT-OFF* */
+ pool_foreach (m, radv_info->mldp_group_pool,
+ ({
+ vlib_cli_output (vm, "\t\t%U\n", format_ip6_address,
+ &m->mcast_address);
+ }));
+ /* *INDENT-ON* */
+
+ vlib_cli_output (vm, "\tAdvertised Prefixes:\n");
+ ip6_radv_prefix_t *p;
+ /* *INDENT-OFF* */
+ pool_foreach (p, radv_info->adv_prefixes_pool,
+ ({
+ vlib_cli_output (vm, "\t\tprefix %U, length %d\n",
+ format_ip6_address, &p->prefix, p->prefix_len);
+ }));
+ /* *INDENT-ON* */
+
+ vlib_cli_output (vm, "\tMTU is %d\n", radv_info->adv_link_mtu);
+ vlib_cli_output (vm, "\tICMP error messages are unlimited\n");
+ vlib_cli_output (vm, "\tICMP redirects are disabled\n");
+ vlib_cli_output (vm, "\tICMP unreachables are not sent\n");
+ vlib_cli_output (vm, "\tND DAD is disabled\n");
+ //vlib_cli_output (vm, "\tND reachable time is %d milliseconds\n",);
+ vlib_cli_output (vm, "\tND advertised reachable time is %d\n",
+ radv_info->adv_neighbor_reachable_time_in_msec);
+ vlib_cli_output (vm,
+ "\tND advertised retransmit interval is %d (msec)\n",
+ radv_info->
+ adv_time_in_msec_between_retransmitted_neighbor_solicitations);
+
+ u32 ra_interval = radv_info->max_radv_interval;
+ u32 ra_interval_min = radv_info->min_radv_interval;
+ vlib_cli_output (vm,
+ "\tND router advertisements are sent every %d seconds (min interval is %d)\n",
+ ra_interval, ra_interval_min);
+ vlib_cli_output (vm,
+ "\tND router advertisements live for %d seconds\n",
+ radv_info->adv_router_lifetime_in_sec);
+ vlib_cli_output (vm,
+ "\tHosts %s stateless autoconfig for addresses\n",
+ (radv_info->adv_managed_flag) ? "use" :
+ " don't use");
+ vlib_cli_output (vm, "\tND router advertisements sent %d\n",
+ radv_info->n_advertisements_sent);
+ vlib_cli_output (vm, "\tND router solicitations received %d\n",
+ radv_info->n_solicitations_rcvd);
+ vlib_cli_output (vm, "\tND router solicitations dropped %d\n",
+ radv_info->n_solicitations_dropped);
+ }
+ else
+ {
+ error = clib_error_return (0, "IPv6 not enabled on interface",
+ format_unformat_error, input);
+
+ }
+ }
+ return error;
+}
+
+/*?
+ * This command is used to display various IPv6 attributes on a given
+ * interface.
+ *
+ * @cliexpar
+ * Example of how to display IPv6 settings:
+ * @cliexstart{show ip6 interface GigabitEthernet2/0/0}
+ * GigabitEthernet2/0/0 is admin up
+ * Link-local address(es):
+ * fe80::ab8/64
+ * Joined group address(es):
+ * ff02::1
+ * ff02::2
+ * ff02::16
+ * ff02::1:ff00:ab8
+ * Advertised Prefixes:
+ * prefix fe80::fe:28ff:fe9c:75b3, length 64
+ * MTU is 1500
+ * ICMP error messages are unlimited
+ * ICMP redirects are disabled
+ * ICMP unreachables are not sent
+ * ND DAD is disabled
+ * ND advertised reachable time is 0
+ * ND advertised retransmit interval is 0 (msec)
+ * ND router advertisements are sent every 200 seconds (min interval is 150)
+ * ND router advertisements live for 600 seconds
+ * Hosts use stateless autoconfig for addresses
+ * ND router advertisements sent 19336
+ * ND router solicitations received 0
+ * ND router solicitations dropped 0
+ * @cliexend
+ * Example of output if IPv6 is not enabled on the interface:
+ * @cliexstart{show ip6 interface GigabitEthernet2/0/0}
+ * show ip6 interface: IPv6 not enabled on interface
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_ip6_interface_command, static) =
+{
+ .path = "show ip6 interface",
+ .function = show_ip6_interface_cmd,
+ .short_help = "show ip6 interface <interface>",
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+disable_ip6_interface (vlib_main_t * vm, u32 sw_if_index)
+{
+ clib_error_t *error = 0;
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ u32 ri;
+
+ /* look up the radv_t information for this interface */
+ vec_validate_init_empty (nm->if_radv_pool_index_by_sw_if_index, sw_if_index,
+ ~0);
+ ri = nm->if_radv_pool_index_by_sw_if_index[sw_if_index];
+
+ /* if not created - do nothing */
+ if (ri != ~0)
+ {
+ vnet_main_t *vnm = vnet_get_main ();
+ ip6_radv_t *radv_info;
+
+ radv_info = pool_elt_at_index (nm->if_radv_pool, ri);
+
+ /* check radv_info ref count for other ip6 addresses on this interface */
+ if (radv_info->ref_count == 0)
+ {
+ /* essentially "disables" ipv6 on this interface */
+ error = ip6_add_del_interface_address (vm, sw_if_index,
+ &radv_info->
+ link_local_address,
+ radv_info->
+ link_local_prefix_len,
+ 1 /* is_del */ );
+
+ ip6_neighbor_sw_interface_add_del (vnm, sw_if_index,
+ 0 /* is_add */ );
+ }
+ }
+ return error;
+}
+
+int
+ip6_interface_enabled (vlib_main_t * vm, u32 sw_if_index)
+{
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ u32 ri = ~0;
+
+ /* look up the radv_t information for this interface */
+ vec_validate_init_empty (nm->if_radv_pool_index_by_sw_if_index, sw_if_index,
+ ~0);
+
+ ri = nm->if_radv_pool_index_by_sw_if_index[sw_if_index];
+
+ return ri != ~0;
+}
+
+clib_error_t *
+enable_ip6_interface (vlib_main_t * vm, u32 sw_if_index)
+{
+ clib_error_t *error = 0;
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ u32 ri;
+ int is_add = 1;
+
+ /* look up the radv_t information for this interface */
+ vec_validate_init_empty (nm->if_radv_pool_index_by_sw_if_index, sw_if_index,
+ ~0);
+
+ ri = nm->if_radv_pool_index_by_sw_if_index[sw_if_index];
+
+ /* if not created yet */
+ if (ri == ~0)
+ {
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_sw_interface_t *sw_if0;
+
+ sw_if0 = vnet_get_sup_sw_interface (vnm, sw_if_index);
+ if (sw_if0->type == VNET_SW_INTERFACE_TYPE_HARDWARE)
+ {
+ ethernet_interface_t *eth_if0;
+
+ eth_if0 =
+ ethernet_get_interface (&ethernet_main, sw_if0->hw_if_index);
+ if (eth_if0)
+ {
+ /* create radv_info for this interface; it holds all the info needed for router adverts */
+ ri =
+ ip6_neighbor_sw_interface_add_del (vnm, sw_if_index, is_add);
+
+ if (ri != ~0)
+ {
+ ip6_radv_t *radv_info;
+ ip6_address_t link_local_address;
+
+ radv_info = pool_elt_at_index (nm->if_radv_pool, ri);
+
+ ip6_link_local_address_from_ethernet_mac_address
+ (&link_local_address, eth_if0->address);
+
+ sw_if0 = vnet_get_sw_interface (vnm, sw_if_index);
+ if (sw_if0->type == VNET_SW_INTERFACE_TYPE_SUB)
+ {
+ /* make up an interface id */
+ md5_context_t m;
+ u8 digest[16];
+
+ link_local_address.as_u64[0] = radv_info->randomizer;
+
+ md5_init (&m);
+ md5_add (&m, &link_local_address, 16);
+ md5_finish (&m, digest);
+
+ clib_memcpy (&link_local_address, digest, 16);
+
+ radv_info->randomizer = link_local_address.as_u64[0];
+
+ link_local_address.as_u64[0] =
+ clib_host_to_net_u64 (0xFE80000000000000ULL);
+ /* clear u bit */
+ link_local_address.as_u8[8] &= 0xfd;
+ }
+
+ /* essentially "enables" ipv6 on this interface */
+ error = ip6_add_del_interface_address (vm, sw_if_index,
+ &link_local_address,
+ 128
+ /* address width */ ,
+ 0 /* is_del */ );
+
+ if (error)
+ ip6_neighbor_sw_interface_add_del (vnm, sw_if_index,
+ !is_add);
+ else
+ {
+ radv_info->link_local_address = link_local_address;
+ radv_info->link_local_prefix_len = 64;
+ }
+ }
+ }
+ }
+ }
+ return error;
+}
+
+static clib_error_t *
+enable_ip6_interface_cmd (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 sw_if_index;
+
+ sw_if_index = ~0;
+
+ if (unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ enable_ip6_interface (vm, sw_if_index);
+ }
+ else
+ {
+ error = clib_error_return (0, "unknown interface\n'",
+ format_unformat_error, input);
+
+ }
+ return error;
+}
+
+/*?
+ * This command is used to enable IPv6 on a given interface.
+ *
+ * @cliexpar
+ * Example of how to enable IPv6 on a given interface:
+ * @cliexcmd{enable ip6 interface GigabitEthernet2/0/0}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (enable_ip6_interface_command, static) =
+{
+ .path = "enable ip6 interface",
+ .function = enable_ip6_interface_cmd,
+ .short_help = "enable ip6 interface <interface>",
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+disable_ip6_interface_cmd (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 sw_if_index;
+
+ sw_if_index = ~0;
+
+ if (unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = disable_ip6_interface (vm, sw_if_index);
+ }
+ else
+ {
+ error = clib_error_return (0, "unknown interface\n'",
+ format_unformat_error, input);
+
+ }
+ return error;
+}
+
+/*?
+ * This command is used to disable IPv6 on a given interface.
+ *
+ * @cliexpar
+ * Example of how to disable IPv6 on a given interface:
+ * @cliexcmd{disable ip6 interface GigabitEthernet2/0/0}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (disable_ip6_interface_command, static) =
+{
+ .path = "disable ip6 interface",
+ .function = disable_ip6_interface_cmd,
+ .short_help = "disable ip6 interface <interface>",
+};
+/* *INDENT-ON* */
+
+/*?
+ * This command is used to configure the neighbor discovery
+ * parameters on a given interface. Use the '<em>show ip6 interface</em>'
+ * command to display some of the current neighbor discovery parameters
+ * on a given interface. This command has three formats:
+ *
+ *
+ * <b>Format 1 - Router Advertisement Options:</b> (Only one can be entered in a single command)
+ *
+ * '<em><b>ip6 nd <interface> [no] [ra-managed-config-flag] | [ra-other-config-flag] | [ra-suppress] | [ra-suppress-link-layer] | [ra-send-unicast] | [ra-lifetime <lifetime>] | [ra-initial <cnt> <interval>] | [ra-interval <max-interval> [<min-interval>]] | [ra-cease]</b></em>'
+ *
+ * Where:
+ *
+ * <em>[no] ra-managed-config-flag</em> - Advertises in ICMPv6
+ * router-advertisement messages to use stateful address
+ * auto-configuration to obtain address information (sets the M-bit).
+ * Default is the M-bit is not set and the '<em>no</em>' option
+ * returns it to this default state.
+ *
+ * <em>[no] ra-other-config-flag</em> - Indicates in ICMPv6
+ * router-advertisement messages that hosts use stateful auto
+ * configuration to obtain nonaddress related information (sets
+ * the O-bit). Default is the O-bit is not set and the '<em>no</em>'
+ * option returns it to this default state.
+ *
+ * <em>[no] ra-suppress</em> - Disables sending ICMPv6 router-advertisement
+ * messages. The '<em>no</em>' option implies to enable sending ICMPv6
+ * router-advertisement messages.
+ *
+ * <em>[no] ra-suppress-link-layer</em> - Indicates not to include the
+ * optional source link-layer address in the ICMPv6 router-advertisement
+ * messages. Default is to include the optional source link-layer address
+ * and the '<em>no</em>' option returns it to this default state.
+ *
+ * <em>[no] ra-send-unicast</em> - Use the source address of the
+ * router-solicitation message if available. The default is to use
+ * the all-nodes multicast address, and the '<em>no</em>' option returns
+ * it to this default state.
+ *
+ * <em>[no] ra-lifetime <lifetime></em> - Advertises the lifetime of a
+ * default router in ICMPv6 router-advertisement messages. The range is
+ * from 0 to 9000 seconds. '<em><lifetime></em>' must be greater than
+ * '<em><max-interval></em>'. The default value is 600 seconds and the
+ * '<em>no</em>' option returns it to this default value.
+ *
+ * <em>[no] ra-initial <cnt> <interval></em> - Number of initial ICMPv6
+ * router-advertisement messages sent and the interval between each
+ * message. Range for count is 1 - 3 and default is 3. Range for interval
+ * is 1 to 16 seconds, and default is 16 seconds. The '<em>no</em>' option
+ * returns both to their default value.
+ *
+ * <em>[no] ra-interval <max-interval> [<min-interval>]</em> - Configures the
+ * interval between sending ICMPv6 router-advertisement messages. The
+ * range for max-interval is from 4 to 200 seconds. min-interval can not
+ * be more than 75% of max-interval. If not set, min-interval will be
+ * set to 75% of max-interval. The range for min-interval is from 3 to
+ * 150 seconds. The '<em>no</em>' option returns both to their default
+ * value.
+ *
+ * <em>[no] ra-cease</em> - Cease sending ICMPv6 router-advertisement messages.
+ * The '<em>no</em>' options implies to start (or restart) sending
+ * ICMPv6 router-advertisement messages.
+ *
+ *
+ * <b>Format 2 - Prefix Options:</b>
+ *
+ * '<em><b>ip6 nd <interface> [no] prefix <ip6-address>/<width> [<valid-lifetime> <pref-lifetime> | infinite] [no-advertise] [off-link] [no-autoconfig] [no-onlink]</b></em>'
+ *
+ * Where:
+ *
+ * <em>no</em> - All additional flags are ignored and the prefix is deleted.
+ *
+ * <em><valid-lifetime> <pref-lifetime></em> - '<em><valid-lifetime></em>' is the
+ * length of time in seconds during which the prefix is valid for the purpose of
+ * on-link determination. Range is 7203 to 2592000 seconds and default is 2592000
+ * seconds (30 days). '<em><pref-lifetime></em>' is the preferred-lifetime and is the
+ * length of time in seconds during which addresses generated from the prefix remain
+ * preferred. Range is 0 to 604800 seconds and default is 604800 seconds (7 days).
+ *
+ * <em>infinite</em> - Both '<em><valid-lifetime></em>' and '<em><pref-lifetime></em>'
+ * are infinite; no timeout.
+ *
+ * <em>no-advertise</em> - Do not send full router address in prefix
+ * advertisement. Default is to advertise (i.e. - This flag is off by default).
+ *
+ * <em>off-link</em> - Prefix is off-link, clear L-bit in packet. Default is on-link
+ * (i.e. - This flag is off and L-bit in packet is set by default and this prefix can
+ * be used for on-link determination). '<em>no-onlink</em>' also controls the L-bit.
+ *
+ * <em>no-autoconfig</em> - Do not use prefix for autoconfiguration, clear A-bit in packet.
+ * Default is autoconfig (i.e. - This flag is off and A-bit in packet is set by default.
+ *
+ * <em>no-onlink</em> - Do not use prefix for onlink determination, clear L-bit in packet.
+ * Default is on-link (i.e. - This flag is off and L-bit in packet is set by default and
+ * this prefix can be used for on-link determination). '<em>off-link</em>' also controls
+ * the L-bit.
+ *
+ *
+ * <b>Format 3: - Default of Prefix:</b>
+ *
+ * '<em><b>ip6 nd <interface> [no] prefix <ip6-address>/<width> default</b></em>'
+ *
+ * When a new prefix is added (or existing one is being overwritten) <em>default</em>
+ * uses default values for the prefix. If <em>no</em> is used, the <em>default</em>
+ * is ignored and the prefix is deleted.
+ *
+ *
+ * @cliexpar
+ * Example of how to set a router advertisement option:
+ * @cliexcmd{ip6 nd GigabitEthernet2/0/0 ra-interval 100 20}
+ * Example of how to add a prefix:
+ * @cliexcmd{ip6 nd GigabitEthernet2/0/0 prefix fe80::fe:28ff:fe9c:75b3/64 infinite no-advertise}
+ * Example of how to delete a prefix:
+ * @cliexcmd{ip6 nd GigabitEthernet2/0/0 no prefix fe80::fe:28ff:fe9c:75b3/64}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ip6_nd_command, static) =
+{
+ .path = "ip6 nd",
+ .short_help = "ip6 nd <interface> ...",
+ .function = ip6_neighbor_cmd,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+set_ip6_link_local_address (vlib_main_t * vm,
+ u32 sw_if_index,
+ ip6_address_t * address, u8 address_length)
+{
+ clib_error_t *error = 0;
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ u32 ri;
+ ip6_radv_t *radv_info;
+ vnet_main_t *vnm = vnet_get_main ();
+
+ if (!ip6_address_is_link_local_unicast (address))
+ {
+ vnm->api_errno = VNET_API_ERROR_ADDRESS_NOT_LINK_LOCAL;
+ return (error = clib_error_return (0, "address not link-local",
+ format_unformat_error));
+ }
+
+ /* call enable ipv6 */
+ enable_ip6_interface (vm, sw_if_index);
+
+ ri = nm->if_radv_pool_index_by_sw_if_index[sw_if_index];
+
+ if (ri != ~0)
+ {
+ radv_info = pool_elt_at_index (nm->if_radv_pool, ri);
+
+ /* save interface link-local address (overwriting the default) */
+
+ /* delete the old one */
+ error = ip6_add_del_interface_address (vm, sw_if_index,
+ &radv_info->link_local_address,
+ radv_info->link_local_prefix_len
+ /* address width */ ,
+ 1 /* is_del */ );
+
+ if (!error)
+ {
+ /* add the new one */
+ error = ip6_add_del_interface_address (vm, sw_if_index,
+ address, address_length
+ /* address width */ ,
+ 0 /* is_del */ );
+
+ if (!error)
+ {
+ radv_info->link_local_address = *address;
+ radv_info->link_local_prefix_len = address_length;
+ }
+ }
+ }
+ else
+ {
+ vnm->api_errno = VNET_API_ERROR_IP6_NOT_ENABLED;
+ error = clib_error_return (0, "ip6 not enabled for interface",
+ format_unformat_error);
+ }
+ return error;
+}
+
+clib_error_t *
+set_ip6_link_local_address_cmd (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 sw_if_index;
+ ip6_address_t ip6_addr;
+ u32 addr_len = 0;
+
+ if (unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ /* get the rest of the command */
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%U/%d",
+ unformat_ip6_address, &ip6_addr, &addr_len))
+ break;
+ else
+ return (unformat_parse_error (input));
+ }
+ }
+ error = set_ip6_link_local_address (vm, sw_if_index, &ip6_addr, addr_len);
+ return error;
+}
+
+/*?
+ * This command is used to assign an IPv6 Link-local address to an
+ * interface. This command will enable IPv6 on an interface if it
+ * is not already enabled. Use the '<em>show ip6 interface</em>' command
+ * to display the assigned Link-local address.
+ *
+ * @cliexpar
+ * Example of how to assign an IPv6 Link-local address to an interface:
+ * @cliexcmd{set ip6 link-local address GigabitEthernet2/0/0 FE80::AB8/64}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_ip6_link_local_address_command, static) =
+{
+ .path = "set ip6 link-local address",
+ .short_help = "set ip6 link-local address <interface> <ip6-address>/<width>",
+ .function = set_ip6_link_local_address_cmd,
+};
+/* *INDENT-ON* */
+
+/* callback when an interface address is added or deleted */
+static void
+ip6_neighbor_add_del_interface_address (ip6_main_t * im,
+ uword opaque,
+ u32 sw_if_index,
+ ip6_address_t * address,
+ u32 address_length,
+ u32 if_address_index, u32 is_delete)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ u32 ri;
+ vlib_main_t *vm = vnm->vlib_main;
+ ip6_radv_t *radv_info;
+ ip6_address_t a;
+ ip6_mldp_group_t *mcast_group_info;
+
+ /* create solicited node multicast address for this interface address */
+ ip6_set_solicited_node_multicast_address (&a, 0);
+
+ a.as_u8[0xd] = address->as_u8[0xd];
+ a.as_u8[0xe] = address->as_u8[0xe];
+ a.as_u8[0xf] = address->as_u8[0xf];
+
+ if (!is_delete)
+ {
+ /* try to create radv_info - does nothing if ipv6 already enabled */
+ enable_ip6_interface (vm, sw_if_index);
+
+ /* look up the radv_t information for this interface */
+ vec_validate_init_empty (nm->if_radv_pool_index_by_sw_if_index,
+ sw_if_index, ~0);
+ ri = nm->if_radv_pool_index_by_sw_if_index[sw_if_index];
+ if (ri != ~0)
+ {
+ /* get radv_info */
+ radv_info = pool_elt_at_index (nm->if_radv_pool, ri);
+
+ /* add address */
+ if (!ip6_address_is_link_local_unicast (address))
+ radv_info->ref_count++;
+
+ /* lookup prefix info for this address on this interface */
+ uword *p = mhash_get (&radv_info->address_to_mldp_index, &a);
+ mcast_group_info =
+ p ? pool_elt_at_index (radv_info->mldp_group_pool, p[0]) : 0;
+
+ /* add solicited-node multicast address */
+ if (!mcast_group_info)
+ {
+ /* add */
+ u32 mi;
+ pool_get (radv_info->mldp_group_pool, mcast_group_info);
+
+ mi = mcast_group_info - radv_info->mldp_group_pool;
+ mhash_set (&radv_info->address_to_mldp_index, &a, mi,
+ /* old_value */ 0);
+
+ mcast_group_info->type = 4;
+ mcast_group_info->mcast_source_address_pool = 0;
+ mcast_group_info->num_sources = 0;
+ clib_memcpy (&mcast_group_info->mcast_address, &a,
+ sizeof (ip6_address_t));
+ }
+ }
+ }
+ else
+ {
+
+ /* delete */
+ /* look up the radv_t information for this interface */
+ vec_validate_init_empty (nm->if_radv_pool_index_by_sw_if_index,
+ sw_if_index, ~0);
+ ri = nm->if_radv_pool_index_by_sw_if_index[sw_if_index];
+ if (ri != ~0)
+ {
+ /* get radv_info */
+ radv_info = pool_elt_at_index (nm->if_radv_pool, ri);
+
+ /* lookup prefix info for this address on this interface */
+ uword *p = mhash_get (&radv_info->address_to_mldp_index, &a);
+ mcast_group_info =
+ p ? pool_elt_at_index (radv_info->mldp_group_pool, p[0]) : 0;
+
+ if (mcast_group_info)
+ {
+ mhash_unset (&radv_info->address_to_mldp_index, &a,
+ /* old_value */ 0);
+ pool_put (radv_info->mldp_group_pool, mcast_group_info);
+ }
+
+ /* clear flag so an MLDP "report" is re-sent when the interface is up */
+ radv_info->all_routers_mcast = 0;
+
+ /* remove address */
+ if (!ip6_address_is_link_local_unicast (address))
+ radv_info->ref_count--;
+ }
+ }
+}
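+
+/*
+ * Worked example of the solicited-node group maintained above: adding
+ * 2001:db8::ab8 to an interface copies its low three bytes into
+ * ff02::1:ff00:0/104, so the interface joins ff02::1:ff00:ab8, the same
+ * group a neighbor solicitation for that address is sent to.
+ */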
+
+clib_error_t *
+ip6_set_neighbor_limit (u32 neighbor_limit)
+{
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+
+ nm->limit_neighbor_cache_size = neighbor_limit;
+ return 0;
+}
+
+static clib_error_t *
+ip6_neighbor_init (vlib_main_t * vm)
+{
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ ip6_main_t *im = &ip6_main;
+
+ mhash_init (&nm->neighbor_index_by_key,
+ /* value size */ sizeof (uword),
+ /* key size */ sizeof (ip6_neighbor_key_t));
+
+ icmp6_register_type (vm, ICMP6_neighbor_solicitation,
+ ip6_icmp_neighbor_solicitation_node.index);
+ icmp6_register_type (vm, ICMP6_neighbor_advertisement,
+ ip6_icmp_neighbor_advertisement_node.index);
+ icmp6_register_type (vm, ICMP6_router_solicitation,
+ ip6_icmp_router_solicitation_node.index);
+ icmp6_register_type (vm, ICMP6_router_advertisement,
+ ip6_icmp_router_advertisement_node.index);
+
+ /* handler node for ip6 neighbor discovery events and timers */
+ vlib_register_node (vm, &ip6_icmp_neighbor_discovery_event_node);
+
+ /* add call backs */
+ ip6_add_del_interface_address_callback_t cb;
+ memset (&cb, 0x0, sizeof (ip6_add_del_interface_address_callback_t));
+
+ /* when an interface address changes... */
+ cb.function = ip6_neighbor_add_del_interface_address;
+ cb.function_opaque = 0;
+ vec_add1 (im->add_del_interface_address_callbacks, cb);
+
+ mhash_init (&nm->pending_resolutions_by_address,
+ /* value size */ sizeof (uword),
+ /* key size */ sizeof (ip6_address_t));
+
+ mhash_init (&nm->mac_changes_by_address,
+ /* value size */ sizeof (uword),
+ /* key size */ sizeof (ip6_address_t));
+
+ /* default, configurable */
+ nm->limit_neighbor_cache_size = 50000;
+
+#if 0
+ /* $$$$ Hack fix for today */
+ vec_validate_init_empty
+ (im->discover_neighbor_next_index_by_hw_if_index, 32, 0 /* drop */ );
+#endif
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ip6_neighbor_init);
+
+
+void
+vnet_register_ip6_neighbor_resolution_event (vnet_main_t * vnm,
+ void *address_arg,
+ uword node_index,
+ uword type_opaque, uword data)
+{
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ ip6_address_t *address = address_arg;
+ uword *p;
+ pending_resolution_t *pr;
+
+ pool_get (nm->pending_resolutions, pr);
+
+ pr->next_index = ~0;
+ pr->node_index = node_index;
+ pr->type_opaque = type_opaque;
+ pr->data = data;
+
+ p = mhash_get (&nm->pending_resolutions_by_address, address);
+ if (p)
+ {
+ /* Insert new resolution at the head of the list */
+ pr->next_index = p[0];
+ mhash_unset (&nm->pending_resolutions_by_address, address, 0);
+ }
+
+ mhash_set (&nm->pending_resolutions_by_address, address,
+ pr - nm->pending_resolutions, 0 /* old value */ );
+}
+
+int
+vnet_add_del_ip6_nd_change_event (vnet_main_t * vnm,
+ void *data_callback,
+ u32 pid,
+ void *address_arg,
+ uword node_index,
+ uword type_opaque, uword data, int is_add)
+{
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ ip6_address_t *address = address_arg;
+ uword *p;
+ pending_resolution_t *mc;
+ void (*fp) (u32, u8 *) = data_callback;
+
+ if (is_add)
+ {
+ pool_get (nm->mac_changes, mc);
+
+ mc->next_index = ~0;
+ mc->node_index = node_index;
+ mc->type_opaque = type_opaque;
+ mc->data = data;
+ mc->data_callback = data_callback;
+ mc->pid = pid;
+
+ p = mhash_get (&nm->mac_changes_by_address, address);
+ if (p)
+ {
+ /* Insert new resolution at the head of the list */
+ mc->next_index = p[0];
+ mhash_unset (&nm->mac_changes_by_address, address, 0);
+ }
+
+ mhash_set (&nm->mac_changes_by_address, address,
+ mc - nm->mac_changes, 0);
+ return 0;
+ }
+ else
+ {
+ u32 index;
+ pending_resolution_t *mc_last = 0;
+
+ p = mhash_get (&nm->mac_changes_by_address, address);
+ if (p == 0)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ index = p[0];
+
+ while (index != (u32) ~ 0)
+ {
+ mc = pool_elt_at_index (nm->mac_changes, index);
+ if (mc->node_index == node_index &&
+ mc->type_opaque == type_opaque && mc->pid == pid)
+ {
+ /* Clients may need to clean up pool entries, too */
+ if (fp)
+ (*fp) (mc->data, 0 /* no new mac addrs */ );
+ if (index == p[0])
+ {
+ mhash_unset (&nm->mac_changes_by_address, address, 0);
+ if (mc->next_index != ~0)
+ mhash_set (&nm->mac_changes_by_address, address,
+ mc->next_index, 0);
+ pool_put (nm->mac_changes, mc);
+ return 0;
+ }
+ else
+ {
+ ASSERT (mc_last);
+ mc_last->next_index = mc->next_index;
+ pool_put (nm->mac_changes, mc);
+ return 0;
+ }
+ }
+ mc_last = mc;
+ index = mc->next_index;
+ }
+
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+ }
+}
+
+int
+vnet_ip6_nd_term (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_buffer_t * p0,
+ ethernet_header_t * eth,
+ ip6_header_t * ip, u32 sw_if_index, u16 bd_index, u8 shg)
+{
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ icmp6_neighbor_solicitation_or_advertisement_header_t *ndh;
+ pending_resolution_t *mc;
+ uword *p;
+
+ ndh = ip6_next_header (ip);
+ if (ndh->icmp.type != ICMP6_neighbor_solicitation &&
+ ndh->icmp.type != ICMP6_neighbor_advertisement)
+ return 0;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (p0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ u8 *t0 = vlib_add_trace (vm, node, p0,
+ sizeof (icmp6_input_trace_t));
+ clib_memcpy (t0, ip, sizeof (icmp6_input_trace_t));
+ }
+
+ /* Check if anyone wants ND events for L2 BDs */
+ p = mhash_get (&nm->mac_changes_by_address, &ip6a_zero);
+ if (p && shg == 0 && /* Only SHG 0 interface which is more likely local */
+ !ip6_address_is_link_local_unicast (&ip->src_address))
+ {
+ u32 next_index = p[0];
+ while (next_index != (u32) ~ 0)
+ {
+ int (*fp) (u32, u8 *, u32, ip6_address_t *);
+ int rv = 1;
+ mc = pool_elt_at_index (nm->mac_changes, next_index);
+ fp = mc->data_callback;
+ /* Call the callback, return 1 to suppress dup events */
+ if (fp)
+ rv = (*fp) (mc->data,
+ eth->src_address, sw_if_index, &ip->src_address);
+ /* Signal the resolver process */
+ if (rv == 0)
+ vlib_process_signal_event (vm, mc->node_index,
+ mc->type_opaque, mc->data);
+ next_index = mc->next_index;
+ }
+ }
+
+ /* Check if a MAC entry exists for the solicited target IP */
+ if (ndh->icmp.type == ICMP6_neighbor_solicitation)
+ {
+ icmp6_neighbor_discovery_ethernet_link_layer_address_option_t *opt;
+ l2_bridge_domain_t *bd_config;
+ u8 *macp;
+
+ opt = (void *) (ndh + 1);
+ if ((opt->header.type !=
+ ICMP6_NEIGHBOR_DISCOVERY_OPTION_source_link_layer_address) ||
+ (opt->header.n_data_u64s != 1))
+ return 0; /* source link layer address option not present */
+
+ bd_config = vec_elt_at_index (l2input_main.bd_configs, bd_index);
+ macp =
+ (u8 *) hash_get_mem (bd_config->mac_by_ip6, &ndh->target_address);
+ if (macp)
+ { /* found ip-mac entry, generate neighbor advertisement response */
+ int bogus_length;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip6_icmp_input_node.index);
+ ip->dst_address = ip->src_address;
+ ip->src_address = ndh->target_address;
+ ip->hop_limit = 255;
+ opt->header.type =
+ ICMP6_NEIGHBOR_DISCOVERY_OPTION_target_link_layer_address;
+ clib_memcpy (opt->ethernet_address, macp, 6);
+ ndh->icmp.type = ICMP6_neighbor_advertisement;
+ ndh->advertisement_flags = clib_host_to_net_u32
+ (ICMP6_NEIGHBOR_ADVERTISEMENT_FLAG_SOLICITED |
+ ICMP6_NEIGHBOR_ADVERTISEMENT_FLAG_OVERRIDE);
+ ndh->icmp.checksum = 0;
+ ndh->icmp.checksum =
+ ip6_tcp_udp_icmp_compute_checksum (vm, p0, ip, &bogus_length);
+ clib_memcpy (eth->dst_address, eth->src_address, 6);
+ clib_memcpy (eth->src_address, macp, 6);
+ vlib_error_count (vm, error_node->node_index,
+ ICMP6_ERROR_NEIGHBOR_ADVERTISEMENTS_TX, 1);
+ return 1;
+ }
+ }
+
+ return 0;
+
+}
+
+void
+ethernet_ndp_change_mac (u32 sw_if_index)
+{
+ ip6_neighbor_main_t *nm = &ip6_neighbor_main;
+ ip6_neighbor_t *n;
+
+ /* *INDENT-OFF* */
+ pool_foreach (n, nm->neighbor_pool,
+ ({
+ if (n->key.sw_if_index == sw_if_index)
+ {
+ adj_nbr_walk_nh6 (sw_if_index,
+ &n->key.ip6_address,
+ ip6_nd_mk_complete_walk, n);
+ }
+ }));
+ /* *INDENT-ON* */
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip6_neighbor.h b/src/vnet/ip/ip6_neighbor.h
new file mode 100644
index 00000000000..b2c9f48ae8a
--- /dev/null
+++ b/src/vnet/ip/ip6_neighbor.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * ip6_neighbor.h: ip6 neighbor structures
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_ip6_neighbor_h
+#define included_ip6_neighbor_h
+
+#include <vnet/fib/fib_types.h>
+
+typedef struct
+{
+ ip6_address_t ip6_address;
+ u32 sw_if_index;
+ u32 pad;
+} ip6_neighbor_key_t;
+
+typedef struct
+{
+ ip6_neighbor_key_t key;
+ u8 link_layer_address[8];
+ u16 flags;
+#define IP6_NEIGHBOR_FLAG_STATIC (1 << 0)
+#define IP6_NEIGHBOR_FLAG_DYNAMIC (2 << 0)
+ u64 cpu_time_last_updated;
+ fib_node_index_t fib_entry_index;
+} ip6_neighbor_t;
+
+ip6_neighbor_t *ip6_neighbors_entries (u32 sw_if_index);
+
+#endif /* included_ip6_neighbor_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip6_packet.h b/src/vnet/ip/ip6_packet.h
new file mode 100644
index 00000000000..1e551c8b67d
--- /dev/null
+++ b/src/vnet/ip/ip6_packet.h
@@ -0,0 +1,499 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip6/packet.h: ip6 packet format
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_ip6_packet_h
+#define included_ip6_packet_h
+
+#include <vnet/ip/tcp_packet.h>
+#include <vnet/ip/ip4_packet.h>
+
+typedef union
+{
+ u8 as_u8[16];
+ u16 as_u16[8];
+ u32 as_u32[4];
+ u64 as_u64[2];
+ uword as_uword[16 / sizeof (uword)];
+}
+ip6_address_t;
+
+/* Packed so that the mhash key doesn't include uninitialized pad bytes */
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ /* IP address must be first for ip_interface_address_get_address() to work */
+ ip6_address_t ip6_addr;
+ u32 fib_index;
+}) ip6_address_fib_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (union {
+ struct {
+ u32 pad[3];
+ ip4_address_t ip4;
+ };
+ ip6_address_t ip6;
+ u8 as_u8[16];
+ u64 as_u64[2];
+}) ip46_address_t;
+/* *INDENT-ON* */
+#define ip46_address_is_ip4(ip46) (((ip46)->pad[0] | (ip46)->pad[1] | (ip46)->pad[2]) == 0)
+#define ip46_address_mask_ip4(ip46) ((ip46)->pad[0] = (ip46)->pad[1] = (ip46)->pad[2] = 0)
+#define ip46_address_set_ip4(ip46, ip) (ip46_address_mask_ip4(ip46), (ip46)->ip4 = (ip)[0])
+#define ip46_address_reset(ip46) ((ip46)->as_u64[0] = (ip46)->as_u64[1] = 0)
+#define ip46_address_cmp(ip46_1, ip46_2) (memcmp(ip46_1, ip46_2, sizeof(*ip46_1)))
+#define ip46_address_is_zero(ip46) (((ip46)->as_u64[0] == 0) && ((ip46)->as_u64[1] == 0))
+
+always_inline void
+ip46_from_addr_buf (u32 is_ipv6, u8 * buf, ip46_address_t * ip)
+{
+ if (is_ipv6)
+ ip->ip6 = *((ip6_address_t *) buf);
+ else
+ ip46_address_set_ip4 (ip, (ip4_address_t *) buf);
+}
+
+always_inline void
+ip6_addr_fib_init (ip6_address_fib_t * addr_fib, ip6_address_t * address,
+ u32 fib_index)
+{
+ addr_fib->ip6_addr.as_u64[0] = address->as_u64[0];
+ addr_fib->ip6_addr.as_u64[1] = address->as_u64[1];
+ addr_fib->fib_index = fib_index;
+}
+
+/* Special addresses:
+ unspecified ::/128
+ loopback ::1/128
+ global unicast 2000::/3
+ unique local unicast fc00::/7
+ link local unicast fe80::/10
+ multicast ff00::/8
+ ietf reserved everything else. */
+
+#define foreach_ip6_multicast_address_scope \
+ _ (loopback, 0x1) \
+ _ (link_local, 0x2) \
+ _ (admin_local, 0x4) \
+ _ (site_local, 0x5) \
+ _ (organization_local, 0x8) \
+ _ (global, 0xe)
+
+#define foreach_ip6_multicast_link_local_group_id \
+ _ (all_hosts, 0x1) \
+ _ (all_routers, 0x2) \
+ _ (rip_routers, 0x9) \
+ _ (eigrp_routers, 0xa) \
+ _ (pim_routers, 0xd) \
+ _ (mldv2_routers, 0x16)
+
+typedef enum
+{
+#define _(f,n) IP6_MULTICAST_SCOPE_##f = n,
+ foreach_ip6_multicast_address_scope
+#undef _
+} ip6_multicast_address_scope_t;
+
+typedef enum
+{
+#define _(f,n) IP6_MULTICAST_GROUP_ID_##f = n,
+ foreach_ip6_multicast_link_local_group_id
+#undef _
+} ip6_multicast_link_local_group_id_t;
+
+always_inline uword
+ip6_address_is_multicast (ip6_address_t * a)
+{
+ return a->as_u8[0] == 0xff;
+}
+
+always_inline uword
+ip46_address_is_multicast (ip46_address_t * a)
+{
+ return ip46_address_is_ip4 (a) ? ip4_address_is_multicast (&a->ip4) :
+ ip6_address_is_multicast (&a->ip6);
+}
+
+always_inline void
+ip6_set_reserved_multicast_address (ip6_address_t * a,
+ ip6_multicast_address_scope_t scope,
+ u16 id)
+{
+ a->as_u64[0] = a->as_u64[1] = 0;
+ a->as_u16[0] = clib_host_to_net_u16 (0xff00 | scope);
+ a->as_u16[7] = clib_host_to_net_u16 (id);
+}
+
+always_inline void
+ip6_set_solicited_node_multicast_address (ip6_address_t * a, u32 id)
+{
+ /* 0xff02::1:ffXX:XXXX. */
+ a->as_u64[0] = a->as_u64[1] = 0;
+ a->as_u16[0] = clib_host_to_net_u16 (0xff02);
+ a->as_u8[11] = 1;
+ ASSERT ((id >> 24) == 0);
+ id |= 0xff << 24;
+ a->as_u32[3] = clib_host_to_net_u32 (id);
+}
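+
+/* Worked example: id = 0xab8 yields ff02::1:ff00:ab8, the solicited-node
+   group for any unicast address whose low 24 bits are 0x000ab8. */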
+
+always_inline void
+ip6_link_local_address_from_ethernet_address (ip6_address_t * a,
+ u8 * ethernet_address)
+{
+ a->as_u64[0] = a->as_u64[1] = 0;
+ a->as_u16[0] = clib_host_to_net_u16 (0xfe80);
+ /* Always set locally administered bit (6). */
+ a->as_u8[0x8] = ethernet_address[0] | (1 << 6);
+ a->as_u8[0x9] = ethernet_address[1];
+ a->as_u8[0xa] = ethernet_address[2];
+ a->as_u8[0xb] = 0xff;
+ a->as_u8[0xc] = 0xfe;
+ a->as_u8[0xd] = ethernet_address[3];
+ a->as_u8[0xe] = ethernet_address[4];
+ a->as_u8[0xf] = ethernet_address[5];
+}
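+
+/* Worked example: the MAC 00:01:02:03:04:05 expands, with 0xff 0xfe
+   inserted in the middle and bit 6 of the first octet set as above, to
+   fe80::4001:2ff:fe03:405. */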
+
+always_inline void
+ip6_multicast_ethernet_address (u8 * ethernet_address, u32 group_id)
+{
+ ethernet_address[0] = 0x33;
+ ethernet_address[1] = 0x33;
+ ethernet_address[2] = ((group_id >> 24) & 0xff);
+ ethernet_address[3] = ((group_id >> 16) & 0xff);
+ ethernet_address[4] = ((group_id >> 8) & 0xff);
+ ethernet_address[5] = ((group_id >> 0) & 0xff);
+}
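+
+/* Worked example: the all-MLDv2-routers group id 0x16 maps to the
+   ethernet multicast address 33:33:00:00:00:16. */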
+
+always_inline uword
+ip6_address_is_equal (ip6_address_t * a, ip6_address_t * b)
+{
+ int i;
+ for (i = 0; i < ARRAY_LEN (a->as_uword); i++)
+ if (a->as_uword[i] != b->as_uword[i])
+ return 0;
+ return 1;
+}
+
+always_inline uword
+ip6_address_is_equal_masked (ip6_address_t * a, ip6_address_t * b,
+ ip6_address_t * mask)
+{
+ int i;
+ for (i = 0; i < ARRAY_LEN (a->as_uword); i++)
+ {
+ uword a_masked, b_masked;
+ a_masked = a->as_uword[i] & mask->as_uword[i];
+ b_masked = b->as_uword[i] & mask->as_uword[i];
+
+ if (a_masked != b_masked)
+ return 0;
+ }
+ return 1;
+}
+
+always_inline void
+ip6_address_mask (ip6_address_t * a, ip6_address_t * mask)
+{
+ int i;
+ for (i = 0; i < ARRAY_LEN (a->as_uword); i++)
+ a->as_uword[i] &= mask->as_uword[i];
+}
+
+always_inline void
+ip6_address_set_zero (ip6_address_t * a)
+{
+ int i;
+ for (i = 0; i < ARRAY_LEN (a->as_uword); i++)
+ a->as_uword[i] = 0;
+}
+
+always_inline void
+ip6_address_mask_from_width (ip6_address_t * a, u32 width)
+{
+ int i, byte, bit, bitnum;
+ ASSERT (width <= 128);
+ memset (a, 0, sizeof (a[0]));
+ for (i = 0; i < width; i++)
+ {
+ bitnum = (7 - (i & 7));
+ byte = i / 8;
+ bit = 1 << bitnum;
+ a->as_u8[byte] |= bit;
+ }
+}
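
The mask helpers above are typically paired, e.g. to test whether two addresses share a /64 (a minimal sketch):

    ip6_address_t mask;
    ip6_address_mask_from_width (&mask, 64);  /* ffff:ffff:ffff:ffff:: */
    if (ip6_address_is_equal_masked (&a, &b, &mask))
      ;  /* a and b are in the same /64 */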
+
+always_inline uword
+ip6_address_is_zero (ip6_address_t * a)
+{
+ int i;
+ for (i = 0; i < ARRAY_LEN (a->as_uword); i++)
+ if (a->as_uword[i] != 0)
+ return 0;
+ return 1;
+}
+
+/* Check for the unspecified address :: */
+always_inline uword
+ip6_address_is_unspecified (ip6_address_t * a)
+{
+ return ip6_address_is_zero (a);
+}
+
+/* Check for loopback address ::1 */
+always_inline uword
+ip6_address_is_loopback (ip6_address_t * a)
+{
+ uword is_loopback;
+ u8 save = a->as_u8[15];
+ a->as_u8[15] = save ^ 1;
+ is_loopback = ip6_address_is_zero (a);
+ a->as_u8[15] = save;
+ return is_loopback;
+}
+
+/* Check for link local unicast fe80::/10. */
+always_inline uword
+ip6_address_is_link_local_unicast (ip6_address_t * a)
+{
+ return a->as_u8[0] == 0xfe && (a->as_u8[1] & 0xc0) == 0x80;
+}
+
+/* Check for unique local unicast fc00::/7. */
+always_inline uword
+ip6_address_is_local_unicast (ip6_address_t * a)
+{
+ return (a->as_u8[0] & 0xfe) == 0xfc;
+}
+
+/* Check for global unicast 2000::/3. */
+always_inline uword
+ip6_address_is_global_unicast (ip6_address_t * a)
+{
+ return (a->as_u8[0] & 0xe0) == 0x20;
+}
+
+/* Check for solicited node multicast 0xff02::1:ff00:0/104 */
+always_inline uword
+ip6_is_solicited_node_multicast_address (ip6_address_t * a)
+{
+ return (a->as_u32[0] == clib_host_to_net_u32 (0xff020000)
+ && a->as_u32[1] == 0
+ && a->as_u32[2] == clib_host_to_net_u32 (1)
+ && a->as_u8[12] == 0xff);
+}
+
+typedef struct
+{
+ /* 4 bit version, 8 bit traffic class and 20 bit flow label. */
+ u32 ip_version_traffic_class_and_flow_label;
+
+ /* Total packet length not including this header (but including
+ any extension headers if present). */
+ u16 payload_length;
+
+ /* Protocol for next header. */
+ u8 protocol;
+
+ /* Hop limit decremented by router at each hop. */
+ u8 hop_limit;
+
+ /* Source and destination address. */
+ ip6_address_t src_address, dst_address;
+} ip6_header_t;
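
The first word packs the three fields per RFC 2460: version in the top 4 bits, traffic class in the next 8, flow label in the low 20 (the same offsets the packet-generator bitfield edits later in this patch use). A minimal decoding sketch, given ip6_header_t *ip6:

    u32 w = clib_net_to_host_u32 (ip6->ip_version_traffic_class_and_flow_label);
    u8 version = w >> 28;             /* always 6 */
    u8 traffic_class = (w >> 20) & 0xff;
    u32 flow_label = w & 0x000fffff;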
+
+always_inline void *
+ip6_next_header (ip6_header_t * i)
+{
+ return (void *) (i + 1);
+}
+
+always_inline void
+ip6_copy_header (ip6_header_t * dst, const ip6_header_t * src)
+{
+ dst->ip_version_traffic_class_and_flow_label =
+ src->ip_version_traffic_class_and_flow_label;
+ dst->payload_length = src->payload_length;
+ dst->protocol = src->protocol;
+ dst->hop_limit = src->hop_limit;
+
+ dst->src_address.as_uword[0] = src->src_address.as_uword[0];
+ dst->src_address.as_uword[1] = src->src_address.as_uword[1];
+ dst->dst_address.as_uword[0] = src->dst_address.as_uword[0];
+ dst->dst_address.as_uword[1] = src->dst_address.as_uword[1];
+}
+
+always_inline void
+ip6_tcp_reply_x1 (ip6_header_t * ip0, tcp_header_t * tcp0)
+{
+ {
+ ip6_address_t src0, dst0;
+
+ src0 = ip0->src_address;
+ dst0 = ip0->dst_address;
+ ip0->src_address = dst0;
+ ip0->dst_address = src0;
+ }
+
+ {
+ u16 src0, dst0;
+
+ src0 = tcp0->ports.src;
+ dst0 = tcp0->ports.dst;
+ tcp0->ports.src = dst0;
+ tcp0->ports.dst = src0;
+ }
+}
+
+always_inline void
+ip6_tcp_reply_x2 (ip6_header_t * ip0, ip6_header_t * ip1,
+ tcp_header_t * tcp0, tcp_header_t * tcp1)
+{
+ {
+ ip6_address_t src0, dst0, src1, dst1;
+
+ src0 = ip0->src_address;
+ src1 = ip1->src_address;
+ dst0 = ip0->dst_address;
+ dst1 = ip1->dst_address;
+ ip0->src_address = dst0;
+ ip1->src_address = dst1;
+ ip0->dst_address = src0;
+ ip1->dst_address = src1;
+ }
+
+ {
+ u16 src0, dst0, src1, dst1;
+
+ src0 = tcp0->ports.src;
+ src1 = tcp1->ports.src;
+ dst0 = tcp0->ports.dst;
+ dst1 = tcp1->ports.dst;
+ tcp0->ports.src = dst0;
+ tcp1->ports.src = dst1;
+ tcp0->ports.dst = src0;
+ tcp1->ports.dst = src1;
+ }
+}
+
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u8 data;
+}) ip6_pad1_option_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u8 type;
+ u8 len;
+ u8 data[0];
+}) ip6_padN_option_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+#define IP6_MLDP_ALERT_TYPE 0x5
+ u8 type;
+ u8 len;
+ u16 value;
+}) ip6_router_alert_option_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u8 next_hdr;
+ /* Extension header length in 8-octet units, not including the first 8 octets. */
+ u8 n_data_u64s;
+}) ip6_ext_header_t;
+
+always_inline u8 ip6_ext_hdr(u8 nexthdr)
+{
+ /*
+ * find out if nexthdr is an extension header or a protocol
+ */
+ return (nexthdr == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) ||
+ (nexthdr == IP_PROTOCOL_IP6_NONXT) ||
+ (nexthdr == IP_PROTOCOL_IPV6_FRAGMENTATION) ||
+ (nexthdr == IP_PROTOCOL_IPSEC_AH) ||
+ (nexthdr == IP_PROTOCOL_IPV6_ROUTE) ||
+ (nexthdr == IP_PROTOCOL_IP6_DESTINATION_OPTIONS);
+}
+
+#define ip6_ext_header_len(p) (((p)->n_data_u64s+1) << 3)
+#define ip6_ext_authhdr_len(p) (((p)->n_data_u64s+2) << 2)
+
+always_inline void *
+ip6_ext_next_header (ip6_ext_header_t *ext_hdr )
+{ return (void *)((u8 *) ext_hdr + ip6_ext_header_len(ext_hdr)); }
+
+typedef CLIB_PACKED (struct {
+ u8 next_hdr;
+ /* Extension header length in 8-octet units, not including the first 8 octets. */
+ u8 n_data_u64s;
+ u8 data[0];
+}) ip6_hop_by_hop_ext_t;
+/* *INDENT-ON* */
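
A minimal sketch of walking the extension-header chain with ip6_ext_hdr() and ip6_ext_next_header(); a real caller must also bound-check each step against the buffer length (as the fragmentation code later in this patch does), and AH needs ip6_ext_authhdr_len() since its length field counts 4-octet units. The explicit NONXT test stops the walk at "no next header", which ip6_ext_hdr() also accepts:

    u8 nh = ip6->protocol;
    void *hdr = ip6_next_header (ip6);
    while (ip6_ext_hdr (nh) && nh != IP_PROTOCOL_IP6_NONXT)
      {
        ip6_ext_header_t *e = hdr;
        nh = e->next_hdr;
        hdr = ip6_ext_next_header (e);
      }
    /* nh is now the upper-layer protocol; hdr points at its header */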
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u8 next_hdr;
+ u8 rsv;
+ u16 fragment_offset_and_more;
+ u32 identification;
+}) ip6_frag_hdr_t;
+/* *INDENT-ON* */
+
+#define ip6_frag_hdr_offset(hdr) \
+ (clib_net_to_host_u16((hdr)->fragment_offset_and_more) >> 3)
+
+#define ip6_frag_hdr_more(hdr) \
+ (clib_net_to_host_u16((hdr)->fragment_offset_and_more) & 0x1)
+
+#define ip6_frag_hdr_offset_and_more(offset, more) \
+ clib_host_to_net_u16(((offset) << 3) + !!(more))
+
+#endif /* included_ip6_packet_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip6_pg.c b/src/vnet/ip/ip6_pg.c
new file mode 100644
index 00000000000..ba1e4ad9a58
--- /dev/null
+++ b/src/vnet/ip/ip6_pg.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip6_pg: IP v6 packet-generator interface
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/ip/ip.h>
+#include <vnet/pg/pg.h>
+
+static void
+ip6_pg_edit_function (pg_main_t * pg,
+ pg_stream_t * s,
+ pg_edit_group_t * g, u32 * packets, u32 n_packets)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ u32 ip_header_offset = g->start_byte_offset;
+
+ while (n_packets >= 2)
+ {
+ u32 pi0, pi1;
+ vlib_buffer_t *p0, *p1;
+ ip6_header_t *ip0, *ip1;
+
+ pi0 = packets[0];
+ pi1 = packets[1];
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+ n_packets -= 2;
+ packets += 2;
+
+ ip0 = (void *) (p0->data + ip_header_offset);
+ ip1 = (void *) (p1->data + ip_header_offset);
+
+ ip0->payload_length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, p0) -
+ ip_header_offset - sizeof (ip0[0]));
+ ip1->payload_length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, p1) -
+ ip_header_offset - sizeof (ip1[0]));
+ }
+
+ while (n_packets >= 1)
+ {
+ u32 pi0;
+ vlib_buffer_t *p0;
+ ip6_header_t *ip0;
+
+ pi0 = packets[0];
+ p0 = vlib_get_buffer (vm, pi0);
+ n_packets -= 1;
+ packets += 1;
+
+ ip0 = (void *) (p0->data + ip_header_offset);
+
+ ip0->payload_length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, p0) -
+ ip_header_offset - sizeof (ip0[0]));
+ }
+}
+
+typedef struct
+{
+ pg_edit_t ip_version;
+ pg_edit_t traffic_class;
+ pg_edit_t flow_label;
+ pg_edit_t payload_length;
+ pg_edit_t protocol;
+ pg_edit_t hop_limit;
+ pg_edit_t src_address, dst_address;
+} pg_ip6_header_t;
+
+static inline void
+pg_ip6_header_init (pg_ip6_header_t * p)
+{
+ /* Initialize fields that are not bit fields in the IP header. */
+#define _(f) pg_edit_init (&p->f, ip6_header_t, f);
+ _(payload_length);
+ _(hop_limit);
+ _(protocol);
+ _(src_address);
+ _(dst_address);
+#undef _
+
+ /* Initialize bit fields. */
+ pg_edit_init_bitfield (&p->ip_version, ip6_header_t,
+ ip_version_traffic_class_and_flow_label, 28, 4);
+ pg_edit_init_bitfield (&p->traffic_class, ip6_header_t,
+ ip_version_traffic_class_and_flow_label, 20, 8);
+ pg_edit_init_bitfield (&p->flow_label, ip6_header_t,
+ ip_version_traffic_class_and_flow_label, 0, 20);
+}
+
+uword
+unformat_pg_ip6_header (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t *s = va_arg (*args, pg_stream_t *);
+ pg_ip6_header_t *p;
+ u32 group_index;
+
+ p = pg_create_edit_group (s, sizeof (p[0]), sizeof (ip6_header_t),
+ &group_index);
+ pg_ip6_header_init (p);
+
+ /* Defaults. */
+ pg_edit_set_fixed (&p->ip_version, 6);
+ pg_edit_set_fixed (&p->traffic_class, 0);
+ pg_edit_set_fixed (&p->flow_label, 0);
+ pg_edit_set_fixed (&p->hop_limit, 64);
+
+ p->payload_length.type = PG_EDIT_UNSPECIFIED;
+
+ if (!unformat (input, "%U: %U -> %U",
+ unformat_pg_edit,
+ unformat_ip_protocol, &p->protocol,
+ unformat_pg_edit,
+ unformat_ip6_address, &p->src_address,
+ unformat_pg_edit, unformat_ip6_address, &p->dst_address))
+ goto error;
+
+ /* Parse options. */
+ while (1)
+ {
+ if (unformat (input, "version %U",
+ unformat_pg_edit, unformat_pg_number, &p->ip_version))
+ ;
+
+ else if (unformat (input, "traffic-class %U",
+ unformat_pg_edit,
+ unformat_pg_number, &p->traffic_class))
+ ;
+
+ else if (unformat (input, "length %U",
+ unformat_pg_edit,
+ unformat_pg_number, &p->payload_length))
+ ;
+
+ else if (unformat (input, "hop-limit %U",
+ unformat_pg_edit, unformat_pg_number, &p->hop_limit))
+ ;
+
+ /* Can't parse input: try next protocol level. */
+ else
+ break;
+ }
+
+ {
+ ip_main_t *im = &ip_main;
+ ip_protocol_t protocol;
+ ip_protocol_info_t *pi;
+
+ pi = 0;
+ if (p->protocol.type == PG_EDIT_FIXED)
+ {
+ protocol = pg_edit_get_value (&p->protocol, PG_EDIT_LO);
+ pi = ip_get_protocol_info (im, protocol);
+ }
+
+ if (pi && pi->unformat_pg_edit
+ && unformat_user (input, pi->unformat_pg_edit, s))
+ ;
+
+ else if (!unformat_user (input, unformat_pg_payload, s))
+ goto error;
+
+ if (p->payload_length.type == PG_EDIT_UNSPECIFIED
+ && s->min_packet_bytes == s->max_packet_bytes
+ && group_index + 1 < vec_len (s->edit_groups))
+ {
+ pg_edit_set_fixed (&p->payload_length,
+ pg_edit_group_n_bytes (s,
+ group_index) -
+ sizeof (ip6_header_t));
+ }
+
+ p = pg_get_edit_group (s, group_index);
+ if (p->payload_length.type == PG_EDIT_UNSPECIFIED)
+ {
+ pg_edit_group_t *g = pg_stream_get_group (s, group_index);
+ g->edit_function = ip6_pg_edit_function;
+ }
+
+ return 1;
+ }
+
+error:
+ /* Free up any edits we may have added. */
+ pg_free_edit_group (s);
+ return 0;
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
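
Per the unformat string above, unformat_pg_ip6_header accepts a header specification of the form "<ip-protocol>: <src> -> <dst>", optionally followed by version/traffic-class/length/hop-limit edits, before handing off to the named protocol's own unformat function. A hypothetical packet-generator fragment it would parse (addresses drawn from the documentation prefix):

    UDP: 2001:db8::1 -> 2001:db8::2 hop-limit 32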
diff --git a/src/vnet/ip/ip_api.c b/src/vnet/ip/ip_api.c
new file mode 100644
index 00000000000..cd9b7397d29
--- /dev/null
+++ b/src/vnet/ip/ip_api.c
@@ -0,0 +1,1196 @@
+/*
+ *------------------------------------------------------------------
+ * ip_api.c - vnet ip api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ip/ip6_neighbor.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/fib_api.h>
+#include <vnet/dpo/drop_dpo.h>
+#include <vnet/dpo/receive_dpo.h>
+#include <vnet/dpo/lookup_dpo.h>
+#include <vnet/dpo/classify_dpo.h>
+#include <vnet/dpo/ip_null_dpo.h>
+#include <vnet/ethernet/arp_packet.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define endian-swap functions */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_ip_api_msg \
+_(IP_FIB_DUMP, ip_fib_dump) \
+_(IP_FIB_DETAILS, ip_fib_details) \
+_(IP6_FIB_DUMP, ip6_fib_dump) \
+_(IP6_FIB_DETAILS, ip6_fib_details) \
+_(IP_NEIGHBOR_DUMP, ip_neighbor_dump) \
+_(IP_NEIGHBOR_DETAILS, ip_neighbor_details) \
+_(IP_ADDRESS_DUMP, ip_address_dump) \
+_(IP_DUMP, ip_dump) \
+_(IP_NEIGHBOR_ADD_DEL, ip_neighbor_add_del) \
+_(IP_ADD_DEL_ROUTE, ip_add_del_route) \
+_(SET_IP_FLOW_HASH,set_ip_flow_hash) \
+_(SW_INTERFACE_IP6ND_RA_CONFIG, sw_interface_ip6nd_ra_config) \
+_(SW_INTERFACE_IP6ND_RA_PREFIX, sw_interface_ip6nd_ra_prefix) \
+_(SW_INTERFACE_IP6_ENABLE_DISABLE, sw_interface_ip6_enable_disable ) \
+_(SW_INTERFACE_IP6_SET_LINK_LOCAL_ADDRESS, \
+ sw_interface_ip6_set_link_local_address)
+
+extern void stats_dslock_with_hint (int hint, int tag);
+extern void stats_dsunlock (void);
+
+static void
+send_ip_neighbor_details (u8 is_ipv6,
+ u8 is_static,
+ u8 * mac_address,
+ u8 * ip_address,
+ unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_ip_neighbor_details_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IP_NEIGHBOR_DETAILS);
+ mp->context = context;
+ mp->is_ipv6 = is_ipv6;
+ mp->is_static = is_static;
+ memcpy (mp->mac_address, mac_address, 6);
+ memcpy (mp->ip_address, ip_address, (is_ipv6) ? 16 : 4);
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_ip_neighbor_details_t_handler (vl_api_ip_neighbor_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void
+vl_api_ip_neighbor_dump_t_handler (vl_api_ip_neighbor_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+
+ if (mp->is_ipv6)
+ {
+ ip6_neighbor_t *n, *ns;
+
+ ns = ip6_neighbors_entries (sw_if_index);
+ /* *INDENT-OFF* */
+ vec_foreach (n, ns)
+ {
+ send_ip_neighbor_details
+ (mp->is_ipv6, ((n->flags & IP6_NEIGHBOR_FLAG_STATIC) ? 1 : 0),
+ (u8 *) n->link_layer_address,
+ (u8 *) & (n->key.ip6_address.as_u8),
+ q, mp->context);
+ }
+ /* *INDENT-ON* */
+ vec_free (ns);
+ }
+ else
+ {
+ ethernet_arp_ip4_entry_t *n, *ns;
+
+ ns = ip4_neighbor_entries (sw_if_index);
+ /* *INDENT-OFF* */
+ vec_foreach (n, ns)
+ {
+ send_ip_neighbor_details (mp->is_ipv6,
+ ((n->flags & ETHERNET_ARP_IP4_ENTRY_FLAG_STATIC) ? 1 : 0),
+ (u8*) n->ethernet_address,
+ (u8*) & (n->ip4_address.as_u8),
+ q, mp->context);
+ }
+ /* *INDENT-ON* */
+ vec_free (ns);
+ }
+}
+
+
+void
+copy_fib_next_hop (fib_route_path_encode_t * api_rpath, void *fp_arg)
+{
+ int is_ip4;
+ vl_api_fib_path_t *fp = (vl_api_fib_path_t *) fp_arg;
+
+ if (api_rpath->rpath.frp_proto == FIB_PROTOCOL_IP4)
+ fp->afi = IP46_TYPE_IP4;
+ else if (api_rpath->rpath.frp_proto == FIB_PROTOCOL_IP6)
+ fp->afi = IP46_TYPE_IP6;
+ else
+ {
+ is_ip4 = ip46_address_is_ip4 (&api_rpath->rpath.frp_addr);
+ if (is_ip4)
+ fp->afi = IP46_TYPE_IP4;
+ else
+ fp->afi = IP46_TYPE_IP6;
+ }
+ if (fp->afi == IP46_TYPE_IP4)
+ memcpy (fp->next_hop, &api_rpath->rpath.frp_addr.ip4,
+ sizeof (api_rpath->rpath.frp_addr.ip4));
+ else
+ memcpy (fp->next_hop, &api_rpath->rpath.frp_addr.ip6,
+ sizeof (api_rpath->rpath.frp_addr.ip6));
+}
+
+static void
+vl_api_ip_fib_details_t_handler (vl_api_ip_fib_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void
+vl_api_ip_fib_details_t_endian (vl_api_ip_fib_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void
+vl_api_ip_fib_details_t_print (vl_api_ip_fib_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void
+send_ip_fib_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t * q,
+ u32 table_id, fib_prefix_t * pfx,
+ fib_route_path_encode_t * api_rpaths, u32 context)
+{
+ vl_api_ip_fib_details_t *mp;
+ fib_route_path_encode_t *api_rpath;
+ vl_api_fib_path_t *fp;
+ int path_count;
+
+ path_count = vec_len (api_rpaths);
+ mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp));
+ if (!mp)
+ return;
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IP_FIB_DETAILS);
+ mp->context = context;
+
+ mp->table_id = htonl (table_id);
+ mp->address_length = pfx->fp_len;
+ memcpy (mp->address, &pfx->fp_addr.ip4, sizeof (pfx->fp_addr.ip4));
+
+ mp->count = htonl (path_count);
+ fp = mp->path;
+ vec_foreach (api_rpath, api_rpaths)
+ {
+ memset (fp, 0, sizeof (*fp));
+ switch (api_rpath->dpo.dpoi_type)
+ {
+ case DPO_RECEIVE:
+ fp->is_local = true;
+ break;
+ case DPO_DROP:
+ fp->is_drop = true;
+ break;
+ case DPO_IP_NULL:
+ switch (api_rpath->dpo.dpoi_index)
+ {
+ case IP_NULL_ACTION_NONE:
+ fp->is_drop = true;
+ break;
+ case IP_NULL_ACTION_SEND_ICMP_UNREACH:
+ fp->is_unreach = true;
+ break;
+ case IP_NULL_ACTION_SEND_ICMP_PROHIBIT:
+ fp->is_prohibit = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ fp->weight = htonl (api_rpath->rpath.frp_weight);
+ fp->sw_if_index = htonl (api_rpath->rpath.frp_sw_if_index);
+ copy_fib_next_hop (api_rpath, fp);
+ fp++;
+ }
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_ip_fib_dump_t_handler (vl_api_ip_fib_dump_t * mp)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+ unix_shared_memory_queue_t *q;
+ ip4_main_t *im = &ip4_main;
+ fib_table_t *fib_table;
+ fib_node_index_t lfei, *lfeip, *lfeis = NULL;
+ mpls_label_t key;
+ fib_prefix_t pfx;
+ u32 fib_index;
+ fib_route_path_encode_t *api_rpaths;
+ int i;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ /* *INDENT-OFF* */
+ pool_foreach (fib_table, im->fibs,
+ ({
+ for (i = 0; i < ARRAY_LEN (fib_table->v4.fib_entry_by_dst_address); i++)
+ {
+ hash_foreach(key, lfei, fib_table->v4.fib_entry_by_dst_address[i],
+ ({
+ vec_add1(lfeis, lfei);
+ }));
+ }
+ }));
+ /* *INDENT-ON* */
+
+ vec_sort_with_function (lfeis, fib_entry_cmp_for_sort);
+
+ vec_foreach (lfeip, lfeis)
+ {
+ fib_entry_get_prefix (*lfeip, &pfx);
+ fib_index = fib_entry_get_fib_index (*lfeip);
+ fib_table = fib_table_get (fib_index, pfx.fp_proto);
+ api_rpaths = NULL;
+ fib_entry_encode (*lfeip, &api_rpaths);
+ send_ip_fib_details (am, q,
+ fib_table->ft_table_id, &pfx, api_rpaths,
+ mp->context);
+ vec_free (api_rpaths);
+ }
+
+ vec_free (lfeis);
+}
+
+static void
+vl_api_ip6_fib_details_t_handler (vl_api_ip6_fib_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void
+vl_api_ip6_fib_details_t_endian (vl_api_ip6_fib_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void
+vl_api_ip6_fib_details_t_print (vl_api_ip6_fib_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void
+send_ip6_fib_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t * q,
+ u32 table_id, fib_prefix_t * pfx,
+ fib_route_path_encode_t * api_rpaths, u32 context)
+{
+ vl_api_ip6_fib_details_t *mp;
+ fib_route_path_encode_t *api_rpath;
+ vl_api_fib_path_t *fp;
+ int path_count;
+
+ path_count = vec_len (api_rpaths);
+ mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp));
+ if (!mp)
+ return;
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IP6_FIB_DETAILS);
+ mp->context = context;
+
+ mp->table_id = htonl (table_id);
+ mp->address_length = pfx->fp_len;
+ memcpy (mp->address, &pfx->fp_addr.ip6, sizeof (pfx->fp_addr.ip6));
+
+ mp->count = htonl (path_count);
+ fp = mp->path;
+ vec_foreach (api_rpath, api_rpaths)
+ {
+ memset (fp, 0, sizeof (*fp));
+ switch (api_rpath->dpo.dpoi_type)
+ {
+ case DPO_RECEIVE:
+ fp->is_local = true;
+ break;
+ case DPO_DROP:
+ fp->is_drop = true;
+ break;
+ case DPO_IP_NULL:
+ switch (api_rpath->dpo.dpoi_index)
+ {
+ case IP_NULL_DPO_ACTION_NUM + IP_NULL_ACTION_NONE:
+ fp->is_drop = true;
+ break;
+ case IP_NULL_DPO_ACTION_NUM + IP_NULL_ACTION_SEND_ICMP_UNREACH:
+ fp->is_unreach = true;
+ break;
+ case IP_NULL_DPO_ACTION_NUM + IP_NULL_ACTION_SEND_ICMP_PROHIBIT:
+ fp->is_prohibit = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ fp->weight = htonl (api_rpath->rpath.frp_weight);
+ fp->sw_if_index = htonl (api_rpath->rpath.frp_sw_if_index);
+ copy_fib_next_hop (api_rpath, fp);
+ fp++;
+ }
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+typedef struct apt_ip6_fib_show_ctx_t_
+{
+ u32 fib_index;
+ fib_node_index_t *entries;
+} api_ip6_fib_show_ctx_t;
+
+static void
+api_ip6_fib_table_put_entries (clib_bihash_kv_24_8_t * kvp, void *arg)
+{
+ api_ip6_fib_show_ctx_t *ctx = arg;
+
+ if ((kvp->key[2] >> 32) == ctx->fib_index)
+ {
+ vec_add1 (ctx->entries, kvp->value);
+ }
+}
+
+static void
+api_ip6_fib_table_get_all (unix_shared_memory_queue_t * q,
+ vl_api_ip6_fib_dump_t * mp,
+ fib_table_t * fib_table)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+ ip6_main_t *im6 = &ip6_main;
+ ip6_fib_t *fib = &fib_table->v6;
+ fib_node_index_t *fib_entry_index;
+ api_ip6_fib_show_ctx_t ctx = {
+ .fib_index = fib->index,.entries = NULL,
+ };
+ fib_route_path_encode_t *api_rpaths;
+ fib_prefix_t pfx;
+
+ BV (clib_bihash_foreach_key_value_pair)
+ ((BVT (clib_bihash) *) & im6->ip6_table[IP6_FIB_TABLE_NON_FWDING].
+ ip6_hash, api_ip6_fib_table_put_entries, &ctx);
+
+ vec_sort_with_function (ctx.entries, fib_entry_cmp_for_sort);
+
+ vec_foreach (fib_entry_index, ctx.entries)
+ {
+ fib_entry_get_prefix (*fib_entry_index, &pfx);
+ api_rpaths = NULL;
+ fib_entry_encode (*fib_entry_index, &api_rpaths);
+ send_ip6_fib_details (am, q,
+ fib_table->ft_table_id,
+ &pfx, api_rpaths, mp->context);
+ vec_free (api_rpaths);
+ }
+
+ vec_free (ctx.entries);
+}
+
+static void
+vl_api_ip6_fib_dump_t_handler (vl_api_ip6_fib_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+ ip6_main_t *im6 = &ip6_main;
+ fib_table_t *fib_table;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ /* *INDENT-OFF* */
+ pool_foreach (fib_table, im6->fibs,
+ ({
+ api_ip6_fib_table_get_all(q, mp, fib_table);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_ip_neighbor_add_del_t_handler (vl_api_ip_neighbor_add_del_t * mp,
+ vlib_main_t * vm)
+{
+ vl_api_ip_neighbor_add_del_reply_t *rmp;
+ vnet_main_t *vnm = vnet_get_main ();
+ int rv = 0;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ stats_dslock_with_hint (1 /* release hint */ , 7 /* tag */ );
+
+ /*
+ * there's no validation here of the ND/ARP entry being added.
+ * The expectation is that the FIB will ensure that nothing bad
+ * will come of adding bogus entries.
+ */
+ if (mp->is_ipv6)
+ {
+ if (mp->is_add)
+ rv = vnet_set_ip6_ethernet_neighbor
+ (vm, ntohl (mp->sw_if_index),
+ (ip6_address_t *) (mp->dst_address),
+ mp->mac_address, sizeof (mp->mac_address), mp->is_static);
+ else
+ rv = vnet_unset_ip6_ethernet_neighbor
+ (vm, ntohl (mp->sw_if_index),
+ (ip6_address_t *) (mp->dst_address),
+ mp->mac_address, sizeof (mp->mac_address));
+ }
+ else
+ {
+ ethernet_arp_ip4_over_ethernet_address_t a;
+
+ clib_memcpy (&a.ethernet, mp->mac_address, 6);
+ clib_memcpy (&a.ip4, mp->dst_address, 4);
+
+ if (mp->is_add)
+ rv = vnet_arp_set_ip4_over_ethernet (vnm, ntohl (mp->sw_if_index),
+ &a, mp->is_static);
+ else
+ rv =
+ vnet_arp_unset_ip4_over_ethernet (vnm, ntohl (mp->sw_if_index), &a);
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ stats_dsunlock ();
+ REPLY_MACRO (VL_API_IP_NEIGHBOR_ADD_DEL_REPLY);
+}
+
+int
+add_del_route_t_handler (u8 is_multipath,
+ u8 is_add,
+ u8 is_drop,
+ u8 is_unreach,
+ u8 is_prohibit,
+ u8 is_local,
+ u8 is_classify,
+ u32 classify_table_index,
+ u8 is_resolve_host,
+ u8 is_resolve_attached,
+ u32 fib_index,
+ const fib_prefix_t * prefix,
+ u8 next_hop_proto_is_ip4,
+ const ip46_address_t * next_hop,
+ u32 next_hop_sw_if_index,
+ u8 next_hop_fib_index,
+ u32 next_hop_weight,
+ mpls_label_t next_hop_via_label,
+ mpls_label_t * next_hop_out_label_stack)
+{
+ vnet_classify_main_t *cm = &vnet_classify_main;
+ fib_route_path_flags_t path_flags = FIB_ROUTE_PATH_FLAG_NONE;
+ fib_route_path_t path = {
+ .frp_proto = (next_hop_proto_is_ip4 ?
+ FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6),
+ .frp_addr = (NULL == next_hop ? zero_addr : *next_hop),
+ .frp_sw_if_index = next_hop_sw_if_index,
+ .frp_fib_index = next_hop_fib_index,
+ .frp_weight = next_hop_weight,
+ .frp_label_stack = next_hop_out_label_stack,
+ };
+ fib_route_path_t *paths = NULL;
+
+ if (MPLS_LABEL_INVALID != next_hop_via_label)
+ {
+ path.frp_proto = FIB_PROTOCOL_MPLS;
+ path.frp_local_label = next_hop_via_label;
+ }
+ if (is_resolve_host)
+ path_flags |= FIB_ROUTE_PATH_RESOLVE_VIA_HOST;
+ if (is_resolve_attached)
+ path_flags |= FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED;
+
+ path.frp_flags = path_flags;
+
+ if (is_multipath)
+ {
+ stats_dslock_with_hint (1 /* release hint */ , 10 /* tag */ );
+
+
+ vec_add1 (paths, path);
+
+ if (is_add)
+ fib_table_entry_path_add2 (fib_index,
+ prefix,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE, paths);
+ else
+ fib_table_entry_path_remove2 (fib_index,
+ prefix, FIB_SOURCE_API, paths);
+
+ vec_free (paths);
+ stats_dsunlock ();
+ return 0;
+ }
+
+ stats_dslock_with_hint (1 /* release hint */ , 2 /* tag */ );
+
+ if (is_drop || is_local || is_classify || is_unreach || is_prohibit)
+ {
+ /*
+ * special route types that link directly to the adj
+ */
+ if (is_add)
+ {
+ dpo_id_t dpo = DPO_INVALID;
+ dpo_proto_t dproto;
+
+ dproto = fib_proto_to_dpo (prefix->fp_proto);
+
+ if (is_drop)
+ ip_null_dpo_add_and_lock (dproto, IP_NULL_ACTION_NONE, &dpo);
+ else if (is_local)
+ receive_dpo_add_or_lock (dproto, ~0, NULL, &dpo);
+ else if (is_unreach)
+ ip_null_dpo_add_and_lock (dproto,
+ IP_NULL_ACTION_SEND_ICMP_UNREACH, &dpo);
+ else if (is_prohibit)
+ ip_null_dpo_add_and_lock (dproto,
+ IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
+ &dpo);
+ else if (is_classify)
+ {
+ if (pool_is_free_index (cm->tables,
+ ntohl (classify_table_index)))
+ {
+ stats_dsunlock ();
+ return VNET_API_ERROR_NO_SUCH_TABLE;
+ }
+
+ dpo_set (&dpo, DPO_CLASSIFY, dproto,
+ classify_dpo_create (dproto,
+ ntohl (classify_table_index)));
+ }
+ else
+ {
+ stats_dsunlock ();
+ return VNET_API_ERROR_NO_SUCH_TABLE;
+ }
+
+ fib_table_entry_special_dpo_update (fib_index,
+ prefix,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_EXCLUSIVE, &dpo);
+ dpo_reset (&dpo);
+ }
+ else
+ {
+ fib_table_entry_special_remove (fib_index, prefix, FIB_SOURCE_API);
+ }
+ }
+ else
+ {
+ if (is_add)
+ {
+ vec_add1 (paths, path);
+ fib_table_entry_update (fib_index,
+ prefix,
+ FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, paths);
+ vec_free (paths);
+ }
+ else
+ {
+ fib_table_entry_delete (fib_index, prefix, FIB_SOURCE_API);
+ }
+ }
+
+ stats_dsunlock ();
+ return (0);
+}
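
For illustration, a minimal sketch of driving this handler directly to install a drop route for 10.0.0.0/8 in FIB index 0 (values are illustrative; as above, a NULL next hop falls back to zero_addr):

    fib_prefix_t pfx = {
      .fp_len = 8,
      .fp_proto = FIB_PROTOCOL_IP4,
      .fp_addr.ip4.as_u32 = clib_host_to_net_u32 (0x0a000000),
    };
    add_del_route_t_handler (/* is_multipath */ 0, /* is_add */ 1,
                             /* is_drop */ 1, /* is_unreach */ 0,
                             /* is_prohibit */ 0, /* is_local */ 0,
                             /* is_classify */ 0, /* classify_table_index */ ~0,
                             /* is_resolve_host */ 0, /* is_resolve_attached */ 0,
                             /* fib_index */ 0, &pfx,
                             /* next_hop_proto_is_ip4 */ 1, NULL,
                             /* next_hop_sw_if_index */ ~0,
                             /* next_hop_fib_index */ 0, /* next_hop_weight */ 1,
                             MPLS_LABEL_INVALID, NULL);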
+
+int
+add_del_route_check (fib_protocol_t table_proto,
+ u32 table_id,
+ u32 next_hop_sw_if_index,
+ fib_protocol_t next_hop_table_proto,
+ u32 next_hop_table_id,
+ u8 create_missing_tables,
+ u32 * fib_index, u32 * next_hop_fib_index)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+
+ *fib_index = fib_table_find (table_proto, ntohl (table_id));
+ if (~0 == *fib_index)
+ {
+ if (create_missing_tables)
+ {
+ *fib_index = fib_table_find_or_create_and_lock (table_proto,
+ ntohl (table_id));
+ }
+ else
+ {
+ /* No such VRF, and we weren't asked to create one */
+ return VNET_API_ERROR_NO_SUCH_FIB;
+ }
+ }
+
+ if (~0 != ntohl (next_hop_sw_if_index))
+ {
+ if (pool_is_free_index (vnm->interface_main.sw_interfaces,
+ ntohl (next_hop_sw_if_index)))
+ {
+ return VNET_API_ERROR_NO_MATCHING_INTERFACE;
+ }
+ }
+ else
+ {
+ *next_hop_fib_index = fib_table_find (next_hop_table_proto,
+ ntohl (next_hop_table_id));
+
+ if (~0 == *next_hop_fib_index)
+ {
+ if (create_missing_tables)
+ {
+ *next_hop_fib_index =
+ fib_table_find_or_create_and_lock (next_hop_table_proto,
+ ntohl (next_hop_table_id));
+ }
+ else
+ {
+ /* No such VRF, and we weren't asked to create one */
+ return VNET_API_ERROR_NO_SUCH_FIB;
+ }
+ }
+ }
+
+ return (0);
+}
+
+static int
+ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
+{
+ u32 fib_index, next_hop_fib_index;
+ mpls_label_t *label_stack = NULL;
+ int rv, ii, n_labels;
+
+ rv = add_del_route_check (FIB_PROTOCOL_IP4,
+ mp->table_id,
+ mp->next_hop_sw_if_index,
+ FIB_PROTOCOL_IP4,
+ mp->next_hop_table_id,
+ mp->create_vrf_if_needed,
+ &fib_index, &next_hop_fib_index);
+
+ if (0 != rv)
+ return (rv);
+
+ fib_prefix_t pfx = {
+ .fp_len = mp->dst_address_length,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ };
+ clib_memcpy (&pfx.fp_addr.ip4, mp->dst_address, sizeof (pfx.fp_addr.ip4));
+
+ ip46_address_t nh;
+ memset (&nh, 0, sizeof (nh));
+ memcpy (&nh.ip4, mp->next_hop_address, sizeof (nh.ip4));
+
+ n_labels = mp->next_hop_n_out_labels;
+ if (n_labels == 0)
+ ;
+ else if (1 == n_labels)
+ vec_add1 (label_stack, ntohl (mp->next_hop_out_label_stack[0]));
+ else
+ {
+ vec_validate (label_stack, n_labels - 1);
+ for (ii = 0; ii < n_labels; ii++)
+ label_stack[ii] = ntohl (mp->next_hop_out_label_stack[ii]);
+ }
+
+ return (add_del_route_t_handler (mp->is_multipath,
+ mp->is_add,
+ mp->is_drop,
+ mp->is_unreach,
+ mp->is_prohibit,
+ mp->is_local,
+ mp->is_classify,
+ mp->classify_table_index,
+ mp->is_resolve_host,
+ mp->is_resolve_attached,
+ fib_index, &pfx, 1,
+ &nh,
+ ntohl (mp->next_hop_sw_if_index),
+ next_hop_fib_index,
+ mp->next_hop_weight,
+ ntohl (mp->next_hop_via_label),
+ label_stack));
+}
+
+static int
+ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
+{
+ u32 fib_index, next_hop_fib_index;
+ mpls_label_t *label_stack = NULL;
+ int rv, ii, n_labels;
+
+ rv = add_del_route_check (FIB_PROTOCOL_IP6,
+ mp->table_id,
+ mp->next_hop_sw_if_index,
+ FIB_PROTOCOL_IP6,
+ mp->next_hop_table_id,
+ mp->create_vrf_if_needed,
+ &fib_index, &next_hop_fib_index);
+
+ if (0 != rv)
+ return (rv);
+
+ fib_prefix_t pfx = {
+ .fp_len = mp->dst_address_length,
+ .fp_proto = FIB_PROTOCOL_IP6,
+ };
+ clib_memcpy (&pfx.fp_addr.ip6, mp->dst_address, sizeof (pfx.fp_addr.ip6));
+
+ ip46_address_t nh;
+ memset (&nh, 0, sizeof (nh));
+ memcpy (&nh.ip6, mp->next_hop_address, sizeof (nh.ip6));
+
+ n_labels = mp->next_hop_n_out_labels;
+ if (n_labels == 0)
+ ;
+ else if (1 == n_labels)
+ vec_add1 (label_stack, ntohl (mp->next_hop_out_label_stack[0]));
+ else
+ {
+ vec_validate (label_stack, n_labels - 1);
+ for (ii = 0; ii < n_labels; ii++)
+ label_stack[ii] = ntohl (mp->next_hop_out_label_stack[ii]);
+ }
+
+ return (add_del_route_t_handler (mp->is_multipath,
+ mp->is_add,
+ mp->is_drop,
+ mp->is_unreach,
+ mp->is_prohibit,
+ mp->is_local,
+ mp->is_classify,
+ mp->classify_table_index,
+ mp->is_resolve_host,
+ mp->is_resolve_attached,
+ fib_index, &pfx, 0,
+ &nh, ntohl (mp->next_hop_sw_if_index),
+ next_hop_fib_index,
+ mp->next_hop_weight,
+ ntohl (mp->next_hop_via_label),
+ label_stack));
+}
+
+void
+vl_api_ip_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
+{
+ vl_api_ip_add_del_route_reply_t *rmp;
+ int rv;
+ vnet_main_t *vnm = vnet_get_main ();
+
+ vnm->api_errno = 0;
+
+ if (mp->is_ipv6)
+ rv = ip6_add_del_route_t_handler (mp);
+ else
+ rv = ip4_add_del_route_t_handler (mp);
+
+ rv = (rv == 0) ? vnm->api_errno : rv;
+
+ REPLY_MACRO (VL_API_IP_ADD_DEL_ROUTE_REPLY);
+}
+
+static void
+send_ip_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t * q, u32 sw_if_index, u32 context)
+{
+ vl_api_ip_details_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IP_DETAILS);
+
+ mp->sw_if_index = ntohl (sw_if_index);
+ mp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+send_ip_address_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t * q,
+ u8 * ip, u16 prefix_length, u8 is_ipv6, u32 context)
+{
+ vl_api_ip_address_details_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IP_ADDRESS_DETAILS);
+
+ if (is_ipv6)
+ {
+ clib_memcpy (&mp->ip, ip, sizeof (mp->ip));
+ }
+ else
+ {
+ u32 *tp = (u32 *) mp->ip;
+ *tp = *(u32 *) ip;
+ }
+ mp->prefix_length = prefix_length;
+ mp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_ip_address_dump_t_handler (vl_api_ip_address_dump_t * mp)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+ unix_shared_memory_queue_t *q;
+ ip6_address_t *r6;
+ ip4_address_t *r4;
+ ip6_main_t *im6 = &ip6_main;
+ ip4_main_t *im4 = &ip4_main;
+ ip_lookup_main_t *lm6 = &im6->lookup_main;
+ ip_lookup_main_t *lm4 = &im4->lookup_main;
+ ip_interface_address_t *ia = 0;
+ u32 sw_if_index = ~0;
+ int rv __attribute__ ((unused)) = 0;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ if (mp->is_ipv6)
+ {
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (lm6, ia, sw_if_index,
+ 1 /* honor unnumbered */,
+ ({
+ r6 = ip_interface_address_get_address (lm6, ia);
+ u16 prefix_length = ia->address_length;
+ send_ip_address_details(am, q, (u8*)r6, prefix_length, 1, mp->context);
+ }));
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (lm4, ia, sw_if_index,
+ 1 /* honor unnumbered */,
+ ({
+ r4 = ip_interface_address_get_address (lm4, ia);
+ u16 prefix_length = ia->address_length;
+ send_ip_address_details(am, q, (u8*)r4, prefix_length, 0, mp->context);
+ }));
+ /* *INDENT-ON* */
+ }
+ BAD_SW_IF_INDEX_LABEL;
+}
+
+static void
+vl_api_ip_dump_t_handler (vl_api_ip_dump_t * mp)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vlib_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ unix_shared_memory_queue_t *q;
+ vnet_sw_interface_t *si, *sorted_sis;
+ u32 sw_if_index = ~0;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ /* Gather interfaces. */
+ sorted_sis = vec_new (vnet_sw_interface_t, pool_elts (im->sw_interfaces));
+ _vec_len (sorted_sis) = 0;
+ /* *INDENT-OFF* */
+ pool_foreach (si, im->sw_interfaces,
+ ({
+ vec_add1 (sorted_sis, si[0]);
+ }));
+ /* *INDENT-ON* */
+
+ vec_foreach (si, sorted_sis)
+ {
+ if (!(si->flags & VNET_SW_INTERFACE_FLAG_UNNUMBERED))
+ {
+ if (mp->is_ipv6 && !ip6_interface_enabled (vm, si->sw_if_index))
+ {
+ continue;
+ }
+ sw_if_index = si->sw_if_index;
+ send_ip_details (am, q, sw_if_index, mp->context);
+ }
+ }
+}
+
+static void
+set_ip6_flow_hash (vl_api_set_ip_flow_hash_t * mp)
+{
+ vl_api_set_ip_flow_hash_reply_t *rmp;
+ int rv = VNET_API_ERROR_UNIMPLEMENTED;
+
+ clib_warning ("unimplemented...");
+
+ REPLY_MACRO (VL_API_SET_IP_FLOW_HASH_REPLY);
+}
+
+static void
+set_ip4_flow_hash (vl_api_set_ip_flow_hash_t * mp)
+{
+ vl_api_set_ip_flow_hash_reply_t *rmp;
+ int rv;
+ u32 table_id;
+ flow_hash_config_t flow_hash_config = 0;
+
+ table_id = ntohl (mp->vrf_id);
+
+#define _(a,b) if (mp->a) flow_hash_config |= b;
+ foreach_flow_hash_bit;
+#undef _
+
+ rv = vnet_set_ip4_flow_hash (table_id, flow_hash_config);
+
+ REPLY_MACRO (VL_API_SET_IP_FLOW_HASH_REPLY);
+}
+
+
+static void
+vl_api_set_ip_flow_hash_t_handler (vl_api_set_ip_flow_hash_t * mp)
+{
+ if (mp->is_ipv6 == 0)
+ set_ip4_flow_hash (mp);
+ else
+ set_ip6_flow_hash (mp);
+}
+
+static void
+ vl_api_sw_interface_ip6nd_ra_config_t_handler
+ (vl_api_sw_interface_ip6nd_ra_config_t * mp)
+{
+ vl_api_sw_interface_ip6nd_ra_config_reply_t *rmp;
+ vlib_main_t *vm = vlib_get_main ();
+ int rv = 0;
+ u8 is_no, suppress, managed, other, ll_option, send_unicast, cease,
+ default_router;
+
+ is_no = mp->is_no == 1;
+ suppress = mp->suppress == 1;
+ managed = mp->managed == 1;
+ other = mp->other == 1;
+ ll_option = mp->ll_option == 1;
+ send_unicast = mp->send_unicast == 1;
+ cease = mp->cease == 1;
+ default_router = mp->default_router == 1;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ rv = ip6_neighbor_ra_config (vm, ntohl (mp->sw_if_index),
+ suppress, managed, other,
+ ll_option, send_unicast, cease,
+ default_router, ntohl (mp->lifetime),
+ ntohl (mp->initial_count),
+ ntohl (mp->initial_interval),
+ ntohl (mp->max_interval),
+ ntohl (mp->min_interval), is_no);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_IP6ND_RA_CONFIG_REPLY);
+}
+
+static void
+ vl_api_sw_interface_ip6nd_ra_prefix_t_handler
+ (vl_api_sw_interface_ip6nd_ra_prefix_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_sw_interface_ip6nd_ra_prefix_reply_t *rmp;
+ int rv = 0;
+ u8 is_no, use_default, no_advertise, off_link, no_autoconfig, no_onlink;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ is_no = mp->is_no == 1;
+ use_default = mp->use_default == 1;
+ no_advertise = mp->no_advertise == 1;
+ off_link = mp->off_link == 1;
+ no_autoconfig = mp->no_autoconfig == 1;
+ no_onlink = mp->no_onlink == 1;
+
+ rv = ip6_neighbor_ra_prefix (vm, ntohl (mp->sw_if_index),
+ (ip6_address_t *) mp->address,
+ mp->address_length, use_default,
+ ntohl (mp->val_lifetime),
+ ntohl (mp->pref_lifetime), no_advertise,
+ off_link, no_autoconfig, no_onlink, is_no);
+
+ BAD_SW_IF_INDEX_LABEL;
+ REPLY_MACRO (VL_API_SW_INTERFACE_IP6ND_RA_PREFIX_REPLY);
+}
+
+static void
+ vl_api_sw_interface_ip6_enable_disable_t_handler
+ (vl_api_sw_interface_ip6_enable_disable_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_sw_interface_ip6_enable_disable_reply_t *rmp;
+ vnet_main_t *vnm = vnet_get_main ();
+ int rv = 0;
+ clib_error_t *error;
+
+ vnm->api_errno = 0;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ error =
+ (mp->enable == 1) ? enable_ip6_interface (vm,
+ ntohl (mp->sw_if_index)) :
+ disable_ip6_interface (vm, ntohl (mp->sw_if_index));
+
+ if (error)
+ {
+ clib_error_report (error);
+ rv = VNET_API_ERROR_UNSPECIFIED;
+ }
+ else
+ {
+ rv = vnm->api_errno;
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_IP6_ENABLE_DISABLE_REPLY);
+}
+
+static void
+ vl_api_sw_interface_ip6_set_link_local_address_t_handler
+ (vl_api_sw_interface_ip6_set_link_local_address_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_sw_interface_ip6_set_link_local_address_reply_t *rmp;
+ int rv = 0;
+ clib_error_t *error;
+ vnet_main_t *vnm = vnet_get_main ();
+
+ vnm->api_errno = 0;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ error = set_ip6_link_local_address (vm,
+ ntohl (mp->sw_if_index),
+ (ip6_address_t *) mp->address,
+ mp->address_length);
+ if (error)
+ {
+ clib_error_report (error);
+ rv = VNET_API_ERROR_UNSPECIFIED;
+ }
+ else
+ {
+ rv = vnm->api_errno;
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_IP6_SET_LINK_LOCAL_ADDRESS_REPLY);
+}
+
+
+#define vl_msg_name_crc_list
+#include <vnet/ip/ip.api.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_ip;
+#undef _
+}
+
+static clib_error_t *
+ip_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_ip_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (ip_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip_checksum.c b/src/vnet/ip/ip_checksum.c
new file mode 100644
index 00000000000..6a9cf657a5c
--- /dev/null
+++ b/src/vnet/ip/ip_checksum.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip_checksum.c: ip/tcp/udp checksums
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/ip/ip.h>
+
+ip_csum_t
+ip_incremental_checksum (ip_csum_t sum, void *_data, uword n_bytes)
+{
+ uword data = pointer_to_uword (_data);
+ ip_csum_t sum0, sum1;
+
+ sum0 = 0;
+ sum1 = sum;
+
+ /* Align data pointer to 64 bits. */
+#define _(t) \
+do { \
+ if (n_bytes >= sizeof (t) \
+ && sizeof (t) < sizeof (ip_csum_t) \
+ && (data % (2 * sizeof (t))) != 0) \
+ { \
+ sum0 += * uword_to_pointer (data, t *); \
+ data += sizeof (t); \
+ n_bytes -= sizeof (t); \
+ } \
+} while (0)
+
+ _(u8);
+ _(u16);
+ if (BITS (ip_csum_t) > 32)
+ _(u32);
+
+#undef _
+
+ {
+ ip_csum_t *d = uword_to_pointer (data, ip_csum_t *);
+
+ while (n_bytes >= 2 * sizeof (d[0]))
+ {
+ sum0 = ip_csum_with_carry (sum0, d[0]);
+ sum1 = ip_csum_with_carry (sum1, d[1]);
+ d += 2;
+ n_bytes -= 2 * sizeof (d[0]);
+ }
+
+ data = pointer_to_uword (d);
+ }
+
+#define _(t) \
+do { \
+ if (n_bytes >= sizeof (t) && sizeof (t) <= sizeof (ip_csum_t)) \
+ { \
+ sum0 = ip_csum_with_carry (sum0, * uword_to_pointer (data, t *)); \
+ data += sizeof (t); \
+ n_bytes -= sizeof (t); \
+ } \
+} while (0)
+
+ if (BITS (ip_csum_t) > 32)
+ _(u64);
+ _(u32);
+ _(u16);
+ _(u8);
+
+#undef _
+
+ /* Combine even and odd sums. */
+ sum0 = ip_csum_with_carry (sum0, sum1);
+
+ return sum0;
+}
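
A minimal usage sketch: fold the returned ones-complement sum to 16 bits and invert it to obtain the on-wire checksum (ip_csum_fold is declared alongside the ip_csum_with_carry helper used above):

    ip_csum_t sum = ip_incremental_checksum (0, data, n_bytes);
    u16 csum = ~ip_csum_fold (sum);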
+
+ip_csum_t
+ip_csum_and_memcpy (ip_csum_t sum, void *dst, void *src, uword n_bytes)
+{
+ uword n_left;
+ ip_csum_t sum0 = sum, sum1;
+ n_left = n_bytes;
+
+ if (n_left && (pointer_to_uword (dst) & sizeof (u8)))
+ {
+ u8 *d8, val;
+
+ d8 = dst;
+ val = ((u8 *) src)[0];
+ d8[0] = val;
+ dst += 1;
+ src += 1;
+ n_left -= 1;
+ sum0 =
+ ip_csum_with_carry (sum0, val << (8 * CLIB_ARCH_IS_LITTLE_ENDIAN));
+ }
+
+ while ((n_left >= sizeof (u16))
+ && (pointer_to_uword (dst) & (sizeof (sum) - sizeof (u16))))
+ {
+ u16 *d16, *s16;
+
+ d16 = dst;
+ s16 = src;
+
+ d16[0] = clib_mem_unaligned (&s16[0], u16);
+
+ sum0 = ip_csum_with_carry (sum0, d16[0]);
+ dst += sizeof (u16);
+ src += sizeof (u16);
+ n_left -= sizeof (u16);
+ }
+
+ sum1 = 0;
+ while (n_left >= 2 * sizeof (sum))
+ {
+ ip_csum_t dst0, dst1;
+ ip_csum_t *dst_even, *src_even;
+
+ dst_even = dst;
+ src_even = src;
+ dst0 = clib_mem_unaligned (&src_even[0], ip_csum_t);
+ dst1 = clib_mem_unaligned (&src_even[1], ip_csum_t);
+
+ dst_even[0] = dst0;
+ dst_even[1] = dst1;
+
+ dst += 2 * sizeof (dst_even[0]);
+ src += 2 * sizeof (dst_even[0]);
+ n_left -= 2 * sizeof (dst_even[0]);
+
+ sum0 = ip_csum_with_carry (sum0, dst0);
+ sum1 = ip_csum_with_carry (sum1, dst1);
+ }
+
+ sum0 = ip_csum_with_carry (sum0, sum1);
+ while (n_left >= 1 * sizeof (sum))
+ {
+ ip_csum_t dst0, *dst_even, *src_even;
+
+ dst_even = dst;
+ src_even = src;
+
+ dst0 = clib_mem_unaligned (&src_even[0], ip_csum_t);
+
+ dst_even[0] = dst0;
+
+ dst += 1 * sizeof (sum);
+ src += 1 * sizeof (sum);
+ n_left -= 1 * sizeof (sum);
+
+ sum0 = ip_csum_with_carry (sum0, dst0);
+ }
+
+ while (n_left >= sizeof (u16))
+ {
+ u16 dst0, *dst_short, *src_short;
+
+ dst_short = dst;
+ src_short = src;
+
+ dst0 = clib_mem_unaligned (&src_short[0], u16);
+
+ dst_short[0] = dst0;
+
+ sum0 = ip_csum_with_carry (sum0, dst_short[0]);
+ dst += 1 * sizeof (dst0);
+ src += 1 * sizeof (dst0);
+ n_left -= 1 * sizeof (dst0);
+
+ }
+
+ if (n_left == 1)
+ {
+ u8 *d8, *s8, val;
+
+ d8 = dst;
+ s8 = src;
+
+ d8[0] = val = s8[0];
+ d8 += 1;
+ s8 += 1;
+ n_left -= 1;
+ sum0 = ip_csum_with_carry (sum0, val << (8 * CLIB_ARCH_IS_BIG_ENDIAN));
+ }
+
+ return sum0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip_frag.c b/src/vnet/ip/ip_frag.c
new file mode 100644
index 00000000000..ca062bfd5e8
--- /dev/null
+++ b/src/vnet/ip/ip_frag.c
@@ -0,0 +1,581 @@
+/*---------------------------------------------------------------------------
+ * Copyright (c) 2009-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+/*
+ * IPv4 and IPv6 fragmentation nodes
+ */
+
+#include "ip_frag.h"
+
+#include <vnet/ip/ip.h>
+
+
+typedef struct
+{
+ u8 ipv6;
+ u16 header_offset;
+ u16 mtu;
+ u8 next;
+ u16 n_fragments;
+} ip_frag_trace_t;
+
+static u8 *
+format_ip_frag_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ip_frag_trace_t *t = va_arg (*args, ip_frag_trace_t *);
+ s = format (s, "IPv%s offset: %u mtu: %u fragments: %u",
+ t->ipv6 ? "6" : "4", t->header_offset, t->mtu, t->n_fragments);
+ return s;
+}
+
+static u32 running_fragment_id;
+
+static void
+ip4_frag_do_fragment (vlib_main_t * vm, u32 pi, u32 ** buffer,
+ ip_frag_error_t * error)
+{
+ vlib_buffer_t *p;
+ ip4_header_t *ip4;
+ u16 mtu, ptr, len, max, rem, offset, ip_frag_id, ip_frag_offset;
+ u8 *packet, more;
+
+ vec_add1 (*buffer, pi);
+ p = vlib_get_buffer (vm, pi);
+ offset = vnet_buffer (p)->ip_frag.header_offset;
+ mtu = vnet_buffer (p)->ip_frag.mtu;
+ packet = (u8 *) vlib_buffer_get_current (p);
+ ip4 = (ip4_header_t *) (packet + offset);
+
+ rem = clib_net_to_host_u16 (ip4->length) - sizeof (*ip4);
+ ptr = 0;
+ max = (mtu - sizeof (*ip4) - vnet_buffer (p)->ip_frag.header_offset) & ~0x7;
+
+ if (rem < (p->current_length - offset - sizeof (*ip4)))
+ {
+ *error = IP_FRAG_ERROR_MALFORMED;
+ return;
+ }
+
+ if (mtu < sizeof (*ip4))
+ {
+ *error = IP_FRAG_ERROR_CANT_FRAGMENT_HEADER;
+ return;
+ }
+
+ if (ip4->flags_and_fragment_offset &
+ clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT))
+ {
+ *error = IP_FRAG_ERROR_DONT_FRAGMENT_SET;
+ return;
+ }
+
+ if (ip4_is_fragment (ip4))
+ {
+ ip_frag_id = ip4->fragment_id;
+ ip_frag_offset = ip4_get_fragment_offset (ip4);
+ more =
+ ! !(ip4->flags_and_fragment_offset &
+ clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS));
+ }
+ else
+ {
+ ip_frag_id = (++running_fragment_id);
+ ip_frag_offset = 0;
+ more = 0;
+ }
+
+ //Do the actual fragmentation
+ while (rem)
+ {
+ u32 bi;
+ vlib_buffer_t *b;
+ ip4_header_t *fip4;
+
+ len =
+ (rem >
+ (mtu - sizeof (*ip4) -
+ vnet_buffer (p)->ip_frag.header_offset)) ? max : rem;
+
+ if (ptr == 0)
+ {
+ bi = pi;
+ b = p;
+ fip4 = (ip4_header_t *) (vlib_buffer_get_current (b) + offset);
+ }
+ else
+ {
+ if (!vlib_buffer_alloc (vm, &bi, 1))
+ {
+ *error = IP_FRAG_ERROR_MEMORY;
+ return;
+ }
+ vec_add1 (*buffer, bi);
+ b = vlib_get_buffer (vm, bi);
+ vnet_buffer (b)->sw_if_index[VLIB_RX] =
+ vnet_buffer (p)->sw_if_index[VLIB_RX];
+ vnet_buffer (b)->sw_if_index[VLIB_TX] =
+ vnet_buffer (p)->sw_if_index[VLIB_TX];
+ fip4 = (ip4_header_t *) (vlib_buffer_get_current (b) + offset);
+
+ //Copy offset and ip4 header
+ clib_memcpy (b->data, packet, offset + sizeof (*ip4));
+ //Copy data
+ clib_memcpy (((u8 *) (fip4)) + sizeof (*fip4),
+ packet + offset + sizeof (*fip4) + ptr, len);
+ }
+ b->current_length = offset + len + sizeof (*fip4);
+
+ fip4->fragment_id = ip_frag_id;
+ fip4->flags_and_fragment_offset =
+ clib_host_to_net_u16 ((ptr >> 3) + ip_frag_offset);
+ fip4->flags_and_fragment_offset |=
+ clib_host_to_net_u16 (((len != rem) || more) << 13);
+ // ((len != rem) || more) << 13 is an optimization for
+ // ((len != rem) || more) ? IP4_HEADER_FLAG_MORE_FRAGMENTS : 0
+ fip4->length = clib_host_to_net_u16 (len + sizeof (*fip4));
+ fip4->checksum = ip4_header_checksum (fip4);
+
+ if (vnet_buffer (p)->ip_frag.flags & IP_FRAG_FLAG_IP4_HEADER)
+ {
+ //Encapsulating ipv4 header
+ ip4_header_t *encap_header4 =
+ (ip4_header_t *) vlib_buffer_get_current (b);
+ encap_header4->length = clib_host_to_net_u16 (b->current_length);
+ encap_header4->checksum = ip4_header_checksum (encap_header4);
+ }
+ else if (vnet_buffer (p)->ip_frag.flags & IP_FRAG_FLAG_IP6_HEADER)
+ {
+ //Encapsulating ipv6 header
+ ip6_header_t *encap_header6 =
+ (ip6_header_t *) vlib_buffer_get_current (b);
+ encap_header6->payload_length =
+ clib_host_to_net_u16 (b->current_length -
+ sizeof (*encap_header6));
+ }
+
+ rem -= len;
+ ptr += len;
+ }
+}
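
A worked example of the sizing logic above, assuming mtu = 1500 and header_offset = 0:

    max = (1500 - 20 - 0) & ~0x7 = 1480
    a 4000-byte IPv4 packet (3980 payload bytes) becomes fragments of
    1480 + 1480 + 1020 payload bytes at offsets 0, 185 and 370
    (8-octet units), with MF set on all but the last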
+
+void
+ip_frag_set_vnet_buffer (vlib_buffer_t * b, u16 offset, u16 mtu,
+ u8 next_index, u8 flags)
+{
+ vnet_buffer (b)->ip_frag.header_offset = offset;
+ vnet_buffer (b)->ip_frag.mtu = mtu;
+ vnet_buffer (b)->ip_frag.next_index = next_index;
+ vnet_buffer (b)->ip_frag.flags = flags;
+}
+
+static uword
+ip4_frag (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip4_frag_node.index);
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ u32 frag_sent = 0, small_packets = 0;
+ u32 *buffer = 0;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0, *frag_from, frag_left;
+ vlib_buffer_t *p0;
+ ip_frag_error_t error0;
+ ip4_frag_next_t next0;
+
+ //Note: The packet is not enqueued now.
+ //It is instead put in a vector where other fragments
+ //will be put as well.
+ pi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+ error0 = IP_FRAG_ERROR_NONE;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip4_frag_do_fragment (vm, pi0, &buffer, &error0);
+
+ if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ip_frag_trace_t *tr =
+ vlib_add_trace (vm, node, p0, sizeof (*tr));
+ tr->header_offset = vnet_buffer (p0)->ip_frag.header_offset;
+ tr->mtu = vnet_buffer (p0)->ip_frag.mtu;
+ tr->ipv6 = 0;
+ tr->n_fragments = vec_len (buffer);
+ tr->next = vnet_buffer (p0)->ip_frag.next_index;
+ }
+
+ if (error0 == IP_FRAG_ERROR_DONT_FRAGMENT_SET)
+ {
+ icmp4_error_set_vnet_buffer (p0, ICMP4_destination_unreachable,
+ ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
+ vnet_buffer (p0)->ip_frag.mtu);
+ vlib_buffer_advance (p0,
+ vnet_buffer (p0)->ip_frag.header_offset);
+ next0 = IP4_FRAG_NEXT_ICMP_ERROR;
+ }
+ else
+ next0 =
+ (error0 ==
+ IP_FRAG_ERROR_NONE) ? vnet_buffer (p0)->
+ ip_frag.next_index : IP4_FRAG_NEXT_DROP;
+
+ if (error0 == IP_FRAG_ERROR_NONE)
+ {
+ frag_sent += vec_len (buffer);
+ small_packets += (vec_len (buffer) == 1);
+ }
+ else
+ vlib_error_count (vm, ip4_frag_node.index, error0, 1);
+
+ //Send fragments that were added in the frame
+ frag_from = buffer;
+ frag_left = vec_len (buffer);
+
+ while (frag_left > 0)
+ {
+ while (frag_left > 0 && n_left_to_next > 0)
+ {
+ u32 i;
+ i = to_next[0] = frag_from[0];
+ frag_from += 1;
+ frag_left -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ vlib_get_buffer (vm, i)->error = error_node->errors[error0];
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, i,
+ next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+ vec_reset_length (buffer);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vec_free (buffer);
+
+ vlib_node_increment_counter (vm, ip4_frag_node.index,
+ IP_FRAG_ERROR_FRAGMENT_SENT, frag_sent);
+ vlib_node_increment_counter (vm, ip4_frag_node.index,
+ IP_FRAG_ERROR_SMALL_PACKET, small_packets);
+
+ return frame->n_vectors;
+}
+
+
+static void
+ip6_frag_do_fragment (vlib_main_t * vm, u32 pi, u32 ** buffer,
+ ip_frag_error_t * error)
+{
+ vlib_buffer_t *p;
+ ip6_header_t *ip6_hdr;
+ ip6_frag_hdr_t *frag_hdr;
+ u8 *payload, *next_header;
+
+ p = vlib_get_buffer (vm, pi);
+
+ //Parsing the IPv6 headers
+ ip6_hdr =
+ vlib_buffer_get_current (p) + vnet_buffer (p)->ip_frag.header_offset;
+ payload = (u8 *) (ip6_hdr + 1);
+ next_header = &ip6_hdr->protocol;
+ if (*next_header == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
+ {
+ next_header = payload;
+ payload += payload[1] * 8;
+ }
+
+ if (*next_header == IP_PROTOCOL_IP6_DESTINATION_OPTIONS)
+ {
+ next_header = payload;
+ payload += payload[1] * 8;
+ }
+
+ if (*next_header == IP_PROTOCOL_IPV6_ROUTE)
+ {
+ next_header = payload;
+ payload += payload[1] * 8;
+ }
+
+ if (PREDICT_FALSE
+ (payload >= (u8 *) vlib_buffer_get_current (p) + p->current_length))
+ {
+ //A malicious packet could declare an oversized extension header
+ //and trick us into reading or writing past this vlib_buffer
+ *error = IP_FRAG_ERROR_MALFORMED;
+ return;
+ }
+
+ u8 has_more;
+ u16 initial_offset;
+ if (*next_header == IP_PROTOCOL_IPV6_FRAGMENTATION)
+ {
+ //The fragmentation header is already there
+ frag_hdr = (ip6_frag_hdr_t *) payload;
+ has_more = ip6_frag_hdr_more (frag_hdr);
+ initial_offset = ip6_frag_hdr_offset (frag_hdr);
+ }
+ else
+ {
+ //Insert a fragmentation header in the packet
+ u8 nh = *next_header;
+ *next_header = IP_PROTOCOL_IPV6_FRAGMENTATION;
+ vlib_buffer_advance (p, -sizeof (*frag_hdr));
+ u8 *start = vlib_buffer_get_current (p);
+ memmove (start, start + sizeof (*frag_hdr),
+ payload - (start + sizeof (*frag_hdr)));
+ frag_hdr = (ip6_frag_hdr_t *) (payload - sizeof (*frag_hdr));
+ frag_hdr->identification = ++running_fragment_id;
+ frag_hdr->next_hdr = nh;
+ frag_hdr->rsv = 0;
+ has_more = 0;
+ initial_offset = 0;
+ }
+ payload = (u8 *) (frag_hdr + 1);
+
+ u16 headers_len = payload - (u8 *) vlib_buffer_get_current (p);
+ u16 max_payload = vnet_buffer (p)->ip_frag.mtu - headers_len;
+ u16 rem = p->current_length - headers_len;
+ u16 ptr = 0;
+
+ if (max_payload < 8)
+ {
+ *error = IP_FRAG_ERROR_CANT_FRAGMENT_HEADER;
+ return;
+ }
+
+ while (rem)
+ {
+ u32 bi;
+ vlib_buffer_t *b;
+ u16 len = (rem > max_payload) ? (max_payload & ~0x7) : rem;
+ rem -= len;
+
+ if (ptr != 0)
+ {
+ if (!vlib_buffer_alloc (vm, &bi, 1))
+ {
+ *error = IP_FRAG_ERROR_MEMORY;
+ return;
+ }
+ b = vlib_get_buffer (vm, bi);
+ vnet_buffer (b)->sw_if_index[VLIB_RX] =
+ vnet_buffer (p)->sw_if_index[VLIB_RX];
+ vnet_buffer (b)->sw_if_index[VLIB_TX] =
+ vnet_buffer (p)->sw_if_index[VLIB_TX];
+ clib_memcpy (vlib_buffer_get_current (b),
+ vlib_buffer_get_current (p), headers_len);
+ clib_memcpy (vlib_buffer_get_current (b) + headers_len,
+ payload + ptr, len);
+ frag_hdr =
+ vlib_buffer_get_current (b) + headers_len - sizeof (*frag_hdr);
+ }
+ else
+ {
+ bi = pi;
+ b = vlib_get_buffer (vm, bi);
+ //frag_hdr already set here
+ }
+
+ ip6_hdr =
+ vlib_buffer_get_current (b) + vnet_buffer (p)->ip_frag.header_offset;
+ frag_hdr->fragment_offset_and_more =
+ ip6_frag_hdr_offset_and_more (initial_offset + (ptr >> 3),
+ (rem || has_more));
+ b->current_length = headers_len + len;
+ ip6_hdr->payload_length =
+ clib_host_to_net_u16 (b->current_length -
+ vnet_buffer (p)->ip_frag.header_offset -
+ sizeof (*ip6_hdr));
+
+ if (vnet_buffer (p)->ip_frag.flags & IP_FRAG_FLAG_IP4_HEADER)
+ {
+ //Encapsulating ipv4 header
+ ip4_header_t *encap_header4 =
+ (ip4_header_t *) vlib_buffer_get_current (b);
+ encap_header4->length = clib_host_to_net_u16 (b->current_length);
+ encap_header4->checksum = ip4_header_checksum (encap_header4);
+ }
+ else if (vnet_buffer (p)->ip_frag.flags & IP_FRAG_FLAG_IP6_HEADER)
+ {
+ //Encapsulating ipv6 header
+ ip6_header_t *encap_header6 =
+ (ip6_header_t *) vlib_buffer_get_current (b);
+ encap_header6->payload_length =
+ clib_host_to_net_u16 (b->current_length -
+ sizeof (*encap_header6));
+ }
+
+ vec_add1 (*buffer, bi);
+
+ ptr += len;
+ }
+}
+
+static uword
+ip6_frag (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip6_frag_node.index);
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ u32 frag_sent = 0, small_packets = 0;
+ u32 *buffer = 0;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0, *frag_from, frag_left;
+ vlib_buffer_t *p0;
+ ip_frag_error_t error0;
+ ip6_frag_next_t next0;
+
+ pi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+ error0 = IP_FRAG_ERROR_NONE;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip6_frag_do_fragment (vm, pi0, &buffer, &error0);
+
+ if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ip_frag_trace_t *tr =
+ vlib_add_trace (vm, node, p0, sizeof (*tr));
+ tr->header_offset = vnet_buffer (p0)->ip_frag.header_offset;
+ tr->mtu = vnet_buffer (p0)->ip_frag.mtu;
+ tr->ipv6 = 1;
+ tr->n_fragments = vec_len (buffer);
+ tr->next = vnet_buffer (p0)->ip_frag.next_index;
+ }
+
+ next0 = (error0 == IP_FRAG_ERROR_NONE) ?
+ vnet_buffer (p0)->ip_frag.next_index : IP6_FRAG_NEXT_DROP;
+ frag_sent += vec_len (buffer);
+ small_packets += (vec_len (buffer) == 1);
+
+ //Send the fragments that were collected in the vector
+ frag_from = buffer;
+ frag_left = vec_len (buffer);
+ while (frag_left > 0)
+ {
+ while (frag_left > 0 && n_left_to_next > 0)
+ {
+ u32 i;
+ i = to_next[0] = frag_from[0];
+ frag_from += 1;
+ frag_left -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ vlib_get_buffer (vm, i)->error = error_node->errors[error0];
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, i,
+ next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+ vec_reset_length (buffer);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vec_free (buffer);
+ vlib_node_increment_counter (vm, ip6_frag_node.index,
+ IP_FRAG_ERROR_FRAGMENT_SENT, frag_sent);
+ vlib_node_increment_counter (vm, ip6_frag_node.index,
+ IP_FRAG_ERROR_SMALL_PACKET, small_packets);
+
+ return frame->n_vectors;
+}
+
+static char *ip4_frag_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ip_frag_error
+#undef _
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_frag_node) = {
+ .function = ip4_frag,
+ .name = IP4_FRAG_NODE_NAME,
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip_frag_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = IP_FRAG_N_ERROR,
+ .error_strings = ip4_frag_error_strings,
+
+ .n_next_nodes = IP4_FRAG_N_NEXT,
+ .next_nodes = {
+ [IP4_FRAG_NEXT_IP4_LOOKUP] = "ip4-lookup",
+ [IP4_FRAG_NEXT_IP6_LOOKUP] = "ip6-lookup",
+ [IP4_FRAG_NEXT_ICMP_ERROR] = "ip4-icmp-error",
+ [IP4_FRAG_NEXT_DROP] = "error-drop"
+ },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_frag_node) = {
+ .function = ip6_frag,
+ .name = IP6_FRAG_NODE_NAME,
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip_frag_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = IP_FRAG_N_ERROR,
+ .error_strings = ip4_frag_error_strings,
+
+ .n_next_nodes = IP6_FRAG_N_NEXT,
+ .next_nodes = {
+ [IP6_FRAG_NEXT_IP4_LOOKUP] = "ip4-lookup",
+ [IP6_FRAG_NEXT_IP6_LOOKUP] = "ip6-lookup",
+ [IP6_FRAG_NEXT_DROP] = "error-drop"
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip_frag.h b/src/vnet/ip/ip_frag.h
new file mode 100644
index 00000000000..348f5a2fbc6
--- /dev/null
+++ b/src/vnet/ip/ip_frag.h
@@ -0,0 +1,96 @@
+/*---------------------------------------------------------------------------
+ * Copyright (c) 2009-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+/*
+ * IPv4 and IPv6 Fragmentation Nodes
+ *
+ * A packet sent to these nodes requires the following
+ * buffer attributes to be set:
+ * ip_frag.header_offset :
+ * Offset of the IPv4 (or IPv6) header within the packet. The bytes
+ * before that offset are left untouched and copied into every
+ * fragment, with the fragment data appended after them. This is
+ * used when fragmenting encapsulated packets.
+ * ip_frag.mtu :
+ * Maximum size of the IP fragments, header included, not counting
+ * the 'ip_frag.header_offset' copied bytes.
+ * ip_frag.next_index :
+ * One of ip4_frag_next_t (or ip6_frag_next_t), indicating the exit
+ * node to which the fragments should be sent.
+ *
+ */
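+/*
+ * Illustrative usage (editor's sketch, not part of this patch): a feature
+ * node holding a vlib_buffer_t *b0 that needs "ip4-frag" to cut it into
+ * fragments of at most 1400 bytes and hand them to ip4-lookup could set
+ * the attributes with the helper declared below:
+ *
+ *   ip_frag_set_vnet_buffer (b0, 0, 1400, IP4_FRAG_NEXT_IP4_LOOKUP, 0);
+ *
+ * (arguments: header_offset, mtu, next_index, flags)
+ */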
+
+#ifndef IP_FRAG_H
+#define IP_FRAG_H
+
+#include <vnet/vnet.h>
+
+#define IP_FRAG_FLAG_IP4_HEADER 0x01 //Encapsulating IPv4 header
+#define IP_FRAG_FLAG_IP6_HEADER 0x02 //Encapsulating IPv6 header
+
+#define IP4_FRAG_NODE_NAME "ip4-frag"
+#define IP6_FRAG_NODE_NAME "ip6-frag"
+
+extern vlib_node_registration_t ip4_frag_node;
+extern vlib_node_registration_t ip6_frag_node;
+
+typedef enum
+{
+ IP4_FRAG_NEXT_IP4_LOOKUP,
+ IP4_FRAG_NEXT_IP6_LOOKUP,
+ IP4_FRAG_NEXT_ICMP_ERROR,
+ IP4_FRAG_NEXT_DROP,
+ IP4_FRAG_N_NEXT
+} ip4_frag_next_t;
+
+typedef enum
+{
+ IP6_FRAG_NEXT_IP4_LOOKUP,
+ IP6_FRAG_NEXT_IP6_LOOKUP,
+ IP6_FRAG_NEXT_DROP,
+ IP6_FRAG_N_NEXT
+} ip6_frag_next_t;
+
+#define foreach_ip_frag_error \
+ /* Must be first. */ \
+ _(NONE, "packet fragmented") \
+ _(SMALL_PACKET, "packet smaller than MTU") \
+ _(FRAGMENT_SENT, "number of sent fragments") \
+ _(CANT_FRAGMENT_HEADER, "can't fragment header") \
+ _(DONT_FRAGMENT_SET, "can't fragment this packet") \
+ _(MALFORMED, "malformed packet") \
+ _(MEMORY, "could not allocate buffer") \
+ _(UNKNOWN, "unknown error")
+
+typedef enum
+{
+#define _(sym,str) IP_FRAG_ERROR_##sym,
+ foreach_ip_frag_error
+#undef _
+ IP_FRAG_N_ERROR,
+} ip_frag_error_t;
+
+void ip_frag_set_vnet_buffer (vlib_buffer_t * b, u16 offset, u16 mtu,
+ u8 next_index, u8 flags);
+
+#endif /* ifndef IP_FRAG_H */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip_init.c b/src/vnet/ip/ip_init.c
new file mode 100644
index 00000000000..f7635b35d0e
--- /dev/null
+++ b/src/vnet/ip/ip_init.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip_init.c: ip generic initialization
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/ip/ip.h>
+
+ip_main_t ip_main;
+
+clib_error_t *
+ip_main_init (vlib_main_t * vm)
+{
+ ip_main_t *im = &ip_main;
+ clib_error_t *error = 0;
+
+ memset (im, 0, sizeof (im[0]));
+
+ {
+ ip_protocol_info_t *pi;
+ u32 i;
+
+#define ip_protocol(n,s) \
+do { \
+ vec_add2 (im->protocol_infos, pi, 1); \
+ pi->protocol = n; \
+ pi->name = (u8 *) #s; \
+} while (0);
+
+#include "protocols.def"
+
+#undef ip_protocol
+
+ im->protocol_info_by_name = hash_create_string (0, sizeof (uword));
+ for (i = 0; i < vec_len (im->protocol_infos); i++)
+ {
+ pi = im->protocol_infos + i;
+
+ hash_set_mem (im->protocol_info_by_name, pi->name, i);
+ hash_set (im->protocol_info_by_protocol, pi->protocol, i);
+ }
+ }
+
+ {
+ tcp_udp_port_info_t *pi;
+ u32 i;
+ static char *port_names[] = {
+#define ip_port(s,n) #s,
+#include "ports.def"
+#undef ip_port
+ };
+ static u16 ports[] = {
+#define ip_port(s,n) n,
+#include "ports.def"
+#undef ip_port
+ };
+
+ vec_resize (im->port_infos, ARRAY_LEN (port_names));
+ im->port_info_by_name = hash_create_string (0, sizeof (uword));
+
+ for (i = 0; i < vec_len (im->port_infos); i++)
+ {
+ pi = im->port_infos + i;
+ pi->port = clib_host_to_net_u16 (ports[i]);
+ pi->name = (u8 *) port_names[i];
+ hash_set_mem (im->port_info_by_name, pi->name, i);
+ hash_set (im->port_info_by_port, pi->port, i);
+ }
+ }
+
+ if ((error = vlib_call_init_function (vm, vnet_main_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, ip4_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, ip6_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, icmp4_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, icmp6_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, ip6_hop_by_hop_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, udp_local_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, udp_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, ip_classify_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, input_acl_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, policer_classify_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, flow_classify_init)))
+ return error;
+
+ return error;
+}
+
+VLIB_INIT_FUNCTION (ip_main_init);
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip_input_acl.c b/src/vnet/ip/ip_input_acl.c
new file mode 100644
index 00000000000..b0b52ab11c3
--- /dev/null
+++ b/src/vnet/ip/ip_input_acl.c
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/ip/ip.h>
+#include <vnet/classify/vnet_classify.h>
+#include <vnet/classify/input_acl.h>
+
+typedef struct
+{
+ u32 sw_if_index;
+ u32 next_index;
+ u32 table_index;
+ u32 offset;
+} ip_inacl_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_ip_inacl_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ip_inacl_trace_t *t = va_arg (*args, ip_inacl_trace_t *);
+
+ s = format (s, "INACL: sw_if_index %d, next_index %d, table %d, offset %d",
+ t->sw_if_index, t->next_index, t->table_index, t->offset);
+ return s;
+}
+
+vlib_node_registration_t ip4_inacl_node;
+vlib_node_registration_t ip6_inacl_node;
+
+#define foreach_ip_inacl_error \
+_(MISS, "input ACL misses") \
+_(HIT, "input ACL hits") \
+_(CHAIN_HIT, "input ACL hits after chain walk")
+
+typedef enum
+{
+#define _(sym,str) IP_INACL_ERROR_##sym,
+ foreach_ip_inacl_error
+#undef _
+ IP_INACL_N_ERROR,
+} ip_inacl_error_t;
+
+static char *ip_inacl_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ip_inacl_error
+#undef _
+};
+
+static inline uword
+ip_inacl_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame, int is_ip4)
+{
+ u32 n_left_from, *from, *to_next;
+ acl_next_index_t next_index;
+ input_acl_main_t *am = &input_acl_main;
+ vnet_classify_main_t *vcm = am->vnet_classify_main;
+ f64 now = vlib_time_now (vm);
+ u32 hits = 0;
+ u32 misses = 0;
+ u32 chain_hits = 0;
+ input_acl_table_id_t tid;
+ vlib_node_runtime_t *error_node;
+ u32 n_next_nodes;
+
+ n_next_nodes = node->n_next_nodes;
+
+ if (is_ip4)
+ {
+ tid = INPUT_ACL_TABLE_IP4;
+ error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
+ }
+ else
+ {
+ tid = INPUT_ACL_TABLE_IP6;
+ error_node = vlib_node_get_runtime (vm, ip6_input_node.index);
+ }
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ /* First pass: compute hashes */
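+ /* (Hashing is done in a pass of its own so the bucket prefetches issued
+ here have time to land before the lookup pass below probes the
+ entries.) */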
+
+ while (n_left_from > 2)
+ {
+ vlib_buffer_t *b0, *b1;
+ u32 bi0, bi1;
+ u8 *h0, *h1;
+ u32 sw_if_index0, sw_if_index1;
+ u32 table_index0, table_index1;
+ vnet_classify_table_t *t0, *t1;
+
+ /* prefetch next iteration */
+ {
+ vlib_buffer_t *p1, *p2;
+
+ p1 = vlib_get_buffer (vm, from[1]);
+ p2 = vlib_get_buffer (vm, from[2]);
+
+ vlib_prefetch_buffer_header (p1, STORE);
+ CLIB_PREFETCH (p1->data, CLIB_CACHE_LINE_BYTES, STORE);
+ vlib_prefetch_buffer_header (p2, STORE);
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+
+ bi1 = from[1];
+ b1 = vlib_get_buffer (vm, bi1);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ table_index0 =
+ am->classify_table_index_by_sw_if_index[tid][sw_if_index0];
+
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ table_index1 =
+ am->classify_table_index_by_sw_if_index[tid][sw_if_index1];
+
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ t1 = pool_elt_at_index (vcm->tables, table_index1);
+
+ if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
+ h0 = (void *) vlib_buffer_get_current (b0) + t0->current_data_offset;
+ else
+ h0 = b0->data;
+
+ vnet_buffer (b0)->l2_classify.hash =
+ vnet_classify_hash_packet (t0, (u8 *) h0);
+
+ vnet_classify_prefetch_bucket (t0, vnet_buffer (b0)->l2_classify.hash);
+
+ if (t1->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
+ h1 = (void *) vlib_buffer_get_current (b1) + t1->current_data_offset;
+ else
+ h1 = b1->data;
+
+ vnet_buffer (b1)->l2_classify.hash =
+ vnet_classify_hash_packet (t1, (u8 *) h1);
+
+ vnet_classify_prefetch_bucket (t1, vnet_buffer (b1)->l2_classify.hash);
+
+ vnet_buffer (b0)->l2_classify.table_index = table_index0;
+
+ vnet_buffer (b1)->l2_classify.table_index = table_index1;
+
+ from += 2;
+ n_left_from -= 2;
+ }
+
+ while (n_left_from > 0)
+ {
+ vlib_buffer_t *b0;
+ u32 bi0;
+ u8 *h0;
+ u32 sw_if_index0;
+ u32 table_index0;
+ vnet_classify_table_t *t0;
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ table_index0 =
+ am->classify_table_index_by_sw_if_index[tid][sw_if_index0];
+
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
+ h0 = (void *) vlib_buffer_get_current (b0) + t0->current_data_offset;
+ else
+ h0 = b0->data;
+
+ vnet_buffer (b0)->l2_classify.hash =
+ vnet_classify_hash_packet (t0, (u8 *) h0);
+
+ vnet_buffer (b0)->l2_classify.table_index = table_index0;
+ vnet_classify_prefetch_bucket (t0, vnet_buffer (b0)->l2_classify.hash);
+
+ from++;
+ n_left_from--;
+ }
+
+ next_index = node->cached_next_index;
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Not enough load/store slots to dual loop... */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0 = ACL_NEXT_INDEX_DENY;
+ u32 table_index0;
+ vnet_classify_table_t *t0;
+ vnet_classify_entry_t *e0;
+ u64 hash0;
+ u8 *h0;
+ u8 error0;
+
+ /* Stride 3 seems to work best */
+ if (PREDICT_TRUE (n_left_from > 3))
+ {
+ vlib_buffer_t *p1 = vlib_get_buffer (vm, from[3]);
+ vnet_classify_table_t *tp1;
+ u32 table_index1;
+ u64 phash1;
+
+ table_index1 = vnet_buffer (p1)->l2_classify.table_index;
+
+ if (PREDICT_TRUE (table_index1 != ~0))
+ {
+ tp1 = pool_elt_at_index (vcm->tables, table_index1);
+ phash1 = vnet_buffer (p1)->l2_classify.hash;
+ vnet_classify_prefetch_entry (tp1, phash1);
+ }
+ }
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ table_index0 = vnet_buffer (b0)->l2_classify.table_index;
+ e0 = 0;
+ t0 = 0;
+ vnet_get_config_data (am->vnet_config_main[tid],
+ &b0->current_config_index, &next0,
+ /* # bytes of config data */ 0);
+
+ vnet_buffer (b0)->l2_classify.opaque_index = ~0;
+
+ if (PREDICT_TRUE (table_index0 != ~0))
+ {
+ hash0 = vnet_buffer (b0)->l2_classify.hash;
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
+ h0 =
+ (void *) vlib_buffer_get_current (b0) +
+ t0->current_data_offset;
+ else
+ h0 = b0->data;
+
+ e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
+ if (e0)
+ {
+ vnet_buffer (b0)->l2_classify.opaque_index
+ = e0->opaque_index;
+ vlib_buffer_advance (b0, e0->advance);
+
+ next0 = (e0->next_index < n_next_nodes) ?
+ e0->next_index : next0;
+
+ hits++;
+
+ if (is_ip4)
+ error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
+ IP4_ERROR_INACL_SESSION_DENY : IP4_ERROR_NONE;
+ else
+ error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
+ IP6_ERROR_INACL_SESSION_DENY : IP6_ERROR_NONE;
+ b0->error = error_node->errors[error0];
+
+ if (e0->action == CLASSIFY_ACTION_SET_IP4_FIB_INDEX ||
+ e0->action == CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = e0->metadata;
+ }
+ else
+ {
+ while (1)
+ {
+ if (PREDICT_TRUE (t0->next_table_index != ~0))
+ t0 = pool_elt_at_index (vcm->tables,
+ t0->next_table_index);
+ else
+ {
+ next0 = (t0->miss_next_index < n_next_nodes) ?
+ t0->miss_next_index : next0;
+
+ misses++;
+
+ if (is_ip4)
+ error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
+ IP4_ERROR_INACL_TABLE_MISS : IP4_ERROR_NONE;
+ else
+ error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
+ IP6_ERROR_INACL_TABLE_MISS : IP6_ERROR_NONE;
+ b0->error = error_node->errors[error0];
+ break;
+ }
+
+ if (t0->current_data_flag ==
+ CLASSIFY_FLAG_USE_CURR_DATA)
+ h0 =
+ (void *) vlib_buffer_get_current (b0) +
+ t0->current_data_offset;
+ else
+ h0 = b0->data;
+
+ hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
+ e0 = vnet_classify_find_entry
+ (t0, (u8 *) h0, hash0, now);
+ if (e0)
+ {
+ vnet_buffer (b0)->l2_classify.opaque_index
+ = e0->opaque_index;
+ vlib_buffer_advance (b0, e0->advance);
+ next0 = (e0->next_index < n_next_nodes) ?
+ e0->next_index : next0;
+ hits++;
+ chain_hits++;
+
+ if (is_ip4)
+ error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
+ IP4_ERROR_INACL_SESSION_DENY : IP4_ERROR_NONE;
+ else
+ error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
+ IP6_ERROR_INACL_SESSION_DENY : IP6_ERROR_NONE;
+ b0->error = error_node->errors[error0];
+
+ if (e0->action == CLASSIFY_ACTION_SET_IP4_FIB_INDEX
+ || e0->action ==
+ CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] =
+ e0->metadata;
+ break;
+ }
+ }
+ }
+ }
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ ip_inacl_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->next_index = next0;
+ t->table_index = t0 ? t0 - vcm->tables : ~0;
+ t->offset = (e0 && t0) ? vnet_classify_get_offset (t0, e0) : ~0;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ IP_INACL_ERROR_MISS, misses);
+ vlib_node_increment_counter (vm, node->node_index,
+ IP_INACL_ERROR_HIT, hits);
+ vlib_node_increment_counter (vm, node->node_index,
+ IP_INACL_ERROR_CHAIN_HIT, chain_hits);
+ return frame->n_vectors;
+}
+
+static uword
+ip4_inacl (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return ip_inacl_inline (vm, node, frame, 1 /* is_ip4 */ );
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_inacl_node) = {
+ .function = ip4_inacl,
+ .name = "ip4-inacl",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip_inacl_trace,
+ .n_errors = ARRAY_LEN(ip_inacl_error_strings),
+ .error_strings = ip_inacl_error_strings,
+
+ .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
+ .next_nodes = {
+ [ACL_NEXT_INDEX_DENY] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_inacl_node, ip4_inacl);
+
+static uword
+ip6_inacl (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return ip_inacl_inline (vm, node, frame, 0 /* is_ip4 */ );
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_inacl_node) = {
+ .function = ip6_inacl,
+ .name = "ip6-inacl",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip_inacl_trace,
+ .n_errors = ARRAY_LEN(ip_inacl_error_strings),
+ .error_strings = ip_inacl_error_strings,
+
+ .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
+ .next_nodes = {
+ [ACL_NEXT_INDEX_DENY] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_inacl_node, ip6_inacl);
+
+static clib_error_t *
+ip_inacl_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ip_inacl_init);
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip_packet.h b/src/vnet/ip/ip_packet.h
new file mode 100644
index 00000000000..d3f3de771bc
--- /dev/null
+++ b/src/vnet/ip/ip_packet.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip_packet.h: packet format common between ip4 & ip6
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_ip_packet_h
+#define included_ip_packet_h
+
+#include <vppinfra/byte_order.h>
+#include <vppinfra/error.h>
+
+typedef enum ip_protocol
+{
+#define ip_protocol(n,s) IP_PROTOCOL_##s = n,
+#include "protocols.def"
+#undef ip_protocol
+} ip_protocol_t;
+
+/* TCP/UDP ports. */
+typedef enum
+{
+#define ip_port(s,n) IP_PORT_##s = n,
+#include "ports.def"
+#undef ip_port
+} ip_port_t;
+
+/* Classifies protocols into UDP, ICMP or other. */
+typedef enum
+{
+ IP_BUILTIN_PROTOCOL_UDP,
+ IP_BUILTIN_PROTOCOL_ICMP,
+ IP_BUILTIN_PROTOCOL_UNKNOWN,
+} ip_builtin_protocol_t;
+
+#define foreach_ip_builtin_multicast_group \
+ _ (1, all_hosts_on_subnet) \
+ _ (2, all_routers_on_subnet) \
+ _ (4, dvmrp) \
+ _ (5, ospf_all_routers) \
+ _ (6, ospf_designated_routers) \
+ _ (13, pim) \
+ _ (18, vrrp) \
+ _ (102, hsrp) \
+ _ (22, igmp_v3)
+
+typedef enum
+{
+#define _(n,f) IP_MULTICAST_GROUP_##f = n,
+ foreach_ip_builtin_multicast_group
+#undef _
+} ip_multicast_group_t;
+
+/* IP checksum support. */
+
+/* Incremental checksum update. */
+typedef uword ip_csum_t;
+
+always_inline ip_csum_t
+ip_csum_with_carry (ip_csum_t sum, ip_csum_t x)
+{
+ ip_csum_t t = sum + x;
+ return t + (t < x);
+}
+
+/* Update checksum changing field at even byte offset from x -> 0. */
+always_inline ip_csum_t
+ip_csum_add_even (ip_csum_t c, ip_csum_t x)
+{
+ ip_csum_t d;
+
+ d = c - x;
+
+ /* Fold in carry from high bit. */
+ d -= d > c;
+
+ ASSERT (ip_csum_with_carry (d, x) == c);
+
+ return d;
+}
+
+/* Update checksum changing field at even byte offset from 0 -> x. */
+always_inline ip_csum_t
+ip_csum_sub_even (ip_csum_t c, ip_csum_t x)
+{
+ return ip_csum_with_carry (c, x);
+}
+
+always_inline ip_csum_t
+ip_csum_update_inline (ip_csum_t sum, ip_csum_t old, ip_csum_t new,
+ u32 field_byte_offset, u32 field_n_bytes)
+{
+ /* For even 1-byte fields on big-endian and odd 1-byte fields on
+ little-endian hosts, we need to shift the byte into place for the
+ checksum. */
+ if ((field_n_bytes % 2)
+ && (field_byte_offset % 2) == CLIB_ARCH_IS_LITTLE_ENDIAN)
+ {
+ old = old << 8;
+ new = new << 8;
+ }
+ sum = ip_csum_sub_even (sum, old);
+ sum = ip_csum_add_even (sum, new);
+ return sum;
+}
+
+#define ip_csum_update(sum,old,new,type,field) \
+ ip_csum_update_inline ((sum), (old), (new), \
+ STRUCT_OFFSET_OF (type, field), \
+ STRUCT_SIZE_OF (type, field))
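+
+/*
+ * Worked example (editor's sketch): incrementally patch an IPv4 header
+ * checksum when decrementing the TTL, instead of recomputing it from
+ * scratch. Assumes 'ip4' points to a valid ip4_header_t (declared in
+ * ip4_packet.h):
+ *
+ *   ip_csum_t sum = ip4->checksum;
+ *   sum = ip_csum_update (sum, ip4->ttl, ip4->ttl - 1, ip4_header_t, ttl);
+ *   ip4->ttl -= 1;
+ *   ip4->checksum = ip_csum_fold (sum);
+ */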
+
+always_inline u16
+ip_csum_fold (ip_csum_t c)
+{
+ /* Reduce to 16 bits. */
+#if uword_bits == 64
+ c = (c & (ip_csum_t) 0xffffffff) + (c >> (ip_csum_t) 32);
+ c = (c & 0xffff) + (c >> 16);
+#endif
+
+ c = (c & 0xffff) + (c >> 16);
+ c = (c & 0xffff) + (c >> 16);
+
+ return c;
+}
+
+/* Copy data and checksum at the same time. */
+ip_csum_t ip_csum_and_memcpy (ip_csum_t sum, void *dst, void *src,
+ uword n_bytes);
+
+always_inline u16
+ip_csum_and_memcpy_fold (ip_csum_t sum, void *dst)
+{
+ return ip_csum_fold (sum);
+}
+
+/* Checksum routine. */
+ip_csum_t ip_incremental_checksum (ip_csum_t sum, void *data, uword n_bytes);
+
+#endif /* included_ip_packet_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ip_source_and_port_range_check.h b/src/vnet/ip/ip_source_and_port_range_check.h
new file mode 100644
index 00000000000..fefe5ff1fd9
--- /dev/null
+++ b/src/vnet/ip/ip_source_and_port_range_check.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_ip_ip_source_and_port_range_check_h
+#define included_ip_ip_source_and_port_range_check_h
+
+
+typedef struct
+{
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} source_range_check_main_t;
+
+source_range_check_main_t source_range_check_main;
+
+typedef enum
+{
+ IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_OUT,
+ IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_OUT,
+ IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_IN,
+ IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_IN,
+ IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS,
+} ip_source_and_port_range_check_protocol_t;
+
+typedef struct
+{
+ u32 fib_index[IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS];
+} ip_source_and_port_range_check_config_t;
+
+#define IP_SOURCE_AND_PORT_RANGE_CHECK_RANGE_LIMIT (VLIB_BUFFER_PRE_DATA_SIZE/(2*sizeof(u16x8)))
+
+typedef struct
+{
+ union
+ {
+ u16x8 as_u16x8;
+ u16 as_u16[8];
+ };
+} u16x8vec_t;
+
+typedef struct
+{
+ u16x8vec_t low;
+ u16x8vec_t hi;
+} protocol_port_range_t;
+
+/**
+ * @brief The number of supported ranges per data-path object.
+ * If more ranges are required, bump this number.
+ */
+#define N_PORT_RANGES_PER_DPO 64
+#define N_RANGES_PER_BLOCK (sizeof(u16x8vec_t)/2)
+#define N_BLOCKS_PER_DPO (N_PORT_RANGES_PER_DPO/N_RANGES_PER_BLOCK)
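+/* With a 16-byte u16x8vec_t this works out to 8 ranges per block and
+ 64 / 8 = 8 blocks per DPO (editor's note). */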
+
+/**
+ * @brief
+ * The object that is in the data-path to perform the check.
+ *
+ * Some trade-offs here; memory vs performance.
+ *
+ * performance:
+ * the principal factor is d-cache line misses/hits,
+ * so we want the data layout to minimise d-cache misses, which
+ * means not following dependent reads, i.e. not doing
+ *
+ * struct B {
+ * u16 n_ranges;
+ * range_t *ranges; // vector of ranges.
+ * }
+ *
+ * so to read ranges[0] we would first d-cache miss on the address
+ * of the object of type B, for which we would need to wait before we
+ * can get the address of B->ranges.
+ * So this layout is better:
+ *
+ * struct B {
+ * u16 n_ranges;
+ * range_t ranges[N];
+ * }
+ *
+ * memory:
+ * the latter layout above is more memory hungry. And N needs to be:
+ * 1 - sized for the maximum required
+ * 2 - fixed, so that objects of type B can be pool allocated and so
+ * 'get'-able using an index.
+ * An option over fixed might be to allocate a contiguous chunk from
+ * the pool (like we used to do for multi-path adjs).
+ */
+typedef struct protocol_port_range_dpo_t_
+{
+ /**
+ * The number of blocks from the 'blocks' array below
+ * that have ranges configured. We keep this count so that in the data-path
+ * we can limit the loop to only the blocks in use
+ */
+ u16 n_used_blocks;
+
+ /**
+ * The total number of free ranges from all blocks.
+ * Used to prevent overrun of the ranges available.
+ */
+ u16 n_free_ranges;
+
+ /**
+ * The fixed-size array of ranges
+ */
+ protocol_port_range_t blocks[N_BLOCKS_PER_DPO];
+} protocol_port_range_dpo_t;
+
+int ip4_source_and_port_range_check_add_del (ip4_address_t * address,
+ u32 length,
+ u32 vrf_id,
+ u16 * low_ports,
+ u16 * hi_ports, int is_add);
+
+// This will be moved to another file in another patch -- for API freeze
+int ip6_source_and_port_range_check_add_del (ip6_address_t * address,
+ u32 length,
+ u32 vrf_id,
+ u16 * low_ports,
+ u16 * hi_ports, int is_add);
+
+int set_ip_source_and_port_range_check (vlib_main_t * vm,
+ u32 * fib_index,
+ u32 sw_if_index, u32 is_add);
+
+#endif /* included_ip_ip_source_and_port_range_check_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/lookup.c b/src/vnet/ip/lookup.c
new file mode 100644
index 00000000000..734a4cd7cfb
--- /dev/null
+++ b/src/vnet/ip/lookup.c
@@ -0,0 +1,967 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip_lookup.c: ip4/6 adjacency and lookup table management
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/ip/ip.h>
+#include <vnet/adj/adj.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/mpls/mpls.h>
+#include <vnet/dpo/drop_dpo.h>
+#include <vnet/dpo/classify_dpo.h>
+#include <vnet/dpo/punt_dpo.h>
+#include <vnet/dpo/receive_dpo.h>
+#include <vnet/dpo/ip_null_dpo.h>
+
+/**
+ * @file
+ * @brief IPv4 and IPv6 adjacency and lookup table management.
+ *
+ */
+
+clib_error_t *
+ip_interface_address_add_del (ip_lookup_main_t * lm,
+ u32 sw_if_index,
+ void *addr_fib,
+ u32 address_length,
+ u32 is_del, u32 * result_if_address_index)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip_interface_address_t *a, *prev, *next;
+ uword *p = mhash_get (&lm->address_to_if_address_index, addr_fib);
+
+ vec_validate_init_empty (lm->if_address_pool_index_by_sw_if_index,
+ sw_if_index, ~0);
+ a = p ? pool_elt_at_index (lm->if_address_pool, p[0]) : 0;
+
+ /* Verify given length. */
+ if ((a && (address_length != a->address_length)) || (address_length == 0))
+ {
+ vnm->api_errno = VNET_API_ERROR_ADDRESS_LENGTH_MISMATCH;
+ return clib_error_create
+ ("%U wrong length (expected %d) for interface %U",
+ lm->format_address_and_length, addr_fib,
+ address_length, a ? a->address_length : -1,
+ format_vnet_sw_if_index_name, vnm, sw_if_index);
+ }
+
+ if (is_del)
+ {
+ if (!a)
+ {
+ vnet_sw_interface_t *si = vnet_get_sw_interface (vnm, sw_if_index);
+ vnm->api_errno = VNET_API_ERROR_ADDRESS_NOT_FOUND_FOR_INTERFACE;
+ return clib_error_create ("%U not found for interface %U",
+ lm->format_address_and_length,
+ addr_fib, address_length,
+ format_vnet_sw_interface_name, vnm, si);
+ }
+
+ if (a->prev_this_sw_interface != ~0)
+ {
+ prev =
+ pool_elt_at_index (lm->if_address_pool,
+ a->prev_this_sw_interface);
+ prev->next_this_sw_interface = a->next_this_sw_interface;
+ }
+ if (a->next_this_sw_interface != ~0)
+ {
+ next =
+ pool_elt_at_index (lm->if_address_pool,
+ a->next_this_sw_interface);
+ next->prev_this_sw_interface = a->prev_this_sw_interface;
+
+ if (a->prev_this_sw_interface == ~0)
+ lm->if_address_pool_index_by_sw_if_index[sw_if_index] =
+ a->next_this_sw_interface;
+ }
+
+ if ((a->next_this_sw_interface == ~0)
+ && (a->prev_this_sw_interface == ~0))
+ lm->if_address_pool_index_by_sw_if_index[sw_if_index] = ~0;
+
+ mhash_unset (&lm->address_to_if_address_index, addr_fib,
+ /* old_value */ 0);
+ pool_put (lm->if_address_pool, a);
+
+ if (result_if_address_index)
+ *result_if_address_index = ~0;
+ }
+
+ else if (!a)
+ {
+ u32 pi; /* previous index */
+ u32 ai;
+ u32 hi; /* head index */
+
+ pool_get (lm->if_address_pool, a);
+ memset (a, ~0, sizeof (a[0]));
+ ai = a - lm->if_address_pool;
+
+ hi = pi = lm->if_address_pool_index_by_sw_if_index[sw_if_index];
+ prev = 0;
+ while (pi != (u32) ~ 0)
+ {
+ prev = pool_elt_at_index (lm->if_address_pool, pi);
+ pi = prev->next_this_sw_interface;
+ }
+ pi = prev ? prev - lm->if_address_pool : (u32) ~ 0;
+
+ a->address_key = mhash_set (&lm->address_to_if_address_index,
+ addr_fib, ai, /* old_value */ 0);
+ a->address_length = address_length;
+ a->sw_if_index = sw_if_index;
+ a->flags = 0;
+ a->prev_this_sw_interface = pi;
+ a->next_this_sw_interface = ~0;
+ if (prev)
+ prev->next_this_sw_interface = ai;
+
+ lm->if_address_pool_index_by_sw_if_index[sw_if_index] =
+ (hi != ~0) ? hi : ai;
+ if (result_if_address_index)
+ *result_if_address_index = ai;
+ }
+ else
+ {
+ if (result_if_address_index)
+ *result_if_address_index = a - lm->if_address_pool;
+ }
+
+
+ return /* no error */ 0;
+}
+
+void
+ip_lookup_init (ip_lookup_main_t * lm, u32 is_ip6)
+{
+ /* ensure that adjacency is cacheline aligned and sized */
+ STATIC_ASSERT (STRUCT_OFFSET_OF (ip_adjacency_t, cacheline0) == 0,
+ "Cache line marker must be 1st element in struct");
+ STATIC_ASSERT (STRUCT_OFFSET_OF (ip_adjacency_t, cacheline1) ==
+ CLIB_CACHE_LINE_BYTES,
+ "Data in cache line 0 is bigger than cache line size");
+
+ /* Point at the global adjacency pool */
+ lm->adjacency_heap = adj_pool;
+
+ if (!lm->fib_result_n_bytes)
+ lm->fib_result_n_bytes = sizeof (uword);
+
+ lm->is_ip6 = is_ip6;
+ if (is_ip6)
+ {
+ lm->format_address_and_length = format_ip6_address_and_length;
+ mhash_init (&lm->address_to_if_address_index, sizeof (uword),
+ sizeof (ip6_address_fib_t));
+ }
+ else
+ {
+ lm->format_address_and_length = format_ip4_address_and_length;
+ mhash_init (&lm->address_to_if_address_index, sizeof (uword),
+ sizeof (ip4_address_fib_t));
+ }
+
+ {
+ int i;
+
+ /* Setup all IP protocols to be punted and builtin-unknown. */
+ for (i = 0; i < 256; i++)
+ {
+ lm->local_next_by_ip_protocol[i] = IP_LOCAL_NEXT_PUNT;
+ lm->builtin_protocol_by_ip_protocol[i] = IP_BUILTIN_PROTOCOL_UNKNOWN;
+ }
+
+ lm->local_next_by_ip_protocol[IP_PROTOCOL_UDP] = IP_LOCAL_NEXT_UDP_LOOKUP;
+ lm->local_next_by_ip_protocol[is_ip6 ? IP_PROTOCOL_ICMP6 :
+ IP_PROTOCOL_ICMP] = IP_LOCAL_NEXT_ICMP;
+ lm->builtin_protocol_by_ip_protocol[IP_PROTOCOL_UDP] =
+ IP_BUILTIN_PROTOCOL_UDP;
+ lm->builtin_protocol_by_ip_protocol[is_ip6 ? IP_PROTOCOL_ICMP6 :
+ IP_PROTOCOL_ICMP] =
+ IP_BUILTIN_PROTOCOL_ICMP;
+ }
+}
+
+u8 *
+format_ip_flow_hash_config (u8 * s, va_list * args)
+{
+ flow_hash_config_t flow_hash_config = va_arg (*args, u32);
+
+#define _(n,v) if (flow_hash_config & v) s = format (s, "%s ", #n);
+ foreach_flow_hash_bit;
+#undef _
+
+ return s;
+}
+
+u8 *
+format_ip_lookup_next (u8 * s, va_list * args)
+{
+ ip_lookup_next_t n = va_arg (*args, ip_lookup_next_t);
+ char *t = 0;
+
+ switch (n)
+ {
+ default:
+ s = format (s, "unknown %d", n);
+ return s;
+
+ case IP_LOOKUP_NEXT_DROP:
+ t = "drop";
+ break;
+ case IP_LOOKUP_NEXT_PUNT:
+ t = "punt";
+ break;
+ case IP_LOOKUP_NEXT_ARP:
+ t = "arp";
+ break;
+ case IP_LOOKUP_NEXT_MIDCHAIN:
+ t = "midchain";
+ break;
+ case IP_LOOKUP_NEXT_GLEAN:
+ t = "glean";
+ break;
+ case IP_LOOKUP_NEXT_REWRITE:
+ break;
+ }
+
+ if (t)
+ vec_add (s, t, strlen (t));
+
+ return s;
+}
+
+u8 *
+format_ip_adjacency_packet_data (u8 * s, va_list * args)
+{
+ vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+ u32 adj_index = va_arg (*args, u32);
+ u8 *packet_data = va_arg (*args, u8 *);
+ u32 n_packet_data_bytes = va_arg (*args, u32);
+ ip_adjacency_t *adj = adj_get (adj_index);
+
+ switch (adj->lookup_next_index)
+ {
+ case IP_LOOKUP_NEXT_REWRITE:
+ s = format (s, "%U",
+ format_vnet_rewrite_header,
+ vnm->vlib_main, &adj->rewrite_header, packet_data,
+ n_packet_data_bytes);
+ break;
+
+ default:
+ break;
+ }
+
+ return s;
+}
+
+static uword
+unformat_dpo (unformat_input_t * input, va_list * args)
+{
+ dpo_id_t *dpo = va_arg (*args, dpo_id_t *);
+ fib_protocol_t fp = va_arg (*args, int);
+ dpo_proto_t proto;
+
+ proto = fib_proto_to_dpo (fp);
+
+ if (unformat (input, "drop"))
+ dpo_copy (dpo, drop_dpo_get (proto));
+ else if (unformat (input, "punt"))
+ dpo_copy (dpo, punt_dpo_get (proto));
+ else if (unformat (input, "local"))
+ receive_dpo_add_or_lock (proto, ~0, NULL, dpo);
+ else if (unformat (input, "null-send-unreach"))
+ ip_null_dpo_add_and_lock (proto, IP_NULL_ACTION_SEND_ICMP_UNREACH, dpo);
+ else if (unformat (input, "null-send-prohibit"))
+ ip_null_dpo_add_and_lock (proto, IP_NULL_ACTION_SEND_ICMP_PROHIBIT, dpo);
+ else if (unformat (input, "null"))
+ ip_null_dpo_add_and_lock (proto, IP_NULL_ACTION_NONE, dpo);
+ else if (unformat (input, "classify"))
+ {
+ u32 classify_table_index;
+
+ if (!unformat (input, "%d", &classify_table_index))
+ {
+ clib_warning ("classify adj must specify table index");
+ return 0;
+ }
+
+ dpo_set (dpo, DPO_CLASSIFY, proto,
+ classify_dpo_create (proto, classify_table_index));
+ }
+ else
+ return 0;
+
+ return 1;
+}
+
+const ip46_address_t zero_addr = {
+ .as_u64 = {
+ 0, 0},
+};
+
+u32
+fib_table_id_find_fib_index (fib_protocol_t proto, u32 table_id)
+{
+ ip4_main_t *im4 = &ip4_main;
+ ip6_main_t *im6 = &ip6_main;
+ uword *p;
+
+ switch (proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ p = hash_get (im4->fib_index_by_table_id, table_id);
+ break;
+ case FIB_PROTOCOL_IP6:
+ p = hash_get (im6->fib_index_by_table_id, table_id);
+ break;
+ default:
+ p = NULL;
+ break;
+ }
+ if (NULL != p)
+ {
+ return (p[0]);
+ }
+ return (~0);
+}
+
+clib_error_t *
+vnet_ip_route_cmd (vlib_main_t * vm,
+ unformat_input_t * main_input, vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ fib_route_path_t *rpaths = NULL, rpath;
+ dpo_id_t dpo = DPO_INVALID, *dpos = NULL;
+ fib_prefix_t *prefixs = NULL, pfx;
+ mpls_label_t out_label, via_label;
+ clib_error_t *error = NULL;
+ u32 table_id, is_del;
+ vnet_main_t *vnm;
+ u32 fib_index;
+ f64 count;
+ int i;
+
+ vnm = vnet_get_main ();
+ is_del = 0;
+ table_id = 0;
+ count = 1;
+ memset (&pfx, 0, sizeof (pfx));
+ out_label = via_label = MPLS_LABEL_INVALID;
+
+ /* Get a line of input. */
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ memset (&rpath, 0, sizeof (rpath));
+
+ if (unformat (line_input, "table %d", &table_id))
+ ;
+ else if (unformat (line_input, "del"))
+ is_del = 1;
+ else if (unformat (line_input, "add"))
+ is_del = 0;
+ else if (unformat (line_input, "resolve-via-host"))
+ {
+ if (vec_len (rpaths) == 0)
+ {
+ error = clib_error_return (0, "Paths then flags");
+ goto done;
+ }
+ rpaths[vec_len (rpaths) - 1].frp_flags |=
+ FIB_ROUTE_PATH_RESOLVE_VIA_HOST;
+ }
+ else if (unformat (line_input, "resolve-via-attached"))
+ {
+ if (vec_len (rpaths) == 0)
+ {
+ error = clib_error_return (0, "Paths then flags");
+ goto done;
+ }
+ rpaths[vec_len (rpaths) - 1].frp_flags |=
+ FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED;
+ }
+ else if (unformat (line_input, "out-label %U",
+ unformat_mpls_unicast_label, &out_label))
+ {
+ if (vec_len (rpaths) == 0)
+ {
+ error = clib_error_return (0, "Paths then labels");
+ goto done;
+ }
+ vec_add1 (rpaths[vec_len (rpaths) - 1].frp_label_stack, out_label);
+ }
+ else if (unformat (line_input, "via-label %U",
+ unformat_mpls_unicast_label, &rpath.frp_local_label))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_proto = FIB_PROTOCOL_MPLS;
+ rpath.frp_sw_if_index = ~0;
+ vec_add1 (rpaths, rpath);
+ }
+ else if (unformat (line_input, "count %f", &count))
+ ;
+
+ else if (unformat (line_input, "%U/%d",
+ unformat_ip4_address, &pfx.fp_addr.ip4, &pfx.fp_len))
+ {
+ pfx.fp_proto = FIB_PROTOCOL_IP4;
+ vec_add1 (prefixs, pfx);
+ }
+ else if (unformat (line_input, "%U/%d",
+ unformat_ip6_address, &pfx.fp_addr.ip6, &pfx.fp_len))
+ {
+ pfx.fp_proto = FIB_PROTOCOL_IP6;
+ vec_add1 (prefixs, pfx);
+ }
+ else if (unformat (line_input, "via %U %U weight %u",
+ unformat_ip4_address,
+ &rpath.frp_addr.ip4,
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index, &rpath.frp_weight))
+ {
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ vec_add1 (rpaths, rpath);
+ }
+
+ else if (unformat (line_input, "via %U %U weight %u",
+ unformat_ip6_address,
+ &rpath.frp_addr.ip6,
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index, &rpath.frp_weight))
+ {
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ vec_add1 (rpaths, rpath);
+ }
+
+ else if (unformat (line_input, "via %U %U",
+ unformat_ip4_address,
+ &rpath.frp_addr.ip4,
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ vec_add1 (rpaths, rpath);
+ }
+
+ else if (unformat (line_input, "via %U %U",
+ unformat_ip6_address,
+ &rpath.frp_addr.ip6,
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ vec_add1 (rpaths, rpath);
+ }
+ else if (unformat (line_input, "via %U next-hop-table %d",
+ unformat_ip4_address,
+ &rpath.frp_addr.ip4, &rpath.frp_fib_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ vec_add1 (rpaths, rpath);
+ }
+ else if (unformat (line_input, "via %U next-hop-table %d",
+ unformat_ip6_address,
+ &rpath.frp_addr.ip6, &rpath.frp_fib_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ vec_add1 (rpaths, rpath);
+ }
+ else if (unformat (line_input, "via %U",
+ unformat_ip4_address, &rpath.frp_addr.ip4))
+ {
+ /*
+ * the recursive next-hops are by default in the same table
+ * as the prefix
+ */
+ rpath.frp_fib_index = table_id;
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ vec_add1 (rpaths, rpath);
+ }
+ else if (unformat (line_input, "via %U",
+ unformat_ip6_address, &rpath.frp_addr.ip6))
+ {
+ rpath.frp_fib_index = table_id;
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ vec_add1 (rpaths, rpath);
+ }
+ else if (unformat (line_input,
+ "lookup in table %d", &rpath.frp_fib_index))
+ {
+ rpath.frp_proto = pfx.fp_proto;
+ rpath.frp_sw_if_index = ~0;
+ vec_add1 (rpaths, rpath);
+ }
+ else if (vec_len (prefixs) > 0 &&
+ unformat (line_input, "via %U",
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_proto = prefixs[0].fp_proto;
+ vec_add1 (rpaths, rpath);
+ }
+ else if (vec_len (prefixs) > 0 &&
+ unformat (line_input, "via %U",
+ unformat_dpo, &dpo, prefixs[0].fp_proto))
+ {
+ vec_add1 (dpos, dpo);
+ }
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+
+ unformat_free (line_input);
+
+ if (vec_len (prefixs) == 0)
+ {
+ error =
+ clib_error_return (0, "expected ip4/ip6 destination address/length.");
+ goto done;
+ }
+
+ if (!is_del && vec_len (rpaths) + vec_len (dpos) == 0)
+ {
+ error = clib_error_return (0, "expected paths.");
+ goto done;
+ }
+
+ if (~0 == table_id)
+ {
+ /*
+ * if no table_id is passed we will manipulate the default
+ */
+ fib_index = 0;
+ }
+ else
+ {
+ fib_index = fib_table_id_find_fib_index (prefixs[0].fp_proto, table_id);
+
+ if (~0 == fib_index)
+ {
+ error = clib_error_return (0, "Nonexistent table id %d", table_id);
+ goto done;
+ }
+ }
+
+ for (i = 0; i < vec_len (prefixs); i++)
+ {
+ if (is_del && 0 == vec_len (rpaths))
+ {
+ fib_table_entry_delete (fib_index, &prefixs[i], FIB_SOURCE_CLI);
+ }
+ else if (!is_del && 1 == vec_len (dpos))
+ {
+ fib_table_entry_special_dpo_add (fib_index,
+ &prefixs[i],
+ FIB_SOURCE_CLI,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ &dpos[0]);
+ dpo_reset (&dpos[0]);
+ }
+ else if (vec_len (dpos) > 0)
+ {
+ error =
+ clib_error_return (0,
+ "Load-balancing over multiple special adjacencies is unsupported");
+ goto done;
+ }
+ else if (0 < vec_len (rpaths))
+ {
+ u32 k, j, n, incr;
+ ip46_address_t dst = prefixs[i].fp_addr;
+ f64 t[2];
+ n = count;
+ t[0] = vlib_time_now (vm);
+ incr = 1 << ((FIB_PROTOCOL_IP4 == prefixs[0].fp_proto ? 32 : 128) -
+ prefixs[i].fp_len);
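+
+ /* 'incr' is one prefix-length worth of address space; each pass of the
+ loop below steps 'dst' to the next adjacent prefix, so 'count N'
+ installs N distinct routes. */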
+
+ for (k = 0; k < n; k++)
+ {
+ for (j = 0; j < vec_len (rpaths); j++)
+ {
+ u32 fi;
+ /*
+ * the CLI parsing stored table IDs; swap them for FIB indices
+ */
+ fi = fib_table_id_find_fib_index (prefixs[i].fp_proto,
+ rpaths[j].frp_fib_index);
+
+ if (~0 == fi)
+ {
+ error =
+ clib_error_return (0, "Via table %d does not exist",
+ rpaths[j].frp_fib_index);
+ goto done;
+ }
+ rpaths[j].frp_fib_index = fi;
+
+ fib_prefix_t rpfx = {
+ .fp_len = prefixs[i].fp_len,
+ .fp_proto = prefixs[i].fp_proto,
+ .fp_addr = dst,
+ };
+
+ if (is_del)
+ fib_table_entry_path_remove2 (fib_index,
+ &rpfx,
+ FIB_SOURCE_CLI, &rpaths[j]);
+ else
+ fib_table_entry_path_add2 (fib_index,
+ &rpfx,
+ FIB_SOURCE_CLI,
+ FIB_ENTRY_FLAG_NONE,
+ &rpaths[j]);
+ }
+
+ if (FIB_PROTOCOL_IP4 == prefixs[0].fp_proto)
+ {
+ dst.ip4.as_u32 =
+ clib_host_to_net_u32 (incr +
+ clib_net_to_host_u32 (dst.ip4.as_u32));
+ }
+ else
+ {
+ int bucket = (incr < 64 ? 0 : 1);
+ dst.ip6.as_u64[bucket] =
+ clib_host_to_net_u64 (incr +
+ clib_net_to_host_u64 (dst.ip6.as_u64[bucket]));
+
+ }
+ }
+ t[1] = vlib_time_now (vm);
+ if (count > 1)
+ vlib_cli_output (vm, "%.6e routes/sec", count / (t[1] - t[0]));
+ }
+ else
+ {
+ error = clib_error_return (0, "Don't understand what you want...");
+ goto done;
+ }
+ }
+
+
+done:
+ vec_free (dpos);
+ vec_free (prefixs);
+ vec_free (rpaths);
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vlib_cli_ip_command, static) = {
+ .path = "ip",
+ .short_help = "Internet protocol (IP) commands",
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vlib_cli_ip6_command, static) = {
+ .path = "ip6",
+ .short_help = "Internet protocol version 6 (IPv6) commands",
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vlib_cli_show_ip_command, static) = {
+ .path = "show ip",
+ .short_help = "Internet protocol (IP) show commands",
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vlib_cli_show_ip6_command, static) = {
+ .path = "show ip6",
+ .short_help = "Internet protocol version 6 (IPv6) show commands",
+};
+/* *INDENT-ON* */
+
+/*?
+ * This command is used to add or delete IPv4 or IPv6 routes. All
+ * IP Addresses ('<em><dst-ip-addr>/<width></em>',
+ * '<em><next-hop-ip-addr></em>' and '<em><adj-hop-ip-addr></em>')
+ * can be IPv4 or IPv6, but all must be of the same form in a single
+ * command. To display the current set of routes, use the commands
+ * '<em>show ip fib</em>' and '<em>show ip6 fib</em>'.
+ *
+ * @cliexpar
+ * Example of how to add a straightforward static route:
+ * @cliexcmd{ip route add 6.0.1.2/32 via 6.0.0.1 GigabitEthernet2/0/0}
+ * Example of how to delete a straightforward static route:
+ * @cliexcmd{ip route del 6.0.1.2/32 via 6.0.0.1 GigabitEthernet2/0/0}
+ * Mainly for route add/del performance testing, one can add or delete
+ * multiple routes by adding 'count N' to the previous item:
+ * @cliexcmd{ip route add count 10 7.0.0.0/24 via 6.0.0.1 GigabitEthernet2/0/0}
+ * Add multiple routes for the same destination to create equal-cost multipath:
+ * @cliexcmd{ip route add 7.0.0.1/32 via 6.0.0.1 GigabitEthernet2/0/0}
+ * @cliexcmd{ip route add 7.0.0.1/32 via 6.0.0.2 GigabitEthernet2/0/0}
+ * For unequal-cost multipath, specify the desired weights. This
+ * combination of weights results in 3/4 of the traffic following the
+ * second path, 1/4 following the first path:
+ * @cliexcmd{ip route add 7.0.0.1/32 via 6.0.0.1 GigabitEthernet2/0/0 weight 1}
+ * @cliexcmd{ip route add 7.0.0.1/32 via 6.0.0.2 GigabitEthernet2/0/0 weight 3}
+ * To add a route to a particular FIB table (VRF), use:
+ * @cliexcmd{ip route add 172.16.24.0/24 table 7 via GigabitEthernet2/0/0}
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ip_route_command, static) = {
+ .path = "ip route",
+ .short_help = "ip route [add|del] [count <n>] <dst-ip-addr>/<width> [table <table-id>] [via <next-hop-ip-addr> [<interface>] [weight <weight>]] | [via arp <interface> <adj-hop-ip-addr>] | [via drop|punt|local<id>|arp|classify <classify-idx>] [lookup in table <out-table-id>]",
+ .function = vnet_ip_route_cmd,
+ .is_mp_safe = 1,
+};
+/* *INDENT-ON* */
+
+/*
+ * The next two routines address a longstanding script hemorrhoid.
+ * Probing a v4 or v6 neighbor needs to appear to be synchronous,
+ * or dependent route-adds will simply fail.
+ */
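+/*
+ * e.g. (editor's note) a startup script can run
+ *   ip probe-neighbor GigabitEthernet2/0/0 6.0.0.1 retry 5
+ * and only issue the dependent "ip route add ... via 6.0.0.1" once this
+ * command returns successfully.
+ */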
+static clib_error_t *
+ip6_probe_neighbor_wait (vlib_main_t * vm, ip6_address_t * a, u32 sw_if_index,
+ int retry_count)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *e;
+ int i;
+ int resolved = 0;
+ uword event_type;
+ uword *event_data = 0;
+
+ ASSERT (vlib_in_process_context (vm));
+
+ if (retry_count > 0)
+ vnet_register_ip6_neighbor_resolution_event
+ (vnm, a, vlib_get_current_process (vm)->node_runtime.node_index,
+ 1 /* event */ , 0 /* data */ );
+
+ for (i = 0; i < retry_count; i++)
+ {
+ /* The interface may be down, etc. */
+ e = ip6_probe_neighbor (vm, a, sw_if_index);
+
+ if (e)
+ return e;
+
+ vlib_process_wait_for_event_or_clock (vm, 1.0);
+ event_type = vlib_process_get_events (vm, &event_data);
+ switch (event_type)
+ {
+ case 1: /* resolved... */
+ vlib_cli_output (vm, "Resolved %U", format_ip6_address, a);
+ resolved = 1;
+ goto done;
+
+ case ~0: /* timeout */
+ break;
+
+ default:
+ clib_warning ("unknown event_type %d", event_type);
+ }
+ vec_reset_length (event_data);
+ }
+
+done:
+
+ vec_free (event_data);
+
+ if (!resolved)
+ return clib_error_return (0, "Resolution failed for %U",
+ format_ip6_address, a);
+ return 0;
+}
+
+static clib_error_t *
+ip4_probe_neighbor_wait (vlib_main_t * vm, ip4_address_t * a, u32 sw_if_index,
+ int retry_count)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *e;
+ int i;
+ int resolved = 0;
+ uword event_type;
+ uword *event_data = 0;
+
+ ASSERT (vlib_in_process_context (vm));
+
+ if (retry_count > 0)
+ vnet_register_ip4_arp_resolution_event
+ (vnm, a, vlib_get_current_process (vm)->node_runtime.node_index,
+ 1 /* event */ , 0 /* data */ );
+
+ for (i = 0; i < retry_count; i++)
+ {
+ /* The interface may be down, etc. */
+ e = ip4_probe_neighbor (vm, a, sw_if_index);
+
+ if (e)
+ return e;
+
+ vlib_process_wait_for_event_or_clock (vm, 1.0);
+ event_type = vlib_process_get_events (vm, &event_data);
+ switch (event_type)
+ {
+ case 1: /* resolved... */
+ vlib_cli_output (vm, "Resolved %U", format_ip4_address, a);
+ resolved = 1;
+ goto done;
+
+ case ~0: /* timeout */
+ break;
+
+ default:
+ clib_warning ("unknown event_type %d", event_type);
+ }
+ vec_reset_length (event_data);
+ }
+
+done:
+
+ vec_free (event_data);
+
+ if (!resolved)
+ return clib_error_return (0, "Resolution failed for %U",
+ format_ip4_address, a);
+ return 0;
+}
+
+static clib_error_t *
+probe_neighbor_address (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ unformat_input_t _line_input, *line_input = &_line_input;
+ ip4_address_t a4;
+ ip6_address_t a6;
+ clib_error_t *error = 0;
+ u32 sw_if_index = ~0;
+ int retry_count = 3;
+ int is_ip4 = 1;
+ int address_set = 0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat_user (line_input, unformat_vnet_sw_interface, vnm,
+ &sw_if_index))
+ ;
+ else if (unformat (line_input, "retry %d", &retry_count))
+ ;
+
+ else if (unformat (line_input, "%U", unformat_ip4_address, &a4))
+ address_set++;
+ else if (unformat (line_input, "%U", unformat_ip6_address, &a6))
+ {
+ address_set++;
+ is_ip4 = 0;
+ }
+ else
+ return clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "Interface required, not set.");
+ if (address_set == 0)
+ return clib_error_return (0, "ip address required, not set.");
+ if (address_set > 1)
+ return clib_error_return (0, "Multiple ip addresses not supported.");
+
+ if (is_ip4)
+ error = ip4_probe_neighbor_wait (vm, &a4, sw_if_index, retry_count);
+ else
+ error = ip6_probe_neighbor_wait (vm, &a6, sw_if_index, retry_count);
+
+ return error;
+}
+
+/*?
+ * The '<em>ip probe-neighbor</em>' command ARPs for IPv4 addresses or
+ * attempts IPv6 neighbor discovery depending on the supplied IP address
+ * format.
+ *
+ * @note This command will not immediately affect the indicated FIB; it
+ * is not suitable for use in establishing a FIB entry prior to adding
+ * recursive FIB entries. For example, don't use it in a script to probe
+ * a gateway prior to adding a default route; it won't work. Instead,
+ * configure a static ARP cache entry [see '<em>set ip arp</em>'], or
+ * a static IPv6 neighbor [see '<em>set ip6 neighbor</em>'].
+ *
+ * @cliexpar
+ * Example of probe for an IPv4 address:
+ * @cliexcmd{ip probe-neighbor GigabitEthernet2/0/0 172.16.1.2}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ip_probe_neighbor_command, static) = {
+ .path = "ip probe-neighbor",
+ .function = probe_neighbor_address,
+ .short_help = "ip probe-neighbor <interface> <ip4-addr> | <ip6-addr> [retry nn]",
+ .is_mp_safe = 1,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/lookup.h b/src/vnet/ip/lookup.h
new file mode 100644
index 00000000000..3dbd7b3b8e8
--- /dev/null
+++ b/src/vnet/ip/lookup.h
@@ -0,0 +1,498 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/ip_lookup.h: ip (4 or 6) lookup structures, adjacencies, ...
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * Definitions for all things IP (v4|v6) unicast and multicast lookup related.
+ *
+ * - Adjacency definitions and registration.
+ * - Callbacks on route add.
+ * - Callbacks on interface address change.
+ */
+#ifndef included_ip_lookup_h
+#define included_ip_lookup_h
+
+#include <vnet/vnet.h>
+#include <vlib/buffer.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/fib/fib_node.h>
+#include <vnet/dpo/dpo.h>
+#include <vnet/feature/feature.h>
+
+/** @brief Common (IP4/IP6) next index stored in adjacency. */
+typedef enum
+{
+ /** Adjacency to drop this packet. */
+ IP_LOOKUP_NEXT_DROP,
+ /** Adjacency to punt this packet. */
+ IP_LOOKUP_NEXT_PUNT,
+
+ /** This packet is for one of our own IP addresses. */
+ IP_LOOKUP_NEXT_LOCAL,
+
+ /** This packet matches an "incomplete adjacency" and packets
+ need to be passed to ARP to find rewrite string for
+ this destination. */
+ IP_LOOKUP_NEXT_ARP,
+
+ /** This packet matches an "interface route" and packets
+ need to be passed to ARP to find rewrite string for
+ this destination. */
+ IP_LOOKUP_NEXT_GLEAN,
+
+ /** This packet is to be rewritten and forwarded to the next
+ processing node. This is typically the output interface but
+ might be another node for further output processing. */
+ IP_LOOKUP_NEXT_REWRITE,
+
+ /** These packets follow a load-balance */
+ IP_LOOKUP_NEXT_LOAD_BALANCE,
+
+ /** These packets follow a mid-chain adjacency */
+ IP_LOOKUP_NEXT_MIDCHAIN,
+
+ /** These packets need to go to ICMP error */
+ IP_LOOKUP_NEXT_ICMP_ERROR,
+
+ IP_LOOKUP_N_NEXT,
+} ip_lookup_next_t;
+
+typedef enum
+{
+ IP4_LOOKUP_N_NEXT = IP_LOOKUP_N_NEXT,
+} ip4_lookup_next_t;
+
+typedef enum
+{
+ /* Hop-by-hop header handling */
+ IP6_LOOKUP_NEXT_HOP_BY_HOP = IP_LOOKUP_N_NEXT,
+ IP6_LOOKUP_NEXT_ADD_HOP_BY_HOP,
+ IP6_LOOKUP_NEXT_POP_HOP_BY_HOP,
+ IP6_LOOKUP_N_NEXT,
+} ip6_lookup_next_t;
+
+#define IP4_LOOKUP_NEXT_NODES { \
+ [IP_LOOKUP_NEXT_DROP] = "ip4-drop", \
+ [IP_LOOKUP_NEXT_PUNT] = "ip4-punt", \
+ [IP_LOOKUP_NEXT_LOCAL] = "ip4-local", \
+ [IP_LOOKUP_NEXT_ARP] = "ip4-arp", \
+ [IP_LOOKUP_NEXT_GLEAN] = "ip4-glean", \
+ [IP_LOOKUP_NEXT_REWRITE] = "ip4-rewrite", \
+ [IP_LOOKUP_NEXT_MIDCHAIN] = "ip4-midchain", \
+ [IP_LOOKUP_NEXT_LOAD_BALANCE] = "ip4-load-balance", \
+ [IP_LOOKUP_NEXT_ICMP_ERROR] = "ip4-icmp-error", \
+}
+
+#define IP6_LOOKUP_NEXT_NODES { \
+ [IP_LOOKUP_NEXT_DROP] = "ip6-drop", \
+ [IP_LOOKUP_NEXT_PUNT] = "ip6-punt", \
+ [IP_LOOKUP_NEXT_LOCAL] = "ip6-local", \
+ [IP_LOOKUP_NEXT_ARP] = "ip6-discover-neighbor", \
+ [IP_LOOKUP_NEXT_GLEAN] = "ip6-glean", \
+ [IP_LOOKUP_NEXT_REWRITE] = "ip6-rewrite", \
+ [IP_LOOKUP_NEXT_MIDCHAIN] = "ip6-midchain", \
+ [IP_LOOKUP_NEXT_LOAD_BALANCE] = "ip6-load-balance", \
+ [IP_LOOKUP_NEXT_ICMP_ERROR] = "ip6-icmp-error", \
+ [IP6_LOOKUP_NEXT_HOP_BY_HOP] = "ip6-hop-by-hop", \
+ [IP6_LOOKUP_NEXT_ADD_HOP_BY_HOP] = "ip6-add-hop-by-hop", \
+ [IP6_LOOKUP_NEXT_POP_HOP_BY_HOP] = "ip6-pop-hop-by-hop", \
+}
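+
+/*
+ * Illustrative sketch (not part of the original header): the next-node
+ * tables above are meant to be pasted into a lookup node's registration;
+ * the node and function names below are hypothetical.
+ *
+ *   VLIB_REGISTER_NODE (my_ip4_lookup_node) = {
+ *     .function = my_ip4_lookup,
+ *     .name = "my-ip4-lookup",
+ *     .vector_size = sizeof (u32),
+ *     .n_next_nodes = IP4_LOOKUP_N_NEXT,
+ *     .next_nodes = IP4_LOOKUP_NEXT_NODES,
+ *   };
+ *
+ * An ip_lookup_next_t value can then be used directly as the next index
+ * when enqueueing buffers from that node.
+ */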
+
+/** Flow hash configuration */
+#define IP_FLOW_HASH_SRC_ADDR (1<<0)
+#define IP_FLOW_HASH_DST_ADDR (1<<1)
+#define IP_FLOW_HASH_PROTO (1<<2)
+#define IP_FLOW_HASH_SRC_PORT (1<<3)
+#define IP_FLOW_HASH_DST_PORT (1<<4)
+#define IP_FLOW_HASH_REVERSE_SRC_DST (1<<5)
+
+/** Default: 5-tuple without the "reverse" bit */
+#define IP_FLOW_HASH_DEFAULT (0x1F)
+
+#define foreach_flow_hash_bit \
+_(src, IP_FLOW_HASH_SRC_ADDR) \
+_(dst, IP_FLOW_HASH_DST_ADDR) \
+_(sport, IP_FLOW_HASH_SRC_PORT) \
+_(dport, IP_FLOW_HASH_DST_PORT) \
+_(proto, IP_FLOW_HASH_PROTO) \
+_(reverse, IP_FLOW_HASH_REVERSE_SRC_DST)
+
+/**
+ * A flow hash configuration is a mask of the flow hash options
+ */
+typedef u32 flow_hash_config_t;
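+
+/*
+ * Illustrative example: a configuration hashing on the classic 5-tuple
+ * is the OR of the first five bits, which is exactly
+ * IP_FLOW_HASH_DEFAULT ((1<<0)|(1<<1)|(1<<2)|(1<<3)|(1<<4) == 0x1F):
+ *
+ *   flow_hash_config_t cfg = IP_FLOW_HASH_SRC_ADDR | IP_FLOW_HASH_DST_ADDR
+ *     | IP_FLOW_HASH_PROTO | IP_FLOW_HASH_SRC_PORT | IP_FLOW_HASH_DST_PORT;
+ */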
+
+/**
+ * Forward declaration
+ */
+struct ip_adjacency_t_;
+
+/**
+ * @brief A function type for post-rewrite fixups on midchain adjacency
+ */
+typedef void (*adj_midchain_fixup_t) (vlib_main_t * vm,
+ struct ip_adjacency_t_ * adj,
+ vlib_buffer_t * b0);
+
+/**
+ * @brief Flags on an IP adjacency
+ */
+typedef enum ip_adjacency_flags_t_
+{
+ /**
+ * Currently a sync walk is active. Used to prevent re-entrant walking
+ */
+ IP_ADJ_SYNC_WALK_ACTIVE = (1 << 0),
+} ip_adjacency_flags_t;
+
+/** @brief IP unicast adjacency.
+ @note cache aligned.
+*/
+typedef struct ip_adjacency_t_
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+ /** Number of adjacencies in block. Greater than 1 means multipath;
+ otherwise equal to 1. */
+ u16 n_adj;
+
+ /** Next hop after ip4-lookup. */
+ union
+ {
+ ip_lookup_next_t lookup_next_index:16;
+ u16 lookup_next_index_as_int;
+ };
+
+ /** Interface address index for this local/arp adjacency. */
+ u32 if_address_index;
+
+ /** Force re-lookup in a different FIB. ~0 => normal behavior */
+ u16 mcast_group_index;
+
+ /** Highest possible perf subgraph arc interposition, e.g. for ip6 ioam */
+ u16 saved_lookup_next_index;
+
+ /*
+ * link/ether-type
+ */
+ vnet_link_t ia_link;
+ u8 ia_nh_proto;
+
+ union
+ {
+ /**
+ * IP_LOOKUP_NEXT_ARP/IP_LOOKUP_NEXT_REWRITE
+ *
+ * neighbour adjacency sub-type;
+ */
+ struct
+ {
+ ip46_address_t next_hop;
+ } nbr;
+ /**
+ * IP_LOOKUP_NEXT_MIDCHAIN
+ *
+ * A nbr adj that is also recursive. Think tunnels.
+ * A nbr adj can transition to be of type MIDCHAIN,
+ * so be sure to keep the next_hop fields of the two
+ * structs aligned.
+ */
+ struct
+ {
+ /**
+ * The recursive next-hop
+ */
+ ip46_address_t next_hop;
+ /**
+ * The node index of the tunnel's post rewrite/TX function.
+ */
+ u32 tx_function_node;
+ /**
+ * The next DPO to use
+ */
+ dpo_id_t next_dpo;
+ /**
+ * A function to perform the post-rewrite fixup
+ */
+ adj_midchain_fixup_t fixup_func;
+ } midchain;
+ /**
+ * IP_LOOKUP_NEXT_GLEAN
+ *
+ * Glean the address to ARP for from the packet's destination
+ */
+ struct
+ {
+ ip46_address_t receive_addr;
+ } glean;
+ } sub_type;
+
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
+
+ /* Rewrite in second/third cache lines */
+ vnet_declare_rewrite (VLIB_BUFFER_PRE_DATA_SIZE);
+
+ /*
+ * members not accessed in the data plane are relegated to the
+ * remaining cachelines
+ */
+ fib_node_t ia_node;
+
+ /**
+ * Flags on the adjacency
+ */
+ ip_adjacency_flags_t ia_flags;
+
+} ip_adjacency_t;
+
+STATIC_ASSERT ((STRUCT_OFFSET_OF (ip_adjacency_t, cacheline0) == 0),
+ "IP adjacency cacheline 0 is not at offset 0");
+STATIC_ASSERT ((STRUCT_OFFSET_OF (ip_adjacency_t, cacheline1) ==
+ CLIB_CACHE_LINE_BYTES),
+ "IP adjacency cacheline 1 is not one cacheline offset");
+
+/* An all zeros address */
+extern const ip46_address_t zero_addr;
+
+/* IP multicast adjacency. */
+typedef struct
+{
+ /* Handle for this adjacency in adjacency heap. */
+ u32 heap_handle;
+
+ /* Number of adjacencies in block. */
+ u32 n_adj;
+
+ /* Rewrite string. */
+ vnet_declare_rewrite (64 - 2 * sizeof (u32));
+}
+ip_multicast_rewrite_t;
+
+typedef struct
+{
+ /* ip4-multicast-rewrite next index. */
+ u32 next_index;
+
+ u8 n_rewrite_bytes;
+
+ u8 rewrite_string[64 - 1 * sizeof (u32) - 1 * sizeof (u8)];
+}
+ip_multicast_rewrite_string_t;
+
+typedef struct
+{
+ ip_multicast_rewrite_t *rewrite_heap;
+
+ ip_multicast_rewrite_string_t *rewrite_strings;
+
+ /* Negative => rewrite string index; >= 0 => sw_if_index.
+ Sorted. Used to hash. */
+ i32 **adjacency_id_vector;
+
+ uword *adjacency_by_id_vector;
+} ip_multicast_lookup_main_t;
+
+typedef struct
+{
+ /* Key for mhash; in fact, just a byte offset into mhash key vector. */
+ u32 address_key;
+
+ /* Interface which has this address. */
+ u32 sw_if_index;
+
+ /* Adjacency for neighbor probe (ARP) for this interface address. */
+ u32 neighbor_probe_adj_index;
+
+ /* Address (prefix) length for this interface. */
+ u16 address_length;
+
+ /* Will be used for something eventually. Primary vs. secondary? */
+ u16 flags;
+
+ /* Next and previous pointers for doubly linked list of
+ addresses per software interface. */
+ u32 next_this_sw_interface;
+ u32 prev_this_sw_interface;
+} ip_interface_address_t;
+
+typedef enum
+{
+ IP_LOCAL_NEXT_DROP,
+ IP_LOCAL_NEXT_PUNT,
+ IP_LOCAL_NEXT_UDP_LOOKUP,
+ IP_LOCAL_NEXT_ICMP,
+ IP_LOCAL_N_NEXT,
+} ip_local_next_t;
+
+struct ip_lookup_main_t;
+
+typedef struct ip_lookup_main_t
+{
+ /* Adjacency heap. */
+ ip_adjacency_t *adjacency_heap;
+
+ /** load-balance packet/byte counters indexed by LB index. */
+ vlib_combined_counter_main_t load_balance_counters;
+
+ /** Pool of addresses that are assigned to interfaces. */
+ ip_interface_address_t *if_address_pool;
+
+ /** Hash table mapping address to index in interface address pool. */
+ mhash_t address_to_if_address_index;
+
+ /** Head of doubly linked list of interface addresses for each software interface.
+ ~0 means this interface has no address. */
+ u32 *if_address_pool_index_by_sw_if_index;
+
+ /** First table index to use for this interface, ~0 => none */
+ u32 *classify_table_index_by_sw_if_index;
+
+ /** Feature arc indices */
+ u8 mcast_feature_arc_index;
+ u8 ucast_feature_arc_index;
+ u8 output_feature_arc_index;
+
+ /** Number of bytes in a fib result. Must be at least
+ sizeof (uword). First word is always adjacency index. */
+ u32 fib_result_n_bytes, fib_result_n_words;
+
+ format_function_t *format_fib_result;
+
+ /** 1 for ip6; 0 for ip4. */
+ u32 is_ip6;
+
+ /** Either format_ip4_address_and_length or format_ip6_address_and_length. */
+ format_function_t *format_address_and_length;
+
+ /** Special adjacency format functions */
+ format_function_t **special_adjacency_format_functions;
+
+ /** Table mapping ip protocol to ip[46]-local node next index. */
+ u8 local_next_by_ip_protocol[256];
+
+ /** IP_BUILTIN_PROTOCOL_{TCP,UDP,ICMP,OTHER} by protocol in IP header. */
+ u8 builtin_protocol_by_ip_protocol[256];
+} ip_lookup_main_t;
+
+always_inline ip_adjacency_t *
+ip_get_adjacency (ip_lookup_main_t * lm, u32 adj_index)
+{
+ ip_adjacency_t *adj;
+
+ adj = vec_elt_at_index (lm->adjacency_heap, adj_index);
+
+ return adj;
+}
+
+#define ip_prefetch_adjacency(lm,adj_index,type) \
+do { \
+ ip_adjacency_t * _adj = (lm)->adjacency_heap + (adj_index); \
+ CLIB_PREFETCH (_adj, sizeof (_adj[0]), type); \
+} while (0)
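+
+/*
+ * Typical usage sketch (illustrative): prefetch the adjacency for the
+ * next packet while handling the current one, then fetch by index.
+ *
+ *   ip_prefetch_adjacency (lm, adj_index1, LOAD);
+ *   ip_adjacency_t *adj0 = ip_get_adjacency (lm, adj_index0);
+ *   next0 = adj0->lookup_next_index;
+ */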
+
+/* Create new block of given number of contiguous adjacencies. */
+ip_adjacency_t *ip_add_adjacency (ip_lookup_main_t * lm,
+ ip_adjacency_t * adj,
+ u32 n_adj, u32 * adj_index_result);
+
+clib_error_t *ip_interface_address_add_del (ip_lookup_main_t * lm,
+ u32 sw_if_index,
+ void *address,
+ u32 address_length,
+ u32 is_del, u32 * result_index);
+
+u8 *format_ip_flow_hash_config (u8 * s, va_list * args);
+
+always_inline ip_interface_address_t *
+ip_get_interface_address (ip_lookup_main_t * lm, void *addr_fib)
+{
+ uword *p = mhash_get (&lm->address_to_if_address_index, addr_fib);
+ return p ? pool_elt_at_index (lm->if_address_pool, p[0]) : 0;
+}
+
+u32 fib_table_id_find_fib_index (fib_protocol_t proto, u32 table_id);
+
+always_inline void *
+ip_interface_address_get_address (ip_lookup_main_t * lm,
+ ip_interface_address_t * a)
+{
+ return mhash_key_to_mem (&lm->address_to_if_address_index, a->address_key);
+}
+
+/* *INDENT-OFF* */
+#define foreach_ip_interface_address(lm,a,sw_if_index,loop,body) \
+do { \
+ vnet_main_t *_vnm = vnet_get_main(); \
+ u32 _sw_if_index = sw_if_index; \
+ vnet_sw_interface_t *_swif; \
+ _swif = vnet_get_sw_interface (_vnm, _sw_if_index); \
+ \
+ /* \
+ * Loop => honor unnumbered interface addressing. \
+ */ \
+ if (loop && _swif->flags & VNET_SW_INTERFACE_FLAG_UNNUMBERED) \
+ _sw_if_index = _swif->unnumbered_sw_if_index; \
+ u32 _ia = \
+ (vec_len((lm)->if_address_pool_index_by_sw_if_index) \
+ > (_sw_if_index)) \
+ ? vec_elt ((lm)->if_address_pool_index_by_sw_if_index, \
+ (_sw_if_index)) : (u32)~0; \
+ ip_interface_address_t * _a; \
+ while (_ia != ~0) \
+ { \
+ _a = pool_elt_at_index ((lm)->if_address_pool, _ia); \
+ _ia = _a->next_this_sw_interface; \
+ (a) = _a; \
+ body; \
+ } \
+} while (0)
+/* *INDENT-ON* */
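+
+/*
+ * Usage sketch (illustrative): count the addresses configured on an
+ * interface, honoring unnumbered-interface borrowing (the '1' loop
+ * argument); see ip6_interface_first_address() in ping.c for a real use.
+ *
+ *   u32 n = 0;
+ *   ip_interface_address_t *ia;
+ *   foreach_ip_interface_address (lm, ia, sw_if_index, 1, ({ n++; }));
+ */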
+
+void ip_lookup_init (ip_lookup_main_t * lm, u32 ip_lookup_node_index);
+
+#endif /* included_ip_lookup_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ping.c b/src/vnet/ip/ping.c
new file mode 100644
index 00000000000..68dbe759ebc
--- /dev/null
+++ b/src/vnet/ip/ping.c
@@ -0,0 +1,888 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ping.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/fib_entry.h>
+
+/**
+ * @file
+ * @brief IPv4 and IPv6 ICMP Ping.
+ *
+ * This file contains code to support sending IPv4 or IPv6 ICMP
+ * ECHO_REQUEST packets to network hosts.
+ *
+ */
+
+
+u8 *
+format_icmp_echo_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ icmp_echo_trace_t *t = va_arg (*va, icmp_echo_trace_t *);
+
+ s = format (s, "ICMP echo id %d seq %d%s",
+ clib_net_to_host_u16 (t->id),
+ clib_net_to_host_u16 (t->seq), t->bound ? "" : " (unknown)");
+
+ return s;
+}
+
+/*
+ * If we can find the ping run by an ICMP ID, then we send the signal
+ * to the CLI process referenced by that ping run, along with
+ * a freshly made copy of the packet.
+ * I opted for a packet copy to keep the main packet processing path
+ * the same as for all the other nodes.
+ *
+ */
+
+static int
+signal_ip46_icmp_reply_event (vlib_main_t * vm,
+ u8 event_type, vlib_buffer_t * b0)
+{
+ ping_main_t *pm = &ping_main;
+ u16 net_icmp_id = 0;
+ u32 bi0_copy = 0;
+
+ switch (event_type)
+ {
+ case PING_RESPONSE_IP4:
+ {
+ icmp4_echo_request_header_t *h0 = vlib_buffer_get_current (b0);
+ net_icmp_id = h0->icmp_echo.id;
+ }
+ break;
+ case PING_RESPONSE_IP6:
+ {
+ icmp6_echo_request_header_t *h0 = vlib_buffer_get_current (b0);
+ net_icmp_id = h0->icmp_echo.id;
+ }
+ break;
+ default:
+ return 0;
+ }
+
+ uword *p = hash_get (pm->ping_run_by_icmp_id,
+ clib_net_to_host_u16 (net_icmp_id));
+ if (!p)
+ return 0;
+
+ ping_run_t *pr = vec_elt_at_index (pm->ping_runs, p[0]);
+ if (vlib_buffer_alloc (vm, &bi0_copy, 1) == 1)
+ {
+ void *dst = vlib_buffer_get_current (vlib_get_buffer (vm, bi0_copy));
+ clib_memcpy (dst, vlib_buffer_get_current (b0), b0->current_length);
+ }
+ /* If buffer_alloc failed, bi0_copy == 0 - just signaling an event. */
+
+ vlib_process_signal_event (vm, pr->cli_process_id, event_type, bi0_copy);
+ return 1;
+}
+
+/*
+ * Process ICMPv6 echo replies
+ */
+static uword
+ip6_icmp_echo_reply_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from;
+
+ from = vlib_frame_vector_args (frame); /* array of buffer indices */
+ n_left_from = frame->n_vectors; /* number of buffer indices */
+
+ while (n_left_from > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+
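+ /* signal_ip46_icmp_reply_event() copies the reply and signals the
+ matching ping run's CLI process; when a match is found the
+ original buffer is dropped here, otherwise the reply is punted
+ onward. */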
+ next0 = signal_ip46_icmp_reply_event (vm, PING_RESPONSE_IP6, b0) ?
+ ICMP6_ECHO_REPLY_NEXT_DROP : ICMP6_ECHO_REPLY_NEXT_PUNT;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ icmp6_echo_request_header_t *h0 = vlib_buffer_get_current (b0);
+ icmp_echo_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->id = h0->icmp_echo.id;
+ tr->seq = h0->icmp_echo.seq;
+ tr->bound = (next0 == ICMP6_ECHO_REPLY_NEXT_DROP);
+ }
+
+ /* push this pkt to the next graph node */
+ vlib_set_next_frame_buffer (vm, node, next0, bi0);
+
+ from += 1;
+ n_left_from -= 1;
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_icmp_echo_reply_node, static) =
+{
+ .function = ip6_icmp_echo_reply_node_fn,
+ .name = "ip6-icmp-echo-reply",
+ .vector_size = sizeof (u32),
+ .format_trace = format_icmp_echo_trace,
+ .n_next_nodes = ICMP6_ECHO_REPLY_N_NEXT,
+ .next_nodes = {
+ [ICMP6_ECHO_REPLY_NEXT_DROP] = "error-drop",
+ [ICMP6_ECHO_REPLY_NEXT_PUNT] = "error-punt",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * Process ICMPv4 echo replies
+ */
+static uword
+ip4_icmp_echo_reply_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from;
+
+ from = vlib_frame_vector_args (frame); /* array of buffer indices */
+ n_left_from = frame->n_vectors; /* number of buffer indices */
+
+ while (n_left_from > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+
+ next0 = signal_ip46_icmp_reply_event (vm, PING_RESPONSE_IP4, b0) ?
+ ICMP4_ECHO_REPLY_NEXT_DROP : ICMP4_ECHO_REPLY_NEXT_PUNT;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ icmp4_echo_request_header_t *h0 = vlib_buffer_get_current (b0);
+ icmp_echo_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->id = h0->icmp_echo.id;
+ tr->seq = h0->icmp_echo.seq;
+ tr->bound = (next0 == ICMP4_ECHO_REPLY_NEXT_DROP);
+ }
+
+ /* push this pkt to the next graph node */
+ vlib_set_next_frame_buffer (vm, node, next0, bi0);
+
+ from += 1;
+ n_left_from -= 1;
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_icmp_echo_reply_node, static) =
+{
+ .function = ip4_icmp_echo_reply_node_fn,
+ .name = "ip4-icmp-echo-reply",
+ .vector_size = sizeof (u32),
+ .format_trace = format_icmp_echo_trace,
+ .n_next_nodes = ICMP4_ECHO_REPLY_N_NEXT,
+ .next_nodes = {
+ [ICMP4_ECHO_REPLY_NEXT_DROP] = "error-drop",
+ [ICMP4_ECHO_REPLY_NEXT_PUNT] = "error-punt",
+ },
+};
+/* *INDENT-ON* */
+
+char *ip6_lookup_next_nodes[] = IP6_LOOKUP_NEXT_NODES;
+char *ip4_lookup_next_nodes[] = IP4_LOOKUP_NEXT_NODES;
+
+/* get first interface address */
+static ip6_address_t *
+ip6_interface_first_address (ip6_main_t * im, u32 sw_if_index)
+{
+ ip_lookup_main_t *lm = &im->lookup_main;
+ ip_interface_address_t *ia = 0;
+ ip6_address_t *result = 0;
+
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (lm, ia, sw_if_index,
+ 1 /* honor unnumbered */ ,
+ ({
+ ip6_address_t * a =
+ ip_interface_address_get_address (lm, ia);
+ result = a;
+ break;
+ }));
+ /* *INDENT-ON* */
+ return result;
+}
+
+/* Fill in the ICMP ECHO structure, return the safety-checked and possibly shrunk data_len */
+static u16
+init_icmp46_echo_request (icmp46_echo_request_t * icmp46_echo,
+ u16 seq_host, u16 id_host, u16 data_len)
+{
+ int i;
+ icmp46_echo->seq = clib_host_to_net_u16 (seq_host);
+ icmp46_echo->id = clib_host_to_net_u16 (id_host);
+
+ for (i = 0; i < sizeof (icmp46_echo->data); i++)
+ {
+ icmp46_echo->data[i] = i % 256;
+ }
+
+ if (data_len > sizeof (icmp46_echo_request_t))
+ {
+ data_len = sizeof (icmp46_echo_request_t);
+ }
+ return data_len;
+}
+
+static send_ip46_ping_result_t
+send_ip6_ping (vlib_main_t * vm, ip6_main_t * im,
+ u32 table_id, ip6_address_t * pa6,
+ u32 sw_if_index, u16 seq_host, u16 id_host, u16 data_len,
+ u8 verbose)
+{
+ icmp6_echo_request_header_t *h0;
+ u32 bi0 = 0;
+ int bogus_length = 0;
+ vlib_buffer_t *p0;
+ vlib_frame_t *f;
+ u32 *to_next;
+
+ if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
+ return SEND_PING_ALLOC_FAIL;
+
+ p0 = vlib_get_buffer (vm, bi0);
+
+ /*
+ * if the user did not provide a source interface, use whatever
+ * interface the destination resolves via.
+ */
+ if (~0 == sw_if_index)
+ {
+ fib_node_index_t fib_entry_index;
+ u32 fib_index;
+
+ fib_index = ip6_fib_index_from_table_id (table_id);
+
+ if (~0 == fib_index)
+ {
+ vlib_buffer_free (vm, &bi0, 1);
+ return SEND_PING_NO_TABLE;
+ }
+
+ fib_entry_index = ip6_fib_table_lookup (fib_index, pa6, 128);
+ sw_if_index = fib_entry_get_resolving_interface (fib_entry_index);
+ /*
+ * Set the TX interface to force ip-lookup to use its table ID
+ */
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = fib_index;
+ }
+ else
+ {
+ /*
+ * force an IP lookup in the table bound to the user's chosen
+ * source interface.
+ */
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] =
+ ip6_fib_table_get_index_for_sw_if_index (sw_if_index);
+ }
+
+ if (~0 == sw_if_index)
+ {
+ vlib_buffer_free (vm, &bi0, 1);
+ return SEND_PING_NO_INTERFACE;
+ }
+
+ vnet_buffer (p0)->sw_if_index[VLIB_RX] = sw_if_index;
+
+ h0 = vlib_buffer_get_current (p0);
+
+ /* Fill in ip6 header fields */
+ h0->ip6.ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 (0x6 << 28);
+ h0->ip6.payload_length = 0; /* Set below */
+ h0->ip6.protocol = IP_PROTOCOL_ICMP6;
+ h0->ip6.hop_limit = 255;
+ h0->ip6.dst_address = *pa6;
+ h0->ip6.src_address = *pa6;
+
+ /* Fill in the correct source now */
+ ip6_address_t *a = ip6_interface_first_address (im, sw_if_index);
+ h0->ip6.src_address = a[0];
+
+ /* Fill in icmp fields */
+ h0->icmp.type = ICMP6_echo_request;
+ h0->icmp.code = 0;
+ h0->icmp.checksum = 0;
+
+ data_len =
+ init_icmp46_echo_request (&h0->icmp_echo, seq_host, id_host, data_len);
+ h0->icmp_echo.time_sent = vlib_time_now (vm);
+
+ /* Fix up the lengths */
+ h0->ip6.payload_length =
+ clib_host_to_net_u16 (data_len + sizeof (icmp46_header_t));
+
+ p0->current_length = clib_net_to_host_u16 (h0->ip6.payload_length) +
+ STRUCT_OFFSET_OF (icmp6_echo_request_header_t, icmp);
+
+ /* Calculate the ICMP checksum */
+ h0->icmp.checksum = 0;
+ h0->icmp.checksum =
+ ip6_tcp_udp_icmp_compute_checksum (vm, 0, &h0->ip6, &bogus_length);
+
+ /* Enqueue the packet right now */
+ f = vlib_get_frame_to_node (vm, ip6_lookup_node.index);
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi0;
+ f->n_vectors = 1;
+ vlib_put_frame_to_node (vm, ip6_lookup_node.index, f);
+
+ return SEND_PING_OK;
+}
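+
+/*
+ * The tail of send_ip6_ping() above (and of send_ip4_ping() below) uses
+ * the standard VPP pattern for handing a single buffer directly to a
+ * graph node, shown in isolation here (illustrative sketch):
+ *
+ *   vlib_frame_t *f = vlib_get_frame_to_node (vm, node_index);
+ *   u32 *to_next = vlib_frame_vector_args (f);
+ *   to_next[0] = bi0;
+ *   f->n_vectors = 1;
+ *   vlib_put_frame_to_node (vm, node_index, f);
+ */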
+
+static send_ip46_ping_result_t
+send_ip4_ping (vlib_main_t * vm,
+ ip4_main_t * im,
+ u32 table_id,
+ ip4_address_t * pa4,
+ u32 sw_if_index,
+ u16 seq_host, u16 id_host, u16 data_len, u8 verbose)
+{
+ icmp4_echo_request_header_t *h0;
+ u32 bi0 = 0;
+ ip_lookup_main_t *lm = &im->lookup_main;
+ vlib_buffer_t *p0;
+ vlib_frame_t *f;
+ u32 *to_next;
+ u32 if_add_index0;
+
+ if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
+ return SEND_PING_ALLOC_FAIL;
+
+ p0 = vlib_get_buffer (vm, bi0);
+
+ /*
+ * if the user did not provide a source interface, use whatever
+ * interface the destination resolves via.
+ */
+ if (~0 == sw_if_index)
+ {
+ fib_node_index_t fib_entry_index;
+ u32 fib_index;
+
+ fib_index = ip4_fib_index_from_table_id (table_id);
+
+ if (~0 == fib_index)
+ {
+ vlib_buffer_free (vm, &bi0, 1);
+ return SEND_PING_NO_TABLE;
+ }
+
+ fib_entry_index =
+ ip4_fib_table_lookup (ip4_fib_get (fib_index), pa4, 32);
+ sw_if_index = fib_entry_get_resolving_interface (fib_entry_index);
+ /*
+ * Set the TX interface to force ip-lookup to use the user's table ID
+ */
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = fib_index;
+ }
+ else
+ {
+ /*
+ * force an IP lookup in the table bound to the user's chosen
+ * source interface.
+ */
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] =
+ ip4_fib_table_get_index_for_sw_if_index (sw_if_index);
+ }
+
+ if (~0 == sw_if_index)
+ {
+ vlib_buffer_free (vm, &bi0, 1);
+ return SEND_PING_NO_INTERFACE;
+ }
+
+ vnet_buffer (p0)->sw_if_index[VLIB_RX] = sw_if_index;
+
+ h0 = vlib_buffer_get_current (p0);
+
+ /* Fill in ip4 header fields */
+ h0->ip4.checksum = 0;
+ h0->ip4.ip_version_and_header_length = 0x45;
+ h0->ip4.tos = 0;
+ h0->ip4.length = 0; /* Set below */
+ h0->ip4.fragment_id = 0;
+ h0->ip4.flags_and_fragment_offset = 0;
+ h0->ip4.ttl = 0xff;
+ h0->ip4.protocol = IP_PROTOCOL_ICMP;
+ h0->ip4.dst_address = *pa4;
+ h0->ip4.src_address = *pa4;
+
+ /* Fill in the correct source now */
+ if_add_index0 = lm->if_address_pool_index_by_sw_if_index[sw_if_index];
+ if (PREDICT_TRUE (if_add_index0 != ~0))
+ {
+ ip_interface_address_t *if_add =
+ pool_elt_at_index (lm->if_address_pool, if_add_index0);
+ ip4_address_t *if_ip = ip_interface_address_get_address (lm, if_add);
+ h0->ip4.src_address = *if_ip;
+ if (verbose)
+ {
+ vlib_cli_output (vm, "Source address: %U",
+ format_ip4_address, &h0->ip4.src_address);
+ }
+ }
+
+ /* Fill in icmp fields */
+ h0->icmp.type = ICMP4_echo_request;
+ h0->icmp.code = 0;
+ h0->icmp.checksum = 0;
+
+ data_len =
+ init_icmp46_echo_request (&h0->icmp_echo, seq_host, id_host, data_len);
+ h0->icmp_echo.time_sent = vlib_time_now (vm);
+
+ /* Fix up the lengths */
+ h0->ip4.length =
+ clib_host_to_net_u16 (data_len + sizeof (icmp46_header_t) +
+ sizeof (ip4_header_t));
+
+ p0->current_length = clib_net_to_host_u16 (h0->ip4.length);
+
+ /* Calculate the IP and ICMP checksums */
+ h0->ip4.checksum = ip4_header_checksum (&(h0->ip4));
+ h0->icmp.checksum =
+ ~ip_csum_fold (ip_incremental_checksum (0, &(h0->icmp),
+ p0->current_length -
+ sizeof (ip4_header_t)));
+
+ /* Enqueue the packet right now */
+ f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi0;
+ f->n_vectors = 1;
+ vlib_put_frame_to_node (vm, ip4_lookup_node.index, f);
+
+ return SEND_PING_OK;
+}
+
+
+static void
+print_ip6_icmp_reply (vlib_main_t * vm, u32 bi0)
+{
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
+ icmp6_echo_request_header_t *h0 = vlib_buffer_get_current (b0);
+ f64 rtt = vlib_time_now (vm) - h0->icmp_echo.time_sent;
+
+ vlib_cli_output (vm,
+ "%d bytes from %U: icmp_seq=%d ttl=%d time=%.4f ms",
+ clib_host_to_net_u16 (h0->ip6.payload_length),
+ format_ip6_address,
+ &h0->ip6.src_address,
+ clib_host_to_net_u16 (h0->icmp_echo.seq),
+ h0->ip6.hop_limit, rtt * 1000.0);
+}
+
+static void
+print_ip4_icmp_reply (vlib_main_t * vm, u32 bi0)
+{
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
+ icmp4_echo_request_header_t *h0 = vlib_buffer_get_current (b0);
+ f64 rtt = vlib_time_now (vm) - h0->icmp_echo.time_sent;
+ u32 rcvd_icmp_len =
+ clib_host_to_net_u16 (h0->ip4.length) -
+ (4 * (0xF & h0->ip4.ip_version_and_header_length));
+
+ vlib_cli_output (vm,
+ "%d bytes from %U: icmp_seq=%d ttl=%d time=%.4f ms",
+ rcvd_icmp_len,
+ format_ip4_address,
+ &h0->ip4.src_address,
+ clib_host_to_net_u16 (h0->icmp_echo.seq),
+ h0->ip4.ttl, rtt * 1000.0);
+}
+
+
+/*
+ * Perform the ping run with the given parameters in the current CLI process.
+ * Depending on whether pa4 or pa6 is set, runs IPv4 or IPv6 ping.
+ * The amusing side effect is, of course, that if both are set, both
+ * pings are sent. This can be used to ping a dual-stack host over
+ * IPv4 and IPv6 at once.
+ */
+
+static void
+run_ping_ip46_address (vlib_main_t * vm, u32 table_id, ip4_address_t * pa4,
+ ip6_address_t * pa6, u32 sw_if_index,
+ f64 ping_interval, u32 ping_repeat, u32 data_len,
+ u32 verbose)
+{
+ int i;
+ ping_main_t *pm = &ping_main;
+ uword curr_proc = vlib_current_process (vm);
+ u32 n_replies = 0;
+ u32 n_requests = 0;
+ ping_run_t *pr = 0;
+ u32 ping_run_index = 0;
+ u16 icmp_id;
+
+ static u32 rand_seed = 0;
+
+ if (PREDICT_FALSE (!rand_seed))
+ rand_seed = random_default_seed ();
+
+ icmp_id = random_u32 (&rand_seed) & 0xffff;
+
+ while (hash_get (pm->ping_run_by_icmp_id, icmp_id))
+ {
+ vlib_cli_output (vm, "ICMP ID collision at %d, incrementing", icmp_id);
+ icmp_id++;
+ }
+ pool_get (pm->ping_runs, pr);
+ ping_run_index = pr - pm->ping_runs;
+ pr->cli_process_id = curr_proc;
+ pr->icmp_id = icmp_id;
+ hash_set (pm->ping_run_by_icmp_id, icmp_id, ping_run_index);
+ for (i = 1; i <= ping_repeat; i++)
+ {
+ f64 sleep_interval;
+ f64 time_ping_sent = vlib_time_now (vm);
+ /* Reset pr: running ping in other process could have changed pm->ping_runs */
+ pr = vec_elt_at_index (pm->ping_runs, ping_run_index);
+ pr->curr_seq = i;
+ if (pa6 &&
+ (SEND_PING_OK ==
+ send_ip6_ping (vm, ping_main.ip6_main, table_id, pa6, sw_if_index,
+ i, icmp_id, data_len, verbose)))
+ {
+ n_requests++;
+ }
+ if (pa4 &&
+ (SEND_PING_OK ==
+ send_ip4_ping (vm, ping_main.ip4_main, table_id, pa4, sw_if_index,
+ i, icmp_id, data_len, verbose)))
+ {
+ n_requests++;
+ }
+ while ((i <= ping_repeat)
+ &&
+ ((sleep_interval =
+ time_ping_sent + ping_interval - vlib_time_now (vm)) > 0.0))
+ {
+ uword event_type, *event_data = 0;
+ vlib_process_wait_for_event_or_clock (vm, sleep_interval);
+ event_type = vlib_process_get_events (vm, &event_data);
+ switch (event_type)
+ {
+ case ~0: /* no events => timeout */
+ break;
+ case PING_RESPONSE_IP6:
+ {
+ int i;
+ for (i = 0; i < vec_len (event_data); i++)
+ {
+ u32 bi0 = event_data[i];
+ print_ip6_icmp_reply (vm, bi0);
+ n_replies++;
+ if (0 != bi0)
+ {
+ vlib_buffer_free (vm, &bi0, 1);
+ }
+ }
+ }
+ break;
+ case PING_RESPONSE_IP4:
+ {
+ int i;
+ for (i = 0; i < vec_len (event_data); i++)
+ {
+ u32 bi0 = event_data[i];
+ print_ip4_icmp_reply (vm, bi0);
+ n_replies++;
+ if (0 != bi0)
+ {
+ vlib_buffer_free (vm, &bi0, 1);
+ }
+ }
+ }
+ break;
+ default:
+ /* someone pressed a key, abort */
+ vlib_cli_output (vm, "Aborted due to a keypress.");
+ i = 1 + ping_repeat;
+ break;
+ }
+ }
+ }
+ vlib_cli_output (vm, "\n");
+ {
+ float loss =
+ (0 ==
+ n_requests) ? 0 : 100.0 * ((float) n_requests -
+ (float) n_replies) / (float) n_requests;
+ vlib_cli_output (vm,
+ "Statistics: %u sent, %u received, %f%% packet loss\n",
+ n_requests, n_replies, loss);
+ /* Reset pr: running ping in other process could have changed pm->ping_runs */
+ pr = vec_elt_at_index (pm->ping_runs, ping_run_index);
+ hash_unset (pm->ping_run_by_icmp_id, icmp_id);
+ pool_put (pm->ping_runs, pr);
+ }
+}
+
+static clib_error_t *
+ping_ip_address (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ ip4_address_t a4;
+ ip6_address_t a6;
+ clib_error_t *error = 0;
+ u32 ping_repeat = 5;
+ u8 ping_ip4, ping_ip6;
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 data_len = PING_DEFAULT_DATA_LEN;
+ u32 verbose = 0;
+ f64 ping_interval = PING_DEFAULT_INTERVAL;
+ u32 sw_if_index, table_id;
+
+ table_id = 0;
+ ping_ip4 = ping_ip6 = 0;
+ sw_if_index = ~0;
+
+ if (unformat (input, "%U", unformat_ip4_address, &a4))
+ {
+ ping_ip4 = 1;
+ }
+ else if (unformat (input, "%U", unformat_ip6_address, &a6))
+ {
+ ping_ip6 = 1;
+ }
+ else if (unformat (input, "ipv4"))
+ {
+ if (unformat (input, "%U", unformat_ip4_address, &a4))
+ {
+ ping_ip4 = 1;
+ }
+ else
+ {
+ error =
+ clib_error_return (0,
+ "expecting IPv4 address but got `%U'",
+ format_unformat_error, input);
+ }
+ }
+ else if (unformat (input, "ipv6"))
+ {
+ if (unformat (input, "%U", unformat_ip6_address, &a6))
+ {
+ ping_ip6 = 1;
+ }
+ else
+ {
+ error =
+ clib_error_return (0,
+ "expecting IPv6 address but got `%U'",
+ format_unformat_error, input);
+ }
+ }
+ else
+ {
+ error =
+ clib_error_return (0,
+ "expecting IP4/IP6 address `%U'. Usage: ping <addr> [source <intf>] [size <datasz>] [repeat <count>] [verbose]",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ /* allow for the second AF in the same ping */
+ if (!ping_ip4 && (unformat (input, "ipv4")))
+ {
+ if (unformat (input, "%U", unformat_ip4_address, &a4))
+ {
+ ping_ip4 = 1;
+ }
+ }
+ else if (!ping_ip6 && (unformat (input, "ipv6")))
+ {
+ if (unformat (input, "%U", unformat_ip6_address, &a6))
+ {
+ ping_ip6 = 1;
+ }
+ }
+
+ /* parse the rest of the parameters in a cycle */
+ while (!unformat_eof (input, NULL))
+ {
+ if (unformat (input, "source"))
+ {
+ if (!unformat_user
+ (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error =
+ clib_error_return (0,
+ "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+ else if (unformat (input, "size"))
+ {
+ if (!unformat (input, "%u", &data_len))
+ {
+ error =
+ clib_error_return (0,
+ "expecting size but got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+ else if (unformat (input, "table-id"))
+ {
+ if (!unformat (input, "du", &table_id))
+ {
+ error =
+ clib_error_return (0,
+ "expecting table-id but got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+ else if (unformat (input, "interval"))
+ {
+ if (!unformat (input, "%f", &ping_interval))
+ {
+ error =
+ clib_error_return (0,
+ "expecting interval (floating point number) got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+ else if (unformat (input, "repeat"))
+ {
+ if (!unformat (input, "%u", &ping_repeat))
+ {
+ error =
+ clib_error_return (0,
+ "expecting repeat count but got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+ else if (unformat (input, "verbose"))
+ {
+ verbose = 1;
+ }
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+
+ run_ping_ip46_address (vm, table_id, ping_ip4 ? &a4 : NULL,
+ ping_ip6 ? &a6 : NULL, sw_if_index, ping_interval,
+ ping_repeat, data_len, verbose);
+done:
+ return error;
+}
+
+/*?
+ * This command sends an ICMP ECHO_REQUEST to network hosts. The address
+ * can be an IPv4 or IPv6 address (or both at the same time).
+ *
+ * @cliexpar
+ * @parblock
+ * Example of how ping an IPv4 address:
+ * @cliexstart{ping 172.16.1.2 source GigabitEthernet2/0/0 repeat 2}
+ * 64 bytes from 172.16.1.2: icmp_seq=1 ttl=64 time=.1090 ms
+ * 64 bytes from 172.16.1.2: icmp_seq=2 ttl=64 time=.0914 ms
+ *
+ * Statistics: 2 sent, 2 received, 0% packet loss
+ * @cliexend
+ *
+ * Example of how ping both an IPv4 address and IPv6 address at the same time:
+ * @cliexstart{ping 172.16.1.2 ipv6 fe80::24a5:f6ff:fe9c:3a36 source GigabitEthernet2/0/0 repeat 2 verbose}
+ * Adjacency index: 10, sw_if_index: 1
+ * Adj: ip6-discover-neighbor
+ * Adj Interface: 0
+ * Forced set interface: 1
+ * Adjacency index: 0, sw_if_index: 4294967295
+ * Adj: ip4-miss
+ * Adj Interface: 0
+ * Forced set interface: 1
+ * Source address: 172.16.1.1
+ * 64 bytes from 172.16.1.2: icmp_seq=1 ttl=64 time=.1899 ms
+ * Adjacency index: 10, sw_if_index: 1
+ * Adj: ip6-discover-neighbor
+ * Adj Interface: 0
+ * Forced set interface: 1
+ * Adjacency index: 0, sw_if_index: 4294967295
+ * Adj: ip4-miss
+ * Adj Interface: 0
+ * Forced set interface: 1
+ * Source address: 172.16.1.1
+ * 64 bytes from 172.16.1.2: icmp_seq=2 ttl=64 time=.0910 ms
+ *
+ * Statistics: 4 sent, 2 received, 50% packet loss
+ * @cliexend
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ping_command, static) =
+{
+ .path = "ping",
+ .function = ping_ip_address,
+ .short_help = "ping {<ip-addr> | ipv4 <ip4-addr> | ipv6 <ip6-addr>}"
+ " [ipv4 <ip4-addr> | ipv6 <ip6-addr>] [source <interface>]"
+ " [size <pktsize>] [interval <sec>] [repeat <cnt>] [table-id <id>]"
+ " [verbose]",
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+ping_cli_init (vlib_main_t * vm)
+{
+ ping_main_t *pm = &ping_main;
+ pm->ip6_main = &ip6_main;
+ pm->ip4_main = &ip4_main;
+ icmp6_register_type (vm, ICMP6_echo_reply, ip6_icmp_echo_reply_node.index);
+ ip4_icmp_register_type (vm, ICMP4_echo_reply,
+ ip4_icmp_echo_reply_node.index);
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ping_cli_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/ping.h b/src/vnet/ip/ping.h
new file mode 100644
index 00000000000..8f41f45c5f9
--- /dev/null
+++ b/src/vnet/ip/ping.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vnet_ping_h
+#define included_vnet_ping_h
+
+
+#include <vnet/ip/ip.h>
+
+#include <vnet/ip/lookup.h>
+
+typedef enum
+{
+ PING_RESPONSE_IP6 = 42,
+ PING_RESPONSE_IP4,
+} ping_response_type_t;
+
+typedef enum
+{
+ SEND_PING_OK = 0,
+ SEND_PING_ALLOC_FAIL,
+ SEND_PING_NO_INTERFACE,
+ SEND_PING_NO_TABLE,
+} send_ip46_ping_result_t;
+
+/*
+ * Currently running ping command.
+ */
+typedef struct ping_run_t
+{
+ u16 icmp_id;
+ u16 curr_seq;
+ uword cli_process_id;
+} ping_run_t;
+
+typedef struct ping_main_t
+{
+ ip6_main_t *ip6_main;
+ ip4_main_t *ip4_main;
+ ping_run_t *ping_runs;
+ /* hash table mapping ICMP id to ping run index; used to find
+ the CLI process to signal for a reply */
+ uword *ping_run_by_icmp_id;
+} ping_main_t;
+
+ping_main_t ping_main;
+
+#define PING_DEFAULT_DATA_LEN 60
+#define PING_DEFAULT_INTERVAL 1.0
+
+#define PING_MAXIMUM_DATA_SIZE 2000
+
+typedef CLIB_PACKED (struct
+ {
+ u16 id;
+ u16 seq; f64 time_sent; u8 data[PING_MAXIMUM_DATA_SIZE];
+ }) icmp46_echo_request_t;
+
+
+typedef CLIB_PACKED (struct
+ {
+ ip6_header_t ip6;
+ icmp46_header_t icmp; icmp46_echo_request_t icmp_echo;
+ }) icmp6_echo_request_header_t;
+
+typedef CLIB_PACKED (struct
+ {
+ ip4_header_t ip4;
+ icmp46_header_t icmp; icmp46_echo_request_t icmp_echo;
+ }) icmp4_echo_request_header_t;
+
+
+typedef struct
+{
+ u16 id;
+ u16 seq;
+ u8 bound;
+} icmp_echo_trace_t;
+
+typedef enum
+{
+ ICMP6_ECHO_REPLY_NEXT_DROP,
+ ICMP6_ECHO_REPLY_NEXT_PUNT,
+ ICMP6_ECHO_REPLY_N_NEXT,
+} icmp6_echo_reply_next_t;
+
+typedef enum
+{
+ ICMP4_ECHO_REPLY_NEXT_DROP,
+ ICMP4_ECHO_REPLY_NEXT_PUNT,
+ ICMP4_ECHO_REPLY_N_NEXT,
+} icmp4_echo_reply_next_t;
+
+#endif /* included_vnet_ping_h */
diff --git a/src/vnet/ip/ports.def b/src/vnet/ip/ports.def
new file mode 100644
index 00000000000..cdb754f5b2e
--- /dev/null
+++ b/src/vnet/ip/ports.def
@@ -0,0 +1,757 @@
+/*
+ * ip/ports.def: tcp/udp port definitions
+ *
+ * Eliot Dresselhaus
+ * August, 2005
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+PORT NUMBERS
+
+(last updated 18 October 2005)
+
+The port numbers are divided into three ranges: the Well Known Ports,
+the Registered Ports, and the Dynamic and/or Private Ports.
+
+The Well Known Ports are those from 0 through 1023.
+
+The Registered Ports are those from 1024 through 49151
+
+The Dynamic and/or Private Ports are those from 49152 through 65535
+
+
+************************************************************************
+* PLEASE NOTE THE FOLLOWING: *
+* *
+* 1. UNASSIGNED PORT NUMBERS SHOULD NOT BE USED. THE IANA WILL ASSIGN *
+* THE NUMBER FOR THE PORT AFTER YOUR APPLICATION HAS BEEN APPROVED. *
+* *
+* 2. ASSIGNMENT OF A PORT NUMBER DOES NOT IN ANY WAY IMPLY AN *
+* ENDORSEMENT OF AN APPLICATION OR PRODUCT, AND THE FACT THAT NETWORK *
+* TRAFFIC IS FLOWING TO OR FROM A REGISTERED PORT DOES NOT MEAN THAT *
+* IT IS "GOOD" TRAFFIC. FIREWALL AND SYSTEM ADMINISTRATORS SHOULD *
+* CHOOSE HOW TO CONFIGURE THEIR SYSTEMS BASED ON THEIR KNOWLEDGE OF *
+* THE TRAFFIC IN QUESTION, NOT WHETHER THERE IS A PORT NUMBER *
+* REGISTERED OR NOT. *
+************************************************************************
+
+
+WELL KNOWN PORT NUMBERS
+
+The Well Known Ports are assigned by the IANA and on most systems can
+only be used by system (or root) processes or by programs executed by
+privileged users.
+
+Ports are used in the TCP [RFC793] to name the ends of logical
+connections which carry long term conversations. For the purpose of
+providing services to unknown callers, a service contact port is
+defined. This list specifies the port used by the server process as
+its contact port. The contact port is sometimes called the
+"well-known port".
+
+To the extent possible, these same port assignments are used with the
+UDP [RFC768].
+
+The range for assigned ports managed by the IANA is 0-1023.
+*/
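+
+/*
+ * This file is an X-macro list: each consumer defines ip_port() before
+ * including it and undefines it afterwards. A typical-usage sketch (the
+ * enum and tag names below are illustrative):
+ *
+ *   typedef enum
+ *   {
+ *   #define ip_port(name, number) IP_PORT_##name = number,
+ *   #include <vnet/ip/ports.def>
+ *   #undef ip_port
+ *   } ip_port_t;
+ */
+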
+ip_port (TCPMUX, 1)
+ip_port (COMPRESS_NET_MANAGEMENT, 2)
+ip_port (COMPRESS_NET, 3)
+ip_port (RJE, 5)
+ip_port (ECHO, 7)
+ip_port (DISCARD, 9)
+ip_port (SYSTAT, 11)
+ip_port (DAYTIME, 13)
+ip_port (QOTD, 17)
+ip_port (MSP, 18)
+ip_port (CHARGEN, 19)
+ip_port (FTP_DATA, 20)
+ip_port (FTP, 21)
+ip_port (SSH, 22)
+ip_port (TELNET, 23)
+ip_port (SMTP, 25)
+ip_port (NSW_FE, 27)
+ip_port (MSG_ICP, 29)
+ip_port (MSG_AUTH, 31)
+ip_port (DSP, 33)
+ip_port (TIME, 37)
+ip_port (RAP, 38)
+ip_port (RLP, 39)
+ip_port (GRAPHICS, 41)
+ip_port (NAME, 42)
+ip_port (NAMESERVER, 42)
+ip_port (NICNAME, 43)
+ip_port (MPM_FLAGS, 44)
+ip_port (MPM, 45)
+ip_port (MPM_SND, 46)
+ip_port (NI_FTP, 47)
+ip_port (AUDITD, 48)
+ip_port (TACACS, 49)
+ip_port (RE_MAIL_CK, 50)
+ip_port (LA_MAINT, 51)
+ip_port (XNS_TIME, 52)
+ip_port (DNS, 53)
+ip_port (XNS_CH, 54)
+ip_port (ISI_GL, 55)
+ip_port (XNS_AUTH, 56)
+ip_port (XNS_MAIL, 58)
+ip_port (NI_MAIL, 61)
+ip_port (ACAS, 62)
+ip_port (WHOIS_PLUS_PLUS, 63)
+ip_port (COVIA, 64)
+ip_port (TACACS_DS, 65)
+ip_port (ORACLE_SQL_NET, 66)
+ip_port (BOOTPS, 67)
+ip_port (BOOTPC, 68)
+ip_port (TFTP, 69)
+ip_port (GOPHER, 70)
+ip_port (NETRJS_1, 71)
+ip_port (NETRJS_2, 72)
+ip_port (NETRJS_3, 73)
+ip_port (NETRJS_4, 74)
+ip_port (DEOS, 76)
+ip_port (VETTCP, 78)
+ip_port (FINGER, 79)
+ip_port (WWW, 80)
+ip_port (HOSTS2_NS, 81)
+ip_port (XFER, 82)
+ip_port (MIT_ML_DEV, 83)
+ip_port (CTF, 84)
+ip_port (MIT_ML_DEV1, 85)
+ip_port (MFCOBOL, 86)
+ip_port (KERBEROS, 88)
+ip_port (SU_MIT_TG, 89)
+ip_port (DNSIX, 90)
+ip_port (MIT_DOV, 91)
+ip_port (NPP, 92)
+ip_port (DCP, 93)
+ip_port (OBJCALL, 94)
+ip_port (SUPDUP, 95)
+ip_port (DIXIE, 96)
+ip_port (SWIFT_RVF, 97)
+ip_port (TACNEWS, 98)
+ip_port (METAGRAM, 99)
+ip_port (NEWACCT, 100)
+ip_port (HOSTNAME, 101)
+ip_port (ISO_TSAP, 102)
+ip_port (GPPITNP, 103)
+ip_port (ACR_NEMA, 104)
+ip_port (CSO, 105)
+ip_port (CSNET_NS, 105)
+ip_port (3COM_TSMUX, 106)
+ip_port (RTELNET, 107)
+ip_port (SNAGAS, 108)
+ip_port (POP2, 109)
+ip_port (POP3, 110)
+ip_port (SUNRPC, 111)
+ip_port (MCIDAS, 112)
+ip_port (IDENT, 113)
+ip_port (SFTP, 115)
+ip_port (ANSANOTIFY, 116)
+ip_port (UUCP_PATH, 117)
+ip_port (SQLSERV, 118)
+ip_port (NNTP, 119)
+ip_port (CFDPTKT, 120)
+ip_port (ERPC, 121)
+ip_port (SMAKYNET, 122)
+ip_port (NTP, 123)
+ip_port (ANSATRADER, 124)
+ip_port (LOCUS_MAP, 125)
+ip_port (NXEDIT, 126)
+ip_port (LOCUS_CON, 127)
+ip_port (GSS_XLICEN, 128)
+ip_port (PWDGEN, 129)
+ip_port (CISCO_FNA, 130)
+ip_port (CISCO_TNA, 131)
+ip_port (CISCO_SYS, 132)
+ip_port (STATSRV, 133)
+ip_port (INGRES_NET, 134)
+ip_port (EPMAP, 135)
+ip_port (PROFILE, 136)
+ip_port (NETBIOS_NS, 137)
+ip_port (NETBIOS_DGM, 138)
+ip_port (NETBIOS_SSN, 139)
+ip_port (EMFIS_DATA, 140)
+ip_port (EMFIS_CNTL, 141)
+ip_port (BL_IDM, 142)
+ip_port (IMAP, 143)
+ip_port (UMA, 144)
+ip_port (UAAC, 145)
+ip_port (ISO_TP0, 146)
+ip_port (ISO_IP, 147)
+ip_port (JARGON, 148)
+ip_port (AED_512, 149)
+ip_port (SQL_NET, 150)
+ip_port (HEMS, 151)
+ip_port (BFTP, 152)
+ip_port (SGMP, 153)
+ip_port (NETSC_PROD, 154)
+ip_port (NETSC_DEV, 155)
+ip_port (SQLSRV, 156)
+ip_port (KNET_CMP, 157)
+ip_port (PCMAIL_SRV, 158)
+ip_port (NSS_ROUTING, 159)
+ip_port (SGMP_TRAPS, 160)
+ip_port (SNMP, 161)
+ip_port (SNMPTRAP, 162)
+ip_port (CMIP_MAN, 163)
+ip_port (CMIP_AGENT, 164)
+ip_port (XNS_COURIER, 165)
+ip_port (S_NET, 166)
+ip_port (NAMP, 167)
+ip_port (RSVD, 168)
+ip_port (SEND, 169)
+ip_port (PRINT_SRV, 170)
+ip_port (MULTIPLEX, 171)
+ip_port (CL1, 172)
+ip_port (XYPLEX_MUX, 173)
+ip_port (MAILQ, 174)
+ip_port (VMNET, 175)
+ip_port (GENRAD_MUX, 176)
+ip_port (XDMCP, 177)
+ip_port (NEXTSTEP, 178)
+ip_port (BGP, 179)
+ip_port (RIS, 180)
+ip_port (UNIFY, 181)
+ip_port (AUDIT, 182)
+ip_port (OCBINDER, 183)
+ip_port (OCSERVER, 184)
+ip_port (REMOTE_KIS, 185)
+ip_port (KIS, 186)
+ip_port (ACI, 187)
+ip_port (MUMPS, 188)
+ip_port (QFT, 189)
+ip_port (GACP, 190)
+ip_port (PROSPERO, 191)
+ip_port (OSU_NMS, 192)
+ip_port (SRMP, 193)
+ip_port (IRC, 194)
+ip_port (DN6_NLM_AUD, 195)
+ip_port (DN6_SMM_RED, 196)
+ip_port (DLS, 197)
+ip_port (DLS_MON, 198)
+ip_port (SMUX, 199)
+ip_port (SRC, 200)
+ip_port (AT_RTMP, 201)
+ip_port (AT_NBP, 202)
+ip_port (AT_3, 203)
+ip_port (AT_ECHO, 204)
+ip_port (AT_5, 205)
+ip_port (AT_ZIS, 206)
+ip_port (AT_7, 207)
+ip_port (AT_8, 208)
+ip_port (QMTP, 209)
+ip_port (Z39_50, 210)
+ip_port (TI914CG, 211)
+ip_port (ANET, 212)
+ip_port (IPX, 213)
+ip_port (VMPWSCS, 214)
+ip_port (SOFTPC, 215)
+ip_port (CAILIC, 216)
+ip_port (DBASE, 217)
+ip_port (MPP, 218)
+ip_port (UARPS, 219)
+ip_port (IMAP3, 220)
+ip_port (FLN_SPX, 221)
+ip_port (RSH_SPX, 222)
+ip_port (CDC, 223)
+ip_port (MASQDIALER, 224)
+ip_port (DIRECT, 242)
+ip_port (SUR_MEAS, 243)
+ip_port (INBUSINESS, 244)
+ip_port (LINK, 245)
+ip_port (DSP3270, 246)
+ip_port (SUBNTBCST_TFTP, 247)
+ip_port (BHFHS, 248)
+ip_port (RAP1, 256)
+ip_port (SET, 257)
+ip_port (YAK_CHAT, 258)
+ip_port (ESRO_GEN, 259)
+ip_port (OPENPORT, 260)
+ip_port (NSIIOPS, 261)
+ip_port (ARCISDMS, 262)
+ip_port (HDAP, 263)
+ip_port (BGMP, 264)
+ip_port (X_BONE_CTL, 265)
+ip_port (SST, 266)
+ip_port (TD_SERVICE, 267)
+ip_port (TD_REPLICA, 268)
+ip_port (HTTP_MGMT, 280)
+ip_port (PERSONAL_LINK, 281)
+ip_port (CABLEPORT_AX, 282)
+ip_port (RESCAP, 283)
+ip_port (CORERJD, 284)
+ip_port (FXP, 286)
+ip_port (K_BLOCK, 287)
+ip_port (NOVASTORBAKCUP, 308)
+ip_port (ENTRUSTTIME, 309)
+ip_port (BHMDS, 310)
+ip_port (ASIP_WEBADMIN, 311)
+ip_port (VSLMP, 312)
+ip_port (MAGENTA_LOGIC, 313)
+ip_port (OPALIS_ROBOT, 314)
+ip_port (DPSI, 315)
+ip_port (DECAUTH, 316)
+ip_port (ZANNET, 317)
+ip_port (PKIX_TIMESTAMP, 318)
+ip_port (PTP_EVENT, 319)
+ip_port (PTP_GENERAL, 320)
+ip_port (PIP, 321)
+ip_port (RTSPS, 322)
+ip_port (TEXAR, 333)
+ip_port (PDAP, 344)
+ip_port (PAWSERV, 345)
+ip_port (ZSERV, 346)
+ip_port (FATSERV, 347)
+ip_port (CSI_SGWP, 348)
+ip_port (MFTP, 349)
+ip_port (MATIP_TYPE_A, 350)
+ip_port (MATIP_TYPE_B, 351)
+ip_port (BHOETTY, 351)
+ip_port (DTAG_STE_SB, 352)
+ip_port (BHOEDAP4, 352)
+ip_port (NDSAUTH, 353)
+ip_port (BH611, 354)
+ip_port (DATEX_ASN, 355)
+ip_port (CLOANTO_NET_1, 356)
+ip_port (BHEVENT, 357)
+ip_port (SHRINKWRAP, 358)
+ip_port (NSRMP, 359)
+ip_port (SCOI2ODIALOG, 360)
+ip_port (SEMANTIX, 361)
+ip_port (SRSSEND, 362)
+ip_port (RSVP_TUNNEL, 363)
+ip_port (AURORA_CMGR, 364)
+ip_port (DTK, 365)
+ip_port (ODMR, 366)
+ip_port (MORTGAGEWARE, 367)
+ip_port (QBIKGDP, 368)
+ip_port (RPC2PORTMAP, 369)
+ip_port (CODAAUTH2, 370)
+ip_port (CLEARCASE, 371)
+ip_port (ULISTPROC, 372)
+ip_port (LEGENT_1, 373)
+ip_port (LEGENT_2, 374)
+ip_port (HASSLE, 375)
+ip_port (NIP, 376)
+ip_port (TNETOS, 377)
+ip_port (DSETOS, 378)
+ip_port (IS99C, 379)
+ip_port (IS99S, 380)
+ip_port (HP_COLLECTOR, 381)
+ip_port (HP_MANAGED_NODE, 382)
+ip_port (HP_ALARM_MGR, 383)
+ip_port (ARNS, 384)
+ip_port (IBM_APP, 385)
+ip_port (ASA, 386)
+ip_port (AURP, 387)
+ip_port (UNIDATA_LDM, 388)
+ip_port (LDAP, 389)
+ip_port (UIS, 390)
+ip_port (SYNOTICS_RELAY, 391)
+ip_port (SYNOTICS_BROKER, 392)
+ip_port (META5, 393)
+ip_port (EMBL_NDT, 394)
+ip_port (NETCP, 395)
+ip_port (NETWARE_IP, 396)
+ip_port (MPTN, 397)
+ip_port (KRYPTOLAN, 398)
+ip_port (ISO_TSAP_C2, 399)
+ip_port (WORK_SOL, 400)
+ip_port (UPS, 401)
+ip_port (GENIE, 402)
+ip_port (DECAP, 403)
+ip_port (NCED, 404)
+ip_port (NCLD, 405)
+ip_port (IMSP, 406)
+ip_port (TIMBUKTU, 407)
+ip_port (PRM_SM, 408)
+ip_port (PRM_NM, 409)
+ip_port (DECLADEBUG, 410)
+ip_port (RMT, 411)
+ip_port (SYNOPTICS_TRAP, 412)
+ip_port (SMSP, 413)
+ip_port (INFOSEEK, 414)
+ip_port (BNET, 415)
+ip_port (SILVERPLATTER, 416)
+ip_port (ONMUX, 417)
+ip_port (HYPER_G, 418)
+ip_port (ARIEL1, 419)
+ip_port (SMPTE, 420)
+ip_port (ARIEL2, 421)
+ip_port (ARIEL3, 422)
+ip_port (OPC_JOB_START, 423)
+ip_port (OPC_JOB_TRACK, 424)
+ip_port (ICAD_EL, 425)
+ip_port (SMARTSDP, 426)
+ip_port (SVRLOC, 427)
+ip_port (OCS_CMU, 428)
+ip_port (OCS_AMU, 429)
+ip_port (UTMPSD, 430)
+ip_port (UTMPCD, 431)
+ip_port (IASD, 432)
+ip_port (NNSP, 433)
+ip_port (MOBILEIP_AGENT, 434)
+ip_port (MOBILIP_MN, 435)
+ip_port (DNA_CML, 436)
+ip_port (COMSCM, 437)
+ip_port (DSFGW, 438)
+ip_port (DASP, 439)
+ip_port (SGCP, 440)
+ip_port (DECVMS_SYSMGT, 441)
+ip_port (CVC_HOSTD, 442)
+ip_port (HTTPS, 443)
+ip_port (SNPP, 444)
+ip_port (MICROSOFT_DS, 445)
+ip_port (DDM_RDB, 446)
+ip_port (DDM_DFM, 447)
+ip_port (DDM_SSL, 448)
+ip_port (AS_SERVERMAP, 449)
+ip_port (TSERVER, 450)
+ip_port (SFS_SMP_NET, 451)
+ip_port (SFS_CONFIG, 452)
+ip_port (CREATIVESERVER, 453)
+ip_port (CONTENTSERVER, 454)
+ip_port (CREATIVEPARTNR, 455)
+ip_port (MACON_TCP, 456)
+ip_port (SCOHELP, 457)
+ip_port (APPLEQTC, 458)
+ip_port (AMPR_RCMD, 459)
+ip_port (SKRONK, 460)
+ip_port (DATASURFSRV, 461)
+ip_port (DATASURFSRVSEC, 462)
+ip_port (ALPES, 463)
+ip_port (KPASSWD, 464)
+ip_port (URD, 465)
+ip_port (DIGITAL_VRC, 466)
+ip_port (MYLEX_MAPD, 467)
+ip_port (PHOTURIS, 468)
+ip_port (RCP, 469)
+ip_port (SCX_PROXY, 470)
+ip_port (MONDEX, 471)
+ip_port (LJK_LOGIN, 472)
+ip_port (HYBRID_POP, 473)
+ip_port (TN_TL_W1, 474)
+ip_port (TCPNETHASPSRV, 475)
+ip_port (TN_TL_FD1, 476)
+ip_port (SS7NS, 477)
+ip_port (SPSC, 478)
+ip_port (IAFSERVER, 479)
+ip_port (IAFDBASE, 480)
+ip_port (PH, 481)
+ip_port (BGS_NSI, 482)
+ip_port (ULPNET, 483)
+ip_port (INTEGRA_SME, 484)
+ip_port (POWERBURST, 485)
+ip_port (AVIAN, 486)
+ip_port (SAFT, 487)
+ip_port (GSS_HTTP, 488)
+ip_port (NEST_PROTOCOL, 489)
+ip_port (MICOM_PFS, 490)
+ip_port (GO_LOGIN, 491)
+ip_port (TICF_1, 492)
+ip_port (TICF_2, 493)
+ip_port (POV_RAY, 494)
+ip_port (INTECOURIER, 495)
+ip_port (PIM_RP_DISC, 496)
+ip_port (DANTZ, 497)
+ip_port (SIAM, 498)
+ip_port (ISO_ILL, 499)
+ip_port (ISAKMP, 500)
+ip_port (STMF, 501)
+ip_port (ASA_APPL_PROTO, 502)
+ip_port (INTRINSA, 503)
+ip_port (CITADEL, 504)
+ip_port (MAILBOX_LM, 505)
+ip_port (OHIMSRV, 506)
+ip_port (CRS, 507)
+ip_port (XVTTP, 508)
+ip_port (SNARE, 509)
+ip_port (FCP, 510)
+ip_port (PASSGO, 511)
+ip_port (EXEC, 512)
+ip_port (LOGIN, 513)
+ip_port (SHELL, 514)
+ip_port (PRINTER, 515)
+ip_port (VIDEOTEX, 516)
+ip_port (TALK, 517)
+ip_port (NTALK, 518)
+ip_port (UTIME, 519)
+ip_port (EFS, 520)
+ip_port (RIPNG, 521)
+ip_port (ULP, 522)
+ip_port (IBM_DB2, 523)
+ip_port (NCP, 524)
+ip_port (TIMED, 525)
+ip_port (TEMPO, 526)
+ip_port (STX, 527)
+ip_port (CUSTIX, 528)
+ip_port (IRC_SERV, 529)
+ip_port (COURIER, 530)
+ip_port (CONFERENCE, 531)
+ip_port (NETNEWS, 532)
+ip_port (NETWALL, 533)
+ip_port (MM_ADMIN, 534)
+ip_port (IIOP, 535)
+ip_port (OPALIS_RDV, 536)
+ip_port (NMSP, 537)
+ip_port (GDOMAP, 538)
+ip_port (APERTUS_LDP, 539)
+ip_port (UUCP, 540)
+ip_port (UUCP_RLOGIN, 541)
+ip_port (COMMERCE, 542)
+ip_port (KLOGIN, 543)
+ip_port (KSHELL, 544)
+ip_port (APPLEQTCSRVR, 545)
+ip_port (DHCPV6_CLIENT, 546)
+ip_port (DHCPV6_SERVER, 547)
+ip_port (AFPOVERTCP, 548)
+ip_port (IDFP, 549)
+ip_port (NEW_RWHO, 550)
+ip_port (CYBERCASH, 551)
+ip_port (DEVSHR_NTS, 552)
+ip_port (PIRP, 553)
+ip_port (RTSP, 554)
+ip_port (DSF, 555)
+ip_port (REMOTEFS, 556)
+ip_port (OPENVMS_SYSIPC, 557)
+ip_port (SDNSKMP, 558)
+ip_port (TEEDTAP, 559)
+ip_port (RMONITOR, 560)
+ip_port (MONITOR, 561)
+ip_port (CHSHELL, 562)
+ip_port (NNTPS, 563)
+ip_port (9PFS, 564)
+ip_port (WHOAMI, 565)
+ip_port (STREETTALK, 566)
+ip_port (BANYAN_RPC, 567)
+ip_port (MS_SHUTTLE, 568)
+ip_port (MS_ROME, 569)
+ip_port (METER, 570)
+ip_port (METER1, 571)
+ip_port (SONAR, 572)
+ip_port (BANYAN_VIP, 573)
+ip_port (FTP_AGENT, 574)
+ip_port (VEMMI, 575)
+ip_port (IPCD, 576)
+ip_port (VNAS, 577)
+ip_port (IPDD, 578)
+ip_port (DECBSRV, 579)
+ip_port (SNTP_HEARTBEAT, 580)
+ip_port (BDP, 581)
+ip_port (SCC_SECURITY, 582)
+ip_port (PHILIPS_VC, 583)
+ip_port (KEYSERVER, 584)
+ip_port (IMAP4_SSL, 585)
+ip_port (PASSWORD_CHG, 586)
+ip_port (SUBMISSION, 587)
+ip_port (CAL, 588)
+ip_port (EYELINK, 589)
+ip_port (TNS_CML, 590)
+ip_port (HTTP_ALT, 591)
+ip_port (EUDORA_SET, 592)
+ip_port (HTTP_RPC_EPMAP, 593)
+ip_port (TPIP, 594)
+ip_port (CAB_PROTOCOL, 595)
+ip_port (SMSD, 596)
+ip_port (PTCNAMESERVICE, 597)
+ip_port (SCO_WEBSRVRMG3, 598)
+ip_port (ACP, 599)
+ip_port (IPCSERVER, 600)
+ip_port (SYSLOG_CONN, 601)
+ip_port (XMLRPC_BEEP, 602)
+ip_port (IDXP, 603)
+ip_port (TUNNEL, 604)
+ip_port (SOAP_BEEP, 605)
+ip_port (URM, 606)
+ip_port (NQS, 607)
+ip_port (SIFT_UFT, 608)
+ip_port (NPMP_TRAP, 609)
+ip_port (NPMP_LOCAL, 610)
+ip_port (NPMP_GUI, 611)
+ip_port (HMMP_IND, 612)
+ip_port (HMMP_OP, 613)
+ip_port (SSHELL, 614)
+ip_port (SCO_INETMGR, 615)
+ip_port (SCO_SYSMGR, 616)
+ip_port (SCO_DTMGR, 617)
+ip_port (DEI_ICDA, 618)
+ip_port (COMPAQ_EVM, 619)
+ip_port (SCO_WEBSRVRMGR, 620)
+ip_port (ESCP_IP, 621)
+ip_port (COLLABORATOR, 622)
+ip_port (ASF_RMCP, 623)
+ip_port (CRYPTOADMIN, 624)
+ip_port (DEC_DLM, 625)
+ip_port (ASIA, 626)
+ip_port (PASSGO_TIVOLI, 627)
+ip_port (QMQP, 628)
+ip_port (3COM_AMP3, 629)
+ip_port (RDA, 630)
+ip_port (IPP, 631)
+ip_port (BMPP, 632)
+ip_port (SERVSTAT, 633)
+ip_port (GINAD, 634)
+ip_port (RLZDBASE, 635)
+ip_port (LDAPS, 636)
+ip_port (LANSERVER, 637)
+ip_port (MCNS_SEC, 638)
+ip_port (MSDP, 639)
+ip_port (ENTRUST_SPS, 640)
+ip_port (REPCMD, 641)
+ip_port (ESRO_EMSDP, 642)
+ip_port (SANITY, 643)
+ip_port (DWR, 644)
+ip_port (PSSC, 645)
+ip_port (LDP, 646)
+ip_port (DHCP_FAILOVER, 647)
+ip_port (RRP, 648)
+ip_port (CADVIEW_3D, 649)
+ip_port (OBEX, 650)
+ip_port (IEEE_MMS, 651)
+ip_port (HELLO_PORT, 652)
+ip_port (REPSCMD, 653)
+ip_port (AODV, 654)
+ip_port (TINC, 655)
+ip_port (SPMP, 656)
+ip_port (RMC, 657)
+ip_port (TENFOLD, 658)
+ip_port (MAC_SRVR_ADMIN, 660)
+ip_port (HAP, 661)
+ip_port (PFTP, 662)
+ip_port (PURENOISE, 663)
+ip_port (ASF_SECURE_RMCP, 664)
+ip_port (SUN_DR, 665)
+ip_port (MDQS, 666)
+ip_port (DOOM, 666)
+ip_port (DISCLOSE, 667)
+ip_port (MECOMM, 668)
+ip_port (MEREGISTER, 669)
+ip_port (VACDSM_SWS, 670)
+ip_port (VACDSM_APP, 671)
+ip_port (VPPS_QUA, 672)
+ip_port (CIMPLEX, 673)
+ip_port (ACAP, 674)
+ip_port (DCTP, 675)
+ip_port (VPPS_VIA, 676)
+ip_port (VPP, 677)
+ip_port (GGF_NCP, 678)
+ip_port (MRM, 679)
+ip_port (ENTRUST_AAAS, 680)
+ip_port (ENTRUST_AAMS, 681)
+ip_port (XFR, 682)
+ip_port (CORBA_IIOP, 683)
+ip_port (CORBA_IIOP_SSL, 684)
+ip_port (MDC_PORTMAPPER, 685)
+ip_port (HCP_WISMAR, 686)
+ip_port (ASIPREGISTRY, 687)
+ip_port (REALM_RUSD, 688)
+ip_port (NMAP, 689)
+ip_port (VATP, 690)
+ip_port (MSEXCH_ROUTING, 691)
+ip_port (HYPERWAVE_ISP, 692)
+ip_port (CONNENDP, 693)
+ip_port (HA_CLUSTER, 694)
+ip_port (IEEE_MMS_SSL, 695)
+ip_port (RUSHD, 696)
+ip_port (UUIDGEN, 697)
+ip_port (OLSR, 698)
+ip_port (ACCESSNETWORK, 699)
+ip_port (EPP, 700)
+ip_port (LMP, 701)
+ip_port (IRIS_BEEP, 702)
+ip_port (ELCSD, 704)
+ip_port (AGENTX, 705)
+ip_port (SILC, 706)
+ip_port (BORLAND_DSJ, 707)
+ip_port (ENTRUST_KMSH, 709)
+ip_port (ENTRUST_ASH, 710)
+ip_port (CISCO_TDP, 711)
+ip_port (TBRPF, 712)
+ip_port (NETVIEWDM1, 729)
+ip_port (NETVIEWDM2, 730)
+ip_port (NETVIEWDM3, 731)
+ip_port (NETGW, 741)
+ip_port (NETRCS, 742)
+ip_port (FLEXLM, 744)
+ip_port (FUJITSU_DEV, 747)
+ip_port (RIS_CM, 748)
+ip_port (KERBEROS_ADM, 749)
+ip_port (RFILE, 750)
+ip_port (PUMP, 751)
+ip_port (QRH, 752)
+ip_port (RRH, 753)
+ip_port (TELL, 754)
+ip_port (NLOGIN, 758)
+ip_port (CON, 759)
+ip_port (NS, 760)
+ip_port (RXE, 761)
+ip_port (QUOTAD, 762)
+ip_port (CYCLESERV, 763)
+ip_port (OMSERV, 764)
+ip_port (WEBSTER, 765)
+ip_port (PHONEBOOK, 767)
+ip_port (VID, 769)
+ip_port (CADLOCK, 770)
+ip_port (RTIP, 771)
+ip_port (CYCLESERV2, 772)
+ip_port (SUBMIT, 773)
+ip_port (RPASSWD, 774)
+ip_port (ENTOMB, 775)
+ip_port (WPAGES, 776)
+ip_port (MULTILING_HTTP, 777)
+ip_port (WPGS, 780)
+ip_port (MDBS_DAEMON, 800)
+ip_port (DEVICE, 801)
+ip_port (FCP_UDP, 810)
+ip_port (ITM_MCELL_S, 828)
+ip_port (PKIX_3_CA_RA, 829)
+ip_port (DHCP_FAILOVER2, 847)
+ip_port (GDOI, 848)
+ip_port (ISCSI, 860)
+ip_port (RSYNC, 873)
+ip_port (ICLCNET_LOCATE, 886)
+ip_port (ICLCNET_SVINFO, 887)
+ip_port (ACCESSBUILDER, 888)
+ip_port (CDDBP, 888)
+ip_port (OMGINITIALREFS, 900)
+ip_port (SMPNAMERES, 901)
+ip_port (IDEAFARM_CHAT, 902)
+ip_port (IDEAFARM_CATCH, 903)
+ip_port (XACT_BACKUP, 911)
+ip_port (APEX_MESH, 912)
+ip_port (APEX_EDGE, 913)
+ip_port (FTPS_DATA, 989)
+ip_port (FTPS, 990)
+ip_port (NAS, 991)
+ip_port (TELNETS, 992)
+ip_port (IMAPS, 993)
+ip_port (IRCS, 994)
+ip_port (POP3S, 995)
+ip_port (VSINET, 996)
+ip_port (MAITRD, 997)
+ip_port (BUSBOY, 998)
+ip_port (GARCON, 999)
+ip_port (PUPROUTER, 999)
+ip_port (CADLOCK2, 1000)
+ip_port (SURF, 1010)
+
diff --git a/src/vnet/ip/protocols.def b/src/vnet/ip/protocols.def
new file mode 100644
index 00000000000..77fab31da05
--- /dev/null
+++ b/src/vnet/ip/protocols.def
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* Emacs editing mode -*-C-*-
+
+From http://www.iana.org/assignments/protocol-numbers
+
+PROTOCOL NUMBERS
+
+(last updated 18 October 2004)
+
+In the Internet Protocol version 4 (IPv4) [RFC791] there is a field,
+called "Protocol", to identify the next level protocol. This is an 8
+bit field. In Internet Protocol version 6 (IPv6) [RFC1883] this field
+is called the "Next Header" field.
+*/
+ip_protocol (0, IP6_HOP_BY_HOP_OPTIONS)
+ip_protocol (1, ICMP)
+ip_protocol (2, IGMP)
+ip_protocol (3, GGP)
+ip_protocol (4, IP_IN_IP)
+ip_protocol (5, ST)
+ip_protocol (6, TCP)
+ip_protocol (7, CBT)
+ip_protocol (8, EGP)
+ip_protocol (9, IGP)
+ip_protocol (10, BBN_RCC_MON)
+ip_protocol (11, NVP_II)
+ip_protocol (12, PUP)
+ip_protocol (13, ARGUS)
+ip_protocol (14, EMCON)
+ip_protocol (15, XNET)
+ip_protocol (16, CHAOS)
+ip_protocol (17, UDP)
+ip_protocol (18, MUX)
+ip_protocol (19, DCN_MEAS)
+ip_protocol (20, HMP)
+ip_protocol (21, PRM)
+ip_protocol (22, XNS_IDP)
+ip_protocol (23, TRUNK_1)
+ip_protocol (24, TRUNK_2)
+ip_protocol (25, LEAF_1)
+ip_protocol (26, LEAF_2)
+ip_protocol (27, RDP)
+ip_protocol (28, IRTP)
+ip_protocol (29, ISO_TP4)
+ip_protocol (30, NETBLT)
+ip_protocol (31, MFE_NSP)
+ip_protocol (32, MERIT_INP)
+ip_protocol (33, SEP)
+ip_protocol (34, 3PC)
+ip_protocol (35, IDPR)
+ip_protocol (36, XTP)
+ip_protocol (37, DDP)
+ip_protocol (38, IDPR_CMTP)
+ip_protocol (39, TP)
+ip_protocol (40, IL)
+ip_protocol (41, IPV6)
+ip_protocol (42, SDRP)
+ip_protocol (43, IPV6_ROUTE)
+ip_protocol (44, IPV6_FRAGMENTATION)
+ip_protocol (45, IDRP)
+ip_protocol (46, RSVP)
+ip_protocol (47, GRE)
+ip_protocol (48, MHRP)
+ip_protocol (49, BNA)
+ip_protocol (50, IPSEC_ESP)
+ip_protocol (51, IPSEC_AH)
+ip_protocol (52, I_NLSP)
+ip_protocol (53, SWIPE)
+ip_protocol (54, NARP)
+ip_protocol (55, MOBILE)
+ip_protocol (56, TLSP)
+ip_protocol (57, SKIP)
+ip_protocol (58, ICMP6)
+ip_protocol (59, IP6_NONXT)
+ip_protocol (60, IP6_DESTINATION_OPTIONS)
+ip_protocol (62, CFTP)
+ip_protocol (64, SAT_EXPAK)
+ip_protocol (65, KRYPTOLAN)
+ip_protocol (66, RVD)
+ip_protocol (67, IPPC)
+ip_protocol (69, SAT_MON)
+ip_protocol (70, VISA)
+ip_protocol (71, IPCV)
+ip_protocol (72, CPNX)
+ip_protocol (73, CPHB)
+ip_protocol (74, WSN)
+ip_protocol (75, PVP)
+ip_protocol (76, BR_SAT_MON)
+ip_protocol (77, SUN_ND)
+ip_protocol (78, WB_MON)
+ip_protocol (79, WB_EXPAK)
+ip_protocol (80, ISO_IP)
+ip_protocol (81, VMTP)
+ip_protocol (82, SECURE_VMTP)
+ip_protocol (83, VINES)
+ip_protocol (84, TTP)
+ip_protocol (85, NSFNET_IGP)
+ip_protocol (86, DGP)
+ip_protocol (87, TCF)
+ip_protocol (88, EIGRP)
+ip_protocol (89, OSPF)
+ip_protocol (90, SPRITE_RPC)
+ip_protocol (91, LARP)
+ip_protocol (92, MTP)
+ip_protocol (93, AX)
+ip_protocol (94, IPIP)
+ip_protocol (95, MICP)
+ip_protocol (96, SCC_SP)
+ip_protocol (97, ETHERIP)
+ip_protocol (98, ENCAP)
+ip_protocol (100, GMTP)
+ip_protocol (101, IFMP)
+ip_protocol (102, PNNI)
+ip_protocol (103, PIM)
+ip_protocol (104, ARIS)
+ip_protocol (105, SCPS)
+ip_protocol (106, QNX)
+ip_protocol (107, A)
+ip_protocol (108, IPCOMP)
+ip_protocol (109, SNP)
+ip_protocol (110, COMPAQ_PEER)
+ip_protocol (111, IPX_IN_IP)
+ip_protocol (112, VRRP)
+ip_protocol (113, PGM)
+ip_protocol (115, L2TP)
+ip_protocol (116, DDX)
+ip_protocol (117, IATP)
+ip_protocol (118, STP)
+ip_protocol (119, SRP)
+ip_protocol (120, UTI)
+ip_protocol (121, SMP)
+ip_protocol (122, SM)
+ip_protocol (123, PTP)
+ip_protocol (124, ISIS)
+ip_protocol (125, FIRE)
+ip_protocol (126, CRTP)
+ip_protocol (127, CRUDP)
+ip_protocol (128, SSCOPMCE)
+ip_protocol (129, IPLT)
+ip_protocol (130, SPS)
+ip_protocol (131, PIPE)
+ip_protocol (132, SCTP)
+ip_protocol (133, FC)
+ip_protocol (134, RSVP_E2E_IGNORE)
+ip_protocol (135, MOBILITY)
+ip_protocol (136, UDP_LITE)
+ip_protocol (137, MPLS_IN_IP)
+ip_protocol (255, RESERVED)
+
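
The .def file above is an X-macro list: each ip_protocol (n, f) entry expands to whatever the including file defines ip_protocol to be. A minimal sketch of the two usual expansions, an enum plus a name lookup; the IP_PROTOCOL_##f naming matches identifiers used elsewhere in this patch (e.g. IP_PROTOCOL_UDP), while ip_protocol_name () is a hypothetical helper added purely for illustration:

/* Sketch: consume protocols.def twice with different macro bodies. */
/* ip_protocol_name () is illustrative and not part of this patch.  */
typedef enum
{
#define ip_protocol(n, f) IP_PROTOCOL_##f = n,
#include <vnet/ip/protocols.def>
#undef ip_protocol
} ip_protocol_t;

static const char *
ip_protocol_name (int proto)
{
  switch (proto)
    {
#define ip_protocol(n, f) case n: return #f;
#include <vnet/ip/protocols.def>
#undef ip_protocol
    default:
      return "unknown";
    }
}
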
diff --git a/src/vnet/ip/punt.c b/src/vnet/ip/punt.c
new file mode 100644
index 00000000000..9c735128a3b
--- /dev/null
+++ b/src/vnet/ip/punt.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ * @brief Local TCP/IP stack punt infrastructure.
+ *
+ * Provides a set of VPP nodes together with the relevant APIs and CLI
+ * commands needed to adjust and dispatch packets from the VPP data plane
+ * to the local TCP/IP stack.
+ */
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ip/udp.h>
+#include <vnet/ip/punt.h>
+
+#define foreach_punt_next \
+ _ (PUNT, "error-punt")
+
+typedef enum
+{
+#define _(s,n) PUNT_NEXT_##s,
+ foreach_punt_next
+#undef _
+ PUNT_N_NEXT,
+} punt_next_t;
+
+vlib_node_registration_t udp4_punt_node;
+vlib_node_registration_t udp6_punt_node;
+
+/** @brief IPv4/IPv6 UDP punt node main loop.
+
+ This is the main loop inline function for IPv4/IPv6 UDP punt
+ transition node.
+
+ @param vm vlib_main_t corresponding to the current thread
+ @param node vlib_node_runtime_t
+ @param frame vlib_frame_t whose contents should be dispatched
+ @param is_ip4 indicates whether called for the IPv4 or IPv6 node
+*/
+always_inline uword
+udp46_punt_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame, int is_ip4)
+{
+ u32 n_left_from, *from, *to_next;
+ word advance;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ /* udp[46]_lookup hands us the data payload, not the IP header */
+ if (is_ip4)
+ advance = -(sizeof (ip4_header_t) + sizeof (udp_header_t));
+ else
+ advance = -(sizeof (ip6_header_t) + sizeof (udp_header_t));
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, PUNT_NEXT_PUNT, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ vlib_buffer_advance (b0, advance);
+ b0->error = node->errors[PUNT_ERROR_UDP_PORT];
+ }
+
+ vlib_put_next_frame (vm, node, PUNT_NEXT_PUNT, n_left_to_next);
+ }
+
+ return from_frame->n_vectors;
+}
+
+static char *punt_error_strings[] = {
+#define punt_error(n,s) s,
+#include "punt_error.def"
+#undef punt_error
+};
+
+/** @brief IPv4 UDP punt node.
+ @node ip4-udp-punt
+
+ This is the IPv4 UDP punt transition node. It is registered as a next
+ node of "ip4-udp-lookup" for the UDP port(s) requested for punt.
+ The buffer's current data pointer is rewound to the original packet's
+ IPv4 header. All buffers are dispatched to "error-punt".
+
+ @param vm vlib_main_t corresponding to the current thread
+ @param node vlib_node_runtime_t
+ @param frame vlib_frame_t whose contents should be dispatched
+
+ @par Graph mechanics: next index usage
+
+ @em Sets:
+ - <code>b->current_data</code>
+ - <code>b->current_length</code>
+
+ <em>Next Index:</em>
+ - Dispatches the packet to the "error-punt" node
+*/
+static uword
+udp4_punt (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ return udp46_punt_inline (vm, node, from_frame, 1 /* is_ip4 */ );
+}
+
+/** @brief IPv6 UDP punt node.
+ @node ip6-udp-punt
+
+ This is the IPv6 UDP punt transition node. It is registered as a next
+ node of "ip6-udp-lookup" for the UDP port(s) requested for punt.
+ The buffer's current data pointer is rewound to the original packet's
+ IPv6 header. All buffers are dispatched to "error-punt".
+
+ @param vm vlib_main_t corresponding to the current thread
+ @param node vlib_node_runtime_t
+ @param frame vlib_frame_t whose contents should be dispatched
+
+ @par Graph mechanics: next index usage
+
+ @em Sets:
+ - <code>b->current_data</code>
+ - <code>b->current_length</code>
+
+ <em>Next Index:</em>
+ - Dispatches the packet to the "error-punt" node
+*/
+static uword
+udp6_punt (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ return udp46_punt_inline (vm, node, from_frame, 0 /* is_ip4 */ );
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (udp4_punt_node) = {
+ .function = udp4_punt,
+ .name = "ip4-udp-punt",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_errors = PUNT_N_ERROR,
+ .error_strings = punt_error_strings,
+
+ .n_next_nodes = PUNT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [PUNT_NEXT_##s] = n,
+ foreach_punt_next
+#undef _
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (udp4_punt_node, udp4_punt);
+
+VLIB_REGISTER_NODE (udp6_punt_node) = {
+ .function = udp6_punt,
+ .name = "ip6-udp-punt",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_errors = PUNT_N_ERROR,
+ .error_strings = punt_error_strings,
+
+ .n_next_nodes = PUNT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [PUNT_NEXT_##s] = n,
+ foreach_punt_next
+#undef _
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (udp6_punt_node, udp6_punt);
+
+/**
+ * @brief Request IP traffic punt to the local TCP/IP stack.
+ *
+ * @em Note
+ * - UDP is the only protocol supported in the current implementation
+ * - When requesting UDP punt, port number(s) must be specified
+ * - All TCP traffic is currently punted to the host by default
+ *
+ * @param vm vlib_main_t corresponding to the current thread
+ * @param ipv IP protocol version.
+ *            4 - IPv4, 6 - IPv6, ~0 for both IPv4 and IPv6
+ * @param protocol 8-bit L4 protocol value.
+ *            Only the value 17 (UDP) is currently supported
+ * @param port 16-bit L4 port number, when applicable
+ *
+ * @returns 0 on success, non-zero value otherwise
+ */
+clib_error_t *
+vnet_punt_add_del (vlib_main_t * vm, u8 ipv, u8 protocol, u16 port,
+ int is_add)
+{
+ /* For now we only support UDP punt */
+ if (protocol != IP_PROTOCOL_UDP)
+ return clib_error_return (0,
+ "only UDP protocol (%d) is supported, got %d",
+ IP_PROTOCOL_UDP, protocol);
+
+ if (ipv != (u8) ~ 0 && ipv != 4 && ipv != 6)
+ return clib_error_return (0, "IP version must be 4 or 6, got %d", ipv);
+
+ if (port == (u16) ~ 0)
+ {
+ if (ipv == 4 || ipv == (u8) ~ 0)
+ udp_punt_unknown (vm, 1, is_add);
+
+ if (ipv == 6 || ipv == (u8) ~ 0)
+ udp_punt_unknown (vm, 0, is_add);
+
+ return 0;
+ }
+
+ else if (is_add)
+ {
+ if (ipv == 4 || ipv == (u8) ~ 0)
+ udp_register_dst_port (vm, port, udp4_punt_node.index, 1);
+
+ if (ipv == 6 || ipv == (u8) ~ 0)
+ udp_register_dst_port (vm, port, udp6_punt_node.index, 0);
+
+ return 0;
+ }
+
+ else
+ return clib_error_return (0, "punt delete is not supported yet");
+}
+
+static clib_error_t *
+udp_punt_cli (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ u32 udp_port;
+ int is_add = 1;
+ clib_error_t *error;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "del"))
+ is_add = 0;
+ if (unformat (input, "all"))
+ {
+ /* punt both IPv6 and IPv4 when used in CLI */
+ error = vnet_punt_add_del (vm, ~0, IP_PROTOCOL_UDP, ~0, is_add);
+ if (error)
+ clib_error_report (error);
+ }
+ else if (unformat (input, "%d", &udp_port))
+ {
+ /* punt both IPv6 and IPv4 when used in CLI */
+ error = vnet_punt_add_del (vm, ~0, IP_PROTOCOL_UDP,
+ udp_port, is_add);
+ if (error)
+ clib_error_report (error);
+ }
+ }
+
+ return 0;
+}
+
+/*?
+ * The set of '<em>set punt</em>' commands allows specific IP traffic to
+ * be punted to the host TCP/IP stack.
+ *
+ * @em Note
+ * - UDP is the only protocol supported in the current implementation
+ * - All TCP traffic is currently punted to the host by default
+ *
+ * @cliexpar
+ * @parblock
+ * Example of how to request NTP (UDP port 123) traffic to be punted
+ * @cliexcmd{set punt udp 123}
+ *
+ * Example of how to request all 'unknown' UDP traffic to be punted
+ * @cliexcmd{set punt udp all}
+ *
+ * Example of how to stop all 'unknown' UDP traffic from being punted
+ * @cliexcmd{set punt udp del all}
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (punt_udp_command, static) = {
+ .path = "set punt udp",
+ .short_help = "set punt udp [del] <all | port-num1 [port-num2 ...]>",
+ .function = udp_punt_cli,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
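
As a usage sketch, the API above can be driven from an init-time hook; my_punt_init () and the chosen port are illustrative, not part of this patch:

/* Sketch: punt IPv4 and IPv6 UDP port 5000 to the host at init time. */
/* my_punt_init () and port 5000 are illustrative only.               */
static clib_error_t *
my_punt_init (vlib_main_t * vm)
{
  /* ipv = ~0 selects both address families; 17 is IP_PROTOCOL_UDP */
  clib_error_t *error = vnet_punt_add_del (vm, ~0 /* ipv */,
					   IP_PROTOCOL_UDP,
					   5000 /* port */, 1 /* is_add */);
  if (error)
    clib_error_report (error);
  return 0;
}

VLIB_INIT_FUNCTION (my_punt_init);
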
diff --git a/src/vnet/ip/punt.h b/src/vnet/ip/punt.h
new file mode 100644
index 00000000000..09a9d4c55bf
--- /dev/null
+++ b/src/vnet/ip/punt.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ * @brief Definitions for punt infrastructure.
+ */
+#ifndef included_punt_h
+#define included_punt_h
+
+typedef enum
+{
+#define punt_error(n,s) PUNT_ERROR_##n,
+#include <vnet/ip/punt_error.def>
+#undef punt_error
+ PUNT_N_ERROR,
+} punt_error_t;
+
+
+clib_error_t *vnet_punt_add_del (vlib_main_t * vm, u8 ipv,
+ u8 protocol, u16 port, int is_add);
+
+#endif /* included_punt_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/punt_error.def b/src/vnet/ip/punt_error.def
new file mode 100644
index 00000000000..a76d7e7b817
--- /dev/null
+++ b/src/vnet/ip/punt_error.def
@@ -0,0 +1,19 @@
+/*
+ * punt_error.def: punt errors
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+punt_error (NONE, "no error")
+punt_error (UDP_PORT, "udp port punt")
diff --git a/src/vnet/ip/tcp_packet.h b/src/vnet/ip/tcp_packet.h
new file mode 100644
index 00000000000..ed402403592
--- /dev/null
+++ b/src/vnet/ip/tcp_packet.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip4/tcp_packet.h: TCP packet format (see RFC 793)
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_tcp_packet_h
+#define included_tcp_packet_h
+
+/* TCP flags bit 0 first. */
+#define foreach_tcp_flag \
+ _ (FIN) \
+ _ (SYN) \
+ _ (RST) \
+ _ (PSH) \
+ _ (ACK) \
+ _ (URG) \
+ _ (ECE) \
+ _ (CWR)
+
+enum
+{
+#define _(f) TCP_FLAG_BIT_##f,
+ foreach_tcp_flag
+#undef _
+ TCP_N_FLAG_BITS,
+
+#define _(f) TCP_FLAG_##f = 1 << TCP_FLAG_BIT_##f,
+ foreach_tcp_flag
+#undef _
+};
+
+typedef struct
+{
+ /* Source and destination port. */
+ union
+ {
+ union
+ {
+ struct
+ {
+ u16 src, dst;
+ };
+ u32 src_and_dst;
+ } ports;
+ u16 src_port, dst_port;
+ };
+
+ /* Sequence and acknowledgment number. */
+ u32 seq_number, ack_number;
+
+  /* High 4 bits: TCP header size in 32-bit words; low 4 bits reserved. */
+ u8 tcp_header_u32s_and_reserved;
+
+  /* see foreach_tcp_flag for enumeration of TCP flags. */
+ u8 flags;
+
+ /* Current window advertised by sender.
+ This is the number of bytes sender is willing to receive
+ right now. */
+ u16 window;
+
+ /* Checksum of TCP pseudo header and data. */
+ u16 checksum;
+
+ u16 urgent_pointer;
+} tcp_header_t;
+
+always_inline int
+tcp_header_bytes (tcp_header_t * t)
+{
+ return (t->tcp_header_u32s_and_reserved >> 4) * sizeof (u32);
+}
+
+/* TCP options. */
+typedef enum tcp_option_type
+{
+ TCP_OPTION_END = 0,
+ TCP_OPTION_NOP = 1,
+ TCP_OPTION_MSS = 2,
+ TCP_OPTION_WINDOW_SCALE = 3,
+ TCP_OPTION_SACK_PERMITTED = 4,
+ TCP_OPTION_SACK_BLOCK = 5,
+ TCP_OPTION_TIME_STAMP = 8,
+} tcp_option_type_t;
+
+/* All options except NOP and END have a 1-byte length field. */
+typedef struct
+{
+ tcp_option_type_t type:8;
+
+ /* Length of this option in bytes. */
+ u8 length;
+} tcp_option_with_length_t;
+
+#endif /* included_tcp_packet_h */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
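
A short sketch of how the definitions above compose: tcp_header_bytes () recovers the header length from the data-offset nibble, and the generated TCP_FLAG_* masks test individual flag bits. is_syn_ack () is illustrative only:

/* Sketch: classify a segment and locate its payload.            */
/* 'tcp' is assumed to point at a validated tcp_header_t.        */
static int
is_syn_ack (tcp_header_t * tcp)
{
  int header_len = tcp_header_bytes (tcp);	/* data offset * 4 */
  u8 *payload = (u8 *) tcp + header_len;	/* options end here */
  (void) payload;
  return (tcp->flags & (TCP_FLAG_SYN | TCP_FLAG_ACK))
    == (TCP_FLAG_SYN | TCP_FLAG_ACK);
}
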
diff --git a/src/vnet/ip/udp.h b/src/vnet/ip/udp.h
new file mode 100644
index 00000000000..03c62e0b684
--- /dev/null
+++ b/src/vnet/ip/udp.h
@@ -0,0 +1,313 @@
+/*
+ * ip/udp.h: udp protocol
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_udp_h
+#define included_udp_h
+
+#include <vnet/vnet.h>
+#include <vnet/ip/udp_packet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ip/ip4.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ip/format.h>
+
+typedef enum
+{
+#define udp_error(n,s) UDP_ERROR_##n,
+#include <vnet/ip/udp_error.def>
+#undef udp_error
+ UDP_N_ERROR,
+} udp_error_t;
+
+#define foreach_udp4_dst_port \
+_ (67, dhcp_to_server) \
+_ (68, dhcp_to_client) \
+_ (500, ikev2) \
+_ (3784, bfd4) \
+_ (4341, lisp_gpe) \
+_ (4342, lisp_cp) \
+_ (4739, ipfix) \
+_ (4789, vxlan) \
+_ (4789, vxlan6) \
+_ (4790, vxlan_gpe) \
+_ (6633, vpath_3)
+
+
+#define foreach_udp6_dst_port \
+_ (547, dhcpv6_to_server) \
+_ (546, dhcpv6_to_client) \
+_ (3784, bfd6) \
+_ (4341, lisp_gpe6) \
+_ (4342, lisp_cp6) \
+_ (4790, vxlan6_gpe) \
+_ (6633, vpath6_3)
+
+typedef enum
+{
+#define _(n,f) UDP_DST_PORT_##f = n,
+ foreach_udp4_dst_port foreach_udp6_dst_port
+#undef _
+} udp_dst_port_t;
+
+typedef enum
+{
+#define _(n,f) UDP6_DST_PORT_##f = n,
+ foreach_udp6_dst_port
+#undef _
+} udp6_dst_port_t;
+
+typedef struct
+{
+ /* Name (a c string). */
+ char *name;
+
+  /* UDP destination port in host byte order. */
+ udp_dst_port_t dst_port;
+
+ /* Node which handles this type. */
+ u32 node_index;
+
+ /* Next index for this type. */
+ u32 next_index;
+} udp_dst_port_info_t;
+
+typedef enum
+{
+ UDP_IP6 = 0,
+ UDP_IP4, /* the code is full of is_ip4... */
+ N_UDP_AF,
+} udp_af_t;
+
+typedef struct
+{
+ udp_dst_port_info_t *dst_port_infos[N_UDP_AF];
+
+  /* Hash tables mapping name/dst-port to dst-port info index. */
+ uword *dst_port_info_by_name[N_UDP_AF];
+ uword *dst_port_info_by_dst_port[N_UDP_AF];
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+} udp_main_t;
+
+always_inline udp_dst_port_info_t *
+udp_get_dst_port_info (udp_main_t * um, udp_dst_port_t dst_port, u8 is_ip4)
+{
+ uword *p = hash_get (um->dst_port_info_by_dst_port[is_ip4], dst_port);
+ return p ? vec_elt_at_index (um->dst_port_infos[is_ip4], p[0]) : 0;
+}
+
+format_function_t format_udp_header;
+format_function_t format_udp_rx_trace;
+
+unformat_function_t unformat_udp_header;
+
+void udp_register_dst_port (vlib_main_t * vm,
+ udp_dst_port_t dst_port,
+ u32 node_index, u8 is_ip4);
+
+void udp_punt_unknown (vlib_main_t * vm, u8 is_ip4, u8 is_add);
+
+always_inline void
+ip_udp_fixup_one (vlib_main_t * vm, vlib_buffer_t * b0, u8 is_ip4)
+{
+ u16 new_l0;
+ udp_header_t *udp0;
+
+ if (is_ip4)
+ {
+ ip4_header_t *ip0;
+ ip_csum_t sum0;
+ u16 old_l0 = 0;
+
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* fix the <bleep>ing outer-IP checksum */
+ sum0 = ip0->checksum;
+ /* old_l0 always 0, see the rewrite setup */
+ new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+
+ sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
+ length /* changed member */ );
+ ip0->checksum = ip_csum_fold (sum0);
+ ip0->length = new_l0;
+
+ /* Fix UDP length */
+ udp0 = (udp_header_t *) (ip0 + 1);
+ new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
+ - sizeof (*ip0));
+ udp0->length = new_l0;
+ }
+ else
+ {
+ ip6_header_t *ip0;
+ int bogus0;
+
+ ip0 = vlib_buffer_get_current (b0);
+
+ new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
+ - sizeof (*ip0));
+ ip0->payload_length = new_l0;
+
+ /* Fix UDP length */
+ udp0 = (udp_header_t *) (ip0 + 1);
+ udp0->length = new_l0;
+
+ udp0->checksum =
+ ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip0, &bogus0);
+ ASSERT (bogus0 == 0);
+
+ if (udp0->checksum == 0)
+ udp0->checksum = 0xffff;
+ }
+}
+
+always_inline void
+ip_udp_encap_one (vlib_main_t * vm, vlib_buffer_t * b0, u8 * ec0, word ec_len,
+ u8 is_ip4)
+{
+ vlib_buffer_advance (b0, -ec_len);
+
+ if (is_ip4)
+ {
+ ip4_header_t *ip0;
+
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* Apply the encap string. */
+ clib_memcpy (ip0, ec0, ec_len);
+ ip_udp_fixup_one (vm, b0, 1);
+ }
+ else
+ {
+ ip6_header_t *ip0;
+
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* Apply the encap string. */
+ clib_memcpy (ip0, ec0, ec_len);
+ ip_udp_fixup_one (vm, b0, 0);
+ }
+}
+
+always_inline void
+ip_udp_encap_two (vlib_main_t * vm, vlib_buffer_t * b0, vlib_buffer_t * b1,
+ u8 * ec0, u8 * ec1, word ec_len, u8 is_v4)
+{
+ u16 new_l0, new_l1;
+ udp_header_t *udp0, *udp1;
+
+ ASSERT (_vec_len (ec0) == _vec_len (ec1));
+
+ vlib_buffer_advance (b0, -ec_len);
+ vlib_buffer_advance (b1, -ec_len);
+
+ if (is_v4)
+ {
+ ip4_header_t *ip0, *ip1;
+ ip_csum_t sum0, sum1;
+ u16 old_l0 = 0, old_l1 = 0;
+
+ ip0 = vlib_buffer_get_current (b0);
+ ip1 = vlib_buffer_get_current (b1);
+
+ /* Apply the encap string */
+ clib_memcpy (ip0, ec0, ec_len);
+ clib_memcpy (ip1, ec1, ec_len);
+
+ /* fix the <bleep>ing outer-IP checksum */
+ sum0 = ip0->checksum;
+ sum1 = ip1->checksum;
+
+ /* old_l0 always 0, see the rewrite setup */
+ new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+ new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
+
+ sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
+ length /* changed member */ );
+ sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
+ length /* changed member */ );
+
+ ip0->checksum = ip_csum_fold (sum0);
+ ip1->checksum = ip_csum_fold (sum1);
+
+ ip0->length = new_l0;
+ ip1->length = new_l1;
+
+ /* Fix UDP length */
+ udp0 = (udp_header_t *) (ip0 + 1);
+ udp1 = (udp_header_t *) (ip1 + 1);
+
+ new_l0 =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
+ sizeof (*ip0));
+ new_l1 =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1) -
+ sizeof (*ip1));
+ udp0->length = new_l0;
+ udp1->length = new_l1;
+ }
+ else
+ {
+ ip6_header_t *ip0, *ip1;
+ int bogus0, bogus1;
+
+ ip0 = vlib_buffer_get_current (b0);
+ ip1 = vlib_buffer_get_current (b1);
+
+ /* Apply the encap string. */
+ clib_memcpy (ip0, ec0, ec_len);
+ clib_memcpy (ip1, ec1, ec_len);
+
+ new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
+ - sizeof (*ip0));
+ new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
+ - sizeof (*ip1));
+ ip0->payload_length = new_l0;
+ ip1->payload_length = new_l1;
+
+ /* Fix UDP length */
+ udp0 = (udp_header_t *) (ip0 + 1);
+ udp1 = (udp_header_t *) (ip1 + 1);
+
+ udp0->length = new_l0;
+ udp1->length = new_l1;
+
+ udp0->checksum =
+ ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip0, &bogus0);
+ udp1->checksum =
+ ip6_tcp_udp_icmp_compute_checksum (vm, b1, ip1, &bogus1);
+ ASSERT (bogus0 == 0);
+ ASSERT (bogus1 == 0);
+
+ if (udp0->checksum == 0)
+ udp0->checksum = 0xffff;
+ if (udp1->checksum == 0)
+ udp1->checksum = 0xffff;
+ }
+}
+
+#endif /* included_udp_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
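
The fixup helpers above patch the IPv4 checksum incrementally (RFC 1624) rather than recomputing it: only the 16-bit length word changes, so the old and new values suffice. A standalone sketch of the same arithmetic, independent of the clib ip_csum_* helpers:

#include <stdint.h>

/* Sketch: RFC 1624 incremental update of one changed 16-bit word, */
/* the same idea as ip_csum_update () + ip_csum_fold () above.     */
static uint16_t
csum_update16 (uint16_t old_csum, uint16_t old_word, uint16_t new_word)
{
  /* HC' = ~(~HC + ~m + m')   (RFC 1624, eqn. 3) */
  uint32_t sum = (uint16_t) ~old_csum;
  sum += (uint16_t) ~old_word;
  sum += new_word;
  while (sum >> 16)		/* fold carries back into 16 bits */
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) ~sum;
}
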
diff --git a/src/vnet/ip/udp_error.def b/src/vnet/ip/udp_error.def
new file mode 100644
index 00000000000..bfdae0acc77
--- /dev/null
+++ b/src/vnet/ip/udp_error.def
@@ -0,0 +1,21 @@
+/*
+ * udp_error.def: udp errors
+ *
+ * Copyright (c) 2013-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+udp_error (NONE, "no error")
+udp_error (NO_LISTENER, "no listener for dst port")
+udp_error (LENGTH_ERROR, "UDP packets with length errors")
+udp_error (PUNT, "no listener punt")
diff --git a/src/vnet/ip/udp_format.c b/src/vnet/ip/udp_format.c
new file mode 100644
index 00000000000..abdf561e8c9
--- /dev/null
+++ b/src/vnet/ip/udp_format.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/udp_format.c: udp formatting
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/ip/ip.h>
+
+/* Format UDP header. */
+u8 *
+format_udp_header (u8 * s, va_list * args)
+{
+ udp_header_t *udp = va_arg (*args, udp_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+ uword indent;
+ u32 header_bytes = sizeof (udp[0]);
+
+ /* Nothing to do. */
+ if (max_header_bytes < sizeof (udp[0]))
+ return format (s, "UDP header truncated");
+
+ indent = format_get_indent (s);
+ indent += 2;
+
+ s = format (s, "UDP: %d -> %d",
+ clib_net_to_host_u16 (udp->src_port),
+ clib_net_to_host_u16 (udp->dst_port));
+
+ s = format (s, "\n%Ulength %d, checksum 0x%04x",
+ format_white_space, indent,
+ clib_net_to_host_u16 (udp->length),
+ clib_net_to_host_u16 (udp->checksum));
+
+ /* Recurse into next protocol layer. */
+ if (max_header_bytes != 0 && header_bytes < max_header_bytes)
+ {
+ ip_main_t *im = &ip_main;
+ tcp_udp_port_info_t *pi;
+
+ pi = ip_get_tcp_udp_port_info (im, udp->dst_port);
+
+ if (pi && pi->format_header)
+ s = format (s, "\n%U%U",
+ format_white_space, indent - 2, pi->format_header,
+ /* next protocol header */ (udp + 1),
+ max_header_bytes - sizeof (udp[0]));
+ }
+
+ return s;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/udp_init.c b/src/vnet/ip/udp_init.c
new file mode 100644
index 00000000000..1241ca4ab32
--- /dev/null
+++ b/src/vnet/ip/udp_init.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/udp_init.c: udp initialization
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/ip/ip.h>
+
+clib_error_t *
+udp_init (vlib_main_t * vm)
+{
+ ip_main_t *im = &ip_main;
+ ip_protocol_info_t *pi;
+ clib_error_t *error;
+
+ error = vlib_call_init_function (vm, ip_main_init);
+
+ if (!error)
+ {
+ pi = ip_get_protocol_info (im, IP_PROTOCOL_UDP);
+ if (pi == 0)
+ return clib_error_return (0, "UDP protocol info AWOL");
+ pi->format_header = format_udp_header;
+ pi->unformat_pg_edit = unformat_pg_udp_header;
+ }
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (udp_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/udp_local.c b/src/vnet/ip/udp_local.c
new file mode 100644
index 00000000000..13ab6e4fb32
--- /dev/null
+++ b/src/vnet/ip/udp_local.c
@@ -0,0 +1,645 @@
+/*
+ * node.c: udp packet processing
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ip/udp.h>
+#include <vnet/ip/udp_packet.h>
+#include <vppinfra/sparse_vec.h>
+
+udp_main_t udp_main;
+
+#define foreach_udp_input_next \
+ _ (PUNT, "error-punt") \
+ _ (DROP, "error-drop") \
+ _ (ICMP4_ERROR, "ip4-icmp-error") \
+ _ (ICMP6_ERROR, "ip6-icmp-error")
+
+typedef enum
+{
+#define _(s,n) UDP_INPUT_NEXT_##s,
+ foreach_udp_input_next
+#undef _
+ UDP_INPUT_N_NEXT,
+} udp_input_next_t;
+
+typedef struct
+{
+ u16 src_port;
+ u16 dst_port;
+ u8 bound;
+} udp_rx_trace_t;
+
+u8 *
+format_udp_rx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ udp_rx_trace_t *t = va_arg (*args, udp_rx_trace_t *);
+
+ s = format (s, "UDP: src-port %d dst-port %d%s",
+ clib_net_to_host_u16 (t->src_port),
+ clib_net_to_host_u16 (t->dst_port),
+ t->bound ? "" : " (no listener)");
+ return s;
+}
+
+typedef struct
+{
+ /* Sparse vector mapping udp dst_port in network byte order
+ to next index. */
+ u16 *next_by_dst_port;
+ u8 punt_unknown;
+} udp_input_runtime_t;
+
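The next_by_dst_port sparse vector is the heart of the dispatch below: destination ports in network byte order index it, and the stored u16 is a next-node index. A sketch of the lookup the inline loop performs; next_for_port () is illustrative and not part of this patch:

/* Sketch of the per-packet lookup done in udp46_input_inline (). */
static u32
next_for_port (udp_input_runtime_t * rt, u16 dst_port_net)
{
  u32 i = sparse_vec_index (rt->next_by_dst_port, dst_port_net);
  if (i == SPARSE_VEC_INVALID_INDEX)
    return ~0;			/* no listener registered */
  return vec_elt (rt->next_by_dst_port, i);
}
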
+vlib_node_registration_t udp4_input_node;
+vlib_node_registration_t udp6_input_node;
+
+always_inline uword
+udp46_input_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame, int is_ip4)
+{
+ udp_input_runtime_t *rt = is_ip4 ?
+ (void *) vlib_node_get_runtime_data (vm, udp4_input_node.index)
+ : (void *) vlib_node_get_runtime_data (vm, udp6_input_node.index);
+ __attribute__ ((unused)) u32 n_left_from, next_index, *from, *to_next;
+ word n_no_listener = 0;
+ u8 punt_unknown = rt->punt_unknown;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ udp_header_t *h0 = 0, *h1 = 0;
+ u32 i0, i1, dst_port0, dst_port1;
+ u32 advance0, advance1;
+ u32 error0, next0, error1, next1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, sizeof (h0[0]), LOAD);
+ CLIB_PREFETCH (p3->data, sizeof (h1[0]), LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* ip4/6_local hands us the ip header, not the udp header */
+ if (is_ip4)
+ {
+ advance0 = sizeof (ip4_header_t);
+ advance1 = sizeof (ip4_header_t);
+ }
+ else
+ {
+ advance0 = sizeof (ip6_header_t);
+ advance1 = sizeof (ip6_header_t);
+ }
+
+ if (PREDICT_FALSE (b0->current_length < advance0 + sizeof (*h0)))
+ {
+ error0 = UDP_ERROR_LENGTH_ERROR;
+ next0 = UDP_INPUT_NEXT_DROP;
+ }
+ else
+ {
+ vlib_buffer_advance (b0, advance0);
+ h0 = vlib_buffer_get_current (b0);
+ error0 = next0 = 0;
+ if (PREDICT_FALSE (clib_net_to_host_u16 (h0->length) >
+ vlib_buffer_length_in_chain (vm, b0)))
+ {
+ error0 = UDP_ERROR_LENGTH_ERROR;
+ next0 = UDP_INPUT_NEXT_DROP;
+ }
+ }
+
+ if (PREDICT_FALSE (b1->current_length < advance1 + sizeof (*h1)))
+ {
+ error1 = UDP_ERROR_LENGTH_ERROR;
+ next1 = UDP_INPUT_NEXT_DROP;
+ }
+ else
+ {
+ vlib_buffer_advance (b1, advance1);
+ h1 = vlib_buffer_get_current (b1);
+ error1 = next1 = 0;
+ if (PREDICT_FALSE (clib_net_to_host_u16 (h1->length) >
+ vlib_buffer_length_in_chain (vm, b1)))
+ {
+ error1 = UDP_ERROR_LENGTH_ERROR;
+ next1 = UDP_INPUT_NEXT_DROP;
+ }
+ }
+
+ /* Index sparse array with network byte order. */
+ dst_port0 = (error0 == 0) ? h0->dst_port : 0;
+ dst_port1 = (error1 == 0) ? h1->dst_port : 0;
+ sparse_vec_index2 (rt->next_by_dst_port, dst_port0, dst_port1,
+ &i0, &i1);
+ next0 = (error0 == 0) ? vec_elt (rt->next_by_dst_port, i0) : next0;
+ next1 = (error1 == 0) ? vec_elt (rt->next_by_dst_port, i1) : next1;
+
+ if (PREDICT_FALSE (i0 == SPARSE_VEC_INVALID_INDEX))
+ {
+ // move the pointer back so icmp-error can find the
+ // ip packet header
+ vlib_buffer_advance (b0, -(word) advance0);
+
+ if (PREDICT_FALSE (punt_unknown))
+ {
+ b0->error = node->errors[UDP_ERROR_PUNT];
+ next0 = UDP_INPUT_NEXT_PUNT;
+ }
+ else if (is_ip4)
+ {
+ icmp4_error_set_vnet_buffer (b0,
+ ICMP4_destination_unreachable,
+ ICMP4_destination_unreachable_port_unreachable,
+ 0);
+ next0 = UDP_INPUT_NEXT_ICMP4_ERROR;
+ n_no_listener++;
+ }
+ else
+ {
+ icmp6_error_set_vnet_buffer (b0,
+ ICMP6_destination_unreachable,
+ ICMP6_destination_unreachable_port_unreachable,
+ 0);
+ next0 = UDP_INPUT_NEXT_ICMP6_ERROR;
+ n_no_listener++;
+ }
+ }
+ else
+ {
+ b0->error = node->errors[UDP_ERROR_NONE];
+ // advance to the payload
+ vlib_buffer_advance (b0, sizeof (*h0));
+ }
+
+ if (PREDICT_FALSE (i1 == SPARSE_VEC_INVALID_INDEX))
+ {
+ // move the pointer back so icmp-error can find the
+ // ip packet header
+ vlib_buffer_advance (b1, -(word) advance1);
+
+ if (PREDICT_FALSE (punt_unknown))
+ {
+ b1->error = node->errors[UDP_ERROR_PUNT];
+ next1 = UDP_INPUT_NEXT_PUNT;
+ }
+ else if (is_ip4)
+ {
+ icmp4_error_set_vnet_buffer (b1,
+ ICMP4_destination_unreachable,
+ ICMP4_destination_unreachable_port_unreachable,
+ 0);
+ next1 = UDP_INPUT_NEXT_ICMP4_ERROR;
+ n_no_listener++;
+ }
+ else
+ {
+ icmp6_error_set_vnet_buffer (b1,
+ ICMP6_destination_unreachable,
+ ICMP6_destination_unreachable_port_unreachable,
+ 0);
+ next1 = UDP_INPUT_NEXT_ICMP6_ERROR;
+ n_no_listener++;
+ }
+ }
+ else
+ {
+ b1->error = node->errors[UDP_ERROR_NONE];
+ // advance to the payload
+ vlib_buffer_advance (b1, sizeof (*h1));
+ }
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ udp_rx_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ if (b0->error != node->errors[UDP_ERROR_LENGTH_ERROR])
+ {
+ tr->src_port = h0 ? h0->src_port : 0;
+ tr->dst_port = h0 ? h0->dst_port : 0;
+ tr->bound = (next0 != UDP_INPUT_NEXT_ICMP4_ERROR &&
+ next0 != UDP_INPUT_NEXT_ICMP6_ERROR);
+ }
+ }
+ if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ udp_rx_trace_t *tr = vlib_add_trace (vm, node,
+ b1, sizeof (*tr));
+ if (b1->error != node->errors[UDP_ERROR_LENGTH_ERROR])
+ {
+ tr->src_port = h1 ? h1->src_port : 0;
+ tr->dst_port = h1 ? h1->dst_port : 0;
+ tr->bound = (next1 != UDP_INPUT_NEXT_ICMP4_ERROR &&
+ next1 != UDP_INPUT_NEXT_ICMP6_ERROR);
+ }
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ udp_header_t *h0 = 0;
+ u32 i0, next0;
+ u32 advance0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /* ip4/6_local hands us the ip header, not the udp header */
+ if (is_ip4)
+ advance0 = sizeof (ip4_header_t);
+ else
+ advance0 = sizeof (ip6_header_t);
+
+ if (PREDICT_FALSE (b0->current_length < advance0 + sizeof (*h0)))
+ {
+ b0->error = node->errors[UDP_ERROR_LENGTH_ERROR];
+ next0 = UDP_INPUT_NEXT_DROP;
+ goto trace_x1;
+ }
+
+ vlib_buffer_advance (b0, advance0);
+
+ h0 = vlib_buffer_get_current (b0);
+
+ if (PREDICT_TRUE (clib_net_to_host_u16 (h0->length) <=
+ vlib_buffer_length_in_chain (vm, b0)))
+ {
+ i0 = sparse_vec_index (rt->next_by_dst_port, h0->dst_port);
+ next0 = vec_elt (rt->next_by_dst_port, i0);
+
+ if (PREDICT_FALSE (i0 == SPARSE_VEC_INVALID_INDEX))
+ {
+ // move the pointer back so icmp-error can find the
+ // ip packet header
+ vlib_buffer_advance (b0, -(word) advance0);
+
+ if (PREDICT_FALSE (punt_unknown))
+ {
+ b0->error = node->errors[UDP_ERROR_PUNT];
+ next0 = UDP_INPUT_NEXT_PUNT;
+ }
+ else if (is_ip4)
+ {
+ icmp4_error_set_vnet_buffer (b0,
+ ICMP4_destination_unreachable,
+ ICMP4_destination_unreachable_port_unreachable,
+ 0);
+ next0 = UDP_INPUT_NEXT_ICMP4_ERROR;
+ n_no_listener++;
+ }
+ else
+ {
+ icmp6_error_set_vnet_buffer (b0,
+ ICMP6_destination_unreachable,
+ ICMP6_destination_unreachable_port_unreachable,
+ 0);
+ next0 = UDP_INPUT_NEXT_ICMP6_ERROR;
+ n_no_listener++;
+ }
+ }
+ else
+ {
+ b0->error = node->errors[UDP_ERROR_NONE];
+ // advance to the payload
+ vlib_buffer_advance (b0, sizeof (*h0));
+ }
+ }
+ else
+ {
+ b0->error = node->errors[UDP_ERROR_LENGTH_ERROR];
+ next0 = UDP_INPUT_NEXT_DROP;
+ }
+
+ trace_x1:
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ udp_rx_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ if (b0->error != node->errors[UDP_ERROR_LENGTH_ERROR])
+ {
+ tr->src_port = h0->src_port;
+ tr->dst_port = h0->dst_port;
+ tr->bound = (next0 != UDP_INPUT_NEXT_ICMP4_ERROR &&
+ next0 != UDP_INPUT_NEXT_ICMP6_ERROR);
+ }
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_error_count (vm, node->node_index, UDP_ERROR_NO_LISTENER,
+ n_no_listener);
+ return from_frame->n_vectors;
+}
+
+static char *udp_error_strings[] = {
+#define udp_error(n,s) s,
+#include "udp_error.def"
+#undef udp_error
+};
+
+static uword
+udp4_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ return udp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ );
+}
+
+static uword
+udp6_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ return udp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ );
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (udp4_input_node) = {
+ .function = udp4_input,
+ .name = "ip4-udp-lookup",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .runtime_data_bytes = sizeof (udp_input_runtime_t),
+
+ .n_errors = UDP_N_ERROR,
+ .error_strings = udp_error_strings,
+
+ .n_next_nodes = UDP_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [UDP_INPUT_NEXT_##s] = n,
+ foreach_udp_input_next
+#undef _
+ },
+
+ .format_buffer = format_udp_header,
+ .format_trace = format_udp_rx_trace,
+ .unformat_buffer = unformat_udp_header,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (udp4_input_node, udp4_input);
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (udp6_input_node) = {
+ .function = udp6_input,
+ .name = "ip6-udp-lookup",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .runtime_data_bytes = sizeof (udp_input_runtime_t),
+
+ .n_errors = UDP_N_ERROR,
+ .error_strings = udp_error_strings,
+
+ .n_next_nodes = UDP_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [UDP_INPUT_NEXT_##s] = n,
+ foreach_udp_input_next
+#undef _
+ },
+
+ .format_buffer = format_udp_header,
+ .format_trace = format_udp_rx_trace,
+ .unformat_buffer = unformat_udp_header,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (udp6_input_node, udp6_input);
+
+static void
+add_dst_port (udp_main_t * um,
+ udp_dst_port_t dst_port, char *dst_port_name, u8 is_ip4)
+{
+ udp_dst_port_info_t *pi;
+ u32 i;
+
+ vec_add2 (um->dst_port_infos[is_ip4], pi, 1);
+ i = pi - um->dst_port_infos[is_ip4];
+
+ pi->name = dst_port_name;
+ pi->dst_port = dst_port;
+ pi->next_index = pi->node_index = ~0;
+
+ hash_set (um->dst_port_info_by_dst_port[is_ip4], dst_port, i);
+
+ if (pi->name)
+ hash_set_mem (um->dst_port_info_by_name[is_ip4], pi->name, i);
+}
+
+void
+udp_register_dst_port (vlib_main_t * vm,
+ udp_dst_port_t dst_port, u32 node_index, u8 is_ip4)
+{
+ udp_main_t *um = &udp_main;
+ udp_dst_port_info_t *pi;
+ udp_input_runtime_t *rt;
+ u16 *n;
+
+ {
+ clib_error_t *error = vlib_call_init_function (vm, udp_local_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ pi = udp_get_dst_port_info (um, dst_port, is_ip4);
+ if (!pi)
+ {
+ add_dst_port (um, dst_port, 0, is_ip4);
+ pi = udp_get_dst_port_info (um, dst_port, is_ip4);
+ ASSERT (pi);
+ }
+
+ pi->node_index = node_index;
+ pi->next_index = vlib_node_add_next (vm,
+ is_ip4 ? udp4_input_node.index
+ : udp6_input_node.index, node_index);
+
+ /* Setup udp protocol -> next index sparse vector mapping. */
+ rt = vlib_node_get_runtime_data
+ (vm, is_ip4 ? udp4_input_node.index : udp6_input_node.index);
+ n = sparse_vec_validate (rt->next_by_dst_port,
+ clib_host_to_net_u16 (dst_port));
+ n[0] = pi->next_index;
+}
+
+void
+udp_punt_unknown (vlib_main_t * vm, u8 is_ip4, u8 is_add)
+{
+ udp_input_runtime_t *rt;
+
+ {
+ clib_error_t *error = vlib_call_init_function (vm, udp_local_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ rt = vlib_node_get_runtime_data
+ (vm, is_ip4 ? udp4_input_node.index : udp6_input_node.index);
+
+ rt->punt_unknown = is_add;
+}
+
+/* Parse a UDP header. */
+uword
+unformat_udp_header (unformat_input_t * input, va_list * args)
+{
+ u8 **result = va_arg (*args, u8 **);
+ udp_header_t *udp;
+ __attribute__ ((unused)) int old_length;
+ u16 src_port, dst_port;
+
+  /* Allocate space for the UDP header. */
+  {
+    void *p;
+
+    old_length = vec_len (*result);
+    vec_add2 (*result, p, sizeof (udp_header_t));
+    udp = p;
+  }
+
+ memset (udp, 0, sizeof (udp[0]));
+ if (unformat (input, "src-port %d dst-port %d", &src_port, &dst_port))
+ {
+ udp->src_port = clib_host_to_net_u16 (src_port);
+ udp->dst_port = clib_host_to_net_u16 (dst_port);
+ return 1;
+ }
+ return 0;
+}
+
+static void
+udp_setup_node (vlib_main_t * vm, u32 node_index)
+{
+ vlib_node_t *n = vlib_get_node (vm, node_index);
+ pg_node_t *pn = pg_get_node (node_index);
+
+ n->format_buffer = format_udp_header;
+ n->unformat_buffer = unformat_udp_header;
+ pn->unformat_edit = unformat_pg_udp_header;
+}
+
+clib_error_t *
+udp_local_init (vlib_main_t * vm)
+{
+ udp_input_runtime_t *rt;
+ udp_main_t *um = &udp_main;
+ int i;
+
+ {
+ clib_error_t *error;
+ error = vlib_call_init_function (vm, udp_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+
+ for (i = 0; i < 2; i++)
+ {
+ um->dst_port_info_by_name[i] = hash_create_string (0, sizeof (uword));
+ um->dst_port_info_by_dst_port[i] = hash_create (0, sizeof (uword));
+ }
+
+ udp_setup_node (vm, udp4_input_node.index);
+ udp_setup_node (vm, udp6_input_node.index);
+
+ rt = vlib_node_get_runtime_data (vm, udp4_input_node.index);
+
+ rt->next_by_dst_port = sparse_vec_new
+ ( /* elt bytes */ sizeof (rt->next_by_dst_port[0]),
+ /* bits in index */ BITS (((udp_header_t *) 0)->dst_port));
+
+ rt->punt_unknown = 0;
+
+#define _(n,s) add_dst_port (um, UDP_DST_PORT_##s, #s, 1 /* is_ip4 */);
+ foreach_udp4_dst_port
+#undef _
+ rt = vlib_node_get_runtime_data (vm, udp6_input_node.index);
+
+ rt->next_by_dst_port = sparse_vec_new
+ ( /* elt bytes */ sizeof (rt->next_by_dst_port[0]),
+ /* bits in index */ BITS (((udp_header_t *) 0)->dst_port));
+
+ rt->punt_unknown = 0;
+
+#define _(n,s) add_dst_port (um, UDP_DST_PORT_##s, #s, 0 /* is_ip4 */);
+ foreach_udp6_dst_port
+#undef _
+ ip4_register_protocol (IP_PROTOCOL_UDP, udp4_input_node.index);
+ /* Note: ip6 differs from ip4, UDP is hotwired to ip6-udp-lookup */
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (udp_local_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
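
Mirroring the registration pattern above, an external feature attaches its graph node to a UDP port as follows; the node name and port number are illustrative only:

/* Sketch: steer IPv4 UDP port 9999 into a hypothetical           */
/* "my-proto-input" node. Node and port are illustrative only.    */
extern vlib_node_registration_t my_proto_input_node;

static clib_error_t *
my_proto_init (vlib_main_t * vm)
{
  /* after this, packets to UDP/9999 arrive at my-proto-input with
     the buffer advanced to the UDP payload */
  udp_register_dst_port (vm, 9999, my_proto_input_node.index,
			 1 /* is_ip4 */);
  return 0;
}

VLIB_INIT_FUNCTION (my_proto_init);
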
diff --git a/src/vnet/ip/udp_packet.h b/src/vnet/ip/udp_packet.h
new file mode 100644
index 00000000000..beea3059246
--- /dev/null
+++ b/src/vnet/ip/udp_packet.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip4/udp_packet.h: UDP packet format
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_udp_packet_h
+#define included_udp_packet_h
+
+typedef struct
+{
+ /* Source and destination port. */
+ u16 src_port, dst_port;
+
+ /* Length of UDP header plus payload. */
+ u16 length;
+
+ /* Checksum of UDP pseudo-header and data or
+ zero if checksum is disabled. */
+ u16 checksum;
+} udp_header_t;
+
+#endif /* included_udp_packet_h */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ip/udp_pg.c b/src/vnet/ip/udp_pg.c
new file mode 100644
index 00000000000..c9d8d38ca4a
--- /dev/null
+++ b/src/vnet/ip/udp_pg.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ip/udp_pg: UDP packet-generator interface
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/pg/pg.h>
+#include <vnet/ip/ip.h>		/* for unformat_tcp_udp_port */
+
+#define UDP_PG_EDIT_LENGTH (1 << 0)
+#define UDP_PG_EDIT_CHECKSUM (1 << 1)
+
+always_inline void
+udp_pg_edit_function_inline (pg_main_t * pg,
+ pg_stream_t * s,
+ pg_edit_group_t * g,
+ u32 * packets, u32 n_packets, u32 flags)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ u32 ip_offset, udp_offset;
+
+ udp_offset = g->start_byte_offset;
+ ip_offset = (g - 1)->start_byte_offset;
+
+ while (n_packets >= 1)
+ {
+ vlib_buffer_t *p0;
+ ip4_header_t *ip0;
+ udp_header_t *udp0;
+ u32 udp_len0;
+
+ p0 = vlib_get_buffer (vm, packets[0]);
+ n_packets -= 1;
+ packets += 1;
+
+ ip0 = (void *) (p0->data + ip_offset);
+ udp0 = (void *) (p0->data + udp_offset);
+ udp_len0 = clib_net_to_host_u16 (ip0->length) - sizeof (ip0[0]);
+
+ if (flags & UDP_PG_EDIT_LENGTH)
+ udp0->length =
+	  clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, p0)
+ - ip_offset);
+
+ /* Initialize checksum with header. */
+ if (flags & UDP_PG_EDIT_CHECKSUM)
+ {
+ ip_csum_t sum0;
+
+ sum0 = clib_mem_unaligned (&ip0->src_address, u64);
+
+ sum0 = ip_csum_with_carry
+ (sum0, clib_host_to_net_u32 (udp_len0 + (ip0->protocol << 16)));
+
+ /* Invalidate possibly old checksum. */
+ udp0->checksum = 0;
+
+ sum0 =
+ ip_incremental_checksum_buffer (vm, p0, udp_offset, udp_len0,
+ sum0);
+
+ sum0 = ~ip_csum_fold (sum0);
+
+ /* Zero checksum means checksumming disabled. */
+ sum0 = sum0 != 0 ? sum0 : 0xffff;
+
+ udp0->checksum = sum0;
+ }
+ }
+}
+
+static void
+udp_pg_edit_function (pg_main_t * pg,
+ pg_stream_t * s,
+ pg_edit_group_t * g, u32 * packets, u32 n_packets)
+{
+ switch (g->edit_function_opaque)
+ {
+ case UDP_PG_EDIT_LENGTH:
+ udp_pg_edit_function_inline (pg, s, g, packets, n_packets,
+ UDP_PG_EDIT_LENGTH);
+ break;
+
+ case UDP_PG_EDIT_CHECKSUM:
+ udp_pg_edit_function_inline (pg, s, g, packets, n_packets,
+ UDP_PG_EDIT_CHECKSUM);
+ break;
+
+ case UDP_PG_EDIT_CHECKSUM | UDP_PG_EDIT_LENGTH:
+ udp_pg_edit_function_inline (pg, s, g, packets, n_packets,
+ UDP_PG_EDIT_CHECKSUM | UDP_PG_EDIT_LENGTH);
+ break;
+
+ default:
+ ASSERT (0);
+ break;
+ }
+}
+
+typedef struct
+{
+ pg_edit_t src_port, dst_port;
+ pg_edit_t length;
+ pg_edit_t checksum;
+} pg_udp_header_t;
+
+static inline void
+pg_udp_header_init (pg_udp_header_t * p)
+{
+  /* Initialize the fields of the UDP header. */
+#define _(f) pg_edit_init (&p->f, udp_header_t, f);
+ _(src_port);
+ _(dst_port);
+ _(length);
+ _(checksum);
+#undef _
+}
+
+uword
+unformat_pg_udp_header (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t *s = va_arg (*args, pg_stream_t *);
+ pg_udp_header_t *p;
+ u32 group_index;
+
+ p = pg_create_edit_group (s, sizeof (p[0]), sizeof (udp_header_t),
+ &group_index);
+ pg_udp_header_init (p);
+
+ /* Defaults. */
+ p->checksum.type = PG_EDIT_UNSPECIFIED;
+ p->length.type = PG_EDIT_UNSPECIFIED;
+
+ if (!unformat (input, "UDP: %U -> %U",
+ unformat_pg_edit,
+ unformat_tcp_udp_port, &p->src_port,
+ unformat_pg_edit, unformat_tcp_udp_port, &p->dst_port))
+ goto error;
+
+ /* Parse options. */
+ while (1)
+ {
+ if (unformat (input, "length %U",
+ unformat_pg_edit, unformat_pg_number, &p->length))
+ ;
+
+ else if (unformat (input, "checksum %U",
+ unformat_pg_edit, unformat_pg_number, &p->checksum))
+ ;
+
+ /* Can't parse input: try next protocol level. */
+ else
+ break;
+ }
+
+ {
+ ip_main_t *im = &ip_main;
+ u16 dst_port;
+ tcp_udp_port_info_t *pi;
+
+ pi = 0;
+ if (p->dst_port.type == PG_EDIT_FIXED)
+ {
+ dst_port = pg_edit_get_value (&p->dst_port, PG_EDIT_LO);
+ pi = ip_get_tcp_udp_port_info (im, dst_port);
+ }
+
+ if (pi && pi->unformat_pg_edit
+ && unformat_user (input, pi->unformat_pg_edit, s))
+ ;
+
+ else if (!unformat_user (input, unformat_pg_payload, s))
+ goto error;
+
+ p = pg_get_edit_group (s, group_index);
+ if (p->checksum.type == PG_EDIT_UNSPECIFIED
+ || p->length.type == PG_EDIT_UNSPECIFIED)
+ {
+ pg_edit_group_t *g = pg_stream_get_group (s, group_index);
+ g->edit_function = udp_pg_edit_function;
+ g->edit_function_opaque = 0;
+ if (p->checksum.type == PG_EDIT_UNSPECIFIED)
+ g->edit_function_opaque |= UDP_PG_EDIT_CHECKSUM;
+ if (p->length.type == PG_EDIT_UNSPECIFIED)
+ g->edit_function_opaque |= UDP_PG_EDIT_LENGTH;
+ }
+
+ return 1;
+ }
+
+error:
+ /* Free up any edits we may have added. */
+ pg_free_edit_group (s);
+ return 0;
+}
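+
+/* Example (illustrative) of a packet-generator stream fragment accepted
+ * by unformat_pg_udp_header above; the "length" and "checksum" edits may
+ * be omitted, in which case udp_pg_edit_function computes them per packet:
+ *
+ *     UDP: 1234 -> 5678
+ */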
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec-gre/dir.dox b/src/vnet/ipsec-gre/dir.dox
new file mode 100644
index 00000000000..e6ffd10b01b
--- /dev/null
+++ b/src/vnet/ipsec-gre/dir.dox
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ @dir src/vnet/ipsec-gre
+ @brief L2-GRE over IPSec tunnel interface implementation
+*/
diff --git a/src/vnet/ipsec-gre/error.def b/src/vnet/ipsec-gre/error.def
new file mode 100644
index 00000000000..d84e8ed1759
--- /dev/null
+++ b/src/vnet/ipsec-gre/error.def
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief L2-GRE over IPSec errors.
+ */
+
+
+ipsec_gre_error (NONE, "no error")
+ipsec_gre_error (UNKNOWN_PROTOCOL, "unknown protocol")
+ipsec_gre_error (UNSUPPORTED_VERSION, "unsupported version")
+ipsec_gre_error (PKTS_DECAP, "GRE input packets decapsulated")
+ipsec_gre_error (PKTS_ENCAP, "GRE output packets encapsulated")
+ipsec_gre_error (NO_SUCH_TUNNEL, "GRE input packets dropped due to missing tunnel")
diff --git a/src/vnet/ipsec-gre/interface.c b/src/vnet/ipsec-gre/interface.c
new file mode 100644
index 00000000000..56832ee1006
--- /dev/null
+++ b/src/vnet/ipsec-gre/interface.c
@@ -0,0 +1,311 @@
+/*
+ * gre_interface.c: gre interfaces
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief L2-GRE over IPSec tunnel interface.
+ *
+ * Creates ipsec-gre tunnel interface.
+ * Provides a command line interface so humans can interact with VPP.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ipsec-gre/ipsec_gre.h>
+#include <vnet/ip/format.h>
+#include <vnet/ipsec/ipsec.h>
+
+#if DPDK_CRYPTO==1
+#include <vnet/devices/dpdk/ipsec/esp.h>
+#define ESP_NODE "dpdk-esp-encrypt"
+#else
+#include <vnet/ipsec/esp.h>
+#define ESP_NODE "esp-encrypt"
+#endif
+
+u8 *
+format_ipsec_gre_tunnel (u8 * s, va_list * args)
+{
+ ipsec_gre_tunnel_t *t = va_arg (*args, ipsec_gre_tunnel_t *);
+ ipsec_gre_main_t *gm = &ipsec_gre_main;
+
+ s = format (s,
+ "[%d] %U (src) %U (dst) local-sa %d remote-sa %d",
+ t - gm->tunnels,
+ format_ip4_address, &t->tunnel_src,
+ format_ip4_address, &t->tunnel_dst,
+ t->local_sa_id, t->remote_sa_id);
+ return s;
+}
+
+static clib_error_t *
+show_ipsec_gre_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ ipsec_gre_main_t *igm = &ipsec_gre_main;
+ ipsec_gre_tunnel_t *t;
+
+ if (pool_elts (igm->tunnels) == 0)
+ vlib_cli_output (vm, "No IPSec GRE tunnels configured...");
+
+ /* *INDENT-OFF* */
+ pool_foreach (t, igm->tunnels,
+ ({
+ vlib_cli_output (vm, "%U", format_ipsec_gre_tunnel, t);
+ }));
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_ipsec_gre_tunnel_command, static) = {
+ .path = "show ipsec gre tunnel",
+ .function = show_ipsec_gre_tunnel_command_fn,
+};
+/* *INDENT-ON* */
+
+/* force inclusion from application's main.c */
+clib_error_t *
+ipsec_gre_interface_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ipsec_gre_interface_init);
+
+/**
+ * @brief Add or delete ipsec-gre tunnel interface.
+ *
+ * @param *a vnet_ipsec_gre_add_del_tunnel_args_t - tunnel interface parameters
+ * @param *sw_if_indexp u32 - software interface index
+ * @return int - 0 on success, otherwise a <code>VNET_API_ERROR_</code> code
+ */
+int
+vnet_ipsec_gre_add_del_tunnel (vnet_ipsec_gre_add_del_tunnel_args_t * a,
+ u32 * sw_if_indexp)
+{
+ ipsec_gre_main_t *igm = &ipsec_gre_main;
+ vnet_main_t *vnm = igm->vnet_main;
+ ip4_main_t *im = &ip4_main;
+ ipsec_gre_tunnel_t *t;
+ vnet_hw_interface_t *hi;
+ u32 hw_if_index, sw_if_index;
+ u32 slot;
+ uword *p;
+ u64 key;
+ ipsec_add_del_ipsec_gre_tunnel_args_t args;
+
+ memset (&args, 0, sizeof (args));
+ args.is_add = a->is_add;
+ args.local_sa_id = a->lsa;
+ args.remote_sa_id = a->rsa;
+ args.local_ip.as_u32 = a->src.as_u32;
+ args.remote_ip.as_u32 = a->dst.as_u32;
+
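+  /* 64-bit lookup key: tunnel src address in the upper 32 bits,
+   * dst address in the lower 32 bits. */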
+ key = (u64) a->src.as_u32 << 32 | (u64) a->dst.as_u32;
+ p = hash_get (igm->tunnel_by_key, key);
+
+ if (a->is_add)
+ {
+ /* check if same src/dst pair exists */
+ if (p)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ pool_get_aligned (igm->tunnels, t, CLIB_CACHE_LINE_BYTES);
+ memset (t, 0, sizeof (*t));
+
+ if (vec_len (igm->free_ipsec_gre_tunnel_hw_if_indices) > 0)
+ {
+ vnet_interface_main_t *im = &vnm->interface_main;
+
+ hw_if_index = igm->free_ipsec_gre_tunnel_hw_if_indices
+ [vec_len (igm->free_ipsec_gre_tunnel_hw_if_indices) - 1];
+ _vec_len (igm->free_ipsec_gre_tunnel_hw_if_indices) -= 1;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ hi->dev_instance = t - igm->tunnels;
+ hi->hw_instance = hi->dev_instance;
+
+ /* clear old stats of freed tunnel before reuse */
+ sw_if_index = hi->sw_if_index;
+ vnet_interface_counter_lock (im);
+ vlib_zero_combined_counter
+ (&im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_TX],
+ sw_if_index);
+ vlib_zero_combined_counter
+ (&im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_RX],
+ sw_if_index);
+ vlib_zero_simple_counter
+ (&im->sw_if_counters[VNET_INTERFACE_COUNTER_DROP], sw_if_index);
+ vnet_interface_counter_unlock (im);
+ }
+ else
+ {
+ hw_if_index = vnet_register_interface
+ (vnm, ipsec_gre_device_class.index, t - igm->tunnels,
+ ipsec_gre_hw_interface_class.index, t - igm->tunnels);
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ sw_if_index = hi->sw_if_index;
+ }
+
+ t->hw_if_index = hw_if_index;
+ t->sw_if_index = sw_if_index;
+ t->local_sa_id = a->lsa;
+ t->remote_sa_id = a->rsa;
+ t->local_sa = ipsec_get_sa_index_by_sa_id (a->lsa);
+ t->remote_sa = ipsec_get_sa_index_by_sa_id (a->rsa);
+
+ ip4_sw_interface_enable_disable (sw_if_index, 1);
+
+ vec_validate_init_empty (igm->tunnel_index_by_sw_if_index,
+ sw_if_index, ~0);
+ igm->tunnel_index_by_sw_if_index[sw_if_index] = t - igm->tunnels;
+
+ vec_validate (im->fib_index_by_sw_if_index, sw_if_index);
+
+ hi->min_packet_bytes = 64 + sizeof (gre_header_t) +
+ sizeof (ip4_header_t) + sizeof (esp_header_t) + sizeof (esp_footer_t);
+ hi->per_packet_overhead_bytes =
+ /* preamble */ 8 + /* inter frame gap */ 12;
+
+      /* Default gre MTU (jumbo-frame sized). */
+ hi->max_l3_packet_bytes[VLIB_RX] = hi->max_l3_packet_bytes[VLIB_TX] =
+ 9000;
+
+ clib_memcpy (&t->tunnel_src, &a->src, sizeof (t->tunnel_src));
+ clib_memcpy (&t->tunnel_dst, &a->dst, sizeof (t->tunnel_dst));
+
+ hash_set (igm->tunnel_by_key, key, t - igm->tunnels);
+
+ slot = vlib_node_add_named_next_with_slot
+ (vnm->vlib_main, hi->tx_node_index, ESP_NODE,
+ IPSEC_GRE_OUTPUT_NEXT_ESP_ENCRYPT);
+
+ ASSERT (slot == IPSEC_GRE_OUTPUT_NEXT_ESP_ENCRYPT);
+
+ }
+ else
+ { /* !is_add => delete */
+ /* tunnel needs to exist */
+ if (!p)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ t = pool_elt_at_index (igm->tunnels, p[0]);
+
+ sw_if_index = t->sw_if_index;
+ ip4_sw_interface_enable_disable (sw_if_index, 0);
+ vnet_sw_interface_set_flags (vnm, sw_if_index, 0 /* down */ );
+ /* make sure tunnel is removed from l2 bd or xconnect */
+ set_int_l2_mode (igm->vlib_main, vnm, MODE_L3, sw_if_index, 0, 0, 0, 0);
+ vec_add1 (igm->free_ipsec_gre_tunnel_hw_if_indices, t->hw_if_index);
+ igm->tunnel_index_by_sw_if_index[sw_if_index] = ~0;
+
+ hash_unset (igm->tunnel_by_key, key);
+ pool_put (igm->tunnels, t);
+ }
+
+ if (sw_if_indexp)
+ *sw_if_indexp = sw_if_index;
+
+ return ipsec_add_del_ipsec_gre_tunnel (vnm, &args);
+}
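+
+/*
+ * Usage sketch (illustrative, not part of this patch): create a tunnel
+ * programmatically, assuming SAs with ids 10 and 20 already exist:
+ *
+ *   vnet_ipsec_gre_add_del_tunnel_args_t a = { 0 };
+ *   u32 sw_if_index;
+ *
+ *   a.is_add = 1;
+ *   a.lsa = 10;
+ *   a.rsa = 20;
+ *   a.src.as_u32 = clib_host_to_net_u32 (0xc0a80101); // 192.168.1.1
+ *   a.dst.as_u32 = clib_host_to_net_u32 (0xc0a80102); // 192.168.1.2
+ *   if (vnet_ipsec_gre_add_del_tunnel (&a, &sw_if_index) == 0)
+ *     // sw_if_index now names the new ipsec-gre interface
+ */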
+
+static clib_error_t *
+create_ipsec_gre_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 is_add = 1;
+ u32 num_m_args = 0;
+ ip4_address_t src, dst;
+ u32 lsa = 0, rsa = 0;
+ vnet_ipsec_gre_add_del_tunnel_args_t _a, *a = &_a;
+ int rv;
+ u32 sw_if_index;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "src %U", unformat_ip4_address, &src))
+ num_m_args++;
+ else if (unformat (line_input, "dst %U", unformat_ip4_address, &dst))
+ num_m_args++;
+ else if (unformat (line_input, "local-sa %d", &lsa))
+ num_m_args++;
+ else if (unformat (line_input, "remote-sa %d", &rsa))
+ num_m_args++;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ if (num_m_args < 4)
+ return clib_error_return (0, "mandatory argument(s) missing");
+
+ if (memcmp (&src, &dst, sizeof (src)) == 0)
+ return clib_error_return (0, "src and dst are identical");
+
+ memset (a, 0, sizeof (*a));
+ a->is_add = is_add;
+ a->lsa = lsa;
+ a->rsa = rsa;
+ clib_memcpy (&a->src, &src, sizeof (src));
+ clib_memcpy (&a->dst, &dst, sizeof (dst));
+
+ rv = vnet_ipsec_gre_add_del_tunnel (a, &sw_if_index);
+
+ switch (rv)
+ {
+ case 0:
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name,
+ vnet_get_main (), sw_if_index);
+ break;
+ case VNET_API_ERROR_INVALID_VALUE:
+ return clib_error_return (0, "GRE tunnel already exists...");
+ default:
+ return clib_error_return (0,
+ "vnet_ipsec_gre_add_del_tunnel returned %d",
+ rv);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (create_ipsec_gre_tunnel_command, static) = {
+ .path = "create ipsec gre tunnel",
+ .short_help = "create ipsec gre tunnel src <addr> dst <addr> "
+ "local-sa <id> remote-sa <id> [del]",
+ .function = create_ipsec_gre_tunnel_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+* fd.io coding-style-patch-verification: ON
+*
+* Local Variables:
+* eval: (c-set-style "gnu")
+* End:
+*/
diff --git a/src/vnet/ipsec-gre/ipsec_gre.api b/src/vnet/ipsec-gre/ipsec_gre.api
new file mode 100644
index 00000000000..793bca0afcd
--- /dev/null
+++ b/src/vnet/ipsec-gre/ipsec_gre.api
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \brief Add / del ipsec gre tunnel request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param local_sa_id - local SA id
+ @param remote_sa_id - remote SA id
+ @param is_add - 1 if adding the tunnel, 0 if deleting
+ @param src_address - tunnel source address
+ @param dst_address - tunnel destination address
+*/
+define ipsec_gre_add_del_tunnel {
+ u32 client_index;
+ u32 context;
+ u32 local_sa_id;
+ u32 remote_sa_id;
+ u8 is_add;
+ u8 src_address[4];
+ u8 dst_address[4];
+};
+
+/** \brief Reply for add / del ipsec gre tunnel request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param sw_if_index - software index of the new ipsec gre tunnel
+*/
+define ipsec_gre_add_del_tunnel_reply {
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief Dump ipsec gre tunnel table
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+    @param sw_if_index - software interface index of the tunnel, or -1 for all tunnels
+*/
+define ipsec_gre_tunnel_dump {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+/** \brief ipsec gre tunnel operational state response
+ @param context - returned sender context, to match reply w/ request
+ @param sw_if_index - software index of the ipsec gre tunnel
+ @param local_sa_id - local SA id
+ @param remote_sa_id - remote SA id
+ @param src_address - tunnel source address
+ @param dst_address - tunnel destination address
+*/
+define ipsec_gre_tunnel_details {
+ u32 context;
+ u32 sw_if_index;
+ u32 local_sa_id;
+ u32 remote_sa_id;
+ u8 src_address[4];
+ u8 dst_address[4];
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
+ \ No newline at end of file
diff --git a/src/vnet/ipsec-gre/ipsec_gre.c b/src/vnet/ipsec-gre/ipsec_gre.c
new file mode 100644
index 00000000000..a0b065ac283
--- /dev/null
+++ b/src/vnet/ipsec-gre/ipsec_gre.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief L2-GRE over IPSec packet processing.
+ *
+ * Add GRE header to the packet and send it to the esp-encrypt node.
+*/
+
+#include <vnet/vnet.h>
+#include <vnet/ipsec-gre/ipsec_gre.h>
+
+ipsec_gre_main_t ipsec_gre_main;
+
+/**
+ * @brief IPv4 and GRE header union.
+ *
+*/
+typedef struct
+{
+ union
+ {
+ ip4_and_gre_header_t ip4_and_gre;
+ u64 as_u64[3];
+ };
+} ip4_and_gre_union_t;
+
+/**
+ * @brief Packet trace.
+ *
+*/
+typedef struct
+{
+ u32 tunnel_id; /**< Tunnel-id / index in tunnel vector */
+
+ u32 length; /**< pkt length */
+
+ ip4_address_t src; /**< tunnel src IPv4 address */
+ ip4_address_t dst; /**< tunnel dst IPv4 address */
+
+ u32 sa_id; /**< tunnel IPSec SA id */
+} ipsec_gre_tx_trace_t;
+
+u8 *
+format_ipsec_gre_tx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ipsec_gre_tx_trace_t *t = va_arg (*args, ipsec_gre_tx_trace_t *);
+
+ s = format (s, "GRE: tunnel %d len %d src %U dst %U sa-id %d",
+ t->tunnel_id, clib_net_to_host_u16 (t->length),
+ format_ip4_address, &t->src.as_u8,
+ format_ip4_address, &t->dst.as_u8, t->sa_id);
+ return s;
+}
+
+/**
+ * @brief IPSec-GRE tunnel interface tx function.
+ *
+ * Add GRE header to the packet.
+ *
+ * @param vm vlib_main_t corresponding to the current thread.
+ * @param node vlib_node_runtime_t data for this node.
+ * @param frame vlib_frame_t whose contents should be dispatched.
+ *
+ * @par Graph mechanics: buffer metadata, next index usage
+ *
+ * <em>Uses:</em>
+ * - <code>node->runtime_data</code>
+ * - Match tunnel by <code>rd->dev_instance</code> in IPSec-GRE tunnels
+ * pool.
+ *
+ * <em>Sets:</em>
+ * - <code>vnet_buffer(b)->ipsec.sad_index</code>
+ * - Set IPSec Security Association for packet encryption.
+ * - <code>vnet_buffer(b)->sw_if_index[VLIB_TX]</code>
+ * - Reset output sw_if_index.
+ *
+ * <em>Next Index:</em>
+ * - Dispatches the packet to the esp-encrypt node.
+*/
+static uword
+ipsec_gre_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ ipsec_gre_main_t *igm = &ipsec_gre_main;
+ u32 next_index;
+ u32 *from, *to_next, n_left_from, n_left_to_next;
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ ipsec_gre_tunnel_t *t = pool_elt_at_index (igm->tunnels, rd->dev_instance);
+
+ /* Vector of buffer / pkt indices we're supposed to process */
+ from = vlib_frame_vector_args (frame);
+
+ /* Number of buffers / pkts */
+ n_left_from = frame->n_vectors;
+
+ /* Speculatively send the first buffer to the last disposition we used */
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ /* set up to enqueue to our disposition with index = next_index */
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /*
+ * As long as we have enough pkts left to process two pkts
+ * and prefetch two pkts...
+ */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ vlib_buffer_t *b0, *b1;
+ ip4_header_t *ip0, *ip1;
+ ip4_and_gre_union_t *h0, *h1;
+ u32 bi0, next0, bi1, next1;
+ __attribute__ ((unused)) u8 error0, error1;
+ u16 gre_protocol0, gre_protocol1;
+
+ /* Prefetch the next iteration */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ /*
+ * Prefetch packet data. We expect to overwrite
+ * the inbound L2 header with an ip header and a
+ * gre header. Might want to prefetch the last line
+ * of rewrite space as well; need profile data
+ */
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* Pick up the next two buffer indices */
+ bi0 = from[0];
+ bi1 = from[1];
+
+ /* Speculatively enqueue them where we sent the last buffer */
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+	  /* 0x01 is the GRE protocol value this implementation matches
+	   * on decap; written in network byte order */
+	  gre_protocol0 = clib_host_to_net_u16 (0x01);
+	  gre_protocol1 = clib_host_to_net_u16 (0x01);
+
+ vlib_buffer_advance (b0, -sizeof (*h0));
+ vlib_buffer_advance (b1, -sizeof (*h1));
+
+ h0 = vlib_buffer_get_current (b0);
+ h1 = vlib_buffer_get_current (b1);
+ h0->as_u64[0] = 0;
+ h0->as_u64[1] = 0;
+ h0->as_u64[2] = 0;
+
+ h1->as_u64[0] = 0;
+ h1->as_u64[1] = 0;
+ h1->as_u64[2] = 0;
+
+ ip0 = &h0->ip4_and_gre.ip4;
+ h0->ip4_and_gre.gre.protocol = gre_protocol0;
+ ip0->ip_version_and_header_length = 0x45;
+ ip0->ttl = 254;
+ ip0->protocol = IP_PROTOCOL_GRE;
+
+ ip1 = &h1->ip4_and_gre.ip4;
+ h1->ip4_and_gre.gre.protocol = gre_protocol1;
+ ip1->ip_version_and_header_length = 0x45;
+ ip1->ttl = 254;
+ ip1->protocol = IP_PROTOCOL_GRE;
+
+ ip0->length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+ ip1->length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
+ ip0->src_address.as_u32 = t->tunnel_src.as_u32;
+ ip1->src_address.as_u32 = t->tunnel_src.as_u32;
+ ip0->dst_address.as_u32 = t->tunnel_dst.as_u32;
+ ip1->dst_address.as_u32 = t->tunnel_dst.as_u32;
+ ip0->checksum = ip4_header_checksum (ip0);
+ ip1->checksum = ip4_header_checksum (ip1);
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] =
+ vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ vnet_buffer (b1)->sw_if_index[VLIB_RX] =
+ vnet_buffer (b1)->sw_if_index[VLIB_TX];
+
+ vnet_buffer (b0)->ipsec.sad_index = t->local_sa;
+ vnet_buffer (b1)->ipsec.sad_index = t->local_sa;
+
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+ next0 = IPSEC_GRE_OUTPUT_NEXT_ESP_ENCRYPT;
+ next1 = IPSEC_GRE_OUTPUT_NEXT_ESP_ENCRYPT;
+ error0 = IPSEC_GRE_ERROR_NONE;
+ error1 = IPSEC_GRE_ERROR_NONE;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ipsec_gre_tx_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->tunnel_id = t - igm->tunnels;
+ tr->length = ip0->length;
+ tr->src.as_u32 = ip0->src_address.as_u32;
+ tr->dst.as_u32 = ip0->dst_address.as_u32;
+ tr->sa_id = t->local_sa_id;
+ }
+
+ if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ipsec_gre_tx_trace_t *tr = vlib_add_trace (vm, node,
+ b1, sizeof (*tr));
+ tr->tunnel_id = t - igm->tunnels;
+ tr->length = ip1->length;
+ tr->src.as_u32 = ip1->src_address.as_u32;
+ tr->dst.as_u32 = ip1->dst_address.as_u32;
+ tr->sa_id = t->local_sa_id;
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *b0;
+ ip4_header_t *ip0;
+ ip4_and_gre_union_t *h0;
+ u32 bi0, next0;
+ __attribute__ ((unused)) u8 error0;
+ u16 gre_protocol0;
+
+ bi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+	  gre_protocol0 = clib_host_to_net_u16 (0x01);
+
+ vlib_buffer_advance (b0, -sizeof (*h0));
+
+ h0 = vlib_buffer_get_current (b0);
+ h0->as_u64[0] = 0;
+ h0->as_u64[1] = 0;
+ h0->as_u64[2] = 0;
+
+ ip0 = &h0->ip4_and_gre.ip4;
+ h0->ip4_and_gre.gre.protocol = gre_protocol0;
+ ip0->ip_version_and_header_length = 0x45;
+ ip0->ttl = 254;
+ ip0->protocol = IP_PROTOCOL_GRE;
+ ip0->length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+ ip0->src_address.as_u32 = t->tunnel_src.as_u32;
+ ip0->dst_address.as_u32 = t->tunnel_dst.as_u32;
+ ip0->checksum = ip4_header_checksum (ip0);
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] =
+ vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ vnet_buffer (b0)->ipsec.sad_index = t->local_sa;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+ next0 = IPSEC_GRE_OUTPUT_NEXT_ESP_ENCRYPT;
+ error0 = IPSEC_GRE_ERROR_NONE;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ipsec_gre_tx_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->tunnel_id = t - igm->tunnels;
+ tr->length = ip0->length;
+ tr->src.as_u32 = ip0->src_address.as_u32;
+ tr->dst.as_u32 = ip0->dst_address.as_u32;
+ tr->sa_id = t->local_sa_id;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, ipsec_gre_input_node.index,
+ IPSEC_GRE_ERROR_PKTS_ENCAP, frame->n_vectors);
+
+ return frame->n_vectors;
+}
+
+static clib_error_t *
+ipsec_gre_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
+ u32 flags)
+{
+ if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ vnet_hw_interface_set_flags (vnm, hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+ else
+ vnet_hw_interface_set_flags (vnm, hw_if_index, 0 /* down */ );
+
+ return /* no error */ 0;
+}
+
+static u8 *
+format_ipsec_gre_tunnel_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, "ipsec-gre%d", dev_instance);
+}
+
+static u8 *
+format_ipsec_gre_device (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ CLIB_UNUSED (int verbose) = va_arg (*args, int);
+
+ s = format (s, "IPSEC-GRE tunnel: id %d\n", dev_instance);
+ return s;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (ipsec_gre_device_class) = {
+ .name = "IPSec GRE tunnel device",
+ .format_device_name = format_ipsec_gre_tunnel_name,
+ .format_device = format_ipsec_gre_device,
+ .format_tx_trace = format_ipsec_gre_tx_trace,
+ .tx_function = ipsec_gre_interface_tx,
+ .admin_up_down_function = ipsec_gre_interface_admin_up_down,
+};
+
+VLIB_DEVICE_TX_FUNCTION_MULTIARCH (ipsec_gre_device_class,
+ ipsec_gre_interface_tx)
+
+
+VNET_HW_INTERFACE_CLASS (ipsec_gre_hw_interface_class) = {
+ .name = "IPSEC-GRE",
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+ipsec_gre_init (vlib_main_t * vm)
+{
+ ipsec_gre_main_t *igm = &ipsec_gre_main;
+ clib_error_t *error;
+
+ memset (igm, 0, sizeof (igm[0]));
+ igm->vlib_main = vm;
+ igm->vnet_main = vnet_get_main ();
+
+ if ((error = vlib_call_init_function (vm, ip_main_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, ip4_lookup_init)))
+ return error;
+
+ igm->tunnel_by_key = hash_create (0, sizeof (uword));
+
+ return vlib_call_init_function (vm, ipsec_gre_input_init);
+}
+
+VLIB_INIT_FUNCTION (ipsec_gre_init);
+
+ipsec_gre_main_t *
+ipsec_gre_get_main (vlib_main_t * vm)
+{
+ vlib_call_init_function (vm, ipsec_gre_init);
+ return &ipsec_gre_main;
+}
+
+/*
+* fd.io coding-style-patch-verification: ON
+*
+* Local Variables:
+* eval: (c-set-style "gnu")
+* End:
+*/
diff --git a/src/vnet/ipsec-gre/ipsec_gre.h b/src/vnet/ipsec-gre/ipsec_gre.h
new file mode 100644
index 00000000000..a2ca64b6f74
--- /dev/null
+++ b/src/vnet/ipsec-gre/ipsec_gre.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief L2-GRE over IPSec packet processing.
+*/
+
+#ifndef included_ipsec_gre_h
+#define included_ipsec_gre_h
+
+#include <vnet/vnet.h>
+#include <vnet/gre/packet.h>
+#include <vnet/gre/gre.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ip/ip4.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ip/format.h>
+
+extern vnet_hw_interface_class_t ipsec_gre_hw_interface_class;
+
+/**
+ * @brief IPSec-GRE errors.
+ *
+*/
+typedef enum
+{
+#define ipsec_gre_error(n,s) IPSEC_GRE_ERROR_##n,
+#include <vnet/ipsec-gre/error.def>
+#undef ipsec_gre_error
+ IPSEC_GRE_N_ERROR,
+} ipsec_gre_error_t;
+
+/**
+ * @brief IPSec-GRE tunnel parameters.
+ *
+*/
+typedef struct
+{
+ ip4_address_t tunnel_src; /**< tunnel IPv4 src address */
+ ip4_address_t tunnel_dst; /**< tunnel IPv4 dst address */
+ u32 local_sa; /**< local IPSec SA index */
+ u32 remote_sa; /**< remote IPSec SA index */
+ u32 local_sa_id; /**< local IPSec SA id */
+ u32 remote_sa_id; /**< remote IPSec SA id */
+  u32 hw_if_index;		/**< hardware interface index */
+  u32 sw_if_index;		/**< software interface index */
+} ipsec_gre_tunnel_t;
+
+/**
+ * @brief IPSec-GRE state.
+ *
+*/
+typedef struct
+{
+ ipsec_gre_tunnel_t *tunnels; /**< pool of tunnel instances */
+
+ uword *tunnel_by_key; /**< hash mapping src/dst addr pair to tunnel */
+
+ u32 *free_ipsec_gre_tunnel_hw_if_indices; /**< free vlib hw_if_indices */
+
+ u32 *tunnel_index_by_sw_if_index; /**< mapping from sw_if_index to tunnel
+ index */
+
+ vlib_main_t *vlib_main; /**< convenience */
+ vnet_main_t *vnet_main; /**< convenience */
+} ipsec_gre_main_t;
+
+ipsec_gre_main_t ipsec_gre_main;
+
+extern vlib_node_registration_t ipsec_gre_input_node;
+extern vnet_device_class_t ipsec_gre_device_class;
+
+/* manually added to the interface output node in ipsec_gre.c */
+#define IPSEC_GRE_OUTPUT_NEXT_ESP_ENCRYPT 1
+
+/**
+ * @brief IPSec-GRE tunnel add/del arguments.
+ *
+*/
+typedef struct
+{
+ u8 is_add; /**< 1 - add, 0 - delete */
+
+ ip4_address_t src; /**< tunnel IPv4 src address */
+ ip4_address_t dst; /**< tunnel IPv4 dst address */
+ u32 lsa; /**< local IPSec SA id */
+ u32 rsa; /**< remote IPSec SA id */
+} vnet_ipsec_gre_add_del_tunnel_args_t;
+
+int vnet_ipsec_gre_add_del_tunnel
+ (vnet_ipsec_gre_add_del_tunnel_args_t * a, u32 * sw_if_indexp);
+
+#endif /* included_ipsec_gre_h */
+
+/*
+* fd.io coding-style-patch-verification: ON
+*
+* Local Variables:
+* eval: (c-set-style "gnu")
+* End:
+*/
diff --git a/src/vnet/ipsec-gre/ipsec_gre_api.c b/src/vnet/ipsec-gre/ipsec_gre_api.c
new file mode 100644
index 00000000000..a7ea1490bae
--- /dev/null
+++ b/src/vnet/ipsec-gre/ipsec_gre_api.c
@@ -0,0 +1,190 @@
+/*
+ *------------------------------------------------------------------
+ * ipsec_gre_api.c - ipsec_gre api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/ipsec-gre/ipsec_gre.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(IPSEC_GRE_ADD_DEL_TUNNEL, ipsec_gre_add_del_tunnel) \
+_(IPSEC_GRE_TUNNEL_DUMP, ipsec_gre_tunnel_dump)
+
+static void
+vl_api_ipsec_gre_add_del_tunnel_t_handler (vl_api_ipsec_gre_add_del_tunnel_t *
+ mp)
+{
+ vl_api_ipsec_gre_add_del_tunnel_reply_t *rmp;
+ int rv = 0;
+ vnet_ipsec_gre_add_del_tunnel_args_t _a, *a = &_a;
+ u32 sw_if_index = ~0;
+
+ /* Check src & dst are different */
+ if (memcmp (mp->src_address, mp->dst_address, 4) == 0)
+ {
+ rv = VNET_API_ERROR_SAME_SRC_DST;
+ goto out;
+ }
+
+ memset (a, 0, sizeof (*a));
+
+ /* ip addresses sent in network byte order */
+ clib_memcpy (&(a->src), mp->src_address, 4);
+ clib_memcpy (&(a->dst), mp->dst_address, 4);
+ a->is_add = mp->is_add;
+ a->lsa = ntohl (mp->local_sa_id);
+ a->rsa = ntohl (mp->remote_sa_id);
+
+ rv = vnet_ipsec_gre_add_del_tunnel (a, &sw_if_index);
+
+out:
+ /* *INDENT-OFF* */
+  REPLY_MACRO2(VL_API_IPSEC_GRE_ADD_DEL_TUNNEL_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void send_ipsec_gre_tunnel_details
+ (ipsec_gre_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_ipsec_gre_tunnel_details_t *rmp;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_IPSEC_GRE_TUNNEL_DETAILS);
+ clib_memcpy (rmp->src_address, &(t->tunnel_src), 4);
+ clib_memcpy (rmp->dst_address, &(t->tunnel_dst), 4);
+ rmp->sw_if_index = htonl (t->sw_if_index);
+ rmp->local_sa_id = htonl (t->local_sa_id);
+ rmp->remote_sa_id = htonl (t->remote_sa_id);
+ rmp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void vl_api_ipsec_gre_tunnel_dump_t_handler
+ (vl_api_ipsec_gre_tunnel_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+ ipsec_gre_main_t *igm = &ipsec_gre_main;
+ ipsec_gre_tunnel_t *t;
+ u32 sw_if_index;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ if (~0 == sw_if_index)
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (t, igm->tunnels,
+ ({
+ send_ipsec_gre_tunnel_details(t, q, mp->context);
+ }));
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ if ((sw_if_index >= vec_len (igm->tunnel_index_by_sw_if_index)) ||
+ (~0 == igm->tunnel_index_by_sw_if_index[sw_if_index]))
+ {
+ return;
+ }
+ t = &igm->tunnels[igm->tunnel_index_by_sw_if_index[sw_if_index]];
+ send_ipsec_gre_tunnel_details (t, q, mp->context);
+ }
+}
+
+/*
+ * ipsec_gre_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_ipsec_gre;
+#undef _
+}
+
+static clib_error_t *
+ipsec_gre_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (ipsec_gre_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec-gre/ipsec_gre_doc.md b/src/vnet/ipsec-gre/ipsec_gre_doc.md
new file mode 100644
index 00000000000..e1bb9cdab1a
--- /dev/null
+++ b/src/vnet/ipsec-gre/ipsec_gre_doc.md
@@ -0,0 +1,74 @@
+# VPP L2-GRE over IPsec implementation {#ipsec_gre_doc}
+
+This is a memo intended to contain documentation of the VPP L2-GRE over IPsec implementation.
+Everything that is not directly obvious should come here.
+
+
+## L2-GRE over IPsec
+GRE encapsulates the layer-2 traffic and IPSec encrypts what GRE has encapsulated. The point of L2-GRE over IPSec is to tunnel layer 2 over GRE and IPSec by bridging the physical interface with an IPSec-GRE tunnel interface.
+
+There are 2 dedicated nodes for encapsulation:
+* ipsec-gre<n>-tx - add GRE header
+* esp-encrypt - encrypt GRE packet to ESP packet
+
+There are 3 dedicated nodes for decapsulation:
+* ipsec-if-input - match IPSec SA by source IP address and SPI in ESP packet
+* esp-decrypt - decrypt ESP packet
+* ipsec-gre-input - remove GRE header
+
+
+### Configuration
+
+L2-GRE over IPsec supports the following CLI configuration command:
+ create ipsec gre tunnel src <addr> dst <addr> local-sa <id> remote-sa <id> [del]
+
+src: tunnel source IPv4 address
+dst: tunnel destination IPv4 address
+local-sa: tunnel local IPSec Security Association
+remote-sa: tunnel remote IPSec Security Association
+del: delete IPSec-GRE tunnel
+
+L2-GRE over IPsec supports the following API configuration command:
+ ipsec_gre_add_del_tunnel src <addr> dst <addr> local_sa <sa-id> remote_sa <sa-id> [del]
+
+src: tunnel source IPv4 address
+dst: tunnel destination IPv4 address
+local_sa: tunnel local IPSec Security Association
+remote_sa: tunnel remote IPSec Security Association
+del: delete IPSec-GRE tunnel
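+
+For example (illustrative), the API variant of the CLI example below:
+ ipsec_gre_add_del_tunnel src 192.168.1.1 dst 192.168.1.2 local_sa 10 remote_sa 20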
+
+
+### Configuration example
+
+Interface GigabitEthernet0/9/0 is bridged with the ipsec-gre0 tunnel interface; interface GigabitEthernet0/8/0 sends the encapsulated and encrypted traffic.
+
+Configure IPv4 address on sending interface:
+set int ip address GigabitEthernet0/8/0 192.168.1.1/24
+
+Configure IPSec Security Associations:
+ipsec sa add 10 spi 1001 esp crypto-key 4a506a794f574265564551694d653768 crypto-alg aes-cbc-128 integ-key 4339314b55523947594d6d3547666b45764e6a58 integ-alg sha1-96
+ipsec sa add 20 spi 1000 esp crypto-key 49517065716d6235726c734a4372466c crypto-alg aes-cbc-128 integ-key 307439636a5542735133595835546f68534e4f64 integ-alg sha1-96
+
+Create IPSec-GRE tunnel:
+create ipsec gre tunnel src 192.168.1.1 dst 192.168.1.2 local-sa 10 remote-sa 20
+
+Set interfaces state:
+set int state GigabitEthernet0/8/0 up
+set int state GigabitEthernet0/9/0 up
+set int state ipsec-gre0 up
+
+Bridge physical interface with IPSec-GRE tunnel interface:
+set interface l2 bridge GigabitEthernet0/9/0 1
+set interface l2 bridge ipsec-gre0 1
+
+
+### Operational data
+
+L2-GRE over IPsec supports the following CLI show command:
+ show ipsec gre tunnel
+
+L2-GRE over IPsec supports the following API dump command:
+ ipsec_gre_tunnel_dump [sw_if_index <nn>]
+
+sw_if_index: software interface index of the IPSec-GRE tunnel interface
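+
+For example (illustrative; the index value is hypothetical):
+ ipsec_gre_tunnel_dump sw_if_index 5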
+
diff --git a/src/vnet/ipsec-gre/node.c b/src/vnet/ipsec-gre/node.c
new file mode 100644
index 00000000000..d20f248a6c8
--- /dev/null
+++ b/src/vnet/ipsec-gre/node.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief L2-GRE over IPSec packet processing.
+ *
+ * Removes the GRE header from the packet and sends it to the l2-input node.
+*/
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ipsec-gre/ipsec_gre.h>
+#include <vppinfra/sparse_vec.h>
+
+#define foreach_ipsec_gre_input_next \
+_(PUNT, "error-punt") \
+_(DROP, "error-drop") \
+_(L2_INPUT, "l2-input")
+
+typedef enum {
+#define _(s,n) IPSEC_GRE_INPUT_NEXT_##s,
+ foreach_ipsec_gre_input_next
+#undef _
+ IPSEC_GRE_INPUT_N_NEXT,
+} ipsec_gre_input_next_t;
+
+typedef struct {
+ u32 tunnel_id;
+ u32 length;
+ ip4_address_t src;
+ ip4_address_t dst;
+} ipsec_gre_rx_trace_t;
+
+u8 * format_ipsec_gre_rx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ipsec_gre_rx_trace_t * t = va_arg (*args, ipsec_gre_rx_trace_t *);
+
+ s = format (s, "GRE: tunnel %d len %d src %U dst %U",
+ t->tunnel_id, clib_net_to_host_u16(t->length),
+ format_ip4_address, &t->src.as_u8,
+ format_ip4_address, &t->dst.as_u8);
+ return s;
+}
+
+/**
+ * @brief L2-GRE over IPSec input node.
+ * @node ipsec-gre-input
+ *
+ * This node removes the GRE header.
+ *
+ * @param vm vlib_main_t corresponding to the current thread.
+ * @param node vlib_node_runtime_t data for this node.
+ * @param from_frame vlib_frame_t whose contents should be dispatched.
+ *
+ * @par Graph mechanics: buffer metadata, next index usage
+ *
+ * <em>Uses:</em>
+ * - <code>ip->src_address</code> and <code>ip->dst_address</code>
+ * - Match tunnel by source and destination addresses in GRE IP header.
+ *
+ * <em>Sets:</em>
+ * - <code>vnet_buffer(b)->gre.src</code>
+ * - Save tunnel source IPv4 address.
+ * - <code>vnet_buffer(b)->gre.dst</code>
+ * - Save tunnel destination IPv4 address.
+ * - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
+ * - Set input sw_if_index to IPSec-GRE tunnel for learning.
+ *
+ * <em>Next Index:</em>
+ * - Dispatches the packet to the l2-input node.
+*/
+static uword
+ipsec_gre_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ ipsec_gre_main_t * igm = &ipsec_gre_main;
+ u32 n_left_from, next_index, * from, * to_next;
+ u64 cached_tunnel_key = (u64) ~0;
+ u32 cached_tunnel_sw_if_index = 0, tunnel_sw_if_index;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+ gre_header_t * h0, * h1;
+ u16 version0, version1, protocol0, protocol1;
+ int verr0, verr1;
+ u32 next0, next1;
+ ip4_header_t *ip0, *ip1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, sizeof (h0[0]), LOAD);
+ CLIB_PREFETCH (p3->data, sizeof (h1[0]), LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* ip4_local hands us the ip header, not the gre header */
+ ip0 = vlib_buffer_get_current (b0);
+ ip1 = vlib_buffer_get_current (b1);
+
+ /* Save src + dst ip4 address */
+ vnet_buffer(b0)->gre.src = ip0->src_address.as_u32;
+ vnet_buffer(b0)->gre.dst = ip0->dst_address.as_u32;
+ vnet_buffer(b1)->gre.src = ip1->src_address.as_u32;
+ vnet_buffer(b1)->gre.dst = ip1->dst_address.as_u32;
+
+ vlib_buffer_advance (b0, sizeof (*ip0));
+ vlib_buffer_advance (b1, sizeof (*ip1));
+
+ h0 = vlib_buffer_get_current (b0);
+ h1 = vlib_buffer_get_current (b1);
+
+ protocol0 = clib_net_to_host_u16 (h0->protocol);
+ protocol1 = clib_net_to_host_u16 (h1->protocol);
+ if (PREDICT_TRUE(protocol0 == 0x0001))
+ {
+ next0 = IPSEC_GRE_INPUT_NEXT_L2_INPUT;
+ b0->error = node->errors[IPSEC_GRE_ERROR_NONE];
+ }
+ else
+ {
+ clib_warning("unknown GRE protocol: %d", protocol0);
+ b0->error = node->errors[IPSEC_GRE_ERROR_UNKNOWN_PROTOCOL];
+ next0 = IPSEC_GRE_INPUT_NEXT_DROP;
+ }
+ if (PREDICT_TRUE(protocol1 == 0x0001))
+ {
+ next1 = IPSEC_GRE_INPUT_NEXT_L2_INPUT;
+ b1->error = node->errors[IPSEC_GRE_ERROR_NONE];
+ }
+ else
+ {
+ clib_warning("unknown GRE protocol: %d", protocol1);
+ b1->error = node->errors[IPSEC_GRE_ERROR_UNKNOWN_PROTOCOL];
+ next1 = IPSEC_GRE_INPUT_NEXT_DROP;
+ }
+
+ version0 = clib_net_to_host_u16 (h0->flags_and_version);
+ verr0 = version0 & GRE_VERSION_MASK;
+ version1 = clib_net_to_host_u16 (h1->flags_and_version);
+ verr1 = version1 & GRE_VERSION_MASK;
+
+ b0->error = verr0 ? node->errors[IPSEC_GRE_ERROR_UNSUPPORTED_VERSION]
+ : b0->error;
+ next0 = verr0 ? IPSEC_GRE_INPUT_NEXT_DROP : next0;
+ b1->error = verr1 ? node->errors[IPSEC_GRE_ERROR_UNSUPPORTED_VERSION]
+ : b1->error;
+ next1 = verr1 ? IPSEC_GRE_INPUT_NEXT_DROP : next1;
+
+ /* For L2 payload set input sw_if_index to GRE tunnel for learning */
+ if (PREDICT_TRUE(next0 == IPSEC_GRE_INPUT_NEXT_L2_INPUT))
+ {
+ u64 key = ((u64)(vnet_buffer(b0)->gre.dst) << 32) |
+ (u64)(vnet_buffer(b0)->gre.src);
+
+ if (cached_tunnel_key != key)
+ {
+ vnet_hw_interface_t * hi;
+ ipsec_gre_tunnel_t * t;
+ uword * p;
+
+ p = hash_get (igm->tunnel_by_key, key);
+ if (!p)
+ {
+ next0 = IPSEC_GRE_INPUT_NEXT_DROP;
+ b0->error = node->errors[IPSEC_GRE_ERROR_NO_SUCH_TUNNEL];
+ goto drop0;
+ }
+ t = pool_elt_at_index (igm->tunnels, p[0]);
+ hi = vnet_get_hw_interface (igm->vnet_main,
+ t->hw_if_index);
+ tunnel_sw_if_index = hi->sw_if_index;
+                      /* update the one-entry cache so the next packet
+                       * with the same key avoids the hash lookup */
+                      cached_tunnel_key = key;
+                      cached_tunnel_sw_if_index = tunnel_sw_if_index;
+ }
+ else
+ {
+ tunnel_sw_if_index = cached_tunnel_sw_if_index;
+ }
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;
+ }
+
+drop0:
+ /* For L2 payload set input sw_if_index to GRE tunnel for learning */
+ if (PREDICT_TRUE(next1 == IPSEC_GRE_INPUT_NEXT_L2_INPUT))
+ {
+ u64 key = ((u64)(vnet_buffer(b1)->gre.dst) << 32) |
+ (u64)(vnet_buffer(b1)->gre.src);
+
+ if (cached_tunnel_key != key)
+ {
+ vnet_hw_interface_t * hi;
+ ipsec_gre_tunnel_t * t;
+ uword * p;
+
+ p = hash_get (igm->tunnel_by_key, key);
+ if (!p)
+ {
+ next1 = IPSEC_GRE_INPUT_NEXT_DROP;
+ b1->error = node->errors[IPSEC_GRE_ERROR_NO_SUCH_TUNNEL];
+ goto drop1;
+ }
+ t = pool_elt_at_index (igm->tunnels, p[0]);
+ hi = vnet_get_hw_interface (igm->vnet_main,
+ t->hw_if_index);
+ tunnel_sw_if_index = hi->sw_if_index;
+                      cached_tunnel_key = key;
+                      cached_tunnel_sw_if_index = tunnel_sw_if_index;
+ }
+ else
+ {
+ tunnel_sw_if_index = cached_tunnel_sw_if_index;
+ }
+ vnet_buffer(b1)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;
+ }
+
+drop1:
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ipsec_gre_rx_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->tunnel_id = ~0;
+ tr->length = ip0->length;
+ tr->src.as_u32 = ip0->src_address.as_u32;
+ tr->dst.as_u32 = ip0->dst_address.as_u32;
+ }
+
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ipsec_gre_rx_trace_t *tr = vlib_add_trace (vm, node,
+ b1, sizeof (*tr));
+ tr->tunnel_id = ~0;
+ tr->length = ip1->length;
+ tr->src.as_u32 = ip1->src_address.as_u32;
+ tr->dst.as_u32 = ip1->dst_address.as_u32;
+ }
+
+ vlib_buffer_advance (b0, sizeof (*h0));
+ vlib_buffer_advance (b1, sizeof (*h1));
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ gre_header_t * h0;
+ ip4_header_t * ip0;
+ u16 version0, protocol0;
+ int verr0;
+ u32 next0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (b0);
+
+ vnet_buffer(b0)->gre.src = ip0->src_address.as_u32;
+ vnet_buffer(b0)->gre.dst = ip0->dst_address.as_u32;
+
+ vlib_buffer_advance (b0, sizeof (*ip0));
+
+ h0 = vlib_buffer_get_current (b0);
+
+ protocol0 = clib_net_to_host_u16 (h0->protocol);
+ if (PREDICT_TRUE(protocol0 == 0x0001))
+ {
+ next0 = IPSEC_GRE_INPUT_NEXT_L2_INPUT;
+ b0->error = node->errors[IPSEC_GRE_ERROR_NONE];
+ }
+ else
+ {
+ clib_warning("unknown GRE protocol: %d", protocol0);
+ b0->error = node->errors[IPSEC_GRE_ERROR_UNKNOWN_PROTOCOL];
+ next0 = IPSEC_GRE_INPUT_NEXT_DROP;
+ }
+
+ version0 = clib_net_to_host_u16 (h0->flags_and_version);
+ verr0 = version0 & GRE_VERSION_MASK;
+ b0->error = verr0 ? node->errors[IPSEC_GRE_ERROR_UNSUPPORTED_VERSION]
+ : b0->error;
+ next0 = verr0 ? IPSEC_GRE_INPUT_NEXT_DROP : next0;
+
+ /* For L2 payload set input sw_if_index to GRE tunnel for learning */
+ if (PREDICT_FALSE(next0 == IPSEC_GRE_INPUT_NEXT_L2_INPUT))
+ {
+ u64 key = ((u64)(vnet_buffer(b0)->gre.dst) << 32) |
+ (u64)(vnet_buffer(b0)->gre.src);
+
+ if (cached_tunnel_key != key)
+ {
+ vnet_hw_interface_t * hi;
+ ipsec_gre_tunnel_t * t;
+ uword * p;
+
+ p = hash_get (igm->tunnel_by_key, key);
+ if (!p)
+ {
+ next0 = IPSEC_GRE_INPUT_NEXT_DROP;
+ b0->error = node->errors[IPSEC_GRE_ERROR_NO_SUCH_TUNNEL];
+ goto drop;
+ }
+ t = pool_elt_at_index (igm->tunnels, p[0]);
+ hi = vnet_get_hw_interface (igm->vnet_main,
+ t->hw_if_index);
+ tunnel_sw_if_index = hi->sw_if_index;
+                  cached_tunnel_key = key;
+                  cached_tunnel_sw_if_index = tunnel_sw_if_index;
+ }
+ else
+ {
+ tunnel_sw_if_index = cached_tunnel_sw_if_index;
+ }
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;
+ }
+
+drop:
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ipsec_gre_rx_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->tunnel_id = ~0;
+ tr->length = ip0->length;
+ tr->src.as_u32 = ip0->src_address.as_u32;
+ tr->dst.as_u32 = ip0->dst_address.as_u32;
+ }
+
+ vlib_buffer_advance (b0, sizeof (*h0));
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, ipsec_gre_input_node.index,
+ IPSEC_GRE_ERROR_PKTS_DECAP, from_frame->n_vectors);
+ return from_frame->n_vectors;
+}
+
+static char * ipsec_gre_error_strings[] = {
+#define ipsec_gre_error(n,s) s,
+#include "error.def"
+#undef ipsec_gre_error
+};
+
+VLIB_REGISTER_NODE (ipsec_gre_input_node) = {
+ .function = ipsec_gre_input,
+ .name = "ipsec-gre-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_errors = IPSEC_GRE_N_ERROR,
+ .error_strings = ipsec_gre_error_strings,
+
+ .n_next_nodes = IPSEC_GRE_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [IPSEC_GRE_INPUT_NEXT_##s] = n,
+ foreach_ipsec_gre_input_next
+#undef _
+ },
+
+ .format_trace = format_ipsec_gre_rx_trace,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (ipsec_gre_input_node, ipsec_gre_input)
+
+static clib_error_t * ipsec_gre_input_init (vlib_main_t * vm)
+{
+ {
+ clib_error_t * error;
+ error = vlib_call_init_function (vm, ipsec_gre_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ipsec_gre_input_init);
diff --git a/src/vnet/ipsec/esp.h b/src/vnet/ipsec/esp.h
new file mode 100644
index 00000000000..50cac806d14
--- /dev/null
+++ b/src/vnet/ipsec/esp.h
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __ESP_H__
+#define __ESP_H__
+
+#include <openssl/hmac.h>
+#include <openssl/rand.h>
+#include <openssl/evp.h>
+
+typedef struct
+{
+ u32 spi;
+ u32 seq;
+ u8 data[0];
+} esp_header_t;
+
+typedef struct
+{
+ u8 pad_length;
+ u8 next_header;
+} esp_footer_t;
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ ip4_header_t ip4;
+ esp_header_t esp;
+}) ip4_and_esp_header_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ ip6_header_t ip6;
+ esp_header_t esp;
+}) ip6_and_esp_header_t;
+/* *INDENT-ON* */
+
+typedef struct
+{
+ const EVP_CIPHER *type;
+} esp_crypto_alg_t;
+
+typedef struct
+{
+ const EVP_MD *md;
+ u8 trunc_size;
+} esp_integ_alg_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ EVP_CIPHER_CTX encrypt_ctx;
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
+ EVP_CIPHER_CTX decrypt_ctx;
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
+ HMAC_CTX hmac_ctx;
+ ipsec_crypto_alg_t last_encrypt_alg;
+ ipsec_crypto_alg_t last_decrypt_alg;
+ ipsec_integ_alg_t last_integ_alg;
+} esp_main_per_thread_data_t;
+
+typedef struct
+{
+ esp_crypto_alg_t *esp_crypto_algs;
+ esp_integ_alg_t *esp_integ_algs;
+ esp_main_per_thread_data_t *per_thread_data;
+} esp_main_t;
+
+esp_main_t esp_main;
+
+#define ESP_WINDOW_SIZE (64)
+#define ESP_SEQ_MAX (4294967295UL)
+
+
+always_inline int
+esp_replay_check (ipsec_sa_t * sa, u32 seq)
+{
+ u32 diff;
+
+ if (PREDICT_TRUE (seq > sa->last_seq))
+ return 0;
+
+ diff = sa->last_seq - seq;
+
+ if (ESP_WINDOW_SIZE > diff)
+ return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
+ else
+ return 1;
+}
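+
+/*
+ * Editor's note, a worked example of the check above (ESP_WINDOW_SIZE is
+ * 64): with sa->last_seq == 200, a packet with seq == 190 gives
+ * diff == 10, so bit 10 of the 64-bit window is tested -- set means the
+ * packet was already seen (replay), clear means it is new.  seq == 120
+ * gives diff == 80 >= 64, outside the window, and is always reported as
+ * a replay.
+ */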
+
+always_inline int
+esp_replay_check_esn (ipsec_sa_t * sa, u32 seq)
+{
+ u32 tl = sa->last_seq;
+ u32 th = sa->last_seq_hi;
+ u32 diff = tl - seq;
+
+ if (PREDICT_TRUE (tl >= (ESP_WINDOW_SIZE - 1)))
+ {
+ if (seq >= (tl - ESP_WINDOW_SIZE + 1))
+ {
+ sa->seq_hi = th;
+ if (seq <= tl)
+ return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
+ else
+ return 0;
+ }
+ else
+ {
+ sa->seq_hi = th + 1;
+ return 0;
+ }
+ }
+ else
+ {
+ if (seq >= (tl - ESP_WINDOW_SIZE + 1))
+ {
+ sa->seq_hi = th - 1;
+ return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
+ }
+ else
+ {
+ sa->seq_hi = th;
+ if (seq <= tl)
+ return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
+ else
+ return 0;
+ }
+ }
+}
+
+/* TODO: seq increment should be atomic, as it may be accessed by multiple workers */
+always_inline void
+esp_replay_advance (ipsec_sa_t * sa, u32 seq)
+{
+ u32 pos;
+
+ if (seq > sa->last_seq)
+ {
+ pos = seq - sa->last_seq;
+ if (pos < ESP_WINDOW_SIZE)
+ sa->replay_window = ((sa->replay_window) << pos) | 1;
+ else
+ sa->replay_window = 1;
+ sa->last_seq = seq;
+ }
+ else
+ {
+ pos = sa->last_seq - seq;
+ sa->replay_window |= (1ULL << pos);
+ }
+}
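+
+/*
+ * Editor's note, a worked example of the advance above: moving from
+ * last_seq == 200 to seq == 203 shifts the window left by pos == 3 and
+ * sets bit 0 for the new packet; a late packet with seq == 198 arriving
+ * afterwards takes the else branch (pos == 5) and merely sets its bit
+ * without moving the window.
+ */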
+
+always_inline void
+esp_replay_advance_esn (ipsec_sa_t * sa, u32 seq)
+{
+ int wrap = sa->seq_hi - sa->last_seq_hi;
+ u32 pos;
+
+ if (wrap == 0 && seq > sa->last_seq)
+ {
+ pos = seq - sa->last_seq;
+ if (pos < ESP_WINDOW_SIZE)
+ sa->replay_window = ((sa->replay_window) << pos) | 1;
+ else
+ sa->replay_window = 1;
+ sa->last_seq = seq;
+ }
+ else if (wrap > 0)
+ {
+ pos = ~seq + sa->last_seq + 1;
+ if (pos < ESP_WINDOW_SIZE)
+ sa->replay_window = ((sa->replay_window) << pos) | 1;
+ else
+ sa->replay_window = 1;
+ sa->last_seq = seq;
+ sa->last_seq_hi = sa->seq_hi;
+ }
+ else if (wrap < 0)
+ {
+ pos = ~seq + sa->last_seq + 1;
+ sa->replay_window |= (1ULL << pos);
+ }
+ else
+ {
+ pos = sa->last_seq - seq;
+ sa->replay_window |= (1ULL << pos);
+ }
+}
+
+always_inline int
+esp_seq_advance (ipsec_sa_t * sa)
+{
+ if (PREDICT_TRUE (sa->use_esn))
+ {
+ if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
+ {
+ if (PREDICT_FALSE
+ (sa->use_anti_replay && sa->seq_hi == ESP_SEQ_MAX))
+ return 1;
+ sa->seq_hi++;
+ }
+ sa->seq++;
+ }
+ else
+ {
+ if (PREDICT_FALSE (sa->use_anti_replay && sa->seq == ESP_SEQ_MAX))
+ return 1;
+ sa->seq++;
+ }
+
+ return 0;
+}
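+
+/*
+ * Editor's note: with extended sequence numbers the effective counter is
+ * the 64-bit value (seq_hi << 32) | seq, so when the low half wraps at
+ * ESP_SEQ_MAX the high half is incremented instead of failing.  The
+ * non-ESN path returns 1 on wrap (when anti-replay is enabled) to signal
+ * that the SA is exhausted and must be re-keyed.
+ */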
+
+always_inline void
+esp_init ()
+{
+ esp_main_t *em = &esp_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+
+ memset (em, 0, sizeof (em[0]));
+
+ vec_validate (em->esp_crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
+ em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128].type = EVP_aes_128_cbc ();
+ em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192].type = EVP_aes_192_cbc ();
+ em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256].type = EVP_aes_256_cbc ();
+
+ vec_validate (em->esp_integ_algs, IPSEC_INTEG_N_ALG - 1);
+ esp_integ_alg_t *i;
+
+ i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA1_96];
+ i->md = EVP_sha1 ();
+ i->trunc_size = 12;
+
+ i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_256_96];
+ i->md = EVP_sha256 ();
+ i->trunc_size = 12;
+
+ i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_256_128];
+ i->md = EVP_sha256 ();
+ i->trunc_size = 16;
+
+ i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_384_192];
+ i->md = EVP_sha384 ();
+ i->trunc_size = 24;
+
+ i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
+ i->md = EVP_sha512 ();
+ i->trunc_size = 32;
+
+ vec_validate_aligned (em->per_thread_data, tm->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
+ int thread_id;
+
+ for (thread_id = 0; thread_id < tm->n_vlib_mains; thread_id++)
+ {
+ EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].encrypt_ctx));
+ EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].decrypt_ctx));
+ HMAC_CTX_init (&(em->per_thread_data[thread_id].hmac_ctx));
+ }
+}
+
+always_inline unsigned int
+hmac_calc (ipsec_integ_alg_t alg,
+ u8 * key,
+ int key_len,
+ u8 * data, int data_len, u8 * signature, u8 use_esn, u32 seq_hi)
+{
+ esp_main_t *em = &esp_main;
+ u32 cpu_index = os_get_cpu_number ();
+ HMAC_CTX *ctx = &(em->per_thread_data[cpu_index].hmac_ctx);
+ const EVP_MD *md = NULL;
+ unsigned int len;
+
+ ASSERT (alg < IPSEC_INTEG_N_ALG);
+
+ if (PREDICT_FALSE (em->esp_integ_algs[alg].md == 0))
+ return 0;
+
+ if (PREDICT_FALSE (alg != em->per_thread_data[cpu_index].last_integ_alg))
+ {
+ md = em->esp_integ_algs[alg].md;
+ em->per_thread_data[cpu_index].last_integ_alg = alg;
+ }
+
+ HMAC_Init (ctx, key, key_len, md);
+
+ HMAC_Update (ctx, data, data_len);
+
+ if (PREDICT_TRUE (use_esn))
+ HMAC_Update (ctx, (u8 *) & seq_hi, sizeof (seq_hi));
+ HMAC_Final (ctx, signature, &len);
+
+ return em->esp_integ_algs[alg].trunc_size;
+}
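+
+/*
+ * Editor's note: the digest is computed in full but only trunc_size
+ * bytes are used on the wire, e.g. HMAC-SHA1-96 produces a 20-byte MAC
+ * of which the first 12 bytes are transmitted (RFC 2404); with ESN the
+ * high 32 bits of the sequence number are appended to the authenticated
+ * data, as RFC 4303 prescribes.  Callers must therefore pass a signature
+ * buffer large enough for the full digest.
+ */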
+
+#endif /* __ESP_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
new file mode 100644
index 00000000000..e69cd85101a
--- /dev/null
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -0,0 +1,430 @@
+/*
+ * esp_decrypt.c : IPSec ESP decrypt node
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+
+#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/esp.h>
+
+#define foreach_esp_decrypt_next \
+_(DROP, "error-drop") \
+_(IP4_INPUT, "ip4-input") \
+_(IP6_INPUT, "ip6-input") \
+_(IPSEC_GRE_INPUT, "ipsec-gre-input")
+
+#define _(v, s) ESP_DECRYPT_NEXT_##v,
+typedef enum
+{
+ foreach_esp_decrypt_next
+#undef _
+ ESP_DECRYPT_N_NEXT,
+} esp_decrypt_next_t;
+
+
+#define foreach_esp_decrypt_error \
+ _(RX_PKTS, "ESP pkts received") \
+ _(NO_BUFFER, "No buffer (packet dropped)") \
+ _(DECRYPTION_FAILED, "ESP decryption failed") \
+ _(INTEG_ERROR, "Integrity check failed") \
+ _(REPLAY, "SA replayed packet") \
+ _(NOT_IP, "Not IP packet (dropped)")
+
+
+typedef enum
+{
+#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
+ foreach_esp_decrypt_error
+#undef _
+ ESP_DECRYPT_N_ERROR,
+} esp_decrypt_error_t;
+
+static char *esp_decrypt_error_strings[] = {
+#define _(sym,string) string,
+ foreach_esp_decrypt_error
+#undef _
+};
+
+typedef struct
+{
+ ipsec_crypto_alg_t crypto_alg;
+ ipsec_integ_alg_t integ_alg;
+} esp_decrypt_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_esp_decrypt_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);
+
+ s = format (s, "esp: crypto %U integrity %U",
+ format_ipsec_crypto_alg, t->crypto_alg,
+ format_ipsec_integ_alg, t->integ_alg);
+ return s;
+}
+
+always_inline void
+esp_decrypt_aes_cbc (ipsec_crypto_alg_t alg,
+ u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
+{
+ esp_main_t *em = &esp_main;
+ u32 cpu_index = os_get_cpu_number ();
+ EVP_CIPHER_CTX *ctx = &(em->per_thread_data[cpu_index].decrypt_ctx);
+ const EVP_CIPHER *cipher = NULL;
+ int out_len;
+
+ ASSERT (alg < IPSEC_CRYPTO_N_ALG);
+
+ if (PREDICT_FALSE (em->esp_crypto_algs[alg].type == 0))
+ return;
+
+ if (PREDICT_FALSE (alg != em->per_thread_data[cpu_index].last_decrypt_alg))
+ {
+ cipher = em->esp_crypto_algs[alg].type;
+ em->per_thread_data[cpu_index].last_decrypt_alg = alg;
+ }
+
+ EVP_DecryptInit_ex (ctx, cipher, NULL, key, iv);
+
+ EVP_DecryptUpdate (ctx, out, &out_len, in, in_len);
+ EVP_DecryptFinal_ex (ctx, out + out_len, &out_len);
+}
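+
+/*
+ * Editor's note: the wire format consumed below is the RFC 4303 ESP
+ * layout
+ *
+ *   [SPI (4)] [Seq (4)] [IV (16)] [ciphertext ...] [ICV (trunc_size)]
+ *
+ * so esp0->data points at the IV, esp0->data + IV_SIZE at the
+ * ciphertext, and the decrypted payload ends in the esp_footer_t
+ * (padding, pad_length, next_header).
+ */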
+
+static uword
+esp_decrypt_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ u32 n_left_from, *from, next_index, *to_next;
+ ipsec_main_t *im = &ipsec_main;
+ esp_main_t *em = &esp_main;
+ u32 *recycle = 0;
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+ u32 cpu_index = os_get_cpu_number ();
+
+ ipsec_alloc_empty_buffers (vm, im);
+
+ u32 *empty_buffers = im->empty_buffers[cpu_index];
+
+ if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from))
+ {
+ vlib_node_increment_counter (vm, esp_decrypt_node.index,
+ ESP_DECRYPT_ERROR_NO_BUFFER, n_left_from);
+ goto free_buffers_and_exit;
+ }
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 i_bi0, o_bi0 = (u32) ~ 0, next0;
+ vlib_buffer_t *i_b0;
+ vlib_buffer_t *o_b0 = 0;
+ esp_header_t *esp0;
+ ipsec_sa_t *sa0;
+ u32 sa_index0 = ~0;
+ u32 seq;
+ ip4_header_t *ih4 = 0, *oh4 = 0;
+ ip6_header_t *ih6 = 0, *oh6 = 0;
+ u8 tunnel_mode = 1;
+ u8 transport_ip6 = 0;
+
+
+ i_bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ next0 = ESP_DECRYPT_NEXT_DROP;
+
+ i_b0 = vlib_get_buffer (vm, i_bi0);
+ esp0 = vlib_buffer_get_current (i_b0);
+
+ sa_index0 = vnet_buffer (i_b0)->ipsec.sad_index;
+ sa0 = pool_elt_at_index (im->sad, sa_index0);
+
+ seq = clib_host_to_net_u32 (esp0->seq);
+
+ /* anti-replay check */
+ if (sa0->use_anti_replay)
+ {
+ int rv = 0;
+
+ if (PREDICT_TRUE (sa0->use_esn))
+ rv = esp_replay_check_esn (sa0, seq);
+ else
+ rv = esp_replay_check (sa0, seq);
+
+ if (PREDICT_FALSE (rv))
+ {
+ clib_warning ("anti-replay SPI %u seq %u", sa0->spi, seq);
+ vlib_node_increment_counter (vm, esp_decrypt_node.index,
+ ESP_DECRYPT_ERROR_REPLAY, 1);
+ o_bi0 = i_bi0;
+ to_next[0] = o_bi0;
+ to_next += 1;
+ goto trace;
+ }
+ }
+
+ if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
+ {
+ u8 sig[64];
+ int icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
+ memset (sig, 0, sizeof (sig));
+ u8 *icv =
+ vlib_buffer_get_current (i_b0) + i_b0->current_length -
+ icv_size;
+ i_b0->current_length -= icv_size;
+
+ hmac_calc (sa0->integ_alg, sa0->integ_key, sa0->integ_key_len,
+ (u8 *) esp0, i_b0->current_length, sig, sa0->use_esn,
+ sa0->seq_hi);
+
+ if (PREDICT_FALSE (memcmp (icv, sig, icv_size)))
+ {
+ vlib_node_increment_counter (vm, esp_decrypt_node.index,
+ ESP_DECRYPT_ERROR_INTEG_ERROR,
+ 1);
+ o_bi0 = i_bi0;
+ to_next[0] = o_bi0;
+ to_next += 1;
+ goto trace;
+ }
+ }
+
+ if (PREDICT_TRUE (sa0->use_anti_replay))
+ {
+ if (PREDICT_TRUE (sa0->use_esn))
+ esp_replay_advance_esn (sa0, seq);
+ else
+ esp_replay_advance (sa0, seq);
+ }
+
+ /* grab free buffer */
+ uword last_empty_buffer = vec_len (empty_buffers) - 1;
+ o_bi0 = empty_buffers[last_empty_buffer];
+ to_next[0] = o_bi0;
+ to_next += 1;
+ o_b0 = vlib_get_buffer (vm, o_bi0);
+ vlib_prefetch_buffer_with_index (vm,
+ empty_buffers[last_empty_buffer -
+ 1], STORE);
+ _vec_len (empty_buffers) = last_empty_buffer;
+
+ /* add old buffer to the recycle list */
+ vec_add1 (recycle, i_bi0);
+
+ if (sa0->crypto_alg >= IPSEC_CRYPTO_ALG_AES_CBC_128 &&
+ sa0->crypto_alg <= IPSEC_CRYPTO_ALG_AES_CBC_256)
+ {
+ const int BLOCK_SIZE = 16;
+ const int IV_SIZE = 16;
+ esp_footer_t *f0;
+ u8 ip_hdr_size = 0;
+
+ int blocks =
+ (i_b0->current_length - sizeof (esp_header_t) -
+ IV_SIZE) / BLOCK_SIZE;
+
+ o_b0->current_data = sizeof (ethernet_header_t);
+
+ /* transport mode */
+ if (PREDICT_FALSE (!sa0->is_tunnel && !sa0->is_tunnel_ip6))
+ {
+ tunnel_mode = 0;
+ ih4 =
+ (ip4_header_t *) (i_b0->data +
+ sizeof (ethernet_header_t));
+ if (PREDICT_TRUE
+ ((ih4->ip_version_and_header_length & 0xF0) != 0x40))
+ {
+ if (PREDICT_TRUE
+ ((ih4->ip_version_and_header_length & 0xF0) ==
+ 0x60))
+ {
+ transport_ip6 = 1;
+ ip_hdr_size = sizeof (ip6_header_t);
+ ih6 =
+ (ip6_header_t *) (i_b0->data +
+ sizeof (ethernet_header_t));
+ oh6 = vlib_buffer_get_current (o_b0);
+ }
+ else
+ {
+ vlib_node_increment_counter (vm,
+ esp_decrypt_node.index,
+ ESP_DECRYPT_ERROR_NOT_IP,
+ 1);
+ o_b0 = 0;
+ goto trace;
+ }
+ }
+ else
+ {
+ oh4 = vlib_buffer_get_current (o_b0);
+ ip_hdr_size = sizeof (ip4_header_t);
+ }
+ }
+
+ esp_decrypt_aes_cbc (sa0->crypto_alg,
+ esp0->data + IV_SIZE,
+ (u8 *) vlib_buffer_get_current (o_b0) +
+ ip_hdr_size, BLOCK_SIZE * blocks,
+ sa0->crypto_key, esp0->data);
+
+ o_b0->current_length = (blocks * BLOCK_SIZE) - 2 + ip_hdr_size;
+ o_b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ f0 =
+ (esp_footer_t *) ((u8 *) vlib_buffer_get_current (o_b0) +
+ o_b0->current_length);
+ o_b0->current_length -= f0->pad_length;
+
+ /* tunnel mode */
+ if (PREDICT_TRUE (tunnel_mode))
+ {
+ if (PREDICT_TRUE (f0->next_header == IP_PROTOCOL_IP_IN_IP))
+ {
+ next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
+ oh4 = vlib_buffer_get_current (o_b0);
+ }
+ else if (f0->next_header == IP_PROTOCOL_IPV6)
+ next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
+ else
+ {
+ clib_warning ("next header: 0x%x", f0->next_header);
+ vlib_node_increment_counter (vm, esp_decrypt_node.index,
+ ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
+ 1);
+ o_b0 = 0;
+ goto trace;
+ }
+ }
+ /* transport mode */
+ else
+ {
+ if (PREDICT_FALSE (transport_ip6))
+ {
+ next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
+ oh6->ip_version_traffic_class_and_flow_label =
+ ih6->ip_version_traffic_class_and_flow_label;
+ oh6->protocol = f0->next_header;
+ oh6->hop_limit = ih6->hop_limit;
+ oh6->src_address.as_u64[0] = ih6->src_address.as_u64[0];
+ oh6->src_address.as_u64[1] = ih6->src_address.as_u64[1];
+ oh6->dst_address.as_u64[0] = ih6->dst_address.as_u64[0];
+ oh6->dst_address.as_u64[1] = ih6->dst_address.as_u64[1];
+ oh6->payload_length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain
+ (vm,
+ o_b0) - sizeof (ip6_header_t));
+ }
+ else
+ {
+ next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
+ oh4->ip_version_and_header_length = 0x45;
+ oh4->tos = ih4->tos;
+ oh4->fragment_id = 0;
+ oh4->flags_and_fragment_offset = 0;
+ oh4->ttl = ih4->ttl;
+ oh4->protocol = f0->next_header;
+ oh4->src_address.as_u32 = ih4->src_address.as_u32;
+ oh4->dst_address.as_u32 = ih4->dst_address.as_u32;
+ oh4->length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain
+ (vm, o_b0));
+ oh4->checksum = ip4_header_checksum (oh4);
+ }
+ }
+
+ /* for IPSec-GRE tunnel next node is ipsec-gre-input */
+ if (PREDICT_FALSE
+ ((vnet_buffer (i_b0)->ipsec.flags) &
+ IPSEC_FLAG_IPSEC_GRE_TUNNEL))
+ next0 = ESP_DECRYPT_NEXT_IPSEC_GRE_INPUT;
+
+ vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ }
+
+ trace:
+ if (PREDICT_FALSE (i_b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ if (o_b0)
+ {
+ o_b0->flags |= VLIB_BUFFER_IS_TRACED;
+ o_b0->trace_index = i_b0->trace_index;
+ esp_decrypt_trace_t *tr =
+ vlib_add_trace (vm, node, o_b0, sizeof (*tr));
+ tr->crypto_alg = sa0->crypto_alg;
+ tr->integ_alg = sa0->integ_alg;
+ }
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, o_bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, esp_decrypt_node.index,
+ ESP_DECRYPT_ERROR_RX_PKTS,
+ from_frame->n_vectors);
+
+free_buffers_and_exit:
+ if (recycle)
+ vlib_buffer_free (vm, recycle, vec_len (recycle));
+ vec_free (recycle);
+ return from_frame->n_vectors;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (esp_decrypt_node) = {
+ .function = esp_decrypt_node_fn,
+ .name = "esp-decrypt",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_decrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
+ .error_strings = esp_decrypt_error_strings,
+
+ .n_next_nodes = ESP_DECRYPT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
+ foreach_esp_decrypt_next
+#undef _
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (esp_decrypt_node, esp_decrypt_node_fn)
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
new file mode 100644
index 00000000000..7b7f9b9c4c7
--- /dev/null
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -0,0 +1,425 @@
+/*
+ * esp_encrypt.c : IPSec ESP encrypt node
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+
+#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/esp.h>
+
+
+#define foreach_esp_encrypt_next \
+_(DROP, "error-drop") \
+_(IP4_LOOKUP, "ip4-lookup") \
+_(IP6_LOOKUP, "ip6-lookup") \
+_(INTERFACE_OUTPUT, "interface-output")
+
+#define _(v, s) ESP_ENCRYPT_NEXT_##v,
+typedef enum
+{
+ foreach_esp_encrypt_next
+#undef _
+ ESP_ENCRYPT_N_NEXT,
+} esp_encrypt_next_t;
+
+#define foreach_esp_encrypt_error \
+ _(RX_PKTS, "ESP pkts received") \
+ _(NO_BUFFER, "No buffer (packet dropped)") \
+ _(DECRYPTION_FAILED, "ESP encryption failed") \
+ _(SEQ_CYCLED, "sequence number cycled")
+
+
+typedef enum
+{
+#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
+ foreach_esp_encrypt_error
+#undef _
+ ESP_ENCRYPT_N_ERROR,
+} esp_encrypt_error_t;
+
+static char *esp_encrypt_error_strings[] = {
+#define _(sym,string) string,
+ foreach_esp_encrypt_error
+#undef _
+};
+
+vlib_node_registration_t esp_encrypt_node;
+
+typedef struct
+{
+ u32 spi;
+ u32 seq;
+ ipsec_crypto_alg_t crypto_alg;
+ ipsec_integ_alg_t integ_alg;
+} esp_encrypt_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_esp_encrypt_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);
+
+ s = format (s, "esp: spi %u seq %u crypto %U integrity %U",
+ t->spi, t->seq,
+ format_ipsec_crypto_alg, t->crypto_alg,
+ format_ipsec_integ_alg, t->integ_alg);
+ return s;
+}
+
+always_inline void
+esp_encrypt_aes_cbc (ipsec_crypto_alg_t alg,
+ u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
+{
+ esp_main_t *em = &esp_main;
+ u32 cpu_index = os_get_cpu_number ();
+ EVP_CIPHER_CTX *ctx = &(em->per_thread_data[cpu_index].encrypt_ctx);
+ const EVP_CIPHER *cipher = NULL;
+ int out_len;
+
+ ASSERT (alg < IPSEC_CRYPTO_N_ALG);
+
+ if (PREDICT_FALSE (em->esp_crypto_algs[alg].type == 0))
+ return;
+
+ if (PREDICT_FALSE (alg != em->per_thread_data[cpu_index].last_encrypt_alg))
+ {
+ cipher = em->esp_crypto_algs[alg].type;
+ em->per_thread_data[cpu_index].last_encrypt_alg = alg;
+ }
+
+ EVP_EncryptInit_ex (ctx, cipher, NULL, key, iv);
+
+ EVP_EncryptUpdate (ctx, out, &out_len, in, in_len);
+ EVP_EncryptFinal_ex (ctx, out + out_len, &out_len);
+}
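+
+/*
+ * Editor's note: `cipher` stays NULL when the algorithm is unchanged;
+ * OpenSSL's EVP_EncryptInit_ex() treats a NULL cipher as "reuse the
+ * cipher already configured in this context", so only the key and IV are
+ * re-initialized on the fast path.  The esp_decrypt_aes_cbc()
+ * counterpart relies on the same behaviour.
+ */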
+
+static uword
+esp_encrypt_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ u32 n_left_from, *from, *to_next = 0, next_index;
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+ ipsec_main_t *im = &ipsec_main;
+ u32 *recycle = 0;
+ u32 cpu_index = os_get_cpu_number ();
+
+ ipsec_alloc_empty_buffers (vm, im);
+
+ u32 *empty_buffers = im->empty_buffers[cpu_index];
+
+ if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from))
+ {
+ vlib_node_increment_counter (vm, esp_encrypt_node.index,
+ ESP_ENCRYPT_ERROR_NO_BUFFER, n_left_from);
+ clib_warning ("no enough empty buffers. discarding frame");
+ goto free_buffers_and_exit;
+ }
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 i_bi0, o_bi0, next0;
+ vlib_buffer_t *i_b0, *o_b0 = 0;
+ u32 sa_index0;
+ ipsec_sa_t *sa0;
+ ip4_and_esp_header_t *ih0, *oh0 = 0;
+ ip6_and_esp_header_t *ih6_0, *oh6_0 = 0;
+ uword last_empty_buffer;
+ esp_header_t *o_esp0;
+ esp_footer_t *f0;
+ u8 is_ipv6;
+ u8 ip_hdr_size;
+ u8 next_hdr_type;
+ u32 ip_proto = 0;
+ u8 transport_mode = 0;
+
+ i_bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ next0 = ESP_ENCRYPT_NEXT_DROP;
+
+ i_b0 = vlib_get_buffer (vm, i_bi0);
+ sa_index0 = vnet_buffer (i_b0)->ipsec.sad_index;
+ sa0 = pool_elt_at_index (im->sad, sa_index0);
+
+ if (PREDICT_FALSE (esp_seq_advance (sa0)))
+ {
+ clib_warning ("sequence number counter has cycled SPI %u",
+ sa0->spi);
+ vlib_node_increment_counter (vm, esp_encrypt_node.index,
+ ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
+ //TODO: rekey SA
+ o_bi0 = i_bi0;
+ to_next[0] = o_bi0;
+ to_next += 1;
+ goto trace;
+ }
+
+ /* grab free buffer */
+ last_empty_buffer = vec_len (empty_buffers) - 1;
+ o_bi0 = empty_buffers[last_empty_buffer];
+ o_b0 = vlib_get_buffer (vm, o_bi0);
+ o_b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ o_b0->current_data = sizeof (ethernet_header_t);
+ ih0 = vlib_buffer_get_current (i_b0);
+ vlib_prefetch_buffer_with_index (vm,
+ empty_buffers[last_empty_buffer -
+ 1], STORE);
+ _vec_len (empty_buffers) = last_empty_buffer;
+ to_next[0] = o_bi0;
+ to_next += 1;
+
+ /* add old buffer to the recycle list */
+ vec_add1 (recycle, i_bi0);
+
+ /* is ipv6 */
+ if (PREDICT_FALSE
+ ((ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60))
+ {
+ is_ipv6 = 1;
+ ih6_0 = vlib_buffer_get_current (i_b0);
+ ip_hdr_size = sizeof (ip6_header_t);
+ next_hdr_type = IP_PROTOCOL_IPV6;
+ oh6_0 = vlib_buffer_get_current (o_b0);
+ o_esp0 = vlib_buffer_get_current (o_b0) + sizeof (ip6_header_t);
+
+ oh6_0->ip6.ip_version_traffic_class_and_flow_label =
+ ih6_0->ip6.ip_version_traffic_class_and_flow_label;
+ oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
+ oh6_0->ip6.hop_limit = 254;
+ oh6_0->ip6.src_address.as_u64[0] =
+ ih6_0->ip6.src_address.as_u64[0];
+ oh6_0->ip6.src_address.as_u64[1] =
+ ih6_0->ip6.src_address.as_u64[1];
+ oh6_0->ip6.dst_address.as_u64[0] =
+ ih6_0->ip6.dst_address.as_u64[0];
+ oh6_0->ip6.dst_address.as_u64[1] =
+ ih6_0->ip6.dst_address.as_u64[1];
+ oh6_0->esp.spi = clib_net_to_host_u32 (sa0->spi);
+ oh6_0->esp.seq = clib_net_to_host_u32 (sa0->seq);
+ ip_proto = ih6_0->ip6.protocol;
+
+ next0 = ESP_ENCRYPT_NEXT_IP6_LOOKUP;
+ }
+ else
+ {
+ is_ipv6 = 0;
+ ip_hdr_size = sizeof (ip4_header_t);
+ next_hdr_type = IP_PROTOCOL_IP_IN_IP;
+ oh0 = vlib_buffer_get_current (o_b0);
+ o_esp0 = vlib_buffer_get_current (o_b0) + sizeof (ip4_header_t);
+
+ oh0->ip4.ip_version_and_header_length = 0x45;
+ oh0->ip4.tos = ih0->ip4.tos;
+ oh0->ip4.fragment_id = 0;
+ oh0->ip4.flags_and_fragment_offset = 0;
+ oh0->ip4.ttl = 254;
+ oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
+ oh0->ip4.src_address.as_u32 = ih0->ip4.src_address.as_u32;
+ oh0->ip4.dst_address.as_u32 = ih0->ip4.dst_address.as_u32;
+ oh0->esp.spi = clib_net_to_host_u32 (sa0->spi);
+ oh0->esp.seq = clib_net_to_host_u32 (sa0->seq);
+ ip_proto = ih0->ip4.protocol;
+
+ next0 = ESP_ENCRYPT_NEXT_IP4_LOOKUP;
+ }
+
+ if (PREDICT_TRUE
+ (!is_ipv6 && sa0->is_tunnel && !sa0->is_tunnel_ip6))
+ {
+ oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
+ oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;
+
+ vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ }
+ else if (is_ipv6 && sa0->is_tunnel && sa0->is_tunnel_ip6)
+ {
+ oh6_0->ip6.src_address.as_u64[0] =
+ sa0->tunnel_src_addr.ip6.as_u64[0];
+ oh6_0->ip6.src_address.as_u64[1] =
+ sa0->tunnel_src_addr.ip6.as_u64[1];
+ oh6_0->ip6.dst_address.as_u64[0] =
+ sa0->tunnel_dst_addr.ip6.as_u64[0];
+ oh6_0->ip6.dst_address.as_u64[1] =
+ sa0->tunnel_dst_addr.ip6.as_u64[1];
+
+ vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ }
+ else
+ {
+ next_hdr_type = ip_proto;
+ if (vnet_buffer (i_b0)->sw_if_index[VLIB_TX] != ~0)
+ {
+ transport_mode = 1;
+ ethernet_header_t *ieh0, *oeh0;
+ ieh0 =
+ (ethernet_header_t *) ((u8 *)
+ vlib_buffer_get_current (i_b0) -
+ sizeof (ethernet_header_t));
+ oeh0 = (ethernet_header_t *) o_b0->data;
+ clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t));
+ next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+ vnet_buffer (o_b0)->sw_if_index[VLIB_TX] =
+ vnet_buffer (i_b0)->sw_if_index[VLIB_TX];
+ }
+ vlib_buffer_advance (i_b0, ip_hdr_size);
+ }
+
+ ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);
+
+ if (PREDICT_TRUE (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE))
+ {
+
+ const int BLOCK_SIZE = 16;
+ const int IV_SIZE = 16;
+ int blocks = 1 + (i_b0->current_length + 1) / BLOCK_SIZE;
+
+ /* pad packet in input buffer */
+ u8 pad_bytes = BLOCK_SIZE * blocks - 2 - i_b0->current_length;
+ u8 i;
+ u8 *padding =
+ vlib_buffer_get_current (i_b0) + i_b0->current_length;
+ i_b0->current_length = BLOCK_SIZE * blocks;
+ for (i = 0; i < pad_bytes; ++i)
+ {
+ padding[i] = i + 1;
+ }
+ f0 = vlib_buffer_get_current (i_b0) + i_b0->current_length - 2;
+ f0->pad_length = pad_bytes;
+ f0->next_header = next_hdr_type;
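+ /*
+ * Editor's note, worked example: for current_length == 100,
+ * blocks = 1 + 101/16 = 7, so the padded payload is 112 bytes and
+ * pad_bytes = 112 - 2 - 100 == 10.  RFC 4303 requires the pad bytes
+ * to hold the values 1, 2, ..., pad_bytes, which the loop above
+ * generates.
+ */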
+
+ o_b0->current_length = ip_hdr_size + sizeof (esp_header_t) +
+ BLOCK_SIZE * blocks + IV_SIZE;
+
+ vnet_buffer (o_b0)->sw_if_index[VLIB_RX] =
+ vnet_buffer (i_b0)->sw_if_index[VLIB_RX];
+
+ u8 iv[16];
+ RAND_bytes (iv, sizeof (iv));
+
+ clib_memcpy ((u8 *) vlib_buffer_get_current (o_b0) +
+ ip_hdr_size + sizeof (esp_header_t), iv, 16);
+
+ esp_encrypt_aes_cbc (sa0->crypto_alg,
+ (u8 *) vlib_buffer_get_current (i_b0),
+ (u8 *) vlib_buffer_get_current (o_b0) +
+ ip_hdr_size + sizeof (esp_header_t) +
+ IV_SIZE, BLOCK_SIZE * blocks,
+ sa0->crypto_key, iv);
+ }
+
+ o_b0->current_length += hmac_calc (sa0->integ_alg, sa0->integ_key,
+ sa0->integ_key_len,
+ (u8 *) o_esp0,
+ o_b0->current_length -
+ ip_hdr_size,
+ vlib_buffer_get_current (o_b0) +
+ o_b0->current_length,
+ sa0->use_esn, sa0->seq_hi);
+
+
+ if (PREDICT_FALSE (is_ipv6))
+ {
+ oh6_0->ip6.payload_length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, o_b0) -
+ sizeof (ip6_header_t));
+ }
+ else
+ {
+ oh0->ip4.length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, o_b0));
+ oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
+ }
+
+ if (transport_mode)
+ vlib_buffer_reset (o_b0);
+
+ trace:
+ if (PREDICT_FALSE (i_b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ if (o_b0)
+ {
+ o_b0->flags |= VLIB_BUFFER_IS_TRACED;
+ o_b0->trace_index = i_b0->trace_index;
+ esp_encrypt_trace_t *tr =
+ vlib_add_trace (vm, node, o_b0, sizeof (*tr));
+ tr->spi = sa0->spi;
+ tr->seq = sa0->seq - 1;
+ tr->crypto_alg = sa0->crypto_alg;
+ tr->integ_alg = sa0->integ_alg;
+ }
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, o_bi0,
+ next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, esp_encrypt_node.index,
+ ESP_ENCRYPT_ERROR_RX_PKTS,
+ from_frame->n_vectors);
+
+free_buffers_and_exit:
+ if (recycle)
+ vlib_buffer_free (vm, recycle, vec_len (recycle));
+ vec_free (recycle);
+ return from_frame->n_vectors;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (esp_encrypt_node) = {
+ .function = esp_encrypt_node_fn,
+ .name = "esp-encrypt",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_encrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
+ .error_strings = esp_encrypt_error_strings,
+
+ .n_next_nodes = ESP_ENCRYPT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
+ foreach_esp_encrypt_next
+#undef _
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (esp_encrypt_node, esp_encrypt_node_fn)
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ikev2.c b/src/vnet/ipsec/ikev2.c
new file mode 100644
index 00000000000..5a6c3674477
--- /dev/null
+++ b/src/vnet/ipsec/ikev2.c
@@ -0,0 +1,2186 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/udp.h>
+#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ikev2.h>
+#include <vnet/ipsec/ikev2_priv.h>
+
+static int ikev2_delete_tunnel_interface (vnet_main_t * vnm,
+ ikev2_sa_t * sa,
+ ikev2_child_sa_t * child);
+
+#define ikev2_set_state(sa, v) do { \
+ (sa)->state = v; \
+ clib_warning("sa state changed to " #v); \
+ } while(0)
+
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+} ikev2_trace_t;
+
+static u8 *
+format_ikev2_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ikev2_trace_t *t = va_arg (*args, ikev2_trace_t *);
+
+ s = format (s, "ikev2: sw_if_index %d, next index %d",
+ t->sw_if_index, t->next_index);
+ return s;
+}
+
+static vlib_node_registration_t ikev2_node;
+
+#define foreach_ikev2_error \
+_(PROCESSED, "IKEv2 packets processed") \
+_(IKE_SA_INIT_RETRANSMIT, "IKE_SA_INIT retransmit") \
+_(IKE_SA_INIT_IGNORE, "IKE_SA_INIT ignore (IKE SA already auth)") \
+_(IKE_REQ_RETRANSMIT, "IKE request retransmit") \
+_(IKE_REQ_IGNORE, "IKE request ignore (old msgid)") \
+_(NOT_IKEV2, "Non-IKEv2 packets received")
+
+typedef enum
+{
+#define _(sym,str) IKEV2_ERROR_##sym,
+ foreach_ikev2_error
+#undef _
+ IKEV2_N_ERROR,
+} ikev2_error_t;
+
+static char *ikev2_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ikev2_error
+#undef _
+};
+
+typedef enum
+{
+ IKEV2_NEXT_IP4_LOOKUP,
+ IKEV2_NEXT_ERROR_DROP,
+ IKEV2_N_NEXT,
+} ikev2_next_t;
+
+static ikev2_sa_transform_t *
+ikev2_find_transform_data (ikev2_sa_transform_t * t)
+{
+ ikev2_main_t *km = &ikev2_main;
+ ikev2_sa_transform_t *td;
+
+ vec_foreach (td, km->supported_transforms)
+ {
+ if (td->type != t->type)
+ continue;
+
+ if (td->transform_id != t->transform_id)
+ continue;
+
+ if (td->type == IKEV2_TRANSFORM_TYPE_ENCR)
+ {
+ if (vec_len (t->attrs) != 4 || t->attrs[0] != 0x80
+ || t->attrs[1] != 14)
+ continue;
+
+ if (((t->attrs[2] << 8 | t->attrs[3]) / 8) != td->key_len)
+ continue;
+ }
+ return td;
+ }
+ return 0;
+}
+
+static ikev2_sa_proposal_t *
+ikev2_select_proposal (ikev2_sa_proposal_t * proposals,
+ ikev2_protocol_id_t prot_id)
+{
+ ikev2_sa_proposal_t *rv = 0;
+ ikev2_sa_proposal_t *proposal;
+ ikev2_sa_transform_t *transform, *new_t;
+ u8 mandatory_bitmap, optional_bitmap;
+
+ if (prot_id == IKEV2_PROTOCOL_IKE)
+ {
+ mandatory_bitmap = (1 << IKEV2_TRANSFORM_TYPE_ENCR) |
+ (1 << IKEV2_TRANSFORM_TYPE_PRF) |
+ (1 << IKEV2_TRANSFORM_TYPE_INTEG) | (1 << IKEV2_TRANSFORM_TYPE_DH);
+ optional_bitmap = mandatory_bitmap;
+ }
+ else if (prot_id == IKEV2_PROTOCOL_ESP)
+ {
+ mandatory_bitmap = (1 << IKEV2_TRANSFORM_TYPE_ENCR) |
+ (1 << IKEV2_TRANSFORM_TYPE_ESN);
+ optional_bitmap = mandatory_bitmap |
+ (1 << IKEV2_TRANSFORM_TYPE_INTEG) | (1 << IKEV2_TRANSFORM_TYPE_DH);
+ }
+ else if (prot_id == IKEV2_PROTOCOL_AH)
+ {
+ mandatory_bitmap = (1 << IKEV2_TRANSFORM_TYPE_INTEG) |
+ (1 << IKEV2_TRANSFORM_TYPE_ESN);
+ optional_bitmap = mandatory_bitmap | (1 << IKEV2_TRANSFORM_TYPE_DH);
+ }
+ else
+ return 0;
+
+ vec_add2 (rv, proposal, 1);
+
+ vec_foreach (proposal, proposals)
+ {
+ u8 bitmap = 0;
+ if (proposal->protocol_id != prot_id)
+ continue;
+
+ vec_foreach (transform, proposal->transforms)
+ {
+ if ((1 << transform->type) & bitmap)
+ continue;
+
+ if (ikev2_find_transform_data (transform))
+ {
+ bitmap |= 1 << transform->type;
+ vec_add2 (rv->transforms, new_t, 1);
+ clib_memcpy (new_t, transform, sizeof (*new_t));
+ new_t->attrs = vec_dup (transform->attrs);
+ }
+ }
+
+ clib_warning ("bitmap is %x mandatory is %x optional is %x",
+ bitmap, mandatory_bitmap, optional_bitmap);
+
+ if ((bitmap & mandatory_bitmap) == mandatory_bitmap &&
+ (bitmap & ~optional_bitmap) == 0)
+ {
+ rv->proposal_num = proposal->proposal_num;
+ rv->protocol_id = proposal->protocol_id;
+ RAND_bytes ((u8 *) & rv->spi, sizeof (rv->spi));
+ goto done;
+ }
+ else
+ {
+ vec_free (rv->transforms);
+ }
+ }
+
+ vec_free (rv);
+done:
+ return rv;
+}
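+
+/*
+ * Editor's note: the selection above is a per-proposal bitmap match --
+ * every transform type we can honour sets one bit, and a proposal is
+ * accepted only when (bitmap & mandatory_bitmap) == mandatory_bitmap
+ * and no bit outside optional_bitmap is set.  For ESP, for instance,
+ * ENCR and ESN are mandatory while INTEG and DH are merely optional.
+ */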
+
+ikev2_sa_transform_t *
+ikev2_sa_get_td_for_type (ikev2_sa_proposal_t * p,
+ ikev2_transform_type_t type)
+{
+ ikev2_sa_transform_t *t;
+
+ if (!p)
+ return 0;
+
+ vec_foreach (t, p->transforms)
+ {
+ if (t->type == type)
+ return ikev2_find_transform_data (t);
+ }
+ return 0;
+}
+
+ikev2_child_sa_t *
+ikev2_sa_get_child (ikev2_sa_t * sa, u32 spi, ikev2_protocol_id_t prot_id)
+{
+ ikev2_child_sa_t *c;
+ vec_foreach (c, sa->childs)
+ {
+ if (c->i_proposals[0].spi == spi
+ && c->i_proposals[0].protocol_id == prot_id)
+ return c;
+ }
+
+ return 0;
+}
+
+void
+ikev2_sa_free_proposal_vector (ikev2_sa_proposal_t ** v)
+{
+ ikev2_sa_proposal_t *p;
+ ikev2_sa_transform_t *t;
+
+ if (!*v)
+ return;
+
+ vec_foreach (p, *v)
+ {
+ vec_foreach (t, p->transforms)
+ {
+ vec_free (t->attrs);
+ }
+ vec_free (p->transforms);
+ }
+ vec_free (*v);
+};
+
+static void
+ikev2_sa_free_all_child_sa (ikev2_child_sa_t ** childs)
+{
+ ikev2_child_sa_t *c;
+ vec_foreach (c, *childs)
+ {
+ ikev2_sa_free_proposal_vector (&c->r_proposals);
+ ikev2_sa_free_proposal_vector (&c->i_proposals);
+ vec_free (c->sk_ai);
+ vec_free (c->sk_ar);
+ vec_free (c->sk_ei);
+ vec_free (c->sk_er);
+ }
+
+ vec_free (*childs);
+}
+
+static void
+ikev2_sa_del_child_sa (ikev2_sa_t * sa, ikev2_child_sa_t * child)
+{
+ ikev2_sa_free_proposal_vector (&child->r_proposals);
+ ikev2_sa_free_proposal_vector (&child->i_proposals);
+ vec_free (child->sk_ai);
+ vec_free (child->sk_ar);
+ vec_free (child->sk_ei);
+ vec_free (child->sk_er);
+
+ vec_del1 (sa->childs, child - sa->childs);
+}
+
+static void
+ikev2_sa_free_all_vec (ikev2_sa_t * sa)
+{
+ vec_free (sa->i_nonce);
+ vec_free (sa->i_dh_data);
+ vec_free (sa->dh_shared_key);
+
+ ikev2_sa_free_proposal_vector (&sa->r_proposals);
+ ikev2_sa_free_proposal_vector (&sa->i_proposals);
+
+ vec_free (sa->sk_d);
+ vec_free (sa->sk_ai);
+ vec_free (sa->sk_ar);
+ vec_free (sa->sk_ei);
+ vec_free (sa->sk_er);
+ vec_free (sa->sk_pi);
+ vec_free (sa->sk_pr);
+
+ vec_free (sa->i_id.data);
+ vec_free (sa->i_auth.data);
+ vec_free (sa->r_id.data);
+ vec_free (sa->r_auth.data);
+ if (sa->r_auth.key)
+ EVP_PKEY_free (sa->r_auth.key);
+
+ vec_free (sa->del);
+
+ ikev2_sa_free_all_child_sa (&sa->childs);
+}
+
+static void
+ikev2_delete_sa (ikev2_sa_t * sa)
+{
+ ikev2_main_t *km = &ikev2_main;
+ u32 cpu_index = os_get_cpu_number ();
+ uword *p;
+
+ ikev2_sa_free_all_vec (sa);
+
+ p = hash_get (km->per_thread_data[cpu_index].sa_by_rspi, sa->rspi);
+ if (p)
+ {
+ hash_unset (km->per_thread_data[cpu_index].sa_by_rspi, sa->rspi);
+ pool_put (km->per_thread_data[cpu_index].sas, sa);
+ }
+}
+
+static void
+ikev2_generate_sa_init_data (ikev2_sa_t * sa)
+{
+ ikev2_sa_transform_t *t = 0, *t2;
+ ikev2_main_t *km = &ikev2_main;
+
+ if (sa->dh_group == IKEV2_TRANSFORM_DH_TYPE_NONE)
+ {
+ return;
+ }
+
+ /* check if received DH group is on our list of supported groups */
+ vec_foreach (t2, km->supported_transforms)
+ {
+ if (t2->type == IKEV2_TRANSFORM_TYPE_DH && sa->dh_group == t2->dh_type)
+ {
+ t = t2;
+ break;
+ }
+ }
+
+ if (!t)
+ {
+ clib_warning ("unknown dh data group %u (data len %u)", sa->dh_group,
+ vec_len (sa->i_dh_data));
+ sa->dh_group = IKEV2_TRANSFORM_DH_TYPE_NONE;
+ return;
+ }
+
+ /* generate rspi */
+ RAND_bytes ((u8 *) & sa->rspi, 8);
+
+ /* generate nonce */
+ sa->r_nonce = vec_new (u8, IKEV2_NONCE_SIZE);
+ RAND_bytes ((u8 *) sa->r_nonce, IKEV2_NONCE_SIZE);
+
+ /* generate dh keys */
+ ikev2_generate_dh (sa, t);
+}
+
+static void
+ikev2_calc_keys (ikev2_sa_t * sa)
+{
+ u8 *tmp;
+ /* calculate SKEYSEED = prf(Ni | Nr, g^ir) */
+ u8 *skeyseed = 0;
+ u8 *s = 0;
+ ikev2_sa_transform_t *tr_encr, *tr_prf, *tr_integ;
+ tr_encr =
+ ikev2_sa_get_td_for_type (sa->r_proposals, IKEV2_TRANSFORM_TYPE_ENCR);
+ tr_prf =
+ ikev2_sa_get_td_for_type (sa->r_proposals, IKEV2_TRANSFORM_TYPE_PRF);
+ tr_integ =
+ ikev2_sa_get_td_for_type (sa->r_proposals, IKEV2_TRANSFORM_TYPE_INTEG);
+
+ vec_append (s, sa->i_nonce);
+ vec_append (s, sa->r_nonce);
+ skeyseed = ikev2_calc_prf (tr_prf, s, sa->dh_shared_key);
+
+ /* Calculate S = Ni | Nr | SPIi | SPIr */
+ u64 *spi;
+ vec_add2 (s, tmp, 2 * sizeof (*spi));
+ spi = (u64 *) tmp;
+ spi[0] = clib_host_to_net_u64 (sa->ispi);
+ spi[1] = clib_host_to_net_u64 (sa->rspi);
+
+ /* calculate PRFplus */
+ u8 *keymat;
+ int len = tr_prf->key_trunc + /* SK_d */
+ tr_integ->key_len * 2 + /* SK_ai, SK_ar */
+ tr_encr->key_len * 2 + /* SK_ei, SK_er */
+ tr_prf->key_len * 2; /* SK_pi, SK_pr */
+
+ keymat = ikev2_calc_prfplus (tr_prf, skeyseed, s, len);
+ vec_free (skeyseed);
+ vec_free (s);
+
+ int pos = 0;
+
+ /* SK_d */
+ sa->sk_d = vec_new (u8, tr_prf->key_trunc);
+ clib_memcpy (sa->sk_d, keymat + pos, tr_prf->key_trunc);
+ pos += tr_prf->key_trunc;
+
+ /* SK_ai */
+ sa->sk_ai = vec_new (u8, tr_integ->key_len);
+ clib_memcpy (sa->sk_ai, keymat + pos, tr_integ->key_len);
+ pos += tr_integ->key_len;
+
+ /* SK_ar */
+ sa->sk_ar = vec_new (u8, tr_integ->key_len);
+ clib_memcpy (sa->sk_ar, keymat + pos, tr_integ->key_len);
+ pos += tr_integ->key_len;
+
+ /* SK_ei */
+ sa->sk_ei = vec_new (u8, tr_encr->key_len);
+ clib_memcpy (sa->sk_ei, keymat + pos, tr_encr->key_len);
+ pos += tr_encr->key_len;
+
+ /* SK_er */
+ sa->sk_er = vec_new (u8, tr_encr->key_len);
+ clib_memcpy (sa->sk_er, keymat + pos, tr_encr->key_len);
+ pos += tr_encr->key_len;
+
+ /* SK_pi */
+ sa->sk_pi = vec_new (u8, tr_prf->key_len);
+ clib_memcpy (sa->sk_pi, keymat + pos, tr_prf->key_len);
+ pos += tr_prf->key_len;
+
+ /* SK_pr */
+ sa->sk_pr = vec_new (u8, tr_prf->key_len);
+ clib_memcpy (sa->sk_pr, keymat + pos, tr_prf->key_len);
+ pos += tr_prf->key_len;
+
+ vec_free (keymat);
+}
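+
+/*
+ * Editor's note: the derivation above is RFC 7296 section 2.14:
+ *
+ *   SKEYSEED = prf(Ni | Nr, g^ir)
+ *   {SK_d | SK_ai | SK_ar | SK_ei | SK_er | SK_pi | SK_pr}
+ *            = prf+(SKEYSEED, Ni | Nr | SPIi | SPIr)
+ *
+ * with each key carved out of the keymat stream in exactly that order.
+ */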
+
+static void
+ikev2_calc_child_keys (ikev2_sa_t * sa, ikev2_child_sa_t * child)
+{
+ u8 *s = 0;
+ ikev2_sa_transform_t *tr_prf, *ctr_encr, *ctr_integ;
+ tr_prf =
+ ikev2_sa_get_td_for_type (sa->r_proposals, IKEV2_TRANSFORM_TYPE_PRF);
+ ctr_encr =
+ ikev2_sa_get_td_for_type (child->r_proposals, IKEV2_TRANSFORM_TYPE_ENCR);
+ ctr_integ =
+ ikev2_sa_get_td_for_type (child->r_proposals, IKEV2_TRANSFORM_TYPE_INTEG);
+
+ vec_append (s, sa->i_nonce);
+ vec_append (s, sa->r_nonce);
+ /* calculate PRFplus */
+ u8 *keymat;
+ int len = ctr_encr->key_len * 2 + ctr_integ->key_len * 2;
+
+ keymat = ikev2_calc_prfplus (tr_prf, sa->sk_d, s, len);
+
+ int pos = 0;
+
+ /* SK_ei */
+ child->sk_ei = vec_new (u8, ctr_encr->key_len);
+ clib_memcpy (child->sk_ei, keymat + pos, ctr_encr->key_len);
+ pos += ctr_encr->key_len;
+
+ /* SK_ai */
+ child->sk_ai = vec_new (u8, ctr_integ->key_len);
+ clib_memcpy (child->sk_ai, keymat + pos, ctr_integ->key_len);
+ pos += ctr_integ->key_len;
+
+ /* SK_er */
+ child->sk_er = vec_new (u8, ctr_encr->key_len);
+ clib_memcpy (child->sk_er, keymat + pos, ctr_encr->key_len);
+ pos += ctr_encr->key_len;
+
+ /* SK_ar */
+ child->sk_ar = vec_new (u8, ctr_integ->key_len);
+ clib_memcpy (child->sk_ar, keymat + pos, ctr_integ->key_len);
+ pos += ctr_integ->key_len;
+
+ ASSERT (pos == len);
+
+ vec_free (keymat);
+}
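+
+/*
+ * Editor's note: per RFC 7296 section 2.17 the child SA keys come from
+ * KEYMAT = prf+(SK_d, Ni | Nr), taken in the order SK_ei, SK_ai, SK_er,
+ * SK_ar -- all initiator-to-responder keys before the reverse
+ * direction -- which matches the carve-out above.
+ */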
+
+static void
+ikev2_process_sa_init_req (vlib_main_t * vm, ikev2_sa_t * sa,
+ ike_header_t * ike)
+{
+ int p = 0;
+ u32 len = clib_net_to_host_u32 (ike->length);
+ u8 payload = ike->nextpayload;
+
+ clib_warning ("ispi %lx rspi %lx nextpayload %x version %x "
+ "exchange %x flags %x msgid %x length %u",
+ clib_net_to_host_u64 (ike->ispi),
+ clib_net_to_host_u64 (ike->rspi),
+ payload, ike->version,
+ ike->exchange, ike->flags,
+ clib_net_to_host_u32 (ike->msgid), len);
+
+ sa->ispi = clib_net_to_host_u64 (ike->ispi);
+
+ /* store whole IKE payload - needed for PSK auth */
+ vec_free (sa->last_sa_init_req_packet_data);
+ vec_add (sa->last_sa_init_req_packet_data, ike, len);
+
+ while (p < len && payload != IKEV2_PAYLOAD_NONE)
+ {
+ ike_payload_header_t *ikep = (ike_payload_header_t *) & ike->payload[p];
+ u32 plen = clib_net_to_host_u16 (ikep->length);
+
+ if (plen < sizeof (ike_payload_header_t))
+ return;
+
+ if (payload == IKEV2_PAYLOAD_SA)
+ {
+ ikev2_sa_free_proposal_vector (&sa->i_proposals);
+ sa->i_proposals = ikev2_parse_sa_payload (ikep);
+ }
+ else if (payload == IKEV2_PAYLOAD_KE)
+ {
+ ike_ke_payload_header_t *ke = (ike_ke_payload_header_t *) ikep;
+ sa->dh_group = clib_net_to_host_u16 (ke->dh_group);
+ vec_free (sa->i_dh_data);
+ vec_add (sa->i_dh_data, ke->payload, plen - sizeof (*ke));
+ }
+ else if (payload == IKEV2_PAYLOAD_NONCE)
+ {
+ vec_free (sa->i_nonce);
+ vec_add (sa->i_nonce, ikep->payload, plen - sizeof (*ikep));
+ }
+ else if (payload == IKEV2_PAYLOAD_NOTIFY)
+ {
+ ikev2_notify_t *n = ikev2_parse_notify_payload (ikep);
+ vec_free (n);
+ }
+ else if (payload == IKEV2_PAYLOAD_VENDOR)
+ {
+ ikev2_parse_vendor_payload (ikep);
+ }
+ else
+ {
+ clib_warning ("unknown payload %u flags %x length %u", payload,
+ ikep->flags, plen);
+ if (ikep->flags & IKEV2_PAYLOAD_FLAG_CRITICAL)
+ {
+ ikev2_set_state (sa, IKEV2_STATE_NOTIFY_AND_DELETE);
+ sa->unsupported_cp = payload;
+ return;
+ }
+ }
+
+ payload = ikep->nextpayload;
+ p += plen;
+ }
+
+ ikev2_set_state (sa, IKEV2_STATE_SA_INIT);
+}
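+
+/*
+ * Editor's note: this and the other request parsers below walk the
+ * generic IKEv2 payload chain the same way -- each payload header names
+ * the type of the *next* payload and carries its own length, so the
+ * loop advances with payload = ikep->nextpayload; p += plen; until it
+ * reaches IKEV2_PAYLOAD_NONE.
+ */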
+
+static u8 *
+ikev2_decrypt_sk_payload (ikev2_sa_t * sa, ike_header_t * ike, u8 * payload)
+{
+ int p = 0;
+ u8 last_payload = 0;
+ u8 *hmac = 0;
+ u32 len = clib_net_to_host_u32 (ike->length);
+ ike_payload_header_t *ikep = 0;
+ u32 plen = 0;
+ ikev2_sa_transform_t *tr_integ;
+ tr_integ =
+ ikev2_sa_get_td_for_type (sa->r_proposals, IKEV2_TRANSFORM_TYPE_INTEG);
+
+ while (p < len &&
+ *payload != IKEV2_PAYLOAD_NONE && last_payload != IKEV2_PAYLOAD_SK)
+ {
+ ikep = (ike_payload_header_t *) & ike->payload[p];
+ plen = clib_net_to_host_u16 (ikep->length);
+
+ if (plen < sizeof (*ikep))
+ return 0;
+
+ if (*payload == IKEV2_PAYLOAD_SK)
+ {
+ clib_warning ("received IKEv2 payload SK, len %u", plen - 4);
+ last_payload = *payload;
+ }
+ else
+ {
+ clib_warning ("unknown payload %u flags %x length %u", payload,
+ ikep->flags, plen);
+ if (ikep->flags & IKEV2_PAYLOAD_FLAG_CRITICAL)
+ {
+ sa->unsupported_cp = *payload;
+ return 0;
+ }
+ }
+
+ *payload = ikep->nextpayload;
+ p += plen;
+ }
+
+ if (last_payload != IKEV2_PAYLOAD_SK)
+ {
+ clib_warning ("Last payload must be SK");
+ return 0;
+ }
+
+ hmac = ikev2_calc_integr (tr_integ, sa->sk_ai, (u8 *) ike,
+ len - tr_integ->key_trunc);
+
+ plen = plen - sizeof (*ikep) - tr_integ->key_trunc;
+
+ if (memcmp (hmac, &ikep->payload[plen], tr_integ->key_trunc))
+ {
+ clib_warning ("message integrity check failed");
+ vec_free (hmac);
+ return 0;
+ }
+ vec_free (hmac);
+
+ return ikev2_decrypt_data (sa, ikep->payload, plen);
+}
+
+static void
+ikev2_initial_contact_cleanup (ikev2_sa_t * sa)
+{
+ ikev2_main_t *km = &ikev2_main;
+ ikev2_sa_t *tmp;
+ u32 i, *delete = 0;
+ ikev2_child_sa_t *c;
+ u32 cpu_index = os_get_cpu_number ();
+
+ if (!sa->initial_contact)
+ return;
+
+ /* find old IKE SAs with the same authenticated identity */
+ /* *INDENT-OFF* */
+ pool_foreach (tmp, km->per_thread_data[cpu_index].sas, ({
+ if (tmp->i_id.type != sa->i_id.type ||
+ vec_len(tmp->i_id.data) != vec_len(sa->i_id.data) ||
+ memcmp(sa->i_id.data, tmp->i_id.data, vec_len(sa->i_id.data)))
+ continue;
+
+ if (sa->rspi != tmp->rspi)
+ vec_add1(delete, tmp - km->per_thread_data[cpu_index].sas);
+ }));
+ /* *INDENT-ON* */
+
+ for (i = 0; i < vec_len (delete); i++)
+ {
+ tmp = pool_elt_at_index (km->per_thread_data[cpu_index].sas, delete[i]);
+ vec_foreach (c, tmp->childs)
+ ikev2_delete_tunnel_interface (km->vnet_main, tmp, c);
+ ikev2_delete_sa (tmp);
+ }
+
+ vec_free (delete);
+ sa->initial_contact = 0;
+}
+
+static void
+ikev2_process_auth_req (vlib_main_t * vm, ikev2_sa_t * sa, ike_header_t * ike)
+{
+ ikev2_child_sa_t *first_child_sa;
+ int p = 0;
+ u32 len = clib_net_to_host_u32 (ike->length);
+ u8 payload = ike->nextpayload;
+ u8 *plaintext = 0;
+
+ ike_payload_header_t *ikep;
+ u32 plen;
+
+ clib_warning ("ispi %lx rspi %lx nextpayload %x version %x "
+ "exchange %x flags %x msgid %x length %u",
+ clib_net_to_host_u64 (ike->ispi),
+ clib_net_to_host_u64 (ike->rspi),
+ payload, ike->version,
+ ike->exchange, ike->flags,
+ clib_net_to_host_u32 (ike->msgid), len);
+
+ ikev2_calc_keys (sa);
+
+ plaintext = ikev2_decrypt_sk_payload (sa, ike, &payload);
+
+ if (!plaintext)
+ {
+ if (sa->unsupported_cp)
+ ikev2_set_state (sa, IKEV2_STATE_NOTIFY_AND_DELETE);
+ goto cleanup_and_exit;
+ }
+
+ /* create 1st child SA */
+ ikev2_sa_free_all_child_sa (&sa->childs);
+ vec_add2 (sa->childs, first_child_sa, 1);
+
+
+ /* process encrypted payload */
+ p = 0;
+ while (p < vec_len (plaintext) && payload != IKEV2_PAYLOAD_NONE)
+ {
+ ikep = (ike_payload_header_t *) & plaintext[p];
+ plen = clib_net_to_host_u16 (ikep->length);
+
+ if (plen < sizeof (ike_payload_header_t))
+ goto cleanup_and_exit;
+
+ if (payload == IKEV2_PAYLOAD_SA) /* 33 */
+ {
+ clib_warning ("received payload SA, len %u", plen - sizeof (*ikep));
+ ikev2_sa_free_proposal_vector (&first_child_sa->i_proposals);
+ first_child_sa->i_proposals = ikev2_parse_sa_payload (ikep);
+ }
+ else if (payload == IKEV2_PAYLOAD_IDI) /* 35 */
+ {
+ ike_id_payload_header_t *id = (ike_id_payload_header_t *) ikep;
+
+ sa->i_id.type = id->id_type;
+ vec_free (sa->i_id.data);
+ vec_add (sa->i_id.data, id->payload, plen - sizeof (*id));
+
+ clib_warning ("received payload IDi, len %u id_type %u",
+ plen - sizeof (*id), id->id_type);
+ }
+ else if (payload == IKEV2_PAYLOAD_AUTH) /* 39 */
+ {
+ ike_auth_payload_header_t *a = (ike_auth_payload_header_t *) ikep;
+
+ sa->i_auth.method = a->auth_method;
+ vec_free (sa->i_auth.data);
+ vec_add (sa->i_auth.data, a->payload, plen - sizeof (*a));
+
+ clib_warning ("received payload AUTH, len %u auth_type %u",
+ plen - sizeof (*a), a->auth_method);
+ }
+ else if (payload == IKEV2_PAYLOAD_NOTIFY) /* 41 */
+ {
+ ikev2_notify_t *n = ikev2_parse_notify_payload (ikep);
+ if (n->msg_type == IKEV2_NOTIFY_MSG_INITIAL_CONTACT)
+ {
+ sa->initial_contact = 1;
+ }
+ vec_free (n);
+ }
+ else if (payload == IKEV2_PAYLOAD_VENDOR) /* 43 */
+ {
+ ikev2_parse_vendor_payload (ikep);
+ }
+ else if (payload == IKEV2_PAYLOAD_TSI) /* 44 */
+ {
+ clib_warning ("received payload TSi, len %u",
+ plen - sizeof (*ikep));
+
+ vec_free (first_child_sa->tsi);
+ first_child_sa->tsi = ikev2_parse_ts_payload (ikep);
+ }
+ else if (payload == IKEV2_PAYLOAD_TSR) /* 45 */
+ {
+ clib_warning ("received payload TSr, len %u",
+ plen - sizeof (*ikep));
+
+ vec_free (first_child_sa->tsr);
+ first_child_sa->tsr = ikev2_parse_ts_payload (ikep);
+ }
+ else
+ {
+ clib_warning ("unknown payload %u flags %x length %u data %u",
+ payload, ikep->flags, plen - 4,
+ format_hex_bytes, ikep->payload, plen - 4);
+
+ if (ikep->flags & IKEV2_PAYLOAD_FLAG_CRITICAL)
+ {
+ ikev2_set_state (sa, IKEV2_STATE_NOTIFY_AND_DELETE);
+ sa->unsupported_cp = payload;
+ return;
+ }
+ }
+
+ payload = ikep->nextpayload;
+ p += plen;
+ }
+
+cleanup_and_exit:
+ vec_free (plaintext);
+}
+
+static void
+ikev2_process_informational_req (vlib_main_t * vm, ikev2_sa_t * sa,
+ ike_header_t * ike)
+{
+ int p = 0;
+ u32 len = clib_net_to_host_u32 (ike->length);
+ u8 payload = ike->nextpayload;
+ u8 *plaintext = 0;
+
+ ike_payload_header_t *ikep;
+ u32 plen;
+
+ clib_warning ("ispi %lx rspi %lx nextpayload %x version %x "
+ "exchange %x flags %x msgid %x length %u",
+ clib_net_to_host_u64 (ike->ispi),
+ clib_net_to_host_u64 (ike->rspi),
+ payload, ike->version,
+ ike->exchange, ike->flags,
+ clib_net_to_host_u32 (ike->msgid), len);
+
+ plaintext = ikev2_decrypt_sk_payload (sa, ike, &payload);
+
+ if (!plaintext)
+ goto cleanup_and_exit;
+
+ /* process encrypted payload */
+ p = 0;
+ while (p < vec_len (plaintext) && payload != IKEV2_PAYLOAD_NONE)
+ {
+ ikep = (ike_payload_header_t *) & plaintext[p];
+ plen = clib_net_to_host_u16 (ikep->length);
+
+ if (plen < sizeof (ike_payload_header_t))
+ goto cleanup_and_exit;
+
+ if (payload == IKEV2_PAYLOAD_NOTIFY) /* 41 */
+ {
+ ikev2_notify_t *n = ikev2_parse_notify_payload (ikep);
+ if (n->msg_type == IKEV2_NOTIFY_MSG_AUTHENTICATION_FAILED)
+ ikev2_set_state (sa, IKEV2_STATE_AUTH_FAILED);
+ vec_free (n);
+ }
+ else if (payload == IKEV2_PAYLOAD_DELETE) /* 42 */
+ {
+ sa->del = ikev2_parse_delete_payload (ikep);
+ }
+ else if (payload == IKEV2_PAYLOAD_VENDOR) /* 43 */
+ {
+ ikev2_parse_vendor_payload (ikep);
+ }
+ else
+ {
+ clib_warning ("unknown payload %u flags %x length %u data %u",
+ payload, ikep->flags, plen - 4,
+ format_hex_bytes, ikep->payload, plen - 4);
+
+ if (ikep->flags & IKEV2_PAYLOAD_FLAG_CRITICAL)
+ {
+ sa->unsupported_cp = payload;
+ return;
+ }
+ }
+
+ payload = ikep->nextpayload;
+ p += plen;
+ }
+
+cleanup_and_exit:
+ vec_free (plaintext);
+}
+
+static void
+ikev2_process_create_child_sa_req (vlib_main_t * vm, ikev2_sa_t * sa,
+ ike_header_t * ike)
+{
+ int p = 0;
+ u32 len = clib_net_to_host_u32 (ike->length);
+ u8 payload = ike->nextpayload;
+ u8 *plaintext = 0;
+ u8 rekeying = 0;
+ u8 i_nonce[IKEV2_NONCE_SIZE];
+
+ ike_payload_header_t *ikep;
+ u32 plen;
+ ikev2_notify_t *n = 0;
+ ikev2_ts_t *tsi = 0;
+ ikev2_ts_t *tsr = 0;
+ ikev2_sa_proposal_t *proposal = 0;
+ ikev2_child_sa_t *child_sa;
+
+ clib_warning ("ispi %lx rspi %lx nextpayload %x version %x "
+ "exchange %x flags %x msgid %x length %u",
+ clib_net_to_host_u64 (ike->ispi),
+ clib_net_to_host_u64 (ike->rspi),
+ payload, ike->version,
+ ike->exchange, ike->flags,
+ clib_net_to_host_u32 (ike->msgid), len);
+
+ plaintext = ikev2_decrypt_sk_payload (sa, ike, &payload);
+
+ if (!plaintext)
+ goto cleanup_and_exit;
+
+ /* process encrypted payload */
+ p = 0;
+ while (p < vec_len (plaintext) && payload != IKEV2_PAYLOAD_NONE)
+ {
+ ikep = (ike_payload_header_t *) & plaintext[p];
+ plen = clib_net_to_host_u16 (ikep->length);
+
+ if (plen < sizeof (ike_payload_header_t))
+ goto cleanup_and_exit;
+
+ if (payload == IKEV2_PAYLOAD_SA)
+ {
+ proposal = ikev2_parse_sa_payload (ikep);
+ }
+ else if (payload == IKEV2_PAYLOAD_NOTIFY)
+ {
+ n = ikev2_parse_notify_payload (ikep);
+ if (n->msg_type == IKEV2_NOTIFY_MSG_REKEY_SA)
+ {
+ rekeying = 1;
+ }
+ }
+ else if (payload == IKEV2_PAYLOAD_DELETE)
+ {
+ sa->del = ikev2_parse_delete_payload (ikep);
+ }
+ else if (payload == IKEV2_PAYLOAD_VENDOR)
+ {
+ ikev2_parse_vendor_payload (ikep);
+ }
+ else if (payload == IKEV2_PAYLOAD_NONCE)
+ {
+ clib_memcpy (i_nonce, ikep->payload, plen - sizeof (*ikep));
+ }
+ else if (payload == IKEV2_PAYLOAD_TSI)
+ {
+ tsi = ikev2_parse_ts_payload (ikep);
+ }
+ else if (payload == IKEV2_PAYLOAD_TSR)
+ {
+ tsr = ikev2_parse_ts_payload (ikep);
+ }
+ else
+ {
+ clib_warning ("unknown payload %u flags %x length %u data %u",
+ payload, ikep->flags, plen - 4,
+ format_hex_bytes, ikep->payload, plen - 4);
+
+ if (ikep->flags & IKEV2_PAYLOAD_FLAG_CRITICAL)
+ {
+ sa->unsupported_cp = payload;
+ return;
+ }
+ }
+
+ payload = ikep->nextpayload;
+ p += plen;
+ }
+
+ if (rekeying)
+ {
+ ikev2_rekey_t *rekey;
+ child_sa = ikev2_sa_get_child (sa, n->spi, n->protocol_id);
+ if (!child_sa)
+ {
+ clib_warning ("child SA spi %lx not found", n->spi);
+ goto cleanup_and_exit;
+ }
+ vec_add2 (sa->rekey, rekey, 1);
+ rekey->protocol_id = n->protocol_id;
+ rekey->spi = n->spi;
+ rekey->i_proposal = proposal;
+ rekey->r_proposal =
+ ikev2_select_proposal (proposal, IKEV2_PROTOCOL_ESP);
+ rekey->tsi = tsi;
+ rekey->tsr = tsr;
+ /* update Ni */
+ vec_free (sa->i_nonce);
+ vec_add (sa->i_nonce, i_nonce, IKEV2_NONCE_SIZE);
+ /* generate new Nr */
+ vec_free (sa->r_nonce);
+ sa->r_nonce = vec_new (u8, IKEV2_NONCE_SIZE);
+ RAND_bytes ((u8 *) sa->r_nonce, IKEV2_NONCE_SIZE);
+ }
+
+cleanup_and_exit:
+ vec_free (plaintext);
+ vec_free (n);
+}
+
+static u8 *
+ikev2_sa_generate_authmsg (ikev2_sa_t * sa, int is_responder)
+{
+ u8 *authmsg = 0;
+ u8 *data;
+ u8 *nonce;
+ ikev2_id_t *id;
+ u8 *key;
+ u8 *packet_data;
+ ikev2_sa_transform_t *tr_prf;
+
+ tr_prf =
+ ikev2_sa_get_td_for_type (sa->r_proposals, IKEV2_TRANSFORM_TYPE_PRF);
+
+ if (is_responder)
+ {
+ id = &sa->r_id;
+ key = sa->sk_pr;
+ nonce = sa->i_nonce;
+ packet_data = sa->last_sa_init_res_packet_data;
+ }
+ else
+ {
+ id = &sa->i_id;
+ key = sa->sk_pi;
+ nonce = sa->r_nonce;
+ packet_data = sa->last_sa_init_req_packet_data;
+ }
+
+ data = vec_new (u8, 4);
+ data[0] = id->type;
+ vec_append (data, id->data);
+
+ u8 *id_hash = ikev2_calc_prf (tr_prf, key, data);
+ vec_append (authmsg, packet_data);
+ vec_append (authmsg, nonce);
+ vec_append (authmsg, id_hash);
+ vec_free (id_hash);
+ vec_free (data);
+
+ return authmsg;
+}
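+
+/*
+ * Editor's note: this builds the signed octets of RFC 7296 section 2.15,
+ *
+ *   InitiatorSignedOctets = RealMessage1 | NonceRData | MACedIDForI
+ *   ResponderSignedOctets = RealMessage2 | NonceIData | MACedIDForR
+ *
+ * where MACedIDForX = prf(SK_pX, RestOfIDPayload); hence the responder
+ * branch above uses its own SA_INIT response, the initiator's nonce and
+ * SK_pr.
+ */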
+
+static int
+ikev2_ts_cmp (ikev2_ts_t * ts1, ikev2_ts_t * ts2)
+{
+ if (ts1->ts_type == ts2->ts_type && ts1->protocol_id == ts2->protocol_id &&
+ ts1->start_port == ts2->start_port && ts1->end_port == ts2->end_port &&
+ ts1->start_addr.as_u32 == ts2->start_addr.as_u32 &&
+ ts1->end_addr.as_u32 == ts2->end_addr.as_u32)
+ return 1;
+
+ return 0;
+}
+
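+/* Match the first child SA's traffic selectors against the configured
+ * profiles: narrow to the first selector pair that matches the profile
+ * whose remote id matches the initiator, or mark the SA TS_UNACCEPTABLE. */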
+static void
+ikev2_sa_match_ts (ikev2_sa_t * sa)
+{
+ ikev2_main_t *km = &ikev2_main;
+ ikev2_profile_t *p;
+ ikev2_ts_t *ts, *tsi = 0, *tsr = 0;
+
+ /* *INDENT-OFF* */
+ pool_foreach (p, km->profiles, ({
+
+ /* check id */
+ if (p->rem_id.type != sa->i_id.type ||
+ vec_len(p->rem_id.data) != vec_len(sa->i_id.data) ||
+ memcmp(p->rem_id.data, sa->i_id.data, vec_len(p->rem_id.data)))
+ continue;
+
+ vec_foreach(ts, sa->childs[0].tsi)
+ {
+ if (ikev2_ts_cmp(&p->rem_ts, ts))
+ {
+ tsi = vec_dup(ts);
+ break;
+ }
+ }
+
+ vec_foreach(ts, sa->childs[0].tsr)
+ {
+ if (ikev2_ts_cmp(&p->loc_ts, ts))
+ {
+ tsr = vec_dup(ts);
+ break;
+ }
+ }
+
+ break;
+ }));
+ /* *INDENT-ON* */
+
+ if (tsi && tsr)
+ {
+ vec_free (sa->childs[0].tsi);
+ vec_free (sa->childs[0].tsr);
+ sa->childs[0].tsi = tsi;
+ sa->childs[0].tsr = tsr;
+ }
+ else
+ {
+ vec_free (tsi);
+ vec_free (tsr);
+ ikev2_set_state (sa, IKEV2_STATE_TS_UNACCEPTABLE);
+ }
+}
+
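+/* Authenticate the initiator against the configured profiles. For
+ * shared-key auth the expected value is
+ *   AUTH = prf(prf(Shared Secret, "Key Pad for IKEv2"), <SignedOctets>)
+ * per RFC 7296, section 2.15; for rsa-sig the signature is verified
+ * against the profile's certificate. On success the responder's own
+ * AUTH data is generated the same way and the first child SA proposal
+ * is selected. */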
+static void
+ikev2_sa_auth (ikev2_sa_t * sa)
+{
+ ikev2_main_t *km = &ikev2_main;
+ ikev2_profile_t *p, *sel_p = 0;
+ u8 *authmsg, *key_pad, *psk = 0, *auth = 0;
+ ikev2_sa_transform_t *tr_prf;
+
+ tr_prf =
+ ikev2_sa_get_td_for_type (sa->r_proposals, IKEV2_TRANSFORM_TYPE_PRF);
+
+ /* only shared key and rsa signature */
+ if (!(sa->i_auth.method == IKEV2_AUTH_METHOD_SHARED_KEY_MIC ||
+ sa->i_auth.method == IKEV2_AUTH_METHOD_RSA_SIG))
+ {
+ clib_warning ("unsupported authentication method %u",
+ sa->i_auth.method);
+ ikev2_set_state (sa, IKEV2_STATE_AUTH_FAILED);
+ return;
+ }
+
+ key_pad = format (0, "%s", IKEV2_KEY_PAD);
+ authmsg = ikev2_sa_generate_authmsg (sa, 0);
+
+ /* *INDENT-OFF* */
+ pool_foreach (p, km->profiles, ({
+
+ /* check id */
+ if (p->rem_id.type != sa->i_id.type ||
+ vec_len(p->rem_id.data) != vec_len(sa->i_id.data) ||
+ memcmp(p->rem_id.data, sa->i_id.data, vec_len(p->rem_id.data)))
+ continue;
+
+ if (sa->i_auth.method == IKEV2_AUTH_METHOD_SHARED_KEY_MIC)
+ {
+ if (!p->auth.data ||
+ p->auth.method != IKEV2_AUTH_METHOD_SHARED_KEY_MIC)
+ continue;
+
+ psk = ikev2_calc_prf(tr_prf, p->auth.data, key_pad);
+ auth = ikev2_calc_prf(tr_prf, psk, authmsg);
+
+ if (vec_len(auth) == vec_len(sa->i_auth.data) &&
+ !memcmp(auth, sa->i_auth.data, vec_len(sa->i_auth.data)))
+ {
+ ikev2_set_state(sa, IKEV2_STATE_AUTHENTICATED);
+ vec_free(auth);
+ sel_p = p;
+ break;
+ }
+
+ }
+ else if (sa->i_auth.method == IKEV2_AUTH_METHOD_RSA_SIG)
+ {
+ if (p->auth.method != IKEV2_AUTH_METHOD_RSA_SIG)
+ continue;
+
+ if (ikev2_verify_sign(p->auth.key, sa->i_auth.data, authmsg) == 1)
+ {
+ ikev2_set_state(sa, IKEV2_STATE_AUTHENTICATED);
+ sel_p = p;
+ break;
+ }
+ }
+
+ vec_free(auth);
+ vec_free(psk);
+ }));
+ /* *INDENT-ON* */
+
+ vec_free (authmsg);
+
+ if (sa->state == IKEV2_STATE_AUTHENTICATED)
+ {
+ vec_free (sa->r_id.data);
+ sa->r_id.data = vec_dup (sel_p->loc_id.data);
+ sa->r_id.type = sel_p->loc_id.type;
+
+ /* generate our auth data */
+ authmsg = ikev2_sa_generate_authmsg (sa, 1);
+ if (sel_p->auth.method == IKEV2_AUTH_METHOD_SHARED_KEY_MIC)
+ {
+ sa->r_auth.data = ikev2_calc_prf (tr_prf, psk, authmsg);
+ sa->r_auth.method = IKEV2_AUTH_METHOD_SHARED_KEY_MIC;
+ }
+ else if (sel_p->auth.method == IKEV2_AUTH_METHOD_RSA_SIG)
+ {
+ sa->r_auth.data = ikev2_calc_sign (km->pkey, authmsg);
+ sa->r_auth.method = IKEV2_AUTH_METHOD_RSA_SIG;
+ }
+ vec_free (authmsg);
+
+ /* select transforms for 1st child sa */
+ ikev2_sa_free_proposal_vector (&sa->childs[0].r_proposals);
+ sa->childs[0].r_proposals =
+ ikev2_select_proposal (sa->childs[0].i_proposals, IKEV2_PROTOCOL_ESP);
+ }
+ else
+ {
+ ikev2_set_state (sa, IKEV2_STATE_AUTH_FAILED);
+ }
+ vec_free (psk);
+ vec_free (key_pad);
+}
+
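+/* Derive the child SA keys and program an IPsec tunnel interface from
+ * the negotiated ESP proposal. Only AES-CBC (128/192/256) with
+ * HMAC-SHA1-96 is accepted here; anything else rejects the proposal. */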
+static int
+ikev2_create_tunnel_interface (vnet_main_t * vnm, ikev2_sa_t * sa,
+ ikev2_child_sa_t * child)
+{
+ ipsec_add_del_tunnel_args_t a;
+ ikev2_sa_transform_t *tr;
+ u8 encr_type = 0;
+
+ if (!child->r_proposals)
+ {
+ ikev2_set_state (sa, IKEV2_STATE_NO_PROPOSAL_CHOSEN);
+ return 1;
+ }
+
+ memset (&a, 0, sizeof (a));
+ a.is_add = 1;
+ a.local_ip.as_u32 = sa->raddr.as_u32;
+ a.remote_ip.as_u32 = sa->iaddr.as_u32;
+ a.local_spi = child->i_proposals[0].spi;
+ a.remote_spi = child->r_proposals[0].spi;
+ a.anti_replay = 1;
+
+ tr =
+ ikev2_sa_get_td_for_type (child->r_proposals, IKEV2_TRANSFORM_TYPE_ESN);
+ if (tr)
+ a.esn = tr->esn_type;
+ else
+ a.esn = 0;
+
+ tr =
+ ikev2_sa_get_td_for_type (child->r_proposals, IKEV2_TRANSFORM_TYPE_ENCR);
+ if (tr)
+ {
+ if (tr->encr_type == IKEV2_TRANSFORM_ENCR_TYPE_AES_CBC && tr->key_len)
+ {
+ switch (tr->key_len)
+ {
+ case 16:
+ encr_type = IPSEC_CRYPTO_ALG_AES_CBC_128;
+ break;
+ case 24:
+ encr_type = IPSEC_CRYPTO_ALG_AES_CBC_192;
+ break;
+ case 32:
+ encr_type = IPSEC_CRYPTO_ALG_AES_CBC_256;
+ break;
+ default:
+ ikev2_set_state (sa, IKEV2_STATE_NO_PROPOSAL_CHOSEN);
+ return 1;
+ }
+ }
+ else
+ {
+ ikev2_set_state (sa, IKEV2_STATE_NO_PROPOSAL_CHOSEN);
+ return 1;
+ }
+ }
+ else
+ {
+ ikev2_set_state (sa, IKEV2_STATE_NO_PROPOSAL_CHOSEN);
+ return 1;
+ }
+
+ tr =
+ ikev2_sa_get_td_for_type (child->r_proposals, IKEV2_TRANSFORM_TYPE_INTEG);
+ if (tr)
+ {
+ if (tr->integ_type != IKEV2_TRANSFORM_INTEG_TYPE_AUTH_HMAC_SHA1_96)
+ {
+ ikev2_set_state (sa, IKEV2_STATE_NO_PROPOSAL_CHOSEN);
+ return 1;
+ }
+ }
+ else
+ {
+ ikev2_set_state (sa, IKEV2_STATE_NO_PROPOSAL_CHOSEN);
+ return 1;
+ }
+
+ ikev2_calc_child_keys (sa, child);
+
+ a.integ_alg = IPSEC_INTEG_ALG_SHA1_96;
+ a.local_integ_key_len = vec_len (child->sk_ar);
+ clib_memcpy (a.local_integ_key, child->sk_ar, a.local_integ_key_len);
+ a.remote_integ_key_len = vec_len (child->sk_ai);
+ clib_memcpy (a.remote_integ_key, child->sk_ai, a.remote_integ_key_len);
+
+ a.crypto_alg = encr_type;
+ a.local_crypto_key_len = vec_len (child->sk_er);
+ clib_memcpy (a.local_crypto_key, child->sk_er, a.local_crypto_key_len);
+ a.remote_crypto_key_len = vec_len (child->sk_ei);
+ clib_memcpy (a.remote_crypto_key, child->sk_ei, a.remote_crypto_key_len);
+
+ ipsec_add_del_tunnel_if (&a);
+
+ return 0;
+}
+
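+/* tear down the IPsec tunnel interface of a child SA; a no-op when no
+ * responder proposal was ever selected */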
+static int
+ikev2_delete_tunnel_interface (vnet_main_t * vnm, ikev2_sa_t * sa,
+ ikev2_child_sa_t * child)
+{
+ ipsec_add_del_tunnel_args_t a;
+
+ if (!vec_len (child->r_proposals))
+ return 0;
+
+ memset (&a, 0, sizeof (a));
+ a.is_add = 0;
+ a.local_ip.as_u32 = sa->raddr.as_u32;
+ a.remote_ip.as_u32 = sa->iaddr.as_u32;
+ a.local_spi = child->i_proposals[0].spi;
+ a.remote_spi = child->r_proposals[0].spi;
+
+ ipsec_add_del_tunnel_if (&a);
+ return 0;
+}
+
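+/* Build the response in place over the request buffer: payloads are
+ * chosen from the exchange type and SA state, IKE_SA_INIT replies go
+ * out in plaintext and everything else is wrapped in an encrypted,
+ * integrity-protected SK payload. Returns the total IKE message
+ * length, or 0 when no response should be sent. */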
+static u32
+ikev2_generate_resp (ikev2_sa_t * sa, ike_header_t * ike)
+{
+ v8 *integ = 0;
+ ike_payload_header_t *ph;
+ u16 plen;
+ u32 tlen = 0;
+
+ ikev2_sa_transform_t *tr_encr, *tr_integ;
+ tr_encr =
+ ikev2_sa_get_td_for_type (sa->r_proposals, IKEV2_TRANSFORM_TYPE_ENCR);
+ tr_integ =
+ ikev2_sa_get_td_for_type (sa->r_proposals, IKEV2_TRANSFORM_TYPE_INTEG);
+
+ ikev2_payload_chain_t *chain = 0;
+ ikev2_payload_new_chain (chain);
+
+ if (ike->exchange == IKEV2_EXCHANGE_SA_INIT)
+ {
+ if (sa->r_proposals == 0)
+ {
+ ikev2_payload_add_notify (chain,
+ IKEV2_NOTIFY_MSG_NO_PROPOSAL_CHOSEN, 0);
+ ikev2_set_state (sa, IKEV2_STATE_NOTIFY_AND_DELETE);
+ }
+ else if (sa->dh_group == IKEV2_TRANSFORM_DH_TYPE_NONE)
+ {
+ u8 *data = vec_new (u8, 2);
+ ikev2_sa_transform_t *tr_dh;
+ tr_dh =
+ ikev2_sa_get_td_for_type (sa->r_proposals,
+ IKEV2_TRANSFORM_TYPE_DH);
+ ASSERT (tr_dh && tr_dh->dh_type);
+
+ data[0] = (tr_dh->dh_type >> 8) & 0xff;
+ data[1] = (tr_dh->dh_type) & 0xff;
+
+ ikev2_payload_add_notify (chain,
+ IKEV2_NOTIFY_MSG_INVALID_KE_PAYLOAD,
+ data);
+ vec_free (data);
+ ikev2_set_state (sa, IKEV2_STATE_NOTIFY_AND_DELETE);
+ }
+ else if (sa->state == IKEV2_STATE_NOTIFY_AND_DELETE)
+ {
+ u8 *data = vec_new (u8, 1);
+
+ data[0] = sa->unsupported_cp;
+ ikev2_payload_add_notify (chain,
+ IKEV2_NOTIFY_MSG_UNSUPPORTED_CRITICAL_PAYLOAD,
+ data);
+ vec_free (data);
+ }
+ else
+ {
+ ike->rspi = clib_host_to_net_u64 (sa->rspi);
+ ikev2_payload_add_sa (chain, sa->r_proposals);
+ ikev2_payload_add_ke (chain, sa->dh_group, sa->r_dh_data);
+ ikev2_payload_add_nonce (chain, sa->r_nonce);
+ }
+ }
+ else if (ike->exchange == IKEV2_EXCHANGE_IKE_AUTH)
+ {
+ if (sa->state == IKEV2_STATE_AUTHENTICATED)
+ {
+ ikev2_payload_add_id (chain, &sa->r_id, IKEV2_PAYLOAD_IDR);
+ ikev2_payload_add_auth (chain, &sa->r_auth);
+ ikev2_payload_add_sa (chain, sa->childs[0].r_proposals);
+ ikev2_payload_add_ts (chain, sa->childs[0].tsi, IKEV2_PAYLOAD_TSI);
+ ikev2_payload_add_ts (chain, sa->childs[0].tsr, IKEV2_PAYLOAD_TSR);
+ }
+ else if (sa->state == IKEV2_STATE_AUTH_FAILED)
+ {
+ ikev2_payload_add_notify (chain,
+ IKEV2_NOTIFY_MSG_AUTHENTICATION_FAILED,
+ 0);
+ ikev2_set_state (sa, IKEV2_STATE_NOTIFY_AND_DELETE);
+ }
+ else if (sa->state == IKEV2_STATE_TS_UNACCEPTABLE)
+ {
+ ikev2_payload_add_notify (chain, IKEV2_NOTIFY_MSG_TS_UNACCEPTABLE,
+ 0);
+ ikev2_payload_add_id (chain, &sa->r_id, IKEV2_PAYLOAD_IDR);
+ ikev2_payload_add_auth (chain, &sa->r_auth);
+ }
+ else if (sa->state == IKEV2_STATE_NO_PROPOSAL_CHOSEN)
+ {
+ ikev2_payload_add_notify (chain,
+ IKEV2_NOTIFY_MSG_NO_PROPOSAL_CHOSEN, 0);
+ ikev2_payload_add_id (chain, &sa->r_id, IKEV2_PAYLOAD_IDR);
+ ikev2_payload_add_auth (chain, &sa->r_auth);
+ ikev2_payload_add_ts (chain, sa->childs[0].tsi, IKEV2_PAYLOAD_TSI);
+ ikev2_payload_add_ts (chain, sa->childs[0].tsr, IKEV2_PAYLOAD_TSR);
+ }
+ else if (sa->state == IKEV2_STATE_NOTIFY_AND_DELETE)
+ {
+ u8 *data = vec_new (u8, 1);
+
+ data[0] = sa->unsupported_cp;
+ ikev2_payload_add_notify (chain,
+ IKEV2_NOTIFY_MSG_UNSUPPORTED_CRITICAL_PAYLOAD,
+ data);
+ vec_free (data);
+ }
+ else
+ {
+ ikev2_set_state (sa, IKEV2_STATE_DELETED);
+ goto done;
+ }
+ }
+ else if (ike->exchange == IKEV2_EXCHANGE_INFORMATIONAL)
+ {
+ /* if pending delete */
+ if (sa->del)
+ {
+ /* The response to a request that deletes the IKE SA is an empty
+ INFORMATIONAL response. */
+ if (sa->del[0].protocol_id == IKEV2_PROTOCOL_IKE)
+ {
+ ikev2_set_state (sa, IKEV2_STATE_NOTIFY_AND_DELETE);
+ }
+ /* The response to a request that deletes ESP or AH SAs will contain
+ delete payloads for the paired SAs going in the other direction. */
+ else
+ {
+ ikev2_payload_add_delete (chain, sa->del);
+ }
+ vec_free (sa->del);
+ sa->del = 0;
+ }
+ /* received N(AUTHENTICATION_FAILED) */
+ else if (sa->state == IKEV2_STATE_AUTH_FAILED)
+ {
+ ikev2_set_state (sa, IKEV2_STATE_DELETED);
+ goto done;
+ }
+ /* received unsupported critical payload */
+ else if (sa->unsupported_cp)
+ {
+ u8 *data = vec_new (u8, 1);
+
+ data[0] = sa->unsupported_cp;
+ ikev2_payload_add_notify (chain,
+ IKEV2_NOTIFY_MSG_UNSUPPORTED_CRITICAL_PAYLOAD,
+ data);
+ vec_free (data);
+ sa->unsupported_cp = 0;
+ }
+ /* else send empty response */
+ }
+ else if (ike->exchange == IKEV2_EXCHANGE_CREATE_CHILD_SA)
+ {
+ if (sa->rekey)
+ {
+ ikev2_payload_add_sa (chain, sa->rekey[0].r_proposal);
+ ikev2_payload_add_nonce (chain, sa->r_nonce);
+ ikev2_payload_add_ts (chain, sa->rekey[0].tsi, IKEV2_PAYLOAD_TSI);
+ ikev2_payload_add_ts (chain, sa->rekey[0].tsr, IKEV2_PAYLOAD_TSR);
+ vec_del1 (sa->rekey, 0);
+ }
+ else if (sa->unsupported_cp)
+ {
+ u8 *data = vec_new (u8, 1);
+
+ data[0] = sa->unsupported_cp;
+ ikev2_payload_add_notify (chain,
+ IKEV2_NOTIFY_MSG_UNSUPPORTED_CRITICAL_PAYLOAD,
+ data);
+ vec_free (data);
+ sa->unsupported_cp = 0;
+ }
+ else
+ {
+ ikev2_payload_add_notify (chain, IKEV2_NOTIFY_MSG_NO_ADDITIONAL_SAS,
+ 0);
+ }
+ }
+
+ /* IKEv2 header */
+ ike->version = IKE_VERSION_2;
+ ike->flags = IKEV2_HDR_FLAG_RESPONSE;
+ ike->nextpayload = IKEV2_PAYLOAD_SK;
+ tlen = sizeof (*ike);
+
+ if (ike->exchange == IKEV2_EXCHANGE_SA_INIT)
+ {
+ tlen += vec_len (chain->data);
+ ike->nextpayload = chain->first_payload_type;
+ ike->length = clib_host_to_net_u32 (tlen);
+ clib_memcpy (ike->payload, chain->data, vec_len (chain->data));
+
+ /* store whole IKE payload - needed for PSK auth */
+ vec_free (sa->last_sa_init_res_packet_data);
+ vec_add (sa->last_sa_init_res_packet_data, ike, tlen);
+ }
+ else
+ {
+ ikev2_payload_chain_add_padding (chain, tr_encr->block_size);
+
+ /* SK payload */
+ plen = sizeof (*ph);
+ ph = (ike_payload_header_t *) & ike->payload[0];
+ ph->nextpayload = chain->first_payload_type;
+ ph->flags = 0;
+ int enc_len = ikev2_encrypt_data (sa, chain->data, ph->payload);
+ plen += enc_len;
+
+ /* add space for hmac */
+ plen += tr_integ->key_trunc;
+ tlen += plen;
+
+ /* payload and total length */
+ ph->length = clib_host_to_net_u16 (plen);
+ ike->length = clib_host_to_net_u32 (tlen);
+
+ /* calc integrity data for whole packet except hash itself */
+ integ = ikev2_calc_integr (tr_integ, sa->sk_ar, (u8 *) ike,
+ tlen - tr_integ->key_trunc);
+
+ clib_memcpy (ike->payload + tlen - tr_integ->key_trunc - sizeof (*ike),
+ integ, tr_integ->key_trunc);
+
+ /* store whole IKE payload - needed for retransmit */
+ vec_free (sa->last_res_packet_data);
+ vec_add (sa->last_res_packet_data, ike, tlen);
+ }
+
+done:
+ ikev2_payload_destroy_chain (chain);
+ vec_free (integ);
+ return tlen;
+}
+
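+/* IKE_SA_INIT retransmit detection: find a half-open SA with the same
+ * initiator SPI and addresses and compare the Ni nonce. Returns 1 when
+ * the stored response has been copied back into the buffer for
+ * resending, -1 when the request should be ignored, 0 for a new
+ * request. */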
+static int
+ikev2_retransmit_sa_init (ike_header_t * ike,
+ ip4_address_t iaddr, ip4_address_t raddr)
+{
+ ikev2_main_t *km = &ikev2_main;
+ ikev2_sa_t *sa;
+ u32 cpu_index = os_get_cpu_number ();
+
+ /* *INDENT-OFF* */
+ pool_foreach (sa, km->per_thread_data[cpu_index].sas, ({
+ if (sa->ispi == clib_net_to_host_u64(ike->ispi) &&
+ sa->iaddr.as_u32 == iaddr.as_u32 &&
+ sa->raddr.as_u32 == raddr.as_u32)
+ {
+ int p = 0;
+ u32 len = clib_net_to_host_u32(ike->length);
+ u8 payload = ike->nextpayload;
+
+ while (p < len && payload != IKEV2_PAYLOAD_NONE) {
+ ike_payload_header_t * ikep = (ike_payload_header_t *) &ike->payload[p];
+ u32 plen = clib_net_to_host_u16(ikep->length);
+
+ if (plen < sizeof(ike_payload_header_t))
+ return -1;
+
+ if (payload == IKEV2_PAYLOAD_NONCE)
+ {
+ if (!memcmp(sa->i_nonce, ikep->payload, plen - sizeof(*ikep)))
+ {
+ /* req is retransmit */
+ if (sa->state == IKEV2_STATE_SA_INIT)
+ {
+ ike_header_t * tmp;
+ tmp = (ike_header_t*)sa->last_sa_init_res_packet_data;
+ ike->ispi = tmp->ispi;
+ ike->rspi = tmp->rspi;
+ ike->nextpayload = tmp->nextpayload;
+ ike->version = tmp->version;
+ ike->exchange = tmp->exchange;
+ ike->flags = tmp->flags;
+ ike->msgid = tmp->msgid;
+ ike->length = tmp->length;
+ clib_memcpy(ike->payload, tmp->payload,
+ clib_net_to_host_u32(tmp->length) - sizeof(*ike));
+ clib_warning("IKE_SA_INIT retransmit from %U to %U",
+ format_ip4_address, &raddr,
+ format_ip4_address, &iaddr);
+ return 1;
+ }
+ /* else ignore req */
+ else
+ {
+ clib_warning("IKE_SA_INIT ignore from %U to %U",
+ format_ip4_address, &raddr,
+ format_ip4_address, &iaddr);
+ return -1;
+ }
+ }
+ }
+ payload = ikep->nextpayload;
+ p += plen;
+ }
+ }
+ }));
+ /* *INDENT-ON* */
+
+ /* req is not retransmit */
+ return 0;
+}
+
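+/* window-size-1 message id handling: 0 = new request, 1 = retransmitted
+ * request (stored response copied back into the buffer), -1 = old
+ * request, ignored */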
+static int
+ikev2_retransmit_resp (ikev2_sa_t * sa, ike_header_t * ike)
+{
+ u32 msg_id = clib_net_to_host_u32 (ike->msgid);
+
+ /* new req */
+ if (msg_id > sa->last_msg_id)
+ {
+ sa->last_msg_id = msg_id;
+ return 0;
+ }
+ /* retransmitted req */
+ else if (msg_id == sa->last_msg_id)
+ {
+ ike_header_t *tmp;
+ tmp = (ike_header_t *) sa->last_res_packet_data;
+ ike->ispi = tmp->ispi;
+ ike->rspi = tmp->rspi;
+ ike->nextpayload = tmp->nextpayload;
+ ike->version = tmp->version;
+ ike->exchange = tmp->exchange;
+ ike->flags = tmp->flags;
+ ike->msgid = tmp->msgid;
+ ike->length = tmp->length;
+ clib_memcpy (ike->payload, tmp->payload,
+ clib_net_to_host_u32 (tmp->length) - sizeof (*ike));
+ clib_warning ("IKE msgid %u retransmit from %U to %U",
+ msg_id,
+ format_ip4_address, &sa->raddr,
+ format_ip4_address, &sa->iaddr);
+ return 1;
+ }
+ /* old req ignore */
+ else
+ {
+ clib_warning ("IKE msgid %u req ignore from %U to %U",
+ msg_id,
+ format_ip4_address, &sa->raddr,
+ format_ip4_address, &sa->iaddr);
+ return -1;
+ }
+}
+
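+/* Per-packet IKEv2 responder node: dispatch on exchange type, build the
+ * response in place and, when there is one, rewrite the IP/UDP headers
+ * and send the buffer back through ip4-lookup. */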
+static uword
+ikev2_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ ikev2_next_t next_index;
+ ikev2_main_t *km = &ikev2_main;
+ u32 cpu_index = os_get_cpu_number ();
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0 = IKEV2_NEXT_ERROR_DROP;
+ u32 sw_if_index0;
+ ip4_header_t *ip40;
+ udp_header_t *udp0;
+ ike_header_t *ike0;
+ ikev2_sa_t *sa0 = 0;
+ ikev2_sa_t sa; /* temporary store for SA */
+ int len = 0;
+ int r;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ ike0 = vlib_buffer_get_current (b0);
+ vlib_buffer_advance (b0, -sizeof (*udp0));
+ udp0 = vlib_buffer_get_current (b0);
+ vlib_buffer_advance (b0, -sizeof (*ip40));
+ ip40 = vlib_buffer_get_current (b0);
+
+ if (ike0->version != IKE_VERSION_2)
+ {
+ vlib_node_increment_counter (vm, ikev2_node.index,
+ IKEV2_ERROR_NOT_IKEV2, 1);
+ goto dispatch0;
+ }
+
+ if (ike0->exchange == IKEV2_EXCHANGE_SA_INIT)
+ {
+ sa0 = &sa;
+ memset (sa0, 0, sizeof (*sa0));
+
+ if (ike0->rspi == 0)
+ {
+ sa0->raddr.as_u32 = ip40->dst_address.as_u32;
+ sa0->iaddr.as_u32 = ip40->src_address.as_u32;
+
+ r = ikev2_retransmit_sa_init (ike0, sa0->iaddr, sa0->raddr);
+ if (r == 1)
+ {
+ vlib_node_increment_counter (vm, ikev2_node.index,
+ IKEV2_ERROR_IKE_SA_INIT_RETRANSMIT,
+ 1);
+ len = clib_net_to_host_u32 (ike0->length);
+ goto dispatch0;
+ }
+ else if (r == -1)
+ {
+ vlib_node_increment_counter (vm, ikev2_node.index,
+ IKEV2_ERROR_IKE_SA_INIT_IGNORE,
+ 1);
+ goto dispatch0;
+ }
+
+ ikev2_process_sa_init_req (vm, sa0, ike0);
+
+ if (sa0->state == IKEV2_STATE_SA_INIT)
+ {
+ ikev2_sa_free_proposal_vector (&sa0->r_proposals);
+ sa0->r_proposals =
+ ikev2_select_proposal (sa0->i_proposals,
+ IKEV2_PROTOCOL_IKE);
+ ikev2_generate_sa_init_data (sa0);
+ }
+
+ if (sa0->state == IKEV2_STATE_SA_INIT ||
+ sa0->state == IKEV2_STATE_NOTIFY_AND_DELETE)
+ {
+ len = ikev2_generate_resp (sa0, ike0);
+ }
+
+ if (sa0->state == IKEV2_STATE_SA_INIT)
+ {
+ /* add SA to the pool */
+ pool_get (km->per_thread_data[cpu_index].sas, sa0);
+ clib_memcpy (sa0, &sa, sizeof (*sa0));
+ hash_set (km->per_thread_data[cpu_index].sa_by_rspi,
+ sa0->rspi,
+ sa0 - km->per_thread_data[cpu_index].sas);
+ }
+ else
+ {
+ ikev2_sa_free_all_vec (sa0);
+ }
+ }
+ }
+ else if (ike0->exchange == IKEV2_EXCHANGE_IKE_AUTH)
+ {
+ uword *p;
+ p = hash_get (km->per_thread_data[cpu_index].sa_by_rspi,
+ clib_net_to_host_u64 (ike0->rspi));
+ if (p)
+ {
+ sa0 = pool_elt_at_index (km->per_thread_data[cpu_index].sas,
+ p[0]);
+
+ r = ikev2_retransmit_resp (sa0, ike0);
+ if (r == 1)
+ {
+ vlib_node_increment_counter (vm, ikev2_node.index,
+ IKEV2_ERROR_IKE_REQ_RETRANSMIT,
+ 1);
+ len = clib_net_to_host_u32 (ike0->length);
+ goto dispatch0;
+ }
+ else if (r == -1)
+ {
+ vlib_node_increment_counter (vm, ikev2_node.index,
+ IKEV2_ERROR_IKE_REQ_IGNORE,
+ 1);
+ goto dispatch0;
+ }
+
+ ikev2_process_auth_req (vm, sa0, ike0);
+ ikev2_sa_auth (sa0);
+ if (sa0->state == IKEV2_STATE_AUTHENTICATED)
+ {
+ ikev2_initial_contact_cleanup (sa0);
+ ikev2_sa_match_ts (sa0);
+ if (sa0->state != IKEV2_STATE_TS_UNACCEPTABLE)
+ ikev2_create_tunnel_interface (km->vnet_main, sa0,
+ &sa0->childs[0]);
+ }
+ len = ikev2_generate_resp (sa0, ike0);
+ }
+ }
+ else if (ike0->exchange == IKEV2_EXCHANGE_INFORMATIONAL)
+ {
+ uword *p;
+ p = hash_get (km->per_thread_data[cpu_index].sa_by_rspi,
+ clib_net_to_host_u64 (ike0->rspi));
+ if (p)
+ {
+ sa0 = pool_elt_at_index (km->per_thread_data[cpu_index].sas,
+ p[0]);
+
+ r = ikev2_retransmit_resp (sa0, ike0);
+ if (r == 1)
+ {
+ vlib_node_increment_counter (vm, ikev2_node.index,
+ IKEV2_ERROR_IKE_REQ_RETRANSMIT,
+ 1);
+ len = clib_net_to_host_u32 (ike0->length);
+ goto dispatch0;
+ }
+ else if (r == -1)
+ {
+ vlib_node_increment_counter (vm, ikev2_node.index,
+ IKEV2_ERROR_IKE_REQ_IGNORE,
+ 1);
+ goto dispatch0;
+ }
+
+ ikev2_process_informational_req (vm, sa0, ike0);
+ if (sa0->del)
+ {
+ if (sa0->del[0].protocol_id != IKEV2_PROTOCOL_IKE)
+ {
+ ikev2_delete_t *d, *tmp, *resp = 0;
+ vec_foreach (d, sa0->del)
+ {
+ ikev2_child_sa_t *ch_sa;
+ ch_sa = ikev2_sa_get_child (sa0, d->spi,
+ d->protocol_id);
+ if (ch_sa)
+ {
+ ikev2_delete_tunnel_interface (km->vnet_main,
+ sa0, ch_sa);
+ vec_add2 (resp, tmp, 1);
+ tmp->protocol_id = d->protocol_id;
+ tmp->spi = ch_sa->r_proposals[0].spi;
+ ikev2_sa_del_child_sa (sa0, ch_sa);
+ }
+ }
+ vec_free (sa0->del);
+ sa0->del = resp;
+ }
+ }
+ len = ikev2_generate_resp (sa0, ike0);
+ }
+ }
+ else if (ike0->exchange == IKEV2_EXCHANGE_CREATE_CHILD_SA)
+ {
+ uword *p;
+ p = hash_get (km->per_thread_data[cpu_index].sa_by_rspi,
+ clib_net_to_host_u64 (ike0->rspi));
+ if (p)
+ {
+ sa0 = pool_elt_at_index (km->per_thread_data[cpu_index].sas,
+ p[0]);
+
+ r = ikev2_retransmit_resp (sa0, ike0);
+ if (r == 1)
+ {
+ vlib_node_increment_counter (vm, ikev2_node.index,
+ IKEV2_ERROR_IKE_REQ_RETRANSMIT,
+ 1);
+ len = clib_net_to_host_u32 (ike0->length);
+ goto dispatch0;
+ }
+ else if (r == -1)
+ {
+ vlib_node_increment_counter (vm, ikev2_node.index,
+ IKEV2_ERROR_IKE_REQ_IGNORE,
+ 1);
+ goto dispatch0;
+ }
+
+ ikev2_process_create_child_sa_req (vm, sa0, ike0);
+ if (sa0->rekey)
+ {
+ if (sa0->rekey[0].protocol_id != IKEV2_PROTOCOL_IKE)
+ {
+ ikev2_child_sa_t *child;
+ vec_add2 (sa0->childs, child, 1);
+ child->r_proposals = sa0->rekey[0].r_proposal;
+ child->i_proposals = sa0->rekey[0].i_proposal;
+ child->tsi = sa0->rekey[0].tsi;
+ child->tsr = sa0->rekey[0].tsr;
+ ikev2_create_tunnel_interface (km->vnet_main, sa0,
+ child);
+ }
+ len = ikev2_generate_resp (sa0, ike0);
+ }
+ }
+ }
+ else
+ {
+ clib_warning ("IKEv2 exchange %u packet received from %U to %U",
+ ike0->exchange,
+ format_ip4_address, ip40->src_address.as_u8,
+ format_ip4_address, ip40->dst_address.as_u8);
+ }
+
+ dispatch0:
+ /* if we are sending packet back, rewrite headers */
+ if (len)
+ {
+ next0 = IKEV2_NEXT_IP4_LOOKUP;
+ ip40->dst_address.as_u32 = sa0->iaddr.as_u32;
+ ip40->src_address.as_u32 = sa0->raddr.as_u32;
+ udp0->length =
+ clib_host_to_net_u16 (len + sizeof (udp_header_t));
+ udp0->checksum = 0;
+ b0->current_length =
+ len + sizeof (ip4_header_t) + sizeof (udp_header_t);
+ ip40->length = clib_host_to_net_u16 (b0->current_length);
+ ip40->checksum = ip4_header_checksum (ip40);
+ }
+ /* delete sa */
+ if (sa0 && (sa0->state == IKEV2_STATE_DELETED ||
+ sa0->state == IKEV2_STATE_NOTIFY_AND_DELETE))
+ {
+ ikev2_child_sa_t *c;
+
+ vec_foreach (c, sa0->childs)
+ ikev2_delete_tunnel_interface (km->vnet_main, sa0, c);
+
+ ikev2_delete_sa (sa0);
+ }
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ ikev2_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, ikev2_node.index,
+ IKEV2_ERROR_PROCESSED, frame->n_vectors);
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ikev2_node, static) = {
+ .function = ikev2_node_fn,
+ .name = "ikev2",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ikev2_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(ikev2_error_strings),
+ .error_strings = ikev2_error_strings,
+
+ .n_next_nodes = IKEV2_N_NEXT,
+
+ .next_nodes = {
+ [IKEV2_NEXT_IP4_LOOKUP] = "ip4-lookup",
+ [IKEV2_NEXT_ERROR_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+
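+/* despite the name, returns a pointer to the profile (0 when not
+ * found), not its pool index */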
+static ikev2_profile_t *
+ikev2_profile_index_by_name (u8 * name)
+{
+ ikev2_main_t *km = &ikev2_main;
+ uword *p;
+
+ p = mhash_get (&km->profile_index_by_name, name);
+ if (!p)
+ return 0;
+
+ return pool_elt_at_index (km->profiles, p[0]);
+}
+
+clib_error_t *
+ikev2_set_local_key (vlib_main_t * vm, u8 * file)
+{
+ ikev2_main_t *km = &ikev2_main;
+
+ km->pkey = ikev2_load_key_file (file);
+ if (km->pkey == NULL)
+ return clib_error_return (0, "load key '%s' failed", file);
+
+ return 0;
+}
+
+clib_error_t *
+ikev2_add_del_profile (vlib_main_t * vm, u8 * name, int is_add)
+{
+ ikev2_main_t *km = &ikev2_main;
+ ikev2_profile_t *p;
+
+ if (is_add)
+ {
+ if (ikev2_profile_index_by_name (name))
+ return clib_error_return (0, "policy %v already exists", name);
+
+ pool_get (km->profiles, p);
+ memset (p, 0, sizeof (*p));
+ p->name = vec_dup (name);
+ uword index = p - km->profiles;
+ mhash_set_mem (&km->profile_index_by_name, name, &index, 0);
+ }
+ else
+ {
+ p = ikev2_profile_index_by_name (name);
+ if (!p)
+ return clib_error_return (0, "policy %v does not exists", name);
+
+ vec_free (p->name);
+ pool_put (km->profiles, p);
+ mhash_unset (&km->profile_index_by_name, name, 0);
+ }
+ return 0;
+}
+
+clib_error_t *
+ikev2_set_profile_auth (vlib_main_t * vm, u8 * name, u8 auth_method,
+ u8 * auth_data, u8 data_hex_format)
+{
+ ikev2_profile_t *p;
+ clib_error_t *r;
+
+ p = ikev2_profile_index_by_name (name);
+
+ if (!p)
+ {
+ r = clib_error_return (0, "unknown profile %v", name);
+ return r;
+ }
+ vec_free (p->auth.data);
+ p->auth.method = auth_method;
+ p->auth.data = vec_dup (auth_data);
+ p->auth.hex = data_hex_format;
+
+ if (auth_method == IKEV2_AUTH_METHOD_RSA_SIG)
+ {
+ vec_add1 (p->auth.data, 0);
+ if (p->auth.key)
+ EVP_PKEY_free (p->auth.key);
+ p->auth.key = ikev2_load_cert_file (auth_data);
+ if (p->auth.key == NULL)
+ return clib_error_return (0, "load cert '%s' failed", auth_data);
+ }
+
+ return 0;
+}
+
+clib_error_t *
+ikev2_set_profile_id (vlib_main_t * vm, u8 * name, u8 id_type, u8 * data,
+ int is_local)
+{
+ ikev2_profile_t *p;
+ clib_error_t *r;
+
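+ /* ip6-addr and der-asn1-* identity types are not supported */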
+ if (id_type > IKEV2_ID_TYPE_ID_RFC822_ADDR
+ && id_type < IKEV2_ID_TYPE_ID_KEY_ID)
+ {
+ r = clib_error_return (0, "unsupported identity type %U",
+ format_ikev2_id_type, id_type);
+ return r;
+ }
+
+ p = ikev2_profile_index_by_name (name);
+
+ if (!p)
+ {
+ r = clib_error_return (0, "unknown profile %v", name);
+ return r;
+ }
+
+ if (is_local)
+ {
+ vec_free (p->loc_id.data);
+ p->loc_id.type = id_type;
+ p->loc_id.data = vec_dup (data);
+ }
+ else
+ {
+ vec_free (p->rem_id.data);
+ p->rem_id.type = id_type;
+ p->rem_id.data = vec_dup (data);
+ }
+
+ return 0;
+}
+
+clib_error_t *
+ikev2_set_profile_ts (vlib_main_t * vm, u8 * name, u8 protocol_id,
+ u16 start_port, u16 end_port, ip4_address_t start_addr,
+ ip4_address_t end_addr, int is_local)
+{
+ ikev2_profile_t *p;
+ clib_error_t *r;
+
+ p = ikev2_profile_index_by_name (name);
+
+ if (!p)
+ {
+ r = clib_error_return (0, "unknown profile %v", name);
+ return r;
+ }
+
+ if (is_local)
+ {
+ p->loc_ts.start_addr.as_u32 = start_addr.as_u32;
+ p->loc_ts.end_addr.as_u32 = end_addr.as_u32;
+ p->loc_ts.start_port = start_port;
+ p->loc_ts.end_port = end_port;
+ p->loc_ts.protocol_id = protocol_id;
+ p->loc_ts.ts_type = 7; /* TS_IPV4_ADDR_RANGE */
+ }
+ else
+ {
+ p->rem_ts.start_addr.as_u32 = start_addr.as_u32;
+ p->rem_ts.end_addr.as_u32 = end_addr.as_u32;
+ p->rem_ts.start_port = start_port;
+ p->rem_ts.end_port = end_port;
+ p->rem_ts.protocol_id = protocol_id;
+ p->rem_ts.ts_type = 7; /* TS_IPV4_ADDR_RANGE */
+ }
+
+ return 0;
+}
+
+
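+/* module init: create the per-thread SA pools and rspi lookup hashes,
+ * initialize the crypto code and claim UDP port 500 for IKE */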
+clib_error_t *
+ikev2_init (vlib_main_t * vm)
+{
+ ikev2_main_t *km = &ikev2_main;
+ clib_error_t *error;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ int thread_id;
+
+ memset (km, 0, sizeof (ikev2_main_t));
+ km->vnet_main = vnet_get_main ();
+ km->vlib_main = vm;
+
+ ikev2_crypto_init (km);
+
+ mhash_init_vec_string (&km->profile_index_by_name, sizeof (uword));
+
+ vec_validate (km->per_thread_data, tm->n_vlib_mains - 1);
+ for (thread_id = 0; thread_id < tm->n_vlib_mains; thread_id++)
+ {
+ km->per_thread_data[thread_id].sa_by_rspi =
+ hash_create (0, sizeof (uword));
+ }
+
+ if ((error = vlib_call_init_function (vm, ikev2_cli_init)))
+ return error;
+
+ udp_register_dst_port (vm, 500, ikev2_node.index, 1); /* IKE port, ip4 */
+
+ return 0;
+}
+
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ikev2.h b/src/vnet/ipsec/ikev2.h
new file mode 100644
index 00000000000..723fdde8c1c
--- /dev/null
+++ b/src/vnet/ipsec/ikev2.h
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_ikev2_h__
+#define __included_ikev2_h__
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+
+#include <vppinfra/error.h>
+
+#define IKEV2_NONCE_SIZE 32
+
+#define IKEV2_KEY_PAD "Key Pad for IKEv2"
+
+typedef u8 v8;
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u64 ispi;
+ u64 rspi;
+ u8 nextpayload;
+ u8 version;
+ u8 exchange;
+ u8 flags;
+ u32 msgid;
+ u32 length;
+ u8 payload[0];
+}) ike_header_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u8 nextpayload;
+ u8 flags;
+ u16 length;
+ u16 dh_group;
+ u8 reserved[2];
+ u8 payload[0];
+}) ike_ke_payload_header_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u8 nextpayload;
+ u8 flags;
+ u16 length;
+ u8 payload[0];
+}) ike_payload_header_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u8 nextpayload;
+ u8 flags;
+ u16 length;
+ u8 auth_method;
+ u8 reserved[3];
+ u8 payload[0];
+}) ike_auth_payload_header_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u8 nextpayload;
+ u8 flags;
+ u16 length;
+ u8 id_type;
+ u8 reserved[3];
+ u8 payload[0];
+}) ike_id_payload_header_t;
+/* *INDENT-ON* */
+
+#define IKE_VERSION_2 0x20
+
+#define IKEV2_EXCHANGE_SA_INIT 34
+#define IKEV2_EXCHANGE_IKE_AUTH 35
+#define IKEV2_EXCHANGE_CREATE_CHILD_SA 36
+#define IKEV2_EXCHANGE_INFORMATIONAL 37
+
+#define IKEV2_HDR_FLAG_INITIATOR (1<<3)
+#define IKEV2_HDR_FLAG_VERSION (1<<4)
+#define IKEV2_HDR_FLAG_RESPONSE (1<<5)
+
+#define IKEV2_PAYLOAD_FLAG_CRITICAL (1<<7)
+
+#define IKEV2_PAYLOAD_NONE 0
+#define IKEV2_PAYLOAD_SA 33
+#define IKEV2_PAYLOAD_KE 34
+#define IKEV2_PAYLOAD_IDI 35
+#define IKEV2_PAYLOAD_IDR 36
+#define IKEV2_PAYLOAD_AUTH 39
+#define IKEV2_PAYLOAD_NONCE 40
+#define IKEV2_PAYLOAD_NOTIFY 41
+#define IKEV2_PAYLOAD_DELETE 42
+#define IKEV2_PAYLOAD_VENDOR 43
+#define IKEV2_PAYLOAD_TSI 44
+#define IKEV2_PAYLOAD_TSR 45
+#define IKEV2_PAYLOAD_SK 46
+
+typedef enum
+{
+ IKEV2_PROTOCOL_IKE = 1,
+ IKEV2_PROTOCOL_AH = 2,
+ IKEV2_PROTOCOL_ESP = 3,
+} ikev2_protocol_id_t;
+
+#define foreach_ikev2_notify_msg_type \
+ _( 0, NONE) \
+ _( 1, UNSUPPORTED_CRITICAL_PAYLOAD) \
+ _( 4, INVALID_IKE_SPI) \
+ _( 5, INVALID_MAJOR_VERSION) \
+ _( 7, INVALID_SYNTAX) \
+ _( 8, INVALID_MESSAGE_ID) \
+ _( 11, INVALID_SPI) \
+ _( 14, NO_PROPOSAL_CHOSEN) \
+ _( 17, INVALID_KE_PAYLOAD) \
+ _( 24, AUTHENTICATION_FAILED) \
+ _( 34, SINGLE_PAIR_REQUIRED) \
+ _( 35, NO_ADDITIONAL_SAS) \
+ _( 36, INTERNAL_ADDRESS_FAILURE) \
+ _( 37, FAILED_CP_REQUIRED) \
+ _( 38, TS_UNACCEPTABLE) \
+ _( 39, INVALID_SELECTORS) \
+ _( 40, UNACCEPTABLE_ADDRESSES) \
+ _( 41, UNEXPECTED_NAT_DETECTED) \
+ _( 42, USE_ASSIGNED_HoA) \
+ _( 43, TEMPORARY_FAILURE) \
+ _( 44, CHILD_SA_NOT_FOUND) \
+ _( 45, INVALID_GROUP_ID) \
+ _( 46, AUTHORIZATION_FAILED) \
+ _(16384, INITIAL_CONTACT) \
+ _(16385, SET_WINDOW_SIZE) \
+ _(16386, ADDITIONAL_TS_POSSIBLE) \
+ _(16387, IPCOMP_SUPPORTED) \
+ _(16388, NAT_DETECTION_SOURCE_IP) \
+ _(16389, NAT_DETECTION_DESTINATION_IP) \
+ _(16390, COOKIE) \
+ _(16391, USE_TRANSPORT_MODE) \
+ _(16392, HTTP_CERT_LOOKUP_SUPPORTED) \
+ _(16393, REKEY_SA) \
+ _(16394, ESP_TFC_PADDING_NOT_SUPPORTED) \
+ _(16395, NON_FIRST_FRAGMENTS_ALSO) \
+ _(16396, MOBIKE_SUPPORTED) \
+ _(16397, ADDITIONAL_IP4_ADDRESS) \
+ _(16398, ADDITIONAL_IP6_ADDRESS) \
+ _(16399, NO_ADDITIONAL_ADDRESSES) \
+ _(16400, UPDATE_SA_ADDRESSES) \
+ _(16401, COOKIE2) \
+ _(16402, NO_NATS_ALLOWED) \
+ _(16403, AUTH_LIFETIME) \
+ _(16404, MULTIPLE_AUTH_SUPPORTED) \
+ _(16405, ANOTHER_AUTH_FOLLOWS) \
+ _(16406, REDIRECT_SUPPORTED) \
+ _(16407, REDIRECT) \
+ _(16408, REDIRECTED_FROM) \
+ _(16409, TICKET_LT_OPAQUE) \
+ _(16410, TICKET_REQUEST) \
+ _(16411, TICKET_ACK) \
+ _(16412, TICKET_NACK) \
+ _(16413, TICKET_OPAQUE) \
+ _(16414, LINK_ID) \
+ _(16415, USE_WESP_MODE) \
+ _(16416, ROHC_SUPPORTED) \
+ _(16417, EAP_ONLY_AUTHENTICATION) \
+ _(16418, CHILDLESS_IKEV2_SUPPORTED) \
+ _(16419, QUICK_CRASH_DETECTION) \
+ _(16420, IKEV2_MESSAGE_ID_SYNC_SUPPORTED) \
+ _(16421, IPSEC_REPLAY_COUNTER_SYNC_SUPPORTED) \
+ _(16422, IKEV2_MESSAGE_ID_SYNC) \
+ _(16423, IPSEC_REPLAY_COUNTER_SYNC) \
+ _(16424, SECURE_PASSWORD_METHODS) \
+ _(16425, PSK_PERSIST) \
+ _(16426, PSK_CONFIRM) \
+ _(16427, ERX_SUPPORTED) \
+ _(16428, IFOM_CAPABILITY) \
+ _(16429, SENDER_REQUEST_ID) \
+ _(16430, IKEV2_FRAGMENTATION_SUPPORTED) \
+ _(16431, SIGNATURE_HASH_ALGORITHMS)
+
+
+typedef enum
+{
+#define _(v,f) IKEV2_NOTIFY_MSG_##f = v,
+ foreach_ikev2_notify_msg_type
+#undef _
+} ikev2_notify_msg_type_t;
+
+#define foreach_ikev2_transform_type \
+ _(0, UNDEFINED, "undefinded") \
+ _(1, ENCR, "encr") \
+ _(2, PRF, "prf") \
+ _(3, INTEG, "integ") \
+ _(4, DH, "dh-group") \
+ _(5, ESN, "esn")
+
+typedef enum
+{
+#define _(v,f,s) IKEV2_TRANSFORM_TYPE_##f = v,
+ foreach_ikev2_transform_type
+#undef _
+ IKEV2_TRANSFORM_NUM_TYPES
+} ikev2_transform_type_t;
+
+
+#define foreach_ikev2_transform_encr_type \
+ _(1 , DES_IV64, "des-iv64") \
+ _(2 , DES, "des") \
+ _(3 , 3DES, "3des") \
+ _(4 , RC5, "rc5") \
+ _(5 , IDEA, "idea") \
+ _(6 , CAST, "cast") \
+ _(7 , BLOWFISH, "blowfish") \
+ _(8 , 3IDEA, "3idea") \
+ _(9 , DES_IV32, "des-iv32") \
+ _(11, NULL, "null") \
+ _(12, AES_CBC, "aes-cbc") \
+ _(13, AES_CTR, "aes-ctr")
+
+typedef enum
+{
+#define _(v,f,str) IKEV2_TRANSFORM_ENCR_TYPE_##f = v,
+ foreach_ikev2_transform_encr_type
+#undef _
+} ikev2_transform_encr_type_t;
+
+#define foreach_ikev2_transform_prf_type \
+ _(1, PRF_HMAC_MD5, "hmac-md5") \
+ _(2, PRF_HMAC_SHA1, "hmac-sha1") \
+ _(3, PRF_MAC_TIGER, "mac-tiger") \
+ _(4, PRF_AES128_XCBC, "aes128-xcbc") \
+ _(5, PRF_HMAC_SHA2_256, "hmac-sha2-256") \
+ _(6, PRF_HMAC_SHA2_384, "hmac-sha2-384") \
+ _(7, PRF_HMAC_SHA2_512, "hmac-sha2-512") \
+ _(8, PRF_AES128_CMAC, "aes128-cmac")
+
+typedef enum
+{
+#define _(v,f,str) IKEV2_TRANSFORM_PRF_TYPE_##f = v,
+ foreach_ikev2_transform_prf_type
+#undef _
+} ikev2_transform_prf_type_t;
+
+#define foreach_ikev2_transform_integ_type \
+ _(0, NONE, "none") \
+ _(1, AUTH_HMAC_MD5_96, "md5-96") \
+ _(2, AUTH_HMAC_SHA1_96, "sha1-96") \
+ _(3, AUTH_DES_MAC, "des-mac") \
+ _(4, AUTH_KPDK_MD5, "kpdk-md5") \
+ _(5, AUTH_AES_XCBC_96, "aes-xcbc-96") \
+ _(6, AUTH_HMAC_MD5_128, "md5-128") \
+ _(7, AUTH_HMAC_SHA1_160, "sha1-160") \
+ _(8, AUTH_AES_CMAC_96, "cmac-96") \
+ _(9, AUTH_AES_128_GMAC, "aes-128-gmac") \
+ _(10, AUTH_AES_192_GMAC, "aes-192-gmac") \
+ _(11, AUTH_AES_256_GMAC, "aes-256-gmac") \
+ _(12, AUTH_HMAC_SHA2_256_128, "hmac-sha2-256-128") \
+ _(13, AUTH_HMAC_SHA2_384_192, "hmac-sha2-384-192") \
+ _(14, AUTH_HMAC_SHA2_512_256, "hmac-sha2-512-256")
+
+typedef enum
+{
+#define _(v,f, str) IKEV2_TRANSFORM_INTEG_TYPE_##f = v,
+ foreach_ikev2_transform_integ_type
+#undef _
+} ikev2_transform_integ_type_t;
+
+#if defined(OPENSSL_NO_CISCO_FECDH)
+#define foreach_ikev2_transform_dh_type \
+ _(0, NONE, "none") \
+ _(1, MODP_768, "modp-768") \
+ _(2, MODP_1024, "modp-1024") \
+ _(5, MODP_1536, "modp-1536") \
+ _(14, MODP_2048, "modp-2048") \
+ _(15, MODP_3072, "modp-3072") \
+ _(16, MODP_4096, "modp-4096") \
+ _(17, MODP_6144, "modp-6144") \
+ _(18, MODP_8192, "modp-8192") \
+ _(19, ECP_256, "ecp-256") \
+ _(20, ECP_384, "ecp-384") \
+ _(21, ECP_521, "ecp-521") \
+ _(22, MODP_1024_160, "modp-1024-160") \
+ _(23, MODP_2048_224, "modp-2048-224") \
+ _(24, MODP_2048_256, "modp-2048-256") \
+ _(25, ECP_192, "ecp-192") \
+ _(26, ECP_224, "ecp-224") \
+ _(27, BRAINPOOL_224, "brainpool-224") \
+ _(28, BRAINPOOL_256, "brainpool-256") \
+ _(29, BRAINPOOL_384, "brainpool-384") \
+ _(30, BRAINPOOL_512, "brainpool-512")
+#else
+#define foreach_ikev2_transform_dh_type \
+ _(0, NONE, "none") \
+ _(1, MODP_768, "modp-768") \
+ _(2, MODP_1024, "modp-1024") \
+ _(5, MODP_1536, "modp-1536") \
+ _(14, MODP_2048, "modp-2048") \
+ _(15, MODP_3072, "modp-3072") \
+ _(16, MODP_4096, "modp-4096") \
+ _(17, MODP_6144, "modp-6144") \
+ _(18, MODP_8192, "modp-8192") \
+ _(19, ECP_256, "ecp-256") \
+ _(20, ECP_384, "ecp-384") \
+ _(21, ECP_521, "ecp-521") \
+ _(22, MODP_1024_160, "modp-1024-160") \
+ _(23, MODP_2048_224, "modp-2048-224") \
+ _(24, MODP_2048_256, "modp-2048-256") \
+ _(25, ECP_192, "ecp-192")
+#endif
+
+typedef enum
+{
+#define _(v,f, str) IKEV2_TRANSFORM_DH_TYPE_##f = v,
+ foreach_ikev2_transform_dh_type
+#undef _
+} ikev2_transform_dh_type_t;
+
+#define foreach_ikev2_transform_esn_type \
+ _(0, NO_ESN, "no") \
+ _(1, ESN, "yes")
+
+typedef enum
+{
+#define _(v,f,str) IKEV2_TRANSFORM_ESN_TYPE_##f = v,
+ foreach_ikev2_transform_esn_type
+#undef _
+} ikev2_transform_esn_type_t;
+
+#define foreach_ikev2_auth_method \
+ _( 1, RSA_SIG, "rsa-sig") \
+ _( 2, SHARED_KEY_MIC, "shared-key-mic")
+
+typedef enum
+{
+#define _(v,f,s) IKEV2_AUTH_METHOD_##f = v,
+ foreach_ikev2_auth_method
+#undef _
+} ikev2_auth_method_t;
+
+#define foreach_ikev2_id_type \
+ _( 1, ID_IPV4_ADDR, "ip4-addr") \
+ _( 2, ID_FQDN, "fqdn") \
+ _( 3, ID_RFC822_ADDR, "rfc822") \
+ _( 5, ID_IPV6_ADDR, "ip6-addr") \
+ _( 9, ID_DER_ASN1_DN, "der-asn1-dn") \
+ _(10, ID_DER_ASN1_GN, "der-asn1-gn") \
+ _(11, ID_KEY_ID, "key-id")
+
+typedef enum
+{
+#define _(v,f,s) IKEV2_ID_TYPE_##f = v,
+ foreach_ikev2_id_type
+#undef _
+} ikev2_id_type_t;
+
+clib_error_t *ikev2_init (vlib_main_t * vm);
+clib_error_t *ikev2_set_local_key (vlib_main_t * vm, u8 * file);
+clib_error_t *ikev2_add_del_profile (vlib_main_t * vm, u8 * name, int is_add);
+clib_error_t *ikev2_set_profile_auth (vlib_main_t * vm, u8 * name,
+ u8 auth_method, u8 * data,
+ u8 data_hex_format);
+clib_error_t *ikev2_set_profile_id (vlib_main_t * vm, u8 * name,
+ u8 id_type, u8 * data, int is_local);
+clib_error_t *ikev2_set_profile_ts (vlib_main_t * vm, u8 * name,
+ u8 protocol_id, u16 start_port,
+ u16 end_port, ip4_address_t start_addr,
+ ip4_address_t end_addr, int is_local);
+/* ikev2_format.c */
+u8 *format_ikev2_auth_method (u8 * s, va_list * args);
+u8 *format_ikev2_id_type (u8 * s, va_list * args);
+u8 *format_ikev2_transform_type (u8 * s, va_list * args);
+u8 *format_ikev2_notify_msg_type (u8 * s, va_list * args);
+u8 *format_ikev2_transform_encr_type (u8 * s, va_list * args);
+u8 *format_ikev2_transform_prf_type (u8 * s, va_list * args);
+u8 *format_ikev2_transform_integ_type (u8 * s, va_list * args);
+u8 *format_ikev2_transform_dh_type (u8 * s, va_list * args);
+u8 *format_ikev2_transform_esn_type (u8 * s, va_list * args);
+u8 *format_ikev2_sa_transform (u8 * s, va_list * args);
+
+uword unformat_ikev2_auth_method (unformat_input_t * input, va_list * args);
+uword unformat_ikev2_id_type (unformat_input_t * input, va_list * args);
+uword unformat_ikev2_transform_type (unformat_input_t * input,
+ va_list * args);
+uword unformat_ikev2_transform_encr_type (unformat_input_t * input,
+ va_list * args);
+uword unformat_ikev2_transform_prf_type (unformat_input_t * input,
+ va_list * args);
+uword unformat_ikev2_transform_integ_type (unformat_input_t * input,
+ va_list * args);
+uword unformat_ikev2_transform_dh_type (unformat_input_t * input,
+ va_list * args);
+uword unformat_ikev2_transform_esn_type (unformat_input_t * input,
+ va_list * args);
+
+#endif /* __included_ikev2_h__ */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ikev2_cli.c b/src/vnet/ipsec/ikev2_cli.c
new file mode 100644
index 00000000000..1369c187775
--- /dev/null
+++ b/src/vnet/ipsec/ikev2_cli.c
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/udp.h>
+#include <vnet/ipsec/ikev2.h>
+#include <vnet/ipsec/ikev2_priv.h>
+
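+/* pretty-print an IKEv2 identity: fqdn and rfc822 identities as
+ * strings, everything else as a hex dump */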
+u8 *
+format_ikev2_id_type_and_data (u8 * s, va_list * args)
+{
+ ikev2_id_t *id = va_arg (*args, ikev2_id_t *);
+
+ if (id->type == 0 || vec_len (id->data) == 0)
+ return format (s, "none");
+
+ s = format (s, "%U", format_ikev2_id_type, id->type);
+
+ if (id->type == IKEV2_ID_TYPE_ID_FQDN ||
+ id->type == IKEV2_ID_TYPE_ID_RFC822_ADDR)
+ {
+ s = format (s, " %v", id->data);
+ }
+ else
+ {
+ s = format (s, " %U", format_hex_bytes, id->data,
+ (uword) vec_len (id->data));
+ }
+
+ return s;
+}
+
+
+static clib_error_t *
+show_ikev2_sa_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ ikev2_main_t *km = &ikev2_main;
+ ikev2_main_per_thread_data_t *tkm;
+ ikev2_sa_t *sa;
+ ikev2_ts_t *ts;
+ ikev2_child_sa_t *child;
+ ikev2_sa_transform_t *tr;
+
+ vec_foreach (tkm, km->per_thread_data)
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (sa, tkm->sas, ({
+ u8 * s = 0;
+ vlib_cli_output(vm, " iip %U ispi %lx rip %U rspi %lx",
+ format_ip4_address, &sa->iaddr, sa->ispi,
+ format_ip4_address, &sa->raddr, sa->rspi);
+
+ tr = ikev2_sa_get_td_for_type(sa->r_proposals, IKEV2_TRANSFORM_TYPE_ENCR);
+ s = format(s, "%U ", format_ikev2_sa_transform, tr);
+
+ tr = ikev2_sa_get_td_for_type(sa->r_proposals, IKEV2_TRANSFORM_TYPE_PRF);
+ s = format(s, "%U ", format_ikev2_sa_transform, tr);
+
+ tr = ikev2_sa_get_td_for_type(sa->r_proposals, IKEV2_TRANSFORM_TYPE_INTEG);
+ s = format(s, "%U ", format_ikev2_sa_transform, tr);
+
+ tr = ikev2_sa_get_td_for_type(sa->r_proposals, IKEV2_TRANSFORM_TYPE_DH);
+ s = format(s, "%U ", format_ikev2_sa_transform, tr);
+
+ vlib_cli_output(vm, " %v", s);
+ vec_free(s);
+
+ vlib_cli_output(vm, " nonce i:%U\n r:%U",
+ format_hex_bytes, sa->i_nonce, vec_len(sa->i_nonce),
+ format_hex_bytes, sa->r_nonce, vec_len(sa->r_nonce));
+
+ vlib_cli_output(vm, " SK_d %U",
+ format_hex_bytes, sa->sk_d, vec_len(sa->sk_d));
+ vlib_cli_output(vm, " SK_a i:%U\n r:%U",
+ format_hex_bytes, sa->sk_ai, vec_len(sa->sk_ai),
+ format_hex_bytes, sa->sk_ar, vec_len(sa->sk_ar));
+ vlib_cli_output(vm, " SK_e i:%U\n r:%U",
+ format_hex_bytes, sa->sk_ei, vec_len(sa->sk_ei),
+ format_hex_bytes, sa->sk_er, vec_len(sa->sk_er));
+ vlib_cli_output(vm, " SK_p i:%U\n r:%U",
+ format_hex_bytes, sa->sk_pi, vec_len(sa->sk_pi),
+ format_hex_bytes, sa->sk_pr, vec_len(sa->sk_pr));
+
+ vlib_cli_output(vm, " identifier (i) %U",
+ format_ikev2_id_type_and_data, &sa->i_id);
+ vlib_cli_output(vm, " identifier (r) %U",
+ format_ikev2_id_type_and_data, &sa->r_id);
+
+ vec_foreach(child, sa->childs)
+ {
+ vlib_cli_output(vm, " child sa %u:", child - sa->childs);
+
+ tr = ikev2_sa_get_td_for_type(child->r_proposals, IKEV2_TRANSFORM_TYPE_ENCR);
+ s = format(s, "%U ", format_ikev2_sa_transform, tr);
+
+ tr = ikev2_sa_get_td_for_type(child->r_proposals, IKEV2_TRANSFORM_TYPE_INTEG);
+ s = format(s, "%U ", format_ikev2_sa_transform, tr);
+
+ tr = ikev2_sa_get_td_for_type(child->r_proposals, IKEV2_TRANSFORM_TYPE_ESN);
+ s = format(s, "%U ", format_ikev2_sa_transform, tr);
+
+ vlib_cli_output(vm, " %v", s);
+ vec_free(s);
+
+ vlib_cli_output(vm, " spi(i) %lx spi(r) %lx",
+ child->i_proposals ? child->i_proposals[0].spi : 0,
+ child->r_proposals ? child->r_proposals[0].spi : 0);
+
+ vlib_cli_output(vm, " SK_e i:%U\n r:%U",
+ format_hex_bytes, child->sk_ei, vec_len(child->sk_ei),
+ format_hex_bytes, child->sk_er, vec_len(child->sk_er));
+ vlib_cli_output(vm, " SK_a i:%U\n r:%U",
+ format_hex_bytes, child->sk_ai, vec_len(child->sk_ai),
+ format_hex_bytes, child->sk_ar, vec_len(child->sk_ar));
+ vlib_cli_output(vm, " traffic selectors (i):");
+ vec_foreach(ts, child->tsi)
+ {
+ vlib_cli_output(vm, " %u type %u protocol_id %u addr "
+ "%U - %U port %u - %u",
+ ts - child->tsi,
+ ts->ts_type, ts->protocol_id,
+ format_ip4_address, &ts->start_addr,
+ format_ip4_address, &ts->end_addr,
+ clib_net_to_host_u16( ts->start_port),
+ clib_net_to_host_u16( ts->end_port));
+ }
+ vlib_cli_output(vm, " traffic selectors (r):");
+ vec_foreach(ts, child->tsr)
+ {
+ vlib_cli_output(vm, " %u type %u protocol_id %u addr "
+ "%U - %U port %u - %u",
+ ts - child->tsr,
+ ts->ts_type, ts->protocol_id,
+ format_ip4_address, &ts->start_addr,
+ format_ip4_address, &ts->end_addr,
+ clib_net_to_host_u16( ts->start_port),
+ clib_net_to_host_u16( ts->end_port));
+ }
+ }
+ vlib_cli_output(vm, "");
+ }));
+ /* *INDENT-ON* */
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_ikev2_sa_command, static) = {
+ .path = "show ikev2 sa",
+ .short_help = "show ikev2 sa",
+ .function = show_ikev2_sa_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+ikev2_profile_add_del_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *name = 0;
+ clib_error_t *r = 0;
+ u32 id_type;
+ u8 *data = 0;
+ u32 tmp1, tmp2, tmp3;
+ ip4_address_t ip4;
+ ip4_address_t end_addr;
+
+ const char *valid_chars = "a-zA-Z0-9_";
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "add %U", unformat_token, valid_chars, &name))
+ {
+ r = ikev2_add_del_profile (vm, name, 1);
+ goto done;
+ }
+ else
+ if (unformat
+ (line_input, "del %U", unformat_token, valid_chars, &name))
+ {
+ r = ikev2_add_del_profile (vm, name, 0);
+ goto done;
+ }
+ else if (unformat (line_input, "set %U auth shared-key-mic string %v",
+ unformat_token, valid_chars, &name, &data))
+ {
+ r =
+ ikev2_set_profile_auth (vm, name,
+ IKEV2_AUTH_METHOD_SHARED_KEY_MIC, data,
+ 0);
+ goto done;
+ }
+ else if (unformat (line_input, "set %U auth shared-key-mic hex %U",
+ unformat_token, valid_chars, &name,
+ unformat_hex_string, &data))
+ {
+ r =
+ ikev2_set_profile_auth (vm, name,
+ IKEV2_AUTH_METHOD_SHARED_KEY_MIC, data,
+ 1);
+ goto done;
+ }
+ else if (unformat (line_input, "set %U auth rsa-sig cert-file %v",
+ unformat_token, valid_chars, &name, &data))
+ {
+ r =
+ ikev2_set_profile_auth (vm, name, IKEV2_AUTH_METHOD_RSA_SIG, data,
+ 0);
+ goto done;
+ }
+ else if (unformat (line_input, "set %U id local %U %U",
+ unformat_token, valid_chars, &name,
+ unformat_ikev2_id_type, &id_type,
+ unformat_ip4_address, &ip4))
+ {
+ data = vec_new (u8, 4);
+ clib_memcpy (data, ip4.as_u8, 4);
+ r =
+ ikev2_set_profile_id (vm, name, (u8) id_type, data, /*local */ 1);
+ goto done;
+ }
+ else if (unformat (line_input, "set %U id local %U 0x%U",
+ unformat_token, valid_chars, &name,
+ unformat_ikev2_id_type, &id_type,
+ unformat_hex_string, &data))
+ {
+ r =
+ ikev2_set_profile_id (vm, name, (u8) id_type, data, /*local */ 1);
+ goto done;
+ }
+ else if (unformat (line_input, "set %U id local %U %v",
+ unformat_token, valid_chars, &name,
+ unformat_ikev2_id_type, &id_type, &data))
+ {
+ r =
+ ikev2_set_profile_id (vm, name, (u8) id_type, data, /*local */ 1);
+ goto done;
+ }
+ else if (unformat (line_input, "set %U id remote %U %U",
+ unformat_token, valid_chars, &name,
+ unformat_ikev2_id_type, &id_type,
+ unformat_ip4_address, &ip4))
+ {
+ data = vec_new (u8, 4);
+ clib_memcpy (data, ip4.as_u8, 4);
+ r = ikev2_set_profile_id (vm, name, (u8) id_type, data, /*remote */
+ 0);
+ goto done;
+ }
+ else if (unformat (line_input, "set %U id remote %U 0x%U",
+ unformat_token, valid_chars, &name,
+ unformat_ikev2_id_type, &id_type,
+ unformat_hex_string, &data))
+ {
+ r = ikev2_set_profile_id (vm, name, (u8) id_type, data, /*remote */
+ 0);
+ goto done;
+ }
+ else if (unformat (line_input, "set %U id remote %U %v",
+ unformat_token, valid_chars, &name,
+ unformat_ikev2_id_type, &id_type, &data))
+ {
+ r = ikev2_set_profile_id (vm, name, (u8) id_type, data, /*remote */
+ 0);
+ goto done;
+ }
+ else if (unformat (line_input, "set %U traffic-selector local "
+ "ip-range %U - %U port-range %u - %u protocol %u",
+ unformat_token, valid_chars, &name,
+ unformat_ip4_address, &ip4,
+ unformat_ip4_address, &end_addr,
+ &tmp1, &tmp2, &tmp3))
+ {
+ r =
+ ikev2_set_profile_ts (vm, name, (u8) tmp3, (u16) tmp1, (u16) tmp2,
+ ip4, end_addr, /*local */ 1);
+ goto done;
+ }
+ else if (unformat (line_input, "set %U traffic-selector remote "
+ "ip-range %U - %U port-range %u - %u protocol %u",
+ unformat_token, valid_chars, &name,
+ unformat_ip4_address, &ip4,
+ unformat_ip4_address, &end_addr,
+ &tmp1, &tmp2, &tmp3))
+ {
+ r =
+ ikev2_set_profile_ts (vm, name, (u8) tmp3, (u16) tmp1, (u16) tmp2,
+ ip4, end_addr, /*remote */ 0);
+ goto done;
+ }
+ else
+ break;
+ }
+
+ r = clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+
+done:
+ vec_free (name);
+ vec_free (data);
+ unformat_free (line_input);
+ return r;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ikev2_profile_add_del_command, static) = {
+ .path = "ikev2 profile",
+ .short_help =
+ "ikev2 profile [add|del] <id>\n"
+ "ikev2 profile set <id> auth [rsa-sig|shared-key-mic] [cert-file|string|hex]"
+ " <data>\n"
+ "ikev2 profile set <id> id <local|remote> <type> <data>\n"
+ "ikev2 profile set <id> traffic-selector <local|remote> ip-range "
+ "<start-addr> - <end-addr> port-range <start-port> - <end-port> "
+ "protocol <protocol-number>",
+ .function = ikev2_profile_add_del_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+show_ikev2_profile_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ ikev2_main_t *km = &ikev2_main;
+ ikev2_profile_t *p;
+
+ /* *INDENT-OFF* */
+ pool_foreach (p, km->profiles, ({
+ vlib_cli_output(vm, "profile %v", p->name);
+
+ if (p->auth.data)
+ {
+ if (p->auth.hex)
+ vlib_cli_output(vm, " auth-method %U auth data 0x%U",
+ format_ikev2_auth_method, p->auth.method,
+ format_hex_bytes, p->auth.data, vec_len(p->auth.data));
+ else
+ vlib_cli_output(vm, " auth-method %U auth data %v",
+ format_ikev2_auth_method, p->auth.method, p->auth.data);
+ }
+
+ if (p->loc_id.data)
+ {
+ if (p->loc_id.type == IKEV2_ID_TYPE_ID_IPV4_ADDR)
+ vlib_cli_output(vm, " local id-type %U data %U",
+ format_ikev2_id_type, p->loc_id.type,
+ format_ip4_address, p->loc_id.data);
+ else if (p->loc_id.type == IKEV2_ID_TYPE_ID_KEY_ID)
+ vlib_cli_output(vm, " local id-type %U data 0x%U",
+ format_ikev2_id_type, p->loc_id.type,
+ format_hex_bytes, p->loc_id.data,
+ vec_len(p->loc_id.data));
+ else
+ vlib_cli_output(vm, " local id-type %U data %v",
+ format_ikev2_id_type, p->loc_id.type, p->loc_id.data);
+ }
+
+ if (p->rem_id.data)
+ {
+ if (p->rem_id.type == IKEV2_ID_TYPE_ID_IPV4_ADDR)
+ vlib_cli_output(vm, " remote id-type %U data %U",
+ format_ikev2_id_type, p->rem_id.type,
+ format_ip4_address, p->rem_id.data);
+ else if (p->rem_id.type == IKEV2_ID_TYPE_ID_KEY_ID)
+ vlib_cli_output(vm, " remote id-type %U data 0x%U",
+ format_ikev2_id_type, p->rem_id.type,
+ format_hex_bytes, p->rem_id.data,
+ vec_len(p->rem_id.data));
+ else
+ vlib_cli_output(vm, " remote id-type %U data %v",
+ format_ikev2_id_type, p->rem_id.type, p->rem_id.data);
+ }
+
+ if (p->loc_ts.end_addr.as_u32)
+ vlib_cli_output(vm, " local traffic-selector addr %U - %U port %u - %u"
+ " protocol %u",
+ format_ip4_address, &p->loc_ts.start_addr,
+ format_ip4_address, &p->loc_ts.end_addr,
+ p->loc_ts.start_port, p->loc_ts.end_port,
+ p->loc_ts.protocol_id);
+
+ if (p->rem_ts.end_addr.as_u32)
+ vlib_cli_output(vm, " remote traffic-selector addr %U - %U port %u - %u"
+ " protocol %u",
+ format_ip4_address, &p->rem_ts.start_addr,
+ format_ip4_address, &p->rem_ts.end_addr,
+ p->rem_ts.start_port, p->rem_ts.end_port,
+ p->rem_ts.protocol_id);
+ }));
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_ikev2_profile_command, static) = {
+ .path = "show ikev2 profile",
+ .short_help = "show ikev2 profile",
+ .function = show_ikev2_profile_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_ikev2_local_key_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ clib_error_t *r = 0;
+ u8 *data = 0;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "%s", &data))
+ {
+ r = ikev2_set_local_key (vm, data);
+ goto done;
+ }
+ else
+ break;
+ }
+
+ r = clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+
+done:
+ vec_free (data);
+ unformat_free (line_input);
+ return r;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_ikev2_local_key_command, static) = {
+ .path = "set ikev2 local key",
+ .short_help =
+ "set ikev2 local key <file>",
+ .function = set_ikev2_local_key_command_fn,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+ikev2_cli_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ikev2_cli_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ikev2_crypto.c b/src/vnet/ipsec/ikev2_crypto.c
new file mode 100644
index 00000000000..32927629c5e
--- /dev/null
+++ b/src/vnet/ipsec/ikev2_crypto.c
@@ -0,0 +1,765 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/udp.h>
+#include <vnet/ipsec/ikev2.h>
+#include <vnet/ipsec/ikev2_priv.h>
+#include <openssl/obj_mac.h>
+#include <openssl/ec.h>
+#include <openssl/x509.h>
+#include <openssl/pem.h>
+#include <openssl/bn.h>
+
+/* from RFC7296 */
+static const char modp_dh_768_prime[] =
+ "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A63A3620FFFFFFFFFFFFFFFF";
+static const char modp_dh_768_generator[] = "02";
+
+static const char modp_dh_1024_prime[] =
+ "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381" "FFFFFFFFFFFFFFFF";
+static const char modp_dh_1024_generator[] = "02";
+
+/* from RFC3526 */
+static const char modp_dh_1536_prime[] =
+ "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF";
+static const char modp_dh_1536_generator[] = "02";
+
+static const char modp_dh_2048_prime[] =
+ "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF";
+static const char modp_dh_2048_generator[] = "02";
+
+static const char modp_dh_3072_prime[] =
+ "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF";
+static const char modp_dh_3072_generator[] = "02";
+
+static const char modp_dh_4096_prime[] =
+ "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" "FFFFFFFFFFFFFFFF";
+static const char modp_dh_4096_generator[] = "02";
+
+static const char modp_dh_6144_prime[] =
+ "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF";
+static const char modp_dh_6144_generator[] = "02";
+
+static const char modp_dh_8192_prime[] =
+ "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF";
+static const char modp_dh_8192_generator[] = "02";
+
+/* from RFC5114 */
+static const char modp_dh_1024_160_prime[] =
+ "B10B8F96A080E01DDE92DE5EAE5D54EC52C99FBCFB06A3C6"
+ "9A6A9DCA52D23B616073E28675A23D189838EF1E2EE652C0"
+ "13ECB4AEA906112324975C3CD49B83BFACCBDD7D90C4BD70"
+ "98488E9C219A73724EFFD6FAE5644738FAA31A4FF55BCCC0"
+ "A151AF5F0DC8B4BD45BF37DF365C1A65E68CFDA76D4DA708" "DF1FB2BC2E4A4371";
+static const char modp_dh_1024_160_generator[] =
+ "A4D1CBD5C3FD34126765A442EFB99905F8104DD258AC507F"
+ "D6406CFF14266D31266FEA1E5C41564B777E690F5504F213"
+ "160217B4B01B886A5E91547F9E2749F4D7FBD7D3B9A92EE1"
+ "909D0D2263F80A76A6A24C087A091F531DBF0A0169B6A28A"
+ "D662A4D18E73AFA32D779D5918D08BC8858F4DCEF97C2A24" "855E6EEB22B3B2E5";
+
+static const char modp_dh_2048_224_prime[] =
+ "AD107E1E9123A9D0D660FAA79559C51FA20D64E5683B9FD1"
+ "B54B1597B61D0A75E6FA141DF95A56DBAF9A3C407BA1DF15"
+ "EB3D688A309C180E1DE6B85A1274A0A66D3F8152AD6AC212"
+ "9037C9EDEFDA4DF8D91E8FEF55B7394B7AD5B7D0B6C12207"
+ "C9F98D11ED34DBF6C6BA0B2C8BBC27BE6A00E0A0B9C49708"
+ "B3BF8A317091883681286130BC8985DB1602E714415D9330"
+ "278273C7DE31EFDC7310F7121FD5A07415987D9ADC0A486D"
+ "CDF93ACC44328387315D75E198C641A480CD86A1B9E587E8"
+ "BE60E69CC928B2B9C52172E413042E9B23F10B0E16E79763"
+ "C9B53DCF4BA80A29E3FB73C16B8E75B97EF363E2FFA31F71"
+ "CF9DE5384E71B81C0AC4DFFE0C10E64F";
+static const char modp_dh_2048_224_generator[] =
+ "AC4032EF4F2D9AE39DF30B5C8FFDAC506CDEBE7B89998CAF"
+ "74866A08CFE4FFE3A6824A4E10B9A6F0DD921F01A70C4AFA"
+ "AB739D7700C29F52C57DB17C620A8652BE5E9001A8D66AD7"
+ "C17669101999024AF4D027275AC1348BB8A762D0521BC98A"
+ "E247150422EA1ED409939D54DA7460CDB5F6C6B250717CBE"
+ "F180EB34118E98D119529A45D6F834566E3025E316A330EF"
+ "BB77A86F0C1AB15B051AE3D428C8F8ACB70A8137150B8EEB"
+ "10E183EDD19963DDD9E263E4770589EF6AA21E7F5F2FF381"
+ "B539CCE3409D13CD566AFBB48D6C019181E1BCFE94B30269"
+ "EDFE72FE9B6AA4BD7B5A0F1C71CFFF4C19C418E1F6EC0179"
+ "81BC087F2A7065B384B890D3191F2BFA";
+
+static const char modp_dh_2048_256_prime[] =
+ "87A8E61DB4B6663CFFBBD19C651959998CEEF608660DD0F2"
+ "5D2CEED4435E3B00E00DF8F1D61957D4FAF7DF4561B2AA30"
+ "16C3D91134096FAA3BF4296D830E9A7C209E0C6497517ABD"
+ "5A8A9D306BCF67ED91F9E6725B4758C022E0B1EF4275BF7B"
+ "6C5BFC11D45F9088B941F54EB1E59BB8BC39A0BF12307F5C"
+ "4FDB70C581B23F76B63ACAE1CAA6B7902D52526735488A0E"
+ "F13C6D9A51BFA4AB3AD8347796524D8EF6A167B5A41825D9"
+ "67E144E5140564251CCACB83E6B486F6B3CA3F7971506026"
+ "C0B857F689962856DED4010ABD0BE621C3A3960A54E710C3"
+ "75F26375D7014103A4B54330C198AF126116D2276E11715F"
+ "693877FAD7EF09CADB094AE91E1A1597";
+static const char modp_dh_2048_256_generator[] =
+ "3FB32C9B73134D0B2E77506660EDBD484CA7B18F21EF2054"
+ "07F4793A1A0BA12510DBC15077BE463FFF4FED4AAC0BB555"
+ "BE3A6C1B0C6B47B1BC3773BF7E8C6F62901228F8C28CBB18"
+ "A55AE31341000A650196F931C77A57F2DDF463E5E9EC144B"
+ "777DE62AAAB8A8628AC376D282D6ED3864E67982428EBC83"
+ "1D14348F6F2F9193B5045AF2767164E1DFC967C1FB3F2E55"
+ "A4BD1BFFE83B9C80D052B985D182EA0ADB2A3B7313D3FE14"
+ "C8484B1E052588B9B7D2BBD2DF016199ECD06E1557CD0915"
+ "B3353BBB64E0EC377FD028370DF92B52C7891428CDC67EB6"
+ "184B523D1DB246C32F63078490F00EF8D647D148D4795451"
+ "5E2327CFEF98C582664B4C0F6CC41659";
+
+u8 *
+ikev2_calc_prf (ikev2_sa_transform_t * tr, u8 * key, u8 * data)
+{
+ HMAC_CTX ctx;
+ u8 *prf;
+ unsigned int len = 0;
+
+ prf = vec_new (u8, tr->key_trunc);
+ HMAC_CTX_init (&ctx);
+ HMAC_Init_ex (&ctx, key, vec_len (key), tr->md, NULL);
+ HMAC_Update (&ctx, data, vec_len (data));
+ HMAC_Final (&ctx, prf, &len);
+ HMAC_CTX_cleanup (&ctx);
+
+ ASSERT (len == tr->key_trunc);
+
+ return prf;
+}
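+
+/* Note: the PRF above is plain HMAC keyed with "key" over "data"; the
+ * result vector is tr->key_trunc bytes, which for PRF_HMAC_SHA1 equals
+ * the full 20-byte digest, so no truncation actually occurs. */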
+
+u8 *
+ikev2_calc_prfplus (ikev2_sa_transform_t * tr, u8 * key, u8 * seed, int len)
+{
+ u8 *t = 0, *s = 0, *tmp = 0, *ret = 0;
+ u8 x = 0;
+
+ /* prf+ (K,S) = T1 | T2 | T3 | T4 | ...
+
+ where:
+ T1 = prf (K, S | 0x01)
+ T2 = prf (K, T1 | S | 0x02)
+ T3 = prf (K, T2 | S | 0x03)
+ T4 = prf (K, T3 | S | 0x04)
+ */
+
+ while (vec_len (ret) < len && x < 255)
+ {
+ if (t)
+ {
+ vec_append (s, t);
+ vec_free (t);
+ }
+
+ vec_append (s, seed);
+ vec_add2 (s, tmp, 1);
+ *tmp = x + 1;
+ t = ikev2_calc_prf (tr, key, s);
+ vec_append (ret, t);
+ vec_free (s);
+ x++;
+ }
+
+ vec_free (t);
+
+ if (x == 255)
+ {
+ vec_free (ret);
+ }
+
+ return ret;
+}
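+
+/* Illustrative sketch (not part of the build): with the PRF_HMAC_SHA1
+ * transform (20-byte output), asking prf+ for 72 bytes of keying
+ * material runs ceil(72/20) = 4 rounds (T1..T4):
+ *
+ * u8 *keymat = ikev2_calc_prfplus (tr_prf, skeyseed, nonces, 72);
+ * // vec_len (keymat) == 80 here; the caller consumes the first 72
+ * // bytes and eventually vec_free()s the vector.
+ *
+ * "tr_prf", "skeyseed" and "nonces" are hypothetical caller-built values.
+ */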
+
+u8 *
+ikev2_calc_integr (ikev2_sa_transform_t * tr, u8 * key, u8 * data, int len)
+{
+ u8 *r;
+ HMAC_CTX hctx;
+ unsigned int l;
+
+ ASSERT (tr->type == IKEV2_TRANSFORM_TYPE_INTEG);
+
+ r = vec_new (u8, tr->key_len);
+
+ /* compute the integrity check value over the data */
+ HMAC_CTX_init (&hctx);
+ HMAC_Init (&hctx, key, vec_len (key), tr->md);
+ HMAC_Update (&hctx, (const u8 *) data, len);
+ HMAC_Final (&hctx, r, &l);
+ HMAC_CTX_cleanup (&hctx);
+
+ ASSERT (l == tr->key_len);
+
+ return r;
+}
+
+u8 *
+ikev2_decrypt_data (ikev2_sa_t * sa, u8 * data, int len)
+{
+ EVP_CIPHER_CTX ctx;
+ u8 *r;
+ int out_len = 0, block_size;
+ ikev2_sa_transform_t *tr_encr;
+
+ tr_encr =
+ ikev2_sa_get_td_for_type (sa->r_proposals, IKEV2_TRANSFORM_TYPE_ENCR);
+ block_size = tr_encr->block_size;
+
+ /* check that data length is a multiple of the cipher block size */
+ if (len % block_size)
+ {
+ clib_warning ("wrong data length");
+ return 0;
+ }
+
+ EVP_CIPHER_CTX_init (&ctx);
+ r = vec_new (u8, len - block_size);
+ EVP_DecryptInit_ex (&ctx, tr_encr->cipher, NULL, sa->sk_ei, data);
+ EVP_DecryptUpdate (&ctx, r, &out_len, data + block_size, len - block_size);
+ EVP_DecryptFinal_ex (&ctx, r + out_len, &out_len);
+
+ /* remove padding */
+ _vec_len (r) -= r[vec_len (r) - 1] + 1;
+
+ EVP_CIPHER_CTX_cleanup (&ctx);
+ return r;
+}
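+
+/* Buffer layout assumed by the decrypt routine above (RFC 7296 s3.14):
+ * the first cipher block carries the IV and the rest is ciphertext whose
+ * final plaintext byte is the pad length. E.g. with AES-CBC-128 (16-byte
+ * blocks) and len == 64, bytes 0..15 are the IV, bytes 16..63 decrypt to
+ * payload | padding | pad-length byte, and the trailing pad-length + 1
+ * bytes are trimmed from the returned vector. */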
+
+int
+ikev2_encrypt_data (ikev2_sa_t * sa, u8 * src, u8 * dst)
+{
+ EVP_CIPHER_CTX ctx;
+ int out_len;
+ int bs;
+ ikev2_sa_transform_t *tr_encr;
+
+ tr_encr =
+ ikev2_sa_get_td_for_type (sa->r_proposals, IKEV2_TRANSFORM_TYPE_ENCR);
+ bs = tr_encr->block_size;
+
+ /* generate IV */
+ RAND_bytes (dst, bs);
+
+ EVP_CIPHER_CTX_init (&ctx);
+
+ EVP_EncryptInit_ex (&ctx, tr_encr->cipher, NULL, sa->sk_er, dst /* dst */ );
+ EVP_EncryptUpdate (&ctx, dst + bs, &out_len, src, vec_len (src));
+
+ EVP_CIPHER_CTX_cleanup (&ctx);
+
+ ASSERT (vec_len (src) == out_len);
+
+ return out_len + bs;
+}
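+
+/* Usage note (a sketch under assumed caller state): "src" must already
+ * be padded to the cipher block size (see
+ * ikev2_payload_chain_add_padding) and "dst" must have room for
+ * block_size + vec_len (src) bytes, since a fresh random IV is written
+ * ahead of the ciphertext:
+ *
+ * ikev2_payload_chain_add_padding (chain, tr_encr->block_size);
+ * int n = ikev2_encrypt_data (sa, chain->data, enc_buf);
+ * // n == tr_encr->block_size + vec_len (chain->data)
+ *
+ * "chain", "tr_encr" and "enc_buf" are hypothetical caller-owned objects.
+ */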
+
+void
+ikev2_generate_dh (ikev2_sa_t * sa, ikev2_sa_transform_t * t)
+{
+ int r;
+
+ if (t->dh_group == IKEV2_DH_GROUP_MODP)
+ {
+ DH *dh = DH_new ();
+ BN_hex2bn (&dh->p, t->dh_p);
+ BN_hex2bn (&dh->g, t->dh_g);
+ DH_generate_key (dh);
+
+ sa->r_dh_data = vec_new (u8, t->key_len);
+ r = BN_bn2bin (dh->pub_key, sa->r_dh_data);
+ ASSERT (r == t->key_len);
+
+ BIGNUM *ex;
+ sa->dh_shared_key = vec_new (u8, t->key_len);
+ ex = BN_bin2bn (sa->i_dh_data, vec_len (sa->i_dh_data), NULL);
+ r = DH_compute_key (sa->dh_shared_key, ex, dh);
+ ASSERT (r == t->key_len);
+ BN_clear_free (ex);
+ DH_free (dh);
+ }
+ else if (t->dh_group == IKEV2_DH_GROUP_ECP)
+ {
+ EC_KEY *ec = EC_KEY_new_by_curve_name (t->nid);
+ ASSERT (ec);
+
+ EC_KEY_generate_key (ec);
+
+ const EC_POINT *r_point = EC_KEY_get0_public_key (ec);
+ const EC_GROUP *group = EC_KEY_get0_group (ec);
+ BIGNUM *x = NULL, *y = NULL;
+ BN_CTX *bn_ctx = BN_CTX_new ();
+ u16 x_off, y_off, len;
+ EC_POINT *i_point = EC_POINT_new (group);
+ EC_POINT *shared_point = EC_POINT_new (group);
+
+ x = BN_new ();
+ y = BN_new ();
+ len = t->key_len / 2;
+
+ EC_POINT_get_affine_coordinates_GFp (group, r_point, x, y, bn_ctx);
+ sa->r_dh_data = vec_new (u8, t->key_len);
+ x_off = len - BN_num_bytes (x);
+ memset (sa->r_dh_data, 0, x_off);
+ BN_bn2bin (x, sa->r_dh_data + x_off);
+ y_off = t->key_len - BN_num_bytes (y);
+ memset (sa->r_dh_data + len, 0, y_off - len);
+ BN_bn2bin (y, sa->r_dh_data + y_off);
+
+ x = BN_bin2bn (sa->i_dh_data, len, x);
+ y = BN_bin2bn (sa->i_dh_data + len, len, y);
+ EC_POINT_set_affine_coordinates_GFp (group, i_point, x, y, bn_ctx);
+ sa->dh_shared_key = vec_new (u8, t->key_len);
+ EC_POINT_mul (group, shared_point, NULL, i_point,
+ EC_KEY_get0_private_key (ec), NULL);
+ EC_POINT_get_affine_coordinates_GFp (group, shared_point, x, y, bn_ctx);
+ x_off = len - BN_num_bytes (x);
+ memset (sa->dh_shared_key, 0, x_off);
+ BN_bn2bin (x, sa->dh_shared_key + x_off);
+ y_off = t->key_len - BN_num_bytes (y);
+ memset (sa->dh_shared_key + len, 0, y_off - len);
+ BN_bn2bin (y, sa->dh_shared_key + y_off);
+
+ EC_KEY_free (ec);
+ BN_free (x);
+ BN_free (y);
+ BN_CTX_free (bn_ctx);
+ EC_POINT_free (i_point);
+ EC_POINT_free (shared_point);
+ }
+}
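+
+/* Note on the two branches above: for MODP groups the peer value is a
+ * big-endian g^x mod p of exactly key_len bytes, while for ECP groups
+ * i_dh_data carries the affine point as x || y, each key_len / 2 bytes
+ * and left-padded with zeros; the shared secret is encoded the same way. */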
+
+int
+ikev2_verify_sign (EVP_PKEY * pkey, u8 * sigbuf, u8 * data)
+{
+ EVP_MD_CTX md_ctx;
+
+ EVP_VerifyInit (&md_ctx, EVP_sha1 ());
+ EVP_VerifyUpdate (&md_ctx, data, vec_len (data));
+
+ return EVP_VerifyFinal (&md_ctx, sigbuf, vec_len (sigbuf), pkey);
+}
+
+u8 *
+ikev2_calc_sign (EVP_PKEY * pkey, u8 * data)
+{
+ EVP_MD_CTX md_ctx;
+ unsigned int sig_len = 0;
+ u8 *sign;
+
+ EVP_SignInit (&md_ctx, EVP_sha1 ());
+ EVP_SignUpdate (&md_ctx, data, vec_len (data));
+ /* get sign len */
+ EVP_SignFinal (&md_ctx, NULL, &sig_len, pkey);
+ sign = vec_new (u8, sig_len);
+ /* calc sign */
+ EVP_SignFinal (&md_ctx, sign, &sig_len, pkey);
+
+ return sign;
+}
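+
+/* The two-pass EVP_SignFinal pattern above is the usual OpenSSL idiom:
+ * the first call is made only to learn the signature size so the vector
+ * can be allocated, the second call emits the actual RSA/SHA-1 signature
+ * used for the rsa-sig authentication method (RFC 7296 s2.15). */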
+
+EVP_PKEY *
+ikev2_load_cert_file (u8 * file)
+{
+ FILE *fp;
+ X509 *x509;
+ EVP_PKEY *pkey = NULL;
+
+ fp = fopen ((char *) file, "r");
+ if (!fp)
+ {
+ clib_warning ("open %s failed", file);
+ goto end;
+ }
+
+ x509 = PEM_read_X509 (fp, NULL, NULL, NULL);
+ fclose (fp);
+ if (x509 == NULL)
+ {
+ clib_warning ("read cert %s failed", file);
+ goto end;
+ }
+
+ pkey = X509_get_pubkey (x509);
+ if (pkey == NULL)
+ clib_warning ("get pubkey %s failed", file);
+
+end:
+ return pkey;
+}
+
+EVP_PKEY *
+ikev2_load_key_file (u8 * file)
+{
+ FILE *fp;
+ EVP_PKEY *pkey = NULL;
+
+ fp = fopen ((char *) file, "r");
+ if (!fp)
+ {
+ clib_warning ("open %s failed", file);
+ goto end;
+ }
+
+ pkey = PEM_read_PrivateKey (fp, NULL, NULL, NULL);
+ fclose (fp);
+ if (pkey == NULL)
+ clib_warning ("read %s failed", file);
+
+end:
+ return pkey;
+}
+
+void
+ikev2_crypto_init (ikev2_main_t * km)
+{
+ ikev2_sa_transform_t *tr;
+
+ /* vector of supported transforms - in order of preference */
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_ENCR;
+ tr->encr_type = IKEV2_TRANSFORM_ENCR_TYPE_AES_CBC;
+ tr->key_len = 256 / 8;
+ tr->block_size = 128 / 8;
+ tr->cipher = EVP_aes_256_cbc ();
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_ENCR;
+ tr->encr_type = IKEV2_TRANSFORM_ENCR_TYPE_AES_CBC;
+ tr->key_len = 192 / 8;
+ tr->block_size = 128 / 8;
+ tr->cipher = EVP_aes_192_cbc ();
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_ENCR;
+ tr->encr_type = IKEV2_TRANSFORM_ENCR_TYPE_AES_CBC;
+ tr->key_len = 128 / 8;
+ tr->block_size = 128 / 8;
+ tr->cipher = EVP_aes_128_cbc ();
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_PRF;
+ tr->prf_type = IKEV2_TRANSFORM_PRF_TYPE_PRF_HMAC_SHA1;
+ tr->key_len = 160 / 8;
+ tr->key_trunc = 160 / 8;
+ tr->md = EVP_sha1 ();
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_INTEG;
+ tr->integ_type = IKEV2_TRANSFORM_INTEG_TYPE_AUTH_HMAC_SHA1_96;
+ tr->key_len = 160 / 8;
+ tr->key_trunc = 96 / 8;
+ tr->md = EVP_sha1 ();
+
+#if defined(OPENSSL_NO_CISCO_FECDH)
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_BRAINPOOL_512;
+ tr->key_len = (512 * 2) / 8;
+ tr->nid = NID_brainpoolP512r1;
+ tr->dh_group = IKEV2_DH_GROUP_ECP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_BRAINPOOL_384;
+ tr->key_len = (384 * 2) / 8;
+ tr->nid = NID_brainpoolP384r1;
+ tr->dh_group = IKEV2_DH_GROUP_ECP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_BRAINPOOL_256;
+ tr->key_len = (256 * 2) / 8;
+ tr->nid = NID_brainpoolP256r1;
+ tr->dh_group = IKEV2_DH_GROUP_ECP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_BRAINPOOL_224;
+ tr->key_len = (224 * 2) / 8;
+ tr->nid = NID_brainpoolP224r1;
+ tr->dh_group = IKEV2_DH_GROUP_ECP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_ECP_224;
+ tr->key_len = (224 * 2) / 8;
+ tr->nid = NID_secp224r1;
+ tr->dh_group = IKEV2_DH_GROUP_ECP;
+#endif
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_ECP_521;
+ tr->key_len = (528 * 2) / 8;
+ tr->nid = NID_secp521r1;
+ tr->dh_group = IKEV2_DH_GROUP_ECP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_ECP_384;
+ tr->key_len = (384 * 2) / 8;
+ tr->nid = NID_secp384r1;
+ tr->dh_group = IKEV2_DH_GROUP_ECP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_ECP_256;
+ tr->key_len = (256 * 2) / 8;
+ tr->nid = NID_X9_62_prime256v1;
+ tr->dh_group = IKEV2_DH_GROUP_ECP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_ECP_192;
+ tr->key_len = (192 * 2) / 8;
+ tr->nid = NID_X9_62_prime192v1;
+ tr->dh_group = IKEV2_DH_GROUP_ECP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_MODP_2048_256;
+ tr->key_len = 2048 / 8;
+ tr->dh_p = (const char *) &modp_dh_2048_256_prime;
+ tr->dh_g = (const char *) &modp_dh_2048_256_generator;
+ tr->dh_group = IKEV2_DH_GROUP_MODP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_MODP_2048_224;
+ tr->key_len = 2048 / 8;
+ tr->dh_p = (const char *) &modp_dh_2048_224_prime;
+ tr->dh_g = (const char *) &modp_dh_2048_224_generator;
+ tr->dh_group = IKEV2_DH_GROUP_MODP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_MODP_1024_160;
+ tr->key_len = 1024 / 8;
+ tr->dh_p = (const char *) &modp_dh_1024_160_prime;
+ tr->dh_g = (const char *) &modp_dh_1024_160_generator;
+ tr->dh_group = IKEV2_DH_GROUP_MODP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_MODP_8192;
+ tr->key_len = 8192 / 8;
+ tr->dh_p = (const char *) &modp_dh_8192_prime;
+ tr->dh_g = (const char *) &modp_dh_8192_generator;
+ tr->dh_group = IKEV2_DH_GROUP_MODP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_MODP_6144;
+ tr->key_len = 6144 / 8;
+ tr->dh_p = (const char *) &modp_dh_6144_prime;
+ tr->dh_g = (const char *) &modp_dh_6144_generator;
+ tr->dh_group = IKEV2_DH_GROUP_MODP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_MODP_4096;
+ tr->key_len = 4096 / 8;
+ tr->dh_p = (const char *) &modp_dh_4096_prime;
+ tr->dh_g = (const char *) &modp_dh_4096_generator;
+ tr->dh_group = IKEV2_DH_GROUP_MODP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_MODP_3072;
+ tr->key_len = 3072 / 8;
+ tr->dh_p = (const char *) &modp_dh_3072_prime;
+ tr->dh_g = (const char *) &modp_dh_3072_generator;
+ tr->dh_group = IKEV2_DH_GROUP_MODP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_MODP_2048;
+ tr->key_len = 2048 / 8;
+ tr->dh_p = (const char *) &modp_dh_2048_prime;
+ tr->dh_g = (const char *) &modp_dh_2048_generator;
+ tr->dh_group = IKEV2_DH_GROUP_MODP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_MODP_1536;
+ tr->key_len = 1536 / 8;
+ tr->dh_p = (const char *) &modp_dh_1536_prime;
+ tr->dh_g = (const char *) &modp_dh_1536_generator;
+ tr->dh_group = IKEV2_DH_GROUP_MODP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_MODP_1024;
+ tr->key_len = 1024 / 8;
+ tr->dh_p = (const char *) &modp_dh_1024_prime;
+ tr->dh_g = (const char *) &modp_dh_1024_generator;
+ tr->dh_group = IKEV2_DH_GROUP_MODP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_DH;
+ tr->dh_type = IKEV2_TRANSFORM_DH_TYPE_MODP_768;
+ tr->key_len = 768 / 8;
+ tr->dh_p = (const char *) &modp_dh_768_prime;
+ tr->dh_g = (const char *) &modp_dh_768_generator;
+ tr->dh_group = IKEV2_DH_GROUP_MODP;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_ESN;
+ tr->esn_type = IKEV2_TRANSFORM_ESN_TYPE_ESN;
+
+ vec_add2 (km->supported_transforms, tr, 1);
+ tr->type = IKEV2_TRANSFORM_TYPE_ESN;
+ tr->esn_type = IKEV2_TRANSFORM_ESN_TYPE_NO_ESN;
+}
+
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ikev2_format.c b/src/vnet/ipsec/ikev2_format.c
new file mode 100644
index 00000000000..4d7a007f80d
--- /dev/null
+++ b/src/vnet/ipsec/ikev2_format.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/vnet.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+#include <vnet/interface.h>
+
+#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ikev2.h>
+#include <vnet/ipsec/ikev2_priv.h>
+
+u8 *
+format_ikev2_sa_transform (u8 * s, va_list * args)
+{
+ ikev2_sa_transform_t *tr = va_arg (*args, ikev2_sa_transform_t *);
+
+ if (!tr)
+ return s;
+
+ if (tr->type >= IKEV2_TRANSFORM_NUM_TYPES)
+ return s;
+
+ s = format (s, "%U:", format_ikev2_transform_type, tr->type);
+
+ switch (tr->type)
+ {
+ case IKEV2_TRANSFORM_TYPE_ENCR:
+ s = format (s, "%U", format_ikev2_transform_encr_type, tr->encr_type);
+ break;
+ case IKEV2_TRANSFORM_TYPE_PRF:
+ s = format (s, "%U", format_ikev2_transform_prf_type, tr->prf_type);
+ break;
+ case IKEV2_TRANSFORM_TYPE_INTEG:
+ s = format (s, "%U", format_ikev2_transform_integ_type, tr->integ_type);
+ break;
+ case IKEV2_TRANSFORM_TYPE_DH:
+ s = format (s, "%U", format_ikev2_transform_dh_type, tr->dh_type);
+ break;
+ case IKEV2_TRANSFORM_TYPE_ESN:
+ s = format (s, "%U", format_ikev2_transform_esn_type, tr->esn_type);
+ break;
+ default:
+ break;
+ }
+
+ if (tr->type == IKEV2_TRANSFORM_TYPE_ENCR &&
+ tr->encr_type == IKEV2_TRANSFORM_ENCR_TYPE_AES_CBC && tr->key_len)
+ s = format (s, "-%u", tr->key_len * 8);
+ else if (vec_len (tr->attrs) == 4 && tr->attrs[0] == 0x80
+ && tr->attrs[1] == 0x0e)
+ s = format (s, "-%u", tr->attrs[2] * 256 + tr->attrs[3]);
+ else if (vec_len (tr->attrs))
+ s = format (s, "(unknown attr %U)", format_hex_bytes,
+ tr->attrs, vec_len (tr->attrs));
+
+ return s;
+}
+
+#define MACRO_FORMAT(lc) \
+u8 * format_ikev2_##lc (u8 * s, va_list * args) \
+{ \
+ u32 i = va_arg (*args, u32); \
+ char * t = 0; \
+ switch (i) { \
+ foreach_ikev2_##lc \
+ default: \
+ return format (s, "unknown (%u)", i); \
+ } \
+ s = format (s, "%s", t); \
+ return s; \
+}
+
+#define MACRO_UNFORMAT(lc) \
+uword \
+unformat_ikev2_##lc (unformat_input_t * input, \
+ va_list * args) \
+{ \
+ u32 * r = va_arg (*args, u32 *); \
+ if (0) ; \
+ foreach_ikev2_##lc \
+ else \
+ return 0; \
+ return 1; \
+}
+
+#define _(v,f,str) case IKEV2_AUTH_METHOD_##f: t = str; break;
+MACRO_FORMAT (auth_method)
+#undef _
+#define _(v,f,str) else if (unformat (input, str)) *r = IKEV2_AUTH_METHOD_##f;
+ MACRO_UNFORMAT (auth_method)
+#undef _
+#define _(v,f,str) case IKEV2_TRANSFORM_TYPE_##f: t = str; break;
+ MACRO_FORMAT (transform_type)
+#undef _
+#define _(v,f,str) else if (unformat (input, str)) *r = IKEV2_TRANSFORM_TYPE_##f;
+ MACRO_UNFORMAT (transform_type)
+#undef _
+#define _(v,f) case IKEV2_NOTIFY_MSG_##f: t = #f; break;
+ MACRO_FORMAT (notify_msg_type)
+#undef _
+#define _(v,f,str) case IKEV2_ID_TYPE_##f: t = str; break;
+ MACRO_FORMAT (id_type)
+#undef _
+#define _(v,f,str) else if (unformat (input, str)) *r = IKEV2_ID_TYPE_##f;
+ MACRO_UNFORMAT (id_type)
+#undef _
+#define _(v,f,str) case IKEV2_TRANSFORM_ENCR_TYPE_##f: t = str; break;
+ MACRO_FORMAT (transform_encr_type)
+#undef _
+#define _(v,f,str) else if (unformat (input, str)) *r = IKEV2_TRANSFORM_ENCR_TYPE_##f;
+ MACRO_UNFORMAT (transform_encr_type)
+#undef _
+#define _(v,f,str) case IKEV2_TRANSFORM_PRF_TYPE_##f: t = str; break;
+ MACRO_FORMAT (transform_prf_type)
+#undef _
+#define _(v,f,str) else if (unformat (input, str)) *r = IKEV2_TRANSFORM_PRF_TYPE_##f;
+ MACRO_UNFORMAT (transform_prf_type)
+#undef _
+#define _(v,f,str) case IKEV2_TRANSFORM_INTEG_TYPE_##f: t = str; break;
+ MACRO_FORMAT (transform_integ_type)
+#undef _
+#define _(v,f,str) else if (unformat (input, str)) *r = IKEV2_TRANSFORM_INTEG_TYPE_##f;
+ MACRO_UNFORMAT (transform_integ_type)
+#undef _
+#define _(v,f,str) case IKEV2_TRANSFORM_DH_TYPE_##f: t = str; break;
+ MACRO_FORMAT (transform_dh_type)
+#undef _
+#define _(v,f,str) else if (unformat (input, str)) *r = IKEV2_TRANSFORM_DH_TYPE_##f;
+ MACRO_UNFORMAT (transform_dh_type)
+#undef _
+#define _(v,f,str) case IKEV2_TRANSFORM_ESN_TYPE_##f: t = str; break;
+ MACRO_FORMAT (transform_esn_type)
+#undef _
+#define _(v,f,str) else if (unformat (input, str)) *r = IKEV2_TRANSFORM_ESN_TYPE_##f;
+ MACRO_UNFORMAT (transform_esn_type)
+#undef _
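+
+/* The macros above stamp out matching format/unformat pairs for each
+ * IKEv2 enum. A minimal usage sketch (hypothetical buffers/values):
+ *
+ * u8 *s = format (0, "%U", format_ikev2_transform_type,
+ * IKEV2_TRANSFORM_TYPE_ENCR);
+ * u32 v;
+ * if (unformat (input, "%U", unformat_ikev2_transform_type, &v))
+ * ; // v now holds the matched IKEV2_TRANSFORM_TYPE_* value
+ */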
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ikev2_payload.c b/src/vnet/ipsec/ikev2_payload.c
new file mode 100644
index 00000000000..dd14812b550
--- /dev/null
+++ b/src/vnet/ipsec/ikev2_payload.c
@@ -0,0 +1,535 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ctype.h>
+
+#include <vnet/vnet.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+#include <vnet/interface.h>
+
+#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ikev2.h>
+#include <vnet/ipsec/ikev2_priv.h>
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct
+ {
+ u8 nextpayload;
+ u8 flags;
+ u16 length;
+ u8 protocol_id;
+ u8 spi_size;
+ u16 msg_type;
+ u8 payload[0];}) ike_notify_payload_header_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct
+ {
+ u8 ts_type;
+ u8 protocol_id;
+ u16 selector_len;
+ u16 start_port;
+ u16 end_port;
+ ip4_address_t start_addr;
+ ip4_address_t end_addr;}) ikev2_ts_payload_entry_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct
+ {
+ u8 nextpayload;
+ u8 flags;
+ u16 length;
+ u8 num_ts;
+ u8 reserved[3];
+ ikev2_ts_payload_entry_t ts[0];})
+ ike_ts_payload_header_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u8 last_or_more;
+ u8 reserved;
+ u16 proposal_len;
+ u8 proposal_num;
+ u8 protocol_id;
+ u8 spi_size;
+ u8 num_transforms; u32 spi[0];
+}) ike_sa_proposal_data_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u8 last_or_more;
+ u8 reserved;
+ u16 transform_len;
+ u8 transform_type;
+ u8 reserved2;
+ u16 transform_id;
+ u8 attributes[0];
+}) ike_sa_transform_data_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u8 nextpayload;
+ u8 flags;
+ u16 length;
+ u8 protocol_id;
+ u8 spi_size;
+ u16 num_of_spi;
+ u32 spi[0];
+}) ike_delete_payload_header_t;
+/* *INDENT-ON* */
+
+static ike_payload_header_t *
+ikev2_payload_add_hdr (ikev2_payload_chain_t * c, u8 payload_type, int len)
+{
+ ike_payload_header_t *hdr =
+ (ike_payload_header_t *) & c->data[c->last_hdr_off];
+ u8 *tmp;
+
+ if (c->data)
+ hdr->nextpayload = payload_type;
+ else
+ c->first_payload_type = payload_type;
+
+ c->last_hdr_off = vec_len (c->data);
+ vec_add2 (c->data, tmp, len);
+ hdr = (ike_payload_header_t *) tmp;
+ memset (hdr, 0, len);
+
+ hdr->length = clib_host_to_net_u16 (len);
+
+ return hdr;
+}
+
+static void
+ikev2_payload_add_data (ikev2_payload_chain_t * c, u8 * data)
+{
+ u16 len;
+ ike_payload_header_t *hdr;
+
+ vec_append (c->data, data);
+ hdr = (ike_payload_header_t *) & c->data[c->last_hdr_off];
+ len = clib_net_to_host_u16 (hdr->length);
+ hdr->length = clib_host_to_net_u16 (len + vec_len (data));
+}
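+
+/* How the two helpers above cooperate: the first ikev2_payload_add_hdr
+ * call records its type in c->first_payload_type; every later call
+ * patches the previous header's nextpayload field via last_hdr_off. So
+ * after adding an SA payload followed by a KE payload,
+ * c->first_payload_type is IKEV2_PAYLOAD_SA and the SA header's
+ * nextpayload is IKEV2_PAYLOAD_KE, while ikev2_payload_add_data only
+ * grows the length field of the most recently added header. */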
+
+void
+ikev2_payload_add_notify (ikev2_payload_chain_t * c, u16 msg_type, u8 * data)
+{
+ ike_notify_payload_header_t *n;
+
+ n =
+ (ike_notify_payload_header_t *) ikev2_payload_add_hdr (c,
+ IKEV2_PAYLOAD_NOTIFY,
+ sizeof (*n));
+ n->msg_type = clib_host_to_net_u16 (msg_type);
+ ikev2_payload_add_data (c, data);
+}
+
+void
+ikev2_payload_add_sa (ikev2_payload_chain_t * c,
+ ikev2_sa_proposal_t * proposals)
+{
+ ike_payload_header_t *ph;
+ ike_sa_proposal_data_t *prop;
+ ike_sa_transform_data_t *tr;
+ ikev2_sa_proposal_t *p;
+ ikev2_sa_transform_t *t;
+
+ u8 *tmp;
+ u8 *pr_data = 0;
+ u8 *tr_data = 0;
+
+ ikev2_payload_add_hdr (c, IKEV2_PAYLOAD_SA, sizeof (*ph));
+
+ vec_foreach (p, proposals)
+ {
+ int spi_size = (p->protocol_id == IKEV2_PROTOCOL_ESP) ? 4 : 0;
+ pr_data = vec_new (u8, sizeof (ike_sa_proposal_data_t) + spi_size);
+ prop = (ike_sa_proposal_data_t *) pr_data;
+ prop->last_or_more = (p - proposals) + 1 < vec_len (proposals) ? 2 : 0;
+ prop->protocol_id = p->protocol_id;
+ prop->proposal_num = p->proposal_num;
+ prop->spi_size = spi_size;
+ prop->num_transforms = vec_len (p->transforms);
+
+ if (spi_size)
+ prop->spi[0] = clib_host_to_net_u32 (p->spi);
+
+ DBG_PLD ("proposal num %u protocol_id %u last_or_more %u spi_size %u%s%U",
+ prop->proposal_num, prop->protocol_id, prop->last_or_more,
+ prop->spi_size, prop->spi_size ? " spi_data " : "",
+ format_hex_bytes, prop->spi, prop->spi_size);
+
+ vec_foreach (t, p->transforms)
+ {
+ vec_add2 (tr_data, tmp, sizeof (*tr) + vec_len (t->attrs));
+ tr = (ike_sa_transform_data_t *) tmp;
+ tr->last_or_more =
+ ((t - p->transforms) + 1 < vec_len (p->transforms)) ? 3 : 0;
+ tr->transform_type = t->type;
+ tr->transform_id = clib_host_to_net_u16 (t->transform_id);
+ tr->transform_len =
+ clib_host_to_net_u16 (sizeof (*tr) + vec_len (t->attrs));
+
+ if (vec_len (t->attrs) > 0)
+ clib_memcpy (tr->attributes, t->attrs, vec_len (t->attrs));
+
+ DBG_PLD
+ ("transform type %U transform_id %u last_or_more %u attr_size %u%s%U",
+ format_ikev2_transform_type, tr->transform_type, t->transform_id,
+ tr->last_or_more, vec_len (t->attrs),
+ vec_len (t->attrs) ? " attrs " : "", format_hex_bytes,
+ tr->attributes, vec_len (t->attrs));
+ }
+
+ prop->proposal_len =
+ clib_host_to_net_u16 (vec_len (tr_data) + vec_len (pr_data));
+ ikev2_payload_add_data (c, pr_data);
+ ikev2_payload_add_data (c, tr_data);
+ vec_free (pr_data);
+ vec_free (tr_data);
+ }
+}
+
+void
+ikev2_payload_add_ke (ikev2_payload_chain_t * c, u16 dh_group, u8 * dh_data)
+{
+ ike_ke_payload_header_t *ke;
+ ke = (ike_ke_payload_header_t *) ikev2_payload_add_hdr (c, IKEV2_PAYLOAD_KE,
+ sizeof (*ke));
+
+ ke->dh_group = clib_host_to_net_u16 (dh_group);
+ ikev2_payload_add_data (c, dh_data);
+}
+
+void
+ikev2_payload_add_nonce (ikev2_payload_chain_t * c, u8 * nonce)
+{
+ ikev2_payload_add_hdr (c, IKEV2_PAYLOAD_NONCE,
+ sizeof (ike_payload_header_t));
+ ikev2_payload_add_data (c, nonce);
+}
+
+void
+ikev2_payload_add_id (ikev2_payload_chain_t * c, ikev2_id_t * id, u8 type)
+{
+ ike_id_payload_header_t *idp;
+ idp =
+ (ike_id_payload_header_t *) ikev2_payload_add_hdr (c, type,
+ sizeof (*idp));
+
+ idp->id_type = id->type;
+ ikev2_payload_add_data (c, id->data);
+}
+
+void
+ikev2_payload_add_delete (ikev2_payload_chain_t * c, ikev2_delete_t * d)
+{
+ ike_delete_payload_header_t *dp;
+ u16 num_of_spi = vec_len (d);
+ ikev2_delete_t *d2;
+ dp =
+ (ike_delete_payload_header_t *) ikev2_payload_add_hdr (c,
+ IKEV2_PAYLOAD_DELETE,
+ sizeof (*dp));
+
+ if (d[0].protocol_id == IKEV2_PROTOCOL_IKE)
+ {
+ dp->protocol_id = 1;
+ }
+ else
+ {
+ dp->protocol_id = d[0].protocol_id;
+ dp->spi_size = 4;
+ dp->num_of_spi = clib_host_to_net_u16 (num_of_spi);
+ vec_foreach (d2, d)
+ {
+ u8 *data = vec_new (u8, 4);
+ u32 spi = clib_host_to_net_u32 (d2->spi);
+ clib_memcpy (data, &spi, 4);
+ ikev2_payload_add_data (c, data);
+ vec_free (data);
+ }
+ }
+}
+
+void
+ikev2_payload_add_auth (ikev2_payload_chain_t * c, ikev2_auth_t * auth)
+{
+ ike_auth_payload_header_t *ap;
+ ap =
+ (ike_auth_payload_header_t *) ikev2_payload_add_hdr (c,
+ IKEV2_PAYLOAD_AUTH,
+ sizeof (*ap));
+
+ ap->auth_method = auth->method;
+ ikev2_payload_add_data (c, auth->data);
+}
+
+void
+ikev2_payload_add_ts (ikev2_payload_chain_t * c, ikev2_ts_t * ts, u8 type)
+{
+ ike_ts_payload_header_t *tsh;
+ ikev2_ts_t *ts2;
+ u8 *data = 0, *tmp;
+
+ tsh =
+ (ike_ts_payload_header_t *) ikev2_payload_add_hdr (c, type,
+ sizeof (*tsh));
+ tsh->num_ts = vec_len (ts);
+
+ vec_foreach (ts2, ts)
+ {
+ ASSERT (ts2->ts_type == 7); /* TS_IPV4_ADDR_RANGE */
+ ikev2_ts_payload_entry_t *entry;
+ vec_add2 (data, tmp, sizeof (*entry));
+ entry = (ikev2_ts_payload_entry_t *) tmp;
+ entry->ts_type = ts2->ts_type;
+ entry->protocol_id = ts2->protocol_id;
+ entry->selector_len = clib_host_to_net_u16 (16);
+ entry->start_port = clib_host_to_net_u16 (ts2->start_port);
+ entry->end_port = clib_host_to_net_u16 (ts2->end_port);
+ entry->start_addr.as_u32 = ts2->start_addr.as_u32;
+ entry->end_addr.as_u32 = ts2->end_addr.as_u32;
+ }
+
+ ikev2_payload_add_data (c, data);
+ vec_free (data);
+}
+
+void
+ikev2_payload_chain_add_padding (ikev2_payload_chain_t * c, int bs)
+{
+ u8 *tmp __attribute__ ((unused));
+ u8 pad_len = (vec_len (c->data) / bs + 1) * bs - vec_len (c->data);
+ vec_add2 (c->data, tmp, pad_len);
+ c->data[vec_len (c->data) - 1] = pad_len - 1;
+}
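+
+/* Worked example for the padding rule above (RFC 7296 s3.14): with
+ * bs == 16 and vec_len (c->data) == 20, pad_len = (20/16 + 1) * 16 - 20
+ * = 12, so the chain grows to 32 bytes and its final byte is set to 11
+ * (the pad-length field counts the padding but not itself). A chain that
+ * is already block-aligned still gains one full extra block. */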
+
+ikev2_sa_proposal_t *
+ikev2_parse_sa_payload (ike_payload_header_t * ikep)
+{
+ ikev2_sa_proposal_t *v = 0;
+ ikev2_sa_proposal_t *proposal;
+ ikev2_sa_transform_t *transform;
+
+ u32 plen = clib_net_to_host_u16 (ikep->length);
+
+ ike_sa_proposal_data_t *sap;
+ int proposal_ptr = 0;
+
+ do
+ {
+ sap = (ike_sa_proposal_data_t *) & ikep->payload[proposal_ptr];
+ int i;
+ int transform_ptr;
+
+ DBG_PLD ("proposal num %u len %u last_or_more %u id %u "
+ "spi_size %u num_transforms %u",
+ sap->proposal_num, clib_net_to_host_u16 (sap->proposal_len),
+ sap->last_or_more, sap->protocol_id, sap->spi_size,
+ sap->num_transforms);
+
+ /* IKE proposal should not have SPI */
+ if (sap->protocol_id == IKEV2_PROTOCOL_IKE && sap->spi_size != 0)
+ goto data_corrupted;
+
+ /* ESP proposal must carry a 4-byte SPI */
+ if (sap->protocol_id == IKEV2_PROTOCOL_ESP && sap->spi_size != 4)
+ goto data_corrupted;
+
+ transform_ptr = proposal_ptr + sizeof (*sap) + sap->spi_size;
+
+ vec_add2 (v, proposal, 1);
+ proposal->proposal_num = sap->proposal_num;
+ proposal->protocol_id = sap->protocol_id;
+
+ if (sap->spi_size == 4)
+ {
+ proposal->spi = clib_net_to_host_u32 (sap->spi[0]);
+ }
+
+ for (i = 0; i < sap->num_transforms; i++)
+ {
+ ike_sa_transform_data_t *tr =
+ (ike_sa_transform_data_t *) & ikep->payload[transform_ptr];
+ u16 tlen = clib_net_to_host_u16 (tr->transform_len);
+
+ if (tlen < sizeof (*tr))
+ goto data_corrupted;
+
+ vec_add2 (proposal->transforms, transform, 1);
+
+ transform->type = tr->transform_type;
+ transform->transform_id = clib_net_to_host_u16 (tr->transform_id);
+ if (tlen > sizeof (*tr))
+ vec_add (transform->attrs, tr->attributes, tlen - sizeof (*tr));
+
+ DBG_PLD
+ ("transform num %u len %u last_or_more %u type %U id %u%s%U", i,
+ tlen, tr->last_or_more, format_ikev2_sa_transform, transform,
+ clib_net_to_host_u16 (tr->transform_id),
+ tlen > sizeof (*tr) ? " attrs " : "", format_hex_bytes,
+ tr->attributes, tlen - sizeof (*tr));
+
+ transform_ptr += tlen;
+ }
+
+ proposal_ptr += clib_net_to_host_u16 (sap->proposal_len);
+ }
+ while (proposal_ptr < (plen - sizeof (*ikep)) && sap->last_or_more == 2);
+
+ /* data validation */
+ if (proposal_ptr != (plen - sizeof (*ikep)) || sap->last_or_more)
+ goto data_corrupted;
+
+ return v;
+
+data_corrupted:
+ DBG_PLD ("SA payload data corrupted");
+ ikev2_sa_free_proposal_vector (&v);
+ return 0;
+}
+
+ikev2_ts_t *
+ikev2_parse_ts_payload (ike_payload_header_t * ikep)
+{
+ ike_ts_payload_header_t *tsp = (ike_ts_payload_header_t *) ikep;
+ ikev2_ts_t *r = 0, *ts;
+ u8 i;
+
+ for (i = 0; i < tsp->num_ts; i++)
+ {
+ if (tsp->ts[i].ts_type != 7) /* TS_IPV4_ADDR_RANGE */
+ {
+ DBG_PLD ("unsupported TS type received (%u)", tsp->ts[i].ts_type);
+ continue;
+ }
+
+ vec_add2 (r, ts, 1);
+ ts->ts_type = tsp->ts[i].ts_type;
+ ts->protocol_id = tsp->ts[i].protocol_id;
+ ts->start_port = tsp->ts[i].start_port;
+ ts->end_port = tsp->ts[i].end_port;
+ ts->start_addr.as_u32 = tsp->ts[i].start_addr.as_u32;
+ ts->end_addr.as_u32 = tsp->ts[i].end_addr.as_u32;
+ }
+ return r;
+}
+
+ikev2_notify_t *
+ikev2_parse_notify_payload (ike_payload_header_t * ikep)
+{
+ ike_notify_payload_header_t *n = (ike_notify_payload_header_t *) ikep;
+ u32 plen = clib_net_to_host_u16 (ikep->length);
+ ikev2_notify_t *r = 0;
+ u32 spi;
+
+ DBG_PLD ("msg_type %U len %u%s%U",
+ format_ikev2_notify_msg_type, clib_net_to_host_u16 (n->msg_type),
+ plen, plen > sizeof (*n) ? " data " : "",
+ format_hex_bytes, n->payload, plen - sizeof (*n));
+
+ r = vec_new (ikev2_notify_t, 1);
+ r->msg_type = clib_net_to_host_u16 (n->msg_type);
+ r->protocol_id = n->protocol_id;
+
+ if (n->spi_size == 4)
+ {
+ clib_memcpy (&spi, n->payload, n->spi_size);
+ r->spi = clib_net_to_host_u32 (spi);
+ DBG_PLD ("spi %lx", r->spi);
+ }
+ else if (n->spi_size == 0)
+ {
+ r->spi = 0;
+ }
+ else
+ {
+ clib_warning ("invalid SPI Size %d", n->spi_size);
+ }
+
+ if (plen > (sizeof (*n) + n->spi_size))
+ {
+ vec_add (r->data, n->payload + n->spi_size,
+ plen - sizeof (*n) - n->spi_size);
+ }
+
+ return r;
+}
+
+void
+ikev2_parse_vendor_payload (ike_payload_header_t * ikep)
+{
+ u32 plen = clib_net_to_host_u16 (ikep->length);
+ int i;
+ int is_string = 1;
+
+ for (i = 0; i < plen - 4; i++)
+ if (!isprint (ikep->payload[i]))
+ is_string = 0;
+
+ DBG_PLD ("len %u data %s:%U",
+ plen,
+ is_string ? "string" : "hex",
+ is_string ? format_ascii_bytes : format_hex_bytes,
+ ikep->payload, plen - sizeof (*ikep));
+}
+
+ikev2_delete_t *
+ikev2_parse_delete_payload (ike_payload_header_t * ikep)
+{
+ ike_delete_payload_header_t *d = (ike_delete_payload_header_t *) ikep;
+ u32 plen = clib_net_to_host_u16 (ikep->length);
+ ikev2_delete_t *r = 0, *del;
+ u16 num_of_spi = clib_net_to_host_u16 (d->num_of_spi);
+ u16 i = 0;
+
+ DBG_PLD ("protocol_id %u spi_size %u num_of_spi %u len %u%s%U",
+ d->protocol_id, d->spi_size, num_of_spi,
+ plen, plen > sizeof (*d) ? " data " : "",
+ format_hex_bytes, d->spi, plen - sizeof (*d));
+
+ if (d->protocol_id == IKEV2_PROTOCOL_IKE)
+ {
+ r = vec_new (ikev2_delete_t, 1);
+ r->protocol_id = 1;
+ }
+ else
+ {
+ r = vec_new (ikev2_delete_t, num_of_spi);
+ vec_foreach (del, r)
+ {
+ del->protocol_id = d->protocol_id;
+ del->spi = clib_net_to_host_u32 (d->spi[i++]);
+ }
+ }
+
+ return r;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ikev2_priv.h b/src/vnet/ipsec/ikev2_priv.h
new file mode 100644
index 00000000000..9f67ad2ae6f
--- /dev/null
+++ b/src/vnet/ipsec/ikev2_priv.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_ikev2_priv_h__
+#define __included_ikev2_priv_h__
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <vnet/ipsec/ikev2.h>
+
+#include <vppinfra/hash.h>
+#include <vppinfra/elog.h>
+#include <vppinfra/error.h>
+
+#include <openssl/rand.h>
+#include <openssl/dh.h>
+#include <openssl/hmac.h>
+#include <openssl/evp.h>
+
+#define IKEV2_DEBUG_PAYLOAD 1
+
+#if IKEV2_DEBUG_PAYLOAD == 1
+#define DBG_PLD(my_args...) clib_warning(my_args)
+#else
+#define DBG_PLD(my_args...)
+#endif
+
+typedef enum
+{
+ IKEV2_STATE_UNKNOWN,
+ IKEV2_STATE_SA_INIT,
+ IKEV2_STATE_DELETED,
+ IKEV2_STATE_AUTH_FAILED,
+ IKEV2_STATE_AUTHENTICATED,
+ IKEV2_STATE_NOTIFY_AND_DELETE,
+ IKEV2_STATE_TS_UNACCEPTABLE,
+ IKEV2_STATE_NO_PROPOSAL_CHOSEN,
+} ikev2_state_t;
+
+typedef struct
+{
+ ikev2_auth_method_t method:8;
+ u8 *data;
+ u8 hex; /* hex encoding of the shared secret */
+ EVP_PKEY *key;
+} ikev2_auth_t;
+
+typedef enum
+{
+ IKEV2_DH_GROUP_MODP = 0,
+ IKEV2_DH_GROUP_ECP = 1,
+} ikev2_dh_group_t;
+
+typedef struct
+{
+ ikev2_transform_type_t type;
+ union
+ {
+ u16 transform_id;
+ ikev2_transform_encr_type_t encr_type:16;
+ ikev2_transform_prf_type_t prf_type:16;
+ ikev2_transform_integ_type_t integ_type:16;
+ ikev2_transform_dh_type_t dh_type:16;
+ ikev2_transform_esn_type_t esn_type:16;
+ };
+ u8 *attrs;
+ u16 key_len;
+ u16 key_trunc;
+ u16 block_size;
+ u8 dh_group;
+ int nid;
+ const char *dh_p;
+ const char *dh_g;
+ const void *md;
+ const void *cipher;
+} ikev2_sa_transform_t;
+
+typedef struct
+{
+ u8 proposal_num;
+ ikev2_protocol_id_t protocol_id:8;
+ u32 spi;
+ ikev2_sa_transform_t *transforms;
+} ikev2_sa_proposal_t;
+
+typedef struct
+{
+ u8 ts_type;
+ u8 protocol_id;
+ u16 selector_len;
+ u16 start_port;
+ u16 end_port;
+ ip4_address_t start_addr;
+ ip4_address_t end_addr;
+} ikev2_ts_t;
+
+typedef struct
+{
+ ikev2_id_type_t type:8;
+ u8 *data;
+} ikev2_id_t;
+
+typedef struct
+{
+ /* sa proposals vectors */
+ ikev2_sa_proposal_t *i_proposals;
+ ikev2_sa_proposal_t *r_proposals;
+
+ /* Traffic Selectors */
+ ikev2_ts_t *tsi;
+ ikev2_ts_t *tsr;
+
+ /* keys */
+ u8 *sk_ai;
+ u8 *sk_ar;
+ u8 *sk_ei;
+ u8 *sk_er;
+} ikev2_child_sa_t;
+
+typedef struct
+{
+ u8 protocol_id;
+ u32 spi; /* for ESP and AH SPI size is 4, for IKE size is 0 */
+} ikev2_delete_t;
+
+typedef struct
+{
+ u8 protocol_id;
+ u32 spi;
+ ikev2_sa_proposal_t *i_proposal;
+ ikev2_sa_proposal_t *r_proposal;
+ ikev2_ts_t *tsi;
+ ikev2_ts_t *tsr;
+} ikev2_rekey_t;
+
+typedef struct
+{
+ u16 msg_type;
+ u8 protocol_id;
+ u32 spi;
+ u8 *data;
+} ikev2_notify_t;
+
+
+typedef struct
+{
+ ikev2_state_t state;
+ u8 unsupported_cp;
+ u8 initial_contact;
+ ip4_address_t iaddr;
+ ip4_address_t raddr;
+ u64 ispi;
+ u64 rspi;
+ u8 *i_nonce;
+ u8 *r_nonce;
+
+ /* DH data */
+ u16 dh_group;
+ u8 *dh_shared_key;
+ u8 *i_dh_data;
+ u8 *r_dh_data;
+
+ /* sa proposals vectors */
+ ikev2_sa_proposal_t *i_proposals;
+ ikev2_sa_proposal_t *r_proposals;
+
+ /* keys */
+ u8 *sk_d;
+ u8 *sk_ai;
+ u8 *sk_ar;
+ u8 *sk_ei;
+ u8 *sk_er;
+ u8 *sk_pi;
+ u8 *sk_pr;
+
+ /* auth */
+ ikev2_auth_t i_auth;
+ ikev2_auth_t r_auth;
+
+ /* ID */
+ ikev2_id_t i_id;
+ ikev2_id_t r_id;
+
+ /* pending deletes */
+ ikev2_delete_t *del;
+
+ /* pending rekeyings */
+ ikev2_rekey_t *rekey;
+
+ /* packet data */
+ u8 *last_sa_init_req_packet_data;
+ u8 *last_sa_init_res_packet_data;
+
+ /* retransmit */
+ u32 last_msg_id;
+ u8 *last_res_packet_data;
+
+ ikev2_child_sa_t *childs;
+} ikev2_sa_t;
+
+typedef struct
+{
+ u8 *name;
+ u8 is_enabled;
+
+ ikev2_auth_t auth;
+ ikev2_id_t loc_id;
+ ikev2_id_t rem_id;
+ ikev2_ts_t loc_ts;
+ ikev2_ts_t rem_ts;
+} ikev2_profile_t;
+
+typedef struct
+{
+ /* pool of IKEv2 Security Associations */
+ ikev2_sa_t *sas;
+
+ /* hash */
+ uword *sa_by_rspi;
+} ikev2_main_per_thread_data_t;
+
+typedef struct
+{
+ /* pool of IKEv2 profiles */
+ ikev2_profile_t *profiles;
+
+ /* vector of supported transform types */
+ ikev2_sa_transform_t *supported_transforms;
+
+ /* hash */
+ mhash_t profile_index_by_name;
+
+ /* local private key */
+ EVP_PKEY *pkey;
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+
+ ikev2_main_per_thread_data_t *per_thread_data;
+
+} ikev2_main_t;
+
+ikev2_main_t ikev2_main;
+
+void ikev2_sa_free_proposal_vector (ikev2_sa_proposal_t ** v);
+ikev2_sa_transform_t *ikev2_sa_get_td_for_type (ikev2_sa_proposal_t * p,
+ ikev2_transform_type_t type);
+
+/* ikev2_crypto.c */
+u8 *ikev2_calc_prf (ikev2_sa_transform_t * tr, u8 * key, u8 * data);
+u8 *ikev2_calc_prfplus (ikev2_sa_transform_t * tr, u8 * key, u8 * seed,
+ int len);
+u8 *ikev2_calc_integr (ikev2_sa_transform_t * tr, u8 * key, u8 * data,
+ int len);
+u8 *ikev2_decrypt_data (ikev2_sa_t * sa, u8 * data, int len);
+int ikev2_encrypt_data (ikev2_sa_t * sa, u8 * src, u8 * dst);
+void ikev2_generate_dh (ikev2_sa_t * sa, ikev2_sa_transform_t * t);
+int ikev2_verify_sign (EVP_PKEY * pkey, u8 * sigbuf, u8 * data);
+u8 *ikev2_calc_sign (EVP_PKEY * pkey, u8 * data);
+EVP_PKEY *ikev2_load_cert_file (u8 * file);
+EVP_PKEY *ikev2_load_key_file (u8 * file);
+void ikev2_crypto_init (ikev2_main_t * km);
+
+/* ikev2_payload.c */
+typedef struct
+{
+ u8 first_payload_type;
+ u16 last_hdr_off;
+ u8 *data;
+} ikev2_payload_chain_t;
+
+#define ikev2_payload_new_chain(V) vec_validate (V, 0)
+#define ikev2_payload_destroy_chain(V) do { \
+ vec_free((V)->data); \
+ vec_free(V); \
+} while (0)
+
+void ikev2_payload_add_notify (ikev2_payload_chain_t * c, u16 msg_type,
+ u8 * data);
+void ikev2_payload_add_sa (ikev2_payload_chain_t * c,
+ ikev2_sa_proposal_t * proposals);
+void ikev2_payload_add_ke (ikev2_payload_chain_t * c, u16 dh_group,
+ u8 * dh_data);
+void ikev2_payload_add_nonce (ikev2_payload_chain_t * c, u8 * nonce);
+void ikev2_payload_add_id (ikev2_payload_chain_t * c, ikev2_id_t * id,
+ u8 type);
+void ikev2_payload_add_auth (ikev2_payload_chain_t * c, ikev2_auth_t * auth);
+void ikev2_payload_add_ts (ikev2_payload_chain_t * c, ikev2_ts_t * ts,
+ u8 type);
+void ikev2_payload_add_delete (ikev2_payload_chain_t * c, ikev2_delete_t * d);
+void ikev2_payload_chain_add_padding (ikev2_payload_chain_t * c, int bs);
+void ikev2_parse_vendor_payload (ike_payload_header_t * ikep);
+ikev2_sa_proposal_t *ikev2_parse_sa_payload (ike_payload_header_t * ikep);
+ikev2_ts_t *ikev2_parse_ts_payload (ike_payload_header_t * ikep);
+ikev2_delete_t *ikev2_parse_delete_payload (ike_payload_header_t * ikep);
+ikev2_notify_t *ikev2_parse_notify_payload (ike_payload_header_t * ikep);
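+
+/* Typical response-building sequence (illustrative sketch only; the SA
+ * state and transform lookups are assumed to exist in the caller):
+ *
+ * ikev2_payload_chain_t *chain = 0;
+ * ikev2_payload_new_chain (chain);
+ * ikev2_payload_add_sa (chain, sa->r_proposals);
+ * ikev2_payload_add_nonce (chain, sa->r_nonce);
+ * ikev2_payload_chain_add_padding (chain, tr_encr->block_size);
+ * ...
+ * ikev2_payload_destroy_chain (chain);
+ */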
+
+#endif /* __included_ikev2_priv_h__ */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ipsec.api b/src/vnet/ipsec/ipsec.api
new file mode 100644
index 00000000000..178bb757168
--- /dev/null
+++ b/src/vnet/ipsec/ipsec.api
@@ -0,0 +1,457 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \brief IPsec: Add/delete Security Policy Database
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add SPD if non-zero, else delete
+ @param spd_id - SPD instance id (control plane allocated)
+*/
+
+define ipsec_spd_add_del
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u32 spd_id;
+};
+
+/** \brief Reply for IPsec: Add/delete Security Policy Database
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define ipsec_spd_add_del_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPsec: Add/delete SPD from interface
+
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add the SPD binding if non-zero, else delete
+ @param sw_if_index - index of the interface
+ @param spd_id - SPD instance id to use for lookups
+*/
+
+
+define ipsec_interface_add_del_spd
+{
+ u32 client_index;
+ u32 context;
+
+ u8 is_add;
+ u32 sw_if_index;
+ u32 spd_id;
+};
+
+/** \brief Reply for IPsec: Add/delete SPD from interface
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define ipsec_interface_add_del_spd_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPsec: Add/delete Security Policy Database entry
+
+ See RFC 4301, 4.4.1.1 on how to match packet to selectors
+
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add SPD entry if non-zero, else delete
+ @param spd_id - SPD instance id (control plane allocated)
+ @param priority - priority of SPD entry (non-unique value). Used to order SPD matching - higher priorities match before lower
+ @param is_outbound - entry applies to outbound traffic if non-zero, otherwise applies to inbound traffic
+ @param is_ipv6 - remote/local address are IPv6 if non-zero, else IPv4
+ @param remote_address_start - start of remote address range to match
+ @param remote_address_stop - end of remote address range to match
+ @param local_address_start - start of local address range to match
+ @param local_address_stop - end of local address range to match
+ @param protocol - protocol type to match [0 means any]
+ @param remote_port_start - start of remote port range to match ...
+ @param remote_port_stop - end of remote port range to match [0 to 65535 means ANY, 65535 to 0 means OPAQUE]
+ @param local_port_start - start of local port range to match ...
+ @param local_port_stop - end of local port range to match [0 to 65535 means ANY, 65535 to 0 means OPAQUE]
+ @param policy - 0 = bypass (no IPsec processing), 1 = discard (discard packet with ICMP processing), 2 = resolve (send request to control plane for SA resolving, and discard without ICMP processing), 3 = protect (apply IPsec policy using following parameters)
+ @param sa_id - SAD instance id (control plane allocated)
+
+*/
+
+define ipsec_spd_add_del_entry
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+
+ u32 spd_id;
+ i32 priority;
+ u8 is_outbound;
+
+ // Selector
+ u8 is_ipv6;
+ u8 is_ip_any;
+ u8 remote_address_start[16];
+ u8 remote_address_stop[16];
+ u8 local_address_start[16];
+ u8 local_address_stop[16];
+
+ u8 protocol;
+
+ u16 remote_port_start;
+ u16 remote_port_stop;
+ u16 local_port_start;
+ u16 local_port_stop;
+
+ // Policy
+ u8 policy;
+ u32 sa_id;
+};
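+
+/* Example (informal, values are illustrative): a catch-all inbound
+ * bypass entry would set is_add=1, spd_id=<spd>, priority=100,
+ * is_outbound=0, is_ip_any=1, protocol=0 (any), remote/local port
+ * ranges 0..65535, policy=0 (bypass) and sa_id=0. */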
+
+/** \brief Reply for IPsec: Add/delete Security Policy Database entry
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define ipsec_spd_add_del_entry_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPsec: Add/delete Security Association Database entry
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add SAD entry if non-zero, else delete
+
+ @param sad_id - sad id
+
+ @param spi - security parameter index
+
+ @param protocol - 0 = AH, 1 = ESP
+
+ @param crypto_algorithm - 0 = Null, 1 = AES-CBC-128, 2 = AES-CBC-192, 3 = AES-CBC-256, 4 = 3DES-CBC
+ @param crypto_key_length - length of crypto_key in bytes
+ @param crypto_key - crypto keying material
+
+ @param integrity_algorithm - 0 = None, 1 = MD5-96, 2 = SHA1-96, 3 = SHA-256, 4 = SHA-384, 5=SHA-512
+ @param integrity_key_length - length of integrity_key in bytes
+ @param integrity_key - integrity keying material
+
+ @param use_extended_sequence_number - use ESN when non-zero
+
+ @param is_tunnel - IPsec tunnel mode if non-zero, else transport mode
+ @param is_tunnel_ipv6 - IPsec tunnel is IPv6 if non-zero, else IPv4; only valid if is_tunnel is non-zero
+ @param tunnel_src_address - IPsec tunnel source address IPv6 if is_tunnel_ipv6 is non-zero, else IPv4. Only valid if is_tunnel is non-zero
+ @param tunnel_dst_address - IPsec tunnel destination address IPv6 if is_tunnel_ipv6 is non-zero, else IPv4. Only valid if is_tunnel is non-zero
+
+ To be added:
+ Anti-replay
+ IPsec tunnel address copy mode (to support GDOI)
+ */
+
+define ipsec_sad_add_del_entry
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+
+ u32 sad_id;
+
+ u32 spi;
+
+ u8 protocol;
+
+ u8 crypto_algorithm;
+ u8 crypto_key_length;
+ u8 crypto_key[128];
+
+ u8 integrity_algorithm;
+ u8 integrity_key_length;
+ u8 integrity_key[128];
+
+ u8 use_extended_sequence_number;
+
+ u8 is_tunnel;
+ u8 is_tunnel_ipv6;
+ u8 tunnel_src_address[16];
+ u8 tunnel_dst_address[16];
+};
+
+/** \brief Reply for IPsec: Add/delete Security Association Database entry
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define ipsec_sad_add_del_entry_reply
+{
+ u32 context;
+ i32 retval;
+};
+
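Similarly, a minimal sketch of populating this request for an ESP SA in IPv4 tunnel mode with AES-CBC-128 and SHA1-96. The key bytes are placeholders, not real keying material; byte-order conversions again mirror the handler in ipsec_api.c later in this patch.

/* Sketch only: vl_api_ipsec_sad_add_del_entry_t is the generated type. */
static void
example_fill_sad_entry (vl_api_ipsec_sad_add_del_entry_t * mp)
{
  u8 src[4] = { 192, 168, 1, 1 }, dst[4] = { 192, 168, 1, 2 };

  memset (mp, 0, sizeof (*mp));
  mp->is_add = 1;
  mp->sad_id = htonl (10);
  mp->spi = htonl (1001);
  mp->protocol = 1;                     /* 1 = ESP (AH is rejected) */

  mp->crypto_algorithm = 1;             /* 1 = AES-CBC-128 */
  mp->crypto_key_length = 16;
  memset (mp->crypto_key, 0x2b, 16);    /* placeholder key bytes */

  mp->integrity_algorithm = 2;          /* 2 = SHA1-96 */
  mp->integrity_key_length = 20;
  memset (mp->integrity_key, 0x4a, 20); /* placeholder key bytes */

  mp->is_tunnel = 1;
  mp->is_tunnel_ipv6 = 0;
  clib_memcpy (mp->tunnel_src_address, src, 4);
  clib_memcpy (mp->tunnel_dst_address, dst, 4);
}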
+/** \brief IPsec: Update Security Association keys
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+
+ @param sa_id - sa id
+
+ @param crypto_key_length - length of crypto_key in bytes
+ @param crypto_key - crypto keying material
+
+ @param integrity_key_length - length of integrity_key in bytes
+ @param integrity_key - integrity keying material
+*/
+
+define ipsec_sa_set_key
+{
+ u32 client_index;
+ u32 context;
+
+ u32 sa_id;
+
+ u8 crypto_key_length;
+ u8 crypto_key[128];
+
+ u8 integrity_key_length;
+ u8 integrity_key[128];
+};
+
+/** \brief Reply for IPsec: Update Security Association keys
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define ipsec_sa_set_key_reply
+{
+ u32 context;
+ i32 retval;
+};
+
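A rekey request carries a strict subset of the SA fields; a minimal sketch with placeholder key bytes:

/* Sketch only: vl_api_ipsec_sa_set_key_t is the generated type. */
static void
example_fill_sa_set_key (vl_api_ipsec_sa_set_key_t * mp)
{
  memset (mp, 0, sizeof (*mp));
  mp->sa_id = htonl (10);
  mp->crypto_key_length = 16;
  memset (mp->crypto_key, 0x3c, 16);    /* placeholder key bytes */
  mp->integrity_key_length = 20;
  memset (mp->integrity_key, 0x5d, 20); /* placeholder key bytes */
}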
+/** \brief IKEv2: Add/delete profile
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+
+ @param name - IKEv2 profile name
+ @param is_add - Add IKEv2 profile if non-zero, else delete
+*/
+define ikev2_profile_add_del
+{
+ u32 client_index;
+ u32 context;
+
+ u8 name[64];
+ u8 is_add;
+};
+
+/** \brief Reply for IKEv2: Add/delete profile
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define ikev2_profile_add_del_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IKEv2: Set IKEv2 profile authentication method
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+
+ @param name - IKEv2 profile name
+ @param auth_method - IKEv2 authentication method (shared-key-mic/rsa-sig)
+ @param is_hex - Authentication data in hex format if non-zero, else string
+ @param data_len - Authentication data length
+ @param data - Authentication data (for rsa-sig cert file path)
+*/
+define ikev2_profile_set_auth
+{
+ u32 client_index;
+ u32 context;
+
+ u8 name[64];
+ u8 auth_method;
+ u8 is_hex;
+ u32 data_len;
+ u8 data[0];
+};
+
+/** \brief Reply for IKEv2: Set IKEv2 profile authentication method
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define ikev2_profile_set_auth_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IKEv2: Set IKEv2 profile local/remote identification
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+
+ @param name - IKEv2 profile name
+ @param is_local - Identification is local if non-zero, else remote
+ @param id_type - Identification type
+ @param data_len - Identification data length
+ @param data - Identification data
+*/
+define ikev2_profile_set_id
+{
+ u32 client_index;
+ u32 context;
+
+ u8 name[64];
+ u8 is_local;
+ u8 id_type;
+ u32 data_len;
+ u8 data[0];
+};
+
+/** \brief Reply for IKEv2: Set IKEv2 profile local/remote identification
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define ikev2_profile_set_id_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IKEv2: Set IKEv2 profile traffic selector parameters
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+
+ @param name - IKEv2 profile name
+ @param is_local - Traffic selector is local if non-zero, else remote
+ @param proto - Traffic selector IP protocol (if zero not relevant)
+ @param start_port - The smallest port number allowed by traffic selector
+ @param end_port - The largest port number allowed by traffic selector
+ @param start_addr - The smallest address included in traffic selector
+ @param end_addr - The largest address included in traffic selector
+*/
+define ikev2_profile_set_ts
+{
+ u32 client_index;
+ u32 context;
+
+ u8 name[64];
+ u8 is_local;
+ u8 proto;
+ u16 start_port;
+ u16 end_port;
+ u32 start_addr;
+ u32 end_addr;
+};
+
+/** \brief Reply for IKEv2: Set IKEv2 profile traffic selector parameters
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define ikev2_profile_set_ts_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IKEv2: Set IKEv2 local RSA private key
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+
+ @param key_file - Key file absolute path
+*/
+define ikev2_set_local_key
+{
+ u32 client_index;
+ u32 context;
+
+ u8 key_file[256];
+};
+
+/** \brief Reply for IKEv2: Set IKEv2 local key
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define ikev2_set_local_key_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Dump ipsec policy database data
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param spd_id - SPD instance id
+ @param sa_id - SA id, optional, set to ~0 to see all policies in SPD
+*/
+define ipsec_spd_dump
+{
+ u32 client_index;
+ u32 context;
+ u32 spd_id;
+ u32 sa_id;
+};
+
+/** \brief IPsec policy database response
+ @param context - sender context which was passed in the request
+ @param spd_id - SPD instance id
+ @param priority - numeric value to control policy evaluation order
+ @param is_outbound - [1|0] to indicate if direction is [out|in]bound
+ @param is_ipv6 - [1|0] to indicate if address family is ipv[6|4]
+ @param local_start_addr - first address in local traffic selector range
+ @param local_stop_addr - last address in local traffic selector range
+ @param local_start_port - first port in local traffic selector range
+ @param local_stop_port - last port in local traffic selector range
+ @param remote_start_addr - first address in remote traffic selector range
+ @param remote_stop_addr - last address in remote traffic selector range
+ @param remote_start_port - first port in remote traffic selector range
+ @param remote_stop_port - last port in remote traffic selector range
+ @param protocol - traffic selector protocol
+ @param policy - policy action
+ @param sa_id - SA id
+ @param bytes - byte count of packets matching this policy
+ @param packets - count of packets matching this policy
+*/
+
+define ipsec_spd_details
+{
+ u32 context;
+ u32 spd_id;
+ i32 priority;
+ u8 is_outbound;
+ u8 is_ipv6;
+ u8 local_start_addr[16];
+ u8 local_stop_addr[16];
+ u16 local_start_port;
+ u16 local_stop_port;
+ u8 remote_start_addr[16];
+ u8 remote_stop_addr[16];
+ u16 remote_start_port;
+ u16 remote_stop_port;
+ u8 protocol;
+ u8 policy;
+ u32 sa_id;
+ u64 bytes;
+ u64 packets;
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
+ \ No newline at end of file
diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c
new file mode 100644
index 00000000000..ee85c402e86
--- /dev/null
+++ b/src/vnet/ipsec/ipsec.c
@@ -0,0 +1,581 @@
+/*
+ * ipsec.c : IPSec policy and SA management
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+#include <vnet/interface.h>
+
+#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ikev2.h>
+
+#if DPDK_CRYPTO==1
+#include <vnet/devices/dpdk/ipsec/esp.h>
+#define ESP_NODE "dpdk-esp-encrypt"
+#else
+#include <vnet/ipsec/esp.h>
+#define ESP_NODE "esp-encrypt"
+#endif
+
+#if DPDK_CRYPTO==0
+/* dummy function */
+static int
+add_del_sa_sess (u32 sa_index, u8 is_add)
+{
+ return 0;
+}
+#endif
+
+u32
+ipsec_get_sa_index_by_sa_id (u32 sa_id)
+{
+ ipsec_main_t *im = &ipsec_main;
+ uword *p = hash_get (im->sa_index_by_sa_id, sa_id);
+ if (!p)
+ return ~0;
+
+ return p[0];
+}
+
+int
+ipsec_set_interface_spd (vlib_main_t * vm, u32 sw_if_index, u32 spd_id,
+ int is_add)
+{
+ ipsec_main_t *im = &ipsec_main;
+ ip4_ipsec_config_t config;
+
+ u32 spd_index;
+ uword *p;
+
+ p = hash_get (im->spd_index_by_spd_id, spd_id);
+ if (!p)
+ return VNET_API_ERROR_SYSCALL_ERROR_1; /* no such spd-id */
+
+ spd_index = p[0];
+
+ p = hash_get (im->spd_index_by_sw_if_index, sw_if_index);
+ if (p && is_add)
+ return VNET_API_ERROR_SYSCALL_ERROR_1; /* spd already assigned */
+
+ if (is_add)
+ {
+ hash_set (im->spd_index_by_sw_if_index, sw_if_index, spd_index);
+ }
+ else
+ {
+ hash_unset (im->spd_index_by_sw_if_index, sw_if_index);
+ }
+
+ clib_warning ("sw_if_index %u spd_id %u spd_index %u",
+ sw_if_index, spd_id, spd_index);
+
+ /* enable IPsec on TX */
+ vnet_feature_enable_disable ("ip4-output", "ipsec-output-ip4", sw_if_index,
+ is_add, 0, 0);
+ vnet_feature_enable_disable ("ip6-output", "ipsec-output-ip6", sw_if_index,
+ is_add, 0, 0);
+
+ /* enable IPsec on RX */
+ vnet_feature_enable_disable ("ip4-unicast", "ipsec-input-ip4", sw_if_index,
+ is_add, &config, sizeof (config));
+ vnet_feature_enable_disable ("ip6-unicast", "ipsec-input-ip6", sw_if_index,
+ is_add, &config, sizeof (config));
+
+ return 0;
+}
+
+int
+ipsec_add_del_spd (vlib_main_t * vm, u32 spd_id, int is_add)
+{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_spd_t *spd = 0;
+ uword *p;
+ u32 spd_index, k, v;
+
+ p = hash_get (im->spd_index_by_spd_id, spd_id);
+ if (p && is_add)
+ return VNET_API_ERROR_INVALID_VALUE;
+ if (!p && !is_add)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ if (!is_add) /* delete */
+ {
+ spd_index = p[0];
+ spd = pool_elt_at_index (im->spds, spd_index);
+ if (!spd)
+ return VNET_API_ERROR_INVALID_VALUE;
+ /* *INDENT-OFF* */
+ hash_foreach (k, v, im->spd_index_by_sw_if_index, ({
+ if (v == spd_index)
+ ipsec_set_interface_spd(vm, k, spd_id, 0);
+ }));
+ /* *INDENT-ON* */
+ hash_unset (im->spd_index_by_spd_id, spd_id);
+ pool_free (spd->policies);
+ vec_free (spd->ipv4_outbound_policies);
+ vec_free (spd->ipv6_outbound_policies);
+ vec_free (spd->ipv4_inbound_protect_policy_indices);
+ vec_free (spd->ipv4_inbound_policy_discard_and_bypass_indices);
+ pool_put (im->spds, spd);
+ }
+ else /* create new SPD */
+ {
+ pool_get (im->spds, spd);
+ memset (spd, 0, sizeof (*spd));
+ spd_index = spd - im->spds;
+ spd->id = spd_id;
+ hash_set (im->spd_index_by_spd_id, spd_id, spd_index);
+ }
+ return 0;
+}
+
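Taken together, the two functions above give the usual bring-up order: create the SPD, then bind it to an interface, which also enables the ipsec-input/ipsec-output features. A minimal sketch, with illustrative ids:

/* Sketch: create SPD 1 and bind it to sw_if_index 2. */
static int
example_attach_spd (vlib_main_t * vm)
{
  int rv;

  rv = ipsec_add_del_spd (vm, 1 /* spd_id */, 1 /* is_add */);
  if (rv)
    return rv;

  return ipsec_set_interface_spd (vm, 2 /* sw_if_index */, 1 /* spd_id */,
                                  1 /* is_add */);
}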
+static int
+ipsec_spd_entry_sort (void *a1, void *a2)
+{
+ ipsec_main_t *im = &ipsec_main;
+ u32 *id1 = a1;
+ u32 *id2 = a2;
+ ipsec_spd_t *spd;
+ ipsec_policy_t *p1, *p2;
+
+ /* *INDENT-OFF* */
+ pool_foreach (spd, im->spds, ({
+ p1 = pool_elt_at_index(spd->policies, *id1);
+ p2 = pool_elt_at_index(spd->policies, *id2);
+ if (p1 && p2)
+ return p2->priority - p1->priority;
+ }));
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
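Note that the comparator returns p2->priority - p1->priority, so vec_sort_with_function() leaves the highest-priority policy index first and the lookup path can take the first match. A small illustration with made-up priorities:

/* Policies: index 0 -> priority 10, index 1 -> 100, index 2 -> 50.
 * Sorting the index vector {0, 1, 2} with ipsec_spd_entry_sort
 * yields {1, 2, 0}: highest priority first. */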
+int
+ipsec_add_del_policy (vlib_main_t * vm, ipsec_policy_t * policy, int is_add)
+{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_spd_t *spd = 0;
+ ipsec_policy_t *vp;
+ uword *p;
+ u32 spd_index;
+
+ clib_warning ("policy-id %u priority %d is_outbound %u", policy->id,
+ policy->priority, policy->is_outbound);
+
+ if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
+ {
+ p = hash_get (im->sa_index_by_sa_id, policy->sa_id);
+ if (!p)
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ policy->sa_index = p[0];
+ }
+
+ p = hash_get (im->spd_index_by_spd_id, policy->id);
+
+ if (!p)
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+
+ spd_index = p[0];
+ spd = pool_elt_at_index (im->spds, spd_index);
+ if (!spd)
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+
+ if (is_add)
+ {
+ u32 policy_index;
+
+ pool_get (spd->policies, vp);
+ clib_memcpy (vp, policy, sizeof (*vp));
+ policy_index = vp - spd->policies;
+
+ if (policy->is_outbound)
+ {
+ if (policy->is_ipv6)
+ {
+ vec_add1 (spd->ipv6_outbound_policies, policy_index);
+ clib_memcpy (vp, policy, sizeof (ipsec_policy_t));
+ vec_sort_with_function (spd->ipv6_outbound_policies,
+ ipsec_spd_entry_sort);
+ }
+ else
+ {
+ vec_add1 (spd->ipv4_outbound_policies, policy_index);
+ clib_memcpy (vp, policy, sizeof (ipsec_policy_t));
+ vec_sort_with_function (spd->ipv4_outbound_policies,
+ ipsec_spd_entry_sort);
+ }
+ }
+ else
+ {
+ if (policy->is_ipv6)
+ {
+ if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
+ {
+ vec_add1 (spd->ipv6_inbound_protect_policy_indices,
+ policy_index);
+ clib_memcpy (vp, policy, sizeof (ipsec_policy_t));
+ vec_sort_with_function
+ (spd->ipv6_inbound_protect_policy_indices,
+ ipsec_spd_entry_sort);
+ }
+ else
+ {
+ vec_add1
+ (spd->ipv6_inbound_policy_discard_and_bypass_indices,
+ policy_index);
+ clib_memcpy (vp, policy, sizeof (ipsec_policy_t));
+ vec_sort_with_function
+ (spd->ipv6_inbound_policy_discard_and_bypass_indices,
+ ipsec_spd_entry_sort);
+ }
+ }
+ else
+ {
+ if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
+ {
+ vec_add1 (spd->ipv4_inbound_protect_policy_indices,
+ policy_index);
+ clib_memcpy (vp, policy, sizeof (ipsec_policy_t));
+ vec_sort_with_function
+ (spd->ipv4_inbound_protect_policy_indices,
+ ipsec_spd_entry_sort);
+ }
+ else
+ {
+ vec_add1
+ (spd->ipv4_inbound_policy_discard_and_bypass_indices,
+ policy_index);
+ clib_memcpy (vp, policy, sizeof (ipsec_policy_t));
+ vec_sort_with_function
+ (spd->ipv4_inbound_policy_discard_and_bypass_indices,
+ ipsec_spd_entry_sort);
+ }
+ }
+ }
+
+ }
+ else
+ {
+ u32 i, j;
+ /* *INDENT-OFF* */
+ pool_foreach_index(i, spd->policies, ({
+ vp = pool_elt_at_index(spd->policies, i);
+ if (vp->priority != policy->priority)
+ continue;
+ if (vp->is_outbound != policy->is_outbound)
+ continue;
+ if (vp->policy != policy->policy)
+ continue;
+ if (vp->sa_id != policy->sa_id)
+ continue;
+ if (vp->protocol != policy->protocol)
+ continue;
+ if (vp->lport.start != policy->lport.start)
+ continue;
+ if (vp->lport.stop != policy->lport.stop)
+ continue;
+ if (vp->rport.start != policy->rport.start)
+ continue;
+ if (vp->rport.stop != policy->rport.stop)
+ continue;
+ if (vp->is_ipv6 != policy->is_ipv6)
+ continue;
+ if (policy->is_ipv6)
+ {
+ if (vp->laddr.start.ip6.as_u64[0] != policy->laddr.start.ip6.as_u64[0])
+ continue;
+ if (vp->laddr.start.ip6.as_u64[1] != policy->laddr.start.ip6.as_u64[1])
+ continue;
+ if (vp->laddr.stop.ip6.as_u64[0] != policy->laddr.stop.ip6.as_u64[0])
+ continue;
+ if (vp->laddr.stop.ip6.as_u64[1] != policy->laddr.stop.ip6.as_u64[1])
+ continue;
+ if (vp->raddr.start.ip6.as_u64[0] != policy->raddr.start.ip6.as_u64[0])
+ continue;
+ if (vp->raddr.start.ip6.as_u64[1] != policy->raddr.start.ip6.as_u64[1])
+ continue;
+ if (vp->raddr.stop.ip6.as_u64[0] != policy->raddr.stop.ip6.as_u64[0])
+ continue;
+ if (vp->raddr.stop.ip6.as_u64[1] != policy->raddr.stop.ip6.as_u64[1])
+ continue;
+ if (policy->is_outbound)
+ {
+ vec_foreach_index(j, spd->ipv6_outbound_policies) {
+ if (vec_elt(spd->ipv6_outbound_policies, j) == i) {
+ vec_del1 (spd->ipv6_outbound_policies, j);
+ break;
+ }
+ }
+ }
+ else
+ {
+ if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
+ {
+ vec_foreach_index(j, spd->ipv6_inbound_protect_policy_indices) {
+ if (vec_elt(spd->ipv6_inbound_protect_policy_indices, j) == i) {
+ vec_del1 (spd->ipv6_inbound_protect_policy_indices, j);
+ break;
+ }
+ }
+ }
+ else
+ {
+ vec_foreach_index(j, spd->ipv6_inbound_policy_discard_and_bypass_indices) {
+ if (vec_elt(spd->ipv6_inbound_policy_discard_and_bypass_indices, j) == i) {
+ vec_del1 (spd->ipv6_inbound_policy_discard_and_bypass_indices, j);
+ break;
+ }
+ }
+ }
+ }
+ }
+ else
+ {
+ if (vp->laddr.start.ip4.as_u32 != policy->laddr.start.ip4.as_u32)
+ continue;
+ if (vp->laddr.stop.ip4.as_u32 != policy->laddr.stop.ip4.as_u32)
+ continue;
+ if (vp->raddr.start.ip4.as_u32 != policy->raddr.start.ip4.as_u32)
+ continue;
+ if (vp->raddr.stop.ip4.as_u32 != policy->raddr.stop.ip4.as_u32)
+ continue;
+ if (policy->is_outbound)
+ {
+ vec_foreach_index(j, spd->ipv4_outbound_policies) {
+ if (vec_elt(spd->ipv4_outbound_policies, j) == i) {
+ vec_del1 (spd->ipv4_outbound_policies, j);
+ break;
+ }
+ }
+ }
+ else
+ {
+ if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
+ {
+ vec_foreach_index(j, spd->ipv4_inbound_protect_policy_indices) {
+ if (vec_elt(spd->ipv4_inbound_protect_policy_indices, j) == i) {
+ vec_del1 (spd->ipv4_inbound_protect_policy_indices, j);
+ break;
+ }
+ }
+ }
+ else
+ {
+ vec_foreach_index(j, spd->ipv4_inbound_policy_discard_and_bypass_indices) {
+ if (vec_elt(spd->ipv4_inbound_policy_discard_and_bypass_indices, j) == i) {
+ vec_del1 (spd->ipv4_inbound_policy_discard_and_bypass_indices, j);
+ break;
+ }
+ }
+ }
+ }
+ pool_put (spd->policies, vp);
+ break;
+ }
+ }));
+ /* *INDENT-ON* */
+ }
+
+ return 0;
+}
+
+static u8
+ipsec_is_sa_used (u32 sa_index)
+{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_spd_t *spd;
+ ipsec_policy_t *p;
+ ipsec_tunnel_if_t *t;
+
+ /* *INDENT-OFF* */
+ pool_foreach(spd, im->spds, ({
+ pool_foreach(p, spd->policies, ({
+ if (p->policy == IPSEC_POLICY_ACTION_PROTECT)
+ {
+ if (p->sa_index == sa_index)
+ return 1;
+ }
+ }));
+ }));
+
+ pool_foreach(t, im->tunnel_interfaces, ({
+ if (t->input_sa_index == sa_index)
+ return 1;
+ if (t->output_sa_index == sa_index)
+ return 1;
+ }));
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
+int
+ipsec_add_del_sa (vlib_main_t * vm, ipsec_sa_t * new_sa, int is_add)
+{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_t *sa = 0;
+ uword *p;
+ u32 sa_index;
+
+ clib_warning ("id %u spi %u", new_sa->id, new_sa->spi);
+
+ p = hash_get (im->sa_index_by_sa_id, new_sa->id);
+ if (p && is_add)
+ return VNET_API_ERROR_SYSCALL_ERROR_1; /* already exists */
+ if (!p && !is_add)
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+
+ if (!is_add) /* delete */
+ {
+ sa_index = p[0];
+ sa = pool_elt_at_index (im->sad, sa_index);
+ if (ipsec_is_sa_used (sa_index))
+ {
+ clib_warning ("sa_id %u used in policy", sa->id);
+ return VNET_API_ERROR_SYSCALL_ERROR_1; /* sa used in policy */
+ }
+ hash_unset (im->sa_index_by_sa_id, sa->id);
+ add_del_sa_sess (sa_index, is_add);
+ pool_put (im->sad, sa);
+ }
+ else /* create new SA */
+ {
+ pool_get (im->sad, sa);
+ clib_memcpy (sa, new_sa, sizeof (*sa));
+ sa_index = sa - im->sad;
+ hash_set (im->sa_index_by_sa_id, sa->id, sa_index);
+ if (add_del_sa_sess (sa_index, is_add) < 0)
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ }
+ return 0;
+}
+
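For reference, a minimal sketch of creating an SA through this function, using the ipsec_sa_t layout from ipsec.h (added later in this patch); key bytes are placeholders:

/* Sketch: programmatic ESP SA creation; values are illustrative. */
static int
example_add_sa (vlib_main_t * vm)
{
  ipsec_sa_t sa;

  memset (&sa, 0, sizeof (sa));
  sa.id = 10;
  sa.spi = 1001;
  sa.protocol = IPSEC_PROTOCOL_ESP;

  sa.crypto_alg = IPSEC_CRYPTO_ALG_AES_CBC_128;
  sa.crypto_key_len = 16;
  memset (sa.crypto_key, 0x2b, sa.crypto_key_len);  /* placeholder */

  sa.integ_alg = IPSEC_INTEG_ALG_SHA1_96;
  sa.integ_key_len = 20;
  memset (sa.integ_key, 0x4a, sa.integ_key_len);    /* placeholder */

  return ipsec_add_del_sa (vm, &sa, 1 /* is_add */);
}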
+int
+ipsec_set_sa_key (vlib_main_t * vm, ipsec_sa_t * sa_update)
+{
+ ipsec_main_t *im = &ipsec_main;
+ uword *p;
+ u32 sa_index;
+ ipsec_sa_t *sa = 0;
+
+ p = hash_get (im->sa_index_by_sa_id, sa_update->id);
+ if (!p)
+ return VNET_API_ERROR_SYSCALL_ERROR_1; /* no such sa-id */
+
+ sa_index = p[0];
+ sa = pool_elt_at_index (im->sad, sa_index);
+
+ /* new crypto key */
+ if (0 < sa_update->crypto_key_len)
+ {
+ clib_memcpy (sa->crypto_key, sa_update->crypto_key,
+ sa_update->crypto_key_len);
+ sa->crypto_key_len = sa_update->crypto_key_len;
+ }
+
+ /* new integ key */
+ if (0 < sa_update->integ_key_len)
+ {
+ clib_memcpy (sa->integ_key, sa_update->integ_key,
+ sa_update->integ_key_len);
+ sa->integ_key_len = sa_update->integ_key_len;
+ }
+
+ if (sa->crypto_key_len + sa->integ_key_len > 0)
+ {
+ if (add_del_sa_sess (sa_index, 0) < 0)
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ }
+
+ return 0;
+}
+
+static void
+ipsec_rand_seed (void)
+{
+ struct
+ {
+ time_t time;
+ pid_t pid;
+ void *p;
+ } seed_data;
+
+ seed_data.time = time (NULL);
+ seed_data.pid = getpid ();
+ seed_data.p = (void *) &seed_data;
+
+ RAND_seed ((const void *) &seed_data, sizeof (seed_data));
+}
+
+static clib_error_t *
+ipsec_init (vlib_main_t * vm)
+{
+ clib_error_t *error;
+ ipsec_main_t *im = &ipsec_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ vlib_node_t *node;
+
+ ipsec_rand_seed ();
+
+ memset (im, 0, sizeof (im[0]));
+
+ im->vnet_main = vnet_get_main ();
+ im->vlib_main = vm;
+
+ im->spd_index_by_spd_id = hash_create (0, sizeof (uword));
+ im->sa_index_by_sa_id = hash_create (0, sizeof (uword));
+ im->spd_index_by_sw_if_index = hash_create (0, sizeof (uword));
+
+ vec_validate_aligned (im->empty_buffers, tm->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ node = vlib_get_node_by_name (vm, (u8 *) "error-drop");
+ ASSERT (node);
+ im->error_drop_node_index = node->index;
+
+ node = vlib_get_node_by_name (vm, (u8 *) ESP_NODE);
+
+ ASSERT (node);
+ im->esp_encrypt_node_index = node->index;
+
+ node = vlib_get_node_by_name (vm, (u8 *) "ip4-lookup");
+ ASSERT (node);
+ im->ip4_lookup_node_index = node->index;
+
+ if ((error = vlib_call_init_function (vm, ipsec_cli_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, ipsec_tunnel_if_init)))
+ return error;
+
+ esp_init ();
+
+ if ((error = ikev2_init (vm)))
+ return error;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ipsec_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h
new file mode 100644
index 00000000000..32c7edfc29d
--- /dev/null
+++ b/src/vnet/ipsec/ipsec.h
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __IPSEC_H__
+#define __IPSEC_H__
+
+#define IPSEC_FLAG_IPSEC_GRE_TUNNEL (1 << 0)
+
+#define foreach_ipsec_policy_action \
+ _(0, BYPASS, "bypass") \
+ _(1, DISCARD, "discard") \
+ _(2, RESOLVE, "resolve") \
+ _(3, PROTECT, "protect")
+
+typedef enum
+{
+#define _(v,f,s) IPSEC_POLICY_ACTION_##f = v,
+ foreach_ipsec_policy_action
+#undef _
+ IPSEC_POLICY_N_ACTION,
+} ipsec_policy_action_t;
+
+#if DPDK_CRYPTO==1
+#define foreach_ipsec_crypto_alg \
+ _(0, NONE, "none") \
+ _(1, AES_CBC_128, "aes-cbc-128") \
+ _(2, AES_CBC_192, "aes-cbc-192") \
+ _(3, AES_CBC_256, "aes-cbc-256") \
+ _(4, AES_GCM_128, "aes-gcm-128")
+#else
+#define foreach_ipsec_crypto_alg \
+ _(0, NONE, "none") \
+ _(1, AES_CBC_128, "aes-cbc-128") \
+ _(2, AES_CBC_192, "aes-cbc-192") \
+ _(3, AES_CBC_256, "aes-cbc-256")
+#endif
+
+typedef enum
+{
+#define _(v,f,s) IPSEC_CRYPTO_ALG_##f = v,
+ foreach_ipsec_crypto_alg
+#undef _
+ IPSEC_CRYPTO_N_ALG,
+} ipsec_crypto_alg_t;
+
+#if DPDK_CRYPTO==1
+#define foreach_ipsec_integ_alg \
+ _(0, NONE, "none") \
+ _(1, MD5_96, "md5-96") /* RFC2403 */ \
+ _(2, SHA1_96, "sha1-96") /* RFC2404 */ \
+ _(3, SHA_256_96, "sha-256-96") /* draft-ietf-ipsec-ciph-sha-256-00 */ \
+ _(4, SHA_256_128, "sha-256-128") /* RFC4868 */ \
+ _(5, SHA_384_192, "sha-384-192") /* RFC4868 */ \
+ _(6, SHA_512_256, "sha-512-256") /* RFC4868 */ \
+ _(7, AES_GCM_128, "aes-gcm-128")
+#else
+#define foreach_ipsec_integ_alg \
+ _(0, NONE, "none") \
+ _(1, MD5_96, "md5-96") /* RFC2403 */ \
+ _(2, SHA1_96, "sha1-96") /* RFC2404 */ \
+ _(3, SHA_256_96, "sha-256-96") /* draft-ietf-ipsec-ciph-sha-256-00 */ \
+ _(4, SHA_256_128, "sha-256-128") /* RFC4868 */ \
+ _(5, SHA_384_192, "sha-384-192") /* RFC4868 */ \
+ _(6, SHA_512_256, "sha-512-256") /* RFC4868 */
+#endif
+
+typedef enum
+{
+#define _(v,f,s) IPSEC_INTEG_ALG_##f = v,
+ foreach_ipsec_integ_alg
+#undef _
+ IPSEC_INTEG_N_ALG,
+} ipsec_integ_alg_t;
+
+typedef enum
+{
+ IPSEC_PROTOCOL_AH = 0,
+ IPSEC_PROTOCOL_ESP = 1
+} ipsec_protocol_t;
+
+typedef struct
+{
+ u32 id;
+ u32 spi;
+ ipsec_protocol_t protocol;
+
+ ipsec_crypto_alg_t crypto_alg;
+ u8 crypto_key_len;
+ u8 crypto_key[128];
+
+ ipsec_integ_alg_t integ_alg;
+ u8 integ_key_len;
+ u8 integ_key[128];
+
+ u8 use_esn;
+ u8 use_anti_replay;
+
+ u8 is_tunnel;
+ u8 is_tunnel_ip6;
+ ip46_address_t tunnel_src_addr;
+ ip46_address_t tunnel_dst_addr;
+
+ u32 salt;
+
+ /* runtime */
+ u32 seq;
+ u32 seq_hi;
+ u32 last_seq;
+ u32 last_seq_hi;
+ u64 replay_window;
+} ipsec_sa_t;
+
+typedef struct
+{
+ ip46_address_t start, stop;
+} ip46_address_range_t;
+
+typedef struct
+{
+ u16 start, stop;
+} port_range_t;
+
+typedef struct
+{
+ u8 is_add;
+ u8 esn;
+ u8 anti_replay;
+ ip4_address_t local_ip, remote_ip;
+ u32 local_spi;
+ u32 remote_spi;
+ ipsec_crypto_alg_t crypto_alg;
+ u8 local_crypto_key_len;
+ u8 local_crypto_key[128];
+ u8 remote_crypto_key_len;
+ u8 remote_crypto_key[128];
+ ipsec_integ_alg_t integ_alg;
+ u8 local_integ_key_len;
+ u8 local_integ_key[128];
+ u8 remote_integ_key_len;
+ u8 remote_integ_key[128];
+} ipsec_add_del_tunnel_args_t;
+
+typedef struct
+{
+ u8 is_add;
+ u32 local_sa_id;
+ u32 remote_sa_id;
+ ip4_address_t local_ip;
+ ip4_address_t remote_ip;
+} ipsec_add_del_ipsec_gre_tunnel_args_t;
+
+typedef enum
+{
+ IPSEC_IF_SET_KEY_TYPE_NONE,
+ IPSEC_IF_SET_KEY_TYPE_LOCAL_CRYPTO,
+ IPSEC_IF_SET_KEY_TYPE_REMOTE_CRYPTO,
+ IPSEC_IF_SET_KEY_TYPE_LOCAL_INTEG,
+ IPSEC_IF_SET_KEY_TYPE_REMOTE_INTEG,
+} ipsec_if_set_key_type_t;
+
+typedef struct
+{
+ u32 id;
+ i32 priority;
+ u8 is_outbound;
+
+ // Selector
+ u8 is_ipv6;
+ ip46_address_range_t laddr;
+ ip46_address_range_t raddr;
+ u8 protocol;
+ port_range_t lport;
+ port_range_t rport;
+
+ // Policy
+ u8 policy;
+ u32 sa_id;
+ u32 sa_index;
+
+ // Counter
+ vlib_counter_t counter;
+} ipsec_policy_t;
+
+typedef struct
+{
+ u32 id;
+ /* pool of policies */
+ ipsec_policy_t *policies;
+ /* vectors of policy indices */
+ u32 *ipv4_outbound_policies;
+ u32 *ipv6_outbound_policies;
+ u32 *ipv4_inbound_protect_policy_indices;
+ u32 *ipv4_inbound_policy_discard_and_bypass_indices;
+ u32 *ipv6_inbound_protect_policy_indices;
+ u32 *ipv6_inbound_policy_discard_and_bypass_indices;
+} ipsec_spd_t;
+
+typedef struct
+{
+ u32 spd_index;
+} ip4_ipsec_config_t;
+
+typedef struct
+{
+ u32 spd_index;
+} ip6_ipsec_config_t;
+
+typedef struct
+{
+ u32 input_sa_index;
+ u32 output_sa_index;
+ u32 hw_if_index;
+} ipsec_tunnel_if_t;
+
+typedef struct
+{
+ /* pool of tunnel instances */
+ ipsec_spd_t *spds;
+ ipsec_sa_t *sad;
+
+ /* pool of tunnel interfaces */
+ ipsec_tunnel_if_t *tunnel_interfaces;
+ u32 *free_tunnel_if_indices;
+
+ u32 **empty_buffers;
+
+ uword *tunnel_index_by_key;
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+
+ /* next node indices */
+ u32 feature_next_node_index[32];
+
+ /* hashes */
+ uword *spd_index_by_spd_id;
+ uword *spd_index_by_sw_if_index;
+ uword *sa_index_by_sa_id;
+ uword *ipsec_if_pool_index_by_key;
+
+ /* node indexes */
+ u32 error_drop_node_index;
+ u32 ip4_lookup_node_index;
+ u32 esp_encrypt_node_index;
+
+} ipsec_main_t;
+
+ipsec_main_t ipsec_main;
+
+extern vlib_node_registration_t esp_encrypt_node;
+extern vlib_node_registration_t esp_decrypt_node;
+extern vlib_node_registration_t ipsec_if_output_node;
+extern vlib_node_registration_t ipsec_if_input_node;
+
+
+/*
+ * functions
+ */
+int ipsec_set_interface_spd (vlib_main_t * vm, u32 sw_if_index, u32 spd_id,
+ int is_add);
+int ipsec_add_del_spd (vlib_main_t * vm, u32 spd_id, int is_add);
+int ipsec_add_del_policy (vlib_main_t * vm, ipsec_policy_t * policy,
+ int is_add);
+int ipsec_add_del_sa (vlib_main_t * vm, ipsec_sa_t * new_sa, int is_add);
+int ipsec_set_sa_key (vlib_main_t * vm, ipsec_sa_t * sa_update);
+
+u32 ipsec_get_sa_index_by_sa_id (u32 sa_id);
+u8 *format_ipsec_if_output_trace (u8 * s, va_list * args);
+u8 *format_ipsec_policy_action (u8 * s, va_list * args);
+u8 *format_ipsec_crypto_alg (u8 * s, va_list * args);
+u8 *format_ipsec_integ_alg (u8 * s, va_list * args);
+u8 *format_ipsec_replay_window (u8 * s, va_list * args);
+uword unformat_ipsec_policy_action (unformat_input_t * input, va_list * args);
+uword unformat_ipsec_crypto_alg (unformat_input_t * input, va_list * args);
+uword unformat_ipsec_integ_alg (unformat_input_t * input, va_list * args);
+
+/*u32 ipsec_add_del_tunnel_if (vnet_main_t * vnm, ipsec_add_del_tunnel_args_t * args); */
+int ipsec_add_del_tunnel_if (ipsec_add_del_tunnel_args_t * args);
+int ipsec_add_del_ipsec_gre_tunnel (vnet_main_t * vnm,
+ ipsec_add_del_ipsec_gre_tunnel_args_t *
+ args);
+int ipsec_set_interface_key (vnet_main_t * vnm, u32 hw_if_index,
+ ipsec_if_set_key_type_t type, u8 alg, u8 * key);
+
+
+/*
+ * inline functions
+ */
+
+always_inline void
+ipsec_alloc_empty_buffers (vlib_main_t * vm, ipsec_main_t * im)
+{
+ u32 cpu_index = os_get_cpu_number ();
+ uword l = vec_len (im->empty_buffers[cpu_index]);
+ uword n_alloc = 0;
+
+ if (PREDICT_FALSE (l < VLIB_FRAME_SIZE))
+ {
+ if (!im->empty_buffers[cpu_index])
+ {
+ vec_alloc (im->empty_buffers[cpu_index], 2 * VLIB_FRAME_SIZE);
+ }
+
+ n_alloc = vlib_buffer_alloc (vm, im->empty_buffers[cpu_index] + l,
+ 2 * VLIB_FRAME_SIZE - l);
+
+ _vec_len (im->empty_buffers[cpu_index]) = l + n_alloc;
+ }
+}
+
+static_always_inline u32
+get_next_output_feature_node_index (vlib_buffer_t * b,
+ vlib_node_runtime_t * nr)
+{
+ u32 next;
+ u32 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_node_t *node = vlib_get_node (vm, nr->node_index);
+
+ vnet_feature_next (sw_if_index, &next, b);
+ return node->next_nodes[next];
+}
+
+#endif /* __IPSEC_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ipsec_api.c b/src/vnet/ipsec/ipsec_api.c
new file mode 100644
index 00000000000..30cc5bd2421
--- /dev/null
+++ b/src/vnet/ipsec/ipsec_api.c
@@ -0,0 +1,537 @@
+/*
+ *------------------------------------------------------------------
+ * ipsec_api.c - ipsec api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#if IPSEC > 0
+#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ikev2.h>
+#endif /* IPSEC */
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(IPSEC_SPD_ADD_DEL, ipsec_spd_add_del) \
+_(IPSEC_INTERFACE_ADD_DEL_SPD, ipsec_interface_add_del_spd) \
+_(IPSEC_SPD_ADD_DEL_ENTRY, ipsec_spd_add_del_entry) \
+_(IPSEC_SAD_ADD_DEL_ENTRY, ipsec_sad_add_del_entry) \
+_(IPSEC_SA_SET_KEY, ipsec_sa_set_key) \
+_(IPSEC_SPD_DUMP, ipsec_spd_dump) \
+_(IKEV2_PROFILE_ADD_DEL, ikev2_profile_add_del) \
+_(IKEV2_PROFILE_SET_AUTH, ikev2_profile_set_auth) \
+_(IKEV2_PROFILE_SET_ID, ikev2_profile_set_id) \
+_(IKEV2_PROFILE_SET_TS, ikev2_profile_set_ts) \
+_(IKEV2_SET_LOCAL_KEY, ikev2_set_local_key)
+
+static void vl_api_ipsec_spd_add_del_t_handler
+ (vl_api_ipsec_spd_add_del_t * mp)
+{
+#if IPSEC == 0
+ clib_warning ("unimplemented");
+#else
+
+ vlib_main_t *vm __attribute__ ((unused)) = vlib_get_main ();
+ vl_api_ipsec_spd_add_del_reply_t *rmp;
+ int rv;
+
+#if DPDK > 0
+ rv = ipsec_add_del_spd (vm, ntohl (mp->spd_id), mp->is_add);
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ REPLY_MACRO (VL_API_IPSEC_SPD_ADD_DEL_REPLY);
+#endif
+}
+
+static void vl_api_ipsec_interface_add_del_spd_t_handler
+ (vl_api_ipsec_interface_add_del_spd_t * mp)
+{
+ vlib_main_t *vm __attribute__ ((unused)) = vlib_get_main ();
+ vl_api_ipsec_interface_add_del_spd_reply_t *rmp;
+ int rv;
+ u32 sw_if_index __attribute__ ((unused));
+ u32 spd_id __attribute__ ((unused));
+
+ sw_if_index = ntohl (mp->sw_if_index);
+ spd_id = ntohl (mp->spd_id);
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+#if IPSEC > 0
+ rv = ipsec_set_interface_spd (vm, sw_if_index, spd_id, mp->is_add);
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_IPSEC_INTERFACE_ADD_DEL_SPD_REPLY);
+}
+
+static void vl_api_ipsec_spd_add_del_entry_t_handler
+ (vl_api_ipsec_spd_add_del_entry_t * mp)
+{
+ vlib_main_t *vm __attribute__ ((unused)) = vlib_get_main ();
+ vl_api_ipsec_spd_add_del_entry_reply_t *rmp;
+ int rv;
+
+#if IPSEC > 0
+ ipsec_policy_t p;
+
+ memset (&p, 0, sizeof (p));
+
+ p.id = ntohl (mp->spd_id);
+ p.priority = ntohl (mp->priority);
+ p.is_outbound = mp->is_outbound;
+ p.is_ipv6 = mp->is_ipv6;
+
+ if (mp->is_ipv6 || mp->is_ip_any)
+ {
+ clib_memcpy (&p.raddr.start, mp->remote_address_start, 16);
+ clib_memcpy (&p.raddr.stop, mp->remote_address_stop, 16);
+ clib_memcpy (&p.laddr.start, mp->local_address_start, 16);
+ clib_memcpy (&p.laddr.stop, mp->local_address_stop, 16);
+ }
+ else
+ {
+ clib_memcpy (&p.raddr.start.ip4.data, mp->remote_address_start, 4);
+ clib_memcpy (&p.raddr.stop.ip4.data, mp->remote_address_stop, 4);
+ clib_memcpy (&p.laddr.start.ip4.data, mp->local_address_start, 4);
+ clib_memcpy (&p.laddr.stop.ip4.data, mp->local_address_stop, 4);
+ }
+ p.protocol = mp->protocol;
+ p.rport.start = ntohs (mp->remote_port_start);
+ p.rport.stop = ntohs (mp->remote_port_stop);
+ p.lport.start = ntohs (mp->local_port_start);
+ p.lport.stop = ntohs (mp->local_port_stop);
+ /* policy action resolve unsupported */
+ if (mp->policy == IPSEC_POLICY_ACTION_RESOLVE)
+ {
+ clib_warning ("unsupported action: 'resolve'");
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+ }
+ p.policy = mp->policy;
+ p.sa_id = ntohl (mp->sa_id);
+
+ rv = ipsec_add_del_policy (vm, &p, mp->is_add);
+ if (rv)
+ goto out;
+
+ if (mp->is_ip_any)
+ {
+ p.is_ipv6 = 1;
+ rv = ipsec_add_del_policy (vm, &p, mp->is_add);
+ }
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+#endif
+
+out:
+ REPLY_MACRO (VL_API_IPSEC_SPD_ADD_DEL_ENTRY_REPLY);
+}
+
+static void vl_api_ipsec_sad_add_del_entry_t_handler
+ (vl_api_ipsec_sad_add_del_entry_t * mp)
+{
+ vlib_main_t *vm __attribute__ ((unused)) = vlib_get_main ();
+ vl_api_ipsec_sad_add_del_entry_reply_t *rmp;
+ int rv;
+#if IPSEC > 0
+ ipsec_sa_t sa;
+
+ memset (&sa, 0, sizeof (sa));
+
+ sa.id = ntohl (mp->sad_id);
+ sa.spi = ntohl (mp->spi);
+ /* security protocol AH unsupported */
+ if (mp->protocol == IPSEC_PROTOCOL_AH)
+ {
+ clib_warning ("unsupported security protocol 'AH'");
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+ }
+ sa.protocol = mp->protocol;
+ /* check for unsupported crypto-alg */
+ if (mp->crypto_algorithm < IPSEC_CRYPTO_ALG_AES_CBC_128 ||
+ mp->crypto_algorithm >= IPSEC_CRYPTO_N_ALG)
+ {
+ clib_warning ("unsupported crypto-alg: '%U'", format_ipsec_crypto_alg,
+ mp->crypto_algorithm);
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+ }
+ sa.crypto_alg = mp->crypto_algorithm;
+ sa.crypto_key_len = mp->crypto_key_length;
+ clib_memcpy (&sa.crypto_key, mp->crypto_key, sizeof (sa.crypto_key));
+ /* check for unsupported integ-alg */
+#if DPDK_CRYPTO==1
+ if (mp->integrity_algorithm < IPSEC_INTEG_ALG_NONE ||
+#else
+ if (mp->integrity_algorithm < IPSEC_INTEG_ALG_SHA1_96 ||
+#endif
+ mp->integrity_algorithm >= IPSEC_INTEG_N_ALG)
+ {
+ clib_warning ("unsupported integ-alg: '%U'", format_ipsec_integ_alg,
+ mp->integrity_algorithm);
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+ }
+
+#if DPDK_CRYPTO==1
+ /*Special cases, aes-gcm-128 encryption */
+ if (mp->crypto_algorithm == IPSEC_CRYPTO_ALG_AES_GCM_128)
+ {
+ if (mp->integrity_algorithm != IPSEC_INTEG_ALG_NONE
+ && mp->integrity_algorithm != IPSEC_INTEG_ALG_AES_GCM_128)
+ {
+ clib_warning
+ ("unsupported: aes-gcm-128 crypto-alg needs none as integ-alg");
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+ }
+ else /*set integ-alg internally to aes-gcm-128 */
+ mp->integrity_algorithm = IPSEC_INTEG_ALG_AES_GCM_128;
+ }
+ else if (mp->integrity_algorithm == IPSEC_INTEG_ALG_AES_GCM_128)
+ {
+ clib_warning ("unsupported integ-alg: aes-gcm-128");
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+ }
+ else if (mp->integrity_algorithm == IPSEC_INTEG_ALG_NONE)
+ {
+ clib_warning ("unsupported integ-alg: none");
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+ }
+#endif
+
+ sa.integ_alg = mp->integrity_algorithm;
+ sa.integ_key_len = mp->integrity_key_length;
+ clib_memcpy (&sa.integ_key, mp->integrity_key, sizeof (sa.integ_key));
+ sa.use_esn = mp->use_extended_sequence_number;
+ sa.is_tunnel = mp->is_tunnel;
+ sa.is_tunnel_ip6 = mp->is_tunnel_ipv6;
+ if (sa.is_tunnel_ip6)
+ {
+ clib_memcpy (&sa.tunnel_src_addr, mp->tunnel_src_address, 16);
+ clib_memcpy (&sa.tunnel_dst_addr, mp->tunnel_dst_address, 16);
+ }
+ else
+ {
+ clib_memcpy (&sa.tunnel_src_addr.ip4.data, mp->tunnel_src_address, 4);
+ clib_memcpy (&sa.tunnel_dst_addr.ip4.data, mp->tunnel_dst_address, 4);
+ }
+
+ rv = ipsec_add_del_sa (vm, &sa, mp->is_add);
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+#endif
+
+out:
+ REPLY_MACRO (VL_API_IPSEC_SAD_ADD_DEL_ENTRY_REPLY);
+}
+
+static void
+send_ipsec_spd_details (ipsec_policy_t * p, unix_shared_memory_queue_t * q,
+ u32 context)
+{
+ vl_api_ipsec_spd_details_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IPSEC_SPD_DETAILS);
+ mp->context = context;
+
+ mp->spd_id = htonl (p->id);
+ mp->priority = htonl (p->priority);
+ mp->is_outbound = p->is_outbound;
+ mp->is_ipv6 = p->is_ipv6;
+ if (p->is_ipv6)
+ {
+ memcpy (mp->local_start_addr, &p->laddr.start.ip6, 16);
+ memcpy (mp->local_stop_addr, &p->laddr.stop.ip6, 16);
+ memcpy (mp->remote_start_addr, &p->raddr.start.ip6, 16);
+ memcpy (mp->remote_stop_addr, &p->raddr.stop.ip6, 16);
+ }
+ else
+ {
+ memcpy (mp->local_start_addr, &p->laddr.start.ip4, 4);
+ memcpy (mp->local_stop_addr, &p->laddr.stop.ip4, 4);
+ memcpy (mp->remote_start_addr, &p->raddr.start.ip4, 4);
+ memcpy (mp->remote_stop_addr, &p->raddr.stop.ip4, 4);
+ }
+ mp->local_start_port = htons (p->lport.start);
+ mp->local_stop_port = htons (p->lport.stop);
+ mp->remote_start_port = htons (p->rport.start);
+ mp->remote_stop_port = htons (p->rport.stop);
+ mp->protocol = p->protocol;
+ mp->policy = p->policy;
+ mp->sa_id = htonl (p->sa_id);
+ mp->bytes = clib_host_to_net_u64 (p->counter.bytes);
+ mp->packets = clib_host_to_net_u64 (p->counter.packets);
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_ipsec_spd_dump_t_handler (vl_api_ipsec_spd_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_policy_t *policy;
+ ipsec_spd_t *spd;
+ uword *p;
+ u32 spd_index;
+#if IPSEC > 0
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ p = hash_get (im->spd_index_by_spd_id, ntohl (mp->spd_id));
+ if (!p)
+ return;
+
+ spd_index = p[0];
+ spd = pool_elt_at_index (im->spds, spd_index);
+
+ /* *INDENT-OFF* */
+ pool_foreach (policy, spd->policies, ({
+   if (mp->sa_id == ~(0) || ntohl (mp->sa_id) == policy->sa_id)
+     send_ipsec_spd_details (policy, q, mp->context);
+ }));
+ /* *INDENT-ON* */
+#else
+ clib_warning ("unimplemented");
+#endif
+}
+
+static void
+vl_api_ipsec_sa_set_key_t_handler (vl_api_ipsec_sa_set_key_t * mp)
+{
+ vlib_main_t *vm __attribute__ ((unused)) = vlib_get_main ();
+ vl_api_ipsec_sa_set_key_reply_t *rmp;
+ int rv;
+#if IPSEC > 0
+ ipsec_sa_t sa;
+ sa.id = ntohl (mp->sa_id);
+ sa.crypto_key_len = mp->crypto_key_length;
+ clib_memcpy (&sa.crypto_key, mp->crypto_key, sizeof (sa.crypto_key));
+ sa.integ_key_len = mp->integrity_key_length;
+ clib_memcpy (&sa.integ_key, mp->integrity_key, sizeof (sa.integ_key));
+
+ rv = ipsec_set_sa_key (vm, &sa);
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ REPLY_MACRO (VL_API_IPSEC_SA_SET_KEY_REPLY);
+}
+
+static void
+vl_api_ikev2_profile_add_del_t_handler (vl_api_ikev2_profile_add_del_t * mp)
+{
+ vl_api_ikev2_profile_add_del_reply_t *rmp;
+ int rv = 0;
+
+#if IPSEC > 0
+ vlib_main_t *vm = vlib_get_main ();
+ clib_error_t *error;
+ u8 *tmp = format (0, "%s", mp->name);
+ error = ikev2_add_del_profile (vm, tmp, mp->is_add);
+ vec_free (tmp);
+ if (error)
+ rv = VNET_API_ERROR_UNSPECIFIED;
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ REPLY_MACRO (VL_API_IKEV2_PROFILE_ADD_DEL_REPLY);
+}
+
+static void
+ vl_api_ikev2_profile_set_auth_t_handler
+ (vl_api_ikev2_profile_set_auth_t * mp)
+{
+ vl_api_ikev2_profile_set_auth_reply_t *rmp;
+ int rv = 0;
+
+#if IPSEC > 0
+ vlib_main_t *vm = vlib_get_main ();
+ clib_error_t *error;
+ u8 *tmp = format (0, "%s", mp->name);
+ u8 *data = vec_new (u8, mp->data_len);
+ clib_memcpy (data, mp->data, mp->data_len);
+ error = ikev2_set_profile_auth (vm, tmp, mp->auth_method, data, mp->is_hex);
+ vec_free (tmp);
+ vec_free (data);
+ if (error)
+ rv = VNET_API_ERROR_UNSPECIFIED;
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ REPLY_MACRO (VL_API_IKEV2_PROFILE_SET_AUTH_REPLY);
+}
+
+static void
+vl_api_ikev2_profile_set_id_t_handler (vl_api_ikev2_profile_set_id_t * mp)
+{
+ vl_api_ikev2_profile_set_id_reply_t *rmp;
+ int rv = 0;
+
+#if IPSEC > 0
+ vlib_main_t *vm = vlib_get_main ();
+ clib_error_t *error;
+ u8 *tmp = format (0, "%s", mp->name);
+ u8 *data = vec_new (u8, mp->data_len);
+ clib_memcpy (data, mp->data, mp->data_len);
+ error = ikev2_set_profile_id (vm, tmp, mp->id_type, data, mp->is_local);
+ vec_free (tmp);
+ vec_free (data);
+ if (error)
+ rv = VNET_API_ERROR_UNSPECIFIED;
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ REPLY_MACRO (VL_API_IKEV2_PROFILE_SET_ID_REPLY);
+}
+
+static void
+vl_api_ikev2_profile_set_ts_t_handler (vl_api_ikev2_profile_set_ts_t * mp)
+{
+ vl_api_ikev2_profile_set_ts_reply_t *rmp;
+ int rv = 0;
+
+#if IPSEC > 0
+ vlib_main_t *vm = vlib_get_main ();
+ clib_error_t *error;
+ u8 *tmp = format (0, "%s", mp->name);
+ error = ikev2_set_profile_ts (vm, tmp, mp->proto, mp->start_port,
+ mp->end_port, (ip4_address_t) mp->start_addr,
+ (ip4_address_t) mp->end_addr, mp->is_local);
+ vec_free (tmp);
+ if (error)
+ rv = VNET_API_ERROR_UNSPECIFIED;
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ REPLY_MACRO (VL_API_IKEV2_PROFILE_SET_TS_REPLY);
+}
+
+static void
+vl_api_ikev2_set_local_key_t_handler (vl_api_ikev2_set_local_key_t * mp)
+{
+ vl_api_ikev2_set_local_key_reply_t *rmp;
+ int rv = 0;
+
+#if IPSEC > 0
+ vlib_main_t *vm = vlib_get_main ();
+ clib_error_t *error;
+
+ error = ikev2_set_local_key (vm, mp->key_file);
+ if (error)
+ rv = VNET_API_ERROR_UNSPECIFIED;
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ REPLY_MACRO (VL_API_IKEV2_SET_LOCAL_KEY_REPLY);
+}
+
+/*
+ * ipsec_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_ipsec;
+#undef _
+}
+
+static clib_error_t *
+ipsec_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (ipsec_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ipsec_cli.c b/src/vnet/ipsec/ipsec_cli.c
new file mode 100644
index 00000000000..7ab85d4aefb
--- /dev/null
+++ b/src/vnet/ipsec/ipsec_cli.c
@@ -0,0 +1,807 @@
+/*
+ * ipsec_cli.c : IPSec CLI commands
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+#include <vnet/interface.h>
+
+#include <vnet/ipsec/ipsec.h>
+
+static clib_error_t *
+set_interface_spd_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ ipsec_main_t *im = &ipsec_main;
+ u32 sw_if_index = (u32) ~ 0;
+ u32 spd_id;
+ int is_add = 1;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ if (unformat
+ (line_input, "%U %u", unformat_vnet_sw_interface, im->vnet_main,
+ &sw_if_index, &spd_id))
+ ;
+ else if (unformat (line_input, "del"))
+ is_add = 0;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+
+ unformat_free (line_input);
+
+ ipsec_set_interface_spd (vm, sw_if_index, spd_id, is_add);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_spd_command, static) = {
+ .path = "set interface ipsec spd",
+ .short_help =
+ "set interface ipsec spd <int> <id>",
+ .function = set_interface_spd_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+ipsec_sa_add_del_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ ipsec_sa_t sa;
+ int is_add = ~0;
+ u8 *ck = 0, *ik = 0;
+
+ memset (&sa, 0, sizeof (sa));
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "add %u", &sa.id))
+ is_add = 1;
+ else if (unformat (line_input, "del %u", &sa.id))
+ is_add = 0;
+ else if (unformat (line_input, "spi %u", &sa.spi))
+ ;
+ else if (unformat (line_input, "esp"))
+ sa.protocol = IPSEC_PROTOCOL_ESP;
+ else if (unformat (line_input, "ah"))
+ //sa.protocol = IPSEC_PROTOCOL_AH;
+ return clib_error_return (0, "unsupported security protocol 'AH'");
+ else
+ if (unformat (line_input, "crypto-key %U", unformat_hex_string, &ck))
+ sa.crypto_key_len = vec_len (ck);
+ else
+ if (unformat
+ (line_input, "crypto-alg %U", unformat_ipsec_crypto_alg,
+ &sa.crypto_alg))
+ {
+ if (sa.crypto_alg < IPSEC_CRYPTO_ALG_AES_CBC_128 ||
+ sa.crypto_alg >= IPSEC_CRYPTO_N_ALG)
+ return clib_error_return (0, "unsupported crypto-alg: '%U'",
+ format_ipsec_crypto_alg, sa.crypto_alg);
+ }
+ else
+ if (unformat (line_input, "integ-key %U", unformat_hex_string, &ik))
+ sa.integ_key_len = vec_len (ik);
+ else if (unformat (line_input, "integ-alg %U", unformat_ipsec_integ_alg,
+ &sa.integ_alg))
+ {
+#if DPDK_CRYPTO==1
+ if (sa.integ_alg < IPSEC_INTEG_ALG_NONE ||
+#else
+ if (sa.integ_alg < IPSEC_INTEG_ALG_SHA1_96 ||
+#endif
+ sa.integ_alg >= IPSEC_INTEG_N_ALG)
+ return clib_error_return (0, "unsupported integ-alg: '%U'",
+ format_ipsec_integ_alg, sa.integ_alg);
+ }
+ else if (unformat (line_input, "tunnel-src %U",
+ unformat_ip4_address, &sa.tunnel_src_addr.ip4))
+ sa.is_tunnel = 1;
+ else if (unformat (line_input, "tunnel-dst %U",
+ unformat_ip4_address, &sa.tunnel_dst_addr.ip4))
+ sa.is_tunnel = 1;
+ else if (unformat (line_input, "tunnel-src %U",
+ unformat_ip6_address, &sa.tunnel_src_addr.ip6))
+ {
+ sa.is_tunnel = 1;
+ sa.is_tunnel_ip6 = 1;
+ }
+ else if (unformat (line_input, "tunnel-dst %U",
+ unformat_ip6_address, &sa.tunnel_dst_addr.ip6))
+ {
+ sa.is_tunnel = 1;
+ sa.is_tunnel_ip6 = 1;
+ }
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+#if DPDK_CRYPTO==1
+ /*Special cases, aes-gcm-128 encryption */
+ if (sa.crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+ {
+ if (sa.integ_alg != IPSEC_INTEG_ALG_NONE
+ && sa.integ_alg != IPSEC_INTEG_ALG_AES_GCM_128)
+ return clib_error_return (0,
+ "unsupported: aes-gcm-128 crypto-alg needs none as integ-alg");
+ else /*set integ-alg internally to aes-gcm-128 */
+ sa.integ_alg = IPSEC_INTEG_ALG_AES_GCM_128;
+ }
+ else if (sa.integ_alg == IPSEC_INTEG_ALG_AES_GCM_128)
+ return clib_error_return (0, "unsupported integ-alg: aes-gcm-128");
+ else if (sa.integ_alg == IPSEC_INTEG_ALG_NONE)
+ return clib_error_return (0, "unsupported integ-alg: none");
+#endif
+
+ unformat_free (line_input);
+
+ if (sa.crypto_key_len > sizeof (sa.crypto_key))
+ sa.crypto_key_len = sizeof (sa.crypto_key);
+
+ if (sa.integ_key_len > sizeof (sa.integ_key))
+ sa.integ_key_len = sizeof (sa.integ_key);
+
+ if (ck)
+ strncpy ((char *) sa.crypto_key, (char *) ck, sa.crypto_key_len);
+
+ if (ik)
+ strncpy ((char *) sa.integ_key, (char *) ik, sa.integ_key_len);
+
+ ipsec_add_del_sa (vm, &sa, is_add);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ipsec_sa_add_del_command, static) = {
+ .path = "ipsec sa",
+ .short_help =
+ "ipsec sa [add|del]",
+ .function = ipsec_sa_add_del_command_fn,
+};
+/* *INDENT-ON* */
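For example (the id, SPI and hex key bytes are illustrative only; aes-cbc-128 takes a 16-byte key and sha1-96 a 20-byte key):

vpp# ipsec sa add 10 spi 1001 esp crypto-alg aes-cbc-128 crypto-key 2b7e151628aed2a6abf7158809cf4f3c integ-alg sha1-96 integ-key 4a6566654a6566654a6566654a6566654a656665
vpp# ipsec sa del 10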
+
+static clib_error_t *
+ipsec_spd_add_del_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 spd_id = ~0;
+ int is_add = ~0;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "add"))
+ is_add = 1;
+ else if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "%u", &spd_id))
+ ;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (spd_id == ~0)
+ return clib_error_return (0, "please specify SPD ID");
+
+ ipsec_add_del_spd (vm, spd_id, is_add);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ipsec_spd_add_del_command, static) = {
+ .path = "ipsec spd",
+ .short_help =
+ "ipsec spd [add|del] <id>",
+ .function = ipsec_spd_add_del_command_fn,
+};
+/* *INDENT-ON* */
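For example:

vpp# ipsec spd add 1
vpp# ipsec spd del 1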
+
+
+static clib_error_t *
+ipsec_policy_add_del_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ ipsec_policy_t p;
+ int is_add = 0;
+ int is_ip_any = 1;
+ u32 tmp, tmp2;
+
+ memset (&p, 0, sizeof (p));
+ p.lport.stop = p.rport.stop = ~0;
+ p.laddr.stop.ip4.as_u32 = p.raddr.stop.ip4.as_u32 = (u32) ~ 0;
+ p.laddr.stop.ip6.as_u64[0] = p.laddr.stop.ip6.as_u64[1] = (u64) ~ 0;
+ p.raddr.stop.ip6.as_u64[0] = p.raddr.stop.ip6.as_u64[1] = (u64) ~ 0;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "add"))
+ is_add = 1;
+ else if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "spd %u", &p.id))
+ ;
+ else if (unformat (line_input, "inbound"))
+ p.is_outbound = 0;
+ else if (unformat (line_input, "outbound"))
+ p.is_outbound = 1;
+ else if (unformat (line_input, "priority %d", &p.priority))
+ ;
+ else if (unformat (line_input, "protocol %u", &tmp))
+ p.protocol = (u8) tmp;
+ else
+ if (unformat
+ (line_input, "action %U", unformat_ipsec_policy_action,
+ &p.policy))
+ {
+ if (p.policy == IPSEC_POLICY_ACTION_RESOLVE)
+ return clib_error_return (0, "unsupported action: 'resolve'");
+ }
+ else if (unformat (line_input, "sa %u", &p.sa_id))
+ ;
+ else if (unformat (line_input, "local-ip-range %U - %U",
+ unformat_ip4_address, &p.laddr.start.ip4,
+ unformat_ip4_address, &p.laddr.stop.ip4))
+ is_ip_any = 0;
+ else if (unformat (line_input, "remote-ip-range %U - %U",
+ unformat_ip4_address, &p.raddr.start.ip4,
+ unformat_ip4_address, &p.raddr.stop.ip4))
+ is_ip_any = 0;
+ else if (unformat (line_input, "local-ip-range %U - %U",
+ unformat_ip6_address, &p.laddr.start.ip6,
+ unformat_ip6_address, &p.laddr.stop.ip6))
+ {
+ p.is_ipv6 = 1;
+ is_ip_any = 0;
+ }
+ else if (unformat (line_input, "remote-ip-range %U - %U",
+ unformat_ip6_address, &p.raddr.start.ip6,
+ unformat_ip6_address, &p.raddr.stop.ip6))
+ {
+ p.is_ipv6 = 1;
+ is_ip_any = 0;
+ }
+ else if (unformat (line_input, "local-port-range %u - %u", &tmp, &tmp2))
+ {
+ p.lport.start = tmp;
+ p.lport.stop = tmp2;
+ }
+ else
+ if (unformat (line_input, "remote-port-range %u - %u", &tmp, &tmp2))
+ {
+ p.rport.start = tmp;
+ p.rport.stop = tmp2;
+ }
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ ipsec_add_del_policy (vm, &p, is_add);
+ if (is_ip_any)
+ {
+ p.is_ipv6 = 1;
+ ipsec_add_del_policy (vm, &p, is_add);
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ipsec_policy_add_del_command, static) = {
+ .path = "ipsec policy",
+ .short_help =
+ "ipsec policy [add|del] spd <id> priority <n> ",
+ .function = ipsec_policy_add_del_command_fn,
+};
+/* *INDENT-ON* */
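For example, pairing the SPD and SA created above (all values illustrative; omitting the ip ranges matches any address, in both address families):

vpp# ipsec policy add spd 1 priority 100 outbound action protect sa 10 local-ip-range 10.0.0.0 - 10.0.0.255 remote-ip-range 10.1.0.0 - 10.1.0.255
vpp# ipsec policy add spd 1 priority 10 inbound action bypass protocol 50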
+
+static clib_error_t *
+set_ipsec_sa_key_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ ipsec_sa_t sa;
+ u8 *ck = 0, *ik = 0;
+
+ memset (&sa, 0, sizeof (sa));
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "%u", &sa.id))
+ ;
+ else
+ if (unformat (line_input, "crypto-key %U", unformat_hex_string, &ck))
+ sa.crypto_key_len = vec_len (ck);
+ else
+ if (unformat (line_input, "integ-key %U", unformat_hex_string, &ik))
+ sa.integ_key_len = vec_len (ik);
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (sa.crypto_key_len > sizeof (sa.crypto_key))
+ sa.crypto_key_len = sizeof (sa.crypto_key);
+
+ if (sa.integ_key_len > sizeof (sa.integ_key))
+ sa.integ_key_len = sizeof (sa.integ_key);
+
+ if (ck)
+ strncpy ((char *) sa.crypto_key, (char *) ck, sa.crypto_key_len);
+
+ if (ik)
+ strncpy ((char *) sa.integ_key, (char *) ik, sa.integ_key_len);
+
+ ipsec_set_sa_key (vm, &sa);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_ipsec_sa_key_command, static) = {
+ .path = "set ipsec sa",
+ .short_help =
+ "set ipsec sa <id> crypto-key <key> integ-key <key>",
+ .function = set_ipsec_sa_key_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+show_ipsec_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ ipsec_spd_t *spd;
+ ipsec_sa_t *sa;
+ ipsec_policy_t *p;
+ ipsec_main_t *im = &ipsec_main;
+ u32 *i;
+ ipsec_tunnel_if_t *t;
+ vnet_hw_interface_t *hi;
+
+ /* *INDENT-OFF* */
+ pool_foreach (sa, im->sad, ({
+ if (sa->id) {
+ vlib_cli_output(vm, "sa %u spi %u mode %s protocol %s", sa->id, sa->spi,
+ sa->is_tunnel ? "tunnel" : "transport",
+ sa->protocol ? "esp" : "ah");
+ if (sa->protocol == IPSEC_PROTOCOL_ESP) {
+ vlib_cli_output(vm, " crypto alg %U%s%U integrity alg %U%s%U",
+ format_ipsec_crypto_alg, sa->crypto_alg,
+ sa->crypto_alg ? " key " : "",
+ format_hex_bytes, sa->crypto_key, sa->crypto_key_len,
+ format_ipsec_integ_alg, sa->integ_alg,
+ sa->integ_alg ? " key " : "",
+ format_hex_bytes, sa->integ_key, sa->integ_key_len);
+ }
+ if (sa->is_tunnel && sa->is_tunnel_ip6) {
+ vlib_cli_output(vm, " tunnel src %U dst %U",
+ format_ip6_address, &sa->tunnel_src_addr.ip6,
+ format_ip6_address, &sa->tunnel_dst_addr.ip6);
+ } else if (sa->is_tunnel) {
+ vlib_cli_output(vm, " tunnel src %U dst %U",
+ format_ip4_address, &sa->tunnel_src_addr.ip4,
+ format_ip4_address, &sa->tunnel_dst_addr.ip4);
+ }
+ }
+ }));
+ /* *INDENT-ON* */
+
+ /* *INDENT-OFF* */
+ pool_foreach (spd, im->spds, ({
+ vlib_cli_output(vm, "spd %u", spd->id);
+
+ vlib_cli_output(vm, " outbound policies");
+ vec_foreach(i, spd->ipv4_outbound_policies)
+ {
+ p = pool_elt_at_index(spd->policies, *i);
+ vlib_cli_output(vm, " priority %d action %U protocol %s%s",
+ p->priority,
+ format_ipsec_policy_action, p->policy,
+ p->protocol ?
+ format(0, "%U", format_ip_protocol, p->protocol) :
+ (u8 *) "any",
+ p->policy == IPSEC_POLICY_ACTION_PROTECT ?
+ format(0, " sa %u", p->sa_id) :
+ (u8 *) "");
+ vlib_cli_output(vm, " local addr range %U - %U port range %u - %u",
+ format_ip4_address, &p->laddr.start.ip4,
+ format_ip4_address, &p->laddr.stop.ip4,
+ p->lport.start, p->lport.stop);
+      vlib_cli_output(vm, " remote addr range %U - %U port range %u - %u",
+ format_ip4_address, &p->raddr.start.ip4,
+ format_ip4_address, &p->raddr.stop.ip4,
+ p->rport.start, p->rport.stop);
+ vlib_cli_output(vm, " packets %u bytes %u", p->counter.packets,
+ p->counter.bytes);
+ };
+ vec_foreach(i, spd->ipv6_outbound_policies)
+ {
+ p = pool_elt_at_index(spd->policies, *i);
+ vlib_cli_output(vm, " priority %d action %U protocol %s%s",
+ p->priority,
+ format_ipsec_policy_action, p->policy,
+ p->protocol ?
+ format(0, "%U", format_ip_protocol, p->protocol) :
+ (u8 *) "any",
+ p->policy == IPSEC_POLICY_ACTION_PROTECT ?
+ format(0, " sa %u", p->sa_id) :
+ (u8 *) "");
+ vlib_cli_output(vm, " local addr range %U - %U port range %u - %u",
+ format_ip6_address, &p->laddr.start.ip6,
+ format_ip6_address, &p->laddr.stop.ip6,
+ p->lport.start, p->lport.stop);
+ vlib_cli_output(vm, " remote addr range %U - %U port range %u - %u",
+ format_ip6_address, &p->raddr.start.ip6,
+ format_ip6_address, &p->raddr.stop.ip6,
+ p->rport.start, p->rport.stop);
+ vlib_cli_output(vm, " packets %u bytes %u", p->counter.packets,
+ p->counter.bytes);
+ };
+ vlib_cli_output(vm, " inbound policies");
+ vec_foreach(i, spd->ipv4_inbound_protect_policy_indices)
+ {
+ p = pool_elt_at_index(spd->policies, *i);
+ vlib_cli_output(vm, " priority %d action %U protocol %s%s",
+ p->priority,
+ format_ipsec_policy_action, p->policy,
+ p->protocol ?
+ format(0, "%U", format_ip_protocol, p->protocol) :
+ (u8 *) "any",
+ p->policy == IPSEC_POLICY_ACTION_PROTECT ?
+ format(0, " sa %u", p->sa_id) :
+ (u8 *) "");
+ vlib_cli_output(vm, " local addr range %U - %U port range %u - %u",
+ format_ip4_address, &p->laddr.start.ip4,
+ format_ip4_address, &p->laddr.stop.ip4,
+ p->lport.start, p->lport.stop);
+      vlib_cli_output(vm, " remote addr range %U - %U port range %u - %u",
+ format_ip4_address, &p->raddr.start.ip4,
+ format_ip4_address, &p->raddr.stop.ip4,
+ p->rport.start, p->rport.stop);
+ vlib_cli_output(vm, " packets %u bytes %u", p->counter.packets,
+ p->counter.bytes);
+ };
+ vec_foreach(i, spd->ipv4_inbound_policy_discard_and_bypass_indices)
+ {
+ p = pool_elt_at_index(spd->policies, *i);
+ vlib_cli_output(vm, " priority %d action %U protocol %s%s",
+ p->priority,
+ format_ipsec_policy_action, p->policy,
+ p->protocol ?
+ format(0, "%U", format_ip_protocol, p->protocol) :
+ (u8 *) "any",
+ p->policy == IPSEC_POLICY_ACTION_PROTECT ?
+ format(0, " sa %u", p->sa_id) :
+ (u8 *) "");
+ vlib_cli_output(vm, " local addr range %U - %U port range %u - %u",
+ format_ip4_address, &p->laddr.start.ip4,
+ format_ip4_address, &p->laddr.stop.ip4,
+ p->lport.start, p->lport.stop);
+      vlib_cli_output(vm, " remote addr range %U - %U port range %u - %u",
+ format_ip4_address, &p->raddr.start.ip4,
+ format_ip4_address, &p->raddr.stop.ip4,
+ p->rport.start, p->rport.stop);
+ vlib_cli_output(vm, " packets %u bytes %u", p->counter.packets,
+ p->counter.bytes);
+ };
+ vec_foreach(i, spd->ipv6_inbound_protect_policy_indices)
+ {
+ p = pool_elt_at_index(spd->policies, *i);
+ vlib_cli_output(vm, " priority %d action %U protocol %s%s",
+ p->priority,
+ format_ipsec_policy_action, p->policy,
+ p->protocol ?
+ format(0, "%U", format_ip_protocol, p->protocol) :
+ (u8 *) "any",
+ p->policy == IPSEC_POLICY_ACTION_PROTECT ?
+ format(0, " sa %u", p->sa_id) :
+ (u8 *) "");
+ vlib_cli_output(vm, " local addr range %U - %U port range %u - %u",
+ format_ip6_address, &p->laddr.start.ip6,
+ format_ip6_address, &p->laddr.stop.ip6,
+ p->lport.start, p->lport.stop);
+ vlib_cli_output(vm, " remote addr range %U - %U port range %u - %u",
+ format_ip6_address, &p->raddr.start.ip6,
+ format_ip6_address, &p->raddr.stop.ip6,
+ p->rport.start, p->rport.stop);
+ vlib_cli_output(vm, " packets %u bytes %u", p->counter.packets,
+ p->counter.bytes);
+ };
+ vec_foreach(i, spd->ipv6_inbound_policy_discard_and_bypass_indices)
+ {
+ p = pool_elt_at_index(spd->policies, *i);
+ vlib_cli_output(vm, " priority %d action %U protocol %s%s",
+ p->priority,
+ format_ipsec_policy_action, p->policy,
+ p->protocol ?
+ format(0, "%U", format_ip_protocol, p->protocol) :
+ (u8 *) "any",
+ p->policy == IPSEC_POLICY_ACTION_PROTECT ?
+ format(0, " sa %u", p->sa_id) :
+ (u8 *) "");
+ vlib_cli_output(vm, " local addr range %U - %U port range %u - %u",
+ format_ip6_address, &p->laddr.start.ip6,
+ format_ip6_address, &p->laddr.stop.ip6,
+ p->lport.start, p->lport.stop);
+ vlib_cli_output(vm, " remote addr range %U - %U port range %u - %u",
+ format_ip6_address, &p->raddr.start.ip6,
+ format_ip6_address, &p->raddr.stop.ip6,
+ p->rport.start, p->rport.stop);
+ vlib_cli_output(vm, " packets %u bytes %u", p->counter.packets,
+ p->counter.bytes);
+ };
+ }));
+ /* *INDENT-ON* */
+
+ vlib_cli_output (vm, "tunnel interfaces");
+ /* *INDENT-OFF* */
+ pool_foreach (t, im->tunnel_interfaces, ({
+ if (t->hw_if_index == ~0)
+ continue;
+ hi = vnet_get_hw_interface (im->vnet_main, t->hw_if_index);
+    vlib_cli_output(vm, " %s", hi->name);
+ sa = pool_elt_at_index(im->sad, t->output_sa_index);
+ vlib_cli_output(vm, " seq %u seq-hi %u esn %u anti-replay %u",
+ sa->seq, sa->seq_hi, sa->use_esn, sa->use_anti_replay);
+ vlib_cli_output(vm, " local-spi %u local-ip %U", sa->spi,
+ format_ip4_address, &sa->tunnel_src_addr.ip4);
+ vlib_cli_output(vm, " local-crypto %U %U",
+ format_ipsec_crypto_alg, sa->crypto_alg,
+ format_hex_bytes, sa->crypto_key, sa->crypto_key_len);
+ vlib_cli_output(vm, " local-integrity %U %U",
+ format_ipsec_integ_alg, sa->integ_alg,
+ format_hex_bytes, sa->integ_key, sa->integ_key_len);
+ sa = pool_elt_at_index(im->sad, t->input_sa_index);
+ vlib_cli_output(vm, " last-seq %u last-seq-hi %u esn %u anti-replay %u window %U",
+ sa->last_seq, sa->last_seq_hi, sa->use_esn,
+ sa->use_anti_replay,
+ format_ipsec_replay_window, sa->replay_window);
+ vlib_cli_output(vm, " remote-spi %u remote-ip %U", sa->spi,
+ format_ip4_address, &sa->tunnel_src_addr.ip4);
+ vlib_cli_output(vm, " remote-crypto %U %U",
+ format_ipsec_crypto_alg, sa->crypto_alg,
+ format_hex_bytes, sa->crypto_key, sa->crypto_key_len);
+ vlib_cli_output(vm, " remote-integrity %U %U",
+ format_ipsec_integ_alg, sa->integ_alg,
+ format_hex_bytes, sa->integ_key, sa->integ_key_len);
+ }));
+ /* *INDENT-ON* */
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_ipsec_command, static) = {
+ .path = "show ipsec",
+ .short_help = "show ipsec",
+ .function = show_ipsec_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+clear_ipsec_counters_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_spd_t *spd;
+ ipsec_policy_t *p;
+
+ /* *INDENT-OFF* */
+ pool_foreach (spd, im->spds, ({
+ pool_foreach(p, spd->policies, ({
+ p->counter.packets = p->counter.bytes = 0;
+ }));
+ }));
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (clear_ipsec_counters_command, static) = {
+ .path = "clear ipsec counters",
+ .short_help = "clear ipsec counters",
+ .function = clear_ipsec_counters_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+create_ipsec_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ ipsec_add_del_tunnel_args_t a;
+ int rv;
+ u32 num_m_args = 0;
+
+ memset (&a, 0, sizeof (a));
+ a.is_add = 1;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (line_input, "local-ip %U", unformat_ip4_address, &a.local_ip))
+ num_m_args++;
+ else
+ if (unformat
+ (line_input, "remote-ip %U", unformat_ip4_address, &a.remote_ip))
+ num_m_args++;
+ else if (unformat (line_input, "local-spi %u", &a.local_spi))
+ num_m_args++;
+ else if (unformat (line_input, "remote-spi %u", &a.remote_spi))
+ num_m_args++;
+ else if (unformat (line_input, "del"))
+ a.is_add = 0;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+	  return clib_error_return (0, "unknown input `%U'",
+				    format_unformat_error, line_input);
+ }
+ unformat_free (line_input);
+
+ if (num_m_args < 4)
+ return clib_error_return (0, "mandatory argument(s) missing");
+
+ rv = ipsec_add_del_tunnel_if (&a);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+ case VNET_API_ERROR_INVALID_VALUE:
+ if (a.is_add)
+ return clib_error_return (0,
+ "IPSec tunnel interface already exists...");
+ else
+	return clib_error_return (0, "IPSec tunnel interface does not exist...");
+ default:
+ return clib_error_return (0, "ipsec_register_interface returned %d",
+ rv);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (create_ipsec_tunnel_command, static) = {
+ .path = "create ipsec tunnel",
+  .short_help = "create ipsec tunnel local-ip <addr> local-spi <spi> remote-ip <addr> remote-spi <spi> [del]",
+ .function = create_ipsec_tunnel_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_interface_key_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_if_set_key_type_t type = IPSEC_IF_SET_KEY_TYPE_NONE;
+ u32 hw_if_index = (u32) ~ 0;
+ u32 alg;
+ u8 *key = 0;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "%U",
+ unformat_vnet_hw_interface, im->vnet_main, &hw_if_index))
+ ;
+ else
+ if (unformat
+ (line_input, "local crypto %U", unformat_ipsec_crypto_alg, &alg))
+ type = IPSEC_IF_SET_KEY_TYPE_LOCAL_CRYPTO;
+ else
+ if (unformat
+ (line_input, "remote crypto %U", unformat_ipsec_crypto_alg, &alg))
+ type = IPSEC_IF_SET_KEY_TYPE_REMOTE_CRYPTO;
+ else
+ if (unformat
+ (line_input, "local integ %U", unformat_ipsec_integ_alg, &alg))
+ type = IPSEC_IF_SET_KEY_TYPE_LOCAL_INTEG;
+ else
+ if (unformat
+ (line_input, "remote integ %U", unformat_ipsec_integ_alg, &alg))
+ type = IPSEC_IF_SET_KEY_TYPE_REMOTE_INTEG;
+ else if (unformat (line_input, "%U", unformat_hex_string, &key))
+ ;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (type == IPSEC_IF_SET_KEY_TYPE_NONE)
+ return clib_error_return (0, "unknown key type");
+
+ if (alg > 0 && vec_len (key) == 0)
+ return clib_error_return (0, "key is not specified");
+
+ if (hw_if_index == (u32) ~ 0)
+ return clib_error_return (0, "interface not specified");
+
+ ipsec_set_interface_key (im->vnet_main, hw_if_index, type, alg, key);
+ vec_free (key);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_key_command, static) = {
+ .path = "set interface ipsec key",
+  .short_help =
+    "set interface ipsec key <int> <local|remote> <crypto|integ> <alg> <key>",
+ .function = set_interface_key_command_fn,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+ipsec_cli_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ipsec_cli_init);
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ipsec_format.c b/src/vnet/ipsec/ipsec_format.c
new file mode 100644
index 00000000000..38aed79a155
--- /dev/null
+++ b/src/vnet/ipsec/ipsec_format.c
@@ -0,0 +1,141 @@
+/*
+ * ipsec_format.c : IPSec format/unformat functions
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+#include <vnet/interface.h>
+
+#include <vnet/ipsec/ipsec.h>
+
+u8 *
+format_ipsec_policy_action (u8 * s, va_list * args)
+{
+ u32 i = va_arg (*args, u32);
+ char *t = 0;
+
+ switch (i)
+ {
+#define _(v,f,str) case IPSEC_POLICY_ACTION_##f: t = str; break;
+ foreach_ipsec_policy_action
+#undef _
+    default:
+      /* t stays NULL for unknown values; return before the "%s" below */
+      return format (s, "unknown");
+ }
+ s = format (s, "%s", t);
+ return s;
+}
+
+uword
+unformat_ipsec_policy_action (unformat_input_t * input, va_list * args)
+{
+ u32 *r = va_arg (*args, u32 *);
+
+ if (0);
+#define _(v,f,s) else if (unformat (input, s)) *r = IPSEC_POLICY_ACTION_##f;
+ foreach_ipsec_policy_action
+#undef _
+ else
+ return 0;
+ return 1;
+}
+
+u8 *
+format_ipsec_crypto_alg (u8 * s, va_list * args)
+{
+ u32 i = va_arg (*args, u32);
+ u8 *t = 0;
+
+ switch (i)
+ {
+#define _(v,f,str) case IPSEC_CRYPTO_ALG_##f: t = (u8 *) str; break;
+ foreach_ipsec_crypto_alg
+#undef _
+    default:
+      return format (s, "unknown");
+ }
+ s = format (s, "%s", t);
+ return s;
+}
+
+uword
+unformat_ipsec_crypto_alg (unformat_input_t * input, va_list * args)
+{
+ u32 *r = va_arg (*args, u32 *);
+
+ if (0);
+#define _(v,f,s) else if (unformat (input, s)) *r = IPSEC_CRYPTO_ALG_##f;
+ foreach_ipsec_crypto_alg
+#undef _
+ else
+ return 0;
+ return 1;
+}
+
+u8 *
+format_ipsec_integ_alg (u8 * s, va_list * args)
+{
+ u32 i = va_arg (*args, u32);
+ u8 *t = 0;
+
+ switch (i)
+ {
+#define _(v,f,str) case IPSEC_INTEG_ALG_##f: t = (u8 *) str; break;
+ foreach_ipsec_integ_alg
+#undef _
+    default:
+      return format (s, "unknown");
+ }
+ s = format (s, "%s", t);
+ return s;
+}
+
+uword
+unformat_ipsec_integ_alg (unformat_input_t * input, va_list * args)
+{
+ u32 *r = va_arg (*args, u32 *);
+
+ if (0);
+#define _(v,f,s) else if (unformat (input, s)) *r = IPSEC_INTEG_ALG_##f;
+ foreach_ipsec_integ_alg
+#undef _
+ else
+ return 0;
+ return 1;
+}
+
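+
+/* print the 64-bit anti-replay window as a bit string, lowest sequence
+ * slot first */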
+u8 *
+format_ipsec_replay_window (u8 * s, va_list * args)
+{
+ u64 w = va_arg (*args, u64);
+ u8 i;
+
+ for (i = 0; i < 64; i++)
+ {
+ s = format (s, "%u", w & (1ULL << i) ? 1 : 0);
+ }
+
+ return s;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ipsec_if.c b/src/vnet/ipsec/ipsec_if.c
new file mode 100644
index 00000000000..a8da046f1a8
--- /dev/null
+++ b/src/vnet/ipsec/ipsec_if.c
@@ -0,0 +1,372 @@
+/*
+ * ipsec_if.c : IPSec interface support
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+
+#include <vnet/ipsec/ipsec.h>
+#if DPDK_CRYPTO==1
+#include <vnet/devices/dpdk/ipsec/esp.h>
+#else
+#include <vnet/ipsec/esp.h>
+#endif
+
+#if DPDK_CRYPTO==0
+/* dummy function */
+static int
+add_del_sa_sess (u32 sa_index, u8 is_add)
+{
+ return 0;
+}
+#endif
+
+void vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length);
+
+static u8 *
+format_ipsec_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, "ipsec%d", dev_instance);
+}
+
+static uword
+dummy_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ clib_warning ("you shouldn't be here, leaking buffers...");
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (ipsec_device_class, static) =
+{
+ .name = "IPSec",
+ .format_device_name = format_ipsec_name,
+ .format_tx_trace = format_ipsec_if_output_trace,
+ .tx_function = dummy_interface_tx,
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VNET_HW_INTERFACE_CLASS (ipsec_hw_class) =
+{
+ .name = "IPSec",
+ .build_rewrite = default_build_rewrite,
+};
+/* *INDENT-ON* */
+
+static int
+ipsec_add_del_tunnel_if_internal (vnet_main_t * vnm,
+ ipsec_add_del_tunnel_args_t * args);
+
+static int
+ipsec_add_del_tunnel_if_rpc_callback (ipsec_add_del_tunnel_args_t * a)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ASSERT (os_get_cpu_number () == 0);
+
+ return ipsec_add_del_tunnel_if_internal (vnm, a);
+}
+
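+
+/* may be called from any thread; the actual add/del is marshalled to the
+ * main thread via RPC because it modifies shared pools and registers
+ * interfaces */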
+int
+ipsec_add_del_tunnel_if (ipsec_add_del_tunnel_args_t * args)
+{
+ vl_api_rpc_call_main_thread (ipsec_add_del_tunnel_if_rpc_callback,
+ (u8 *) args, sizeof (*args));
+ return 0;
+}
+
+int
+ipsec_add_del_tunnel_if_internal (vnet_main_t * vnm,
+ ipsec_add_del_tunnel_args_t * args)
+{
+ ipsec_tunnel_if_t *t;
+ ipsec_main_t *im = &ipsec_main;
+ vnet_hw_interface_t *hi;
+ u32 hw_if_index = ~0;
+ uword *p;
+ ipsec_sa_t *sa;
+
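+
+  /* tunnels are keyed on (remote IPv4 address << 32 | remote SPI); the
+   * same key is used by ipsec-if-input to map received packets back to
+   * their tunnel interface */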
+ u64 key = (u64) args->remote_ip.as_u32 << 32 | (u64) args->remote_spi;
+ p = hash_get (im->ipsec_if_pool_index_by_key, key);
+
+ if (args->is_add)
+ {
+ /* check if same src/dst pair exists */
+ if (p)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ pool_get_aligned (im->tunnel_interfaces, t, CLIB_CACHE_LINE_BYTES);
+ memset (t, 0, sizeof (*t));
+
+ pool_get (im->sad, sa);
+ memset (sa, 0, sizeof (*sa));
+ t->input_sa_index = sa - im->sad;
+ sa->spi = args->remote_spi;
+ sa->tunnel_src_addr.ip4.as_u32 = args->remote_ip.as_u32;
+ sa->tunnel_dst_addr.ip4.as_u32 = args->local_ip.as_u32;
+ sa->is_tunnel = 1;
+ sa->use_esn = args->esn;
+ sa->use_anti_replay = args->anti_replay;
+ sa->integ_alg = args->integ_alg;
+ if (args->remote_integ_key_len <= sizeof (args->remote_integ_key))
+ {
+ sa->integ_key_len = args->remote_integ_key_len;
+ clib_memcpy (sa->integ_key, args->remote_integ_key,
+ args->remote_integ_key_len);
+ }
+ sa->crypto_alg = args->crypto_alg;
+ if (args->remote_crypto_key_len <= sizeof (args->remote_crypto_key))
+ {
+ sa->crypto_key_len = args->remote_crypto_key_len;
+ clib_memcpy (sa->crypto_key, args->remote_crypto_key,
+ args->remote_crypto_key_len);
+ }
+
+ add_del_sa_sess (t->input_sa_index, args->is_add);
+
+ pool_get (im->sad, sa);
+ memset (sa, 0, sizeof (*sa));
+ t->output_sa_index = sa - im->sad;
+ sa->spi = args->local_spi;
+ sa->tunnel_src_addr.ip4.as_u32 = args->local_ip.as_u32;
+ sa->tunnel_dst_addr.ip4.as_u32 = args->remote_ip.as_u32;
+ sa->is_tunnel = 1;
+ sa->seq = 1;
+ sa->use_esn = args->esn;
+ sa->use_anti_replay = args->anti_replay;
+ sa->integ_alg = args->integ_alg;
+ if (args->local_integ_key_len <= sizeof (args->local_integ_key))
+ {
+ sa->integ_key_len = args->local_integ_key_len;
+ clib_memcpy (sa->integ_key, args->local_integ_key,
+ args->local_integ_key_len);
+ }
+ sa->crypto_alg = args->crypto_alg;
+ if (args->local_crypto_key_len <= sizeof (args->local_crypto_key))
+ {
+ sa->crypto_key_len = args->local_crypto_key_len;
+ clib_memcpy (sa->crypto_key, args->local_crypto_key,
+ args->local_crypto_key_len);
+ }
+
+ add_del_sa_sess (t->output_sa_index, args->is_add);
+
+ hash_set (im->ipsec_if_pool_index_by_key, key,
+ t - im->tunnel_interfaces);
+
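+
+      /* reuse a previously freed hw interface index if one is available;
+       * otherwise register a new interface */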
+ if (vec_len (im->free_tunnel_if_indices) > 0)
+ {
+ hw_if_index =
+ im->free_tunnel_if_indices[vec_len (im->free_tunnel_if_indices) -
+ 1];
+ _vec_len (im->free_tunnel_if_indices) -= 1;
+ }
+ else
+ {
+ hw_if_index =
+ vnet_register_interface (vnm, ipsec_device_class.index,
+ t - im->tunnel_interfaces,
+ ipsec_hw_class.index,
+ t - im->tunnel_interfaces);
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ hi->output_node_index = ipsec_if_output_node.index;
+ }
+ t->hw_if_index = hw_if_index;
+
+      /* 1st interface, register protocol */
+ if (pool_elts (im->tunnel_interfaces) == 1)
+ ip4_register_protocol (IP_PROTOCOL_IPSEC_ESP,
+ ipsec_if_input_node.index);
+
+ return hw_if_index;
+ }
+ else
+ {
+ /* check if exists */
+ if (!p)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ t = pool_elt_at_index (im->tunnel_interfaces, p[0]);
+ hi = vnet_get_hw_interface (vnm, t->hw_if_index);
+ vnet_sw_interface_set_flags (vnm, hi->sw_if_index, 0); /* admin down */
+ vec_add1 (im->free_tunnel_if_indices, t->hw_if_index);
+
+ /* delete input and output SA */
+ sa = pool_elt_at_index (im->sad, t->input_sa_index);
+
+ if (add_del_sa_sess (t->input_sa_index, args->is_add) < 0)
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+
+ pool_put (im->sad, sa);
+
+ sa = pool_elt_at_index (im->sad, t->output_sa_index);
+
+ if (add_del_sa_sess (t->output_sa_index, args->is_add) < 0)
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+
+ pool_put (im->sad, sa);
+
+ hash_unset (im->ipsec_if_pool_index_by_key, key);
+ pool_put (im->tunnel_interfaces, t);
+ }
+ return 0;
+}
+
+int
+ipsec_add_del_ipsec_gre_tunnel (vnet_main_t * vnm,
+ ipsec_add_del_ipsec_gre_tunnel_args_t * args)
+{
+ ipsec_tunnel_if_t *t = 0;
+ ipsec_main_t *im = &ipsec_main;
+ uword *p;
+ ipsec_sa_t *sa;
+ u64 key;
+ u32 isa, osa;
+
+ p = hash_get (im->sa_index_by_sa_id, args->local_sa_id);
+ if (!p)
+ return VNET_API_ERROR_INVALID_VALUE;
+ isa = p[0];
+
+ p = hash_get (im->sa_index_by_sa_id, args->remote_sa_id);
+ if (!p)
+ return VNET_API_ERROR_INVALID_VALUE;
+ osa = p[0];
+ sa = pool_elt_at_index (im->sad, p[0]);
+
+ if (sa->is_tunnel)
+ key = (u64) sa->tunnel_dst_addr.ip4.as_u32 << 32 | (u64) sa->spi;
+ else
+ key = (u64) args->remote_ip.as_u32 << 32 | (u64) sa->spi;
+
+ p = hash_get (im->ipsec_if_pool_index_by_key, key);
+
+ if (args->is_add)
+ {
+ /* check if same src/dst pair exists */
+ if (p)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ pool_get_aligned (im->tunnel_interfaces, t, CLIB_CACHE_LINE_BYTES);
+ memset (t, 0, sizeof (*t));
+
+ t->input_sa_index = isa;
+ t->output_sa_index = osa;
+ t->hw_if_index = ~0;
+ hash_set (im->ipsec_if_pool_index_by_key, key,
+ t - im->tunnel_interfaces);
+
+      /* 1st interface, register protocol */
+ if (pool_elts (im->tunnel_interfaces) == 1)
+ ip4_register_protocol (IP_PROTOCOL_IPSEC_ESP,
+ ipsec_if_input_node.index);
+ }
+ else
+ {
+ /* check if exists */
+ if (!p)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ t = pool_elt_at_index (im->tunnel_interfaces, p[0]);
+ hash_unset (im->ipsec_if_pool_index_by_key, key);
+ pool_put (im->tunnel_interfaces, t);
+ }
+ return 0;
+}
+
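+
+/* "local" keys apply to the output (encrypt) SA, "remote" keys to the
+ * input (decrypt) SA; the crypto session for the modified SA is
+ * re-created via add_del_sa_sess */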
+int
+ipsec_set_interface_key (vnet_main_t * vnm, u32 hw_if_index,
+ ipsec_if_set_key_type_t type, u8 alg, u8 * key)
+{
+ ipsec_main_t *im = &ipsec_main;
+ vnet_hw_interface_t *hi;
+ ipsec_tunnel_if_t *t;
+ ipsec_sa_t *sa;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ t = pool_elt_at_index (im->tunnel_interfaces, hi->dev_instance);
+
+ if (type == IPSEC_IF_SET_KEY_TYPE_LOCAL_CRYPTO)
+ {
+ sa = pool_elt_at_index (im->sad, t->output_sa_index);
+ sa->crypto_alg = alg;
+ sa->crypto_key_len = vec_len (key);
+ clib_memcpy (sa->crypto_key, key, vec_len (key));
+
+      if (add_del_sa_sess (t->output_sa_index, 0) < 0)
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ }
+ else if (type == IPSEC_IF_SET_KEY_TYPE_LOCAL_INTEG)
+ {
+ sa = pool_elt_at_index (im->sad, t->output_sa_index);
+ sa->integ_alg = alg;
+ sa->integ_key_len = vec_len (key);
+ clib_memcpy (sa->integ_key, key, vec_len (key));
+
+ if (add_del_sa_sess (t->output_sa_index, 0) < 0)
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ }
+ else if (type == IPSEC_IF_SET_KEY_TYPE_REMOTE_CRYPTO)
+ {
+ sa = pool_elt_at_index (im->sad, t->input_sa_index);
+ sa->crypto_alg = alg;
+ sa->crypto_key_len = vec_len (key);
+ clib_memcpy (sa->crypto_key, key, vec_len (key));
+
+ if (add_del_sa_sess (t->input_sa_index, 0) < 0)
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ }
+ else if (type == IPSEC_IF_SET_KEY_TYPE_REMOTE_INTEG)
+ {
+ sa = pool_elt_at_index (im->sad, t->input_sa_index);
+ sa->integ_alg = alg;
+ sa->integ_key_len = vec_len (key);
+ clib_memcpy (sa->integ_key, key, vec_len (key));
+
+      if (add_del_sa_sess (t->input_sa_index, 0) < 0)
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ }
+ else
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ return 0;
+}
+
+
+clib_error_t *
+ipsec_tunnel_if_init (vlib_main_t * vm)
+{
+ ipsec_main_t *im = &ipsec_main;
+
+ im->ipsec_if_pool_index_by_key = hash_create (0, sizeof (uword));
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ipsec_tunnel_if_init);
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ipsec_if_in.c b/src/vnet/ipsec/ipsec_if_in.c
new file mode 100644
index 00000000000..db75ab92da9
--- /dev/null
+++ b/src/vnet/ipsec/ipsec_if_in.c
@@ -0,0 +1,175 @@
+/*
+ * ipsec_if_in.c : IPSec interface input node
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+
+#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/esp.h>
+
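+
+/* when built with DPDK crypto offload, decryption is handed to the
+ * dpdk-esp-decrypt node instead of the software esp-decrypt node */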
+#if DPDK_CRYPTO==1
+#define ESP_NODE "dpdk-esp-decrypt"
+#else
+#define ESP_NODE "esp-decrypt"
+#endif
+
+/* Statistics (not really errors) */
+#define foreach_ipsec_if_input_error \
+_(RX, "good packets received")
+
+static char *ipsec_if_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ipsec_if_input_error
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) IPSEC_IF_INPUT_ERROR_##sym,
+ foreach_ipsec_if_input_error
+#undef _
+ IPSEC_IF_INPUT_N_ERROR,
+} ipsec_if_input_error_t;
+
+typedef enum
+{
+ IPSEC_IF_INPUT_NEXT_ESP_DECRYPT,
+ IPSEC_IF_INPUT_NEXT_DROP,
+ IPSEC_IF_INPUT_N_NEXT,
+} ipsec_if_input_next_t;
+
+typedef struct
+{
+ u32 spi;
+ u32 seq;
+} ipsec_if_input_trace_t;
+
+
+u8 *
+format_ipsec_if_input_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ipsec_if_input_trace_t *t = va_arg (*args, ipsec_if_input_trace_t *);
+
+ s = format (s, "IPSec: spi %u seq %u", t->spi, t->seq);
+ return s;
+}
+
+static uword
+ipsec_if_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ ipsec_main_t *im = &ipsec_main;
+ u32 *from, *to_next = 0, next_index;
+ u32 n_left_from;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, next0;
+ vlib_buffer_t *b0;
+ ip4_header_t *ip0;
+ esp_header_t *esp0;
+ uword *p;
+
+ bi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ b0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (b0);
+ esp0 = (esp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
+
+ next0 = IPSEC_IF_INPUT_NEXT_DROP;
+
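+
+	  /* reconstruct the (src address << 32 | SPI) key that was stored
+	   * when the tunnel was created and look up the owning interface */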
+ u64 key = (u64) ip0->src_address.as_u32 << 32 |
+ (u64) clib_net_to_host_u32 (esp0->spi);
+
+ p = hash_get (im->ipsec_if_pool_index_by_key, key);
+
+ if (p)
+ {
+ ipsec_tunnel_if_t *t;
+ t = pool_elt_at_index (im->tunnel_interfaces, p[0]);
+ vnet_buffer (b0)->ipsec.sad_index = t->input_sa_index;
+ vnet_buffer (b0)->ipsec.flags =
+ t->hw_if_index == ~0 ? IPSEC_FLAG_IPSEC_GRE_TUNNEL : 0;
+ vlib_buffer_advance (b0, ip4_header_bytes (ip0));
+ next0 = IPSEC_IF_INPUT_NEXT_ESP_DECRYPT;
+ }
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ipsec_if_input_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+	      tr->spi = clib_net_to_host_u32 (esp0->spi);
+	      tr->seq = clib_net_to_host_u32 (esp0->seq);
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, ipsec_if_input_node.index,
+ IPSEC_IF_INPUT_ERROR_RX,
+ from_frame->n_vectors);
+
+ return from_frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ipsec_if_input_node) = {
+ .function = ipsec_if_input_node_fn,
+ .name = "ipsec-if-input",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ipsec_if_input_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(ipsec_if_input_error_strings),
+ .error_strings = ipsec_if_input_error_strings,
+
+ .n_next_nodes = IPSEC_IF_INPUT_N_NEXT,
+
+ .next_nodes = {
+ [IPSEC_IF_INPUT_NEXT_ESP_DECRYPT] = ESP_NODE,
+ [IPSEC_IF_INPUT_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ipsec_if_input_node, ipsec_if_input_node_fn)
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ipsec_if_out.c b/src/vnet/ipsec/ipsec_if_out.c
new file mode 100644
index 00000000000..8f062828413
--- /dev/null
+++ b/src/vnet/ipsec/ipsec_if_out.c
@@ -0,0 +1,161 @@
+/*
+ * ipsec_if_out.c : IPSec interface output node
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+
+#include <vnet/ipsec/ipsec.h>
+
+#if DPDK_CRYPTO==1
+#define ESP_NODE "dpdk-esp-encrypt"
+#else
+#define ESP_NODE "esp-encrypt"
+#endif
+
+/* Statistics (not really errors) */
+#define foreach_ipsec_if_output_error \
+_(TX, "good packets transmitted")
+
+static char *ipsec_if_output_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ipsec_if_output_error
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) IPSEC_IF_OUTPUT_ERROR_##sym,
+ foreach_ipsec_if_output_error
+#undef _
+ IPSEC_IF_OUTPUT_N_ERROR,
+} ipsec_if_output_error_t;
+
+typedef enum
+{
+ IPSEC_IF_OUTPUT_NEXT_ESP_ENCRYPT,
+ IPSEC_IF_OUTPUT_NEXT_DROP,
+ IPSEC_IF_OUTPUT_N_NEXT,
+} ipsec_if_output_next_t;
+
+typedef struct
+{
+ u32 spi;
+ u32 seq;
+} ipsec_if_output_trace_t;
+
+
+u8 *
+format_ipsec_if_output_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ipsec_if_output_trace_t *t = va_arg (*args, ipsec_if_output_trace_t *);
+
+ s = format (s, "IPSec: spi %u seq %u", t->spi, t->seq);
+ return s;
+}
+
+static uword
+ipsec_if_output_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ ipsec_main_t *im = &ipsec_main;
+ vnet_main_t *vnm = im->vnet_main;
+ u32 *from, *to_next = 0, next_index;
+ u32 n_left_from, sw_if_index0;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, next0;
+ vlib_buffer_t *b0;
+ ipsec_tunnel_if_t *t0;
+ vnet_hw_interface_t *hi0;
+
+ bi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
+ t0 = pool_elt_at_index (im->tunnel_interfaces, hi0->dev_instance);
+ vnet_buffer (b0)->ipsec.sad_index = t0->output_sa_index;
+ next0 = IPSEC_IF_OUTPUT_NEXT_ESP_ENCRYPT;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ipsec_if_output_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ ipsec_sa_t *sa0 =
+ pool_elt_at_index (im->sad, t0->output_sa_index);
+ tr->spi = sa0->spi;
+ tr->seq = sa0->seq;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, ipsec_if_output_node.index,
+ IPSEC_IF_OUTPUT_ERROR_TX,
+ from_frame->n_vectors);
+
+ return from_frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ipsec_if_output_node) = {
+ .function = ipsec_if_output_node_fn,
+ .name = "ipsec-if-output",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ipsec_if_output_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(ipsec_if_output_error_strings),
+ .error_strings = ipsec_if_output_error_strings,
+
+ .n_next_nodes = IPSEC_IF_OUTPUT_N_NEXT,
+
+ .next_nodes = {
+ [IPSEC_IF_OUTPUT_NEXT_ESP_ENCRYPT] = ESP_NODE,
+ [IPSEC_IF_OUTPUT_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ipsec_if_output_node, ipsec_if_output_node_fn)
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ipsec_input.c b/src/vnet/ipsec/ipsec_input.c
new file mode 100644
index 00000000000..4662c1a1cf0
--- /dev/null
+++ b/src/vnet/ipsec/ipsec_input.c
@@ -0,0 +1,455 @@
+/*
+ * ipsec_input.c : IPSec inbound policy match and decapsulation dispatch
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+#include <vnet/feature/feature.h>
+
+#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/esp.h>
+
+#if DPDK_CRYPTO==1
+#define ESP_NODE "dpdk-esp-decrypt"
+#else
+#define ESP_NODE "esp-decrypt"
+#endif
+
+#define foreach_ipsec_input_next \
+_(DROP, "error-drop") \
+_(ESP_DECRYPT, ESP_NODE)
+
+#define _(v, s) IPSEC_INPUT_NEXT_##v,
+typedef enum
+{
+ foreach_ipsec_input_next
+#undef _
+ IPSEC_INPUT_N_NEXT,
+} ipsec_input_next_t;
+
+
+#define foreach_ipsec_input_error \
+ _(RX_PKTS, "IPSEC pkts received") \
+ _(DECRYPTION_FAILED, "IPSEC decryption failed")
+
+
+typedef enum
+{
+#define _(sym,str) IPSEC_INPUT_ERROR_##sym,
+ foreach_ipsec_input_error
+#undef _
+ IPSEC_INPUT_N_ERROR,
+} ipsec_input_error_t;
+
+static char *ipsec_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ipsec_input_error
+#undef _
+};
+
+typedef struct
+{
+ u32 sa_id;
+ u32 spi;
+ u32 seq;
+} ipsec_input_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_ipsec_input_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ipsec_input_trace_t *t = va_arg (*args, ipsec_input_trace_t *);
+
+ if (t->spi == 0 && t->seq == 0)
+ {
+ s = format (s, "esp: no esp packet");
+ return s;
+ }
+
+ if (t->sa_id != 0)
+ {
+ s = format (s, "esp: sa_id %u spi %u seq %u", t->sa_id, t->spi, t->seq);
+ }
+ else
+ {
+ s = format (s, "esp: no sa spi %u seq %u", t->spi, t->seq);
+ }
+ return s;
+}
+
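+
+/* match an inbound packet against the PROTECT policies: tunnel-mode SAs
+ * must match SPI and both tunnel endpoints exactly; transport-mode SAs
+ * fall back to the policy's local/remote address ranges */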
+always_inline ipsec_policy_t *
+ipsec_input_protect_policy_match (ipsec_spd_t * spd, u32 sa, u32 da, u32 spi)
+{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_policy_t *p;
+ ipsec_sa_t *s;
+ u32 *i;
+
+ vec_foreach (i, spd->ipv4_inbound_protect_policy_indices)
+ {
+ p = pool_elt_at_index (spd->policies, *i);
+ s = pool_elt_at_index (im->sad, p->sa_index);
+
+ if (spi != s->spi)
+ continue;
+
+ if (s->is_tunnel)
+ {
+ if (da != clib_net_to_host_u32 (s->tunnel_dst_addr.ip4.as_u32))
+ continue;
+
+ if (sa != clib_net_to_host_u32 (s->tunnel_src_addr.ip4.as_u32))
+ continue;
+
+ return p;
+ }
+
+ if (da < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
+ continue;
+
+ if (da > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
+ continue;
+
+ if (sa < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
+ continue;
+
+ if (sa > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
+ continue;
+
+ return p;
+ }
+ return 0;
+}
+
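+
+/* IPv6 addresses are stored big-endian, so a bytewise memcmp over the
+ * full 128 bits orders them numerically; a lexicographic compare against
+ * the range bounds is therefore equivalent to a numeric range check */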
+always_inline uword
+ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la,
+ ip6_address_t * ua)
+{
+ if ((memcmp (a->as_u64, la->as_u64, 2 * sizeof (u64)) >= 0) &&
+ (memcmp (a->as_u64, ua->as_u64, 2 * sizeof (u64)) <= 0))
+ return 1;
+ return 0;
+}
+
+always_inline ipsec_policy_t *
+ipsec_input_ip6_protect_policy_match (ipsec_spd_t * spd,
+ ip6_address_t * sa,
+ ip6_address_t * da, u32 spi)
+{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_policy_t *p;
+ ipsec_sa_t *s;
+ u32 *i;
+
+ vec_foreach (i, spd->ipv6_inbound_protect_policy_indices)
+ {
+ p = pool_elt_at_index (spd->policies, *i);
+ s = pool_elt_at_index (im->sad, p->sa_index);
+
+ if (spi != s->spi)
+ continue;
+
+ if (s->is_tunnel)
+ {
+ if (!ip6_address_is_equal (sa, &s->tunnel_src_addr.ip6))
+ continue;
+
+ if (!ip6_address_is_equal (da, &s->tunnel_dst_addr.ip6))
+ continue;
+
+ return p;
+ }
+
+ if (!ip6_addr_match_range (sa, &p->raddr.start.ip6, &p->raddr.stop.ip6))
+ continue;
+
+ if (!ip6_addr_match_range (da, &p->laddr.start.ip6, &p->laddr.stop.ip6))
+ continue;
+
+ return p;
+ }
+ return 0;
+}
+
+static vlib_node_registration_t ipsec_input_ip4_node;
+
+static uword
+ipsec_input_ip4_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, *from, next_index, *to_next;
+ ipsec_main_t *im = &ipsec_main;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, next0;
+ vlib_buffer_t *b0;
+ ip4_header_t *ip0;
+ esp_header_t *esp0;
+ ip4_ipsec_config_t *c0;
+ ipsec_spd_t *spd0;
+ ipsec_policy_t *p0 = 0;
+
+ bi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ c0 =
+ vnet_feature_next_with_data (vnet_buffer (b0)->sw_if_index
+ [VLIB_RX], &next0, b0,
+ sizeof (c0[0]));
+
+ spd0 = pool_elt_at_index (im->spds, c0->spd_index);
+
+ ip0 = vlib_buffer_get_current (b0);
+ esp0 = (esp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
+
+ if (PREDICT_TRUE (ip0->protocol == IP_PROTOCOL_IPSEC_ESP))
+ {
+#if 0
+ clib_warning
+ ("packet received from %U to %U spi %u size %u spd_id %u",
+ format_ip4_address, ip0->src_address.as_u8,
+ format_ip4_address, ip0->dst_address.as_u8,
+ clib_net_to_host_u32 (esp0->spi),
+ clib_net_to_host_u16 (ip0->length), spd0->id);
+#endif
+
+ p0 = ipsec_input_protect_policy_match (spd0,
+ clib_net_to_host_u32
+ (ip0->src_address.
+ as_u32),
+ clib_net_to_host_u32
+ (ip0->dst_address.
+ as_u32),
+ clib_net_to_host_u32
+ (esp0->spi));
+
+ if (PREDICT_TRUE (p0 != 0))
+ {
+ p0->counter.packets++;
+ p0->counter.bytes += clib_net_to_host_u16 (ip0->length);
+ vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
+ vnet_buffer (b0)->ipsec.flags = 0;
+ next0 = IPSEC_INPUT_NEXT_ESP_DECRYPT;
+ vlib_buffer_advance (b0, ip4_header_bytes (ip0));
+ goto trace0;
+ }
+ }
+
+ /* FIXME bypass and discard */
+
+ trace0:
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ipsec_input_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ if (ip0->protocol == IP_PROTOCOL_IPSEC_ESP)
+ {
+ if (p0)
+ tr->sa_id = p0->sa_id;
+		  tr->spi = clib_net_to_host_u32 (esp0->spi);
+		  tr->seq = clib_net_to_host_u32 (esp0->seq);
+ }
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, bi0,
+ next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, ipsec_input_ip4_node.index,
+ IPSEC_INPUT_ERROR_RX_PKTS,
+ from_frame->n_vectors);
+
+ return from_frame->n_vectors;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ipsec_input_ip4_node,static) = {
+ .function = ipsec_input_ip4_node_fn,
+ .name = "ipsec-input-ip4",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ipsec_input_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(ipsec_input_error_strings),
+ .error_strings = ipsec_input_error_strings,
+
+ .n_next_nodes = IPSEC_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [IPSEC_INPUT_NEXT_##s] = n,
+ foreach_ipsec_input_next
+#undef _
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ipsec_input_ip4_node, ipsec_input_ip4_node_fn)
+
+static vlib_node_registration_t ipsec_input_ip6_node;
+
+static uword
+ipsec_input_ip6_node_fn (vlib_main_t * vm,
+			 vlib_node_runtime_t * node,
+			 vlib_frame_t * from_frame)
+{
+ u32 n_left_from, *from, next_index, *to_next;
+ ipsec_main_t *im = &ipsec_main;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, next0;
+ vlib_buffer_t *b0;
+ ip6_header_t *ip0;
+ esp_header_t *esp0;
+ ip4_ipsec_config_t *c0;
+ ipsec_spd_t *spd0;
+ ipsec_policy_t *p0 = 0;
+ u32 header_size = sizeof (ip0[0]);
+
+ bi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ c0 =
+ vnet_feature_next_with_data (vnet_buffer (b0)->sw_if_index
+ [VLIB_RX], &next0, b0,
+ sizeof (c0[0]));
+
+ spd0 = pool_elt_at_index (im->spds, c0->spd_index);
+
+ ip0 = vlib_buffer_get_current (b0);
+ esp0 = (esp_header_t *) ((u8 *) ip0 + header_size);
+
+ if (PREDICT_TRUE (ip0->protocol == IP_PROTOCOL_IPSEC_ESP))
+ {
+#if 0
+ clib_warning
+ ("packet received from %U to %U spi %u size %u spd_id %u",
+ format_ip6_address, &ip0->src_address, format_ip6_address,
+ &ip0->dst_address, clib_net_to_host_u32 (esp0->spi),
+ clib_net_to_host_u16 (ip0->payload_length) + header_size,
+ spd0->id);
+#endif
+ p0 = ipsec_input_ip6_protect_policy_match (spd0,
+ &ip0->src_address,
+ &ip0->dst_address,
+ clib_net_to_host_u32
+ (esp0->spi));
+
+ if (PREDICT_TRUE (p0 != 0))
+ {
+ p0->counter.packets++;
+ p0->counter.bytes +=
+ clib_net_to_host_u16 (ip0->payload_length);
+ p0->counter.bytes += header_size;
+ vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
+ vnet_buffer (b0)->ipsec.flags = 0;
+ next0 = IPSEC_INPUT_NEXT_ESP_DECRYPT;
+ vlib_buffer_advance (b0, header_size);
+ goto trace0;
+ }
+ }
+
+ trace0:
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ipsec_input_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ if (ip0->protocol == IP_PROTOCOL_IPSEC_ESP)
+ {
+ if (p0)
+ tr->sa_id = p0->sa_id;
+		  tr->spi = clib_net_to_host_u32 (esp0->spi);
+		  tr->seq = clib_net_to_host_u32 (esp0->seq);
+ }
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, ipsec_input_ip6_node.index,
+ IPSEC_INPUT_ERROR_RX_PKTS,
+ from_frame->n_vectors);
+
+ return from_frame->n_vectors;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ipsec_input_ip6_node,static) = {
+ .function = ipsec_input_ip6_node_fn,
+ .name = "ipsec-input-ip6",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ipsec_input_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(ipsec_input_error_strings),
+ .error_strings = ipsec_input_error_strings,
+
+ .n_next_nodes = IPSEC_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [IPSEC_INPUT_NEXT_##s] = n,
+ foreach_ipsec_input_next
+#undef _
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ipsec_input_ip6_node, ipsec_input_ip6_node_fn)
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ipsec_output.c b/src/vnet/ipsec/ipsec_output.c
new file mode 100644
index 00000000000..97977899132
--- /dev/null
+++ b/src/vnet/ipsec/ipsec_output.c
@@ -0,0 +1,478 @@
+/*
+ * ipsec_output.c : IPSec output node
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+
+#include <vnet/ipsec/ipsec.h>
+
+#if DPDK_CRYPTO==1
+#define ESP_NODE "dpdk-esp-encrypt"
+#else
+#define ESP_NODE "esp-encrypt"
+#endif
+
+#if IPSEC > 0
+
+#define foreach_ipsec_output_next \
+_(DROP, "error-drop") \
+_(ESP_ENCRYPT, ESP_NODE)
+
+#define _(v, s) IPSEC_OUTPUT_NEXT_##v,
+typedef enum
+{
+ foreach_ipsec_output_next
+#undef _
+ IPSEC_OUTPUT_N_NEXT,
+} ipsec_output_next_t;
+
+
+#define foreach_ipsec_output_error \
+ _(RX_PKTS, "IPSec pkts received") \
+ _(POLICY_DISCARD, "IPSec policy discard") \
+ _(POLICY_NO_MATCH, "IPSec policy (no match)") \
+ _(POLICY_PROTECT, "IPSec policy protect") \
+ _(POLICY_BYPASS, "IPSec policy bypass") \
+ _(ENCAPS_FAILED, "IPSec encapsulation failed")
+
+
+typedef enum
+{
+#define _(sym,str) IPSEC_OUTPUT_ERROR_##sym,
+ foreach_ipsec_output_error
+#undef _
+  IPSEC_OUTPUT_N_ERROR,
+} ipsec_output_error_t;
+
+static char *ipsec_output_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ipsec_output_error
+#undef _
+};
+
+static vlib_node_registration_t ipsec_output_ip4_node;
+static vlib_node_registration_t ipsec_output_ip6_node;
+
+typedef struct
+{
+ u32 spd_id;
+} ipsec_output_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_ipsec_output_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ipsec_output_trace_t *t = va_arg (*args, ipsec_output_trace_t *);
+
+ if (t->spd_id != ~0)
+ {
+ s = format (s, "spd %u ", t->spd_id);
+ }
+ else
+ {
+ s = format (s, "no spd");
+ }
+ return s;
+}
+
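+
+/* walk the outbound policy vector and return the first match; the vector
+ * is assumed to be kept ordered by priority when policies are added, so
+ * the first match is the highest-priority one. Port ranges are only
+ * checked for TCP and UDP */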
+always_inline ipsec_policy_t *
+ipsec_output_policy_match (ipsec_spd_t * spd, u8 pr, u32 la, u32 ra, u16 lp,
+ u16 rp)
+{
+ ipsec_policy_t *p;
+ u32 *i;
+
+ if (!spd)
+ return 0;
+
+ vec_foreach (i, spd->ipv4_outbound_policies)
+ {
+ p = pool_elt_at_index (spd->policies, *i);
+ if (PREDICT_FALSE (p->protocol && (p->protocol != pr)))
+ continue;
+
+ if (la < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
+ continue;
+
+ if (la > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
+ continue;
+
+ if (ra < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
+ continue;
+
+ if (ra > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
+ continue;
+
+ if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP)))
+ return p;
+
+ if (lp < p->lport.start)
+ continue;
+
+ if (lp > p->lport.stop)
+ continue;
+
+ if (rp < p->rport.start)
+ continue;
+
+ if (rp > p->rport.stop)
+ continue;
+
+ return p;
+ }
+ return 0;
+}
+
+always_inline uword
+ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la,
+ ip6_address_t * ua)
+{
+ if ((memcmp (a->as_u64, la->as_u64, 2 * sizeof (u64)) >= 0) &&
+ (memcmp (a->as_u64, ua->as_u64, 2 * sizeof (u64)) <= 0))
+ return 1;
+ return 0;
+}
+
+always_inline ipsec_policy_t *
+ipsec_output_ip6_policy_match (ipsec_spd_t * spd,
+ ip6_address_t * la,
+ ip6_address_t * ra, u16 lp, u16 rp, u8 pr)
+{
+ ipsec_policy_t *p;
+ u32 *i;
+
+ if (!spd)
+ return 0;
+
+ vec_foreach (i, spd->ipv6_outbound_policies)
+ {
+ p = pool_elt_at_index (spd->policies, *i);
+ if (PREDICT_FALSE (p->protocol && (p->protocol != pr)))
+ continue;
+
+ if (!ip6_addr_match_range (ra, &p->raddr.start.ip6, &p->raddr.stop.ip6))
+ continue;
+
+ if (!ip6_addr_match_range (la, &p->laddr.start.ip6, &p->laddr.stop.ip6))
+ continue;
+
+ if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP)))
+ return p;
+
+ if (lp < p->lport.start)
+ continue;
+
+ if (lp > p->lport.stop)
+ continue;
+
+ if (rp < p->rport.start)
+ continue;
+
+ if (rp > p->rport.stop)
+ continue;
+
+ return p;
+ }
+
+ return 0;
+}
+
+static inline uword
+ipsec_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame, int is_ipv6)
+{
+ ipsec_main_t *im = &ipsec_main;
+
+ u32 *from, *to_next = 0;
+ u32 n_left_from, sw_if_index0, last_sw_if_index = (u32) ~ 0;
+ u32 next_node_index = (u32) ~ 0, last_next_node_index = (u32) ~ 0;
+ vlib_frame_t *f = 0;
+ u32 spd_index0 = ~0;
+ ipsec_spd_t *spd0 = 0;
+ u64 nc_protect = 0, nc_bypass = 0, nc_discard = 0, nc_nomatch = 0;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ ipsec_policy_t *p0;
+ ip4_header_t *ip0;
+ ip6_header_t *ip6_0 = 0;
+ udp_header_t *udp0;
+ u32 iph_offset = 0;
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ iph_offset = vnet_buffer (b0)->ip.save_rewrite_length;
+ ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0)
+ + iph_offset);
+
+ /* lookup for SPD only if sw_if_index is changed */
+ if (PREDICT_FALSE (last_sw_if_index != sw_if_index0))
+ {
+ uword *p = hash_get (im->spd_index_by_sw_if_index, sw_if_index0);
+ ASSERT (p);
+ spd_index0 = p[0];
+ spd0 = pool_elt_at_index (im->spds, spd_index0);
+ last_sw_if_index = sw_if_index0;
+ }
+
+ if (is_ipv6)
+ {
+ ip6_0 = (ip6_header_t *) ((u8 *) vlib_buffer_get_current (b0)
+ + iph_offset);
+
+ udp0 = ip6_next_header (ip6_0);
+#if 0
+ clib_warning
+ ("packet received from %U port %u to %U port %u spd_id %u",
+ format_ip6_address, &ip6_0->src_address,
+ clib_net_to_host_u16 (udp0->src_port), format_ip6_address,
+ &ip6_0->dst_address, clib_net_to_host_u16 (udp0->dst_port),
+ spd0->id);
+#endif
+
+ p0 = ipsec_output_ip6_policy_match (spd0,
+ &ip6_0->src_address,
+ &ip6_0->dst_address,
+ clib_net_to_host_u16
+ (udp0->src_port),
+ clib_net_to_host_u16
+ (udp0->dst_port),
+ ip6_0->protocol);
+ }
+ else
+ {
+ udp0 = (udp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
+
+#if 0
+ clib_warning ("packet received from %U to %U port %u",
+ format_ip4_address, ip0->src_address.as_u8,
+ format_ip4_address, ip0->dst_address.as_u8,
+ clib_net_to_host_u16 (udp0->dst_port));
+ clib_warning ("sw_if_index0 %u spd_index0 %u spd_id %u",
+ sw_if_index0, spd_index0, spd0->id);
+#endif
+
+ p0 = ipsec_output_policy_match (spd0, ip0->protocol,
+ clib_net_to_host_u32
+ (ip0->src_address.as_u32),
+ clib_net_to_host_u32
+ (ip0->dst_address.as_u32),
+ clib_net_to_host_u16
+ (udp0->src_port),
+ clib_net_to_host_u16
+ (udp0->dst_port));
+ }
+
+ if (PREDICT_TRUE (p0 != NULL))
+ {
+ if (p0->policy == IPSEC_POLICY_ACTION_PROTECT)
+ {
+ nc_protect++;
+ next_node_index = im->esp_encrypt_node_index;
+ vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
+ vlib_buffer_advance (b0, iph_offset);
+ p0->counter.packets++;
+ if (is_ipv6)
+ {
+ p0->counter.bytes +=
+ clib_net_to_host_u16 (ip6_0->payload_length);
+ p0->counter.bytes += sizeof (ip6_header_t);
+ }
+ else
+ {
+ p0->counter.bytes += clib_net_to_host_u16 (ip0->length);
+ }
+ }
+ else if (p0->policy == IPSEC_POLICY_ACTION_BYPASS)
+ {
+ nc_bypass++;
+ next_node_index = get_next_output_feature_node_index (b0, node);
+ p0->counter.packets++;
+ if (is_ipv6)
+ {
+ p0->counter.bytes +=
+ clib_net_to_host_u16 (ip6_0->payload_length);
+ p0->counter.bytes += sizeof (ip6_header_t);
+ }
+ else
+ {
+ p0->counter.bytes += clib_net_to_host_u16 (ip0->length);
+ }
+ }
+ else
+ {
+ nc_discard++;
+ p0->counter.packets++;
+ if (is_ipv6)
+ {
+ p0->counter.bytes +=
+ clib_net_to_host_u16 (ip6_0->payload_length);
+ p0->counter.bytes += sizeof (ip6_header_t);
+ }
+ else
+ {
+ p0->counter.bytes += clib_net_to_host_u16 (ip0->length);
+ }
+ next_node_index = im->error_drop_node_index;
+ }
+ }
+ else
+ {
+ nc_nomatch++;
+ next_node_index = im->error_drop_node_index;
+ }
+
+ from += 1;
+ n_left_from -= 1;
+
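+
+      /* buffers are dispatched straight to per-policy next nodes: keep
+       * appending to the current frame while the next node is unchanged,
+       * and flush it whenever the next node (or the first buffer) changes */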
+ if (PREDICT_FALSE ((last_next_node_index != next_node_index) || f == 0))
+ {
+ /* if this is not 1st frame */
+ if (f)
+ vlib_put_frame_to_node (vm, last_next_node_index, f);
+
+ last_next_node_index = next_node_index;
+
+ f = vlib_get_frame_to_node (vm, next_node_index);
+ to_next = vlib_frame_vector_args (f);
+ }
+
+ to_next[0] = bi0;
+ to_next += 1;
+ f->n_vectors++;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ipsec_output_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ if (spd0)
+ tr->spd_id = spd0->id;
+ }
+ }
+
+ vlib_put_frame_to_node (vm, next_node_index, f);
+ vlib_node_increment_counter (vm, node->node_index,
+ IPSEC_OUTPUT_ERROR_POLICY_PROTECT, nc_protect);
+ vlib_node_increment_counter (vm, node->node_index,
+ IPSEC_OUTPUT_ERROR_POLICY_BYPASS, nc_bypass);
+ vlib_node_increment_counter (vm, node->node_index,
+ IPSEC_OUTPUT_ERROR_POLICY_DISCARD, nc_discard);
+ vlib_node_increment_counter (vm, node->node_index,
+ IPSEC_OUTPUT_ERROR_POLICY_NO_MATCH,
+ nc_nomatch);
+ return from_frame->n_vectors;
+}
+
+static uword
+ipsec_output_ip4_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return ipsec_output_inline (vm, node, frame, 0);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ipsec_output_ip4_node,static) = {
+ .function = ipsec_output_ip4_node_fn,
+ .name = "ipsec-output-ip4",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ipsec_output_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(ipsec_output_error_strings),
+ .error_strings = ipsec_output_error_strings,
+
+ .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
+ foreach_ipsec_output_next
+#undef _
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ipsec_output_ip4_node, ipsec_output_ip4_node_fn)
+
+static uword
+ipsec_output_ip6_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			  vlib_frame_t * frame)
+{
+ return ipsec_output_inline (vm, node, frame, 1);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ipsec_output_ip6_node,static) = {
+ .function = ipsec_output_ip6_node_fn,
+ .name = "ipsec-output-ip6",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ipsec_output_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(ipsec_output_error_strings),
+ .error_strings = ipsec_output_error_strings,
+
+ .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
+ foreach_ipsec_output_next
+#undef _
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (ipsec_output_ip6_node, ipsec_output_ip6_node_fn)
+#else /* IPSEC > 0 */
+
+/* Dummy IPSec output nodes, used when IPSec is disabled */
+
+static uword
+ipsec_output_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ clib_warning ("IPSec disabled");
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ipsec_output_ip4_node) = {
+ .vector_size = sizeof (u32),
+ .function = ipsec_output_node_fn,
+ .name = "ipsec-output-ip4",
+};
+
+VLIB_REGISTER_NODE (ipsec_output6_node) = {
+ .vector_size = sizeof (u32),
+ .function = ipsec_output_node_fn,
+ .name = "ipsec-output-ip6",
+};
+/* *INDENT-ON* */
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/dir.dox b/src/vnet/l2/dir.dox
new file mode 100644
index 00000000000..8497a2f64cc
--- /dev/null
+++ b/src/vnet/l2/dir.dox
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+@dir
+@brief Layer 2 Forwarding Code.
+
+This directory contains the source code for basic Layer 2 forwarding.
+
+*/
+/*? %%clicmd:group_label Layer 2 CLI %% ?*/
diff --git a/src/vnet/l2/feat_bitmap.c b/src/vnet/l2/feat_bitmap.c
new file mode 100644
index 00000000000..6c046467f2c
--- /dev/null
+++ b/src/vnet/l2/feat_bitmap.c
@@ -0,0 +1,185 @@
+/*
+ * feat_bitmap.c: bitmap for managing feature invocation
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ethernet/packet.h>
+#include <vlib/cli.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/feat_bitmap.h>
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/cache.h>
+
+
+/*
+ * Drop node for feature bitmaps
+ * For features that just do a drop, or are not yet implemented.
+ * Initial feature dispatch nodes don't need to set b0->error
+ * in case of a possible drop because that will be done here.
+ * The next node is always error-drop.
+ */
+
+static vlib_node_registration_t feat_bitmap_drop_node;
+
+#define foreach_feat_bitmap_drop_error \
+_(NO_FWD, "L2 feature forwarding disabled") \
+_(NYI, "L2 feature not implemented")
+
+typedef enum
+{
+#define _(sym,str) FEAT_BITMAP_DROP_ERROR_##sym,
+ foreach_feat_bitmap_drop_error
+#undef _
+ FEAT_BITMAP_DROP_N_ERROR,
+} feat_bitmap_drop_error_t;
+
+static char *feat_bitmap_drop_error_strings[] = {
+#define _(sym,string) string,
+ foreach_feat_bitmap_drop_error
+#undef _
+};
+
+typedef enum
+{
+ FEAT_BITMAP_DROP_NEXT_DROP,
+ FEAT_BITMAP_DROP_N_NEXT,
+} feat_bitmap_drop_next_t;
+
+typedef struct
+{
+ u32 feature_bitmap;
+} feat_bitmap_drop_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_feat_bitmap_drop_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ feat_bitmap_drop_trace_t *t = va_arg (*args, feat_bitmap_drop_trace_t *);
+
+ s =
+ format (s, "feat_bitmap_drop: feature bitmap 0x%08x", t->feature_bitmap);
+ return s;
+}
+
+static uword
+feat_bitmap_drop_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ feat_bitmap_drop_next_t next_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors; /* number of packets to process */
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ /* get space to enqueue frame to graph node "next_index" */
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ feat_bitmap_drop_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->feature_bitmap = vnet_buffer (b0)->l2.feature_bitmap;
+ }
+
+ if (vnet_buffer (b0)->l2.feature_bitmap == 1)
+ {
+ /*
+ * If we are executing the last feature, this is the
+ * No forwarding catch-all
+ */
+ b0->error = node->errors[FEAT_BITMAP_DROP_ERROR_NO_FWD];
+ }
+ else
+ {
+ b0->error = node->errors[FEAT_BITMAP_DROP_ERROR_NYI];
+ }
+ next0 = FEAT_BITMAP_DROP_NEXT_DROP;
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return frame->n_vectors;
+}
+
+clib_error_t *
+feat_bitmap_drop_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (feat_bitmap_drop_init);
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (feat_bitmap_drop_node,static) = {
+ .function = feat_bitmap_drop_node_fn,
+ .name = "feature-bitmap-drop",
+ .vector_size = sizeof (u32),
+ .format_trace = format_feat_bitmap_drop_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(feat_bitmap_drop_error_strings),
+ .error_strings = feat_bitmap_drop_error_strings,
+
+ .n_next_nodes = FEAT_BITMAP_DROP_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [FEAT_BITMAP_DROP_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/feat_bitmap.h b/src/vnet/l2/feat_bitmap.h
new file mode 100644
index 00000000000..c6e02ecc7c9
--- /dev/null
+++ b/src/vnet/l2/feat_bitmap.h
@@ -0,0 +1,96 @@
+/*
+ * feat_bitmap.h: bitmap for managing feature invocation
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_vnet_l2_feat_bitmap_h
+#define included_vnet_l2_feat_bitmap_h
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+/*
+ * The feature bitmap is a way of organizing input and output feature graph nodes.
+ * The set of features to be executed are arranged in a bitmap with one bit per
+ * feature and each bit positioned in the same order that the features should be
+ * executed. Features can be dynamically removed from the set by masking off their
+ * corresponding bits. The bitmap is stored in packet context. Each feature clears
+ * its bit and then calls feat_bitmap_get_next_node_index() to go to the next
+ * graph node.
+ */
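+
+/*
+ * Dispatch sketch (hypothetical feature node; this_feature_bit and
+ * next_nodes are illustrative names for the caller's state):
+ *
+ *   u32 bitmap = vnet_buffer (b0)->l2.feature_bitmap & ~this_feature_bit;
+ *   vnet_buffer (b0)->l2.feature_bitmap = bitmap;
+ *   next0 = feat_bitmap_get_next_node_index (next_nodes, bitmap);
+ */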
+
+
+/* 32 features in a u32 bitmap */
+#define FEAT_MAX 32
+
+/**
+ Initialize the feature next-node indexes of a graph node.
+ Should be called by the init function of each feature graph node.
+*/
+always_inline void
+feat_bitmap_init_next_nodes (vlib_main_t * vm, u32 node_index, /* the current graph node index */
+ u32 num_features, /* number of entries in feat_names */
+ char **feat_names, /* array of feature graph node names */
+ u32 * next_nodes) /* array of 32 next indexes to init */
+{
+ u32 idx;
+
+ ASSERT (num_features <= FEAT_MAX);
+
+ for (idx = 0; idx < num_features; idx++)
+ {
+ if (vlib_get_node_by_name (vm, (u8 *) feat_names[idx]))
+ {
+ next_nodes[idx] =
+ vlib_node_add_named_next (vm, node_index, feat_names[idx]);
+ }
+ else
+	{ /* Node may be in a plugin that is not installed; use the drop node */
+ next_nodes[idx] =
+ vlib_node_add_named_next (vm, node_index, "feature-bitmap-drop");
+ }
+ }
+
+ /* All unassigned bits go to the drop node */
+ for (; idx < FEAT_MAX; idx++)
+ {
+ next_nodes[idx] = vlib_node_add_named_next (vm, node_index,
+ "feature-bitmap-drop");
+ }
+}
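+
+/*
+ * Initialization sketch (hypothetical feature node init; the node and
+ * feature names are illustrative, not part of this header):
+ *
+ *   static char *feat_names[] = { "l2-learn", "l2-fwd" };
+ *   static u32 feat_next_nodes[FEAT_MAX];
+ *
+ *   feat_bitmap_init_next_nodes (vm, my_node.index,
+ *                                ARRAY_LEN (feat_names), feat_names,
+ *                                feat_next_nodes);
+ */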
+
+/**
+ Return the graph node index for the feature corresponding to the
+ first set bit in the bitmap.
+*/
+always_inline
+ u32 feat_bitmap_get_next_node_index (u32 * next_nodes, u32 bitmap)
+{
+ u32 first_bit;
+
+ count_leading_zeros (first_bit, bitmap);
+ first_bit = uword_bits - 1 - first_bit;
+ return next_nodes[first_bit];
+}
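+
+/*
+ * Worked example: for bitmap = 0x12 (bits 1 and 4 set),
+ * count_leading_zeros yields uword_bits - 5, so first_bit becomes 4 and
+ * next_nodes[4] is returned; features thus execute from the highest set
+ * bit downward.
+ */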
+
+#endif /* included_vnet_l2_feat_bitmap_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2.api b/src/vnet/l2/l2.api
new file mode 100644
index 00000000000..5fce7944b58
--- /dev/null
+++ b/src/vnet/l2/l2.api
@@ -0,0 +1,38 @@
+/* Hey Emacs use -*- mode: C -*- */
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \brief Reply to l2_xconnect_dump
+ @param context - sender context which was passed in the request
+ @param rx_sw_if_index - Receive interface index
+ @param tx_sw_if_index - Transmit interface index
+ */
+define l2_xconnect_details
+{
+ u32 context;
+ u32 rx_sw_if_index;
+ u32 tx_sw_if_index;
+};
+
+/** \brief Dump L2 XConnects
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define l2_xconnect_dump
+{
+ u32 client_index;
+ u32 context;
+};
+
diff --git a/src/vnet/l2/l2_api.c b/src/vnet/l2/l2_api.c
new file mode 100644
index 00000000000..ca4f593f1ec
--- /dev/null
+++ b/src/vnet/l2/l2_api.c
@@ -0,0 +1,140 @@
+/*
+ *------------------------------------------------------------------
+ * l2_api.c - layer 2 forwarding api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/l2/l2_input.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(L2_XCONNECT_DUMP, l2_xconnect_dump)
+
+static void
+send_l2_xconnect_details (unix_shared_memory_queue_t * q, u32 context,
+ u32 rx_sw_if_index, u32 tx_sw_if_index)
+{
+ vl_api_l2_xconnect_details_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_L2_XCONNECT_DETAILS);
+ mp->context = context;
+ mp->rx_sw_if_index = htonl (rx_sw_if_index);
+ mp->tx_sw_if_index = htonl (tx_sw_if_index);
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_l2_xconnect_dump_t_handler (vl_api_l2_xconnect_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ l2input_main_t *l2im = &l2input_main;
+ vnet_sw_interface_t *swif;
+ l2_input_config_t *config;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ /* *INDENT-OFF* */
+ pool_foreach (swif, im->sw_interfaces,
+ ({
+ config = vec_elt_at_index (l2im->configs, swif->sw_if_index);
+ if (config->xconnect)
+ send_l2_xconnect_details (q, mp->context, swif->sw_if_index,
+ config->output_sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+
+/*
+ * vpe_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_l2;
+#undef _
+}
+
+static clib_error_t *
+l2_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (l2_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_bd.c b/src/vnet/l2/l2_bd.c
new file mode 100644
index 00000000000..22f83d0b3e4
--- /dev/null
+++ b/src/vnet/l2/l2_bd.c
@@ -0,0 +1,1079 @@
+/*
+ * l2_bd.c : layer 2 bridge domain
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vlib/cli.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/format.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_bd.h>
+#include <vnet/l2/l2_learn.h>
+#include <vnet/l2/l2_fib.h>
+#include <vnet/l2/l2_vtr.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/vec.h>
+
+/**
+ * @file
+ * @brief Ethernet Bridge Domain.
+ *
+ * Code in this file manages Layer 2 bridge domains.
+ *
+ */
+
+bd_main_t bd_main;
+
+/**
+ Init bridge domain if not done already.
+ For feature bitmap, set all bits except ARP termination
+*/
+void
+bd_validate (l2_bridge_domain_t * bd_config)
+{
+ if (!bd_is_valid (bd_config))
+ {
+ bd_config->feature_bitmap = ~L2INPUT_FEAT_ARP_TERM;
+ bd_config->bvi_sw_if_index = ~0;
+ bd_config->members = 0;
+ bd_config->flood_count = 0;
+ bd_config->tun_master_count = 0;
+ bd_config->tun_normal_count = 0;
+ bd_config->mac_by_ip4 = 0;
+ bd_config->mac_by_ip6 = hash_create_mem (0, sizeof (ip6_address_t),
+ sizeof (uword));
+ }
+}
+
+u32
+bd_find_or_add_bd_index (bd_main_t * bdm, u32 bd_id)
+{
+ uword *p;
+ u32 rv;
+
+ if (bd_id == ~0)
+ {
+ bd_id = 0;
+ while (hash_get (bdm->bd_index_by_bd_id, bd_id))
+ bd_id++;
+ }
+ else
+ {
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+ if (p)
+ return (p[0]);
+ }
+
+ rv = clib_bitmap_first_clear (bdm->bd_index_bitmap);
+
+ /* mark this index busy */
+ bdm->bd_index_bitmap = clib_bitmap_set (bdm->bd_index_bitmap, rv, 1);
+
+ hash_set (bdm->bd_index_by_bd_id, bd_id, rv);
+
+ vec_validate (l2input_main.bd_configs, rv);
+ l2input_main.bd_configs[rv].bd_id = bd_id;
+
+ return rv;
+}
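+
+/*
+ * Usage sketch (values illustrative): bd_find_or_add_bd_index (bdm, 200)
+ * returns the existing index if bd_id 200 is known, otherwise it
+ * allocates the first clear bit in bd_index_bitmap; passing bd_id == ~0
+ * auto-selects the lowest unused bd_id instead.
+ */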
+
+int
+bd_delete_bd_index (bd_main_t * bdm, u32 bd_id)
+{
+ uword *p;
+ u32 bd_index;
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+ if (p == 0)
+ return -1;
+
+ bd_index = p[0];
+
+ /* mark this index clear */
+ bdm->bd_index_bitmap = clib_bitmap_set (bdm->bd_index_bitmap, bd_index, 0);
+ hash_unset (bdm->bd_index_by_bd_id, bd_id);
+
+ l2input_main.bd_configs[bd_index].bd_id = ~0;
+ l2input_main.bd_configs[bd_index].feature_bitmap = 0;
+
+ return 0;
+}
+
+static void
+update_flood_count (l2_bridge_domain_t * bd_config)
+{
+ bd_config->flood_count = vec_len (bd_config->members) -
+ (bd_config->tun_master_count ? bd_config->tun_normal_count : 0);
+}
+
+void
+bd_add_member (l2_bridge_domain_t * bd_config, l2_flood_member_t * member)
+{
+ u32 ix;
+ vnet_sw_interface_t *sw_if = vnet_get_sw_interface
+ (vnet_get_main (), member->sw_if_index);
+
+ /*
+ * Add one element to the vector
+ * vector is ordered [ bvi, normal/tun_masters..., tun_normals... ]
+ * When flooding, the bvi interface (if present) must be the last member
+ * processed due to how BVI processing can change the packet. To enable
+ * this order, we make the bvi interface the first in the vector and
+ * flooding walks the vector in reverse.
+ */
+ switch (sw_if->flood_class)
+ {
+ case VNET_FLOOD_CLASS_TUNNEL_MASTER:
+ bd_config->tun_master_count++;
+ /* Fall through */
+ default:
+ /* Fall through */
+ case VNET_FLOOD_CLASS_NORMAL:
+ ix = (member->flags & L2_FLOOD_MEMBER_BVI) ? 0 :
+ vec_len (bd_config->members) - bd_config->tun_normal_count;
+ break;
+ case VNET_FLOOD_CLASS_TUNNEL_NORMAL:
+ ix = vec_len (bd_config->members);
+ bd_config->tun_normal_count++;
+ break;
+ }
+
+ vec_insert_elts (bd_config->members, member, 1, ix);
+ update_flood_count (bd_config);
+}
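+
+/*
+ * Insertion example (illustrative): with members [ bvi, n1, n2, t1, t2 ]
+ * and tun_normal_count == 2, a new normal member lands at index 3 (just
+ * before the tunnel normals), a new BVI at index 0, and a new tunnel
+ * normal is appended at the end.
+ */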
+
+#define BD_REMOVE_ERROR_OK 0
+#define BD_REMOVE_ERROR_NOT_FOUND 1
+
+u32
+bd_remove_member (l2_bridge_domain_t * bd_config, u32 sw_if_index)
+{
+ u32 ix;
+
+ /* Find and delete the member */
+ vec_foreach_index (ix, bd_config->members)
+ {
+ l2_flood_member_t *m = vec_elt_at_index (bd_config->members, ix);
+ if (m->sw_if_index == sw_if_index)
+ {
+ vnet_sw_interface_t *sw_if = vnet_get_sw_interface
+ (vnet_get_main (), sw_if_index);
+
+ if (sw_if->flood_class != VNET_FLOOD_CLASS_NORMAL)
+ {
+ if (sw_if->flood_class == VNET_FLOOD_CLASS_TUNNEL_MASTER)
+ bd_config->tun_master_count--;
+ else if (sw_if->flood_class == VNET_FLOOD_CLASS_TUNNEL_NORMAL)
+ bd_config->tun_normal_count--;
+ }
+ vec_del1 (bd_config->members, ix);
+ update_flood_count (bd_config);
+
+ return BD_REMOVE_ERROR_OK;
+ }
+ }
+
+ return BD_REMOVE_ERROR_NOT_FOUND;
+}
+
+
+clib_error_t *
+l2bd_init (vlib_main_t * vm)
+{
+ bd_main_t *bdm = &bd_main;
+ u32 bd_index;
+ bdm->bd_index_by_bd_id = hash_create (0, sizeof (uword));
+ /*
+ * create a dummy bd with bd_id of 0 and bd_index of 0 with feature set
+ * to packet drop only. Thus, packets received from any L2 interface with
+ * uninitialized bd_index of 0 can be dropped safely.
+ */
+ bd_index = bd_find_or_add_bd_index (bdm, 0);
+ ASSERT (bd_index == 0);
+ l2input_main.bd_configs[0].feature_bitmap = L2INPUT_FEAT_DROP;
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2bd_init);
+
+
+/**
+ Set the learn/forward/flood flags for the bridge domain.
+ Return 0 if ok, non-zero for an error.
+*/
+u32
+bd_set_flags (vlib_main_t * vm, u32 bd_index, u32 flags, u32 enable)
+{
+
+ l2_bridge_domain_t *bd_config;
+ u32 feature_bitmap = 0;
+
+ vec_validate (l2input_main.bd_configs, bd_index);
+ bd_config = vec_elt_at_index (l2input_main.bd_configs, bd_index);
+
+ bd_validate (bd_config);
+
+ if (flags & L2_LEARN)
+ {
+ feature_bitmap |= L2INPUT_FEAT_LEARN;
+ }
+ if (flags & L2_FWD)
+ {
+ feature_bitmap |= L2INPUT_FEAT_FWD;
+ }
+ if (flags & L2_FLOOD)
+ {
+ feature_bitmap |= L2INPUT_FEAT_FLOOD;
+ }
+ if (flags & L2_UU_FLOOD)
+ {
+ feature_bitmap |= L2INPUT_FEAT_UU_FLOOD;
+ }
+ if (flags & L2_ARP_TERM)
+ {
+ feature_bitmap |= L2INPUT_FEAT_ARP_TERM;
+ }
+
+ if (enable)
+ {
+ bd_config->feature_bitmap |= feature_bitmap;
+ }
+ else
+ {
+ bd_config->feature_bitmap &= ~feature_bitmap;
+ }
+
+ return 0;
+}
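+
+/*
+ * Usage sketch (illustrative): enable learning and forwarding together
+ * with enable = 1:
+ *
+ *   bd_set_flags (vm, bd_index, L2_LEARN | L2_FWD, 1);
+ */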
+
+/**
+ Set the mac age for the bridge domain.
+*/
+void
+bd_set_mac_age (vlib_main_t * vm, u32 bd_index, u8 age)
+{
+ l2_bridge_domain_t *bd_config;
+ int enable = 0;
+
+ vec_validate (l2input_main.bd_configs, bd_index);
+ bd_config = vec_elt_at_index (l2input_main.bd_configs, bd_index);
+ bd_config->mac_age = age;
+
+ /* check if there is at least one bd with mac aging enabled */
+ vec_foreach (bd_config, l2input_main.bd_configs)
+ if (bd_config->bd_id != ~0 && bd_config->mac_age != 0)
+ enable = 1;
+
+ vlib_process_signal_event (vm, l2fib_mac_age_scanner_process_node.index,
+ enable ? L2_MAC_AGE_PROCESS_EVENT_START :
+ L2_MAC_AGE_PROCESS_EVENT_STOP, 0);
+}
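+
+/*
+ * Usage sketch (illustrative): bd_set_mac_age (vm, bd_index, 5) ages out
+ * learned MACs after roughly 5 minutes; age 0 disables aging, and the
+ * scanner process is signalled to stop once no bridge domain has a
+ * non-zero mac_age.
+ */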
+
+/**
+ Set bridge-domain learn enable/disable.
+ The CLI format is:
+ set bridge-domain learn <bd_id> [disable]
+*/
+static clib_error_t *
+bd_learn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ bd_main_t *bdm = &bd_main;
+ clib_error_t *error = 0;
+ u32 bd_index, bd_id;
+ u32 enable;
+ uword *p;
+
+ if (!unformat (input, "%d", &bd_id))
+ {
+ error = clib_error_return (0, "expecting bridge-domain id but got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+
+ if (p == 0)
+ return clib_error_return (0, "No such bridge domain %d", bd_id);
+
+ bd_index = p[0];
+
+ enable = 1;
+ if (unformat (input, "disable"))
+ {
+ enable = 0;
+ }
+
+ /* set the bridge domain flag */
+ if (bd_set_flags (vm, bd_index, L2_LEARN, enable))
+ {
+ error =
+ clib_error_return (0, "bridge-domain id %d out of range", bd_index);
+ goto done;
+ }
+
+done:
+ return error;
+}
+
+/*?
+ * Layer 2 learning can be enabled and disabled on each
+ * interface and on each bridge-domain. Use this command to
+ * enable or disable learning on specific bridge-domains. It is
+ * enabled by default.
+ *
+ * @cliexpar
+ * Example of how to enable learning (where 200 is the bridge-domain-id):
+ * @cliexcmd{set bridge-domain learn 200}
+ * Example of how to disable learning (where 200 is the bridge-domain-id):
+ * @cliexcmd{set bridge-domain learn 200 disable}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (bd_learn_cli, static) = {
+ .path = "set bridge-domain learn",
+ .short_help = "set bridge-domain learn <bridge-domain-id> [disable]",
+ .function = bd_learn,
+};
+/* *INDENT-ON* */
+
+/**
+ Set bridge-domain forward enable/disable.
+ The CLI format is:
+ set bridge-domain forward <bd_id> [disable]
+*/
+static clib_error_t *
+bd_fwd (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ bd_main_t *bdm = &bd_main;
+ clib_error_t *error = 0;
+ u32 bd_index, bd_id;
+ u32 enable;
+ uword *p;
+
+ if (!unformat (input, "%d", &bd_id))
+ {
+ error = clib_error_return (0, "expecting bridge-domain id but got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+
+ if (p == 0)
+ return clib_error_return (0, "No such bridge domain %d", bd_id);
+
+ bd_index = p[0];
+
+ enable = 1;
+ if (unformat (input, "disable"))
+ {
+ enable = 0;
+ }
+
+ /* set the bridge domain flag */
+ if (bd_set_flags (vm, bd_index, L2_FWD, enable))
+ {
+ error =
+ clib_error_return (0, "bridge-domain id %d out of range", bd_index);
+ goto done;
+ }
+
+done:
+ return error;
+}
+
+
+/*?
+ * Layer 2 unicast forwarding can be enabled and disabled on each
+ * interface and on each bridge-domain. Use this command to
+ * enable or disable forwarding on specific bridge-domains. It is
+ * enabled by default.
+ *
+ * @cliexpar
+ * Example of how to enable forwarding (where 200 is the bridge-domain-id):
+ * @cliexcmd{set bridge-domain forward 200}
+ * Example of how to disable forwarding (where 200 is the bridge-domain-id):
+ * @cliexcmd{set bridge-domain forward 200 disable}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (bd_fwd_cli, static) = {
+ .path = "set bridge-domain forward",
+ .short_help = "set bridge-domain forward <bridge-domain-id> [disable]",
+ .function = bd_fwd,
+};
+/* *INDENT-ON* */
+
+/**
+ Set bridge-domain flood enable/disable.
+ The CLI format is:
+ set bridge-domain flood <bd_id> [disable]
+*/
+static clib_error_t *
+bd_flood (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ bd_main_t *bdm = &bd_main;
+ clib_error_t *error = 0;
+ u32 bd_index, bd_id;
+ u32 enable;
+ uword *p;
+
+ if (!unformat (input, "%d", &bd_id))
+ {
+ error = clib_error_return (0, "expecting bridge-domain id but got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+
+ if (p == 0)
+ return clib_error_return (0, "No such bridge domain %d", bd_id);
+
+ bd_index = p[0];
+
+ enable = 1;
+ if (unformat (input, "disable"))
+ {
+ enable = 0;
+ }
+
+ /* set the bridge domain flag */
+ if (bd_set_flags (vm, bd_index, L2_FLOOD, enable))
+ {
+ error =
+ clib_error_return (0, "bridge-domain id %d out of range", bd_index);
+ goto done;
+ }
+
+done:
+ return error;
+}
+
+/*?
+ * Layer 2 flooding can be enabled and disabled on each
+ * interface and on each bridge-domain. Use this command to
+ * enable or disable flooding on specific bridge-domains. It is
+ * enabled by default.
+ *
+ * @cliexpar
+ * Example of how to enable flooding (where 200 is the bridge-domain-id):
+ * @cliexcmd{set bridge-domain flood 200}
+ * Example of how to disable flooding (where 200 is the bridge-domain-id):
+ * @cliexcmd{set bridge-domain flood 200 disable}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (bd_flood_cli, static) = {
+ .path = "set bridge-domain flood",
+ .short_help = "set bridge-domain flood <bridge-domain-id> [disable]",
+ .function = bd_flood,
+};
+/* *INDENT-ON* */
+
+/**
+ Set bridge-domain unknown-unicast flood enable/disable.
+ The CLI format is:
+ set bridge-domain uu-flood <bd_id> [disable]
+*/
+static clib_error_t *
+bd_uu_flood (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ bd_main_t *bdm = &bd_main;
+ clib_error_t *error = 0;
+ u32 bd_index, bd_id;
+ u32 enable;
+ uword *p;
+
+ if (!unformat (input, "%d", &bd_id))
+ {
+ error = clib_error_return (0, "expecting bridge-domain id but got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+
+ if (p == 0)
+ return clib_error_return (0, "No such bridge domain %d", bd_id);
+
+ bd_index = p[0];
+
+ enable = 1;
+ if (unformat (input, "disable"))
+ {
+ enable = 0;
+ }
+
+ /* set the bridge domain flag */
+ if (bd_set_flags (vm, bd_index, L2_UU_FLOOD, enable))
+ {
+ error =
+ clib_error_return (0, "bridge-domain id %d out of range", bd_index);
+ goto done;
+ }
+
+done:
+ return error;
+}
+
+/*?
+ * Layer 2 unknown-unicast flooding can be enabled and disabled on each
+ * bridge-domain. It is enabled by default.
+ *
+ * @cliexpar
+ * Example of how to enable unknown-unicast flooding (where 200 is the
+ * bridge-domain-id):
+ * @cliexcmd{set bridge-domain uu-flood 200}
+ * Example of how to disable unknown-unicast flooding (where 200 is the bridge-domain-id):
+ * @cliexcmd{set bridge-domain uu-flood 200 disable}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (bd_uu_flood_cli, static) = {
+ .path = "set bridge-domain uu-flood",
+ .short_help = "set bridge-domain uu-flood <bridge-domain-id> [disable]",
+ .function = bd_uu_flood,
+};
+/* *INDENT-ON* */
+
+/**
+ Set bridge-domain arp term enable/disable.
+ The CLI format is:
+ set bridge-domain arp term <bridge-domain-id> [disable]
+*/
+static clib_error_t *
+bd_arp_term (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ bd_main_t *bdm = &bd_main;
+ clib_error_t *error = 0;
+ u32 bd_index, bd_id;
+ u32 enable;
+ uword *p;
+
+ if (!unformat (input, "%d", &bd_id))
+ {
+ error = clib_error_return (0, "expecting bridge-domain id but got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+ if (p)
+ bd_index = *p;
+ else
+ return clib_error_return (0, "No such bridge domain %d", bd_id);
+
+ enable = 1;
+ if (unformat (input, "disable"))
+ enable = 0;
+
+ /* set the bridge domain flag */
+ if (bd_set_flags (vm, bd_index, L2_ARP_TERM, enable))
+ {
+ error =
+ clib_error_return (0, "bridge-domain id %d out of range", bd_index);
+ goto done;
+ }
+
+done:
+ return error;
+}
+
+static clib_error_t *
+bd_mac_age (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ bd_main_t *bdm = &bd_main;
+ clib_error_t *error = 0;
+ u32 bd_index, bd_id;
+ u32 age;
+ uword *p;
+
+ if (!unformat (input, "%d", &bd_id))
+ {
+ error = clib_error_return (0, "expecting bridge-domain id but got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+
+ if (p == 0)
+ return clib_error_return (0, "No such bridge domain %d", bd_id);
+
+ bd_index = p[0];
+
+ if (!unformat (input, "%u", &age))
+ {
+ error =
+ clib_error_return (0, "expecting ageing time in minutes but got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ /* set the bridge domain flag */
+ if (age > 255)
+ {
+ error =
+ clib_error_return (0, "mac aging time cannot be bigger than 255");
+ goto done;
+ }
+ bd_set_mac_age (vm, bd_index, (u8) age);
+
+done:
+ return error;
+}
+
+/*?
+ * Layer 2 mac aging can be enabled and disabled on each
+ * bridge-domain. Use this command to set or disable mac aging
+ * on specific bridge-domains. It is disabled by default.
+ *
+ * @cliexpar
+ * Example of how to set mac aging (where 200 is the bridge-domain-id and
+ * 5 is aging time in minutes):
+ * @cliexcmd{set bridge-domain mac-age 200 5}
+ * Example of how to disable mac aging (where 200 is the bridge-domain-id):
+ * @cliexcmd{set bridge-domain mac-age 200 0}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (bd_mac_age_cli, static) = {
+ .path = "set bridge-domain mac-age",
+ .short_help = "set bridge-domain mac-age <bridge-domain-id> <mins>",
+ .function = bd_mac_age,
+};
+/* *INDENT-ON* */
+
+/*?
+ * Modify whether or not an existing bridge-domain should terminate and respond
+ * to ARP Requests. ARP Termination is disabled by default.
+ *
+ * @cliexpar
+ * Example of how to enable ARP termination (where 200 is the bridge-domain-id):
+ * @cliexcmd{set bridge-domain arp term 200}
+ * Example of how to disable ARP termination (where 200 is the bridge-domain-id):
+ * @cliexcmd{set bridge-domain arp term 200 disable}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (bd_arp_term_cli, static) = {
+ .path = "set bridge-domain arp term",
+ .short_help = "set bridge-domain arp term <bridge-domain-id> [disable]",
+ .function = bd_arp_term,
+};
+/* *INDENT-ON* */
+
+
+/**
+ * Add/delete IP address to MAC address mapping.
+ *
+ * The clib hash implementation stores uword entries in the hash table.
+ * The hash table mac_by_ip4 is keyed via IP4 address and store the
+ * 6-byte MAC address directly in the hash table entry uword.
+ *
+ * @warning This only works for 64-bit processor with 8-byte uword;
+ * which means this code *WILL NOT WORK* for a 32-bit processor with
+ * 4-byte uword.
+ */
+u32
+bd_add_del_ip_mac (u32 bd_index,
+ u8 * ip_addr, u8 * mac_addr, u8 is_ip6, u8 is_add)
+{
+ l2input_main_t *l2im = &l2input_main;
+ l2_bridge_domain_t *bd_cfg = l2input_bd_config_from_index (l2im, bd_index);
+ u64 new_mac = *(u64 *) mac_addr;
+ u64 *old_mac;
+ u16 *mac16 = (u16 *) & new_mac;
+
+ ASSERT (sizeof (uword) == sizeof (u64)); /* make sure uword is 8 bytes */
+
+  mac16[3] = 0;		/* Clear last 2 unused bytes of the 8-byte MAC address */
+ if (is_ip6)
+ {
+ ip6_address_t *ip6_addr_key;
+ hash_pair_t *hp;
+ old_mac = (u64 *) hash_get_mem (bd_cfg->mac_by_ip6, ip_addr);
+ if (is_add)
+ {
+ if (old_mac == 0)
+	    {			/* new entry - allocate and create ip6 address key */
+ ip6_addr_key = clib_mem_alloc (sizeof (ip6_address_t));
+ clib_memcpy (ip6_addr_key, ip_addr, sizeof (ip6_address_t));
+ }
+ else if (*old_mac == new_mac)
+	    {			/* same mac entry already exists for ip6 address */
+ return 0;
+ }
+ else
+	    {			/* update mac for ip6 address */
+ hp = hash_get_pair (bd_cfg->mac_by_ip6, ip_addr);
+ ip6_addr_key = (ip6_address_t *) hp->key;
+ }
+ hash_set_mem (bd_cfg->mac_by_ip6, ip6_addr_key, new_mac);
+ }
+ else
+ {
+ if (old_mac && (*old_mac == new_mac))
+ {
+ hp = hash_get_pair (bd_cfg->mac_by_ip6, ip_addr);
+ ip6_addr_key = (ip6_address_t *) hp->key;
+ hash_unset_mem (bd_cfg->mac_by_ip6, ip_addr);
+ clib_mem_free (ip6_addr_key);
+ }
+ else
+ return 1;
+ }
+ }
+ else
+ {
+ ip4_address_t ip4_addr = *(ip4_address_t *) ip_addr;
+ old_mac = (u64 *) hash_get (bd_cfg->mac_by_ip4, ip4_addr.as_u32);
+ if (is_add)
+ {
+ if (old_mac && (*old_mac == new_mac))
+	    return 0;		/* mac entry already exists */
+ hash_set (bd_cfg->mac_by_ip4, ip4_addr.as_u32, new_mac);
+ }
+ else
+ {
+ if (old_mac && (*old_mac == new_mac))
+ hash_unset (bd_cfg->mac_by_ip4, ip4_addr.as_u32);
+ else
+ return 1;
+ }
+ }
+ return 0;
+}
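+
+/*
+ * Layout sketch (little-endian host assumed, values illustrative): for
+ * MAC 52:54:00:3b:83:1a the hash entry uword holds the bytes
+ * 52 54 00 3b 83 1a 00 00, i.e. the six MAC bytes with the last two
+ * bytes of the u64 cleared by the mac16[3] = 0 above.
+ */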
+
+/**
+ Set bridge-domain arp entry add/delete.
+ The CLI format is:
+ set bridge-domain arp entry <bridge-domain-id> <ip-addr> <mac-addr> [del]
+*/
+static clib_error_t *
+bd_arp_entry (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ bd_main_t *bdm = &bd_main;
+ clib_error_t *error = 0;
+ u32 bd_index, bd_id;
+ u8 is_add = 1;
+ u8 is_ip6 = 0;
+ u8 ip_addr[16];
+ u8 mac_addr[6];
+ uword *p;
+
+ if (!unformat (input, "%d", &bd_id))
+ {
+ error = clib_error_return (0, "expecting bridge-domain id but got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+
+ if (p)
+ bd_index = *p;
+ else
+ return clib_error_return (0, "No such bridge domain %d", bd_id);
+
+ if (unformat (input, "%U", unformat_ip4_address, ip_addr))
+ {
+ is_ip6 = 0;
+ }
+ else if (unformat (input, "%U", unformat_ip6_address, ip_addr))
+ {
+ is_ip6 = 1;
+ }
+ else
+ {
+ error = clib_error_return (0, "expecting IP address but got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ if (!unformat (input, "%U", unformat_ethernet_address, mac_addr))
+ {
+ error = clib_error_return (0, "expecting MAC address but got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ if (unformat (input, "del"))
+ {
+ is_add = 0;
+ }
+
+  /* add or delete the IP-MAC entry in the bridge domain */
+ if (bd_add_del_ip_mac (bd_index, ip_addr, mac_addr, is_ip6, is_add))
+ {
+      error = clib_error_return (0, "%s of IP-MAC entry for IP %U MAC %U failed",
+				 is_add ? "add" : "del",
+				 is_ip6 ?
+				 format_ip6_address : format_ip4_address,
+				 ip_addr, format_ethernet_address, mac_addr);
+ }
+
+done:
+ return error;
+}
+
+/*?
+ * Add an ARP entry to an existing bridge-domain.
+ *
+ * @cliexpar
+ * Example of how to add an ARP entry (where 200 is the bridge-domain-id):
+ * @cliexcmd{set bridge-domain arp entry 200 192.168.72.45 52:54:00:3b:83:1a}
+ * Example of how to delete an ARP entry (where 200 is the bridge-domain-id):
+ * @cliexcmd{set bridge-domain arp entry 200 192.168.72.45 52:54:00:3b:83:1a del}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (bd_arp_entry_cli, static) = {
+ .path = "set bridge-domain arp entry",
+ .short_help = "set bridge-domain arp entry <bridge-domain-id> <ip-addr> <mac-addr> [del]",
+ .function = bd_arp_entry,
+};
+/* *INDENT-ON* */
+
+u8 *
+format_vtr (u8 * s, va_list * args)
+{
+ u32 vtr_op = va_arg (*args, u32);
+ u32 dot1q = va_arg (*args, u32);
+ u32 tag1 = va_arg (*args, u32);
+ u32 tag2 = va_arg (*args, u32);
+ switch (vtr_op)
+ {
+ case L2_VTR_DISABLED:
+ return format (s, "none");
+ case L2_VTR_PUSH_1:
+ return format (s, "push-1 %s %d", dot1q ? "dot1q" : "dot1ad", tag1);
+ case L2_VTR_PUSH_2:
+ return format (s, "push-2 %s %d %d", dot1q ? "dot1q" : "dot1ad", tag1,
+ tag2);
+ case L2_VTR_POP_1:
+ return format (s, "pop-1");
+ case L2_VTR_POP_2:
+ return format (s, "pop-2");
+ case L2_VTR_TRANSLATE_1_1:
+ return format (s, "trans-1-1 %s %d", dot1q ? "dot1q" : "dot1ad", tag1);
+ case L2_VTR_TRANSLATE_1_2:
+ return format (s, "trans-1-2 %s %d %d", dot1q ? "dot1q" : "dot1ad",
+ tag1, tag2);
+ case L2_VTR_TRANSLATE_2_1:
+ return format (s, "trans-2-1 %s %d", dot1q ? "dot1q" : "dot1ad", tag1);
+ case L2_VTR_TRANSLATE_2_2:
+ return format (s, "trans-2-2 %s %d %d", dot1q ? "dot1q" : "dot1ad",
+ tag1, tag2);
+ default:
+ return format (s, "none");
+ }
+}
+
+/**
+ Show bridge-domain state.
+ The CLI format is:
+ show bridge-domain [<bd_id>]
+*/
+static clib_error_t *
+bd_show (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ bd_main_t *bdm = &bd_main;
+ clib_error_t *error = 0;
+ u32 bd_index = ~0;
+ l2_bridge_domain_t *bd_config;
+ u32 start, end;
+ u32 printed;
+ u32 detail = 0;
+ u32 intf = 0;
+ u32 arp = 0;
+ u32 bd_id = ~0;
+ uword *p;
+
+ start = 0;
+ end = vec_len (l2input_main.bd_configs);
+
+ if (unformat (input, "%d", &bd_id))
+ {
+ if (unformat (input, "detail"))
+ detail = 1;
+ else if (unformat (input, "det"))
+ detail = 1;
+ if (unformat (input, "int"))
+ intf = 1;
+ if (unformat (input, "arp"))
+ arp = 1;
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+ if (p)
+ bd_index = *p;
+ else
+ return clib_error_return (0, "No such bridge domain %d", bd_id);
+
+ vec_validate (l2input_main.bd_configs, bd_index);
+ bd_config = vec_elt_at_index (l2input_main.bd_configs, bd_index);
+ if (bd_is_valid (bd_config))
+ {
+ start = bd_index;
+ end = start + 1;
+ }
+ else
+ {
+ vlib_cli_output (vm, "bridge-domain %d not in use", bd_id);
+ goto done;
+ }
+ }
+
+ /* Show all bridge-domains that have been initialized */
+ printed = 0;
+ for (bd_index = start; bd_index < end; bd_index++)
+ {
+ bd_config = vec_elt_at_index (l2input_main.bd_configs, bd_index);
+ if (bd_is_valid (bd_config))
+ {
+ if (!printed)
+ {
+ printed = 1;
+ vlib_cli_output (vm,
+ "%=5s %=7s %=10s %=10s %=10s %=10s %=10s %=14s",
+ "ID", "Index", "Learning", "U-Forwrd",
+ "UU-Flood", "Flooding", "ARP-Term",
+ "BVI-Intf");
+ }
+
+ vlib_cli_output (vm,
+ "%=5d %=7d %=10s %=10s %=10s %=10s %=10s %=14U",
+ bd_config->bd_id, bd_index,
+ bd_config->feature_bitmap & L2INPUT_FEAT_LEARN ?
+ "on" : "off",
+ bd_config->feature_bitmap & L2INPUT_FEAT_FWD ? "on"
+ : "off",
+ bd_config->feature_bitmap & L2INPUT_FEAT_UU_FLOOD ?
+ "on" : "off",
+ bd_config->feature_bitmap & L2INPUT_FEAT_FLOOD ?
+ "on" : "off",
+ bd_config->feature_bitmap & L2INPUT_FEAT_ARP_TERM ?
+ "on" : "off", format_vnet_sw_if_index_name_with_NA,
+ vnm, bd_config->bvi_sw_if_index);
+
+ if (detail || intf)
+ {
+ /* Show all member interfaces */
+ int i;
+ vec_foreach_index (i, bd_config->members)
+ {
+ l2_flood_member_t *member =
+ vec_elt_at_index (bd_config->members, i);
+ u32 vtr_opr, dot1q, tag1, tag2;
+ if (i == 0)
+ {
+ vlib_cli_output (vm, "\n%=30s%=7s%=5s%=5s%=9s%=30s",
+ "Interface", "Index", "SHG", "BVI",
+ "TxFlood", "VLAN-Tag-Rewrite");
+ }
+ l2vtr_get (vm, vnm, member->sw_if_index, &vtr_opr, &dot1q,
+ &tag1, &tag2);
+ vlib_cli_output (vm, "%=30U%=7d%=5d%=5s%=9s%=30U",
+ format_vnet_sw_if_index_name, vnm,
+ member->sw_if_index, member->sw_if_index,
+ member->shg,
+ member->flags & L2_FLOOD_MEMBER_BVI ? "*" :
+ "-", i < bd_config->flood_count ? "*" : "-",
+ format_vtr, vtr_opr, dot1q, tag1, tag2);
+ }
+ }
+
+ if ((detail || arp) &&
+ (bd_config->feature_bitmap & L2INPUT_FEAT_ARP_TERM))
+ {
+ u32 ip4_addr;
+ ip6_address_t *ip6_addr;
+ u64 mac_addr;
+ vlib_cli_output (vm,
+ "\n IP4/IP6 to MAC table for ARP Termination");
+
+ /* *INDENT-OFF* */
+ hash_foreach (ip4_addr, mac_addr, bd_config->mac_by_ip4,
+ ({
+ vlib_cli_output (vm, "%=40U => %=20U",
+ format_ip4_address, &ip4_addr,
+ format_ethernet_address, &mac_addr);
+ }));
+
+ hash_foreach_mem (ip6_addr, mac_addr, bd_config->mac_by_ip6,
+ ({
+ vlib_cli_output (vm, "%=40U => %=20U",
+ format_ip6_address, ip6_addr,
+ format_ethernet_address, &mac_addr);
+ }));
+ /* *INDENT-ON* */
+ }
+ }
+ }
+
+ if (!printed)
+ {
+ vlib_cli_output (vm, "no bridge-domains in use");
+ }
+
+done:
+ return error;
+}
+
+/*?
+ * Show a summary of all the bridge-domain instances or detailed view of a
+ * single bridge-domain. Bridge-domains are created by adding an interface
+ * to a bridge using the '<em>set interface l2 bridge</em>' command.
+ *
+ * @cliexpar
+ * @parblock
+ * Example of displaying all bridge-domains:
+ * @cliexstart{show bridge-domain}
+ * ID Index Learning U-Forwrd UU-Flood Flooding ARP-Term BVI-Intf
+ * 0 0 off off off off off local0
+ * 200 1 on on on on off N/A
+ * @cliexend
+ *
+ * Example of displaying details of a single bridge-domains:
+ * @cliexstart{show bridge-domain 200 detail}
+ * ID Index Learning U-Forwrd UU-Flood Flooding ARP-Term BVI-Intf
+ * 200 1 on on on on off N/A
+ *
+ * Interface Index SHG BVI VLAN-Tag-Rewrite
+ * GigabitEthernet0/8/0.200 3 0 - none
+ * GigabitEthernet0/9/0.200 4 0 - none
+ * @cliexend
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (bd_show_cli, static) = {
+ .path = "show bridge-domain",
+ .short_help = "show bridge-domain [bridge-domain-id [detail|int|arp]]",
+ .function = bd_show,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_bd.h b/src/vnet/l2/l2_bd.h
new file mode 100644
index 00000000000..4bb9bc9b24c
--- /dev/null
+++ b/src/vnet/l2/l2_bd.h
@@ -0,0 +1,150 @@
+/*
+ * l2_bd.h : layer 2 bridge domain
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_l2bd_h
+#define included_l2bd_h
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+typedef struct
+{
+ /* hash bd_id -> bd_index */
+ uword *bd_index_by_bd_id;
+
+ /* Busy bd_index bitmap */
+ uword *bd_index_bitmap;
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} bd_main_t;
+
+bd_main_t bd_main;
+
+/* Bridge domain member */
+
+#define L2_FLOOD_MEMBER_NORMAL 0
+#define L2_FLOOD_MEMBER_BVI 1
+
+typedef struct
+{
+ u32 sw_if_index; /* the output L2 interface */
+ u8 flags; /* 0=normal, 1=bvi */
+ u8 shg; /* split horizon group number */
+ u16 spare;
+} l2_flood_member_t;
+
+
+/* Per-bridge domain configuration */
+
+typedef struct
+{
+  /*
+   * Contains bit enables for flooding, learning, and forwarding.
+   * All other feature bits should always be set.
+   */
+  u32 feature_bitmap;
+
+  /*
+   * identity of the bridge-domain's BVI interface
+   * set to ~0 if there is no BVI
+   */
+  u32 bvi_sw_if_index;
+
+ /* bridge domain id, not to be confused with bd_index */
+ u32 bd_id;
+
+ /* Vector of member ports */
+ l2_flood_member_t *members;
+
+ /* First flood_count member ports are flooded */
+ u32 flood_count;
+
+ /* Tunnel Master (Multicast vxlan) are always flooded */
+ u32 tun_master_count;
+
+ /* Tunnels (Unicast vxlan) are flooded if there are no masters */
+ u32 tun_normal_count;
+
+ /* hash ip4/ip6 -> mac for arp/nd termination */
+ uword *mac_by_ip4;
+ uword *mac_by_ip6;
+
+ /* mac aging */
+ u8 mac_age;
+
+} l2_bridge_domain_t;
+
+/* Return 1 if bridge domain has been initialized */
+always_inline u32
+bd_is_valid (l2_bridge_domain_t * bd_config)
+{
+ return (bd_config->feature_bitmap != 0);
+}
+
+/* Init bridge domain if not done already */
+void bd_validate (l2_bridge_domain_t * bd_config);
+
+
+void
+bd_add_member (l2_bridge_domain_t * bd_config, l2_flood_member_t * member);
+
+u32 bd_remove_member (l2_bridge_domain_t * bd_config, u32 sw_if_index);
+
+
+#define L2_LEARN (1<<0)
+#define L2_FWD (1<<1)
+#define L2_FLOOD (1<<2)
+#define L2_UU_FLOOD (1<<3)
+#define L2_ARP_TERM (1<<4)
+
+u32 bd_set_flags (vlib_main_t * vm, u32 bd_index, u32 flags, u32 enable);
+void bd_set_mac_age (vlib_main_t * vm, u32 bd_index, u8 age);
+
+/**
+ * \brief Get or create a bridge domain.
+ *
+ * Get or create a bridge domain with the given bridge domain ID.
+ *
+ * \param bdm bd_main pointer.
+ * \param bd_id The bridge domain ID or ~0 if an arbitrary unused bridge domain should be used.
+ * \return The bridge domain index in \c l2input_main->l2_bridge_domain_t vector.
+ */
+u32 bd_find_or_add_bd_index (bd_main_t * bdm, u32 bd_id);
+
+/**
+ * \brief Delete a bridge domain.
+ *
+ * Delete an existing bridge domain with the given bridge domain ID.
+ *
+ * \param bdm bd_main pointer.
+ * \param bd_id The bridge domain ID.
+ * \return 0 on success and -1 if the bridge domain does not exist.
+ */
+int bd_delete_bd_index (bd_main_t * bdm, u32 bd_id);
+
+u32 bd_add_del_ip_mac (u32 bd_index,
+ u8 * ip_addr, u8 * mac_addr, u8 is_ip6, u8 is_add);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_bvi.c b/src/vnet/l2/l2_bvi.c
new file mode 100644
index 00000000000..f239743a9c4
--- /dev/null
+++ b/src/vnet/l2/l2_bvi.c
@@ -0,0 +1,40 @@
+/*
+ * l2_bvi.c : layer 2 Bridged Virtual Interface
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/l2/l2_fwd.h>
+#include <vnet/l2/l2_flood.h>
+#include <vnet/l2/l2_bvi.h>
+
+
+/* Call the L2 nodes that need the ethertype mapping */
+void
+l2bvi_register_input_type (vlib_main_t * vm,
+ ethernet_type_t type, u32 node_index)
+{
+ l2fwd_register_input_type (vm, type, node_index);
+ l2flood_register_input_type (vm, type, node_index);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_bvi.h b/src/vnet/l2/l2_bvi.h
new file mode 100644
index 00000000000..dd1130a6c29
--- /dev/null
+++ b/src/vnet/l2/l2_bvi.h
@@ -0,0 +1,117 @@
+/*
+ * l2_bvi.h : layer 2 Bridged Virtual Interface
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_l2bvi_h
+#define included_l2bvi_h
+
+#include <vlib/vlib.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vppinfra/sparse_vec.h>
+
+#include <vnet/l2/l2_input.h>
+
+#define TO_BVI_ERR_OK 0
+#define TO_BVI_ERR_BAD_MAC 1
+#define TO_BVI_ERR_ETHERTYPE 2
+
+/**
+ * Send a packet from L2 processing to L3 via the BVI interface.
+ * Set next0 to the proper L3 input node.
+ * Return an error if the packet isn't what we expect.
+ */
+
+static_always_inline u32
+l2_to_bvi (vlib_main_t * vlib_main,
+ vnet_main_t * vnet_main,
+ vlib_buffer_t * b0,
+ u32 bvi_sw_if_index, next_by_ethertype_t * l3_next, u32 * next0)
+{
+ u8 l2_len;
+ u16 ethertype;
+ u8 *l3h;
+ ethernet_header_t *e0;
+ vnet_hw_interface_t *hi;
+
+ e0 = vlib_buffer_get_current (b0);
+ hi = vnet_get_sup_hw_interface (vnet_main, bvi_sw_if_index);
+
+ /* Perform L3 my-mac filter */
+ if ((!ethernet_address_cast (e0->dst_address)) &&
+ (!eth_mac_equal ((u8 *) e0, hi->hw_address)))
+ {
+ return TO_BVI_ERR_BAD_MAC;
+ }
+
+ /* Save L2 header position which may be changed due to packet replication */
+ vnet_buffer (b0)->ethernet.start_of_ethernet_header = b0->current_data;
+
+ /* Strip L2 header */
+ l2_len = vnet_buffer (b0)->l2.l2_len;
+ vlib_buffer_advance (b0, l2_len);
+
+ l3h = vlib_buffer_get_current (b0);
+ ethertype = clib_net_to_host_u16 (*(u16 *) (l3h - 2));
+
+ /* Set the input interface to be the BVI interface */
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = bvi_sw_if_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+
+ /* Go to appropriate L3 input node */
+ if (ethertype == ETHERNET_TYPE_IP4)
+ {
+ *next0 = l3_next->input_next_ip4;
+ }
+ else if (ethertype == ETHERNET_TYPE_IP6)
+ {
+ *next0 = l3_next->input_next_ip6;
+ }
+ else
+ {
+ /* uncommon ethertype, check table */
+ u32 i0;
+
+ i0 = sparse_vec_index (l3_next->input_next_by_type, ethertype);
+ *next0 = vec_elt (l3_next->input_next_by_type, i0);
+
+ if (i0 == SPARSE_VEC_INVALID_INDEX)
+ {
+ return TO_BVI_ERR_ETHERTYPE;
+ }
+ }
+
+ /* increment BVI RX interface stat */
+ vlib_increment_combined_counter
+ (vnet_main->interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ vlib_main->cpu_index,
+ vnet_buffer (b0)->sw_if_index[VLIB_RX],
+ 1, vlib_buffer_length_in_chain (vlib_main, b0));
+ return TO_BVI_ERR_OK;
+}
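+
+/*
+ * Caller sketch (hypothetical; l3_next and DROP_NEXT stand in for the
+ * calling node's state):
+ *
+ *   u32 rc = l2_to_bvi (vlib_main, vnet_main, b0,
+ *                       bd_config->bvi_sw_if_index, &l3_next, &next0);
+ *   if (rc != TO_BVI_ERR_OK)
+ *     next0 = DROP_NEXT;
+ */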
+
+void
+l2bvi_register_input_type (vlib_main_t * vm,
+ ethernet_type_t type, u32 node_index);
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_classify.h b/src/vnet/l2/l2_classify.h
new file mode 100644
index 00000000000..184187ff879
--- /dev/null
+++ b/src/vnet/l2/l2_classify.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __included_vnet_l2_input_classify_h__
+#define __included_vnet_l2_input_classify_h__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ethernet/packet.h>
+#include <vnet/ip/ip_packet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vlib/cli.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_output.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/cache.h>
+
+#include <vnet/classify/vnet_classify.h>
+
+typedef enum
+{
+ L2_INPUT_CLASSIFY_NEXT_DROP,
+ L2_INPUT_CLASSIFY_NEXT_ETHERNET_INPUT,
+ L2_INPUT_CLASSIFY_NEXT_IP4_INPUT,
+ L2_INPUT_CLASSIFY_NEXT_IP6_INPUT,
+ L2_INPUT_CLASSIFY_NEXT_LI,
+ L2_INPUT_CLASSIFY_N_NEXT,
+} l2_input_classify_next_t;
+
+typedef enum
+{
+ L2_INPUT_CLASSIFY_TABLE_IP4,
+ L2_INPUT_CLASSIFY_TABLE_IP6,
+ L2_INPUT_CLASSIFY_TABLE_OTHER,
+ L2_INPUT_CLASSIFY_N_TABLES,
+} l2_input_classify_table_id_t;
+
+typedef enum
+{
+ L2_OUTPUT_CLASSIFY_NEXT_DROP,
+ L2_OUTPUT_CLASSIFY_N_NEXT,
+} l2_output_classify_next_t;
+
+typedef enum
+{
+ L2_OUTPUT_CLASSIFY_TABLE_IP4,
+ L2_OUTPUT_CLASSIFY_TABLE_IP6,
+ L2_OUTPUT_CLASSIFY_TABLE_OTHER,
+ L2_OUTPUT_CLASSIFY_N_TABLES,
+} l2_output_classify_table_id_t;
+
+typedef struct _l2_classify_main
+{
+ /* Next nodes for each feature */
+ u32 feat_next_node_index[32];
+
+ /* Per-address-family classifier table vectors */
+ u32 *classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_N_TABLES];
+
+ /* Next nodes for features and output interfaces */
+ l2_output_next_nodes_st next_nodes;
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+ vnet_classify_main_t *vnet_classify_main;
+} l2_input_classify_main_t;
+
+typedef struct _l2_classify_main l2_output_classify_main_t;
+
+extern l2_input_classify_main_t l2_input_classify_main;
+extern vlib_node_registration_t l2_input_classify_node;
+
+extern l2_output_classify_main_t l2_output_classify_main;
+extern vlib_node_registration_t l2_output_classify_node;
+
+void vnet_l2_input_classify_enable_disable (u32 sw_if_index,
+ int enable_disable);
+
+int vnet_l2_input_classify_set_tables (u32 sw_if_index, u32 ip4_table_index,
+ u32 ip6_table_index,
+ u32 other_table_index);
+
+void vnet_l2_output_classify_enable_disable (u32 sw_if_index,
+ int enable_disable);
+
+int vnet_l2_output_classify_set_tables (u32 sw_if_index, u32 ip4_table_index,
+ u32 ip6_table_index,
+ u32 other_table_index);
+
+#endif /* __included_vnet_l2_input_classify_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_efp_filter.c b/src/vnet/l2/l2_efp_filter.c
new file mode 100644
index 00000000000..2db4dc69c9c
--- /dev/null
+++ b/src/vnet/l2/l2_efp_filter.c
@@ -0,0 +1,614 @@
+/*
+ * l2_efp_filter.c : layer 2 egress EFP Filter processing
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ethernet/packet.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_output.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <vppinfra/error.h>
+#include <vppinfra/cache.h>
+
+/**
+ * @file
+ * @brief EFP-filter - Ethernet Flow Point Filter.
+ *
+ * It is possible to transmit a packet out a subinterface with VLAN tags
+ * that are not compatible with that subinterface. In other words, if that
+ * packet arrived on the output port, it would not be classified as coming
+ * from the output subinterface. This can happen in various ways: through
+ * misconfiguration, by putting subinterfaces with different VLAN encaps in
+ * the same bridge-domain, etc. The EFP Filter Check detects such packets
+ * and drops them. It consists of two checks, one that verifies the packet
+ * prior to output VLAN tag rewrite and one that verifies the packet after
+ * VLAN tag rewrite.
+ *
+ */
+typedef struct
+{
+
+ /* Next nodes for features and output interfaces */
+ l2_output_next_nodes_st next_nodes;
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} l2_efp_filter_main_t;
+
+
+typedef struct
+{
+ /* per-pkt trace data */
+ u8 src[6];
+ u8 dst[6];
+ u8 raw[12]; /* raw data (vlans) */
+ u32 sw_if_index;
+} l2_efp_filter_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_l2_efp_filter_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2_efp_filter_trace_t *t = va_arg (*args, l2_efp_filter_trace_t *);
+
+ s = format (s, "l2-output-vtr: sw_if_index %d dst %U src %U data "
+ "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x",
+ t->sw_if_index,
+ format_ethernet_address, t->dst,
+ format_ethernet_address, t->src,
+ t->raw[0], t->raw[1], t->raw[2], t->raw[3], t->raw[4],
+ t->raw[5], t->raw[6], t->raw[7], t->raw[8], t->raw[9],
+ t->raw[10], t->raw[11]);
+ return s;
+}
+
+l2_efp_filter_main_t l2_efp_filter_main;
+
+static vlib_node_registration_t l2_efp_filter_node;
+
+#define foreach_l2_efp_filter_error \
+_(L2_EFP_FILTER, "L2 EFP filter packets") \
+_(DROP, "L2 EFP filter post-rewrite drops")
+
+typedef enum
+{
+#define _(sym,str) L2_EFP_FILTER_ERROR_##sym,
+ foreach_l2_efp_filter_error
+#undef _
+ L2_EFP_FILTER_N_ERROR,
+} l2_efp_filter_error_t;
+
+static char *l2_efp_filter_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2_efp_filter_error
+#undef _
+};
+
+typedef enum
+{
+ L2_EFP_FILTER_NEXT_DROP,
+ L2_EFP_FILTER_N_NEXT,
+} l2_efp_filter_next_t;
+
+
+/**
+ * Extract fields from the packet that will be used in interface
+ * classification.
+ */
+static_always_inline void
+extract_keys (vnet_main_t * vnet_main,
+ u32 sw_if_index0,
+ vlib_buffer_t * b0,
+ u32 * port_sw_if_index0,
+ u16 * first_ethertype0,
+ u16 * outer_id0, u16 * inner_id0, u32 * match_flags0)
+{
+ ethernet_header_t *e0;
+ ethernet_vlan_header_t *h0;
+ u32 tag_len;
+ u32 tag_num;
+
+ *port_sw_if_index0 =
+ vnet_get_sup_sw_interface (vnet_main, sw_if_index0)->sw_if_index;
+
+ e0 = vlib_buffer_get_current (b0);
+ h0 = (ethernet_vlan_header_t *) (e0 + 1);
+
+ *first_ethertype0 = clib_net_to_host_u16 (e0->type);
+ *outer_id0 = clib_net_to_host_u16 (h0[0].priority_cfi_and_id);
+ *inner_id0 = clib_net_to_host_u16 (h0[1].priority_cfi_and_id);
+
+ tag_len = vnet_buffer (b0)->l2.l2_len - sizeof (ethernet_header_t);
+ tag_num = tag_len / sizeof (ethernet_vlan_header_t);
+ *match_flags0 = eth_create_valid_subint_match_flags (tag_num);
+}
+
+/*
+ * EFP filtering is a basic switch feature which prevents an interface from
+ * transmitting a packet that doesn't match the interface's ingress match
+ * criteria. The check has two parts, one performed before egress vlan tag
+ * rewrite and one after.
+ *
+ * The pre-rewrite check ensures the packet matches what an ingress packet looks
+ * like after going through the interface's ingress tag rewrite operation. Only
+ * pushed tags are compared. So:
+ * - if the ingress vlan tag rewrite pushes no tags (or is not enabled),
+ * any packet passes the filter
+ * - if the ingress vlan tag rewrite pushes one tag,
+ * the packet must have at least one tag, and the outer tag must match the pushed tag
+ * - if the ingress vlan tag rewrite pushes two tags,
+ * the packet must have at least two tags, and the outer two tags must match the pushed tags
+ *
+ * The pre-rewrite check is performed in the l2-output node.
+ *
+ * The post-rewrite check ensures the packet matches what an ingress packet looks
+ * like before going through the interface's ingress tag rewrite operation. It verifies
+ * that such a packet arriving on the wire at this port would be classified as arriving
+ * on an input interface equal to the packet's output interface. This is done by running
+ * the output packet's vlan tags and output port through the interface classification,
+ * and checking if the resulting interface matches the output interface.
+ *
+ * The post-rewrite check is performed here.
+ */
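+
+/*
+ * A minimal sketch of the pre-rewrite rule above (illustrative only;
+ * the real check lives in the l2-output node): compare the packet's
+ * outermost tags against the tags the ingress rewrite would push.
+ */
+static_always_inline u32
+efp_pre_rewrite_check_sketch (u16 * pkt_tags, u32 n_pkt_tags,
+ u16 * pushed_tags, u32 n_pushed_tags)
+{
+ u32 i;
+ if (n_pkt_tags < n_pushed_tags)
+ return 0; /* fewer tags than were pushed: fail */
+ for (i = 0; i < n_pushed_tags; i++)
+ if (pkt_tags[i] != pushed_tags[i])
+ return 0; /* outer tags must equal the pushed tags */
+ return 1; /* zero pushed tags: every packet passes */
+}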
+
+static uword
+l2_efp_filter_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ l2_efp_filter_next_t next_index;
+ l2_efp_filter_main_t *msm = &l2_efp_filter_main;
+ vlib_node_t *n = vlib_get_node (vm, l2_efp_filter_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t *em = &vm->error_main;
+ /* cache of last-seen output interface and next index, starts invalidated */
+ u32 cached_sw_if_index = ~0;
+ u32 cached_next_index = ~0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors; /* number of packets to process */
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ /* get space to enqueue frame to graph node "next_index" */
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 6 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1;
+ u32 feature_bitmap0, feature_bitmap1;
+ u16 first_ethertype0, first_ethertype1;
+ u16 outer_id0, inner_id0, outer_id1, inner_id1;
+ u32 match_flags0, match_flags1;
+ u32 port_sw_if_index0, subint_sw_if_index0, port_sw_if_index1,
+ subint_sw_if_index1;
+ vnet_hw_interface_t *hi0, *hi1;
+ main_intf_t *main_intf0, *main_intf1;
+ vlan_intf_t *vlan_intf0, *vlan_intf1;
+ qinq_intf_t *qinq_intf0, *qinq_intf1;
+ u32 is_l20, is_l21;
+ __attribute__ ((unused)) u32 matched0, matched1;
+ u8 error0, error1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3, *p4, *p5;
+ __attribute__ ((unused)) u32 sw_if_index2, sw_if_index3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+ p4 = vlib_get_buffer (vm, from[4]);
+ p5 = vlib_get_buffer (vm, from[5]);
+
+ /* Prefetch the buffer header and packet for the N+2 loop iteration */
+ vlib_prefetch_buffer_header (p4, LOAD);
+ vlib_prefetch_buffer_header (p5, LOAD);
+
+ CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
+
+ /*
+ * Prefetch the input config for the N+1 loop iteration
+ * This depends on the buffer header above
+ */
+ sw_if_index2 = vnet_buffer (p2)->sw_if_index[VLIB_TX];
+ sw_if_index3 = vnet_buffer (p3)->sw_if_index[VLIB_TX];
+ /*
+ * $$$ TODO
+ * CLIB_PREFETCH (vec_elt_at_index(l2output_main.configs, sw_if_index2), CLIB_CACHE_LINE_BYTES, LOAD);
+ * CLIB_PREFETCH (vec_elt_at_index(l2output_main.configs, sw_if_index3), CLIB_CACHE_LINE_BYTES, LOAD);
+ */
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ /* bi is "buffer index", b is pointer to the buffer */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* TX interface handles */
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
+
+ /* process 2 packets */
+ em->counters[node_counter_base_index +
+ L2_EFP_FILTER_ERROR_L2_EFP_FILTER] += 2;
+
+ /* Remove ourself from the feature bitmap */
+ feature_bitmap0 =
+ vnet_buffer (b0)->l2.feature_bitmap & ~L2OUTPUT_FEAT_EFP_FILTER;
+ feature_bitmap1 =
+ vnet_buffer (b1)->l2.feature_bitmap & ~L2OUTPUT_FEAT_EFP_FILTER;
+
+ /* Determine next node */
+ l2_output_dispatch (msm->vlib_main,
+ msm->vnet_main,
+ node,
+ l2_efp_filter_node.index,
+ &cached_sw_if_index,
+ &cached_next_index,
+ &msm->next_nodes,
+ b0, sw_if_index0, feature_bitmap0, &next0);
+ l2_output_dispatch (msm->vlib_main,
+ msm->vnet_main,
+ node,
+ l2_efp_filter_node.index,
+ &cached_sw_if_index,
+ &cached_next_index,
+ &msm->next_nodes,
+ b1, sw_if_index1, feature_bitmap1, &next1);
+
+ /* perform the efp filter check on two packets */
+
+ extract_keys (msm->vnet_main,
+ sw_if_index0,
+ b0,
+ &port_sw_if_index0,
+ &first_ethertype0,
+ &outer_id0, &inner_id0, &match_flags0);
+
+ extract_keys (msm->vnet_main,
+ sw_if_index1,
+ b1,
+ &port_sw_if_index1,
+ &first_ethertype1,
+ &outer_id1, &inner_id1, &match_flags1);
+
+ eth_vlan_table_lookups (&ethernet_main,
+ msm->vnet_main,
+ port_sw_if_index0,
+ first_ethertype0,
+ outer_id0,
+ inner_id0,
+ &hi0,
+ &main_intf0, &vlan_intf0, &qinq_intf0);
+
+ eth_vlan_table_lookups (&ethernet_main,
+ msm->vnet_main,
+ port_sw_if_index1,
+ first_ethertype1,
+ outer_id1,
+ inner_id1,
+ &hi1,
+ &main_intf1, &vlan_intf1, &qinq_intf1);
+
+ matched0 = eth_identify_subint (hi0,
+ b0,
+ match_flags0,
+ main_intf0,
+ vlan_intf0,
+ qinq_intf0,
+ &subint_sw_if_index0,
+ &error0, &is_l20);
+
+ matched1 = eth_identify_subint (hi1,
+ b1,
+ match_flags1,
+ main_intf1,
+ vlan_intf1,
+ qinq_intf1,
+ &subint_sw_if_index1,
+ &error1, &is_l21);
+
+ if (PREDICT_FALSE (sw_if_index0 != subint_sw_if_index0))
+ {
+ /* Drop packet */
+ next0 = L2_EFP_FILTER_NEXT_DROP;
+ b0->error = node->errors[L2_EFP_FILTER_ERROR_DROP];
+ }
+
+ if (PREDICT_FALSE (sw_if_index1 != subint_sw_if_index1))
+ {
+ /* Drop packet */
+ next1 = L2_EFP_FILTER_NEXT_DROP;
+ b1->error = node->errors[L2_EFP_FILTER_ERROR_DROP];
+ }
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ ethernet_header_t *h0 = vlib_buffer_get_current (b0);
+ l2_efp_filter_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ clib_memcpy (t->raw, &h0->type, sizeof (t->raw));
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ ethernet_header_t *h1 = vlib_buffer_get_current (b1);
+ l2_efp_filter_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->sw_if_index = sw_if_index1;
+ clib_memcpy (t->src, h1->src_address, 6);
+ clib_memcpy (t->dst, h1->dst_address, 6);
+ clib_memcpy (t->raw, &h1->type, sizeof (t->raw));
+ }
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ /* if next0==next1==next_index then nothing special needs to be done */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u32 sw_if_index0;
+ u32 feature_bitmap0;
+ u16 first_ethertype0;
+ u16 outer_id0, inner_id0;
+ u32 match_flags0;
+ u32 port_sw_if_index0, subint_sw_if_index0;
+ vnet_hw_interface_t *hi0;
+ main_intf_t *main_intf0;
+ vlan_intf_t *vlan_intf0;
+ qinq_intf_t *qinq_intf0;
+ u32 is_l20;
+ __attribute__ ((unused)) u32 matched0;
+ u8 error0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+
+ /* process 1 packet */
+ em->counters[node_counter_base_index +
+ L2_EFP_FILTER_ERROR_L2_EFP_FILTER] += 1;
+
+ /* Remove ourself from the feature bitmap */
+ feature_bitmap0 =
+ vnet_buffer (b0)->l2.feature_bitmap & ~L2OUTPUT_FEAT_EFP_FILTER;
+
+ /* Determine next node */
+ l2_output_dispatch (msm->vlib_main,
+ msm->vnet_main,
+ node,
+ l2_efp_filter_node.index,
+ &cached_sw_if_index,
+ &cached_next_index,
+ &msm->next_nodes,
+ b0, sw_if_index0, feature_bitmap0, &next0);
+
+ /* perform the efp filter check on one packet */
+
+ extract_keys (msm->vnet_main,
+ sw_if_index0,
+ b0,
+ &port_sw_if_index0,
+ &first_ethertype0,
+ &outer_id0, &inner_id0, &match_flags0);
+
+ eth_vlan_table_lookups (&ethernet_main,
+ msm->vnet_main,
+ port_sw_if_index0,
+ first_ethertype0,
+ outer_id0,
+ inner_id0,
+ &hi0,
+ &main_intf0, &vlan_intf0, &qinq_intf0);
+
+ matched0 = eth_identify_subint (hi0,
+ b0,
+ match_flags0,
+ main_intf0,
+ vlan_intf0,
+ qinq_intf0,
+ &subint_sw_if_index0,
+ &error0, &is_l20);
+
+ if (PREDICT_FALSE (sw_if_index0 != subint_sw_if_index0))
+ {
+ /* Drop packet */
+ next0 = L2_EFP_FILTER_NEXT_DROP;
+ b0->error = node->errors[L2_EFP_FILTER_ERROR_DROP];
+ }
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ ethernet_header_t *h0 = vlib_buffer_get_current (b0);
+ l2_efp_filter_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ clib_memcpy (t->raw, &h0->type, sizeof (t->raw));
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2_efp_filter_node,static) = {
+ .function = l2_efp_filter_node_fn,
+ .name = "l2-efp-filter",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2_efp_filter_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2_efp_filter_error_strings),
+ .error_strings = l2_efp_filter_error_strings,
+
+ .n_next_nodes = L2_EFP_FILTER_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [L2_EFP_FILTER_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2_efp_filter_node, l2_efp_filter_node_fn)
+ clib_error_t *l2_efp_filter_init (vlib_main_t * vm)
+{
+ l2_efp_filter_main_t *mp = &l2_efp_filter_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main ();
+
+ /* Initialize the feature next-node indexes */
+ feat_bitmap_init_next_nodes (vm,
+ l2_efp_filter_node.index,
+ L2OUTPUT_N_FEAT,
+ l2output_get_feat_names (),
+ mp->next_nodes.feat_next_node_index);
+
+ /* Initialize the output node mapping table */
+ l2output_init_output_node_vec (&mp->next_nodes.output_node_index_vec);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2_efp_filter_init);
+
+
+/** Enable/disable the EFP Filter check on the subinterface. */
+void
+l2_efp_filter_configure (vnet_main_t * vnet_main, u32 sw_if_index, u32 enable)
+{
+ /* set the interface flag */
+ l2output_intf_bitmap_enable (sw_if_index, L2OUTPUT_FEAT_EFP_FILTER, enable);
+}
+
+
+/**
+ * Set subinterface egress efp filter enable/disable.
+ * The CLI format is:
+ * set interface l2 efp-filter <interface> [disable]
+ */
+static clib_error_t *
+int_l2_efp_filter (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 sw_if_index;
+ u32 enable;
+
+ if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ enable = 1;
+ if (unformat (input, "disable"))
+ {
+ enable = 0;
+ }
+
+ /* enable/disable the feature */
+ l2_efp_filter_configure (vnm, sw_if_index, enable);
+
+done:
+ return error;
+}
+
+
+/*?
+ * EFP filtering is a basic switch feature which prevents an interface from
+ * transmitting a packet that doesn't match the interface's ingress match
+ * criteria. The check has two parts, one performed before egress vlan tag
+ * rewrite and one after. This command enables or disables the EFP filtering
+ * for a given sub-interface.
+ *
+ * @cliexpar
+ * Example of how to enable a Layer 2 efp-filter on a sub-interface:
+ * @cliexcmd{set interface l2 efp-filter GigabitEthernet0/8/0.200}
+ * Example of how to disable a Layer 2 efp-filter on a sub-interface:
+ * @cliexcmd{set interface l2 efp-filter GigabitEthernet0/8/0.200 disable}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (int_l2_efp_filter_cli, static) = {
+ .path = "set interface l2 efp-filter",
+ .short_help = "set interface l2 efp-filter <interface> [disable]",
+ .function = int_l2_efp_filter,
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_efp_filter.h b/src/vnet/l2/l2_efp_filter.h
new file mode 100644
index 00000000000..f40851df3bd
--- /dev/null
+++ b/src/vnet/l2/l2_efp_filter.h
@@ -0,0 +1,33 @@
+/*
+ * l2_efp_filter.h : layer 2 egress EFP Filter processing
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef included_vnet_l2_efp_filter_h
+#define included_vnet_l2_efp_filter_h
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_fib.c b/src/vnet/l2/l2_fib.c
new file mode 100644
index 00000000000..d34836e33d0
--- /dev/null
+++ b/src/vnet/l2/l2_fib.c
@@ -0,0 +1,857 @@
+/*
+ * l2_fib.c : layer 2 forwarding table (aka mac table)
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vlib/cli.h>
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_fib.h>
+#include <vnet/l2/l2_learn.h>
+#include <vnet/l2/l2_bd.h>
+
+#include <vppinfra/bihash_template.c>
+
+/**
+ * @file
+ * @brief Ethernet MAC Address FIB Table Management.
+ *
+ * The MAC Address forwarding table for bridge-domains is called the l2fib.
+ * Entries are added automatically as part of mac learning, but MAC Addresses
+ * entries can also be added manually.
+ *
+ */
+
+typedef struct
+{
+
+ /* hash table */
+ BVT (clib_bihash) mac_table;
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} l2fib_main_t;
+
+l2fib_main_t l2fib_main;
+
+
+/** Format sw_if_index. If the value is ~0, use the text "N/A" */
+u8 *
+format_vnet_sw_if_index_name_with_NA (u8 * s, va_list * args)
+{
+ vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+ u32 sw_if_index = va_arg (*args, u32);
+ if (sw_if_index == ~0)
+ return format (s, "N/A");
+ else
+ return format (s, "%U",
+ format_vnet_sw_interface_name, vnm,
+ vnet_get_sw_interface (vnm, sw_if_index));
+}
+
+void
+l2fib_table_dump (u32 bd_index, l2fib_entry_key_t ** l2fe_key,
+ l2fib_entry_result_t ** l2fe_res)
+{
+ l2fib_main_t *msm = &l2fib_main;
+ BVT (clib_bihash) * h = &msm->mac_table;
+ clib_bihash_bucket_t *b;
+ BVT (clib_bihash_value) * v;
+ l2fib_entry_key_t key;
+ l2fib_entry_result_t result;
+ int i, j, k;
+
+ for (i = 0; i < h->nbuckets; i++)
+ {
+ b = &h->buckets[i];
+ if (b->offset == 0)
+ continue;
+ v = BV (clib_bihash_get_value) (h, b->offset);
+ for (j = 0; j < (1 << b->log2_pages); j++)
+ {
+ for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
+ {
+ if (v->kvp[k].key == ~0ULL && v->kvp[k].value == ~0ULL)
+ continue;
+
+ key.raw = v->kvp[k].key;
+ result.raw = v->kvp[k].value;
+
+ if ((bd_index == ~0) || (bd_index == key.fields.bd_index))
+ {
+ vec_add1 (*l2fe_key, key);
+ vec_add1 (*l2fe_res, result);
+ }
+ }
+ v++;
+ }
+ }
+}
+
+/** Display the contents of the l2fib. */
+static clib_error_t *
+show_l2fib (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ bd_main_t *bdm = &bd_main;
+ l2fib_main_t *msm = &l2fib_main;
+ l2_bridge_domain_t *bd_config;
+ BVT (clib_bihash) * h = &msm->mac_table;
+ clib_bihash_bucket_t *b;
+ BVT (clib_bihash_value) * v;
+ l2fib_entry_key_t key;
+ l2fib_entry_result_t result;
+ u32 first_entry = 1;
+ u64 total_entries = 0;
+ int i, j, k;
+ u8 verbose = 0;
+ u8 raw = 0;
+ u32 bd_id, bd_index = ~0;
+ u8 now = (u8) (vlib_time_now (vm) / 60);
+ u8 *s = 0;
+
+ if (unformat (input, "raw"))
+ raw = 1;
+ else if (unformat (input, "verbose"))
+ verbose = 1;
+ else if (unformat (input, "bd_index %d", &bd_index))
+ verbose = 1;
+ else if (unformat (input, "bd_id %d", &bd_id))
+ {
+ uword *p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+ if (p)
+ {
+ verbose = 1;
+ bd_index = p[0];
+ }
+ else
+ {
+ vlib_cli_output (vm, "no such bridge domain id");
+ return 0;
+ }
+ }
+
+ for (i = 0; i < h->nbuckets; i++)
+ {
+ b = &h->buckets[i];
+ if (b->offset == 0)
+ continue;
+ v = BV (clib_bihash_get_value) (h, b->offset);
+ for (j = 0; j < (1 << b->log2_pages); j++)
+ {
+ for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
+ {
+ if (v->kvp[k].key == ~0ULL && v->kvp[k].value == ~0ULL)
+ continue;
+
+ if (verbose && first_entry)
+ {
+ first_entry = 0;
+ vlib_cli_output (vm,
+ "%=19s%=7s%=30s%=7s%=8s%=8s%=5s%=16s",
+ "Mac Address", "BD Idx", "Interface",
+ "Index", "static", "filter", "bvi",
+ "Mac Age (min)");
+ }
+
+ key.raw = v->kvp[k].key;
+ result.raw = v->kvp[k].value;
+
+ if (verbose
+ && ((bd_index >> 31) || (bd_index == key.fields.bd_index)))
+ {
+ bd_config = vec_elt_at_index (l2input_main.bd_configs,
+ key.fields.bd_index);
+
+ if (bd_config->mac_age)
+ {
+ i16 delta = now - result.fields.timestamp;
+ delta += delta < 0 ? 256 : 0;
+ s = format (s, "%d", delta);
+ }
+ else
+ s = format (s, "disabled");
+
+ vlib_cli_output (vm,
+ "%=19U%=7d%=30U%=7d%=8d%=8d%=5d%=16v",
+ format_ethernet_address, key.fields.mac,
+ key.fields.bd_index,
+ format_vnet_sw_if_index_name_with_NA,
+ msm->vnet_main, result.fields.sw_if_index,
+ result.fields.sw_if_index == ~0
+ ? -1 : result.fields.sw_if_index,
+ result.fields.static_mac,
+ result.fields.filter,
+ result.fields.bvi, s);
+ vec_reset_length (s);
+ }
+ total_entries++;
+ }
+ v++;
+ }
+ }
+
+ if (total_entries == 0)
+ vlib_cli_output (vm, "no l2fib entries");
+ else
+ vlib_cli_output (vm, "%lld l2fib entries", total_entries);
+
+ if (raw)
+ vlib_cli_output (vm, "Raw Hash Table:\n%U\n",
+ BV (format_bihash), h, 1 /* verbose */ );
+
+ vec_free (s);
+ return 0;
+}
+
+/*?
+ * This command displays the MAC Address entries of the L2 FIB table.
+ * Output can be filtered to just get the number of MAC Addresses or display
+ * each MAC Address for all bridge domains or just a single bridge domain.
+ *
+ * @cliexpar
+ * Example of how to display the number of MAC Address entries in the L2
+ * FIB table:
+ * @cliexstart{show l2fib}
+ * 3 l2fib entries
+ * @cliexend
+ * Example of how to display all the MAC Address entries in the L2
+ * FIB table:
+ * @cliexstart{show l2fib verbose}
+ * Mac Address BD Idx Interface Index static filter bvi Mac Age (min)
+ * 52:54:00:53:18:33 1 GigabitEthernet0/8/0.200 3 0 0 0 disabled
+ * 52:54:00:53:18:55 1 GigabitEthernet0/8/0.200 3 1 0 0 disabled
+ * 52:54:00:53:18:77 1 N/A -1 1 1 0 disabled
+ * 3 l2fib entries
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_l2fib_cli, static) = {
+ .path = "show l2fib",
+ .short_help = "show l2fib [verbose | bd_id <nn> | bd_index <nn> | raw]",
+ .function = show_l2fib,
+};
+/* *INDENT-ON* */
+
+
+/* Remove all entries from the l2fib */
+void
+l2fib_clear_table (uint keep_static)
+{
+ l2fib_main_t *mp = &l2fib_main;
+
+ if (keep_static)
+ {
+ /* TODO: remove only non-static entries */
+ }
+ else
+ {
+ /* Remove all entries */
+ BV (clib_bihash_free) (&mp->mac_table);
+ BV (clib_bihash_init) (&mp->mac_table, "l2fib mac table",
+ L2FIB_NUM_BUCKETS, L2FIB_MEMORY_SIZE);
+ }
+
+ l2learn_main.global_learn_count = 0;
+}
+
+/** Clear all entries in L2FIB.
+ * @TODO: Later we may want a way to remove only the non-static entries
+ */
+static clib_error_t *
+clear_l2fib (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ l2fib_clear_table (0);
+ return 0;
+}
+
+/*?
+ * This command clears all the MAC Address entries from the L2 FIB table.
+ *
+ * @cliexpar
+ * Example of how to clear the L2 FIB Table:
+ * @cliexcmd{clear l2fib}
+ * Example to show the L2 FIB Table has been cleared:
+ * @cliexstart{show l2fib verbose}
+ * no l2fib entries
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (clear_l2fib_cli, static) = {
+ .path = "clear l2fib",
+ .short_help = "clear l2fib",
+ .function = clear_l2fib,
+};
+/* *INDENT-ON* */
+
+
+/**
+ * Add an entry to the l2fib.
+ * If the entry already exists, it is overwritten.
+ */
+void
+l2fib_add_entry (u64 mac,
+ u32 bd_index,
+ u32 sw_if_index, u32 static_mac, u32 filter_mac, u32 bvi_mac)
+{
+ l2fib_entry_key_t key;
+ l2fib_entry_result_t result;
+ l2fib_main_t *mp = &l2fib_main;
+ BVT (clib_bihash_kv) kv;
+
+ /* set up key */
+ key.raw = l2fib_make_key ((u8 *) & mac, bd_index);
+
+ /* set up result */
+ result.raw = 0; /* clear all fields */
+ result.fields.sw_if_index = sw_if_index;
+ result.fields.static_mac = static_mac;
+ result.fields.filter = filter_mac;
+ result.fields.bvi = bvi_mac;
+
+ kv.key = key.raw;
+ kv.value = result.raw;
+
+ BV (clib_bihash_add_del) (&mp->mac_table, &kv, 1 /* is_add */ );
+
+ /* increment counter if dynamically learned mac */
+ if (!result.fields.static_mac)
+ {
+ l2learn_main.global_learn_count++;
+ }
+}
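+
+/*
+ * Usage sketch (illustrative values only): install a static entry for
+ * 52:54:00:53:18:55 in bridge-domain index 1, pointing at sw_if_index 3.
+ * The mac argument carries the six address bytes in memory order.
+ */
+static void __attribute__ ((unused))
+l2fib_add_entry_example (void)
+{
+ u8 mac_bytes[8] = { 0x52, 0x54, 0x00, 0x53, 0x18, 0x55, 0, 0 };
+ u64 mac;
+
+ clib_memcpy (&mac, mac_bytes, sizeof (mac));
+ l2fib_add_entry (mac, 1 /* bd_index */ , 3 /* sw_if_index */ ,
+ 1 /* static_mac */ , 0 /* filter_mac */ , 0 /* bvi_mac */ );
+}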
+
+/**
+ * Add an entry to the L2FIB.
+ * The CLI format is:
+ * l2fib add <mac> <bd> <intf> [static] [bvi]
+ * l2fib add <mac> <bd> filter
+ * Note that filter and bvi entries are always static
+ */
+static clib_error_t *
+l2fib_add (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ bd_main_t *bdm = &bd_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u64 mac;
+ u32 bd_id;
+ u32 bd_index;
+ u32 sw_if_index = ~0;
+ u32 filter_mac = 0;
+ u32 static_mac = 0;
+ u32 bvi_mac = 0;
+ uword *p;
+
+ if (!unformat_user (input, unformat_ethernet_address, &mac))
+ {
+ error = clib_error_return (0, "expected mac address `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ if (!unformat (input, "%d", &bd_id))
+ {
+ error = clib_error_return (0, "expected bridge domain ID `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+ if (!p)
+ {
+ error = clib_error_return (0, "bridge domain ID %d invalid", bd_id);
+ goto done;
+ }
+ bd_index = p[0];
+
+ if (unformat (input, "filter"))
+ {
+ filter_mac = 1;
+ static_mac = 1;
+
+ }
+ else
+ {
+
+ if (!unformat_user
+ (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+ if (unformat (input, "static"))
+ {
+ static_mac = 1;
+ }
+ else if (unformat (input, "bvi"))
+ {
+ bvi_mac = 1;
+ static_mac = 1;
+ }
+ }
+
+ l2fib_add_entry (mac, bd_index, sw_if_index, static_mac, filter_mac,
+ bvi_mac);
+
+done:
+ return error;
+}
+
+/*?
+ * This command adds a MAC Address entry to the L2 FIB table
+ * of an existing bridge-domain. The MAC Address can be static
+ * or dynamic. This command also allows a filter to be added,
+ * such that packets with given MAC Addresses (source mac or
+ * destination mac match) are dropped.
+ *
+ * @cliexpar
+ * Example of how to add a dynamic MAC Address entry to the L2 FIB table
+ * of a bridge-domain (where 200 is the bridge-domain-id):
+ * @cliexcmd{l2fib add 52:54:00:53:18:33 200 GigabitEthernet0/8/0.200}
+ * Example of how to add a static MAC Address entry to the L2 FIB table
+ * of a bridge-domain (where 200 is the bridge-domain-id):
+ * @cliexcmd{l2fib add 52:54:00:53:18:55 200 GigabitEthernet0/8/0.200 static}
+ * Example of how to add a filter such that a packet with the given MAC
+ * Address will be dropped in a given bridge-domain (where 200 is the
+ * bridge-domain-id):
+ * @cliexcmd{l2fib add 52:54:00:53:18:77 200 filter}
+ * Example of show command of the provisioned MAC Addresses and filters:
+ * @cliexstart{show l2fib verbose}
+ * Mac Address BD Idx Interface Index static filter bvi Mac Age (min)
+ * 52:54:00:53:18:33 1 GigabitEthernet0/8/0.200 3 0 0 0 disabled
+ * 52:54:00:53:18:55 1 GigabitEthernet0/8/0.200 3 1 0 0 disabled
+ * 52:54:00:53:18:77 1 N/A -1 1 1 0 disabled
+ * 3 l2fib entries
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (l2fib_add_cli, static) = {
+ .path = "l2fib add",
+ .short_help = "l2fib add <mac> <bridge-domain-id> filter | <intf> [static | bvi]",
+ .function = l2fib_add,
+};
+/* *INDENT-ON* */
+
+
+static clib_error_t *
+l2fib_test_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ clib_error_t *error = 0;
+ u64 mac, save_mac;
+ u32 bd_index = 0;
+ u32 sw_if_index = 8;
+ u32 filter_mac = 0;
+ u32 bvi_mac = 0;
+ u32 is_add = 0;
+ u32 is_del = 0;
+ u32 is_check = 0;
+ u32 count = 1;
+ int mac_set = 0;
+ int i;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "mac %U", unformat_ethernet_address, &mac))
+ mac_set = 1;
+ else if (unformat (input, "add"))
+ is_add = 1;
+ else if (unformat (input, "del"))
+ is_del = 1;
+ else if (unformat (input, "check"))
+ is_check = 1;
+ else if (unformat (input, "count %d", &count))
+ ;
+ else
+ break;
+ }
+
+ if (mac_set == 0)
+ return clib_error_return (0, "mac not set");
+
+ if (is_add == 0 && is_del == 0 && is_check == 0)
+ return clib_error_return (0,
+ "noop: pick at least one of (add,del,check)");
+
+ save_mac = mac;
+
+ if (is_add)
+ {
+ for (i = 0; i < count; i++)
+ {
+ u64 tmp;
+ l2fib_add_entry (mac, bd_index, sw_if_index, mac,
+ filter_mac, bvi_mac);
+ tmp = clib_net_to_host_u64 (mac);
+ tmp >>= 16;
+ tmp++;
+ tmp <<= 16;
+ mac = clib_host_to_net_u64 (tmp);
+ }
+ }
+
+ if (is_check)
+ {
+ BVT (clib_bihash_kv) kv;
+ l2fib_main_t *mp = &l2fib_main;
+
+ mac = save_mac;
+
+ for (i = 0; i < count; i++)
+ {
+ u64 tmp;
+ kv.key = l2fib_make_key ((u8 *) & mac, bd_index);
+ if (BV (clib_bihash_search) (&mp->mac_table, &kv, &kv))
+ {
+ clib_warning ("key %U AWOL", format_ethernet_address, &mac);
+ break;
+ }
+ tmp = clib_net_to_host_u64 (mac);
+ tmp >>= 16;
+ tmp++;
+ tmp <<= 16;
+ mac = clib_host_to_net_u64 (tmp);
+ }
+ }
+
+ if (is_del)
+ {
+ for (i = 0; i < count; i++)
+ {
+ u64 tmp;
+
+ l2fib_del_entry (mac, bd_index);
+
+ tmp = clib_net_to_host_u64 (mac);
+ tmp >>= 16;
+ tmp++;
+ tmp <<= 16;
+ mac = clib_host_to_net_u64 (tmp);
+ }
+ }
+
+ return error;
+}
+
+/*?
+ * The set of '<em>test l2fib</em>' commands allows the L2 FIB table of the default
+ * bridge domain (bridge-domain-id of 0) to be modified.
+ *
+ * @cliexpar
+ * @parblock
+ * Example of how to add a set of 4 sequential MAC Address entries to L2
+ * FIB table of the default bridge-domain:
+ * @cliexcmd{test l2fib add mac 52:54:00:53:00:00 count 4}
+ *
+ * Show the set of 4 sequential MAC Address entries that were added:
+ * @cliexstart{show l2fib verbose}
+ * Mac Address BD Idx Interface Index static filter bvi Mac Age (min)
+ * 52:54:00:53:00:00 0 GigabitEthernet0/8/0.300 8 0 0 0 disabled
+ * 52:54:00:53:00:01 0 GigabitEthernet0/8/0.300 8 0 0 0 disabled
+ * 52:54:00:53:00:03 0 GigabitEthernet0/8/0.300 8 0 0 0 disabled
+ * 52:54:00:53:00:02 0 GigabitEthernet0/8/0.300 8 0 0 0 disabled
+ * 4 l2fib entries
+ * @cliexend
+ *
+ * Example of how to check that the set of 4 sequential MAC Address
+ * entries were added to L2 FIB table of the default
+ * bridge-domain. Used a count of 5 to produce an error:
+ *
+ * @cliexcmd{test l2fib check mac 52:54:00:53:00:00 count 5}
+ * The output of the check command is in the log files. Log file
+ * location may vary based on your OS and Version:
+ *
+ * <b><em># tail -f /var/log/messages | grep l2fib_test_command_fn</em></b>
+ *
+ * Sep 7 17:15:24 localhost vnet[4952]: l2fib_test_command_fn:446: key 52:54:00:53:00:04 AWOL
+ *
+ * Example of how to delete a set of 4 sequential MAC Address entries
+ * from L2 FIB table of the default bridge-domain:
+ * @cliexcmd{test l2fib del mac 52:54:00:53:00:00 count 4}
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (l2fib_test_command, static) = {
+ .path = "test l2fib",
+ .short_help = "test l2fib [add|del|check] mac <base-addr> count <nn>",
+ .function = l2fib_test_command_fn,
+};
+/* *INDENT-ON* */
+
+
+/**
+ * Delete an entry from the l2fib.
+ * Return 0 if the entry was deleted, or 1 if it was not found
+ */
+u32
+l2fib_del_entry (u64 mac, u32 bd_index)
+{
+
+ l2fib_entry_result_t result;
+ l2fib_main_t *mp = &l2fib_main;
+ BVT (clib_bihash_kv) kv;
+
+ /* set up key */
+ kv.key = l2fib_make_key ((u8 *) & mac, bd_index);
+
+ if (BV (clib_bihash_search) (&mp->mac_table, &kv, &kv))
+ return 1;
+
+ result.raw = kv.value;
+
+ /* decrement counter if dynamically learned mac */
+ if (!result.fields.static_mac)
+ {
+ if (l2learn_main.global_learn_count > 0)
+ {
+ l2learn_main.global_learn_count--;
+ }
+ }
+
+ /* Remove entry from hash table */
+ BV (clib_bihash_add_del) (&mp->mac_table, &kv, 0 /* is_add */ );
+ return 0;
+}
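+
+/*
+ * Usage sketch (illustrative): removing the entry again; a non-zero
+ * return means the mac/bd pair was not in the table.
+ *
+ *   if (l2fib_del_entry (mac, bd_index))
+ *     clib_warning ("no such entry");
+ */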
+
+/**
+ * Delete an entry from the L2FIB.
+ * The CLI format is:
+ * l2fib del <mac> <bd-id>
+ */
+static clib_error_t *
+l2fib_del (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ bd_main_t *bdm = &bd_main;
+ clib_error_t *error = 0;
+ u64 mac;
+ u32 bd_id;
+ u32 bd_index;
+ uword *p;
+
+ if (!unformat_user (input, unformat_ethernet_address, &mac))
+ {
+ error = clib_error_return (0, "expected mac address `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ if (!unformat (input, "%d", &bd_id))
+ {
+ error = clib_error_return (0, "expected bridge domain ID `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+ if (!p)
+ {
+ error = clib_error_return (0, "bridge domain ID %d invalid", bd_id);
+ goto done;
+ }
+ bd_index = p[0];
+
+ /* Delete the entry */
+ if (l2fib_del_entry (mac, bd_index))
+ {
+ error = clib_error_return (0, "mac entry not found");
+ goto done;
+ }
+
+done:
+ return error;
+}
+
+/*?
+ * This command deletes an existing MAC Address entry from the L2 FIB
+ * table of an existing bridge-domain.
+ *
+ * @cliexpar
+ * Example of how to delete a MAC Address entry from the L2 FIB table of a bridge-domain (where 200 is the bridge-domain-id):
+ * @cliexcmd{l2fib del 52:54:00:53:18:33 200}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (l2fib_del_cli, static) = {
+ .path = "l2fib del",
+ .short_help = "l2fib del <mac> <bridge-domain-id>",
+ .function = l2fib_del,
+};
+/* *INDENT-ON* */
+
+
+BVT (clib_bihash) * get_mac_table (void)
+{
+ l2fib_main_t *mp = &l2fib_main;
+ return &mp->mac_table;
+}
+
+static uword
+l2fib_mac_age_scanner_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
+ vlib_frame_t * f)
+{
+ uword event_type, *event_data = 0;
+ l2fib_main_t *msm = &l2fib_main;
+ l2_bridge_domain_t *bd_config;
+ BVT (clib_bihash) * h = &msm->mac_table;
+ clib_bihash_bucket_t *b;
+ BVT (clib_bihash_value) * v;
+ l2fib_entry_key_t key;
+ l2fib_entry_result_t result;
+ int i, j, k;
+ bool enabled = 0;
+ f64 start_time, last_run_duration = 0, t;
+ i16 delta;
+
+ while (1)
+ {
+ if (enabled)
+ vlib_process_wait_for_event_or_clock (vm, 60 - last_run_duration);
+ else
+ vlib_process_wait_for_event (vm);
+
+ event_type = vlib_process_get_events (vm, &event_data);
+ vec_reset_length (event_data);
+
+ switch (event_type)
+ {
+ case ~0:
+ break;
+ case L2_MAC_AGE_PROCESS_EVENT_START:
+ enabled = 1;
+ break;
+ case L2_MAC_AGE_PROCESS_EVENT_STOP:
+ enabled = 0;
+ continue;
+ default:
+ ASSERT (0);
+ }
+ last_run_duration = start_time = vlib_time_now (vm);
+ for (i = 0; i < h->nbuckets; i++)
+ {
+ /* Allow no more than 10us without a pause */
+ t = vlib_time_now (vm);
+ if (t > start_time + 10e-6)
+ {
+ vlib_process_suspend (vm, 100e-6); /* suspend for 100 us */
+ start_time = vlib_time_now (vm);
+ }
+
+ if (i < (h->nbuckets - 3))
+ {
+ b = &h->buckets[i + 3];
+ CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
+ b = &h->buckets[i + 1];
+ if (b->offset)
+ {
+ v = BV (clib_bihash_get_value) (h, b->offset);
+ CLIB_PREFETCH (v, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+ }
+
+ b = &h->buckets[i];
+ if (b->offset == 0)
+ continue;
+ v = BV (clib_bihash_get_value) (h, b->offset);
+ for (j = 0; j < (1 << b->log2_pages); j++)
+ {
+ for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
+ {
+ if (v->kvp[k].key == ~0ULL && v->kvp[k].value == ~0ULL)
+ continue;
+
+ key.raw = v->kvp[k].key;
+ result.raw = v->kvp[k].value;
+
+ if (result.fields.static_mac)
+ continue;
+
+ bd_config = vec_elt_at_index (l2input_main.bd_configs,
+ key.fields.bd_index);
+
+ if (bd_config->mac_age == 0)
+ continue;
+
+ delta = (u8) (start_time / 60) - result.fields.timestamp;
+ delta += delta < 0 ? 256 : 0;
+
+ if (delta > bd_config->mac_age)
+ {
+ void *p = &key.fields.mac;
+ l2fib_del_entry (*(u64 *) p, key.fields.bd_index);
+ }
+ }
+ v++;
+ }
+ }
+ last_run_duration = vlib_time_now (vm) - last_run_duration;
+ }
+ return 0;
+}
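+
+/*
+ * Worked example of the aging arithmetic above: timestamps are
+ * (u8) (time_in_seconds / 60), i.e. minutes modulo 256. If an entry
+ * was stamped at minute 250 and the scan runs at minute 3, then
+ * delta = 3 - 250 = -247, corrected to -247 + 256 = 9 minutes of age.
+ */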
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2fib_mac_age_scanner_process_node) = {
+ .function = l2fib_mac_age_scanner_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "l2fib-mac-age-scanner-process",
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+l2fib_init (vlib_main_t * vm)
+{
+ l2fib_main_t *mp = &l2fib_main;
+ l2fib_entry_key_t test_key;
+ u8 test_mac[6];
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main ();
+
+ /* Create the hash table */
+ BV (clib_bihash_init) (&mp->mac_table, "l2fib mac table",
+ L2FIB_NUM_BUCKETS, L2FIB_MEMORY_SIZE);
+
+ /* verify the key constructor is good, since it is endian-sensitive */
+ memset (test_mac, 0, sizeof (test_mac));
+ test_mac[0] = 0x11;
+ test_key.raw = 0;
+ test_key.raw = l2fib_make_key ((u8 *) & test_mac, 0x1234);
+ ASSERT (test_key.fields.mac[0] == 0x11);
+ ASSERT (test_key.fields.bd_index == 0x1234);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2fib_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_fib.h b/src/vnet/l2/l2_fib.h
new file mode 100644
index 00000000000..4a2da59bc01
--- /dev/null
+++ b/src/vnet/l2/l2_fib.h
@@ -0,0 +1,341 @@
+/*
+ * l2_fib.h : layer 2 forwarding table (aka mac table)
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_l2fib_h
+#define included_l2fib_h
+
+#include <vlib/vlib.h>
+#include <vppinfra/bihash_8_8.h>
+
+/*
+ * The size of the hash table
+ */
+#define L2FIB_NUM_BUCKETS (64 * 1024)
+#define L2FIB_MEMORY_SIZE (256<<20)
+
+/*
+ * The L2fib key is the mac address and bridge domain ID
+ */
+typedef struct
+{
+ union
+ {
+ struct
+ {
+ u16 bd_index;
+ u8 mac[6];
+ } fields;
+ struct
+ {
+ u32 w0;
+ u32 w1;
+ } words;
+ u64 raw;
+ };
+} l2fib_entry_key_t;
+
+STATIC_ASSERT_SIZEOF (l2fib_entry_key_t, 8);
+
+/*
+ * The l2fib entry results
+ */
+typedef struct
+{
+ union
+ {
+ struct
+ {
+ u32 sw_if_index; /* output sw_if_index (L3 interface if bvi==1) */
+
+ u8 static_mac:1; /* static mac, no dataplane learning */
+ u8 bvi:1; /* mac is for a bridged virtual interface */
+ u8 filter:1; /* drop packets to/from this mac */
+ u8 unused1:5;
+ u8 timestamp; /* timestamp for aging */
+ u16 unused2;
+ } fields;
+ u64 raw;
+ };
+} l2fib_entry_result_t;
+
+STATIC_ASSERT_SIZEOF (l2fib_entry_result_t, 8);
+
+/**
+ * Compute the hash for the given key and return
+ * the corresponding bucket index
+ */
+always_inline u32
+l2fib_compute_hash_bucket (l2fib_entry_key_t * key)
+{
+ u32 result;
+ u32 temp_a;
+ u32 temp_b;
+
+ result = 0xa5a5a5a5; /* some seed */
+ temp_a = key->words.w0;
+ temp_b = key->words.w1;
+ hash_mix32 (temp_a, temp_b, result);
+
+ return result % L2FIB_NUM_BUCKETS;
+}
+
+always_inline u64
+l2fib_make_key (u8 * mac_address, u16 bd_index)
+{
+ u64 temp;
+
+ /*
+ * The mac address in memory is A:B:C:D:E:F
+ * The bd id in register is H:L
+ */
+#if CLIB_ARCH_IS_LITTLE_ENDIAN
+ /*
+ * Create the in-register key as F:E:D:C:B:A:H:L
+ * In memory the key is L:H:A:B:C:D:E:F
+ */
+ temp = *((u64 *) (mac_address)) << 16;
+ temp = (temp & ~0xffff) | (u64) (bd_index);
+#else
+ /*
+ * Create the in-register key as H:L:A:B:C:D:E:F
+ * In memory the key is H:L:A:B:C:D:E:F
+ */
+ temp = *((u64 *) (mac_address)) >> 16;
+ temp = temp | (((u64) bd_index) << 48);
+#endif
+
+ return temp;
+}
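+
+/*
+ * Worked example: mac = 01:02:03:04:05:06, bd_index = 0x1234.
+ * On a little-endian machine the function returns 0x0605040302011234,
+ * stored in memory as 34 12 01 02 03 04 05 06 -- the L:H:A:B:C:D:E:F
+ * layout described above.
+ */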
+
+
+
+/**
+ * Lookup the entry for mac and bd_index in the mac table for 1 packet.
+ * Cached_key and cached_result are used as a one-entry cache.
+ * The function reads and updates them as needed.
+ *
+ * mac0 and bd_index0 are the keys. The entry is written to result0.
+ * If the entry was not found, result0 is set to ~0.
+ *
+ * key0 and bucket0 return with the computed key and hash bucket,
+ * convenient if the entry needs to be updated afterward.
+ * If the cached_result was used, bucket0 is set to ~0.
+ */
+
+static_always_inline void
+l2fib_lookup_1 (BVT (clib_bihash) * mac_table,
+ l2fib_entry_key_t * cached_key,
+ l2fib_entry_result_t * cached_result,
+ u8 * mac0,
+ u16 bd_index0,
+ l2fib_entry_key_t * key0,
+ u32 * bucket0, l2fib_entry_result_t * result0)
+{
+ /* set up key */
+ key0->raw = l2fib_make_key (mac0, bd_index0);
+ *bucket0 = ~0;
+
+ if (key0->raw == cached_key->raw)
+ {
+ /* Hit in the one-entry cache */
+ result0->raw = cached_result->raw;
+ }
+ else
+ {
+ /* Do a regular mac table lookup */
+ BVT (clib_bihash_kv) kv;
+
+ kv.key = key0->raw;
+ kv.value = ~0ULL;
+ BV (clib_bihash_search_inline) (mac_table, &kv);
+ result0->raw = kv.value;
+
+ /* Update one-entry cache */
+ cached_key->raw = key0->raw;
+ cached_result->raw = result0->raw;
+ }
+}
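+
+/*
+ * Usage sketch (illustrative): a caller keeps cached_key/cached_result
+ * across packets so back-to-back lookups of the same mac/bd pair skip
+ * the bihash search entirely.
+ */
+static_always_inline u32
+l2fib_lookup_1_example (BVT (clib_bihash) * mac_table,
+ l2fib_entry_key_t * cached_key,
+ l2fib_entry_result_t * cached_result,
+ u8 * mac, u16 bd_index)
+{
+ l2fib_entry_key_t key;
+ l2fib_entry_result_t result;
+ u32 bucket;
+
+ l2fib_lookup_1 (mac_table, cached_key, cached_result,
+ mac, bd_index, &key, &bucket, &result);
+
+ /* result.raw == ~0ULL means the entry was not found */
+ return (result.raw == ~0ULL) ? ~0 : result.fields.sw_if_index;
+}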
+
+
+/**
+ * Lookup the entry for mac and bd_index in the mac table for 2 packets.
+ * The lookups for the two packets are interleaved.
+ *
+ * Cached_key and cached_result are used as a one-entry cache.
+ * The function reads and updates them as needed.
+ *
+ * mac0 and bd_index0 are the keys. The entry is written to result0.
+ * If the entry was not found, result0 is set to ~0. The same
+ * holds for mac1/bd_index1/result1.
+ */
+static_always_inline void
+l2fib_lookup_2 (BVT (clib_bihash) * mac_table,
+ l2fib_entry_key_t * cached_key,
+ l2fib_entry_result_t * cached_result,
+ u8 * mac0,
+ u8 * mac1,
+ u16 bd_index0,
+ u16 bd_index1,
+ l2fib_entry_key_t * key0,
+ l2fib_entry_key_t * key1,
+ u32 * bucket0,
+ u32 * bucket1,
+ l2fib_entry_result_t * result0,
+ l2fib_entry_result_t * result1)
+{
+ /* set up key */
+ key0->raw = l2fib_make_key (mac0, bd_index0);
+ key1->raw = l2fib_make_key (mac1, bd_index1);
+
+ if ((key0->raw == cached_key->raw) && (key1->raw == cached_key->raw))
+ {
+ /* Both hit in the one-entry cache */
+ result0->raw = cached_result->raw;
+ result1->raw = cached_result->raw;
+ *bucket0 = ~0;
+ *bucket1 = ~0;
+
+ }
+ else
+ {
+ BVT (clib_bihash_kv) kv0, kv1;
+
+ /*
+ * Do a regular mac table lookup
+ * Interleave lookups for packet 0 and packet 1
+ */
+ kv0.key = key0->raw;
+ kv1.key = key1->raw;
+ kv0.value = ~0ULL;
+ kv1.value = ~0ULL;
+
+ BV (clib_bihash_search_inline) (mac_table, &kv0);
+ BV (clib_bihash_search_inline) (mac_table, &kv1);
+
+ result0->raw = kv0.value;
+ result1->raw = kv1.value;
+
+ /* Update one-entry cache */
+ cached_key->raw = key1->raw;
+ cached_result->raw = result1->raw;
+ }
+}
+
+static_always_inline void
+l2fib_lookup_4 (BVT (clib_bihash) * mac_table,
+ l2fib_entry_key_t * cached_key,
+ l2fib_entry_result_t * cached_result,
+ u8 * mac0,
+ u8 * mac1,
+ u8 * mac2,
+ u8 * mac3,
+ u16 bd_index0,
+ u16 bd_index1,
+ u16 bd_index2,
+ u16 bd_index3,
+ l2fib_entry_key_t * key0,
+ l2fib_entry_key_t * key1,
+ l2fib_entry_key_t * key2,
+ l2fib_entry_key_t * key3,
+ u32 * bucket0,
+ u32 * bucket1,
+ u32 * bucket2,
+ u32 * bucket3,
+ l2fib_entry_result_t * result0,
+ l2fib_entry_result_t * result1,
+ l2fib_entry_result_t * result2,
+ l2fib_entry_result_t * result3)
+{
+ /* set up key */
+ key0->raw = l2fib_make_key (mac0, bd_index0);
+ key1->raw = l2fib_make_key (mac1, bd_index1);
+ key2->raw = l2fib_make_key (mac2, bd_index2);
+ key3->raw = l2fib_make_key (mac3, bd_index3);
+
+ if ((key0->raw == cached_key->raw) && (key1->raw == cached_key->raw) &&
+ (key2->raw == cached_key->raw) && (key3->raw == cached_key->raw))
+ {
+ /* All four hit in the one-entry cache */
+ result0->raw = cached_result->raw;
+ result1->raw = cached_result->raw;
+ result2->raw = cached_result->raw;
+ result3->raw = cached_result->raw;
+ *bucket0 = ~0;
+ *bucket1 = ~0;
+ *bucket2 = ~0;
+ *bucket3 = ~0;
+
+ }
+ else
+ {
+ BVT (clib_bihash_kv) kv0, kv1, kv2, kv3;
+
+ /*
+ * Do a regular mac table lookup
+ * Interleave lookups for packets 0 through 3
+ */
+ kv0.key = key0->raw;
+ kv1.key = key1->raw;
+ kv2.key = key2->raw;
+ kv3.key = key3->raw;
+ kv0.value = ~0ULL;
+ kv1.value = ~0ULL;
+ kv2.value = ~0ULL;
+ kv3.value = ~0ULL;
+
+ BV (clib_bihash_search_inline) (mac_table, &kv0);
+ BV (clib_bihash_search_inline) (mac_table, &kv1);
+ BV (clib_bihash_search_inline) (mac_table, &kv2);
+ BV (clib_bihash_search_inline) (mac_table, &kv3);
+
+ result0->raw = kv0.value;
+ result1->raw = kv1.value;
+ result2->raw = kv2.value;
+ result3->raw = kv3.value;
+
+ /* Update one-entry cache */
+ cached_key->raw = key1->raw;
+ cached_result->raw = result1->raw;
+ }
+}
+
+BVT (clib_bihash) * get_mac_table (void);
+
+void l2fib_clear_table (uint keep_static);
+
+void l2fib_add_entry (u64 mac,
+ u32 bd_index,
+ u32 sw_if_index,
+ u32 static_mac, u32 filter_mac, u32 bvi_mac);
+
+u32 l2fib_del_entry (u64 mac, u32 bd_index);
+
+void l2fib_table_dump (u32 bd_index, l2fib_entry_key_t ** l2fe_key,
+ l2fib_entry_result_t ** l2fe_res);
+
+u8 *format_vnet_sw_if_index_name_with_NA (u8 * s, va_list * args);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_flood.c b/src/vnet/l2/l2_flood.c
new file mode 100644
index 00000000000..ed9e5ac2258
--- /dev/null
+++ b/src/vnet/l2/l2_flood.c
@@ -0,0 +1,568 @@
+/*
+ * l2_flood.c : layer 2 flooding
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vlib/cli.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_bvi.h>
+#include <vnet/replication.h>
+#include <vnet/l2/l2_fib.h>
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+
+
+/**
+ * @file
+ * @brief Ethernet Flooding.
+ *
+ * Flooding uses the packet replication infrastructure to send a copy of the
+ * packet to each member interface. Logically the replication infrastructure
+ * expects two graph nodes: a prep node that initiates replication and sends the
+ * packet to the first destination, and a recycle node that is passed the packet
+ * after it has been transmitted.
+ *
+ * To decrease the amount of code, l2 flooding implements both functions in
+ * the same graph node. This node can tell if it is being called as the "prep"
+ * or "recycle" using replication_is_recycled().
+ */
+
+
+typedef struct
+{
+
+ /* Next nodes for each feature */
+ u32 feat_next_node_index[32];
+
+ /* next node index for the L3 input node of each ethertype */
+ next_by_ethertype_t l3_next;
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} l2flood_main_t;
+
+typedef struct
+{
+ u8 src[6];
+ u8 dst[6];
+ u32 sw_if_index;
+ u16 bd_index;
+} l2flood_trace_t;
+
+
+/* packet trace format function */
+static u8 *
+format_l2flood_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2flood_trace_t *t = va_arg (*args, l2flood_trace_t *);
+
+ s = format (s, "l2-flood: sw_if_index %d dst %U src %U bd_index %d",
+ t->sw_if_index,
+ format_ethernet_address, t->dst,
+ format_ethernet_address, t->src, t->bd_index);
+ return s;
+}
+
+l2flood_main_t l2flood_main;
+
+static vlib_node_registration_t l2flood_node;
+
+#define foreach_l2flood_error \
+_(L2FLOOD, "L2 flood packets") \
+_(REPL_FAIL, "L2 replication failures") \
+_(NO_MEMBERS, "L2 replication complete") \
+_(BVI_BAD_MAC, "BVI L3 mac mismatch") \
+_(BVI_ETHERTYPE, "BVI packet with unhandled ethertype")
+
+typedef enum
+{
+#define _(sym,str) L2FLOOD_ERROR_##sym,
+ foreach_l2flood_error
+#undef _
+ L2FLOOD_N_ERROR,
+} l2flood_error_t;
+
+static char *l2flood_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2flood_error
+#undef _
+};
+
+typedef enum
+{
+ L2FLOOD_NEXT_L2_OUTPUT,
+ L2FLOOD_NEXT_DROP,
+ L2FLOOD_N_NEXT,
+} l2flood_next_t;
+
+/*
+ * Perform flooding on one packet
+ *
+ * Due to the way BVI processing can modify the packet, the BVI interface
+ * (if present) must be processed last in the replication. The member vector
+ * is arranged so that the BVI interface is always the first element.
+ * Flooding walks the vector in reverse.
+ *
+ * BVI processing causes the packet to go to L3 processing. This strips the
+ * L2 header, which is fine because the replication infrastructure restores
+ * it. However L3 processing can trigger larger changes to the packet. For
+ * example, an ARP request could be turned into an ARP reply, an ICMP request
+ * could be turned into an ICMP reply. If BVI processing is not performed
+ * last, the modified packet would be replicated to the remaining members.
+ */
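+
+/*
+ * Example (illustrative): with members = [BVI, A, B, C] and
+ * flood_count = 4, replication emits copies to C, then B, then A,
+ * and the BVI last -- so any L3-driven rewrite of the packet cannot
+ * leak into the copies already sent to A, B and C.
+ */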
+
+static_always_inline void
+l2flood_process (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ l2flood_main_t * msm,
+ u64 * counter_base,
+ vlib_buffer_t * b0,
+ u32 * sw_if_index0,
+ l2fib_entry_key_t * key0,
+ u32 * bucket0, l2fib_entry_result_t * result0, u32 * next0)
+{
+ u16 bd_index0;
+ l2_bridge_domain_t *bd_config;
+ l2_flood_member_t *members;
+ i32 current_member; /* signed */
+ replication_context_t *ctx;
+ u8 in_shg = vnet_buffer (b0)->l2.shg;
+
+ if (!replication_is_recycled (b0))
+ {
+
+ /* Do flood "prep node" processing */
+
+ /* Get config for the bridge domain interface */
+ bd_index0 = vnet_buffer (b0)->l2.bd_index;
+ bd_config = vec_elt_at_index (l2input_main.bd_configs, bd_index0);
+ members = bd_config->members;
+
+ /* Find first member that passes the reflection and SHG checks */
+ current_member = bd_config->flood_count - 1;
+ while ((current_member >= 0) &&
+ ((members[current_member].sw_if_index == *sw_if_index0) ||
+ (in_shg && members[current_member].shg == in_shg)))
+ {
+ current_member--;
+ }
+
+ if (current_member < 0)
+ {
+ /* No members to flood to */
+ *next0 = L2FLOOD_NEXT_DROP;
+ b0->error = node->errors[L2FLOOD_ERROR_NO_MEMBERS];
+ return;
+ }
+
+ if ((current_member > 0) &&
+ ((current_member > 1) ||
+ ((members[0].sw_if_index != *sw_if_index0) &&
+ (!in_shg || members[0].shg != in_shg))))
+ {
+ /* If more than one member then initiate replication */
+ ctx =
+ replication_prep (vm, b0, l2flood_node.index, 1 /* l2_packet */ );
+ ctx->feature_replicas = (uword) members;
+ ctx->feature_counter = current_member;
+ }
+
+ }
+ else
+ {
+ vnet_buffer_opaque_t *vnet_buff_op;
+
+ /* Do flood "recycle node" processing */
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_REPL_FAIL))
+ {
+ (void) replication_recycle (vm, b0, 1 /* is_last */ );
+ *next0 = L2FLOOD_NEXT_DROP;
+ b0->error = node->errors[L2FLOOD_ERROR_REPL_FAIL];
+ return;
+ }
+
+ ctx = replication_get_ctx (b0);
+ replication_clear_recycled (b0);
+
+ members = (l2_flood_member_t *) (intptr_t) ctx->feature_replicas;
+ current_member = (i32) ctx->feature_counter - 1;
+
+ /* Need to update input index from saved packet context */
+ vnet_buff_op = (vnet_buffer_opaque_t *) ctx->vnet_buffer;
+ *sw_if_index0 = vnet_buff_op->sw_if_index[VLIB_RX];
+
+ /* Find next member that passes the reflection and SHG check */
+ while ((current_member >= 0) &&
+ ((members[current_member].sw_if_index == *sw_if_index0) ||
+ (in_shg && members[current_member].shg == in_shg)))
+ {
+ current_member--;
+ }
+
+ if (current_member < 0)
+ {
+ /*
+ * No more members to flood to.
+ * Terminate replication and drop packet.
+ */
+
+ replication_recycle (vm, b0, 1 /* is_last */ );
+
+ *next0 = L2FLOOD_NEXT_DROP;
+ /* Ideally we wouldn't bump a counter here, just silently complete */
+ b0->error = node->errors[L2FLOOD_ERROR_NO_MEMBERS];
+ return;
+ }
+
+ /* Restore packet and context and continue replication */
+ ctx->feature_counter = current_member;
+ replication_recycle (vm, b0, ((current_member == 0) || /*is_last */
+ ((current_member == 1) &&
+ ((members[0].sw_if_index ==
+ *sw_if_index0) || (in_shg
+ && members[0].shg ==
+ in_shg)))));
+ }
+
+ /* Forward packet to the current member */
+ if (PREDICT_FALSE (members[current_member].flags & L2_FLOOD_MEMBER_BVI))
+ {
+ /* Do BVI processing */
+ u32 rc;
+ rc = l2_to_bvi (vm,
+ msm->vnet_main,
+ b0,
+ members[current_member].sw_if_index,
+ &msm->l3_next, next0);
+
+ if (PREDICT_FALSE (rc))
+ {
+ if (rc == TO_BVI_ERR_BAD_MAC)
+ {
+ b0->error = node->errors[L2FLOOD_ERROR_BVI_BAD_MAC];
+ *next0 = L2FLOOD_NEXT_DROP;
+ }
+ else if (rc == TO_BVI_ERR_ETHERTYPE)
+ {
+ b0->error = node->errors[L2FLOOD_ERROR_BVI_ETHERTYPE];
+ *next0 = L2FLOOD_NEXT_DROP;
+ }
+ }
+ }
+ else
+ {
+ /* Do normal L2 forwarding */
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] =
+ members[current_member].sw_if_index;
+ *next0 = L2FLOOD_NEXT_L2_OUTPUT;
+
+ }
+
+}
+
+
+static uword
+l2flood_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ l2flood_next_t next_index;
+ l2flood_main_t *msm = &l2flood_main;
+ vlib_node_t *n = vlib_get_node (vm, l2flood_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t *em = &vm->error_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors; /* number of packets to process */
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ /* get space to enqueue frame to graph node "next_index" */
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 6 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1;
+ l2fib_entry_key_t key0, key1;
+ l2fib_entry_result_t result0, result1;
+ u32 bucket0, bucket1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3, *p4, *p5;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+ p4 = vlib_get_buffer (vm, from[4]);
+ p5 = vlib_get_buffer (vm, from[5]);
+
+ /* Prefetch the buffer header for the N+2 loop iteration */
+ vlib_prefetch_buffer_header (p4, LOAD);
+ vlib_prefetch_buffer_header (p5, LOAD);
+
+ /* Prefetch the replication context for the N+1 loop iteration */
+ /* This depends on the buffer header above */
+ replication_prefetch_ctx (p2);
+ replication_prefetch_ctx (p3);
+
+ /* Prefetch the packet for the N+1 loop iteration */
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ /* bi is "buffer index", b is pointer to the buffer */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* RX interface handles */
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+ /* process 2 pkts */
+ em->counters[node_counter_base_index + L2FLOOD_ERROR_L2FLOOD] += 2;
+
+ l2flood_process (vm, node, msm,
+ &em->counters[node_counter_base_index], b0,
+ &sw_if_index0, &key0, &bucket0, &result0, &next0);
+
+ l2flood_process (vm, node, msm,
+ &em->counters[node_counter_base_index], b1,
+ &sw_if_index1, &key1, &bucket1, &result1, &next1);
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ l2flood_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ ethernet_header_t *h0 = vlib_buffer_get_current (b0);
+ t->sw_if_index = sw_if_index0;
+ t->bd_index = vnet_buffer (b0)->l2.bd_index;
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ }
+ if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ l2flood_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ ethernet_header_t *h1 = vlib_buffer_get_current (b1);
+ t->sw_if_index = sw_if_index1;
+ t->bd_index = vnet_buffer (b1)->l2.bd_index;
+ clib_memcpy (t->src, h1->src_address, 6);
+ clib_memcpy (t->dst, h1->dst_address, 6);
+ }
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ /* if next0==next1==next_index then nothing special needs to be done */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u32 sw_if_index0;
+ l2fib_entry_key_t key0;
+ l2fib_entry_result_t result0;
+ u32 bucket0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ /* process 1 pkt */
+ em->counters[node_counter_base_index + L2FLOOD_ERROR_L2FLOOD] += 1;
+
+ l2flood_process (vm, node, msm,
+ &em->counters[node_counter_base_index], b0,
+ &sw_if_index0, &key0, &bucket0, &result0, &next0);
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ l2flood_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ ethernet_header_t *h0 = vlib_buffer_get_current (b0);
+ t->sw_if_index = sw_if_index0;
+ t->bd_index = vnet_buffer (b0)->l2.bd_index;
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2flood_node,static) = {
+ .function = l2flood_node_fn,
+ .name = "l2-flood",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2flood_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2flood_error_strings),
+ .error_strings = l2flood_error_strings,
+
+ .n_next_nodes = L2FLOOD_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [L2FLOOD_NEXT_L2_OUTPUT] = "l2-output",
+ [L2FLOOD_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2flood_node, l2flood_node_fn)
+ clib_error_t *l2flood_init (vlib_main_t * vm)
+{
+ l2flood_main_t *mp = &l2flood_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main ();
+
+ /* Initialize the feature next-node indexes */
+ feat_bitmap_init_next_nodes (vm,
+ l2flood_node.index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ mp->feat_next_node_index);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2flood_init);
+
+
+
+/** Add the L3 input node for this ethertype to the next nodes structure. */
+void
+l2flood_register_input_type (vlib_main_t * vm,
+ ethernet_type_t type, u32 node_index)
+{
+ l2flood_main_t *mp = &l2flood_main;
+ u32 next_index;
+
+ next_index = vlib_node_add_next (vm, l2flood_node.index, node_index);
+
+ next_by_ethertype_register (&mp->l3_next, type, next_index);
+}
+
+
+/**
+ * Set subinterface flood enable/disable.
+ * The CLI format is:
+ * set interface l2 flood <interface> [disable]
+ */
+static clib_error_t *
+int_flood (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 sw_if_index;
+ u32 enable;
+
+ if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ enable = 1;
+ if (unformat (input, "disable"))
+ {
+ enable = 0;
+ }
+
+ /* set the interface flag */
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_FLOOD, enable);
+
+done:
+ return error;
+}
+
+/*?
+ * Layer 2 flooding can be enabled and disabled on each
+ * interface and on each bridge-domain. Use this command to
+ * manage interfaces. It is enabled by default.
+ *
+ * @cliexpar
+ * Example of how to enable flooding:
+ * @cliexcmd{set interface l2 flood GigabitEthernet0/8/0}
+ * Example of how to disable flooding:
+ * @cliexcmd{set interface l2 flood GigabitEthernet0/8/0 disable}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (int_flood_cli, static) = {
+ .path = "set interface l2 flood",
+ .short_help = "set interface l2 flood <interface> [disable]",
+ .function = int_flood,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_flood.h b/src/vnet/l2/l2_flood.h
new file mode 100644
index 00000000000..acd7c905aaf
--- /dev/null
+++ b/src/vnet/l2/l2_flood.h
@@ -0,0 +1,35 @@
+/*
+ * l2_flood.h : layer 2 flooding
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_l2flood_h
+#define included_l2flood_h
+
+#include <vlib/vlib.h>
+#include <vnet/ethernet/ethernet.h>
+
+void
+l2flood_register_input_type (vlib_main_t * vm,
+ ethernet_type_t type, u32 node_index);
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_fwd.c b/src/vnet/l2/l2_fwd.c
new file mode 100644
index 00000000000..710a9d9e8c3
--- /dev/null
+++ b/src/vnet/l2/l2_fwd.c
@@ -0,0 +1,544 @@
+/*
+ * l2_fwd.c : layer 2 forwarding using l2fib
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vlib/cli.h>
+
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_bvi.h>
+#include <vnet/l2/l2_fwd.h>
+#include <vnet/l2/l2_fib.h>
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/sparse_vec.h>
+
+
+/**
+ * @file
+ * @brief Ethernet Forwarding.
+ *
+ * Code in this file handles forwarding Layer 2 packets. This file performs
+ * the L2 FIB lookup and invokes packet learning and flooding as necessary.
+ * The packet is then sent to the next graph node.
+ */
+
+typedef struct
+{
+
+  /* MAC table (bihash), obtained from l2fib at init */
+ BVT (clib_bihash) * mac_table;
+
+ /* next node index for the L3 input node of each ethertype */
+ next_by_ethertype_t l3_next;
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} l2fwd_main_t;
+
+typedef struct
+{
+ /* per-pkt trace data */
+ u8 src[6];
+ u8 dst[6];
+ u32 sw_if_index;
+ u16 bd_index;
+} l2fwd_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_l2fwd_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2fwd_trace_t *t = va_arg (*args, l2fwd_trace_t *);
+
+ s = format (s, "l2-fwd: sw_if_index %d dst %U src %U bd_index %d",
+ t->sw_if_index,
+ format_ethernet_address, t->dst,
+ format_ethernet_address, t->src, t->bd_index);
+ return s;
+}
+
+l2fwd_main_t l2fwd_main;
+
+static vlib_node_registration_t l2fwd_node;
+
+#define foreach_l2fwd_error \
+_(L2FWD, "L2 forward packets") \
+_(FLOOD, "L2 forward misses") \
+_(HIT, "L2 forward hits") \
+_(BVI_BAD_MAC, "BVI L3 MAC mismatch") \
+_(BVI_ETHERTYPE, "BVI packet with unhandled ethertype") \
+_(FILTER_DROP, "Filter Mac Drop") \
+_(REFLECT_DROP, "Reflection Drop")
+
+typedef enum
+{
+#define _(sym,str) L2FWD_ERROR_##sym,
+ foreach_l2fwd_error
+#undef _
+ L2FWD_N_ERROR,
+} l2fwd_error_t;
+
+static char *l2fwd_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2fwd_error
+#undef _
+};
+
+typedef enum
+{
+ L2FWD_NEXT_L2_OUTPUT,
+ L2FWD_NEXT_FLOOD,
+ L2FWD_NEXT_DROP,
+ L2FWD_N_NEXT,
+} l2fwd_next_t;
+
+/** Forward one packet based on the mac table lookup result. */
+
+static_always_inline void
+l2fwd_process (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ l2fwd_main_t * msm,
+ vlib_error_main_t * em,
+ vlib_buffer_t * b0,
+ u32 sw_if_index0, l2fib_entry_result_t * result0, u32 * next0)
+{
+ if (PREDICT_FALSE (result0->raw == ~0))
+ {
+      /*
+       * Lookup miss: flood the packet if unknown-unicast flooding is
+       * enabled on the input interface, otherwise drop it.
+       */
+ if (vnet_buffer (b0)->l2.feature_bitmap & L2INPUT_FEAT_UU_FLOOD)
+ {
+ *next0 = L2FWD_NEXT_FLOOD;
+ }
+ else
+ {
+ /* Flooding is disabled */
+ b0->error = node->errors[L2FWD_ERROR_FLOOD];
+ *next0 = L2FWD_NEXT_DROP;
+ }
+
+ }
+ else
+ {
+
+ /* lookup hit, forward packet */
+#ifdef COUNTERS
+ em->counters[node_counter_base_index + L2FWD_ERROR_HIT] += 1;
+#endif
+
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = result0->fields.sw_if_index;
+ *next0 = L2FWD_NEXT_L2_OUTPUT;
+
+      /* reflection check: don't forward the packet back out its input interface */
+ if (PREDICT_FALSE (sw_if_index0 == result0->fields.sw_if_index))
+ {
+ b0->error = node->errors[L2FWD_ERROR_REFLECT_DROP];
+ *next0 = L2FWD_NEXT_DROP;
+
+ /* perform filter check */
+ }
+ else if (PREDICT_FALSE (result0->fields.filter))
+ {
+ b0->error = node->errors[L2FWD_ERROR_FILTER_DROP];
+ *next0 = L2FWD_NEXT_DROP;
+
+ /* perform BVI check */
+ }
+ else if (PREDICT_FALSE (result0->fields.bvi))
+ {
+ u32 rc;
+ rc = l2_to_bvi (vm,
+ msm->vnet_main,
+ b0,
+ vnet_buffer (b0)->sw_if_index[VLIB_TX],
+ &msm->l3_next, next0);
+
+ if (PREDICT_FALSE (rc))
+ {
+ if (rc == TO_BVI_ERR_BAD_MAC)
+ {
+ b0->error = node->errors[L2FWD_ERROR_BVI_BAD_MAC];
+ *next0 = L2FWD_NEXT_DROP;
+ }
+ else if (rc == TO_BVI_ERR_ETHERTYPE)
+ {
+ b0->error = node->errors[L2FWD_ERROR_BVI_ETHERTYPE];
+ *next0 = L2FWD_NEXT_DROP;
+ }
+ }
+ }
+ }
+}
+
+
+static uword
+l2fwd_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ l2fwd_next_t next_index;
+ l2fwd_main_t *msm = &l2fwd_main;
+ vlib_node_t *n = vlib_get_node (vm, l2fwd_node.index);
+ CLIB_UNUSED (u32 node_counter_base_index) = n->error_heap_index;
+ vlib_error_main_t *em = &vm->error_main;
+ l2fib_entry_key_t cached_key;
+ l2fib_entry_result_t cached_result;
+
+ /* Clear the one-entry cache in case mac table was updated */
+ cached_key.raw = ~0;
+ cached_result.raw = ~0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors; /* number of packets to process */
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ /* get space to enqueue frame to graph node "next_index" */
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 8 && n_left_to_next >= 4)
+ {
+ u32 bi0, bi1, bi2, bi3;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ u32 next0, next1, next2, next3;
+ u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
+ ethernet_header_t *h0, *h1, *h2, *h3;
+ l2fib_entry_key_t key0, key1, key2, key3;
+ l2fib_entry_result_t result0, result1, result2, result3;
+ u32 bucket0, bucket1, bucket2, bucket3;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p4, *p5, *p6, *p7;
+
+ p4 = vlib_get_buffer (vm, from[4]);
+ p5 = vlib_get_buffer (vm, from[5]);
+ p6 = vlib_get_buffer (vm, from[6]);
+ p7 = vlib_get_buffer (vm, from[7]);
+
+ vlib_prefetch_buffer_header (p4, LOAD);
+ vlib_prefetch_buffer_header (p5, LOAD);
+ vlib_prefetch_buffer_header (p6, LOAD);
+ vlib_prefetch_buffer_header (p7, LOAD);
+
+ CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+	  /* speculatively enqueue b0 through b3 to the current next frame */
+ /* bi is "buffer index", b is pointer to the buffer */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ to_next[2] = bi2 = from[2];
+ to_next[3] = bi3 = from[3];
+ from += 4;
+ to_next += 4;
+ n_left_from -= 4;
+ n_left_to_next -= 4;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ b2 = vlib_get_buffer (vm, bi2);
+ b3 = vlib_get_buffer (vm, bi3);
+
+ /* RX interface handles */
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX];
+ sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX];
+
+ h0 = vlib_buffer_get_current (b0);
+ h1 = vlib_buffer_get_current (b1);
+ h2 = vlib_buffer_get_current (b2);
+ h3 = vlib_buffer_get_current (b3);
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2fwd_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->bd_index = vnet_buffer (b0)->l2.bd_index;
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2fwd_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->sw_if_index = sw_if_index1;
+ t->bd_index = vnet_buffer (b1)->l2.bd_index;
+ clib_memcpy (t->src, h1->src_address, 6);
+ clib_memcpy (t->dst, h1->dst_address, 6);
+ }
+ if (b2->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2fwd_trace_t *t =
+ vlib_add_trace (vm, node, b2, sizeof (*t));
+ t->sw_if_index = sw_if_index2;
+ t->bd_index = vnet_buffer (b2)->l2.bd_index;
+ clib_memcpy (t->src, h2->src_address, 6);
+ clib_memcpy (t->dst, h2->dst_address, 6);
+ }
+ if (b3->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2fwd_trace_t *t =
+ vlib_add_trace (vm, node, b3, sizeof (*t));
+ t->sw_if_index = sw_if_index3;
+ t->bd_index = vnet_buffer (b3)->l2.bd_index;
+ clib_memcpy (t->src, h3->src_address, 6);
+ clib_memcpy (t->dst, h3->dst_address, 6);
+ }
+ }
+
+	  /* process 4 pkts */
+#ifdef COUNTERS
+ em->counters[node_counter_base_index + L2FWD_ERROR_L2FWD] += 4;
+#endif
+ /* *INDENT-OFF* */
+ l2fib_lookup_4 (msm->mac_table, &cached_key, &cached_result,
+ h0->dst_address, h1->dst_address,
+ h2->dst_address, h3->dst_address,
+ vnet_buffer (b0)->l2.bd_index,
+ vnet_buffer (b1)->l2.bd_index,
+ vnet_buffer (b2)->l2.bd_index,
+ vnet_buffer (b3)->l2.bd_index,
+ &key0, /* not used */
+ &key1, /* not used */
+ &key2, /* not used */
+ &key3, /* not used */
+ &bucket0, /* not used */
+ &bucket1, /* not used */
+ &bucket2, /* not used */
+ &bucket3, /* not used */
+ &result0,
+ &result1,
+ &result2,
+ &result3);
+ /* *INDENT-ON* */
+ l2fwd_process (vm, node, msm, em, b0, sw_if_index0, &result0,
+ &next0);
+ l2fwd_process (vm, node, msm, em, b1, sw_if_index1, &result1,
+ &next1);
+ l2fwd_process (vm, node, msm, em, b2, sw_if_index2, &result2,
+ &next2);
+ l2fwd_process (vm, node, msm, em, b3, sw_if_index3, &result3,
+ &next3);
+
+ /* verify speculative enqueues, maybe switch current next frame */
+	  /* if next0 through next3 all equal next_index then nothing special needs to be done */
+ vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, bi2, bi3,
+ next0, next1, next2, next3);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u32 sw_if_index0;
+ ethernet_header_t *h0;
+ l2fib_entry_key_t key0;
+ l2fib_entry_result_t result0;
+ u32 bucket0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ h0 = vlib_buffer_get_current (b0);
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ l2fwd_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->bd_index = vnet_buffer (b0)->l2.bd_index;
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ }
+
+ /* process 1 pkt */
+#ifdef COUNTERS
+ em->counters[node_counter_base_index + L2FWD_ERROR_L2FWD] += 1;
+#endif
+ l2fib_lookup_1 (msm->mac_table, &cached_key, &cached_result, h0->dst_address, vnet_buffer (b0)->l2.bd_index, &key0, /* not used */
+ &bucket0, /* not used */
+ &result0);
+ l2fwd_process (vm, node, msm, em, b0, sw_if_index0, &result0,
+ &next0);
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2fwd_node,static) = {
+ .function = l2fwd_node_fn,
+ .name = "l2-fwd",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2fwd_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2fwd_error_strings),
+ .error_strings = l2fwd_error_strings,
+
+ .n_next_nodes = L2FWD_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [L2FWD_NEXT_L2_OUTPUT] = "l2-output",
+ [L2FWD_NEXT_FLOOD] = "l2-flood",
+ [L2FWD_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2fwd_node, l2fwd_node_fn)
+ clib_error_t *l2fwd_init (vlib_main_t * vm)
+{
+ l2fwd_main_t *mp = &l2fwd_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main ();
+
+ /* init the hash table ptr */
+ mp->mac_table = get_mac_table ();
+
+ /* Initialize the next nodes for each ethertype */
+ next_by_ethertype_init (&mp->l3_next);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2fwd_init);
+
+
+/** Add the L3 input node for this ethertype to the next nodes structure. */
+void
+l2fwd_register_input_type (vlib_main_t * vm,
+ ethernet_type_t type, u32 node_index)
+{
+ l2fwd_main_t *mp = &l2fwd_main;
+ u32 next_index;
+
+ next_index = vlib_node_add_next (vm, l2fwd_node.index, node_index);
+
+ next_by_ethertype_register (&mp->l3_next, type, next_index);
+}
+
+
+/**
+ * Set subinterface forward enable/disable.
+ * The CLI format is:
+ * set interface l2 forward <interface> [disable]
+ */
+static clib_error_t *
+int_fwd (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 sw_if_index;
+ u32 enable;
+
+ if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ enable = 1;
+ if (unformat (input, "disable"))
+ {
+ enable = 0;
+ }
+
+ /* set the interface flag */
+ if (l2input_intf_config (sw_if_index)->xconnect)
+ {
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_XCONNECT, enable);
+ }
+ else
+ {
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_FWD, enable);
+ }
+
+done:
+ return error;
+}
+
+/*?
+ * Layer 2 unicast forwarding can be enabled and disabled on each
+ * interface and on each bridge-domain. Use this command to
+ * manage interfaces. It is enabled by default.
+ *
+ * @cliexpar
+ * Example of how to enable forwarding:
+ * @cliexcmd{set interface l2 forward GigabitEthernet0/8/0}
+ * Example of how to disable forwarding:
+ * @cliexcmd{set interface l2 forward GigabitEthernet0/8/0 disable}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (int_fwd_cli, static) = {
+ .path = "set interface l2 forward",
+ .short_help = "set interface l2 forward <interface> [disable]",
+ .function = int_fwd,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_fwd.h b/src/vnet/l2/l2_fwd.h
new file mode 100644
index 00000000000..3968732dbc2
--- /dev/null
+++ b/src/vnet/l2/l2_fwd.h
@@ -0,0 +1,36 @@
+/*
+ * l2_fwd.h : layer 2 forwarding using l2fib
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_l2fwd_h
+#define included_l2fwd_h
+
+#include <vlib/vlib.h>
+#include <vnet/ethernet/ethernet.h>
+
+
+void
+l2fwd_register_input_type (vlib_main_t * vm,
+ ethernet_type_t type, u32 node_index);
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_input.c b/src/vnet/l2/l2_input.c
new file mode 100644
index 00000000000..a104ec9eebb
--- /dev/null
+++ b/src/vnet/l2/l2_input.c
@@ -0,0 +1,1116 @@
+/*
+ * l2_input.c : layer 2 input packet processing
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ethernet/packet.h>
+#include <vnet/ip/ip_packet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vlib/cli.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_output.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_bvi.h>
+#include <vnet/l2/l2_fib.h>
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/cache.h>
+
+/**
+ * @file
+ * @brief Interface Input Mode (Layer 2 Cross-Connect or Bridge / Layer 3).
+ *
+ * This file contains the CLI Commands that modify the input mode of an
+ * interface. For interfaces in a Layer 2 cross-connect, all packets
+ * received on one interface will be transmitted to the other. For
+ * interfaces in a bridge-domain, packets will be forwarded to other
+ * interfaces in the same bridge-domain based on destination mac address.
+ * For interfaces in Layer 3 mode, the packets will be routed.
+ */
+
+/* Feature graph node names */
+static char *l2input_feat_names[] = {
+#define _(sym,name) name,
+ foreach_l2input_feat
+#undef _
+};
+
+char **
+l2input_get_feat_names (void)
+{
+ return l2input_feat_names;
+}
+
+
+typedef struct
+{
+ /* per-pkt trace data */
+ u8 src[6];
+ u8 dst[6];
+ u32 next_index;
+ u32 sw_if_index;
+} l2input_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_l2input_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2input_trace_t *t = va_arg (*args, l2input_trace_t *);
+
+ s = format (s, "l2-input: sw_if_index %d dst %U src %U",
+ t->sw_if_index,
+ format_ethernet_address, t->dst,
+ format_ethernet_address, t->src);
+ return s;
+}
+
+l2input_main_t l2input_main;
+
+#define foreach_l2input_error \
+_(L2INPUT, "L2 input packets") \
+_(DROP, "L2 input drops")
+
+typedef enum
+{
+#define _(sym,str) L2INPUT_ERROR_##sym,
+ foreach_l2input_error
+#undef _
+ L2INPUT_N_ERROR,
+} l2input_error_t;
+
+static char *l2input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2input_error
+#undef _
+};
+
+typedef enum
+{
+ L2INPUT_NEXT_LEARN,
+ L2INPUT_NEXT_FWD,
+ L2INPUT_NEXT_DROP,
+ L2INPUT_N_NEXT,
+} l2input_next_t;
+
+
+static_always_inline void
+classify_and_dispatch (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ u32 cpu_index,
+ l2input_main_t * msm, vlib_buffer_t * b0, u32 * next0)
+{
+ /*
+ * Load L2 input feature struct
+ * Load bridge domain struct
+ * Parse ethernet header to determine unicast/mcast/broadcast
+ * take L2 input stat
+ * classify packet as IP/UDP/TCP, control, other
+ * mask feature bitmap
+ * go to first node in bitmap
+ * Later: optimize VTM
+ *
+ * For L2XC,
+ * set tx sw-if-handle
+ */
+
+ u16 ethertype;
+ u8 protocol;
+ l2_input_config_t *config;
+ l2_bridge_domain_t *bd_config;
+ u16 bd_index0;
+ u32 feature_bitmap;
+ u32 feat_mask;
+ ethernet_header_t *h0;
+ u8 *l3h0;
+ u32 sw_if_index0;
+
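+/* read a u16 at an arbitrary (possibly unaligned) byte offset */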
+#define get_u16(addr) ( *((u16 *)(addr)) )
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ h0 = vlib_buffer_get_current (b0);
+ l3h0 = (u8 *) h0 + vnet_buffer (b0)->l2.l2_len;
+
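+  /* the ethertype immediately precedes the L3 header, after any VLAN tags */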
+ ethertype = clib_net_to_host_u16 (get_u16 (l3h0 - 2));
+ feat_mask = ~0;
+
+ /* Get config for the input interface */
+ config = vec_elt_at_index (msm->configs, sw_if_index0);
+
+ /* Save split horizon group */
+ vnet_buffer (b0)->l2.shg = config->shg;
+
+ /* determine layer2 kind for stat and mask */
+ if (PREDICT_FALSE (ethernet_address_cast (h0->dst_address)))
+ {
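+      /*
+       * Speculatively read what would be the IPv6 next-header field; it is
+       * only consulted below when the ethertype is actually IP6.
+       */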
+ protocol = ((ip6_header_t *) l3h0)->protocol;
+
+ /* Disable bridge forwarding (flooding will execute instead if not xconnect) */
+ feat_mask &= ~(L2INPUT_FEAT_FWD | L2INPUT_FEAT_UU_FLOOD);
+
+ /* Disable ARP-term for non-ARP and non-ICMP6 packet */
+ if (ethertype != ETHERNET_TYPE_ARP &&
+ (ethertype != ETHERNET_TYPE_IP6 || protocol != IP_PROTOCOL_ICMP6))
+ feat_mask &= ~(L2INPUT_FEAT_ARP_TERM);
+ }
+ else
+ {
+ /*
+ * Check for from-BVI processing - set SHG of unicast packets from BVI
+ * to 0 so it is not dropped for VXLAN tunnels or other ports with the
+ * same SHG as that of the BVI.
+ */
+ if (PREDICT_FALSE (vnet_buffer (b0)->sw_if_index[VLIB_TX] ==
+ L2INPUT_BVI))
+ vnet_buffer (b0)->l2.shg = 0;
+ }
+
+
+ if (config->xconnect)
+ {
+ /* Set the output interface */
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = config->output_sw_if_index;
+ }
+ else
+ {
+ /* Do bridge-domain processing */
+ bd_index0 = config->bd_index;
+ /* save BD ID for next feature graph nodes */
+ vnet_buffer (b0)->l2.bd_index = bd_index0;
+
+ /* Get config for the bridge domain interface */
+ bd_config = vec_elt_at_index (msm->bd_configs, bd_index0);
+
+ /*
+ * Process bridge domain feature enables.
+ * To perform learning/flooding/forwarding, the corresponding bit
+ * must be enabled in both the input interface config and in the
+ * bridge domain config. In the bd_bitmap, bits for features other
+ * than learning/flooding/forwarding should always be set.
+ */
+ feat_mask = feat_mask & bd_config->feature_bitmap;
+ }
+
+ /* mask out features from bitmap using packet type and bd config */
+ feature_bitmap = config->feature_bitmap & feat_mask;
+
+ /* save for next feature graph nodes */
+ vnet_buffer (b0)->l2.feature_bitmap = feature_bitmap;
+
+ /* Determine the next node */
+ *next0 = feat_bitmap_get_next_node_index (msm->feat_next_node_index,
+ feature_bitmap);
+}
+
+
+static uword
+l2input_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ l2input_next_t next_index;
+ l2input_main_t *msm = &l2input_main;
+ u32 cpu_index = os_get_cpu_number ();
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors; /* number of packets to process */
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ /* get space to enqueue frame to graph node "next_index" */
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 8 && n_left_to_next >= 4)
+ {
+ u32 bi0, bi1, bi2, bi3;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ u32 next0, next1, next2, next3;
+ u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p4, *p5, *p6, *p7;
+
+ p4 = vlib_get_buffer (vm, from[4]);
+ p5 = vlib_get_buffer (vm, from[5]);
+ p6 = vlib_get_buffer (vm, from[6]);
+ p7 = vlib_get_buffer (vm, from[7]);
+
+ /* Prefetch the buffer header and packet for the N+2 loop iteration */
+ vlib_prefetch_buffer_header (p4, LOAD);
+ vlib_prefetch_buffer_header (p5, LOAD);
+ vlib_prefetch_buffer_header (p6, LOAD);
+ vlib_prefetch_buffer_header (p7, LOAD);
+
+ CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
+
+ /*
+ * Don't bother prefetching the bridge-domain config (which
+ * depends on the input config above). Only a small number of
+ * bridge domains are expected. Plus the structure is small
+ * and several fit in a cache line.
+ */
+ }
+
+	  /* speculatively enqueue b0 through b3 to the current next frame */
+ /* bi is "buffer index", b is pointer to the buffer */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ to_next[2] = bi2 = from[2];
+ to_next[3] = bi3 = from[3];
+ from += 4;
+ to_next += 4;
+ n_left_from -= 4;
+ n_left_to_next -= 4;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ b2 = vlib_get_buffer (vm, bi2);
+ b3 = vlib_get_buffer (vm, bi3);
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ /* RX interface handles */
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX];
+ sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX];
+
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ ethernet_header_t *h0 = vlib_buffer_get_current (b0);
+ l2input_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ ethernet_header_t *h1 = vlib_buffer_get_current (b1);
+ l2input_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->sw_if_index = sw_if_index1;
+ clib_memcpy (t->src, h1->src_address, 6);
+ clib_memcpy (t->dst, h1->dst_address, 6);
+ }
+ if (b2->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ ethernet_header_t *h2 = vlib_buffer_get_current (b2);
+ l2input_trace_t *t =
+ vlib_add_trace (vm, node, b2, sizeof (*t));
+ t->sw_if_index = sw_if_index2;
+ clib_memcpy (t->src, h2->src_address, 6);
+ clib_memcpy (t->dst, h2->dst_address, 6);
+ }
+ if (b3->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ ethernet_header_t *h3 = vlib_buffer_get_current (b3);
+ l2input_trace_t *t =
+ vlib_add_trace (vm, node, b3, sizeof (*t));
+ t->sw_if_index = sw_if_index3;
+ clib_memcpy (t->src, h3->src_address, 6);
+ clib_memcpy (t->dst, h3->dst_address, 6);
+ }
+ }
+
+ vlib_node_increment_counter (vm, l2input_node.index,
+ L2INPUT_ERROR_L2INPUT, 4);
+
+ classify_and_dispatch (vm, node, cpu_index, msm, b0, &next0);
+ classify_and_dispatch (vm, node, cpu_index, msm, b1, &next1);
+ classify_and_dispatch (vm, node, cpu_index, msm, b2, &next2);
+ classify_and_dispatch (vm, node, cpu_index, msm, b3, &next3);
+
+ /* verify speculative enqueues, maybe switch current next frame */
+	  /* if next0 through next3 all equal next_index then nothing special needs to be done */
+ vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, bi2, bi3,
+ next0, next1, next2, next3);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u32 sw_if_index0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ ethernet_header_t *h0 = vlib_buffer_get_current (b0);
+ l2input_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->sw_if_index = sw_if_index0;
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ }
+
+ vlib_node_increment_counter (vm, l2input_node.index,
+ L2INPUT_ERROR_L2INPUT, 1);
+
+ classify_and_dispatch (vm, node, cpu_index, msm, b0, &next0);
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2input_node) = {
+ .function = l2input_node_fn,
+ .name = "l2-input",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2input_trace,
+ .format_buffer = format_ethernet_header_with_length,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2input_error_strings),
+ .error_strings = l2input_error_strings,
+
+ .n_next_nodes = L2INPUT_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [L2INPUT_NEXT_LEARN] = "l2-learn",
+ [L2INPUT_NEXT_FWD] = "l2-fwd",
+ [L2INPUT_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2input_node, l2input_node_fn)
+ clib_error_t *l2input_init (vlib_main_t * vm)
+{
+ l2input_main_t *mp = &l2input_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main ();
+
+ /* Get packets RX'd from L2 interfaces */
+ ethernet_register_l2_input (vm, l2input_node.index);
+
+  /* Create the config vector: pre-allocate and zero the first 100 sw interface entries */
+  vec_validate (mp->configs, 100);
+
+ /* Initialize the feature next-node indexes */
+ feat_bitmap_init_next_nodes (vm,
+ l2input_node.index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ mp->feat_next_node_index);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2input_init);
+
+
+/** Get a pointer to the config for the given interface. */
+l2_input_config_t *
+l2input_intf_config (u32 sw_if_index)
+{
+ l2input_main_t *mp = &l2input_main;
+
+ vec_validate (mp->configs, sw_if_index);
+ return vec_elt_at_index (mp->configs, sw_if_index);
+}
+
+/** Enable (or disable) the feature in the bitmap for the given interface. */
+u32
+l2input_intf_bitmap_enable (u32 sw_if_index, u32 feature_bitmap, u32 enable)
+{
+ l2input_main_t *mp = &l2input_main;
+ l2_input_config_t *config;
+
+ vec_validate (mp->configs, sw_if_index);
+ config = vec_elt_at_index (mp->configs, sw_if_index);
+
+ if (enable)
+ {
+ config->feature_bitmap |= feature_bitmap;
+ }
+ else
+ {
+ config->feature_bitmap &= ~feature_bitmap;
+ }
+
+ return config->feature_bitmap;
+}
+
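+/** Set the feature bits selected by feat_mask to feat_value for a bridge domain. */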
+u32
+l2input_set_bridge_features (u32 bd_index, u32 feat_mask, u32 feat_value)
+{
+ l2_bridge_domain_t *bd_config;
+ vec_validate (l2input_main.bd_configs, bd_index);
+ bd_config = vec_elt_at_index (l2input_main.bd_configs, bd_index);
+ bd_validate (bd_config);
+ bd_config->feature_bitmap =
+ (bd_config->feature_bitmap & ~feat_mask) | feat_value;
+ return bd_config->feature_bitmap;
+}
+
+/**
+ * Set the subinterface to run in l2 or l3 mode.
+ * For L3 mode, just the sw_if_index is specified.
+ * For bridged mode, the bd id and bvi flag are also specified.
+ * For xconnect mode, the peer sw_if_index is also specified.
+ * Return 0 if ok, or non-0 if there was an error.
+ */
+
+u32
+set_int_l2_mode (vlib_main_t * vm, vnet_main_t * vnet_main, u32 mode, u32 sw_if_index, u32 bd_index, /* for bridged interface */
+ u32 bvi, /* the bridged interface is the BVI */
+ u32 shg, /* the bridged interface's split horizon group */
+ u32 xc_sw_if_index) /* peer interface for xconnect */
+{
+ l2input_main_t *mp = &l2input_main;
+ l2output_main_t *l2om = &l2output_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_hw_interface_t *hi;
+ l2_output_config_t *out_config;
+ l2_input_config_t *config;
+ l2_bridge_domain_t *bd_config;
+ l2_flood_member_t member;
+ u64 mac;
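+  /* net change in this port's L2 interface count, applied to hi->l2_if_count below */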
+ i32 l2_if_adjust = 0;
+ u32 slot;
+
+ hi = vnet_get_sup_hw_interface (vnet_main, sw_if_index);
+ config = l2input_intf_config (sw_if_index);
+
+ if (config->bridge)
+ {
+ /* Interface is already in bridge mode. Undo the existing config. */
+ bd_config = vec_elt_at_index (mp->bd_configs, config->bd_index);
+
+ /* remove interface from flood vector */
+ bd_remove_member (bd_config, sw_if_index);
+
+ /* undo any BVI-related config */
+ if (bd_config->bvi_sw_if_index == sw_if_index)
+ {
+ bd_config->bvi_sw_if_index = ~0;
+ config->bvi = 0;
+
+ /* delete the l2fib entry for the bvi interface */
+ mac = *((u64 *) hi->hw_address);
+ l2fib_del_entry (mac, config->bd_index);
+
+ /* Make loop output node send packet back to ethernet-input node */
+ slot =
+ vlib_node_add_named_next_with_slot (vm, hi->tx_node_index,
+ "ethernet-input",
+ VNET_SIMULATED_ETHERNET_TX_NEXT_ETHERNET_INPUT);
+ ASSERT (slot == VNET_SIMULATED_ETHERNET_TX_NEXT_ETHERNET_INPUT);
+ }
+ l2_if_adjust--;
+ }
+ else if (config->xconnect)
+ {
+ l2_if_adjust--;
+ }
+
+ /*
+ * Directs the l2 output path to work out the interface
+ * output next-arc itself. Needed when recycling a sw_if_index.
+ */
+ vec_validate_init_empty (l2om->next_nodes.output_node_index_vec,
+ sw_if_index, ~0);
+ l2om->next_nodes.output_node_index_vec[sw_if_index] = ~0;
+
+ /* Initialize the l2-input configuration for the interface */
+ if (mode == MODE_L3)
+ {
+ /* Set L2 config to BD index 0 so that if any packet accidentally
+ * came in on L2 path, it will be dropped in BD 0 */
+ config->xconnect = 0;
+ config->bridge = 0;
+ config->shg = 0;
+ config->bd_index = 0;
+ config->feature_bitmap = L2INPUT_FEAT_DROP;
+
+ /* Make sure any L2-output packet to this interface now in L3 mode is
+ * dropped. This may happen if L2 FIB MAC entry is stale */
+ l2om->next_nodes.output_node_index_vec[sw_if_index] =
+ L2OUTPUT_NEXT_BAD_INTF;
+ }
+ else if (mode == MODE_L2_CLASSIFY)
+ {
+ config->xconnect = 1;
+ config->bridge = 0;
+ config->output_sw_if_index = xc_sw_if_index;
+
+ /* Make sure last-chance drop is configured */
+ config->feature_bitmap |=
+ L2INPUT_FEAT_DROP | L2INPUT_FEAT_INPUT_CLASSIFY;
+
+ /* Make sure bridging features are disabled */
+ config->feature_bitmap &=
+ ~(L2INPUT_FEAT_LEARN | L2INPUT_FEAT_FWD | L2INPUT_FEAT_FLOOD);
+ shg = 0; /* not used in xconnect */
+
+      /* Ensure all packets go to ethernet-input */
+ ethernet_set_rx_redirect (vnet_main, hi, 1);
+ }
+ else
+ {
+
+ if (mode == MODE_L2_BRIDGE)
+ {
+ /*
+ * Remove a check that the interface must be an Ethernet.
+ * Specifically so we can bridge to L3 tunnel interfaces.
+ * Here's the check:
+ * if (hi->hw_class_index != ethernet_hw_interface_class.index)
+ *
+ */
+ if (!hi)
+ return MODE_ERROR_ETH; /* non-ethernet */
+
+ config->xconnect = 0;
+ config->bridge = 1;
+ config->bd_index = bd_index;
+
+ /*
+ * Enable forwarding, flooding, learning and ARP termination by default
+ * (note that ARP term is disabled on BD feature bitmap by default)
+ */
+ config->feature_bitmap |= L2INPUT_FEAT_FWD | L2INPUT_FEAT_UU_FLOOD |
+ L2INPUT_FEAT_FLOOD | L2INPUT_FEAT_LEARN | L2INPUT_FEAT_ARP_TERM;
+
+ /* Make sure last-chance drop is configured */
+ config->feature_bitmap |= L2INPUT_FEAT_DROP;
+
+ /* Make sure xconnect is disabled */
+ config->feature_bitmap &= ~L2INPUT_FEAT_XCONNECT;
+
+ /* Set up bridge domain */
+ vec_validate (mp->bd_configs, bd_index);
+ bd_config = vec_elt_at_index (mp->bd_configs, bd_index);
+ bd_validate (bd_config);
+
+ /* TODO: think: add l2fib entry even for non-bvi interface? */
+
+ /* Do BVI interface initializations */
+ if (bvi)
+ {
+ /* ensure BD has no bvi interface (or replace that one with this??) */
+ if (bd_config->bvi_sw_if_index != ~0)
+ {
+ return MODE_ERROR_BVI_DEF; /* bd already has a bvi interface */
+ }
+ bd_config->bvi_sw_if_index = sw_if_index;
+ config->bvi = 1;
+
+ /* create the l2fib entry for the bvi interface */
+ mac = *((u64 *) hi->hw_address);
+ l2fib_add_entry (mac, bd_index, sw_if_index, 1, 0, 1); /* static + bvi */
+
+ /* Disable learning by default. no use since l2fib entry is static. */
+ config->feature_bitmap &= ~L2INPUT_FEAT_LEARN;
+
+ /* Make loop output node send packet to l2-input node */
+ slot =
+ vlib_node_add_named_next_with_slot (vm, hi->tx_node_index,
+ "l2-input",
+ VNET_SIMULATED_ETHERNET_TX_NEXT_ETHERNET_INPUT);
+ ASSERT (slot == VNET_SIMULATED_ETHERNET_TX_NEXT_ETHERNET_INPUT);
+ }
+
+ /* Add interface to bridge-domain flood vector */
+ member.sw_if_index = sw_if_index;
+ member.flags = bvi ? L2_FLOOD_MEMBER_BVI : L2_FLOOD_MEMBER_NORMAL;
+ member.shg = shg;
+ bd_add_member (bd_config, &member);
+
+ }
+ else
+ {
+ config->xconnect = 1;
+ config->bridge = 0;
+ config->output_sw_if_index = xc_sw_if_index;
+
+ /* Make sure last-chance drop is configured */
+ config->feature_bitmap |= L2INPUT_FEAT_DROP;
+
+ /* Make sure bridging features are disabled */
+ config->feature_bitmap &=
+ ~(L2INPUT_FEAT_LEARN | L2INPUT_FEAT_FWD | L2INPUT_FEAT_FLOOD);
+
+ config->feature_bitmap |= L2INPUT_FEAT_XCONNECT;
+ shg = 0; /* not used in xconnect */
+ }
+
+ /* set up split-horizon group */
+ config->shg = shg;
+ out_config = l2output_intf_config (sw_if_index);
+ out_config->shg = shg;
+
+ /*
+ * Test: remove this when non-IP features can be configured.
+ * Enable a non-IP feature to test IP feature masking
+ * config->feature_bitmap |= L2INPUT_FEAT_CTRL_PKT;
+ */
+
+ l2_if_adjust++;
+ }
+
+ /* Adjust count of L2 interfaces */
+ hi->l2_if_count += l2_if_adjust;
+
+ if (hi->hw_class_index == ethernet_hw_interface_class.index)
+ {
+ if ((hi->l2_if_count == 1) && (l2_if_adjust == 1))
+ {
+ /* Just added first L2 interface on this port */
+
+ /* Set promiscuous mode on the l2 interface */
+ ethernet_set_flags (vnet_main, hi->hw_if_index,
+ ETHERNET_INTERFACE_FLAG_ACCEPT_ALL);
+
+ /* ensure all packets go to ethernet-input */
+ ethernet_set_rx_redirect (vnet_main, hi, 1);
+
+ }
+ else if ((hi->l2_if_count == 0) && (l2_if_adjust == -1))
+ {
+ /* Just removed only L2 subinterface on this port */
+
+ /* Disable promiscuous mode on the l2 interface */
+ ethernet_set_flags (vnet_main, hi->hw_if_index, 0);
+
+ /* Allow ip packets to go directly to ip4-input etc */
+ ethernet_set_rx_redirect (vnet_main, hi, 0);
+ }
+ }
+
+ /* Set up the L2/L3 flag in the interface parsing tables */
+ ethernet_sw_interface_set_l2_mode (vnm, sw_if_index, (mode != MODE_L3));
+
+ return 0;
+}
+
+/**
+ * Set subinterface in bridging mode with a bridge-domain ID.
+ * The CLI format is:
+ * set interface l2 bridge <interface> <bd> [bvi] [split-horizon-group]
+ */
+static clib_error_t *
+int_l2_bridge (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 bd_index, bd_id;
+ u32 sw_if_index;
+ u32 bvi;
+ u32 rc;
+ u32 shg;
+
+ if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ if (!unformat (input, "%d", &bd_id))
+ {
+ error = clib_error_return (0, "expected bridge domain ID `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ bd_index = bd_find_or_add_bd_index (&bd_main, bd_id);
+
+ /* optional bvi */
+ bvi = unformat (input, "bvi");
+
+ /* optional split horizon group */
+ shg = 0;
+ (void) unformat (input, "%d", &shg);
+
+ /* set the interface mode */
+ if ((rc =
+ set_int_l2_mode (vm, vnm, MODE_L2_BRIDGE, sw_if_index, bd_index, bvi,
+ shg, 0)))
+ {
+ if (rc == MODE_ERROR_ETH)
+ {
+ error = clib_error_return (0, "bridged interface must be ethernet",
+ format_unformat_error, input);
+ }
+ else if (rc == MODE_ERROR_BVI_DEF)
+ {
+ error =
+ clib_error_return (0, "bridge-domain already has a bvi interface",
+ format_unformat_error, input);
+ }
+ else
+ {
+ error = clib_error_return (0, "invalid configuration for interface",
+ format_unformat_error, input);
+ }
+ goto done;
+ }
+
+done:
+ return error;
+}
+
+/*?
+ * Use this command to put an interface into a Layer 2 bridge domain. If a
+ * bridge-domain with the provided bridge-domain-id does not exist, it
+ * will be created. Interfaces in a bridge-domain forward packets to
+ * other interfaces in the same bridge-domain based on destination mac
+ * address. To remove an interface from the Layer 2 bridge domain,
+ * put the interface in a different mode, for example Layer 3 mode.
+ *
+ * Optionally, an interface can be added to a Layer 2 bridge-domain as
+ * a Bridged Virtual Interface (bvi). Only one interface in a Layer 2
+ * bridge-domain can be a bvi.
+ *
+ * Optionally, a split-horizon group can also be specified. This defaults
+ * to 0 if not specified.
+ *
+ * @cliexpar
+ * Example of how to configure a Layer 2 bridge-domain with three
+ * interfaces (where 200 is the bridge-domain-id):
+ * @cliexcmd{set interface l2 bridge GigabitEthernet0/8/0.200 200}
+ * This interface is added as a BVI interface:
+ * @cliexcmd{set interface l2 bridge GigabitEthernet0/9/0.200 200 bvi}
+ * This interface also has a split-horizon group of 1 specified:
+ * @cliexcmd{set interface l2 bridge GigabitEthernet0/a/0.200 200 1}
+ * Example of how to remove an interface from a Layer 2 bridge-domain:
+ * @cliexcmd{set interface l3 GigabitEthernet0/a/0.200}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (int_l2_bridge_cli, static) = {
+ .path = "set interface l2 bridge",
+ .short_help = "set interface l2 bridge <interface> <bridge-domain-id> [bvi] [shg]",
+ .function = int_l2_bridge,
+};
+/* *INDENT-ON* */
+
+/**
+ * Set subinterface in xconnect mode with another interface.
+ * The CLI format is:
+ * set interface l2 xconnect <interface> <peer interface>
+ */
+static clib_error_t *
+int_l2_xc (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 sw_if_index;
+ u32 xc_sw_if_index;
+
+ if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ if (!unformat_user
+ (input, unformat_vnet_sw_interface, vnm, &xc_sw_if_index))
+ {
+ error = clib_error_return (0, "unknown peer interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ /* set the interface mode */
+ if (set_int_l2_mode
+ (vm, vnm, MODE_L2_XC, sw_if_index, 0, 0, 0, xc_sw_if_index))
+ {
+ error = clib_error_return (0, "invalid configuration for interface",
+ format_unformat_error, input);
+ goto done;
+ }
+
+done:
+ return error;
+}
+
+/*?
+ * Use this command to put an interface into Layer 2 cross-connect mode.
+ * Both interfaces must be in this mode for bi-directional traffic. All
+ * packets received on one interface will be transmitted to the other.
+ * To remove the Layer 2 cross-connect, put the interface in a different
+ * mode, for example Layer 3 mode.
+ *
+ * @cliexpar
+ * Example of how to configure a Layer 2 cross-connect between two interfaces:
+ * @cliexcmd{set interface l2 xconnect GigabitEthernet0/8/0.300 GigabitEthernet0/9/0.300}
+ * @cliexcmd{set interface l2 xconnect GigabitEthernet0/9/0.300 GigabitEthernet0/8/0.300}
+ * Example of how to remove a Layer 2 cross-connect:
+ * @cliexcmd{set interface l3 GigabitEthernet0/8/0.300}
+ * @cliexcmd{set interface l3 GigabitEthernet0/9/0.300}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (int_l2_xc_cli, static) = {
+ .path = "set interface l2 xconnect",
+ .short_help = "set interface l2 xconnect <interface> <peer interface>",
+ .function = int_l2_xc,
+};
+/* *INDENT-ON* */
+
+/**
+ * Set subinterface in L3 mode.
+ * The CLI format is:
+ * set interface l3 <interface>
+ */
+static clib_error_t *
+int_l3 (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 sw_if_index;
+
+ if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ /* set the interface mode */
+ if (set_int_l2_mode (vm, vnm, MODE_L3, sw_if_index, 0, 0, 0, 0))
+ {
+ error = clib_error_return (0, "invalid configuration for interface",
+ format_unformat_error, input);
+ goto done;
+ }
+
+done:
+ return error;
+}
+
+/*?
+ * Modify the packet processing mode of the interface to Layer 3, which
+ * implies packets will be routed. This is the default mode of an interface.
+ * Use this command to remove an interface from a Layer 2 cross-connect or a
+ * Layer 2 bridge.
+ *
+ * @cliexpar
+ * Example of how to set the mode of an interface to Layer 3:
+ * @cliexcmd{set interface l3 GigabitEthernet0/8/0.200}
+?*/
+/* *INDENT-OFF* */
+ VLIB_CLI_COMMAND (int_l3_cli, static) = {
+ .path = "set interface l3",
+ .short_help = "set interface l3 <interface>",
+ .function = int_l3,
+};
+/* *INDENT-ON* */
+
+/**
+ * Show interface mode.
+ * The CLI format is:
+ * show mode [<if-name1> <if-name2> ...]
+ */
+static clib_error_t *
+show_int_mode (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ char *mode;
+ u8 *args;
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_sw_interface_t *si, *sis = 0;
+ l2input_main_t *mp = &l2input_main;
+ l2_input_config_t *config;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ u32 sw_if_index;
+
+ /* See if user wants to show specific interface */
+ if (unformat
+ (input, "%U", unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ si = pool_elt_at_index (im->sw_interfaces, sw_if_index);
+ vec_add1 (sis, si[0]);
+ }
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ }
+
+ if (vec_len (sis) == 0) /* Get all interfaces */
+ {
+ /* Gather interfaces. */
+ sis = vec_new (vnet_sw_interface_t, pool_elts (im->sw_interfaces));
+ _vec_len (sis) = 0;
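+      /* vec_new sets the length to the capacity; reset it so vec_add1 fills from index 0 */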
+ /* *INDENT-OFF* */
+ pool_foreach (si, im->sw_interfaces, ({ vec_add1 (sis, si[0]); }));
+ /* *INDENT-ON* */
+ }
+
+ vec_foreach (si, sis)
+ {
+ vec_validate (mp->configs, si->sw_if_index);
+ config = vec_elt_at_index (mp->configs, si->sw_if_index);
+ if (config->bridge)
+ {
+ u32 bd_id;
+ mode = "l2 bridge";
+ bd_id = l2input_main.bd_configs[config->bd_index].bd_id;
+
+ args = format (0, "bd_id %d%s%d", bd_id,
+ config->bvi ? " bvi shg " : " shg ", config->shg);
+ }
+ else if (config->xconnect)
+ {
+ mode = "l2 xconnect";
+ args = format (0, "%U",
+ format_vnet_sw_if_index_name,
+ vnm, config->output_sw_if_index);
+ }
+ else
+ {
+ mode = "l3";
+ args = format (0, " ");
+ }
+ vlib_cli_output (vm, "%s %U %v\n",
+ mode,
+ format_vnet_sw_if_index_name,
+ vnm, si->sw_if_index, args);
+ vec_free (args);
+ }
+
+done:
+ vec_free (sis);
+
+ return error;
+}
+
+/*?
+ * Show the packet processing mode (Layer 2 cross-connect, Layer 2 bridge,
+ * Layer 3 routed) of all interfaces and sub-interfaces, or limit the
+ * output to just the provided list of interfaces and sub-interfaces.
+ * The output shows the mode, the interface, and if the interface is
+ * a member of a bridge, the bridge-domain-id and the split horizon group (shg).
+ *
+ * @cliexpar
+ * Example of displaying the mode of all interfaces:
+ * @cliexstart{show mode}
+ * l3 local0
+ * l3 GigabitEthernet0/8/0
+ * l3 GigabitEthernet0/9/0
+ * l3 GigabitEthernet0/a/0
+ * l2 bridge GigabitEthernet0/8/0.200 bd_id 200 shg 0
+ * l2 bridge GigabitEthernet0/9/0.200 bd_id 200 shg 0
+ * l2 bridge GigabitEthernet0/a/0.200 bd_id 200 shg 0
+ * l2 xconnect GigabitEthernet0/8/0.300 GigabitEthernet0/9/0.300
+ * l2 xconnect GigabitEthernet0/9/0.300 GigabitEthernet0/8/0.300
+ * @cliexend
+ * Example of displaying the mode of a selected list of interfaces:
+ * @cliexstart{show mode GigabitEthernet0/8/0 GigabitEthernet0/8/0.200}
+ * l3 GigabitEthernet0/8/0
+ * l2 bridge GigabitEthernet0/8/0.200 bd_id 200 shg 0
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_l2_mode, static) = {
+ .path = "show mode",
+ .short_help = "show mode [<if-name1> <if-name2> ...]",
+ .function = show_int_mode,
+};
+/* *INDENT-ON* */
+
+#define foreach_l2_init_function \
+_(feat_bitmap_drop_init) \
+_(l2fib_init) \
+_(l2_input_classify_init) \
+_(l2bd_init) \
+_(l2fwd_init) \
+_(l2_inacl_init) \
+_(l2input_init) \
+_(l2_vtr_init) \
+_(l2_invtr_init) \
+_(l2_efp_filter_init) \
+_(l2learn_init) \
+_(l2flood_init) \
+_(l2_outacl_init) \
+_(l2output_init) \
+_(l2_patch_init) \
+_(l2_xcrw_init)
+
+clib_error_t *
+l2_init (vlib_main_t * vm)
+{
+ clib_error_t *error;
+
+#define _(a) do { \
+ if ((error = vlib_call_init_function (vm, a))) return error; } \
+while (0);
+ foreach_l2_init_function;
+#undef _
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_input.h b/src/vnet/l2/l2_input.h
new file mode 100644
index 00000000000..f3fada6a7d3
--- /dev/null
+++ b/src/vnet/l2/l2_input.h
@@ -0,0 +1,266 @@
+/*
+ * l2_input.h : layer 2 input packet processing
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_vnet_l2_input_h
+#define included_vnet_l2_input_h
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/l2/l2_bd.h>
+#include <vnet/ethernet/packet.h>
+#include <vnet/ip/ip.h>
+
+/* Per-subinterface L2 feature configuration */
+
+typedef struct
+{
+
+ union
+ {
+    u16 bd_index;		/* bridge domain index */
+ u32 output_sw_if_index; /* for xconnect */
+ };
+
+ /* Interface mode. If both are 0, this interface is in L3 mode */
+ u8 xconnect;
+ u8 bridge;
+
+ /* this is the bvi interface for the bridge-domain */
+ u8 bvi;
+
+ /* config for which input features are configured on this interface */
+ u32 feature_bitmap;
+
+ /* some of these flags are also in the feature bitmap */
+ u8 learn_enable;
+ u8 fwd_enable;
+ u8 flood_enable;
+
+ /* split horizon group */
+ u8 shg;
+
+} l2_input_config_t;
+
+
+typedef struct
+{
+
+ /* Next nodes for the feature bitmap */
+ u32 feat_next_node_index[32];
+
+ /* config vector indexed by sw_if_index */
+ l2_input_config_t *configs;
+
+ /* bridge domain config vector indexed by bd_index */
+ l2_bridge_domain_t *bd_configs;
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} l2input_main_t;
+
+extern l2input_main_t l2input_main;
+
+extern vlib_node_registration_t l2input_node;
+
+static_always_inline l2_bridge_domain_t *
+l2input_bd_config_from_index (l2input_main_t * l2im, u32 bd_index)
+{
+ l2_bridge_domain_t *bd_config;
+
+ bd_config = vec_elt_at_index (l2im->bd_configs, bd_index);
+ return bd_is_valid (bd_config) ? bd_config : NULL;
+}
+
+/* Marker in sw_if_index[VLIB_TX] indicating the packet came from the BVI ((u32) ~0 - 1) */
+#define L2INPUT_BVI ((u32) (~0-1))
+
+/* L2 input features */
+
+/* Mappings from feature ID to graph node name */
+#define foreach_l2input_feat \
+ _(DROP, "feature-bitmap-drop") \
+ _(XCONNECT, "l2-output") \
+ _(FLOOD, "l2-flood") \
+ _(ARP_TERM, "arp-term-l2bd") \
+ _(UU_FLOOD, "l2-flood") \
+ _(FWD, "l2-fwd") \
+ _(RW, "l2-rw") \
+ _(LEARN, "l2-learn") \
+ _(VTR, "l2-input-vtr") \
+ _(VPATH, "vpath-input-l2") \
+ _(ACL, "l2-input-acl") \
+ _(POLICER_CLAS, "l2-policer-classify") \
+ _(INPUT_CLASSIFY, "l2-input-classify")
+
+/* Feature bitmap positions */
+typedef enum
+{
+#define _(sym,str) L2INPUT_FEAT_##sym##_BIT,
+ foreach_l2input_feat
+#undef _
+ L2INPUT_N_FEAT,
+} l2input_feat_t;
+
+/* Feature bit masks */
+typedef enum
+{
+#define _(sym,str) L2INPUT_FEAT_##sym = (1<<L2INPUT_FEAT_##sym##_BIT),
+ foreach_l2input_feat
+#undef _
+} l2input_feat_masks_t;
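+
+/*
+ * Illustrative expansion: the first list entry, DROP, yields
+ * L2INPUT_FEAT_DROP_BIT = 0 in l2input_feat_t and
+ * L2INPUT_FEAT_DROP = (1 << 0) in l2input_feat_masks_t; the remaining
+ * features follow in list order.
+ */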
+
+/** Return an array of strings containing graph node names of each feature */
+char **l2input_get_feat_names (void);
+
+
+static_always_inline u8
+bd_feature_flood (l2_bridge_domain_t * bd_config)
+{
+ return ((bd_config->feature_bitmap & L2INPUT_FEAT_FLOOD) ==
+ L2INPUT_FEAT_FLOOD);
+}
+
+static_always_inline u8
+bd_feature_uu_flood (l2_bridge_domain_t * bd_config)
+{
+ return ((bd_config->feature_bitmap & L2INPUT_FEAT_UU_FLOOD) ==
+ L2INPUT_FEAT_UU_FLOOD);
+}
+
+static_always_inline u8
+bd_feature_forward (l2_bridge_domain_t * bd_config)
+{
+ return ((bd_config->feature_bitmap & L2INPUT_FEAT_FWD) == L2INPUT_FEAT_FWD);
+}
+
+static_always_inline u8
+bd_feature_learn (l2_bridge_domain_t * bd_config)
+{
+ return ((bd_config->feature_bitmap & L2INPUT_FEAT_LEARN) ==
+ L2INPUT_FEAT_LEARN);
+}
+
+static_always_inline u8
+bd_feature_arp_term (l2_bridge_domain_t * bd_config)
+{
+ return ((bd_config->feature_bitmap & L2INPUT_FEAT_ARP_TERM) ==
+ L2INPUT_FEAT_ARP_TERM);
+}
+
+/** Masks for eliminating features that do not apply to a packet */
+
+/** Get a pointer to the config for the given interface */
+l2_input_config_t *l2input_intf_config (u32 sw_if_index);
+
+/* Enable (or disable) the feature in the bitmap for the given interface */
+u32 l2input_intf_bitmap_enable (u32 sw_if_index,
+ u32 feature_bitmap, u32 enable);
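+
+/*
+ * Usage sketch (illustrative sw_if_index): enable MAC learning on
+ * interface 1:
+ *
+ *   l2input_intf_bitmap_enable (1, L2INPUT_FEAT_LEARN, 1);
+ */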
+
+/* Set the bridge-domain feature bits selected by feat_mask to feat_value */
+u32 l2input_set_bridge_features (u32 bd_index, u32 feat_mask, u32 feat_value);
+
+
+#define MODE_L3 0		/* L3 mode: no L2 processing on the interface */
+#define MODE_L2_BRIDGE 1	/* L2 bridged: member port of a bridge domain */
+#define MODE_L2_XC 2		/* L2 cross-connect to another interface */
+#define MODE_L2_CLASSIFY 3	/* L2 classify mode */
+
+/* error returns from set_int_l2_mode */
+#define MODE_ERROR_ETH 1	/* interface is not an ethernet interface */
+#define MODE_ERROR_BVI_DEF 2	/* bridge domain already has a BVI member */
+
+u32 set_int_l2_mode (vlib_main_t * vm,
+ vnet_main_t * vnet_main,
+ u32 mode,
+ u32 sw_if_index,
+ u32 bd_index, u32 bvi, u32 shg, u32 xc_sw_if_index);
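+
+/*
+ * Example (illustrative values): place sw_if_index 1 into the bridge
+ * domain at bd_index 0 as a regular (non-BVI) port in split-horizon
+ * group 0; the xconnect peer argument is unused in bridge mode:
+ *
+ *   set_int_l2_mode (vm, vnet_main, MODE_L2_BRIDGE, 1, 0, 0, 0, 0);
+ */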
+
+static inline void
+vnet_update_l2_len (vlib_buffer_t * b)
+{
+ ethernet_header_t *eth;
+ u16 ethertype;
+ u8 vlan_count = 0;
+
+  /* point at current l2 hdr */
+ eth = vlib_buffer_get_current (b);
+
+ /*
+ * l2-output pays no attention to this
+ * but the tag push/pop code on an l2 subif needs it.
+ *
+ * Determine l2 header len, check for up to 2 vlans
+ */
+ vnet_buffer (b)->l2.l2_len = sizeof (ethernet_header_t);
+ ethertype = clib_net_to_host_u16 (eth->type);
+ if (ethernet_frame_is_tagged (ethertype))
+ {
+ ethernet_vlan_header_t *vlan;
+ vnet_buffer (b)->l2.l2_len += sizeof (*vlan);
+ vlan_count = 1;
+ vlan = (void *) (eth + 1);
+ ethertype = clib_net_to_host_u16 (vlan->type);
+ if (ethertype == ETHERNET_TYPE_VLAN)
+ {
+ vnet_buffer (b)->l2.l2_len += sizeof (*vlan);
+ vlan_count = 2;
+ }
+ }
+ ethernet_buffer_set_vlan_count (b, vlan_count);
+}
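+
+/*
+ * Example: an untagged frame yields l2_len = 14, a single-tagged frame 18
+ * and a double-tagged frame 22, with the vlan count recorded as 0, 1 or 2
+ * respectively.
+ */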
+
+/*
+ * Compute the flow hash of an ethernet packet: use a 5-tuple hash if the L3
+ * packet is ip4 or ip6, otherwise hash on smac/dmac/etype.
+ * The vlib buffer current pointer is expected to be at the ethernet header
+ * and vnet l2.l2_len is expected to be set up already.
+ */
+static inline u32
+vnet_l2_compute_flow_hash (vlib_buffer_t * b)
+{
+ ethernet_header_t *eh = vlib_buffer_get_current (b);
+ u8 *l3h = (u8 *) eh + vnet_buffer (b)->l2.l2_len;
+ u16 ethertype = clib_net_to_host_u16 (*(u16 *) (l3h - 2));
+
+ if (ethertype == ETHERNET_TYPE_IP4)
+ return ip4_compute_flow_hash ((ip4_header_t *) l3h, IP_FLOW_HASH_DEFAULT);
+ else if (ethertype == ETHERNET_TYPE_IP6)
+ return ip6_compute_flow_hash ((ip6_header_t *) l3h, IP_FLOW_HASH_DEFAULT);
+ else
+ {
+ u32 a, b, c;
+ u32 *ap = (u32 *) & eh->dst_address[2];
+ u32 *bp = (u32 *) & eh->src_address[2];
+ a = *ap;
+ b = *bp;
+ c = ethertype;
+ hash_v3_mix32 (a, b, c);
+ hash_v3_finalize32 (a, b, c);
+ return c;
+ }
+}
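+
+/*
+ * Note: for non-IP ethertypes the hash mixes the low four bytes of the
+ * destination and source MAC addresses with the ethertype, so all frames
+ * between the same MAC pair hash identically.
+ */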
+
+#endif
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_input_acl.c b/src/vnet/l2/l2_input_acl.c
new file mode 100644
index 00000000000..104fcd15b85
--- /dev/null
+++ b/src/vnet/l2/l2_input_acl.c
@@ -0,0 +1,434 @@
+/*
+ * l2_input_acl.c : layer 2 input acl processing
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ethernet/packet.h>
+#include <vnet/ip/ip_packet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vlib/cli.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/feat_bitmap.h>
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/cache.h>
+
+#include <vnet/classify/vnet_classify.h>
+#include <vnet/classify/input_acl.h>
+
+typedef struct
+{
+
+ /* Next nodes for each feature */
+ u32 feat_next_node_index[32];
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} l2_inacl_main_t;
+
+typedef struct
+{
+ u32 sw_if_index;
+ u32 next_index;
+ u32 table_index;
+ u32 offset;
+} l2_inacl_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_l2_inacl_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2_inacl_trace_t *t = va_arg (*args, l2_inacl_trace_t *);
+
+ s = format (s, "INACL: sw_if_index %d, next_index %d, table %d, offset %d",
+ t->sw_if_index, t->next_index, t->table_index, t->offset);
+ return s;
+}
+
+l2_inacl_main_t l2_inacl_main;
+
+static vlib_node_registration_t l2_inacl_node;
+
+#define foreach_l2_inacl_error \
+_(NONE, "valid input ACL packets") \
+_(MISS, "input ACL misses") \
+_(HIT, "input ACL hits") \
+_(CHAIN_HIT, "input ACL hits after chain walk") \
+_(TABLE_MISS, "input ACL table-miss drops") \
+_(SESSION_DENY, "input ACL session deny drops")
+
+
+typedef enum
+{
+#define _(sym,str) L2_INACL_ERROR_##sym,
+ foreach_l2_inacl_error
+#undef _
+ L2_INACL_N_ERROR,
+} l2_inacl_error_t;
+
+static char *l2_inacl_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2_inacl_error
+#undef _
+};
+
+static uword
+l2_inacl_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ acl_next_index_t next_index;
+ l2_inacl_main_t *msm = &l2_inacl_main;
+ input_acl_main_t *am = &input_acl_main;
+ vnet_classify_main_t *vcm = am->vnet_classify_main;
+ input_acl_table_id_t tid = INPUT_ACL_TABLE_L2;
+ f64 now = vlib_time_now (vm);
+ u32 hits = 0;
+ u32 misses = 0;
+ u32 chain_hits = 0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors; /* number of packets to process */
+ next_index = node->cached_next_index;
+
+ /* First pass: compute hashes */
+ while (n_left_from > 2)
+ {
+ vlib_buffer_t *b0, *b1;
+ u32 bi0, bi1;
+ u8 *h0, *h1;
+ u32 sw_if_index0, sw_if_index1;
+ u32 table_index0, table_index1;
+ vnet_classify_table_t *t0, *t1;
+
+ /* prefetch next iteration */
+ {
+ vlib_buffer_t *p1, *p2;
+
+ p1 = vlib_get_buffer (vm, from[1]);
+ p2 = vlib_get_buffer (vm, from[2]);
+
+ vlib_prefetch_buffer_header (p1, STORE);
+ CLIB_PREFETCH (p1->data, CLIB_CACHE_LINE_BYTES, STORE);
+ vlib_prefetch_buffer_header (p2, STORE);
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+
+ bi1 = from[1];
+ b1 = vlib_get_buffer (vm, bi1);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ table_index0 =
+ am->classify_table_index_by_sw_if_index[tid][sw_if_index0];
+
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ table_index1 =
+ am->classify_table_index_by_sw_if_index[tid][sw_if_index1];
+
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ t1 = pool_elt_at_index (vcm->tables, table_index1);
+
+ if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
+ h0 = (void *) vlib_buffer_get_current (b0) + t0->current_data_offset;
+ else
+ h0 = b0->data;
+
+ vnet_buffer (b0)->l2_classify.hash =
+ vnet_classify_hash_packet (t0, (u8 *) h0);
+
+ vnet_classify_prefetch_bucket (t0, vnet_buffer (b0)->l2_classify.hash);
+
+ if (t1->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
+ h1 = (void *) vlib_buffer_get_current (b1) + t1->current_data_offset;
+ else
+ h1 = b1->data;
+
+ vnet_buffer (b1)->l2_classify.hash =
+ vnet_classify_hash_packet (t1, (u8 *) h1);
+
+ vnet_classify_prefetch_bucket (t1, vnet_buffer (b1)->l2_classify.hash);
+
+ vnet_buffer (b0)->l2_classify.table_index = table_index0;
+
+ vnet_buffer (b1)->l2_classify.table_index = table_index1;
+
+ from += 2;
+ n_left_from -= 2;
+ }
+
+ while (n_left_from > 0)
+ {
+ vlib_buffer_t *b0;
+ u32 bi0;
+ u8 *h0;
+ u32 sw_if_index0;
+ u32 table_index0;
+ vnet_classify_table_t *t0;
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ table_index0 =
+ am->classify_table_index_by_sw_if_index[tid][sw_if_index0];
+
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
+ h0 = (void *) vlib_buffer_get_current (b0) + t0->current_data_offset;
+ else
+ h0 = b0->data;
+
+ vnet_buffer (b0)->l2_classify.hash =
+ vnet_classify_hash_packet (t0, (u8 *) h0);
+
+ vnet_buffer (b0)->l2_classify.table_index = table_index0;
+ vnet_classify_prefetch_bucket (t0, vnet_buffer (b0)->l2_classify.hash);
+
+ from++;
+ n_left_from--;
+ }
+
+ next_index = node->cached_next_index;
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Not enough load/store slots to dual loop... */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0 = ACL_NEXT_INDEX_DENY;
+ u32 table_index0;
+ vnet_classify_table_t *t0;
+ vnet_classify_entry_t *e0;
+ u64 hash0;
+ u8 *h0;
+ u8 error0;
+
+ /* Stride 3 seems to work best */
+ if (PREDICT_TRUE (n_left_from > 3))
+ {
+ vlib_buffer_t *p1 = vlib_get_buffer (vm, from[3]);
+ vnet_classify_table_t *tp1;
+ u32 table_index1;
+ u64 phash1;
+
+ table_index1 = vnet_buffer (p1)->l2_classify.table_index;
+
+ if (PREDICT_TRUE (table_index1 != ~0))
+ {
+ tp1 = pool_elt_at_index (vcm->tables, table_index1);
+ phash1 = vnet_buffer (p1)->l2_classify.hash;
+ vnet_classify_prefetch_entry (tp1, phash1);
+ }
+ }
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ table_index0 = vnet_buffer (b0)->l2_classify.table_index;
+ e0 = 0;
+ t0 = 0;
+
+ /* Feature bitmap update */
+ vnet_buffer (b0)->l2.feature_bitmap &= ~L2INPUT_FEAT_ACL;
+
+ vnet_buffer (b0)->l2_classify.opaque_index = ~0;
+ /* Determine the next node */
+ next0 = feat_bitmap_get_next_node_index (msm->feat_next_node_index,
+ vnet_buffer (b0)->
+ l2.feature_bitmap);
+
+ if (PREDICT_TRUE (table_index0 != ~0))
+ {
+ hash0 = vnet_buffer (b0)->l2_classify.hash;
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
+ h0 =
+ (void *) vlib_buffer_get_current (b0) +
+ t0->current_data_offset;
+ else
+ h0 = b0->data;
+
+ e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
+ if (e0)
+ {
+ vnet_buffer (b0)->l2_classify.opaque_index
+ = e0->opaque_index;
+ vlib_buffer_advance (b0, e0->advance);
+
+ next0 = (e0->next_index < ACL_NEXT_INDEX_N_NEXT) ?
+ e0->next_index : next0;
+
+ hits++;
+
+ error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
+ L2_INACL_ERROR_SESSION_DENY : L2_INACL_ERROR_NONE;
+ b0->error = node->errors[error0];
+ }
+ else
+ {
+ while (1)
+ {
+ if (PREDICT_TRUE (t0->next_table_index != ~0))
+ t0 = pool_elt_at_index (vcm->tables,
+ t0->next_table_index);
+ else
+ {
+ next0 =
+ (t0->miss_next_index <
+ ACL_NEXT_INDEX_N_NEXT) ? t0->miss_next_index :
+ next0;
+
+ misses++;
+
+ error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
+ L2_INACL_ERROR_TABLE_MISS : L2_INACL_ERROR_NONE;
+ b0->error = node->errors[error0];
+ break;
+ }
+
+ if (t0->current_data_flag ==
+ CLASSIFY_FLAG_USE_CURR_DATA)
+ h0 =
+ (void *) vlib_buffer_get_current (b0) +
+ t0->current_data_offset;
+ else
+ h0 = b0->data;
+
+ hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
+ e0 = vnet_classify_find_entry
+ (t0, (u8 *) h0, hash0, now);
+ if (e0)
+ {
+ vlib_buffer_advance (b0, e0->advance);
+ next0 = (e0->next_index < ACL_NEXT_INDEX_N_NEXT) ?
+ e0->next_index : next0;
+ hits++;
+ chain_hits++;
+
+ error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
+ L2_INACL_ERROR_SESSION_DENY : L2_INACL_ERROR_NONE;
+ b0->error = node->errors[error0];
+ break;
+ }
+ }
+ }
+ }
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ l2_inacl_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->next_index = next0;
+ t->table_index = t0 ? t0 - vcm->tables : ~0;
+ t->offset = (t0 && e0) ? vnet_classify_get_offset (t0, e0) : ~0;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ L2_INACL_ERROR_MISS, misses);
+ vlib_node_increment_counter (vm, node->node_index,
+ L2_INACL_ERROR_HIT, hits);
+ vlib_node_increment_counter (vm, node->node_index,
+ L2_INACL_ERROR_CHAIN_HIT, chain_hits);
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2_inacl_node,static) = {
+ .function = l2_inacl_node_fn,
+ .name = "l2-input-acl",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2_inacl_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2_inacl_error_strings),
+ .error_strings = l2_inacl_error_strings,
+
+ .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [ACL_NEXT_INDEX_DENY] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2_inacl_node, l2_inacl_node_fn);
+
+clib_error_t *
+l2_inacl_init (vlib_main_t * vm)
+{
+ l2_inacl_main_t *mp = &l2_inacl_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main ();
+
+ /* Initialize the feature next-node indexes */
+ feat_bitmap_init_next_nodes (vm,
+ l2_inacl_node.index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ mp->feat_next_node_index);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2_inacl_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_input_classify.c b/src/vnet/l2/l2_input_classify.c
new file mode 100644
index 00000000000..497df192f39
--- /dev/null
+++ b/src/vnet/l2/l2_input_classify.c
@@ -0,0 +1,655 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * l2_classify.c
+ */
+
+#include <vnet/l2/l2_classify.h>
+#include <vnet/api_errno.h>
+
+/**
+ * @file
+ * @brief L2 input classifier.
+ *
+ * @sa @ref vnet/vnet/classify/vnet_classify.c
+ * @sa @ref vnet/vnet/classify/vnet_classify.h
+ */
+
+/**
+ * @brief l2_input_classifier packet trace record.
+ */
+typedef struct
+{
+ /** interface handle for the ith packet */
+ u32 sw_if_index;
+ /** graph arc index selected for this packet */
+ u32 next_index;
+ /** classifier table which provided the final result */
+ u32 table_index;
+ /** offset in classifier heap of the corresponding session */
+ u32 session_offset;
+} l2_input_classify_trace_t;
+
+/**
+ * @brief vlib node runtime.
+ */
+typedef struct
+{
+ /** use-case independent main object pointer */
+ vnet_classify_main_t *vcm;
+ /** l2 input classifier main object pointer */
+ l2_input_classify_main_t *l2cm;
+} l2_input_classify_runtime_t;
+
+/** Packet trace format function. */
+static u8 *
+format_l2_input_classify_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2_input_classify_trace_t *t = va_arg (*args, l2_input_classify_trace_t *);
+
+ s = format (s, "l2-classify: sw_if_index %d, table %d, offset %x, next %d",
+ t->sw_if_index, t->table_index, t->session_offset,
+ t->next_index);
+ return s;
+}
+
+/** l2 input classifier main data structure. */
+l2_input_classify_main_t l2_input_classify_main;
+
+vlib_node_registration_t l2_input_classify_node;
+
+#define foreach_l2_input_classify_error \
+_(MISS, "Classify misses") \
+_(HIT, "Classify hits") \
+_(CHAIN_HIT, "Classify hits after chain walk") \
+_(DROP, "L2 Classify Drops")
+
+typedef enum
+{
+#define _(sym,str) L2_INPUT_CLASSIFY_ERROR_##sym,
+ foreach_l2_input_classify_error
+#undef _
+ L2_INPUT_CLASSIFY_N_ERROR,
+} l2_input_classify_error_t;
+
+static char *l2_input_classify_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2_input_classify_error
+#undef _
+};
+
+/**
+ * @brief l2 input classifier node.
+ * @node l2-input-classify
+ *
+ * This is the l2 input classifier dispatch node
+ *
+ * @param vm vlib_main_t corresponding to the current thread.
+ * @param node vlib_node_runtime_t data for this node.
+ * @param frame vlib_frame_t whose contents should be dispatched.
+ *
+ * @par Graph mechanics: buffer metadata, next index usage
+ *
+ * @em Uses:
+ * - <code>(l2_input_classify_runtime_t *)
+ * rt->classify_table_index_by_sw_if_index</code>
+ * - Head of the per-interface, per-protocol classifier table chain
+ * for a specific interface.
+ * - @c ~0 => send pkts to the next feature in the L2 feature chain.
+ * - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
+ * - Indicates the @c sw_if_index value of the interface that the
+ * packet was received on.
+ * - <code>vnet_buffer(b0)->l2.feature_bitmap</code>
+ * - Used to steer packets across l2 features enabled on the interface
+ * - <code>(vnet_classify_entry_t) e0->next_index</code>
+ * - Used to steer traffic when the classifier hits on a session
+ * - <code>(vnet_classify_entry_t) e0->advance</code>
+ * - Signed quantity applied via <code>vlib_buffer_advance</code>
+ * when the classifier hits on a session
+ * - <code>(vnet_classify_table_t) t0->miss_next_index</code>
+ * - Used to steer traffic when the classifier misses
+ *
+ * @em Sets:
+ * - <code>vnet_buffer (b0)->l2_classify.table_index</code>
+ * - Classifier table index of the first classifier table in
+ * the classifier table chain
+ * - <code>vnet_buffer (b0)->l2_classify.hash</code>
+ * - Bounded-index extensible hash corresponding to the
+ * masked fields in the current packet
+ * - <code>vnet_buffer (b0)->l2.feature_bitmap</code>
+ * - Used to steer packets across l2 features enabled on the interface
+ * - <code>vnet_buffer (b0)->l2_classify.opaque_index</code>
+ * - Copied from the classifier session object upon classifier hit
+ *
+ * @em Counters:
+ * - <code>L2_INPUT_CLASSIFY_ERROR_MISS</code> Classifier misses
+ * - <code>L2_INPUT_CLASSIFY_ERROR_HIT</code> Classifier hits
+ * - <code>L2_INPUT_CLASSIFY_ERROR_CHAIN_HIT</code>
+ * Classifier hits in other than the first table
+ */
+
+static uword
+l2_input_classify_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ l2_input_classify_next_t next_index;
+ l2_input_classify_main_t *cm = &l2_input_classify_main;
+ vnet_classify_main_t *vcm = cm->vnet_classify_main;
+ l2_input_classify_runtime_t *rt =
+ (l2_input_classify_runtime_t *) node->runtime_data;
+ u32 feature_bitmap;
+ u32 hits = 0;
+ u32 misses = 0;
+ u32 chain_hits = 0;
+ f64 now;
+ u32 n_next_nodes;
+
+ n_next_nodes = node->n_next_nodes;
+
+ now = vlib_time_now (vm);
+
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+
+ /* First pass: compute hash */
+
+ while (n_left_from > 2)
+ {
+ vlib_buffer_t *b0, *b1;
+ u32 bi0, bi1;
+ ethernet_header_t *h0, *h1;
+ u32 sw_if_index0, sw_if_index1;
+ u16 type0, type1;
+ int type_index0, type_index1;
+ vnet_classify_table_t *t0, *t1;
+ u32 table_index0, table_index1;
+ u64 hash0, hash1;
+
+
+ /* prefetch next iteration */
+ {
+ vlib_buffer_t *p1, *p2;
+
+ p1 = vlib_get_buffer (vm, from[1]);
+ p2 = vlib_get_buffer (vm, from[2]);
+
+ vlib_prefetch_buffer_header (p1, STORE);
+ CLIB_PREFETCH (p1->data, CLIB_CACHE_LINE_BYTES, STORE);
+ vlib_prefetch_buffer_header (p2, STORE);
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = vlib_buffer_get_current (b0);
+
+ bi1 = from[1];
+ b1 = vlib_get_buffer (vm, bi1);
+ h1 = vlib_buffer_get_current (b1);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ vnet_buffer (b0)->l2_classify.table_index = ~0;
+
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ vnet_buffer (b1)->l2_classify.table_index = ~0;
+
+ /* Select classifier table based on ethertype */
+ type0 = clib_net_to_host_u16 (h0->type);
+ type1 = clib_net_to_host_u16 (h1->type);
+
+ type_index0 = (type0 == ETHERNET_TYPE_IP4)
+ ? L2_INPUT_CLASSIFY_TABLE_IP4 : L2_INPUT_CLASSIFY_TABLE_OTHER;
+ type_index0 = (type0 == ETHERNET_TYPE_IP6)
+ ? L2_INPUT_CLASSIFY_TABLE_IP6 : type_index0;
+
+ type_index1 = (type1 == ETHERNET_TYPE_IP4)
+ ? L2_INPUT_CLASSIFY_TABLE_IP4 : L2_INPUT_CLASSIFY_TABLE_OTHER;
+ type_index1 = (type1 == ETHERNET_TYPE_IP6)
+ ? L2_INPUT_CLASSIFY_TABLE_IP6 : type_index1;
+
+ vnet_buffer (b0)->l2_classify.table_index =
+ table_index0 =
+ rt->l2cm->classify_table_index_by_sw_if_index
+ [type_index0][sw_if_index0];
+
+ if (table_index0 != ~0)
+ {
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ vnet_buffer (b0)->l2_classify.hash = hash0 =
+ vnet_classify_hash_packet (t0, (u8 *) h0);
+ vnet_classify_prefetch_bucket (t0, hash0);
+ }
+
+ vnet_buffer (b1)->l2_classify.table_index =
+ table_index1 =
+ rt->l2cm->classify_table_index_by_sw_if_index
+ [type_index1][sw_if_index1];
+
+ if (table_index1 != ~0)
+ {
+ t1 = pool_elt_at_index (vcm->tables, table_index1);
+
+ vnet_buffer (b1)->l2_classify.hash = hash1 =
+ vnet_classify_hash_packet (t1, (u8 *) h1);
+ vnet_classify_prefetch_bucket (t1, hash1);
+ }
+
+ from += 2;
+ n_left_from -= 2;
+ }
+
+ while (n_left_from > 0)
+ {
+ vlib_buffer_t *b0;
+ u32 bi0;
+ ethernet_header_t *h0;
+ u32 sw_if_index0;
+ u16 type0;
+ u32 type_index0;
+ vnet_classify_table_t *t0;
+ u32 table_index0;
+ u64 hash0;
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = vlib_buffer_get_current (b0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ vnet_buffer (b0)->l2_classify.table_index = ~0;
+
+ /* Select classifier table based on ethertype */
+ type0 = clib_net_to_host_u16 (h0->type);
+
+ type_index0 = (type0 == ETHERNET_TYPE_IP4)
+ ? L2_INPUT_CLASSIFY_TABLE_IP4 : L2_INPUT_CLASSIFY_TABLE_OTHER;
+ type_index0 = (type0 == ETHERNET_TYPE_IP6)
+ ? L2_INPUT_CLASSIFY_TABLE_IP6 : type_index0;
+
+ vnet_buffer (b0)->l2_classify.table_index =
+ table_index0 = rt->l2cm->classify_table_index_by_sw_if_index
+ [type_index0][sw_if_index0];
+
+ if (table_index0 != ~0)
+ {
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ vnet_buffer (b0)->l2_classify.hash = hash0 =
+ vnet_classify_hash_packet (t0, (u8 *) h0);
+ vnet_classify_prefetch_bucket (t0, hash0);
+ }
+ from++;
+ n_left_from--;
+ }
+
+ next_index = node->cached_next_index;
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Not enough load/store slots to dual loop... */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0 = ~0; /* next l2 input feature, please... */
+ ethernet_header_t *h0;
+ u32 table_index0;
+ u64 hash0;
+ vnet_classify_table_t *t0;
+ vnet_classify_entry_t *e0;
+
+ if (PREDICT_TRUE (n_left_from > 2))
+ {
+ vlib_buffer_t *p2 = vlib_get_buffer (vm, from[2]);
+ u64 phash2;
+ u32 table_index2;
+ vnet_classify_table_t *tp2;
+
+ /*
+ * Prefetch table entry two ahead. Buffer / data
+ * were prefetched above...
+ */
+ table_index2 = vnet_buffer (p2)->l2_classify.table_index;
+
+ if (PREDICT_TRUE (table_index2 != ~0))
+ {
+ tp2 = pool_elt_at_index (vcm->tables, table_index2);
+ phash2 = vnet_buffer (p2)->l2_classify.hash;
+ vnet_classify_prefetch_entry (tp2, phash2);
+ }
+ }
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = vlib_buffer_get_current (b0);
+ table_index0 = vnet_buffer (b0)->l2_classify.table_index;
+ e0 = 0;
+ vnet_buffer (b0)->l2_classify.opaque_index = ~0;
+
+ /* Remove ourself from the feature bitmap */
+ feature_bitmap = vnet_buffer (b0)->l2.feature_bitmap
+ & ~L2INPUT_FEAT_INPUT_CLASSIFY;
+
+ /* save for next feature graph nodes */
+ vnet_buffer (b0)->l2.feature_bitmap = feature_bitmap;
+
+ if (PREDICT_TRUE (table_index0 != ~0))
+ {
+ hash0 = vnet_buffer (b0)->l2_classify.hash;
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
+ if (e0)
+ {
+ vnet_buffer (b0)->l2_classify.opaque_index
+ = e0->opaque_index;
+ vlib_buffer_advance (b0, e0->advance);
+ next0 = (e0->next_index < n_next_nodes) ?
+ e0->next_index : next0;
+ hits++;
+ }
+ else
+ {
+ while (1)
+ {
+ if (t0->next_table_index != ~0)
+ t0 = pool_elt_at_index (vcm->tables,
+ t0->next_table_index);
+ else
+ {
+ next0 = (t0->miss_next_index < n_next_nodes) ?
+ t0->miss_next_index : next0;
+ misses++;
+ break;
+ }
+
+ hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
+ e0 =
+ vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
+ if (e0)
+ {
+ vnet_buffer (b0)->l2_classify.opaque_index
+ = e0->opaque_index;
+ vlib_buffer_advance (b0, e0->advance);
+ next0 = (e0->next_index < n_next_nodes) ?
+ e0->next_index : next0;
+ hits++;
+ chain_hits++;
+ break;
+ }
+ }
+ }
+ }
+
+ if (PREDICT_FALSE (next0 == 0))
+ b0->error = node->errors[L2_INPUT_CLASSIFY_ERROR_DROP];
+
+ if (PREDICT_TRUE (next0 == ~0))
+ {
+ // Determine the next node
+ next0 =
+ feat_bitmap_get_next_node_index (cm->feat_next_node_index,
+ feature_bitmap);
+ }
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ l2_input_classify_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->table_index = table_index0;
+ t->next_index = next0;
+ t->session_offset = e0 ? vnet_classify_get_offset (t0, e0) : 0;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ L2_INPUT_CLASSIFY_ERROR_MISS, misses);
+ vlib_node_increment_counter (vm, node->node_index,
+ L2_INPUT_CLASSIFY_ERROR_HIT, hits);
+ vlib_node_increment_counter (vm, node->node_index,
+ L2_INPUT_CLASSIFY_ERROR_CHAIN_HIT, chain_hits);
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2_input_classify_node) = {
+ .function = l2_input_classify_node_fn,
+ .name = "l2-input-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2_input_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2_input_classify_error_strings),
+ .error_strings = l2_input_classify_error_strings,
+
+ .runtime_data_bytes = sizeof (l2_input_classify_runtime_t),
+
+ .n_next_nodes = L2_INPUT_CLASSIFY_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [L2_INPUT_CLASSIFY_NEXT_DROP] = "error-drop",
+ [L2_INPUT_CLASSIFY_NEXT_ETHERNET_INPUT] = "ethernet-input-not-l2",
+ [L2_INPUT_CLASSIFY_NEXT_IP4_INPUT] = "ip4-input",
+ [L2_INPUT_CLASSIFY_NEXT_IP6_INPUT] = "ip6-input",
+ [L2_INPUT_CLASSIFY_NEXT_LI] = "li-hit",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2_input_classify_node,
+ l2_input_classify_node_fn);
+
+/** l2 input classifier feature initialization. */
+clib_error_t *
+l2_input_classify_init (vlib_main_t * vm)
+{
+ l2_input_classify_main_t *cm = &l2_input_classify_main;
+ l2_input_classify_runtime_t *rt;
+
+ rt = vlib_node_get_runtime_data (vm, l2_input_classify_node.index);
+
+ cm->vlib_main = vm;
+ cm->vnet_main = vnet_get_main ();
+ cm->vnet_classify_main = &vnet_classify_main;
+
+ /* Initialize the feature next-node indexes */
+ feat_bitmap_init_next_nodes (vm,
+ l2_input_classify_node.index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ cm->feat_next_node_index);
+ rt->l2cm = cm;
+ rt->vcm = cm->vnet_classify_main;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2_input_classify_init);
+
+
+/** Enable/disable l2 input classification on a specific interface. */
+void
+vnet_l2_input_classify_enable_disable (u32 sw_if_index, int enable_disable)
+{
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_INPUT_CLASSIFY,
+ (u32) enable_disable);
+}
+
+/** @brief Set l2 per-protocol, per-interface input classification tables.
+ *
+ * @param sw_if_index interface handle
+ * @param ip4_table_index ip4 classification table index, or ~0
+ * @param ip6_table_index ip6 classification table index, or ~0
+ * @param other_table_index non-ip4, non-ip6 classification table index,
+ * or ~0
+ * @returns 0 on success, VNET_API_ERROR_NO_SUCH_TABLE, TABLE2, TABLE3
+ * if the indicated (non-~0) table does not exist.
+ */
+
+int
+vnet_l2_input_classify_set_tables (u32 sw_if_index,
+ u32 ip4_table_index,
+ u32 ip6_table_index, u32 other_table_index)
+{
+ l2_input_classify_main_t *cm = &l2_input_classify_main;
+ vnet_classify_main_t *vcm = cm->vnet_classify_main;
+
+ /* Assume that we've validated sw_if_index in the API layer */
+
+ if (ip4_table_index != ~0 &&
+ pool_is_free_index (vcm->tables, ip4_table_index))
+ return VNET_API_ERROR_NO_SUCH_TABLE;
+
+ if (ip6_table_index != ~0 &&
+ pool_is_free_index (vcm->tables, ip6_table_index))
+ return VNET_API_ERROR_NO_SUCH_TABLE2;
+
+ if (other_table_index != ~0 &&
+ pool_is_free_index (vcm->tables, other_table_index))
+ return VNET_API_ERROR_NO_SUCH_TABLE3;
+
+ vec_validate
+ (cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_IP4],
+ sw_if_index);
+
+ vec_validate
+ (cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_IP6],
+ sw_if_index);
+
+ vec_validate
+ (cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_OTHER],
+ sw_if_index);
+
+ cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_IP4]
+ [sw_if_index] = ip4_table_index;
+
+ cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_IP6]
+ [sw_if_index] = ip6_table_index;
+
+ cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_OTHER]
+ [sw_if_index] = other_table_index;
+
+ return 0;
+}
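+
+/*
+ * Usage sketch (hypothetical table index): classify only ip4 traffic on
+ * sw_if_index 1 using classifier table 5, leaving ip6 and other traffic
+ * on the normal L2 feature path:
+ *
+ *   if (vnet_l2_input_classify_set_tables (1, 5, ~0, ~0) == 0)
+ *     vnet_l2_input_classify_enable_disable (1, 1);
+ */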
+
+static clib_error_t *
+int_l2_input_classify_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 sw_if_index = ~0;
+ u32 ip4_table_index = ~0;
+ u32 ip6_table_index = ~0;
+ u32 other_table_index = ~0;
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "intfc %U", unformat_vnet_sw_interface,
+ vnm, &sw_if_index))
+ ;
+ else if (unformat (input, "ip4-table %d", &ip4_table_index))
+ ;
+ else if (unformat (input, "ip6-table %d", &ip6_table_index))
+ ;
+ else if (unformat (input, "other-table %d", &other_table_index))
+ ;
+ else
+ break;
+ }
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "interface must be specified");
+
+
+ if (ip4_table_index == ~0 && ip6_table_index == ~0
+ && other_table_index == ~0)
+ {
+ vlib_cli_output (vm, "L2 classification disabled");
+ vnet_l2_input_classify_enable_disable (sw_if_index, 0 /* enable */ );
+ return 0;
+ }
+
+ rv = vnet_l2_input_classify_set_tables (sw_if_index, ip4_table_index,
+ ip6_table_index, other_table_index);
+ switch (rv)
+ {
+ case 0:
+ vnet_l2_input_classify_enable_disable (sw_if_index, 1 /* enable */ );
+ break;
+
+ default:
+ return clib_error_return (0, "vnet_l2_input_classify_set_tables: %d",
+ rv);
+ break;
+ }
+
+ return 0;
+}
+
+/*?
+ * Configure l2 input classification.
+ *
+ * @cliexpar
+ * @cliexstart{set interface l2 input classify intfc <interface-name> [ip4-table <index>] [ip6-table <index>] [other-table <index>]}
+ * @cliexend
+ * @todo This is incomplete. This needs a detailed description and a
+ * practical example.
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (int_l2_input_classify_cli, static) = {
+ .path = "set interface l2 input classify",
+ .short_help =
+ "set interface l2 input classify intfc <interface-name> [ip4-table <n>]\n"
+ " [ip6-table <n>] [other-table <n>]",
+ .function = int_l2_input_classify_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_input_vtr.c b/src/vnet/l2/l2_input_vtr.c
new file mode 100644
index 00000000000..60a39631e87
--- /dev/null
+++ b/src/vnet/l2/l2_input_vtr.c
@@ -0,0 +1,401 @@
+/*
+ * l2_input_vtr.c : layer 2 input vlan tag rewrite processing
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ethernet/packet.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_vtr.h>
+#include <vnet/l2/l2_input_vtr.h>
+#include <vnet/l2/l2_output.h>
+
+#include <vppinfra/error.h>
+#include <vppinfra/cache.h>
+
+
+typedef struct
+{
+ /* per-pkt trace data */
+ u8 src[6];
+ u8 dst[6];
+ u8 raw[12]; /* raw data (vlans) */
+ u32 sw_if_index;
+} l2_invtr_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_l2_invtr_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2_invtr_trace_t *t = va_arg (*args, l2_invtr_trace_t *);
+
+ s = format (s, "l2-input-vtr: sw_if_index %d dst %U src %U data "
+ "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x",
+ t->sw_if_index,
+ format_ethernet_address, t->dst,
+ format_ethernet_address, t->src,
+ t->raw[0], t->raw[1], t->raw[2], t->raw[3], t->raw[4],
+ t->raw[5], t->raw[6], t->raw[7], t->raw[8], t->raw[9],
+ t->raw[10], t->raw[11]);
+ return s;
+}
+
+l2_invtr_main_t l2_invtr_main;
+
+static vlib_node_registration_t l2_invtr_node;
+
+#define foreach_l2_invtr_error \
+_(L2_INVTR, "L2 input vtr packets") \
+_(DROP, "L2 input tag rewrite drops")
+
+typedef enum
+{
+#define _(sym,str) L2_INVTR_ERROR_##sym,
+ foreach_l2_invtr_error
+#undef _
+ L2_INVTR_N_ERROR,
+} l2_invtr_error_t;
+
+static char *l2_invtr_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2_invtr_error
+#undef _
+};
+
+typedef enum
+{
+ L2_INVTR_NEXT_DROP,
+ L2_INVTR_N_NEXT,
+} l2_invtr_next_t;
+
+
+static uword
+l2_invtr_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ l2_invtr_next_t next_index;
+ l2_invtr_main_t *msm = &l2_invtr_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors; /* number of packets to process */
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ /* get space to enqueue frame to graph node "next_index" */
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 6 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1;
+ u32 feature_bitmap0, feature_bitmap1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3, *p4, *p5;
+ u32 sw_if_index2, sw_if_index3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+ p4 = vlib_get_buffer (vm, from[4]);
+ p5 = vlib_get_buffer (vm, from[5]);
+
+ /* Prefetch the buffer header and packet for the N+2 loop iteration */
+ vlib_prefetch_buffer_header (p4, LOAD);
+ vlib_prefetch_buffer_header (p5, LOAD);
+
+ CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
+
+ /*
+ * Prefetch the input config for the N+1 loop iteration
+ * This depends on the buffer header above
+ */
+ sw_if_index2 = vnet_buffer (p2)->sw_if_index[VLIB_RX];
+ sw_if_index3 = vnet_buffer (p3)->sw_if_index[VLIB_RX];
+ CLIB_PREFETCH (vec_elt_at_index
+ (l2output_main.configs, sw_if_index2),
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (vec_elt_at_index
+ (l2output_main.configs, sw_if_index3),
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ /* bi is "buffer index", b is pointer to the buffer */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* RX interface handles */
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+ /* process 2 packets */
+
+ /* Remove ourself from the feature bitmap */
+ feature_bitmap0 =
+ vnet_buffer (b0)->l2.feature_bitmap & ~L2INPUT_FEAT_VTR;
+ feature_bitmap1 =
+ vnet_buffer (b1)->l2.feature_bitmap & ~L2INPUT_FEAT_VTR;
+
+ /* save for next feature graph nodes */
+ vnet_buffer (b0)->l2.feature_bitmap = feature_bitmap0;
+ vnet_buffer (b1)->l2.feature_bitmap = feature_bitmap1;
+
+ /* Determine the next node */
+ next0 = feat_bitmap_get_next_node_index (msm->feat_next_node_index,
+ feature_bitmap0);
+ next1 = feat_bitmap_get_next_node_index (msm->feat_next_node_index,
+ feature_bitmap1);
+
+ l2_output_config_t *config0;
+ l2_output_config_t *config1;
+ config0 = vec_elt_at_index (l2output_main.configs, sw_if_index0);
+ config1 = vec_elt_at_index (l2output_main.configs, sw_if_index1);
+
+ if (PREDICT_FALSE (config0->out_vtr_flag))
+ {
+ if (config0->output_vtr.push_and_pop_bytes)
+ {
+ /* perform the tag rewrite on two packets */
+ if (l2_vtr_process
+ (b0,
+ &(vec_elt_at_index
+ (l2output_main.configs, sw_if_index0)->input_vtr)))
+ {
+ /* Drop packet */
+ next0 = L2_INVTR_NEXT_DROP;
+ b0->error = node->errors[L2_INVTR_ERROR_DROP];
+ }
+ }
+ else if (config0->output_pbb_vtr.push_and_pop_bytes)
+ {
+ if (l2_pbb_process (b0, &(config0->input_pbb_vtr)))
+ {
+ /* Drop packet */
+ next0 = L2_INVTR_NEXT_DROP;
+ b0->error = node->errors[L2_INVTR_ERROR_DROP];
+ }
+ }
+ }
+ if (PREDICT_FALSE (config1->out_vtr_flag))
+ {
+ if (config1->output_vtr.push_and_pop_bytes)
+ {
+ if (l2_vtr_process
+ (b1,
+ &(vec_elt_at_index
+ (l2output_main.configs, sw_if_index1)->input_vtr)))
+ {
+ /* Drop packet */
+ next1 = L2_INVTR_NEXT_DROP;
+ b1->error = node->errors[L2_INVTR_ERROR_DROP];
+ }
+ }
+ else if (config1->output_pbb_vtr.push_and_pop_bytes)
+ {
+ if (l2_pbb_process (b1, &(config1->input_pbb_vtr)))
+ {
+ /* Drop packet */
+ next1 = L2_INVTR_NEXT_DROP;
+ b1->error = node->errors[L2_INVTR_ERROR_DROP];
+ }
+ }
+ }
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2_invtr_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ ethernet_header_t *h0 = vlib_buffer_get_current (b0);
+ t->sw_if_index = sw_if_index0;
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ clib_memcpy (t->raw, &h0->type, sizeof (t->raw));
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2_invtr_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ ethernet_header_t *h1 = vlib_buffer_get_current (b1);
+		  t->sw_if_index = sw_if_index1;
+ clib_memcpy (t->src, h1->src_address, 6);
+ clib_memcpy (t->dst, h1->dst_address, 6);
+ clib_memcpy (t->raw, &h1->type, sizeof (t->raw));
+ }
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ /* if next0==next1==next_index then nothing special needs to be done */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u32 sw_if_index0;
+ u32 feature_bitmap0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ /* process 1 packet */
+
+ /* Remove ourself from the feature bitmap */
+ feature_bitmap0 =
+ vnet_buffer (b0)->l2.feature_bitmap & ~L2INPUT_FEAT_VTR;
+
+ /* save for next feature graph nodes */
+ vnet_buffer (b0)->l2.feature_bitmap = feature_bitmap0;
+
+ /* Determine the next node */
+ next0 = feat_bitmap_get_next_node_index (msm->feat_next_node_index,
+ feature_bitmap0);
+
+ l2_output_config_t *config0;
+ config0 = vec_elt_at_index (l2output_main.configs, sw_if_index0);
+
+ if (PREDICT_FALSE (config0->out_vtr_flag))
+ {
+ if (config0->output_vtr.push_and_pop_bytes)
+ {
+ /* perform the tag rewrite on one packet */
+ if (l2_vtr_process
+ (b0,
+ &(vec_elt_at_index
+ (l2output_main.configs, sw_if_index0)->input_vtr)))
+ {
+ /* Drop packet */
+ next0 = L2_INVTR_NEXT_DROP;
+ b0->error = node->errors[L2_INVTR_ERROR_DROP];
+ }
+ }
+ else if (config0->output_pbb_vtr.push_and_pop_bytes)
+ {
+ if (l2_pbb_process (b0, &(config0->input_pbb_vtr)))
+ {
+ /* Drop packet */
+ next0 = L2_INVTR_NEXT_DROP;
+ b0->error = node->errors[L2_INVTR_ERROR_DROP];
+ }
+ }
+ }
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ l2_invtr_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ ethernet_header_t *h0 = vlib_buffer_get_current (b0);
+ t->sw_if_index = sw_if_index0;
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ clib_memcpy (t->raw, &h0->type, sizeof (t->raw));
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2_invtr_node,static) = {
+ .function = l2_invtr_node_fn,
+ .name = "l2-input-vtr",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2_invtr_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2_invtr_error_strings),
+ .error_strings = l2_invtr_error_strings,
+
+ .n_next_nodes = L2_INVTR_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [L2_INVTR_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2_invtr_node, l2_invtr_node_fn);
+
+clib_error_t *
+l2_invtr_init (vlib_main_t * vm)
+{
+ l2_invtr_main_t *mp = &l2_invtr_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main ();
+
+ /* Initialize the feature next-node indexes */
+ feat_bitmap_init_next_nodes (vm,
+ l2_invtr_node.index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ mp->feat_next_node_index);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2_invtr_init);
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_input_vtr.h b/src/vnet/l2/l2_input_vtr.h
new file mode 100644
index 00000000000..f248669e550
--- /dev/null
+++ b/src/vnet/l2/l2_input_vtr.h
@@ -0,0 +1,54 @@
+/*
+ * l2_input_vtr.h : layer 2 input vlan tag rewrite processing
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_vnet_l2_input_vtr_h
+#define included_vnet_l2_input_vtr_h
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_vtr.h>
+
+
+typedef struct
+{
+
+ /*
+ * The input vtr data is located in l2_output_config_t because
+ * the same config data is used for the egress EFP Filter check.
+ */
+
+ /* Next nodes for each feature */
+ u32 feat_next_node_index[32];
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} l2_invtr_main_t;
+
+extern l2_invtr_main_t l2_invtr_main;
+
+#endif /* included_vnet_l2_input_vtr_h */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_learn.c b/src/vnet/l2/l2_learn.c
new file mode 100644
index 00000000000..7f19f936d70
--- /dev/null
+++ b/src/vnet/l2/l2_learn.c
@@ -0,0 +1,597 @@
+/*
+ * l2_learn.c : layer 2 learning using l2fib
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vlib/cli.h>
+
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_fib.h>
+#include <vnet/l2/l2_learn.h>
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+
+/**
+ * @file
+ * @brief Ethernet Bridge Learning.
+ *
+ * Populate the mac table with entries mapping the packet's source mac + bridge
+ * domain ID to the input sw_if_index.
+ *
+ * Note that learning and forwarding are separate graph nodes. This means that
+ * for a set of packets, all learning is performed first, then all packets are
+ * forwarded. The forwarding is done based on the end-state of the mac table,
+ * instead of the state after each packet. Thus the forwarding results could
+ * differ in certain cases (e.g. mac-move tests), but this is not expected to
+ * cause problems in real-world networks. It is much simpler to separate
+ * learning and forwarding into separate nodes.
+ */
+
+
+typedef struct
+{
+ u8 src[6];
+ u8 dst[6];
+ u32 sw_if_index;
+ u16 bd_index;
+} l2learn_trace_t;
+
+
+/* packet trace format function */
+static u8 *
+format_l2learn_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2learn_trace_t *t = va_arg (*args, l2learn_trace_t *);
+
+ s = format (s, "l2-learn: sw_if_index %d dst %U src %U bd_index %d",
+ t->sw_if_index,
+ format_ethernet_address, t->dst,
+ format_ethernet_address, t->src, t->bd_index);
+ return s;
+}
+
+static vlib_node_registration_t l2learn_node;
+
+#define foreach_l2learn_error \
+_(L2LEARN, "L2 learn packets") \
+_(MISS, "L2 learn misses") \
+_(MAC_MOVE, "L2 mac moves") \
+_(MAC_MOVE_VIOLATE, "L2 mac move violations") \
+_(LIMIT, "L2 not learned due to limit") \
+_(HIT, "L2 learn hits") \
+_(FILTER_DROP, "L2 filter mac drops")
+
+typedef enum
+{
+#define _(sym,str) L2LEARN_ERROR_##sym,
+ foreach_l2learn_error
+#undef _
+ L2LEARN_N_ERROR,
+} l2learn_error_t;
+
+static char *l2learn_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2learn_error
+#undef _
+};
+
+typedef enum
+{
+ L2LEARN_NEXT_L2FWD,
+ L2LEARN_NEXT_DROP,
+ L2LEARN_N_NEXT,
+} l2learn_next_t;
+
+
+/** Perform learning on one packet based on the mac table lookup result. */
+
+static_always_inline void
+l2learn_process (vlib_node_runtime_t * node,
+ l2learn_main_t * msm,
+ u64 * counter_base,
+ vlib_buffer_t * b0,
+ u32 sw_if_index0,
+ l2fib_entry_key_t * key0,
+ l2fib_entry_key_t * cached_key,
+ u32 * bucket0,
+ l2fib_entry_result_t * result0, u32 * next0, u8 timestamp)
+{
+ u32 feature_bitmap;
+
+ /* Set up the default next node (typically L2FWD) */
+
+ /* Remove ourself from the feature bitmap */
+ feature_bitmap = vnet_buffer (b0)->l2.feature_bitmap & ~L2INPUT_FEAT_LEARN;
+
+ /* Save for next feature graph nodes */
+ vnet_buffer (b0)->l2.feature_bitmap = feature_bitmap;
+
+ /* Determine the next node */
+ *next0 = feat_bitmap_get_next_node_index (msm->feat_next_node_index,
+ feature_bitmap);
+
+ /* Check mac table lookup result */
+
+ if (PREDICT_TRUE (result0->fields.sw_if_index == sw_if_index0))
+ {
+ /*
+       * The entry was in the table and the sw_if_index matched: the normal case.
+ */
+ counter_base[L2LEARN_ERROR_HIT] += 1;
+ if (PREDICT_FALSE (result0->fields.timestamp != timestamp))
+ result0->fields.timestamp = timestamp;
+
+ }
+ else if (result0->raw == ~0)
+ {
+
+      /* The entry was not in the table, so add it */
+
+ counter_base[L2LEARN_ERROR_MISS] += 1;
+
+ if (msm->global_learn_count == msm->global_learn_limit)
+ {
+ /*
+ * Global limit reached. Do not learn the mac but forward the packet.
+ * In the future, limits could also be per-interface or bridge-domain.
+ */
+ counter_base[L2LEARN_ERROR_LIMIT] += 1;
+ goto done;
+
+ }
+ else
+ {
+ BVT (clib_bihash_kv) kv;
+ /* It is ok to learn */
+
+ result0->raw = 0; /* clear all fields */
+ result0->fields.sw_if_index = sw_if_index0;
+ result0->fields.timestamp = timestamp;
+ kv.key = key0->raw;
+ kv.value = result0->raw;
+
+ BV (clib_bihash_add_del) (msm->mac_table, &kv, 1 /* is_add */ );
+
+ cached_key->raw = ~0; /* invalidate the cache */
+ msm->global_learn_count++;
+ }
+
+ }
+ else
+ {
+
+ /* The entry was in the table, but with the wrong sw_if_index mapping (mac move) */
+ counter_base[L2LEARN_ERROR_MAC_MOVE] += 1;
+
+ if (result0->fields.static_mac)
+ {
+ /*
+ * Don't overwrite a static mac
+ * TODO: Check violation policy. For now drop the packet
+ */
+ b0->error = node->errors[L2LEARN_ERROR_MAC_MOVE_VIOLATE];
+ *next0 = L2LEARN_NEXT_DROP;
+ }
+ else
+ {
+ /*
+ * Update the entry
+ * TODO: may want to rate limit mac moves
+ * TODO: check global/bridge domain/interface learn limits
+ */
+ BVT (clib_bihash_kv) kv;
+
+ result0->raw = 0; /* clear all fields */
+ result0->fields.sw_if_index = sw_if_index0;
+ result0->fields.timestamp = timestamp;
+
+ kv.key = key0->raw;
+ kv.value = result0->raw;
+
+ cached_key->raw = ~0; /* invalidate the cache */
+
+ BV (clib_bihash_add_del) (msm->mac_table, &kv, 1 /* is_add */ );
+ }
+ }
+
+ if (result0->fields.filter)
+ {
+ /* drop packet because lookup matched a filter mac entry */
+
+ if (*next0 != L2LEARN_NEXT_DROP)
+ {
+ /* if we're not already dropping the packet, do it now */
+ b0->error = node->errors[L2LEARN_ERROR_FILTER_DROP];
+ *next0 = L2LEARN_NEXT_DROP;
+ }
+ }
+
+done:
+ return;
+}
+
+
+static uword
+l2learn_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ l2learn_next_t next_index;
+ l2learn_main_t *msm = &l2learn_main;
+ vlib_node_t *n = vlib_get_node (vm, l2learn_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t *em = &vm->error_main;
+ l2fib_entry_key_t cached_key;
+ l2fib_entry_result_t cached_result;
+  /* mac aging timestamp: one-minute granularity, wraps modulo 256 */
+  u8 timestamp = (u8) (vlib_time_now (vm) / 60);
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors; /* number of packets to process */
+ next_index = node->cached_next_index;
+
+ /* Clear the one-entry cache in case mac table was updated */
+ cached_key.raw = ~0;
+ cached_result.raw = ~0; /* warning be gone */
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ /* get space to enqueue frame to graph node "next_index" */
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 8 && n_left_to_next >= 4)
+ {
+ u32 bi0, bi1, bi2, bi3;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ u32 next0, next1, next2, next3;
+ u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
+ ethernet_header_t *h0, *h1, *h2, *h3;
+ l2fib_entry_key_t key0, key1, key2, key3;
+ l2fib_entry_result_t result0, result1, result2, result3;
+ u32 bucket0, bucket1, bucket2, bucket3;
+
+ /* Prefetch next iteration. */
+ {
+	    vlib_buffer_t *p4, *p5, *p6, *p7;
+
+ p4 = vlib_get_buffer (vm, from[4]);
+ p5 = vlib_get_buffer (vm, from[5]);
+ p6 = vlib_get_buffer (vm, from[6]);
+ p7 = vlib_get_buffer (vm, from[7]);
+
+ vlib_prefetch_buffer_header (p4, LOAD);
+ vlib_prefetch_buffer_header (p5, LOAD);
+ vlib_prefetch_buffer_header (p6, LOAD);
+ vlib_prefetch_buffer_header (p7, LOAD);
+
+ CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+	  /* speculatively enqueue b0 through b3 to the current next frame */
+ /* bi is "buffer index", b is pointer to the buffer */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ to_next[2] = bi2 = from[2];
+ to_next[3] = bi3 = from[3];
+ from += 4;
+ to_next += 4;
+ n_left_from -= 4;
+ n_left_to_next -= 4;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ b2 = vlib_get_buffer (vm, bi2);
+ b3 = vlib_get_buffer (vm, bi3);
+
+ /* RX interface handles */
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX];
+ sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX];
+
+ /* Process 4 x pkts */
+
+ h0 = vlib_buffer_get_current (b0);
+ h1 = vlib_buffer_get_current (b1);
+ h2 = vlib_buffer_get_current (b2);
+ h3 = vlib_buffer_get_current (b3);
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2learn_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->bd_index = vnet_buffer (b0)->l2.bd_index;
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2learn_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->sw_if_index = sw_if_index1;
+ t->bd_index = vnet_buffer (b1)->l2.bd_index;
+ clib_memcpy (t->src, h1->src_address, 6);
+ clib_memcpy (t->dst, h1->dst_address, 6);
+ }
+ if (b2->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2learn_trace_t *t =
+ vlib_add_trace (vm, node, b2, sizeof (*t));
+ t->sw_if_index = sw_if_index2;
+ t->bd_index = vnet_buffer (b2)->l2.bd_index;
+ clib_memcpy (t->src, h2->src_address, 6);
+ clib_memcpy (t->dst, h2->dst_address, 6);
+ }
+ if (b3->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2learn_trace_t *t =
+ vlib_add_trace (vm, node, b3, sizeof (*t));
+ t->sw_if_index = sw_if_index3;
+ t->bd_index = vnet_buffer (b3)->l2.bd_index;
+ clib_memcpy (t->src, h3->src_address, 6);
+ clib_memcpy (t->dst, h3->dst_address, 6);
+ }
+ }
+
+ /* process 4 pkts */
+ vlib_node_increment_counter (vm, l2learn_node.index,
+ L2LEARN_ERROR_L2LEARN, 4);
+
+ l2fib_lookup_4 (msm->mac_table, &cached_key, &cached_result,
+ h0->src_address,
+ h1->src_address,
+ h2->src_address,
+ h3->src_address,
+ vnet_buffer (b0)->l2.bd_index,
+ vnet_buffer (b1)->l2.bd_index,
+ vnet_buffer (b2)->l2.bd_index,
+ vnet_buffer (b3)->l2.bd_index,
+ &key0, &key1, &key2, &key3,
+ &bucket0, &bucket1, &bucket2, &bucket3,
+ &result0, &result1, &result2, &result3);
+
+ l2learn_process (node, msm, &em->counters[node_counter_base_index],
+ b0, sw_if_index0, &key0, &cached_key,
+ &bucket0, &result0, &next0, timestamp);
+
+ l2learn_process (node, msm, &em->counters[node_counter_base_index],
+ b1, sw_if_index1, &key1, &cached_key,
+ &bucket1, &result1, &next1, timestamp);
+
+ l2learn_process (node, msm, &em->counters[node_counter_base_index],
+ b2, sw_if_index2, &key2, &cached_key,
+ &bucket2, &result2, &next2, timestamp);
+
+ l2learn_process (node, msm, &em->counters[node_counter_base_index],
+ b3, sw_if_index3, &key3, &cached_key,
+ &bucket3, &result3, &next3, timestamp);
+
+ /* verify speculative enqueues, maybe switch current next frame */
+	  /* if next0 through next3 all equal next_index then nothing special needs to be done */
+ vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, bi2, bi3,
+ next0, next1, next2, next3);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u32 sw_if_index0;
+ ethernet_header_t *h0;
+ l2fib_entry_key_t key0;
+ l2fib_entry_result_t result0;
+ u32 bucket0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ h0 = vlib_buffer_get_current (b0);
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ l2learn_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->bd_index = vnet_buffer (b0)->l2.bd_index;
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ }
+
+ /* process 1 pkt */
+ vlib_node_increment_counter (vm, l2learn_node.index,
+ L2LEARN_ERROR_L2LEARN, 1);
+
+
+ l2fib_lookup_1 (msm->mac_table, &cached_key, &cached_result,
+ h0->src_address, vnet_buffer (b0)->l2.bd_index,
+ &key0, &bucket0, &result0);
+
+ l2learn_process (node, msm, &em->counters[node_counter_base_index],
+ b0, sw_if_index0, &key0, &cached_key,
+ &bucket0, &result0, &next0, timestamp);
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2learn_node,static) = {
+ .function = l2learn_node_fn,
+ .name = "l2-learn",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2learn_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2learn_error_strings),
+ .error_strings = l2learn_error_strings,
+
+ .n_next_nodes = L2LEARN_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [L2LEARN_NEXT_DROP] = "error-drop",
+ [L2LEARN_NEXT_L2FWD] = "l2-fwd",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2learn_node, l2learn_node_fn)
+
+clib_error_t *
+l2learn_init (vlib_main_t * vm)
+{
+ l2learn_main_t *mp = &l2learn_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main ();
+
+ /* Initialize the feature next-node indexes */
+ feat_bitmap_init_next_nodes (vm,
+ l2learn_node.index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ mp->feat_next_node_index);
+
+ /* init the hash table ptr */
+ mp->mac_table = get_mac_table ();
+
+  /*
+   * Set the default limit on dynamically learned macs to 16 times
+   * the number of buckets in the l2fib mac table.
+   */
+ mp->global_learn_limit = L2FIB_NUM_BUCKETS * 16;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2learn_init);
+
+
+/**
+ * Set subinterface learn enable/disable.
+ * The CLI format is:
+ * set interface l2 learn <interface> [disable]
+ */
+static clib_error_t *
+int_learn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 sw_if_index;
+ u32 enable;
+
+ if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ enable = 1;
+ if (unformat (input, "disable"))
+ {
+ enable = 0;
+ }
+
+ /* set the interface flag */
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_LEARN, enable);
+
+done:
+ return error;
+}
+
+/*?
+ * Layer 2 learning can be enabled and disabled on each
+ * interface and on each bridge-domain. Use this command to
+ * manage learning on individual interfaces; it is enabled by default.
+ *
+ * @cliexpar
+ * Example of how to enable learning:
+ * @cliexcmd{set interface l2 learn GigabitEthernet0/8/0}
+ * Example of how to disable learning:
+ * @cliexcmd{set interface l2 learn GigabitEthernet0/8/0 disable}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (int_learn_cli, static) = {
+ .path = "set interface l2 learn",
+ .short_help = "set interface l2 learn <interface> [disable]",
+ .function = int_learn,
+};
+/* *INDENT-ON* */
+
+
+static clib_error_t *
+l2learn_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ l2learn_main_t *mp = &l2learn_main;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "limit %d", &mp->global_learn_limit))
+ ;
+
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ return 0;
+}
+
+VLIB_CONFIG_FUNCTION (l2learn_config, "l2learn");
+
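+/*
+ * The "l2learn" config handler above accepts a learn-limit override
+ * in startup.conf; a sketch (the value is illustrative):
+ *
+ *   l2learn {
+ *     limit 4096
+ *   }
+ */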
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_learn.h b/src/vnet/l2/l2_learn.h
new file mode 100644
index 00000000000..5bb1130b015
--- /dev/null
+++ b/src/vnet/l2/l2_learn.h
@@ -0,0 +1,64 @@
+/*
+ * l2_learn.h : layer 2 learning using l2fib
+ *
+ * Copyright (c) 2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_l2learn_h
+#define included_l2learn_h
+
+#include <vlib/vlib.h>
+#include <vnet/ethernet/ethernet.h>
+
+
+typedef struct
+{
+
+ /* Hash table */
+ BVT (clib_bihash) * mac_table;
+
+ /* number of dynamically learned mac entries */
+ u32 global_learn_count;
+
+ /* maximum number of dynamically learned mac entries */
+ u32 global_learn_limit;
+
+ /* Next nodes for each feature */
+ u32 feat_next_node_index[32];
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} l2learn_main_t;
+
+
+extern l2learn_main_t l2learn_main;
+
+extern vlib_node_registration_t l2fib_mac_age_scanner_process_node;
+
+typedef enum
+{
+  L2_MAC_AGE_PROCESS_EVENT_START = 1,
+  L2_MAC_AGE_PROCESS_EVENT_STOP = 2,
+} l2_mac_age_process_event_t;
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_output.c b/src/vnet/l2/l2_output.c
new file mode 100644
index 00000000000..953fcb0222a
--- /dev/null
+++ b/src/vnet/l2/l2_output.c
@@ -0,0 +1,708 @@
+/*
+ * l2_output.c : layer 2 output packet processing
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vlib/cli.h>
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_output.h>
+
+
+/* Feature graph node names */
+static char *l2output_feat_names[] = {
+#define _(sym,name) name,
+ foreach_l2output_feat
+#undef _
+};
+
+char **
+l2output_get_feat_names (void)
+{
+ return l2output_feat_names;
+}
+
+l2output_main_t l2output_main;
+
+typedef struct
+{
+ /* per-pkt trace data */
+ u8 src[6];
+ u8 dst[6];
+ u32 sw_if_index;
+} l2output_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_l2output_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2output_trace_t *t = va_arg (*args, l2output_trace_t *);
+
+ s = format (s, "l2-output: sw_if_index %d dst %U src %U",
+ t->sw_if_index,
+ format_ethernet_address, t->dst,
+ format_ethernet_address, t->src);
+ return s;
+}
+
+
+static char *l2output_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2output_error
+#undef _
+};
+
+/**
+ * Check for split horizon violations.
+ * Return 0 if split horizon check passes, otherwise return non-zero.
+ * Packets should not be transmitted out an interface with the same
+ * split-horizon group as the input interface, except if the @c shg is 0
+ * in which case the check always passes.
+ */
+static_always_inline u32
+split_horizon_violation (u8 shg1, u8 shg2)
+{
+ if (PREDICT_TRUE (shg1 == 0))
+ {
+ return 0;
+ }
+ else
+ {
+ return shg1 == shg2;
+ }
+}
+
+static_always_inline void
+l2output_vtr (vlib_node_runtime_t * node, l2_output_config_t * config,
+ u32 feature_bitmap, vlib_buffer_t * b, u32 * next)
+{
+ if (PREDICT_FALSE (config->out_vtr_flag))
+ {
+ /* Perform pre-vtr EFP filter check if configured */
+ if (config->output_vtr.push_and_pop_bytes)
+ {
+ /*
+ * Perform output vlan tag rewrite and the pre-vtr EFP filter check.
+ * The EFP Filter only needs to be run if there is an output VTR
+ * configured. The flag for the post-vtr EFP Filter node is used
+ * to trigger the pre-vtr check as well.
+ */
+ u32 failed1 = (feature_bitmap & L2OUTPUT_FEAT_EFP_FILTER)
+ && (l2_efp_filter_process (b, &(config->input_vtr)));
+ u32 failed2 = l2_vtr_process (b, &(config->output_vtr));
+
+ if (PREDICT_FALSE (failed1 | failed2))
+ {
+ *next = L2OUTPUT_NEXT_DROP;
+ if (failed2)
+ {
+ b->error = node->errors[L2OUTPUT_ERROR_VTR_DROP];
+ }
+ if (failed1)
+ {
+ b->error = node->errors[L2OUTPUT_ERROR_EFP_DROP];
+ }
+ }
+ }
+      /* perform the PBB rewrite */
+ else if (config->output_pbb_vtr.push_and_pop_bytes)
+ {
+ u32 failed = l2_pbb_process (b, &(config->output_pbb_vtr));
+ if (PREDICT_FALSE (failed))
+ {
+ *next = L2OUTPUT_NEXT_DROP;
+ b->error = node->errors[L2OUTPUT_ERROR_VTR_DROP];
+ }
+ }
+ }
+}
+
+
+static vlib_node_registration_t l2output_node;
+
+static uword
+l2output_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ l2output_next_t next_index;
+ l2output_main_t *msm = &l2output_main;
+ u32 cached_sw_if_index;
+ u32 cached_next_index;
+
+ /* Invalidate cache */
+ cached_sw_if_index = ~0;
+ cached_next_index = ~0; /* warning be gone */
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors; /* number of packets to process */
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ /* get space to enqueue frame to graph node "next_index" */
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 8 && n_left_to_next >= 4)
+ {
+ u32 bi0, bi1, bi2, bi3;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ u32 next0, next1, next2, next3;
+ u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
+ ethernet_header_t *h0, *h1, *h2, *h3;
+ l2_output_config_t *config0, *config1, *config2, *config3;
+ u32 feature_bitmap0, feature_bitmap1;
+ u32 feature_bitmap2, feature_bitmap3;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p4, *p5, *p6, *p7;
+
+ p4 = vlib_get_buffer (vm, from[4]);
+ p5 = vlib_get_buffer (vm, from[5]);
+ p6 = vlib_get_buffer (vm, from[6]);
+ p7 = vlib_get_buffer (vm, from[7]);
+
+	    /* Prefetch the buffer headers for the next loop iteration */
+ vlib_prefetch_buffer_header (p4, LOAD);
+ vlib_prefetch_buffer_header (p5, LOAD);
+ vlib_prefetch_buffer_header (p6, LOAD);
+ vlib_prefetch_buffer_header (p7, LOAD);
+ }
+
+	  /* speculatively enqueue b0 through b3 to the current next frame */
+ /* bi is "buffer index", b is pointer to the buffer */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ to_next[2] = bi2 = from[2];
+ to_next[3] = bi3 = from[3];
+ from += 4;
+ to_next += 4;
+ n_left_from -= 4;
+ n_left_to_next -= 4;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ b2 = vlib_get_buffer (vm, bi2);
+ b3 = vlib_get_buffer (vm, bi3);
+
+ /* TX interface handles */
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
+ sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_TX];
+ sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_TX];
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ h0 = vlib_buffer_get_current (b0);
+ h1 = vlib_buffer_get_current (b1);
+ h2 = vlib_buffer_get_current (b2);
+ h3 = vlib_buffer_get_current (b3);
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2output_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2output_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->sw_if_index = sw_if_index1;
+ clib_memcpy (t->src, h1->src_address, 6);
+ clib_memcpy (t->dst, h1->dst_address, 6);
+ }
+ if (b2->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2output_trace_t *t =
+ vlib_add_trace (vm, node, b2, sizeof (*t));
+ t->sw_if_index = sw_if_index2;
+ clib_memcpy (t->src, h2->src_address, 6);
+ clib_memcpy (t->dst, h2->dst_address, 6);
+ }
+ if (b3->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2output_trace_t *t =
+ vlib_add_trace (vm, node, b3, sizeof (*t));
+ t->sw_if_index = sw_if_index3;
+ clib_memcpy (t->src, h3->src_address, 6);
+ clib_memcpy (t->dst, h3->dst_address, 6);
+ }
+ }
+
+ vlib_node_increment_counter (vm, l2output_node.index,
+ L2OUTPUT_ERROR_L2OUTPUT, 4);
+
+ /* Get config for the output interface */
+ config0 = vec_elt_at_index (msm->configs, sw_if_index0);
+ config1 = vec_elt_at_index (msm->configs, sw_if_index1);
+ config2 = vec_elt_at_index (msm->configs, sw_if_index2);
+ config3 = vec_elt_at_index (msm->configs, sw_if_index3);
+
+ /*
+ * Get features from the config
+ * TODO: mask out any non-applicable features
+ */
+ feature_bitmap0 = config0->feature_bitmap;
+ feature_bitmap1 = config1->feature_bitmap;
+ feature_bitmap2 = config2->feature_bitmap;
+ feature_bitmap3 = config3->feature_bitmap;
+
+ /* Determine next node */
+ l2_output_dispatch (msm->vlib_main,
+ msm->vnet_main,
+ node,
+ l2output_node.index,
+ &cached_sw_if_index,
+ &cached_next_index,
+ &msm->next_nodes,
+ b0, sw_if_index0, feature_bitmap0, &next0);
+
+ l2_output_dispatch (msm->vlib_main,
+ msm->vnet_main,
+ node,
+ l2output_node.index,
+ &cached_sw_if_index,
+ &cached_next_index,
+ &msm->next_nodes,
+ b1, sw_if_index1, feature_bitmap1, &next1);
+
+ l2_output_dispatch (msm->vlib_main,
+ msm->vnet_main,
+ node,
+ l2output_node.index,
+ &cached_sw_if_index,
+ &cached_next_index,
+ &msm->next_nodes,
+ b2, sw_if_index2, feature_bitmap2, &next2);
+
+ l2_output_dispatch (msm->vlib_main,
+ msm->vnet_main,
+ node,
+ l2output_node.index,
+ &cached_sw_if_index,
+ &cached_next_index,
+ &msm->next_nodes,
+ b3, sw_if_index3, feature_bitmap3, &next3);
+
+ l2output_vtr (node, config0, feature_bitmap0, b0, &next0);
+ l2output_vtr (node, config1, feature_bitmap1, b1, &next1);
+ l2output_vtr (node, config2, feature_bitmap2, b2, &next2);
+ l2output_vtr (node, config3, feature_bitmap3, b3, &next3);
+
+ /*
+ * Perform the split horizon check
+ * The check can only fail for non-zero shg's
+ */
+ if (PREDICT_FALSE (config0->shg + config1->shg +
+ config2->shg + config3->shg))
+ {
+ /* one of the checks might fail, check both */
+ if (split_horizon_violation
+ (config0->shg, vnet_buffer (b0)->l2.shg))
+ {
+ next0 = L2OUTPUT_NEXT_DROP;
+ b0->error = node->errors[L2OUTPUT_ERROR_SHG_DROP];
+ }
+ if (split_horizon_violation
+ (config1->shg, vnet_buffer (b1)->l2.shg))
+ {
+ next1 = L2OUTPUT_NEXT_DROP;
+ b1->error = node->errors[L2OUTPUT_ERROR_SHG_DROP];
+ }
+ if (split_horizon_violation
+ (config2->shg, vnet_buffer (b2)->l2.shg))
+ {
+ next2 = L2OUTPUT_NEXT_DROP;
+ b2->error = node->errors[L2OUTPUT_ERROR_SHG_DROP];
+ }
+ if (split_horizon_violation
+ (config3->shg, vnet_buffer (b3)->l2.shg))
+ {
+ next3 = L2OUTPUT_NEXT_DROP;
+ b3->error = node->errors[L2OUTPUT_ERROR_SHG_DROP];
+ }
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+	  /* if next0 through next3 all equal next_index then nothing special needs to be done */
+ vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, bi2, bi3,
+ next0, next1, next2, next3);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u32 sw_if_index0;
+ ethernet_header_t *h0;
+ l2_output_config_t *config0;
+ u32 feature_bitmap0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ l2output_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ h0 = vlib_buffer_get_current (b0);
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ }
+
+ vlib_node_increment_counter (vm, l2output_node.index,
+ L2OUTPUT_ERROR_L2OUTPUT, 1);
+
+ /* Get config for the output interface */
+ config0 = vec_elt_at_index (msm->configs, sw_if_index0);
+
+ /*
+ * Get features from the config
+ * TODO: mask out any non-applicable features
+ */
+ feature_bitmap0 = config0->feature_bitmap;
+
+ /* Determine next node */
+ l2_output_dispatch (msm->vlib_main,
+ msm->vnet_main,
+ node,
+ l2output_node.index,
+ &cached_sw_if_index,
+ &cached_next_index,
+ &msm->next_nodes,
+ b0, sw_if_index0, feature_bitmap0, &next0);
+
+ l2output_vtr (node, config0, feature_bitmap0, b0, &next0);
+
+ /* Perform the split horizon check */
+ if (PREDICT_FALSE
+ (split_horizon_violation
+ (config0->shg, vnet_buffer (b0)->l2.shg)))
+ {
+ next0 = L2OUTPUT_NEXT_DROP;
+ b0->error = node->errors[L2OUTPUT_ERROR_SHG_DROP];
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2output_node,static) = {
+ .function = l2output_node_fn,
+ .name = "l2-output",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2output_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2output_error_strings),
+ .error_strings = l2output_error_strings,
+
+ .n_next_nodes = L2OUTPUT_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [L2OUTPUT_NEXT_DROP] = "error-drop",
+ [L2OUTPUT_NEXT_BAD_INTF] = "l2-output-bad-intf",
+ },
+};
+/* *INDENT-ON* */
+
+
+#define foreach_l2output_bad_intf_error \
+_(DROP, "L2 output to interface not in L2 mode or deleted")
+
+static char *l2output_bad_intf_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2output_bad_intf_error
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) L2OUTPUT_BAD_INTF_ERROR_##sym,
+ foreach_l2output_bad_intf_error
+#undef _
+ L2OUTPUT_BAD_INTF_N_ERROR,
+} l2output_bad_intf_error_t;
+
+
+/**
+ * Output node for interfaces/tunnels which were in L2 mode but were changed
+ * to L3 mode or possibly deleted thereafter. On changing the forwarding mode
+ * of any tunnel/interface from L2 to L3, its entry in the l2_output_main table
+ * next_nodes.output_node_index_vec[sw_if_index] MUST be set to the value of
+ * L2OUTPUT_NEXT_BAD_INTF. Thus, if there are stale entries in the L2FIB for
+ * this sw_if_index, l2-output will send packets for this sw_if_index to the
+ * l2-output-bad-intf node, which just sets up the proper drop reason before
+ * sending packets to the error-drop node. Stale L2FIB entries for deleted
+ * tunnels then cannot cause packet or memory corruption.
+ */
+static vlib_node_registration_t l2output_bad_intf_node;
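+
+/*
+ * A minimal sketch of what an L2-to-L3 mode change would do to steer
+ * stale L2FIB hits here (using the names from this file; the caller
+ * and any locking context are elided):
+ *
+ *   l2output_main_t *mp = &l2output_main;
+ *   vec_validate_init_empty (mp->next_nodes.output_node_index_vec,
+ *                            sw_if_index, ~0);
+ *   mp->next_nodes.output_node_index_vec[sw_if_index] =
+ *     L2OUTPUT_NEXT_BAD_INTF;
+ */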
+
+static uword
+l2output_bad_intf_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ l2output_next_t next_index = 0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors; /* number of packets to process */
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ /* get space to enqueue frame to graph node "next_index" */
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ b0->error = node->errors[L2OUTPUT_BAD_INTF_ERROR_DROP];
+ b1->error = node->errors[L2OUTPUT_BAD_INTF_ERROR_DROP];
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+ b0 = vlib_get_buffer (vm, bi0);
+ b0->error = node->errors[L2OUTPUT_BAD_INTF_ERROR_DROP];
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2output_bad_intf_node,static) = {
+ .function = l2output_bad_intf_node_fn,
+ .name = "l2-output-bad-intf",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2output_bad_intf_error_strings),
+ .error_strings = l2output_bad_intf_error_strings,
+
+ .n_next_nodes = 1,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [0] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2output_node, l2output_node_fn)
+
+clib_error_t *
+l2output_init (vlib_main_t * vm)
+{
+ l2output_main_t *mp = &l2output_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main ();
+
+  /*
+   * Create the config vector. Until the CLI config is hooked up,
+   * just create 100 sw interface entries and zero them.
+   */
+  vec_validate (mp->configs, 100);
+
+ /* Initialize the feature next-node indexes */
+ feat_bitmap_init_next_nodes (vm,
+ l2output_node.index,
+ L2OUTPUT_N_FEAT,
+ l2output_get_feat_names (),
+ mp->next_nodes.feat_next_node_index);
+
+ /* Initialize the output node mapping table */
+ l2output_init_output_node_vec (&mp->next_nodes.output_node_index_vec);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2output_init);
+
+typedef struct
+{
+ u32 node_index;
+ u32 sw_if_index;
+} output_node_mapping_rpc_args_t;
+
+static void output_node_rpc_callback (output_node_mapping_rpc_args_t * a);
+
+static void
+output_node_mapping_send_rpc (u32 node_index, u32 sw_if_index)
+{
+ output_node_mapping_rpc_args_t args;
+ void vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length);
+
+ args.node_index = node_index;
+ args.sw_if_index = sw_if_index;
+
+ vl_api_rpc_call_main_thread (output_node_rpc_callback,
+ (u8 *) & args, sizeof (args));
+}
+
+
+/** Create a mapping in the next node mapping table for the given sw_if_index. */
+u32
+l2output_create_output_node_mapping (vlib_main_t * vlib_main, vnet_main_t * vnet_main, u32 node_index, /* index of current node */
+ u32 * output_node_index_vec,
+ u32 sw_if_index)
+{
+
+ u32 next; /* index of next graph node */
+ vnet_hw_interface_t *hw0;
+ u32 *node;
+
+ hw0 = vnet_get_sup_hw_interface (vnet_main, sw_if_index);
+
+  uword cpu_number = os_get_cpu_number ();
+
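+  /*
+   * Only the main thread may add graph arcs. A worker thread marks the
+   * interface as mapping-in-progress and RPCs the main thread to build
+   * the arc; meanwhile its packets are dropped (counted as
+   * L2OUTPUT_ERROR_MAPPING_DROP by the dispatch path).
+   */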
+ if (cpu_number)
+ {
+ u32 oldflags;
+
+ oldflags = __sync_fetch_and_or (&hw0->flags,
+ VNET_HW_INTERFACE_FLAG_L2OUTPUT_MAPPED);
+
+ if ((oldflags & VNET_HW_INTERFACE_FLAG_L2OUTPUT_MAPPED))
+ return L2OUTPUT_NEXT_DROP;
+
+ output_node_mapping_send_rpc (node_index, sw_if_index);
+ return L2OUTPUT_NEXT_DROP;
+ }
+
+ /* dynamically create graph node arc */
+ next = vlib_node_add_next (vlib_main, node_index, hw0->output_node_index);
+
+ /* Initialize vector with the mapping */
+
+ node = vec_elt_at_index (output_node_index_vec, sw_if_index);
+ *node = next;
+
+ /* reset mapping bit, includes memory barrier */
+ __sync_fetch_and_and (&hw0->flags, ~VNET_HW_INTERFACE_FLAG_L2OUTPUT_MAPPED);
+
+ return next;
+}
+
+void
+output_node_rpc_callback (output_node_mapping_rpc_args_t * a)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vnet_main_t *vnm = vnet_get_main ();
+ l2output_main_t *mp = &l2output_main;
+
+ (void) l2output_create_output_node_mapping
+ (vm, vnm, a->node_index, mp->next_nodes.output_node_index_vec,
+ a->sw_if_index);
+}
+
+/* Get a pointer to the config for the given interface */
+l2_output_config_t *
+l2output_intf_config (u32 sw_if_index)
+{
+ l2output_main_t *mp = &l2output_main;
+
+ vec_validate (mp->configs, sw_if_index);
+ return vec_elt_at_index (mp->configs, sw_if_index);
+}
+
+/** Enable (or disable) the feature in the bitmap for the given interface. */
+void
+l2output_intf_bitmap_enable (u32 sw_if_index, u32 feature_bitmap, u32 enable)
+{
+ l2output_main_t *mp = &l2output_main;
+ l2_output_config_t *config;
+
+ vec_validate (mp->configs, sw_if_index);
+ config = vec_elt_at_index (mp->configs, sw_if_index);
+
+ if (enable)
+ {
+ config->feature_bitmap |= feature_bitmap;
+ }
+ else
+ {
+ config->feature_bitmap &= ~feature_bitmap;
+ }
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_output.h b/src/vnet/l2/l2_output.h
new file mode 100644
index 00000000000..c683b1ade73
--- /dev/null
+++ b/src/vnet/l2/l2_output.h
@@ -0,0 +1,285 @@
+/*
+ * l2_output.h : layer 2 output packet processing
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_vnet_l2_output_h
+#define included_vnet_l2_output_h
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_vtr.h>
+
+
+/* The L2 output feature configuration, a per-interface struct */
+typedef struct
+{
+
+ u32 feature_bitmap;
+
+ /*
+ * vlan tag rewrite for ingress and egress
+ * ingress vtr is located here because the same config data is used for
+ * the egress EFP filter check
+ */
+ vtr_config_t input_vtr;
+ vtr_config_t output_vtr;
+ ptr_config_t input_pbb_vtr;
+ ptr_config_t output_pbb_vtr;
+
+ /* some of these flags may get integrated into the feature bitmap */
+ u8 fwd_enable;
+ u8 flood_enable;
+
+ /* split horizon group */
+ u8 shg;
+
+ /* flag for output vtr operation */
+ u8 out_vtr_flag;
+
+} l2_output_config_t;
+
+
+/*
+ * The set of next nodes for features and interface output.
+ * Each output feature node should include this.
+ */
+typedef struct
+{
+ /*
+ * vector of output next node index, indexed by sw_if_index.
+ * used when all output features have been executed and the
+ * next nodes are the interface output nodes.
+ */
+ u32 *output_node_index_vec;
+
+ /*
+ * array of next node index for each output feature, indexed
+ * by l2output_feat_t. Used to determine next feature node.
+ */
+ u32 feat_next_node_index[32];
+
+} l2_output_next_nodes_st;
+
+
+typedef struct
+{
+ /* Next nodes for features and output interfaces */
+ l2_output_next_nodes_st next_nodes;
+
+ /* config vector indexed by sw_if_index */
+ l2_output_config_t *configs;
+
+ /* Convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} l2output_main_t;
+
+extern l2output_main_t l2output_main;
+
+/* L2 output features */
+
+/* Mappings from feature ID to graph node name */
+#define foreach_l2output_feat \
+ _(SPAN, "feature-bitmap-drop") \
+ _(CFM, "feature-bitmap-drop") \
+ _(QOS, "feature-bitmap-drop") \
+ _(ACL, "l2-output-acl") \
+ _(L2PT, "feature-bitmap-drop") \
+ _(EFP_FILTER, "l2-efp-filter") \
+ _(IPIW, "feature-bitmap-drop") \
+ _(STP_BLOCKED, "feature-bitmap-drop") \
+ _(LINESTATUS_DOWN, "feature-bitmap-drop") \
+ _(OUTPUT_CLASSIFY, "l2-output-classify") \
+ _(XCRW, "l2-xcrw")
+
+/* Feature bitmap positions */
+typedef enum
+{
+#define _(sym,str) L2OUTPUT_FEAT_##sym##_BIT,
+ foreach_l2output_feat
+#undef _
+ L2OUTPUT_N_FEAT,
+} l2output_feat_t;
+
+/* Feature bit masks */
+typedef enum
+{
+#define _(sym,str) L2OUTPUT_FEAT_##sym = (1<<L2OUTPUT_FEAT_##sym##_BIT),
+ foreach_l2output_feat
+#undef _
+} l2output_feat_masks_t;
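+
+/*
+ * For example, the _(ACL, "l2-output-acl") entry in the list above
+ * expands to L2OUTPUT_FEAT_ACL_BIT in the position enum and to
+ * L2OUTPUT_FEAT_ACL = (1 << L2OUTPUT_FEAT_ACL_BIT) here.
+ */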
+
+#define foreach_l2output_error \
+_(L2OUTPUT, "L2 output packets") \
+_(EFP_DROP, "L2 EFP filter pre-rewrite drops") \
+_(VTR_DROP, "L2 output tag rewrite drops") \
+_(SHG_DROP, "L2 split horizon drops") \
+_(DROP, "L2 output drops") \
+_(MAPPING_DROP, "L2 Output interface mapping in progress")
+
+typedef enum
+{
+ L2OUTPUT_NEXT_DROP,
+ L2OUTPUT_NEXT_BAD_INTF,
+ L2OUTPUT_N_NEXT,
+} l2output_next_t;
+
+typedef enum
+{
+#define _(sym,str) L2OUTPUT_ERROR_##sym,
+ foreach_l2output_error
+#undef _
+ L2OUTPUT_N_ERROR,
+} l2output_error_t;
+
+/* Return an array of strings containing graph node names of each feature */
+char **l2output_get_feat_names (void);
+
+
+/**
+ * The next set of functions is for use by output feature graph nodes.
+ * When the last bit has been cleared from the output feature bitmap,
+ * the next node is the output graph node for the TX sw_if_index.
+ * These functions help the feature nodes get that node index.
+ */
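+
+/*
+ * A minimal sketch of the pattern an output feature node follows once
+ * it is done with a packet (compare l2_output_acl.c): clear its own
+ * feature bit, then dispatch on whatever remains. Variable names here
+ * are illustrative.
+ *
+ *   feature_bitmap0 = vnet_buffer (b0)->l2.feature_bitmap
+ *     & ~L2OUTPUT_FEAT_ACL;
+ *   l2_output_dispatch (msm->vlib_main, msm->vnet_main, node,
+ *                       my_node.index, &cached_sw_if_index,
+ *                       &cached_next_index, &msm->next_nodes,
+ *                       b0, sw_if_index0, feature_bitmap0, &next0);
+ */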
+
+/* Create a mapping to the output graph node for the given sw_if_index */
+u32 l2output_create_output_node_mapping (vlib_main_t * vlib_main, vnet_main_t * vnet_main, u32 node_index, /* index of current node */
+ u32 * output_node_index_vec,
+ u32 sw_if_index);
+
+/* Initialize the next node mapping table */
+always_inline void
+l2output_init_output_node_vec (u32 ** output_node_index_vec)
+{
+
+ /*
+ * Size it at 100 sw_if_indexes initially
+ * Uninitialized mappings are set to ~0
+ */
+ vec_validate_init_empty (*output_node_index_vec, 100, ~0);
+}
+
+
+/**
+ * Get a mapping from the output node mapping table,
+ * creating the entry if necessary.
+ */
+always_inline u32
+l2output_get_output_node (vlib_main_t * vlib_main, vnet_main_t * vnet_main, u32 node_index, /* index of current node */
+ u32 sw_if_index, u32 ** output_node_index_vec) /* may be updated */
+{
+ u32 next; /* index of next graph node */
+
+  /* Ensure the vector is big enough */
+ vec_validate_init_empty (*output_node_index_vec, sw_if_index, ~0);
+
+ /* Get the mapping for the sw_if_index */
+ next = vec_elt (*output_node_index_vec, sw_if_index);
+
+ if (next == ~0)
+ {
+ /* Mapping doesn't exist so create it */
+ next = l2output_create_output_node_mapping (vlib_main,
+ vnet_main,
+ node_index,
+ *output_node_index_vec,
+ sw_if_index);
+ }
+
+ return next;
+}
+
+
+/** Determine the next L2 node based on the output feature bitmap */
+always_inline void
+l2_output_dispatch (vlib_main_t * vlib_main,
+ vnet_main_t * vnet_main,
+ vlib_node_runtime_t * node,
+ u32 node_index,
+ u32 * cached_sw_if_index,
+ u32 * cached_next_index,
+ l2_output_next_nodes_st * next_nodes,
+ vlib_buffer_t * b0,
+ u32 sw_if_index, u32 feature_bitmap, u32 * next0)
+{
+ if (feature_bitmap)
+ {
+ /* There are some features to execute */
+
+ /* Save bitmap for the next feature graph nodes */
+ vnet_buffer (b0)->l2.feature_bitmap = feature_bitmap;
+
+ /* Determine the next node */
+ *next0 =
+ feat_bitmap_get_next_node_index (next_nodes->feat_next_node_index,
+ feature_bitmap);
+ }
+ else
+ {
+ /*
+ * There are no features. Send packet to TX node for sw_if_index0
+ * This is a little tricky in that the output interface next node indexes
+ * are not precomputed at init time.
+ */
+
+ if (sw_if_index == *cached_sw_if_index)
+ {
+ /* We hit in the one-entry cache. Use it. */
+ *next0 = *cached_next_index;
+ }
+ else
+ {
+ /* Look up the output TX node */
+ *next0 = l2output_get_output_node (vlib_main,
+ vnet_main,
+ node_index,
+ sw_if_index,
+ &next_nodes->output_node_index_vec);
+
+ if (*next0 == L2OUTPUT_NEXT_DROP)
+ {
+ vnet_hw_interface_t *hw0;
+ hw0 = vnet_get_sup_hw_interface (vnet_main, sw_if_index);
+
+ if (hw0->flags & VNET_HW_INTERFACE_FLAG_L2OUTPUT_MAPPED)
+ b0->error = node->errors[L2OUTPUT_ERROR_MAPPING_DROP];
+ }
+
+ /* Update the one-entry cache */
+ *cached_sw_if_index = sw_if_index;
+ *cached_next_index = *next0;
+ }
+ }
+}
+
+/** Get a pointer to the config for the given interface */
+l2_output_config_t *l2output_intf_config (u32 sw_if_index);
+
+/** Enable (or disable) the feature in the bitmap for the given interface */
+void l2output_intf_bitmap_enable (u32 sw_if_index,
+ u32 feature_bitmap, u32 enable);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_output_acl.c b/src/vnet/l2/l2_output_acl.c
new file mode 100644
index 00000000000..94a4d66b48f
--- /dev/null
+++ b/src/vnet/l2/l2_output_acl.c
@@ -0,0 +1,358 @@
+/*
+ * l2_output_acl.c : layer 2 output acl processing
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ethernet/packet.h>
+#include <vnet/ip/ip_packet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vlib/cli.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_output.h>
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/cache.h>
+
+
+typedef struct
+{
+ /* Next nodes for features and output interfaces */
+ l2_output_next_nodes_st next_nodes;
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} l2_outacl_main_t;
+
+
+
+typedef struct
+{
+ /* per-pkt trace data */
+ u8 src[6];
+ u8 dst[6];
+ u32 next_index;
+ u32 sw_if_index;
+} l2_outacl_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_l2_outacl_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2_outacl_trace_t *t = va_arg (*args, l2_outacl_trace_t *);
+
+ s = format (s, "l2-output-acl: sw_if_index %d dst %U src %U",
+ t->sw_if_index,
+ format_ethernet_address, t->dst,
+ format_ethernet_address, t->src);
+ return s;
+}
+
+l2_outacl_main_t l2_outacl_main;
+
+static vlib_node_registration_t l2_outacl_node;
+
+#define foreach_l2_outacl_error \
+_(L2_OUTACL, "L2 output ACL packets") \
+_(DROP, "L2 output drops")
+
+typedef enum
+{
+#define _(sym,str) L2_OUTACL_ERROR_##sym,
+ foreach_l2_outacl_error
+#undef _
+ L2_OUTACL_N_ERROR,
+} l2_outacl_error_t;
+
+static char *l2_outacl_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2_outacl_error
+#undef _
+};
+
+typedef enum
+{
+ L2_OUTACL_NEXT_DROP,
+ L2_OUTACL_N_NEXT,
+} l2_outacl_next_t;
+
+
+
+static uword
+l2_outacl_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ l2_outacl_next_t next_index;
+ l2_outacl_main_t *msm = &l2_outacl_main;
+ vlib_node_t *n = vlib_get_node (vm, l2_outacl_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t *em = &vm->error_main;
+ u32 cached_sw_if_index = (u32) ~ 0;
+ u32 cached_next_index = (u32) ~ 0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors; /* number of packets to process */
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ /* get space to enqueue frame to graph node "next_index" */
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
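+      /*
+       * Note: this dual loop is disabled ("0 &&") until the real ACL
+       * match logic is written; h0/h1 and next0/next1 are not yet set
+       * in it, so all packets take the single loop below.
+       */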
+ while (0 && n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1;
+ ethernet_header_t *h0, *h1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ /* bi is "buffer index", b is pointer to the buffer */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* TX interface handles */
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2_outacl_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2_outacl_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ clib_memcpy (t->src, h1->src_address, 6);
+ clib_memcpy (t->dst, h1->dst_address, 6);
+ }
+ }
+
+ em->counters[node_counter_base_index + L2_OUTACL_ERROR_L2_OUTACL] +=
+ 2;
+
+ /* add core loop code here */
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ /* if next0==next1==next_index then nothing special needs to be done */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u32 sw_if_index0;
+ ethernet_header_t *h0;
+ u32 feature_bitmap0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = vlib_buffer_get_current (b0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ l2_outacl_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ clib_memcpy (t->src, h0->src_address, 6);
+ clib_memcpy (t->dst, h0->dst_address, 6);
+ }
+
+ em->counters[node_counter_base_index + L2_OUTACL_ERROR_L2_OUTACL] +=
+ 1;
+
+ /*
+ * L2_OUTACL code
+ * Dummy for now, just go to next feature node
+ */
+
+
+ /* Remove ourself from the feature bitmap */
+ feature_bitmap0 =
+ vnet_buffer (b0)->l2.feature_bitmap & ~L2OUTPUT_FEAT_ACL;
+
+ /* Determine next node */
+ l2_output_dispatch (msm->vlib_main,
+ msm->vnet_main,
+ node,
+ l2_outacl_node.index,
+ &cached_sw_if_index,
+ &cached_next_index,
+ &msm->next_nodes,
+ b0, sw_if_index0, feature_bitmap0, &next0);
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2_outacl_node,static) = {
+ .function = l2_outacl_node_fn,
+ .name = "l2-output-acl",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2_outacl_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2_outacl_error_strings),
+ .error_strings = l2_outacl_error_strings,
+
+ .n_next_nodes = L2_OUTACL_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [L2_OUTACL_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2_outacl_node, l2_outacl_node_fn)
+
+clib_error_t *
+l2_outacl_init (vlib_main_t * vm)
+{
+ l2_outacl_main_t *mp = &l2_outacl_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main ();
+
+ /* Initialize the feature next-node indexes */
+ feat_bitmap_init_next_nodes (vm,
+ l2_outacl_node.index,
+ L2OUTPUT_N_FEAT,
+ l2output_get_feat_names (),
+ mp->next_nodes.feat_next_node_index);
+
+ /* Initialize the output node mapping table */
+ l2output_init_output_node_vec (&mp->next_nodes.output_node_index_vec);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2_outacl_init);
+
+#if 0
+/** @todo maybe someone will add output ACL's in the future.
+ * Set subinterface outacl enable/disable.
+ * The CLI format is:
+ * set interface acl output <interface> [disable]
+ */
+static clib_error_t *
+int_l2_outacl (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 sw_if_index;
+ u32 enable;
+
+ if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ enable = 1;
+ if (unformat (input, "disable"))
+ {
+ enable = 0;
+ }
+
+ /* set the interface flag */
+ l2output_intf_bitmap_enable (sw_if_index, L2OUTPUT_FEAT_ACL, enable);
+
+done:
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (int_l2_outacl_cli, static) = {
+ .path = "set interface acl output",
+ .short_help = "set interface acl output <interface> [disable]",
+ .function = int_l2_outacl,
+};
+/* *INDENT-ON* */
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_output_classify.c b/src/vnet/l2/l2_output_classify.c
new file mode 100644
index 00000000000..27d5eb39514
--- /dev/null
+++ b/src/vnet/l2/l2_output_classify.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/l2/l2_classify.h>
+#include <vnet/api_errno.h>
+
+/**
+ * @file
+ * @brief Layer 2 Output Classifier.
+ *
+ * @sa @ref vnet/vnet/classify/vnet_classify.c
+ * @sa @ref vnet/vnet/classify/vnet_classify.h
+ */
+
+typedef struct
+{
+ /** interface handle for the ith packet */
+ u32 sw_if_index;
+ /** graph arc index selected for this packet */
+ u32 next_index;
+ /** classifier table which provided the final result */
+ u32 table_index;
+ /** offset in classifier heap of the corresponding session */
+ u32 session_offset;
+} l2_output_classify_trace_t;
+
+typedef struct
+{
+ /** use-case independent main object pointer */
+ vnet_classify_main_t *vcm;
+  /** l2 output classifier main object pointer */
+ l2_output_classify_main_t *l2cm;
+} l2_output_classify_runtime_t;
+
+/** Packet trace format function. */
+static u8 *
+format_l2_output_classify_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2_output_classify_trace_t *t =
+ va_arg (*args, l2_output_classify_trace_t *);
+
+ s = format (s, "l2-classify: sw_if_index %d, table %d, offset %x, next %d",
+ t->sw_if_index, t->table_index, t->session_offset,
+ t->next_index);
+ return s;
+}
+
+/** l2 output classifier main data structure. */
+l2_output_classify_main_t l2_output_classify_main;
+
+vlib_node_registration_t l2_output_classify_node;
+
+#define foreach_l2_output_classify_error \
+_(MISS, "Classify misses") \
+_(HIT, "Classify hits") \
+_(CHAIN_HIT, "Classify hits after chain walk") \
+_(DROP, "L2 Classify Drops")
+
+typedef enum
+{
+#define _(sym,str) L2_OUTPUT_CLASSIFY_ERROR_##sym,
+ foreach_l2_output_classify_error
+#undef _
+ L2_OUTPUT_CLASSIFY_N_ERROR,
+} l2_output_classify_error_t;
+
+static char *l2_output_classify_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2_output_classify_error
+#undef _
+};
+
+/**
+ * @brief l2 output classifier node.
+ * @node l2-output-classify
+ *
+ * This is the l2 output classifier dispatch node
+ *
+ * @param vm vlib_main_t corresponding to the current thread.
+ * @param node vlib_node_runtime_t data for this node.
+ * @param frame vlib_frame_t whose contents should be dispatched.
+ *
+ * @par Graph mechanics: buffer metadata, next index usage
+ *
+ * @em Uses:
+ * - <code>(l2_output_classify_runtime_t *)
+ * rt->classify_table_index_by_sw_if_index</code>
+ *	Head of the per-interface, per-protocol classifier table chain
+ * for a specific interface. ~0 => send pkts to the next
+ * feature in the L2 feature chain.
+ * - <code>vnet_buffer(b)->sw_if_index[VLIB_TX]</code>
+ *  - Indicates the @c sw_if_index value of the interface that the
+ *	packet will be transmitted on.
+ * - <code>vnet_buffer (b0)->l2.feature_bitmap</code>
+ * - Used to steer packets across l2 features enabled on the interface
+ * - <code>(vnet_classify_entry_t) e0->next_index</code>
+ * - Used to steer traffic when the classifier hits on a session
+ * - <code>(vnet_classify_entry_t) e0->advance</code>
+ * - Signed quantity applied via <code>vlib_buffer_advance</code>
+ * when the classifier hits on a session
+ * - <code>(vnet_classify_table_t) t0->miss_next_index</code>
+ * - Used to steer traffic when the classifier misses
+ *
+ * @em Sets:
+ * - <code>vnet_buffer (b0)->l2_classify.table_index</code>
+ * - Classifier table index of the first classifier table in
+ * the classifier table chain
+ * - <code>vnet_buffer (b0)->l2_classify.hash</code>
+ * - Bounded-index extensible hash corresponding to the
+ * masked fields in the current packet
+ * - <code>vnet_buffer (b0)->l2.feature_bitmap</code>
+ * - Used to steer packets across l2 features enabled on the interface
+ * - <code>vnet_buffer (b0)->l2_classify.opaque_index</code>
+ * - Copied from the classifier session object upon classifier hit
+ *
+ * @em Counters:
+ * - <code>L2_OUTPUT_CLASSIFY_ERROR_MISS</code> Classifier misses
+ * - <code>L2_OUTPUT_CLASSIFY_ERROR_HIT</code> Classifier hits
+ * - <code>L2_OUTPUT_CLASSIFY_ERROR_CHAIN_HIT</code>
+ * Classifier hits in other than the first table
+ */
+
+static uword
+l2_output_classify_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ l2_output_classify_next_t next_index;
+ l2_output_classify_main_t *cm = &l2_output_classify_main;
+ vnet_classify_main_t *vcm = cm->vnet_classify_main;
+ l2_output_classify_runtime_t *rt =
+ (l2_output_classify_runtime_t *) node->runtime_data;
+ u32 feature_bitmap0;
+ u32 hits = 0;
+ u32 misses = 0;
+ u32 chain_hits = 0;
+ f64 now;
+ u32 n_next_nodes;
+ u32 cached_sw_if_index = (u32) ~ 0;
+ u32 cached_next_index = (u32) ~ 0;
+ u32 sw_if_index0;
+
+ n_next_nodes = node->n_next_nodes;
+
+ now = vlib_time_now (vm);
+
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+
+ /* First pass: compute hash */
+
+ while (n_left_from > 2)
+ {
+ vlib_buffer_t *b0, *b1;
+ u32 bi0, bi1;
+ ethernet_header_t *h0, *h1;
+ u32 sw_if_index0, sw_if_index1;
+ u16 type0, type1;
+ int type_index0, type_index1;
+ vnet_classify_table_t *t0, *t1;
+ u32 table_index0, table_index1;
+ u64 hash0, hash1;
+
+
+ /* prefetch next iteration */
+ {
+ vlib_buffer_t *p1, *p2;
+
+ p1 = vlib_get_buffer (vm, from[1]);
+ p2 = vlib_get_buffer (vm, from[2]);
+
+ vlib_prefetch_buffer_header (p1, STORE);
+ CLIB_PREFETCH (p1->data, CLIB_CACHE_LINE_BYTES, STORE);
+ vlib_prefetch_buffer_header (p2, STORE);
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = vlib_buffer_get_current (b0);
+
+ bi1 = from[1];
+ b1 = vlib_get_buffer (vm, bi1);
+ h1 = vlib_buffer_get_current (b1);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ vnet_buffer (b0)->l2_classify.table_index = ~0;
+
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
+ vnet_buffer (b1)->l2_classify.table_index = ~0;
+
+ /* Select classifier table based on ethertype */
+ type0 = clib_net_to_host_u16 (h0->type);
+ type1 = clib_net_to_host_u16 (h1->type);
+
+ type_index0 = (type0 == ETHERNET_TYPE_IP4)
+ ? L2_OUTPUT_CLASSIFY_TABLE_IP4 : L2_OUTPUT_CLASSIFY_TABLE_OTHER;
+ type_index0 = (type0 == ETHERNET_TYPE_IP6)
+ ? L2_OUTPUT_CLASSIFY_TABLE_IP6 : type_index0;
+
+ type_index1 = (type1 == ETHERNET_TYPE_IP4)
+ ? L2_OUTPUT_CLASSIFY_TABLE_IP4 : L2_OUTPUT_CLASSIFY_TABLE_OTHER;
+ type_index1 = (type1 == ETHERNET_TYPE_IP6)
+ ? L2_OUTPUT_CLASSIFY_TABLE_IP6 : type_index1;
+
+ vnet_buffer (b0)->l2_classify.table_index =
+ table_index0 =
+ rt->l2cm->classify_table_index_by_sw_if_index
+ [type_index0][sw_if_index0];
+
+ if (table_index0 != ~0)
+ {
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ vnet_buffer (b0)->l2_classify.hash = hash0 =
+ vnet_classify_hash_packet (t0, (u8 *) h0);
+ vnet_classify_prefetch_bucket (t0, hash0);
+ }
+
+ vnet_buffer (b1)->l2_classify.table_index =
+ table_index1 =
+ rt->l2cm->classify_table_index_by_sw_if_index
+ [type_index1][sw_if_index1];
+
+ if (table_index1 != ~0)
+ {
+ t1 = pool_elt_at_index (vcm->tables, table_index1);
+
+ vnet_buffer (b1)->l2_classify.hash = hash1 =
+ vnet_classify_hash_packet (t1, (u8 *) h1);
+ vnet_classify_prefetch_bucket (t1, hash1);
+ }
+
+ from += 2;
+ n_left_from -= 2;
+ }
+
+ while (n_left_from > 0)
+ {
+ vlib_buffer_t *b0;
+ u32 bi0;
+ ethernet_header_t *h0;
+ u16 type0;
+ u32 type_index0;
+ vnet_classify_table_t *t0;
+ u32 table_index0;
+ u64 hash0;
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = vlib_buffer_get_current (b0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ vnet_buffer (b0)->l2_classify.table_index = ~0;
+
+ /* Select classifier table based on ethertype */
+ type0 = clib_net_to_host_u16 (h0->type);
+
+ type_index0 = (type0 == ETHERNET_TYPE_IP4)
+ ? L2_OUTPUT_CLASSIFY_TABLE_IP4 : L2_OUTPUT_CLASSIFY_TABLE_OTHER;
+ type_index0 = (type0 == ETHERNET_TYPE_IP6)
+ ? L2_OUTPUT_CLASSIFY_TABLE_IP6 : type_index0;
+
+ vnet_buffer (b0)->l2_classify.table_index =
+ table_index0 = rt->l2cm->classify_table_index_by_sw_if_index
+ [type_index0][sw_if_index0];
+
+ if (table_index0 != ~0)
+ {
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ vnet_buffer (b0)->l2_classify.hash = hash0 =
+ vnet_classify_hash_packet (t0, (u8 *) h0);
+ vnet_classify_prefetch_bucket (t0, hash0);
+ }
+ from++;
+ n_left_from--;
+ }
+
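+  /* Second pass: walk the classifier tables and pick next nodes */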
+ next_index = node->cached_next_index;
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Not enough load/store slots to dual loop... */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0 = ~0;
+ ethernet_header_t *h0;
+ u32 table_index0;
+ u64 hash0;
+ vnet_classify_table_t *t0;
+ vnet_classify_entry_t *e0;
+
+ if (PREDICT_TRUE (n_left_from > 2))
+ {
+ vlib_buffer_t *p2 = vlib_get_buffer (vm, from[2]);
+ u64 phash2;
+ u32 table_index2;
+ vnet_classify_table_t *tp2;
+
+ /*
+ * Prefetch table entry two ahead. Buffer / data
+ * were prefetched above...
+ */
+ table_index2 = vnet_buffer (p2)->l2_classify.table_index;
+
+ if (PREDICT_TRUE (table_index2 != ~0))
+ {
+ tp2 = pool_elt_at_index (vcm->tables, table_index2);
+ phash2 = vnet_buffer (p2)->l2_classify.hash;
+ vnet_classify_prefetch_entry (tp2, phash2);
+ }
+ }
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = vlib_buffer_get_current (b0);
+ table_index0 = vnet_buffer (b0)->l2_classify.table_index;
+ e0 = 0;
+ vnet_buffer (b0)->l2_classify.opaque_index = ~0;
+ /* Remove ourself from the feature bitmap */
+ feature_bitmap0 = vnet_buffer (b0)->l2.feature_bitmap
+ & ~L2OUTPUT_FEAT_OUTPUT_CLASSIFY;
+
+ /* save for next feature graph nodes */
+ vnet_buffer (b0)->l2.feature_bitmap = feature_bitmap0;
+
+ if (PREDICT_TRUE (table_index0 != ~0))
+ {
+ hash0 = vnet_buffer (b0)->l2_classify.hash;
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
+ if (e0)
+ {
+ vnet_buffer (b0)->l2_classify.opaque_index
+ = e0->opaque_index;
+ vlib_buffer_advance (b0, e0->advance);
+ next0 = (e0->next_index < n_next_nodes) ?
+ e0->next_index : next0;
+ hits++;
+ }
+ else
+ {
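+		  /*
+		   * Miss in the first table: walk the chained tables
+		   * until a session hits, or use the miss_next_index of
+		   * the last table in the chain.
+		   */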
+ while (1)
+ {
+ if (t0->next_table_index != ~0)
+ t0 = pool_elt_at_index (vcm->tables,
+ t0->next_table_index);
+ else
+ {
+ next0 = (t0->miss_next_index < n_next_nodes) ?
+ t0->miss_next_index : next0;
+ misses++;
+ break;
+ }
+
+ hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
+ e0 =
+ vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
+ if (e0)
+ {
+ vnet_buffer (b0)->l2_classify.opaque_index
+ = e0->opaque_index;
+ vlib_buffer_advance (b0, e0->advance);
+ next0 = (e0->next_index < n_next_nodes) ?
+ e0->next_index : next0;
+ hits++;
+ chain_hits++;
+ break;
+ }
+ }
+ }
+ }
+
+ if (PREDICT_FALSE (next0 == 0))
+ b0->error = node->errors[L2_OUTPUT_CLASSIFY_ERROR_DROP];
+
+ if (PREDICT_FALSE (next0 == ~0))
+ {
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+
+ /* Determine next node */
+ l2_output_dispatch (cm->vlib_main,
+ cm->vnet_main,
+ node,
+ l2_output_classify_node.index,
+ &cached_sw_if_index,
+ &cached_next_index,
+ &cm->next_nodes,
+ b0, sw_if_index0, feature_bitmap0, &next0);
+ }
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ l2_output_classify_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ t->table_index = table_index0;
+ t->next_index = next0;
+ t->session_offset = e0 ? vnet_classify_get_offset (t0, e0) : 0;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ L2_OUTPUT_CLASSIFY_ERROR_MISS, misses);
+ vlib_node_increment_counter (vm, node->node_index,
+ L2_OUTPUT_CLASSIFY_ERROR_HIT, hits);
+ vlib_node_increment_counter (vm, node->node_index,
+ L2_OUTPUT_CLASSIFY_ERROR_CHAIN_HIT,
+ chain_hits);
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2_output_classify_node) = {
+ .function = l2_output_classify_node_fn,
+ .name = "l2-output-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2_output_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2_output_classify_error_strings),
+ .error_strings = l2_output_classify_error_strings,
+
+ .runtime_data_bytes = sizeof (l2_output_classify_runtime_t),
+
+ .n_next_nodes = L2_OUTPUT_CLASSIFY_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [L2_OUTPUT_CLASSIFY_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2_output_classify_node,
+ l2_output_classify_node_fn);
+
+/** l2 output classifier feature initialization. */
+clib_error_t *
+l2_output_classify_init (vlib_main_t * vm)
+{
+ l2_output_classify_main_t *cm = &l2_output_classify_main;
+ l2_output_classify_runtime_t *rt;
+
+ rt = vlib_node_get_runtime_data (vm, l2_output_classify_node.index);
+
+ cm->vlib_main = vm;
+ cm->vnet_main = vnet_get_main ();
+ cm->vnet_classify_main = &vnet_classify_main;
+
+ /* Initialize the feature next-node indexes */
+ feat_bitmap_init_next_nodes (vm,
+ l2_output_classify_node.index,
+ L2OUTPUT_N_FEAT,
+ l2output_get_feat_names (),
+ cm->feat_next_node_index);
+ rt->l2cm = cm;
+ rt->vcm = cm->vnet_classify_main;
+
+ /* Initialize the output node mapping table */
+ l2output_init_output_node_vec (&cm->next_nodes.output_node_index_vec);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2_output_classify_init);
+
+/** Enable/disable l2 output classification on a specific interface. */
+void
+vnet_l2_output_classify_enable_disable (u32 sw_if_index, int enable_disable)
+{
+ l2output_intf_bitmap_enable (sw_if_index, L2OUTPUT_FEAT_OUTPUT_CLASSIFY,
+ (u32) enable_disable);
+}
+
+/** @brief Set l2 per-protocol, per-interface output classification tables.
+ *
+ * @param sw_if_index interface handle
+ * @param ip4_table_index ip4 classification table index, or ~0
+ * @param ip6_table_index ip6 classification table index, or ~0
+ * @param other_table_index non-ip4, non-ip6 classification table index,
+ * or ~0
+ * @returns 0 on success; VNET_API_ERROR_NO_SUCH_TABLE,
+ * VNET_API_ERROR_NO_SUCH_TABLE2, or VNET_API_ERROR_NO_SUCH_TABLE3 if the
+ * indicated (non-~0) ip4, ip6, or other table, respectively, does not exist.
+ */
+
+int
+vnet_l2_output_classify_set_tables (u32 sw_if_index,
+ u32 ip4_table_index,
+ u32 ip6_table_index,
+ u32 other_table_index)
+{
+ l2_output_classify_main_t *cm = &l2_output_classify_main;
+ vnet_classify_main_t *vcm = cm->vnet_classify_main;
+
+ /* Assume that we've validated sw_if_index in the API layer */
+
+ if (ip4_table_index != ~0 &&
+ pool_is_free_index (vcm->tables, ip4_table_index))
+ return VNET_API_ERROR_NO_SUCH_TABLE;
+
+ if (ip6_table_index != ~0 &&
+ pool_is_free_index (vcm->tables, ip6_table_index))
+ return VNET_API_ERROR_NO_SUCH_TABLE2;
+
+ if (other_table_index != ~0 &&
+ pool_is_free_index (vcm->tables, other_table_index))
+ return VNET_API_ERROR_NO_SUCH_TABLE3;
+
+ vec_validate
+ (cm->classify_table_index_by_sw_if_index[L2_OUTPUT_CLASSIFY_TABLE_IP4],
+ sw_if_index);
+
+ vec_validate
+ (cm->classify_table_index_by_sw_if_index[L2_OUTPUT_CLASSIFY_TABLE_IP6],
+ sw_if_index);
+
+ vec_validate
+ (cm->classify_table_index_by_sw_if_index[L2_OUTPUT_CLASSIFY_TABLE_OTHER],
+ sw_if_index);
+
+ cm->classify_table_index_by_sw_if_index[L2_OUTPUT_CLASSIFY_TABLE_IP4]
+ [sw_if_index] = ip4_table_index;
+
+ cm->classify_table_index_by_sw_if_index[L2_OUTPUT_CLASSIFY_TABLE_IP6]
+ [sw_if_index] = ip6_table_index;
+
+ cm->classify_table_index_by_sw_if_index[L2_OUTPUT_CLASSIFY_TABLE_OTHER]
+ [sw_if_index] = other_table_index;
+
+ return 0;
+}
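+
+/*
+ * Usage sketch (illustrative, not part of the API): a handler that has
+ * already validated sw_if_index might attach only an ip4 table and pass
+ * ~0 for the ip6 and other tables, leaving those protocols unclassified:
+ *
+ *   rv = vnet_l2_output_classify_set_tables (sw_if_index,
+ *                                            ip4_table_index, ~0, ~0);
+ *   if (rv == 0)
+ *     vnet_l2_output_classify_enable_disable (sw_if_index, 1);
+ */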
+
+static clib_error_t *
+int_l2_output_classify_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 sw_if_index = ~0;
+ u32 ip4_table_index = ~0;
+ u32 ip6_table_index = ~0;
+ u32 other_table_index = ~0;
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "intfc %U", unformat_vnet_sw_interface,
+ vnm, &sw_if_index))
+ ;
+ else if (unformat (input, "ip4-table %d", &ip4_table_index))
+ ;
+ else if (unformat (input, "ip6-table %d", &ip6_table_index))
+ ;
+ else if (unformat (input, "other-table %d", &other_table_index))
+ ;
+ else
+ break;
+ }
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "interface must be specified");
+
+ if (ip4_table_index == ~0 && ip6_table_index == ~0
+ && other_table_index == ~0)
+ {
+ vlib_cli_output (vm, "L2 output classification disabled");
+ vnet_l2_output_classify_enable_disable (sw_if_index, 0 /* enable */ );
+ return 0;
+ }
+
+ rv = vnet_l2_output_classify_set_tables (sw_if_index, ip4_table_index,
+ ip6_table_index,
+ other_table_index);
+ switch (rv)
+ {
+ case 0:
+ vnet_l2_output_classify_enable_disable (sw_if_index, 1 /* enable */ );
+ break;
+
+ default:
+ return clib_error_return (0, "vnet_l2_output_classify_set_tables: %d",
+ rv);
+ break;
+ }
+
+ return 0;
+}
+
+/*?
+ * Configure Layer 2 output classification.
+ *
+ * @cliexpar
+ * @cliexstart{set interface l2 output classify intfc <interface-name> [ip4-table <index>] [ip6-table <index>] [other-table <index>]}
+ * @cliexend
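+ *
+ * For example, to attach ip4 classify table 0 on an interface (the
+ * interface name is illustrative):
+ * @cliexcmd{set interface l2 output classify intfc GigabitEthernet0/8/0 ip4-table 0}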
+ * @todo This is incomplete. This needs a detailed description and a
+ * practical example.
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (int_l2_output_classify_cli, static) = {
+ .path = "set interface l2 output classify",
+ .short_help =
+ "set interface l2 output classify intfc <<interface-name>> [ip4-table <n>]\n"
+ " [ip6-table <n>] [other-table <n>]",
+ .function = int_l2_output_classify_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_patch.c b/src/vnet/l2/l2_patch.c
new file mode 100644
index 00000000000..5e4691f45c7
--- /dev/null
+++ b/src/vnet/l2/l2_patch.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/feature/feature.h>
+#include <vppinfra/error.h>
+
+typedef struct
+{
+ /* vector of dispositions, indexed by rx_sw_if_index */
+ u32 *tx_next_by_rx_sw_if_index;
+ u32 *tx_sw_if_index_by_rx_sw_if_index;
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} l2_patch_main_t;
+
+typedef struct
+{
+ u32 rx_sw_if_index;
+ u32 tx_sw_if_index;
+} l2_patch_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_l2_patch_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2_patch_trace_t *t = va_arg (*args, l2_patch_trace_t *);
+
+ s = format (s, "L2_PATCH: rx %d tx %d", t->rx_sw_if_index,
+ t->tx_sw_if_index);
+ return s;
+}
+
+l2_patch_main_t l2_patch_main;
+
+static vlib_node_registration_t l2_patch_node;
+
+#define foreach_l2_patch_error \
+_(PATCHED, "L2 patch packets") \
+_(DROPPED, "L2 patch misconfigured drops")
+
+typedef enum
+{
+#define _(sym,str) L2_PATCH_ERROR_##sym,
+ foreach_l2_patch_error
+#undef _
+ L2_PATCH_N_ERROR,
+} l2_patch_error_t;
+
+static char *l2_patch_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2_patch_error
+#undef _
+};
+
+typedef enum
+{
+ L2_PATCH_NEXT_DROP,
+ L2_PATCH_N_NEXT,
+} l2_patch_next_t;
+
+static uword
+l2_patch_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ l2_patch_next_t next_index;
+ l2_patch_main_t *l2pm = &l2_patch_main;
+ vlib_node_t *n = vlib_get_node (vm, l2_patch_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t *em = &vm->error_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ /* So stupid / simple, we don't need to prefetch data */
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+ ASSERT (l2pm->tx_next_by_rx_sw_if_index[sw_if_index0] != ~0);
+ ASSERT (l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0] != ~0);
+ ASSERT (l2pm->tx_next_by_rx_sw_if_index[sw_if_index1] != ~0);
+ ASSERT (l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index1] != ~0);
+
+ next0 = l2pm->tx_next_by_rx_sw_if_index[sw_if_index0];
+ next1 = l2pm->tx_next_by_rx_sw_if_index[sw_if_index1];
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] =
+ l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0];
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] =
+ l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index1];
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2_patch_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->rx_sw_if_index = sw_if_index0;
+ t->tx_sw_if_index =
+ l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0];
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2_patch_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->rx_sw_if_index = sw_if_index1;
+ t->tx_sw_if_index =
+ l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index1];
+ }
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u32 sw_if_index0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ ASSERT (l2pm->tx_next_by_rx_sw_if_index[sw_if_index0] != ~0);
+ ASSERT (l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0] != ~0);
+
+ next0 = l2pm->tx_next_by_rx_sw_if_index[sw_if_index0];
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] =
+ l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0];
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2_patch_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->rx_sw_if_index = sw_if_index0;
+ t->tx_sw_if_index =
+ l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0];
+ }
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ em->counters[node_counter_base_index + L2_PATCH_ERROR_PATCHED] +=
+ frame->n_vectors;
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2_patch_node, static) = {
+ .function = l2_patch_node_fn,
+ .name = "l2-patch",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2_patch_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2_patch_error_strings),
+ .error_strings = l2_patch_error_strings,
+
+ .n_next_nodes = L2_PATCH_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [L2_PATCH_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2_patch_node, l2_patch_node_fn)
+
+int
+vnet_l2_patch_add_del (u32 rx_sw_if_index, u32 tx_sw_if_index, int is_add)
+{
+ l2_patch_main_t *l2pm = &l2_patch_main;
+ vnet_hw_interface_t *rxhi, *txhi;
+ u32 tx_next_index;
+
+ /*
+ * We assume that the API msg handler has used 2x VALIDATE_SW_IF_INDEX
+ * macros...
+ */
+
+ rxhi = vnet_get_sup_hw_interface (l2pm->vnet_main, rx_sw_if_index);
+
+ /* Make sure caller didn't pass a vlan subif, etc. */
+ if (rxhi->sw_if_index != rx_sw_if_index)
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+
+ txhi = vnet_get_sup_hw_interface (l2pm->vnet_main, tx_sw_if_index);
+ if (txhi->sw_if_index != tx_sw_if_index)
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX_2;
+
+ if (is_add)
+ {
+ tx_next_index = vlib_node_add_next (l2pm->vlib_main,
+ l2_patch_node.index,
+ txhi->output_node_index);
+
+ vec_validate_init_empty (l2pm->tx_next_by_rx_sw_if_index,
+ rx_sw_if_index, ~0);
+
+ l2pm->tx_next_by_rx_sw_if_index[rx_sw_if_index] = tx_next_index;
+ vec_validate_init_empty (l2pm->tx_sw_if_index_by_rx_sw_if_index,
+ rx_sw_if_index, ~0);
+ l2pm->tx_sw_if_index_by_rx_sw_if_index[rx_sw_if_index]
+ = txhi->sw_if_index;
+
+ ethernet_set_flags (l2pm->vnet_main, rxhi->hw_if_index,
+ ETHERNET_INTERFACE_FLAG_ACCEPT_ALL);
+
+ vnet_feature_enable_disable ("device-input", "l2-patch",
+ rxhi->hw_if_index, 1, 0, 0);
+ }
+ else
+ {
+ ethernet_set_flags (l2pm->vnet_main, rxhi->hw_if_index,
+ 0 /* disable promiscuous mode */ );
+
+ vnet_feature_enable_disable ("device-input", "l2-patch",
+ rxhi->hw_if_index, 0, 0, 0);
+ if (vec_len (l2pm->tx_next_by_rx_sw_if_index) > rx_sw_if_index)
+ {
+ l2pm->tx_next_by_rx_sw_if_index[rx_sw_if_index] = ~0;
+ l2pm->tx_sw_if_index_by_rx_sw_if_index[rx_sw_if_index] = ~0;
+ }
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+test_patch_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ l2_patch_main_t *l2pm = &l2_patch_main;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 rx_sw_if_index, tx_sw_if_index;
+ int rv;
+ int rx_set = 0;
+ int tx_set = 0;
+ int is_add = 1;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "rx %U", unformat_vnet_sw_interface,
+ l2pm->vnet_main, &rx_sw_if_index))
+ rx_set = 1;
+ else if (unformat (line_input, "tx %U", unformat_vnet_sw_interface,
+ l2pm->vnet_main, &tx_sw_if_index))
+ tx_set = 1;
+ else if (unformat (line_input, "del"))
+ is_add = 0;
+ else
+ break;
+ }
+
+ if (rx_set == 0)
+ return clib_error_return (0, "rx interface not set");
+
+ if (tx_set == 0)
+ return clib_error_return (0, "tx interface not set");
+
+ rv = vnet_l2_patch_add_del (rx_sw_if_index, tx_sw_if_index, is_add);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_INVALID_SW_IF_INDEX:
+ return clib_error_return (0, "rx interface not a physical port");
+
+ case VNET_API_ERROR_INVALID_SW_IF_INDEX_2:
+ return clib_error_return (0, "tx interface not a physical port");
+
+ default:
+ return clib_error_return
+ (0, "WARNING: vnet_l2_patch_add_del returned %d", rv);
+ }
+
+ return 0;
+}
+
+/*?
+ * Create or delete a Layer 2 patch.
+ *
+ * @cliexpar
+ * @cliexstart{test l2patch rx <intfc> tx <intfc> [del]}
+ * @cliexend
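+ *
+ * For example, to cross-connect two physical interfaces (names
+ * illustrative) and later remove the patch:
+ * @cliexcmd{test l2patch rx GigabitEthernet0/8/0 tx GigabitEthernet0/9/0}
+ * @cliexcmd{test l2patch rx GigabitEthernet0/8/0 tx GigabitEthernet0/9/0 del}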
+ * @todo This is incomplete. This needs a detailed description and a
+ * practical example.
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (test_patch_command, static) = {
+ .path = "test l2patch",
+ .short_help = "test l2patch rx <intfc> tx <intfc> [del]",
+ .function = test_patch_command_fn,
+};
+/* *INDENT-ON* */
+
+/** Display the contents of the l2patch table. */
+static clib_error_t *
+show_l2patch (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ l2_patch_main_t *l2pm = &l2_patch_main;
+ u32 rx_sw_if_index;
+ u32 no_entries = 1;
+
+ ASSERT (vec_len (l2pm->tx_next_by_rx_sw_if_index) ==
+ vec_len (l2pm->tx_sw_if_index_by_rx_sw_if_index));
+
+ for (rx_sw_if_index = 0;
+ rx_sw_if_index < vec_len (l2pm->tx_sw_if_index_by_rx_sw_if_index);
+ rx_sw_if_index++)
+ {
+ u32 tx_sw_if_index =
+ l2pm->tx_sw_if_index_by_rx_sw_if_index[rx_sw_if_index];
+ if (tx_sw_if_index != ~0)
+ {
+ no_entries = 0;
+ vlib_cli_output (vm, "%26U -> %U",
+ format_vnet_sw_if_index_name,
+ l2pm->vnet_main, rx_sw_if_index,
+ format_vnet_sw_if_index_name,
+ l2pm->vnet_main, tx_sw_if_index);
+ }
+ }
+
+ if (no_entries)
+ vlib_cli_output (vm, "no l2patch entries");
+
+ return 0;
+}
+
+/*?
+ * Show Layer 2 patch entries.
+ *
+ * @cliexpar
+ * @cliexstart{show l2patch}
+ * @cliexend
+ * @todo This is incomplete. This needs a detailed description and a
+ * practical example.
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_l2patch_cli, static) = {
+ .path = "show l2patch",
+ .short_help = "Show l2 interface cross-connect entries",
+ .function = show_l2patch,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+l2_patch_init (vlib_main_t * vm)
+{
+ l2_patch_main_t *mp = &l2_patch_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main ();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2_patch_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_rw.c b/src/vnet/l2/l2_rw.c
new file mode 100644
index 00000000000..c54509d048b
--- /dev/null
+++ b/src/vnet/l2/l2_rw.c
@@ -0,0 +1,719 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_rw.h>
+
+/**
+ * @file
+ * @brief Layer 2 Rewrite.
+ *
+ * The Layer 2 Rewrite node uses classify tables to match packets. Then,
+ * using the provisioned mask and value, it modifies the packet header.
+ */
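+
+/*
+ * Conceptually, each rewrite entry applies, to every 16-byte vector i of
+ * the packet data following the skip region:
+ *
+ *   packet[i] = (packet[i] & ~mask[i]) | value[i]
+ *
+ * l2_rw_mod_entry pre-masks the configured values, so value[i] never sets
+ * a bit outside mask[i].
+ */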
+
+
+l2_rw_main_t l2_rw_main;
+
+vlib_node_registration_t l2_rw_node;
+
+typedef struct
+{
+ u32 sw_if_index;
+ u32 classify_table_index;
+ u32 rewrite_entry_index;
+} l2_rw_trace_t;
+
+static u8 *
+format_l2_rw_entry (u8 * s, va_list * args)
+{
+ l2_rw_entry_t *e = va_arg (*args, l2_rw_entry_t *);
+ l2_rw_main_t *rw = &l2_rw_main;
+ s = format (s, "%d - mask:%U value:%U\n",
+ e - rw->entries,
+ format_hex_bytes, e->mask,
+ e->rewrite_n_vectors * sizeof (u32x4), format_hex_bytes,
+ e->value, e->rewrite_n_vectors * sizeof (u32x4));
+ s =
+ format (s, " hits:%d skip_bytes:%d", e->hit_count,
+ e->skip_n_vectors * sizeof (u32x4));
+ return s;
+}
+
+static u8 *
+format_l2_rw_config (u8 * s, va_list * args)
+{
+ l2_rw_config_t *c = va_arg (*args, l2_rw_config_t *);
+ return format (s, "table-index:%d miss-index:%d",
+ c->table_index, c->miss_index);
+}
+
+/* packet trace format function */
+static u8 *
+format_l2_rw_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2_rw_trace_t *t = va_arg (*args, l2_rw_trace_t *);
+ return format (s, "l2-rw: sw_if_index %d, table %d, entry %d",
+ t->sw_if_index, t->classify_table_index,
+ t->rewrite_entry_index);
+}
+
+always_inline l2_rw_config_t *
+l2_rw_get_config (u32 sw_if_index)
+{
+ l2_rw_main_t *rw = &l2_rw_main;
+ if (PREDICT_FALSE (!clib_bitmap_get (rw->configs_bitmap, sw_if_index)))
+ {
+ vec_validate (rw->configs, sw_if_index);
+ rw->configs[sw_if_index].table_index = ~0;
+ rw->configs[sw_if_index].miss_index = ~0;
+ rw->configs_bitmap =
+ clib_bitmap_set (rw->configs_bitmap, sw_if_index, 1);
+ }
+ return &rw->configs[sw_if_index];
+}
+
+static_always_inline void
+l2_rw_rewrite (l2_rw_entry_t * rwe, u8 * h)
+{
+ if (U32X4_ALIGNED (h))
+ {
+ u32x4 *d = ((u32x4 *) h) + rwe->skip_n_vectors;
+ switch (rwe->rewrite_n_vectors)
+ {
+ case 5:
+ d[4] = (d[4] & ~rwe->mask[4]) | rwe->value[4];
+ /* FALLTHROUGH */
+ case 4:
+ d[3] = (d[3] & ~rwe->mask[3]) | rwe->value[3];
+ /* FALLTHROUGH */
+ case 3:
+ d[2] = (d[2] & ~rwe->mask[2]) | rwe->value[2];
+ /* FALLTHROUGH */
+ case 2:
+ d[1] = (d[1] & ~rwe->mask[1]) | rwe->value[1];
+ /* FALLTHROUGH */
+ case 1:
+ d[0] = (d[0] & ~rwe->mask[0]) | rwe->value[0];
+ break;
+ default:
+ abort ();
+ }
+ }
+ else
+ {
+ u64 *d = ((u64 *) h) + rwe->skip_n_vectors * 2;
+ switch (rwe->rewrite_n_vectors)
+ {
+ case 5:
+ d[8] =
+ (d[8] & ~(((u64 *) rwe->mask)[8])) | (((u64 *) rwe->value)[8]);
+ d[9] =
+ (d[9] & ~(((u64 *) rwe->mask)[9])) | (((u64 *) rwe->value)[9]);
+ /* FALLTHROUGH */
+ case 4:
+ d[6] =
+ (d[6] & ~(((u64 *) rwe->mask)[6])) | (((u64 *) rwe->value)[6]);
+ d[7] =
+ (d[7] & ~(((u64 *) rwe->mask)[7])) | (((u64 *) rwe->value)[7]);
+ /* FALLTHROUGH */
+ case 3:
+ d[4] =
+ (d[4] & ~(((u64 *) rwe->mask)[4])) | (((u64 *) rwe->value)[4]);
+ d[5] =
+ (d[5] & ~(((u64 *) rwe->mask)[5])) | (((u64 *) rwe->value)[5]);
+ /* FALLTHROUGH */
+ case 2:
+ d[2] =
+ (d[2] & ~(((u64 *) rwe->mask)[2])) | (((u64 *) rwe->value)[2]);
+ d[3] =
+ (d[3] & ~(((u64 *) rwe->mask)[3])) | (((u64 *) rwe->value)[3]);
+ /* FALLTHROUGH */
+ case 1:
+ d[0] =
+ (d[0] & ~(((u64 *) rwe->mask)[0])) | (((u64 *) rwe->value)[0]);
+ d[1] =
+ (d[1] & ~(((u64 *) rwe->mask)[1])) | (((u64 *) rwe->value)[1]);
+ break;
+ default:
+ abort ();
+ }
+ }
+}
+
+static uword
+l2_rw_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ l2_rw_main_t *rw = &l2_rw_main;
+ u32 n_left_from, *from, *to_next, next_index;
+ vnet_classify_main_t *vcm = &vnet_classify_main;
+ f64 now = vlib_time_now (vlib_get_main ());
+ u32 prefetch_size = 0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors; /* number of packets to process */
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ /* get space to enqueue frame to graph node "next_index" */
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, next0, sw_if_index0, feature_bitmap0, rwe_index0;
+ u32 bi1, next1, sw_if_index1, feature_bitmap1, rwe_index1;
+ vlib_buffer_t *b0, *b1;
+ ethernet_header_t *h0, *h1;
+ l2_rw_config_t *config0, *config1;
+ u64 hash0, hash1;
+ vnet_classify_table_t *t0, *t1;
+ vnet_classify_entry_t *e0, *e1;
+ l2_rw_entry_t *rwe0, *rwe1;
+
+ {
+ vlib_buffer_t *p2, *p3;
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+ CLIB_PREFETCH (vlib_buffer_get_current (p2), prefetch_size, LOAD);
+ CLIB_PREFETCH (vlib_buffer_get_current (p3), prefetch_size, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ h0 = vlib_buffer_get_current (b0);
+ h1 = vlib_buffer_get_current (b1);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ config0 = l2_rw_get_config (sw_if_index0); /*TODO: check sw_if_index0 value */
+ config1 = l2_rw_get_config (sw_if_index1); /*TODO: check sw_if_index1 value */
+ t0 = pool_elt_at_index (vcm->tables, config0->table_index);
+ t1 = pool_elt_at_index (vcm->tables, config1->table_index);
+ prefetch_size =
+ (t1->skip_n_vectors + t1->match_n_vectors) * sizeof (u32x4);
+
+ hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
+ hash1 = vnet_classify_hash_packet (t1, (u8 *) h1);
+ e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
+ e1 = vnet_classify_find_entry (t1, (u8 *) h1, hash1, now);
+
+ while (!e0 && (t0->next_table_index != ~0))
+ {
+ t0 = pool_elt_at_index (vcm->tables, t0->next_table_index);
+ hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
+ e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
+ }
+
+ while (!e1 && (t1->next_table_index != ~0))
+ {
+ t1 = pool_elt_at_index (vcm->tables, t1->next_table_index);
+ hash1 = vnet_classify_hash_packet (t1, (u8 *) h1);
+ e1 = vnet_classify_find_entry (t1, (u8 *) h1, hash1, now);
+ }
+
+ rwe_index0 = e0 ? e0->opaque_index : config0->miss_index;
+ rwe_index1 = e1 ? e1->opaque_index : config1->miss_index;
+
+ if (rwe_index0 != ~0)
+ {
+ rwe0 = pool_elt_at_index (rw->entries, rwe_index0);
+ l2_rw_rewrite (rwe0, (u8 *) h0);
+ }
+ if (rwe_index1 != ~0)
+ {
+ rwe1 = pool_elt_at_index (rw->entries, rwe_index1);
+ l2_rw_rewrite (rwe1, (u8 *) h1);
+ }
+
+ if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ l2_rw_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->classify_table_index = config0->table_index;
+ t->rewrite_entry_index = rwe_index0;
+ }
+
+ if (PREDICT_FALSE ((b1->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ l2_rw_trace_t *t = vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->sw_if_index = sw_if_index1;
+ t->classify_table_index = config1->table_index;
+ t->rewrite_entry_index = rwe_index1;
+ }
+
+ /* Update feature bitmap and get next feature index */
+ feature_bitmap0 =
+ vnet_buffer (b0)->l2.feature_bitmap & ~L2INPUT_FEAT_RW;
+ feature_bitmap1 =
+ vnet_buffer (b1)->l2.feature_bitmap & ~L2INPUT_FEAT_RW;
+ vnet_buffer (b0)->l2.feature_bitmap = feature_bitmap0;
+ vnet_buffer (b1)->l2.feature_bitmap = feature_bitmap1;
+ next0 = feat_bitmap_get_next_node_index (rw->feat_next_node_index,
+ feature_bitmap0);
+ next1 = feat_bitmap_get_next_node_index (rw->feat_next_node_index,
+ feature_bitmap1);
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, next0, sw_if_index0, feature_bitmap0, rwe_index0;
+ vlib_buffer_t *b0;
+ ethernet_header_t *h0;
+ l2_rw_config_t *config0;
+ u64 hash0;
+ vnet_classify_table_t *t0;
+ vnet_classify_entry_t *e0;
+ l2_rw_entry_t *rwe0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = vlib_buffer_get_current (b0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ config0 = l2_rw_get_config (sw_if_index0); /*TODO: check sw_if_index0 value */
+ t0 = pool_elt_at_index (vcm->tables, config0->table_index);
+
+ hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
+ e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
+
+ while (!e0 && (t0->next_table_index != ~0))
+ {
+ t0 = pool_elt_at_index (vcm->tables, t0->next_table_index);
+ hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
+ e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
+ }
+
+ rwe_index0 = e0 ? e0->opaque_index : config0->miss_index;
+
+ if (rwe_index0 != ~0)
+ {
+ rwe0 = pool_elt_at_index (rw->entries, rwe_index0);
+ l2_rw_rewrite (rwe0, (u8 *) h0);
+ }
+
+ if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ l2_rw_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->classify_table_index = config0->table_index;
+ t->rewrite_entry_index = rwe_index0;
+ }
+
+ /* Update feature bitmap and get next feature index */
+ feature_bitmap0 =
+ vnet_buffer (b0)->l2.feature_bitmap & ~L2INPUT_FEAT_RW;
+ vnet_buffer (b0)->l2.feature_bitmap = feature_bitmap0;
+ next0 = feat_bitmap_get_next_node_index (rw->feat_next_node_index,
+ feature_bitmap0);
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+int
+l2_rw_mod_entry (u32 * index,
+ u8 * mask, u8 * value, u32 len, u32 skip, u8 is_del)
+{
+ l2_rw_main_t *rw = &l2_rw_main;
+ l2_rw_entry_t *e = 0;
+ if (*index != ~0)
+ {
+ if (pool_is_free_index (rw->entries, *index))
+ {
+ return -1;
+ }
+ e = pool_elt_at_index (rw->entries, *index);
+ }
+ else
+ {
+ pool_get (rw->entries, e);
+ *index = e - rw->entries;
+ }
+
+ if (!e)
+ return -1;
+
+ if (is_del)
+ {
+ pool_put (rw->entries, e);
+ return 0;
+ }
+
+ e->skip_n_vectors = skip / sizeof (u32x4);
+ skip -= e->skip_n_vectors * sizeof (u32x4);
+ e->rewrite_n_vectors = (skip + len - 1) / sizeof (u32x4) + 1;
+ vec_alloc_aligned (e->mask, e->rewrite_n_vectors, sizeof (u32x4));
+ memset (e->mask, 0, e->rewrite_n_vectors * sizeof (u32x4));
+ vec_alloc_aligned (e->value, e->rewrite_n_vectors, sizeof (u32x4));
+ memset (e->value, 0, e->rewrite_n_vectors * sizeof (u32x4));
+
+ clib_memcpy (((u8 *) e->value) + skip, value, len);
+ clib_memcpy (((u8 *) e->mask) + skip, mask, len);
+
+ int i;
+ for (i = 0; i < e->rewrite_n_vectors; i++)
+ {
+ e->value[i] &= e->mask[i];
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+l2_rw_entry_cli_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ u32 index = ~0;
+ u8 *mask = 0;
+ u8 *value = 0;
+ u32 skip = 0;
+ u8 del = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "index %d", &index))
+ ;
+ else if (unformat (input, "mask %U", unformat_hex_string, &mask))
+ ;
+ else if (unformat (input, "value %U", unformat_hex_string, &value))
+ ;
+ else if (unformat (input, "skip %d", &skip))
+ ;
+ else if (unformat (input, "del"))
+ del = 1;
+ else
+ break;
+ }
+
+ if (!mask || !value)
+ return clib_error_return (0, "Unspecified mask or value");
+
+ if (vec_len (mask) != vec_len (value))
+ return clib_error_return (0, "Mask and value lengths must be identical");
+
+ int ret;
+ if ((ret =
+ l2_rw_mod_entry (&index, mask, value, vec_len (mask), skip, del)))
+ return clib_error_return (0, "Could not add entry");
+
+ return 0;
+}
+
+/*?
+ * The Layer 2 Rewrite node uses classify tables to match packets. Then,
+ * using the provisioned mask and value, it modifies the packet header.
+ *
+ * @cliexpar
+ * @todo This is incomplete. This needs a detailed description and a
+ * practical example.
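+ *
+ * For example, to create an entry that overwrites the destination MAC of
+ * matching packets (mask and value are illustrative, equal-length hex
+ * strings):
+ * @cliexcmd{l2 rewrite entry mask ffffffffffff value aabbccddeeff}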
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (l2_rw_entry_cli, static) = {
+ .path = "l2 rewrite entry",
+ .short_help =
+ "l2 rewrite entry [index <index>] [mask <hex-mask>] [value <hex-value>] [skip <n_bytes>] [del]",
+ .function = l2_rw_entry_cli_fn,
+};
+/* *INDENT-ON* */
+
+int
+l2_rw_interface_set_table (u32 sw_if_index, u32 table_index, u32 miss_index)
+{
+ l2_rw_config_t *c = l2_rw_get_config (sw_if_index);
+ l2_rw_main_t *rw = &l2_rw_main;
+
+ c->table_index = table_index;
+ c->miss_index = miss_index;
+ u32 feature_bitmap = (table_index == ~0) ? 0 : L2INPUT_FEAT_RW;
+
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_RW, feature_bitmap);
+
+ if (c->table_index == ~0)
+ clib_bitmap_set (rw->configs_bitmap, sw_if_index, 0);
+
+ return 0;
+}
+
+static clib_error_t *
+l2_rw_interface_cli_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 table_index = ~0;
+ u32 sw_if_index = ~0;
+ u32 miss_index = ~0;
+
+ if (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ unformat (input, "%U", unformat_vnet_sw_interface, vnm, &sw_if_index);
+ }
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "table %d", &table_index))
+ ;
+ else if (unformat (input, "miss-index %d", &miss_index))
+ ;
+ else
+ break;
+ }
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "You must specify an interface");
+ int ret;
+ if ((ret =
+ l2_rw_interface_set_table (sw_if_index, table_index, miss_index)))
+ return clib_error_return (0, "l2_rw_interface_set_table returned %d",
+ ret);
+
+ return 0;
+}
+
+/*?
+ * The Layer 2 Rewrite node uses classify tables to match packets. Then,
+ * using the provisioned mask and value, it modifies the packet header.
+ *
+ * @cliexpar
+ * @todo This is incomplete. This needs a detailed description and a
+ * practical example.
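+ *
+ * For example, to bind classify table 5 to an interface, using rewrite
+ * entry 1 when the table does not match (all values illustrative):
+ * @cliexcmd{set interface l2 rewrite GigabitEthernet0/8/0 table 5 miss-index 1}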
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (l2_rw_interface_cli, static) = {
+ .path = "set interface l2 rewrite",
+ .short_help =
+ "set interface l2 rewrite <interface> [table <table index>] [miss-index <entry-index>]",
+ .function = l2_rw_interface_cli_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+l2_rw_show_interfaces_cli_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ l2_rw_main_t *rw = &l2_rw_main;
+ if (clib_bitmap_count_set_bits (rw->configs_bitmap) == 0)
+ vlib_cli_output (vm, "No interface is currently using l2 rewrite\n");
+
+ uword i;
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach(i, rw->configs_bitmap, {
+ vlib_cli_output (vm, "sw_if_index:%d %U\n", i, format_l2_rw_config, &rw->configs[i]);
+ });
+ /* *INDENT-ON* */
+ return 0;
+}
+
+/*?
+ * The Layer 2 Rewrite node uses classify tables to match packets. Then,
+ * using the provisioned mask and value, it modifies the packet header.
+ *
+ * @cliexpar
+ * @todo This is incomplete. This needs a detailed description and a
+ * practical example.
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (l2_rw_show_interfaces_cli, static) = {
+ .path = "show l2 rewrite interfaces",
+ .short_help =
+ "show l2 rewrite interfaces",
+ .function = l2_rw_show_interfaces_cli_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+l2_rw_show_entries_cli_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ l2_rw_main_t *rw = &l2_rw_main;
+ l2_rw_entry_t *e;
+ if (pool_elts (rw->entries) == 0)
+ vlib_cli_output (vm, "No entries\n");
+
+ /* *INDENT-OFF* */
+ pool_foreach(e, rw->entries, {
+ vlib_cli_output (vm, "%U\n", format_l2_rw_entry, e);
+ });
+ /* *INDENT-ON* */
+ return 0;
+}
+
+/*?
+ * The Layer 2 Rewrite node uses classify tables to match packets. Then,
+ * using the provisioned mask and value, it modifies the packet header.
+ *
+ * @cliexpar
+ * @todo This is incomplete. This needs a detailed description and a
+ * practical example.
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (l2_rw_show_entries_cli, static) = {
+ .path = "show l2 rewrite entries",
+ .short_help =
+ "show l2 rewrite entries",
+ .function = l2_rw_show_entries_cli_fn,
+};
+/* *INDENT-ON* */
+
+int
+l2_rw_enable_disable (u32 bridge_domain, u8 disable)
+{
+ u32 mask = L2INPUT_FEAT_RW;
+ l2input_set_bridge_features (bridge_domain, mask, disable ? 0 : mask);
+ return 0;
+}
+
+static clib_error_t *
+l2_rw_set_cli_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ u32 bridge_domain;
+ u8 disable = 0;
+
+ if (unformat_check_input (input) == UNFORMAT_END_OF_INPUT ||
+ !unformat (input, "%d", &bridge_domain))
+ {
+ return clib_error_return (0, "You must specify a bridge domain");
+ }
+
+ if (unformat_check_input (input) != UNFORMAT_END_OF_INPUT &&
+ unformat (input, "disable"))
+ {
+ disable = 1;
+ }
+
+ if (l2_rw_enable_disable (bridge_domain, disable))
+ return clib_error_return (0, "Could not enable or disable rewrite");
+
+ return 0;
+}
+
+/*?
+ * The Layer 2 Rewrite node uses classify tables to match packets. Then,
+ * using the provisioned mask and value, it modifies the packet header.
+ *
+ * @cliexpar
+ * @todo This is incomplete. This needs a detailed description and a
+ * practical example.
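+ *
+ * For example, to enable rewrite in bridge domain 200 and later disable
+ * it (the bridge-domain id is illustrative):
+ * @cliexcmd{set bridge-domain rewrite 200}
+ * @cliexcmd{set bridge-domain rewrite 200 disable}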
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (l2_rw_set_cli, static) = {
+ .path = "set bridge-domain rewrite",
+ .short_help =
+ "set bridge-domain rewrite <bridge-domain> [disable]",
+ .function = l2_rw_set_cli_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+l2_rw_init (vlib_main_t * vm)
+{
+ l2_rw_main_t *rw = &l2_rw_main;
+ rw->configs = 0;
+ rw->entries = 0;
+ clib_bitmap_alloc (rw->configs_bitmap, 1);
+ feat_bitmap_init_next_nodes (vm,
+ l2_rw_node.index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ rw->feat_next_node_index);
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2_rw_init);
+
+enum
+{
+ L2_RW_NEXT_DROP,
+ L2_RW_N_NEXT,
+};
+
+#define foreach_l2_rw_error \
+_(UNKNOWN, "Unknown error")
+
+typedef enum
+{
+#define _(sym,str) L2_RW_ERROR_##sym,
+ foreach_l2_rw_error
+#undef _
+ L2_RW_N_ERROR,
+} l2_rw_error_t;
+
+static char *l2_rw_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2_rw_error
+#undef _
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2_rw_node) = {
+ .function = l2_rw_node_fn,
+ .name = "l2-rw",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2_rw_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(l2_rw_error_strings),
+ .error_strings = l2_rw_error_strings,
+ .runtime_data_bytes = 0,
+ .n_next_nodes = L2_RW_N_NEXT,
+ .next_nodes = { [L2_RW_NEXT_DROP] = "error-drop"},
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2_rw_node, l2_rw_node_fn)
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_rw.h b/src/vnet/l2/l2_rw.h
new file mode 100644
index 00000000000..49aa25fb601
--- /dev/null
+++ b/src/vnet/l2/l2_rw.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * l2_rw is based on the vnet classifier and provides a way
+ * to modify packets matching a given table.
+ *
+ * Tables must be created using vnet's classify features.
+ * Entries contained within these tables must have their
+ * opaque index set to the rewrite entry created with l2_rw_mod_entry.
+ */
+
+#ifndef L2_RW_H_
+#define L2_RW_H_
+
+#include <vnet/l2/l2_input.h>
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED(struct _l2_rw_entry {
+ u16 skip_n_vectors;
+ u16 rewrite_n_vectors;
+ u64 hit_count;
+ u32x4 *mask;
+ u32x4 *value;
+}) l2_rw_entry_t;
+/* *INDENT-ON* */
+
+/* l2_rw configuration for one interface */
+/* *INDENT-OFF* */
+typedef CLIB_PACKED(struct _l2_rw_config {
+ u32 table_index; /* Which classify table to use */
+ u32 miss_index; /* Rewrite entry to use if table does not match */
+}) l2_rw_config_t;
+/* *INDENT-ON* */
+
+typedef struct
+{
+ /* Next feature node indexes */
+ u32 feat_next_node_index[32];
+
+ /* A pool of entries */
+ l2_rw_entry_t *entries;
+
+ /* Config vector indexed by sw_if_index */
+ l2_rw_config_t *configs;
+ uword *configs_bitmap;
+} l2_rw_main_t;
+
+extern l2_rw_main_t l2_rw_main;
+
+/*
+ * Specifies which classify table and miss_index should be used
+ * with the given interface.
+ * Use the special value ~0 to unset table_index or miss_index.
+ * The l2_rw feature is automatically enabled for the interface
+ * when table_index or miss_index is not ~0.
+ * Returns 0 on success, non-zero on error.
+ */
+int l2_rw_interface_set_table (u32 sw_if_index,
+ u32 table_index, u32 miss_index);
+
+/*
+ * Creates, modifies or deletes a rewrite entry.
+ * If *index != ~0, modifies an existing entry (or simply
+ * deletes it if is_del is set).
+ * If *index == ~0, creates a new entry and stores the new
+ * entry index in *index (does nothing if is_del is set).
+ * Returns 0 on success, non-zero on error.
+ */
+int l2_rw_mod_entry (u32 * index,
+ u8 * mask, u8 * value, u32 len, u32 skip, u8 is_del);
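+
+/*
+ * Usage sketch (illustrative): create a new entry rewriting the first
+ * 6 bytes (the destination MAC) of matching packets, then install it as
+ * the miss entry for an interface:
+ *
+ *   u32 index = ~0;
+ *   u8 mask[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ *   u8 value[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
+ *   if (l2_rw_mod_entry (&index, mask, value, 6, 0, 0) == 0)
+ *     l2_rw_interface_set_table (sw_if_index, table_index, index);
+ */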
+
+#endif /* L2_RW_H_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_vtr.c b/src/vnet/l2/l2_vtr.c
new file mode 100644
index 00000000000..95a4f15700a
--- /dev/null
+++ b/src/vnet/l2/l2_vtr.c
@@ -0,0 +1,770 @@
+/*
+ * l2_vtr.c : layer 2 vlan tag rewrite configuration
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ethernet/packet.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_output.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_vtr.h>
+#include <vnet/l2/l2_input_vtr.h>
+#include <vnet/l2/l2_output.h>
+
+#include <vppinfra/error.h>
+#include <vlib/cli.h>
+
+/**
+ * @file
+ * @brief Ethernet VLAN Tag Rewrite.
+ *
+ * VLAN tag rewrite provides the ability to change the VLAN tags on a packet.
+ * Existing tags can be popped, new tags can be pushed, and existing tags can
+ * be swapped with new tags. The rewrite feature is attached to a subinterface
+ * as input and output operations. The input operation is explicitly configured.
+ * The output operation is the symmetric opposite and is automatically derived
+ * from the input operation.
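+ *
+ * For example, an input operation of "pop 1" on a dot1q subinterface
+ * implies an output operation that pushes the subinterface's configured
+ * tag back onto packets it transmits.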
+ */
+
+/** Just a placeholder; ensures file is not eliminated by linker. */
+clib_error_t *
+l2_vtr_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2_vtr_init);
+
+u32
+l2pbb_configure (vlib_main_t * vlib_main,
+ vnet_main_t * vnet_main, u32 sw_if_index, u32 vtr_op,
+ u8 * b_dmac, u8 * b_smac,
+ u16 b_vlanid, u32 i_sid, u16 vlan_outer_tag)
+{
+ u32 error = 0;
+ u32 enable = 0;
+
+ l2_output_config_t *config = 0;
+ vnet_hw_interface_t *hi;
+ hi = vnet_get_sup_hw_interface (vnet_main, sw_if_index);
+
+ if (!hi)
+ {
+ error = VNET_API_ERROR_INVALID_INTERFACE;
+ goto done;
+ }
+
+ /* Config for this interface should already be initialized */
+ ptr_config_t *in_config;
+ ptr_config_t *out_config;
+ config = vec_elt_at_index (l2output_main.configs, sw_if_index);
+ in_config = &(config->input_pbb_vtr);
+ out_config = &(config->output_pbb_vtr);
+
+ in_config->pop_bytes = 0;
+ in_config->push_bytes = 0;
+ out_config->pop_bytes = 0;
+ out_config->push_bytes = 0;
+ enable = (vtr_op != L2_VTR_DISABLED);
+
+ if (!enable)
+ goto done;
+
+ if (vtr_op == L2_VTR_POP_2)
+ {
+ in_config->pop_bytes = sizeof (ethernet_pbb_header_packed_t);
+ }
+ else if (vtr_op == L2_VTR_PUSH_2)
+ {
+ clib_memcpy (in_config->macs_tags.b_dst_address, b_dmac,
+ sizeof (in_config->macs_tags.b_dst_address));
+ clib_memcpy (in_config->macs_tags.b_src_address, b_smac,
+ sizeof (in_config->macs_tags.b_src_address));
+ in_config->macs_tags.b_type =
+ clib_net_to_host_u16 (ETHERNET_TYPE_DOT1AD);
+ in_config->macs_tags.priority_dei_id =
+ clib_net_to_host_u16 (b_vlanid & 0xFFF);
+ in_config->macs_tags.i_type =
+ clib_net_to_host_u16 (ETHERNET_TYPE_DOT1AH);
+ in_config->macs_tags.priority_dei_uca_res_sid =
+ clib_net_to_host_u32 (i_sid & 0xFFFFF);
+ in_config->push_bytes = sizeof (ethernet_pbb_header_packed_t);
+ }
+ else if (vtr_op == L2_VTR_TRANSLATE_2_2)
+ {
+ /* TODO after PoC */
+ }
+
+ /*
+ * Construct the output tag-rewrite config
+ *
+ * The push/pop values are always reversed
+ */
+ out_config->raw_data = in_config->raw_data;
+ out_config->pop_bytes = in_config->push_bytes;
+ out_config->push_bytes = in_config->pop_bytes;
+
+done:
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_VTR, enable);
+ if (config)
+ config->out_vtr_flag = (u8) enable;
+
+ /* output vtr enable is checked explicitly in l2_output */
+ return error;
+}
+
+/**
+ * Configure vlan tag rewrite on the given interface.
+ * Returns 0 on success, a VNET_API_ERROR code otherwise.
+ */
+u32
+l2vtr_configure (vlib_main_t * vlib_main, vnet_main_t * vnet_main, u32 sw_if_index, u32 vtr_op, u32 push_dot1q, /* ethertype of first pushed tag is dot1q/dot1ad */
+ u32 vtr_tag1, /* first pushed tag */
+ u32 vtr_tag2) /* second pushed tag */
+{
+ vnet_hw_interface_t *hi;
+ vnet_sw_interface_t *si;
+ u32 hw_no_tags;
+ u32 error = 0;
+ l2_output_config_t *config;
+ vtr_config_t *in_config;
+ vtr_config_t *out_config;
+ u32 enable;
+ u32 push_inner_et;
+ u32 push_outer_et;
+ u32 cfg_tags;
+
+ hi = vnet_get_sup_hw_interface (vnet_main, sw_if_index);
+ if (!hi || (hi->hw_class_index != ethernet_hw_interface_class.index))
+ {
+ error = VNET_API_ERROR_INVALID_INTERFACE; /* non-ethernet interface */
+ goto done;
+ }
+
+ /* Init the config for this interface */
+ vec_validate (l2output_main.configs, sw_if_index);
+ config = vec_elt_at_index (l2output_main.configs, sw_if_index);
+ in_config = &(config->input_vtr);
+ out_config = &(config->output_vtr);
+ in_config->raw_tags = 0;
+ out_config->raw_tags = 0;
+
+ /* Get the configured tags for the interface */
+ si = vnet_get_sw_interface (vnet_main, sw_if_index);
+ hw_no_tags = (si->type == VNET_SW_INTERFACE_TYPE_HARDWARE);
+
+ /* Construct the input tag-rewrite config */
+
+ push_outer_et =
+ clib_net_to_host_u16 (push_dot1q ? ETHERNET_TYPE_VLAN :
+ ETHERNET_TYPE_DOT1AD);
+ push_inner_et = clib_net_to_host_u16 (ETHERNET_TYPE_VLAN);
+ vtr_tag1 = clib_net_to_host_u16 (vtr_tag1);
+ vtr_tag2 = clib_net_to_host_u16 (vtr_tag2);
+
+ /* Determine number of vlan tags with explicitly configured values */
+ cfg_tags = 0;
+ if (hw_no_tags || si->sub.eth.flags.no_tags)
+ {
+ cfg_tags = 0;
+ }
+ else if (si->sub.eth.flags.one_tag)
+ {
+ cfg_tags = 1;
+ if (si->sub.eth.flags.outer_vlan_id_any)
+ {
+ cfg_tags = 0;
+ }
+ }
+ else if (si->sub.eth.flags.two_tags)
+ {
+ cfg_tags = 2;
+ if (si->sub.eth.flags.inner_vlan_id_any)
+ {
+ cfg_tags = 1;
+ }
+ if (si->sub.eth.flags.outer_vlan_id_any)
+ {
+ cfg_tags = 0;
+ }
+ }
+
+ switch (vtr_op)
+ {
+ case L2_VTR_DISABLED:
+ in_config->push_and_pop_bytes = 0;
+ break;
+
+ case L2_VTR_POP_1:
+ if (cfg_tags < 1)
+ {
+ /* Need one or two tags */
+ error = VNET_API_ERROR_INVALID_VLAN_TAG_COUNT;
+ goto done;
+ }
+ in_config->pop_bytes = 4;
+ in_config->push_bytes = 0;
+ break;
+
+ case L2_VTR_POP_2:
+ if (cfg_tags < 2)
+ {
+ error = VNET_API_ERROR_INVALID_VLAN_TAG_COUNT; /* Need two tags */
+ goto done;
+ }
+ in_config->pop_bytes = 8;
+ in_config->push_bytes = 0;
+
+ out_config->push_bytes = in_config->pop_bytes;
+ out_config->pop_bytes = in_config->push_bytes;
+ break;
+
+ case L2_VTR_PUSH_1:
+ in_config->pop_bytes = 0;
+ in_config->push_bytes = 4;
+ in_config->tags[1].priority_cfi_and_id = vtr_tag1;
+ in_config->tags[1].type = push_outer_et;
+ break;
+
+ case L2_VTR_PUSH_2:
+ in_config->pop_bytes = 0;
+ in_config->push_bytes = 8;
+ in_config->tags[0].priority_cfi_and_id = vtr_tag1;
+ in_config->tags[0].type = push_outer_et;
+ in_config->tags[1].priority_cfi_and_id = vtr_tag2;
+ in_config->tags[1].type = push_inner_et;
+ break;
+
+ case L2_VTR_TRANSLATE_1_1:
+ if (cfg_tags < 1)
+ {
+ error = VNET_API_ERROR_INVALID_VLAN_TAG_COUNT; /* Need one or two tags */
+ goto done;
+ }
+ in_config->pop_bytes = 4;
+ in_config->push_bytes = 4;
+ in_config->tags[1].priority_cfi_and_id = vtr_tag1;
+ in_config->tags[1].type = push_outer_et;
+ break;
+
+ case L2_VTR_TRANSLATE_1_2:
+ if (cfg_tags < 1)
+ {
+ error = VNET_API_ERROR_INVALID_VLAN_TAG_COUNT; /* Need one or two tags */
+ goto done;
+ }
+ in_config->pop_bytes = 4;
+ in_config->push_bytes = 8;
+ in_config->tags[0].priority_cfi_and_id = vtr_tag1;
+ in_config->tags[0].type = push_outer_et;
+ in_config->tags[1].priority_cfi_and_id = vtr_tag2;
+ in_config->tags[1].type = push_inner_et;
+ break;
+
+ case L2_VTR_TRANSLATE_2_1:
+ if (cfg_tags < 2)
+ {
+ error = VNET_API_ERROR_INVALID_VLAN_TAG_COUNT; /* Need two tags */
+ goto done;
+ }
+ in_config->pop_bytes = 8;
+ in_config->push_bytes = 4;
+ in_config->tags[1].priority_cfi_and_id = vtr_tag1;
+ in_config->tags[1].type = push_outer_et;
+ break;
+
+ case L2_VTR_TRANSLATE_2_2:
+ if (cfg_tags < 2)
+ {
+ error = VNET_API_ERROR_INVALID_VLAN_TAG_COUNT; /* Need two tags */
+ goto done;
+ }
+ in_config->pop_bytes = 8;
+ in_config->push_bytes = 8;
+ in_config->tags[0].priority_cfi_and_id = vtr_tag1;
+ in_config->tags[0].type = push_outer_et;
+ in_config->tags[1].priority_cfi_and_id = vtr_tag2;
+ in_config->tags[1].type = push_inner_et;
+ break;
+ }
+
+ /*
+ * Construct the output tag-rewrite config
+ *
+ * The push/pop values are always reversed
+ */
+ out_config->push_bytes = in_config->pop_bytes;
+ out_config->pop_bytes = in_config->push_bytes;
+
+ /* Any pushed tags are derived from the subinterface config */
+ push_outer_et =
+ clib_net_to_host_u16 (si->sub.eth.flags.dot1ad ? ETHERNET_TYPE_DOT1AD :
+ ETHERNET_TYPE_VLAN);
+ push_inner_et = clib_net_to_host_u16 (ETHERNET_TYPE_VLAN);
+ vtr_tag1 = clib_net_to_host_u16 (si->sub.eth.outer_vlan_id);
+ vtr_tag2 = clib_net_to_host_u16 (si->sub.eth.inner_vlan_id);
+
+ if (out_config->push_bytes == 4)
+ {
+ out_config->tags[1].priority_cfi_and_id = vtr_tag1;
+ out_config->tags[1].type = push_outer_et;
+ }
+ else if (out_config->push_bytes == 8)
+ {
+ out_config->tags[0].priority_cfi_and_id = vtr_tag1;
+ out_config->tags[0].type = push_outer_et;
+ out_config->tags[1].priority_cfi_and_id = vtr_tag2;
+ out_config->tags[1].type = push_inner_et;
+ }
+
+ /* set the interface enable flags */
+ enable = (vtr_op != L2_VTR_DISABLED);
+ config->out_vtr_flag = (u8) enable;
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_VTR, enable);
+ /* output vtr enable is checked explicitly in l2_output */
+
+done:
+ return error;
+}
+
+/**
+ * Get the vlan tag rewrite settings of the given interface.
+ * Returns 0 on success, a VNET_API_ERROR code otherwise.
+ */
+u32
+l2vtr_get (vlib_main_t * vlib_main, vnet_main_t * vnet_main, u32 sw_if_index, u32 * vtr_op, u32 * push_dot1q, /* ethertype of first pushed tag is dot1q/dot1ad */
+ u32 * vtr_tag1, /* first pushed tag */
+ u32 * vtr_tag2) /* second pushed tag */
+{
+ vnet_hw_interface_t *hi;
+ u32 error = 0;
+ vtr_config_t *in_config;
+
+ if (!vtr_op || !push_dot1q || !vtr_tag1 || !vtr_tag2)
+ {
+ clib_warning ("invalid arguments");
+ error = VNET_API_ERROR_INVALID_ARGUMENT;
+ goto done;
+ }
+
+ *vtr_op = L2_VTR_DISABLED;
+ *vtr_tag1 = 0;
+ *vtr_tag2 = 0;
+ *push_dot1q = 0;
+
+ hi = vnet_get_sup_hw_interface (vnet_main, sw_if_index);
+ if (!hi || (hi->hw_class_index != ethernet_hw_interface_class.index))
+ {
+ /* non-ethernet interface */
+ goto done;
+ }
+
+ if (sw_if_index >= vec_len (l2output_main.configs))
+ {
+ /* no specific config (return disabled) */
+ goto done;
+ }
+
+ /* Get the config for this interface */
+ in_config =
+ &(vec_elt_at_index (l2output_main.configs, sw_if_index)->input_vtr);
+
+ /* DISABLED */
+ if (in_config->push_and_pop_bytes == 0)
+ {
+ goto done;
+ }
+
+ /* find out vtr_op */
+ switch (in_config->pop_bytes)
+ {
+ case 0:
+ switch (in_config->push_bytes)
+ {
+ case 0:
+ /* DISABLED */
+ goto done;
+ case 4:
+ *vtr_op = L2_VTR_PUSH_1;
+ *vtr_tag1 =
+ clib_host_to_net_u16 (in_config->tags[1].priority_cfi_and_id);
+ *push_dot1q =
+ (ETHERNET_TYPE_VLAN ==
+ clib_host_to_net_u16 (in_config->tags[1].type));
+ break;
+ case 8:
+ *vtr_op = L2_VTR_PUSH_2;
+ *vtr_tag1 =
+ clib_host_to_net_u16 (in_config->tags[0].priority_cfi_and_id);
+ *vtr_tag2 =
+ clib_host_to_net_u16 (in_config->tags[1].priority_cfi_and_id);
+ *push_dot1q =
+ (ETHERNET_TYPE_VLAN ==
+ clib_host_to_net_u16 (in_config->tags[0].type));
+ break;
+ default:
+ clib_warning ("invalid push_bytes count: %d",
+ in_config->push_bytes);
+ error = VNET_API_ERROR_UNEXPECTED_INTF_STATE;
+ goto done;
+ }
+ break;
+
+ case 4:
+ switch (in_config->push_bytes)
+ {
+ case 0:
+ *vtr_op = L2_VTR_POP_1;
+ break;
+ case 4:
+ *vtr_op = L2_VTR_TRANSLATE_1_1;
+ *vtr_tag1 =
+ clib_host_to_net_u16 (in_config->tags[1].priority_cfi_and_id);
+ *push_dot1q =
+ (ETHERNET_TYPE_VLAN ==
+ clib_host_to_net_u16 (in_config->tags[1].type));
+ break;
+ case 8:
+ *vtr_op = L2_VTR_TRANSLATE_1_2;
+ *vtr_tag1 =
+ clib_host_to_net_u16 (in_config->tags[0].priority_cfi_and_id);
+ *vtr_tag2 =
+ clib_host_to_net_u16 (in_config->tags[1].priority_cfi_and_id);
+ *push_dot1q =
+ (ETHERNET_TYPE_VLAN ==
+ clib_host_to_net_u16 (in_config->tags[0].type));
+ break;
+ default:
+ clib_warning ("invalid push_bytes count: %d",
+ in_config->push_bytes);
+ error = VNET_API_ERROR_UNEXPECTED_INTF_STATE;
+ goto done;
+ }
+ break;
+
+ case 8:
+ switch (in_config->push_bytes)
+ {
+ case 0:
+ *vtr_op = L2_VTR_POP_2;
+ break;
+ case 4:
+ *vtr_op = L2_VTR_TRANSLATE_2_1;
+ *vtr_tag1 =
+ clib_host_to_net_u16 (in_config->tags[1].priority_cfi_and_id);
+ *push_dot1q =
+ (ETHERNET_TYPE_VLAN ==
+ clib_host_to_net_u16 (in_config->tags[1].type));
+ break;
+ case 8:
+ *vtr_op = L2_VTR_TRANSLATE_2_2;
+ *vtr_tag1 =
+ clib_host_to_net_u16 (in_config->tags[0].priority_cfi_and_id);
+ *vtr_tag2 =
+ clib_host_to_net_u16 (in_config->tags[1].priority_cfi_and_id);
+ *push_dot1q =
+ (ETHERNET_TYPE_VLAN ==
+ clib_host_to_net_u16 (in_config->tags[0].type));
+ break;
+ default:
+ clib_warning ("invalid push_bytes count: %d",
+ in_config->push_bytes);
+ error = VNET_API_ERROR_UNEXPECTED_INTF_STATE;
+ goto done;
+ }
+ break;
+
+ default:
+ clib_warning ("invalid pop_bytes count: %d", in_config->pop_bytes);
+ error = VNET_API_ERROR_UNEXPECTED_INTF_STATE;
+ goto done;
+ }
+
+done:
+ return error;
+}
+
+/**
+ * Set subinterface vtr enable/disable.
+ * The CLI format is:
+ * set interface l2 tag-rewrite <interface> [disable | pop 1 | pop 2 | push {dot1q|dot1ad} <tag> [<tag>]]
+ *
+ * "push" can also be replaced by "translate-{1|2}-{1|2}"
+ */
+static clib_error_t *
+int_l2_vtr (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 sw_if_index;
+ u32 vtr_op;
+ u32 push_dot1q = 0;
+ u32 tag1 = 0, tag2 = 0;
+
+ if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ vtr_op = L2_VTR_DISABLED;
+
+ if (unformat (input, "disable"))
+ {
+ vtr_op = L2_VTR_DISABLED;
+ }
+ else if (unformat (input, "pop 1"))
+ {
+ vtr_op = L2_VTR_POP_1;
+ }
+ else if (unformat (input, "pop 2"))
+ {
+ vtr_op = L2_VTR_POP_2;
+ }
+ else if (unformat (input, "push dot1q %d %d", &tag1, &tag2))
+ {
+ vtr_op = L2_VTR_PUSH_2;
+ push_dot1q = 1;
+ }
+ else if (unformat (input, "push dot1ad %d %d", &tag1, &tag2))
+ {
+ vtr_op = L2_VTR_PUSH_2;
+ }
+ else if (unformat (input, "push dot1q %d", &tag1))
+ {
+ vtr_op = L2_VTR_PUSH_1;
+ push_dot1q = 1;
+ }
+ else if (unformat (input, "push dot1ad %d", &tag1))
+ {
+ vtr_op = L2_VTR_PUSH_1;
+ }
+ else if (unformat (input, "translate 1-1 dot1q %d", &tag1))
+ {
+ vtr_op = L2_VTR_TRANSLATE_1_1;
+ push_dot1q = 1;
+ }
+ else if (unformat (input, "translate 1-1 dot1ad %d", &tag1))
+ {
+ vtr_op = L2_VTR_TRANSLATE_1_1;
+ }
+ else if (unformat (input, "translate 2-1 dot1q %d", &tag1))
+ {
+ vtr_op = L2_VTR_TRANSLATE_2_1;
+ push_dot1q = 1;
+ }
+ else if (unformat (input, "translate 2-1 dot1ad %d", &tag1))
+ {
+ vtr_op = L2_VTR_TRANSLATE_2_1;
+    }
+ else if (unformat (input, "translate 2-2 dot1q %d %d", &tag1, &tag2))
+ {
+ vtr_op = L2_VTR_TRANSLATE_2_2;
+ push_dot1q = 1;
+ }
+ else if (unformat (input, "translate 2-2 dot1ad %d %d", &tag1, &tag2))
+ {
+ vtr_op = L2_VTR_TRANSLATE_2_2;
+    }
+ else if (unformat (input, "translate 1-2 dot1q %d %d", &tag1, &tag2))
+ {
+ vtr_op = L2_VTR_TRANSLATE_1_2;
+ push_dot1q = 1;
+ }
+ else if (unformat (input, "translate 1-2 dot1ad %d %d", &tag1, &tag2))
+ {
+ vtr_op = L2_VTR_TRANSLATE_1_2;
+    }
+ else
+ {
+ error =
+ clib_error_return (0,
+			   "expecting [disable | pop 1 | pop 2 | push {dot1q|dot1ad} <tag> [<tag>]\n"
+			   " | translate {1|2}-{1|2} {dot1q|dot1ad} <tag> [<tag>]] but got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ if (l2vtr_configure (vm, vnm, sw_if_index, vtr_op, push_dot1q, tag1, tag2))
+ {
+ error =
+ clib_error_return (0,
+ "vlan tag rewrite is not compatible with interface");
+ goto done;
+ }
+
+done:
+ return error;
+}
+
+/*?
+ * VLAN tag rewrite provides the ability to change the VLAN tags on a packet.
+ * Existing tags can be popped, new tags can be pushed, and existing tags can
+ * be swapped with new tags. The rewrite feature is attached to a subinterface
+ * as input and output operations. The input operation is explicitly configured.
+ * The output operation is the symmetric opposite and is automatically derived
+ * from the input operation.
+ *
+ * <b>POP:</b> For pop operations, the subinterface encapsulation (the vlan
+ * tags specified when it was created) must have at least the number of popped
+ * tags; e.g. the \"pop 2\" operation would be rejected on a single-vlan interface.
+ * The output tag-rewrite operation for pops is to push the specified number of
+ * vlan tags onto the packet. The pushed tag values are the ones in the
+ * subinterface encapsulation.
+ *
+ * <b>PUSH:</b> For push operations, the ethertype is also specified. The
+ * output tag-rewrite operation for pushes is to pop the same number of tags
+ * off the packet. If the packet doesn't have enough tags it is dropped.
+ *
+ * @cliexpar
+ * @parblock
+ * By default a subinterface has no tag-rewrite. To return a subinterface to
+ * this state use:
+ * @cliexcmd{set interface l2 tag-rewrite GigabitEthernet0/8/0.200 disable}
+ *
+ * To pop vlan tags off packets received from a subinterface, use:
+ * @cliexcmd{set interface l2 tag-rewrite GigabitEthernet0/8/0.200 pop 1}
+ * @cliexcmd{set interface l2 tag-rewrite GigabitEthernet0/8/0.200 pop 2}
+ *
+ * To push one or two vlan tags onto packets received from an interface, use:
+ * @cliexcmd{set interface l2 tag-rewrite GigabitEthernet0/8/0.200 push dot1q 100}
+ * @cliexcmd{set interface l2 tag-rewrite GigabitEthernet0/8/0.200 push dot1ad 100 150}
+ *
+ * Tags can also be translated, which is basically a combination of a pop and push.
+ * @cliexcmd{set interface l2 tag-rewrite GigabitEthernet0/8/0.200 translate 1-1 dot1ad 100}
+ * @cliexcmd{set interface l2 tag-rewrite GigabitEthernet0/8/0.200 translate 2-2 dot1ad 100 150}
+ * @cliexcmd{set interface l2 tag-rewrite GigabitEthernet0/8/0.200 translate 1-2 dot1q 100}
+ * @cliexcmd{set interface l2 tag-rewrite GigabitEthernet0/8/0.200 translate 2-1 dot1q 100 150}
+ *
+ * To display the VLAN Tag settings, show the associate bridge-domain:
+ * @cliexstart{show bridge-domain 200 detail}
+ * ID Index Learning U-Forwrd UU-Flood Flooding ARP-Term BVI-Intf
+ * 200 1 on on on on off N/A
+ *
+ * Interface Index SHG BVI VLAN-Tag-Rewrite
+ * GigabitEthernet0/8/0.200 5 0 - trans-1-1 dot1ad 100
+ * GigabitEthernet0/9/0.200 4 0 - none
+ * GigabitEthernet0/a/0.200 6 0 - none
+ * @cliexend
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (int_l2_vtr_cli, static) = {
+ .path = "set interface l2 tag-rewrite",
+  .short_help = "set interface l2 tag-rewrite <interface> [disable | pop {1|2} | push {dot1q|dot1ad} <tag> [<tag>] | translate {1|2}-{1|2} {dot1q|dot1ad} <tag> [<tag>]]",
+ .function = int_l2_vtr,
+};
+/* *INDENT-ON* */
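+
+/*
+ * Usage sketch: the programmatic equivalent of the CLI above, calling
+ * l2vtr_configure () directly. Illustrative only: the sw_if_index value
+ * is hypothetical and would normally come from an interface lookup.
+ */
+static inline u32
+example_push_one_dot1q_tag (vlib_main_t * vm)
+{
+  vnet_main_t *vnm = vnet_get_main ();
+  u32 sw_if_index = 5;		/* hypothetical subinterface index */
+
+  /* push one dot1q tag with vlan id 100; tag2 is unused for PUSH_1 */
+  return l2vtr_configure (vm, vnm, sw_if_index, L2_VTR_PUSH_1,
+			  1 /* push_dot1q */ , 100 /* tag1 */ ,
+			  0 /* tag2 */ );
+}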
+
+/**
+ * Enable or disable PBB tag rewrite on a subinterface.
+ * The CLI format is:
+ *    set interface l2 pbb-tag-rewrite <interface> [disable | pop | push | translate_pbb_stag <outer_tag> dmac <address> smac <address> s_id <nn> [b_vlanid <nn>]]
+ */
+static clib_error_t *
+int_l2_pbb_vtr (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 sw_if_index, tmp;
+ u32 vtr_op = L2_VTR_DISABLED;
+ u32 outer_tag = 0;
+ u8 dmac[6];
+ u8 smac[6];
+ u8 dmac_set = 0, smac_set = 0;
+ u16 b_vlanid = 0;
+ u32 s_id = ~0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat_user
+ (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ ;
+ else if (unformat (input, "disable"))
+ vtr_op = L2_VTR_DISABLED;
+ else if (vtr_op == L2_VTR_DISABLED && unformat (input, "pop"))
+ vtr_op = L2_VTR_POP_2;
+ else if (vtr_op == L2_VTR_DISABLED && unformat (input, "push"))
+ vtr_op = L2_VTR_PUSH_2;
+ else if (vtr_op == L2_VTR_DISABLED
+ && unformat (input, "translate_pbb_stag %d", &outer_tag))
+ vtr_op = L2_VTR_TRANSLATE_2_1;
+ else if (unformat (input, "dmac %U", unformat_ethernet_address, dmac))
+ dmac_set = 1;
+ else if (unformat (input, "smac %U", unformat_ethernet_address, smac))
+ smac_set = 1;
+ else if (unformat (input, "b_vlanid %d", &tmp))
+ b_vlanid = tmp;
+ else if (unformat (input, "s_id %d", &s_id))
+ ;
+ else
+ {
+ error = clib_error_return (0,
+ "expecting [disable | pop | push | translate_pbb_stag <outer_tag>\n"
+ "dmac <address> smac <address> s_id <nn> [b_vlanid <nn>]]");
+ goto done;
+ }
+ }
+
+ if ((vtr_op == L2_VTR_PUSH_2 || vtr_op == L2_VTR_TRANSLATE_2_1)
+ && (!dmac_set || !smac_set || s_id == ~0))
+ {
+ error = clib_error_return (0,
+ "expecting dmac <address> smac <address> s_id <nn> [b_vlanid <nn>]");
+ goto done;
+ }
+
+ if (l2pbb_configure
+ (vm, vnm, sw_if_index, vtr_op, dmac, smac, b_vlanid, s_id, outer_tag))
+ {
+ error =
+ clib_error_return (0,
+ "pbb tag rewrite is not compatible with interface");
+ goto done;
+ }
+
+done:
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (int_l2_pbb_vtr_cli, static) = {
+ .path = "set interface l2 pbb-tag-rewrite",
+ .short_help = "set interface l2 pbb-tag-rewrite <interface> [disable | pop | push | translate_pbb_stag <outer_tag> dmac <address> smac <address> s_id <nn> [b_vlanid <nn>]]",
+ .function = int_l2_pbb_vtr,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_vtr.h b/src/vnet/l2/l2_vtr.h
new file mode 100644
index 00000000000..893b2272b04
--- /dev/null
+++ b/src/vnet/l2/l2_vtr.h
@@ -0,0 +1,270 @@
+/*
+ * l2_vtr.h : layer 2 vlan tag rewrite processing
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_vnet_l2_vtr_h
+#define included_vnet_l2_vtr_h
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/ethernet/packet.h>
+
+/* VTR config options for API and CLI support */
+typedef enum
+{
+ L2_VTR_DISABLED,
+ L2_VTR_PUSH_1,
+ L2_VTR_PUSH_2,
+ L2_VTR_POP_1,
+ L2_VTR_POP_2,
+ L2_VTR_TRANSLATE_1_1,
+ L2_VTR_TRANSLATE_1_2,
+ L2_VTR_TRANSLATE_2_1,
+ L2_VTR_TRANSLATE_2_2
+} l2_vtr_op_t;
+
+/**
+ * Per-interface vlan tag rewrite configuration
+ * There will be one instance of this struct for each sw_if_index
+ * for both input vtr and output vtr
+ */
+typedef struct
+{
+ union
+ {
+ /*
+ * Up to two vlan tags to push.
+ * if there is only one vlan tag to push, it is in tags[1].
+ */
+ ethernet_vlan_header_tv_t tags[2];
+ u64 raw_tags;
+ };
+
+ union
+ {
+ struct
+ {
+ u8 push_bytes; /* number of bytes to push for up to 2 vlans (0,4,8) */
+ u8 pop_bytes; /* number of bytes to pop for up to 2 vlans (0,4,8) */
+ };
+ u16 push_and_pop_bytes; /* if 0 then the feature is disabled */
+ };
+} vtr_config_t;
+
+
+/**
+ * Perform the configured tag rewrite on the packet.
+ * Return 0 if ok, 1 if packet should be dropped (e.g. tried to pop
+ * too many tags)
+ */
+always_inline u32
+l2_vtr_process (vlib_buffer_t * b0, vtr_config_t * config)
+{
+ u64 temp_8;
+ u32 temp_4;
+ u8 *eth;
+
+ eth = vlib_buffer_get_current (b0);
+
+ /* copy the 12B dmac and smac to a temporary location */
+ temp_8 = *((u64 *) eth);
+ temp_4 = *((u32 *) (eth + 8));
+
+ /* adjust for popped tags */
+ eth += config->pop_bytes;
+
+ /* if not enough tags to pop then drop packet */
+ if (PREDICT_FALSE ((vnet_buffer (b0)->l2.l2_len - 12) < config->pop_bytes))
+ {
+ return 1;
+ }
+
+ /* copy the 2 new tags to the start of the packet */
+ *((u64 *) (eth + 12 - 8)) = config->raw_tags;
+
+ /* TODO: set cos bits */
+
+ /* adjust for pushed tags: */
+ eth -= config->push_bytes;
+
+  /* copy the 12B dmac and smac back to the packet */
+ *((u64 *) eth) = temp_8;
+ *((u32 *) (eth + 8)) = temp_4;
+
+ /* Update l2_len */
+ vnet_buffer (b0)->l2.l2_len +=
+ (word) config->push_bytes - (word) config->pop_bytes;
+
+ /* Update vlan tag count */
+ ethernet_buffer_adjust_vlan_count_by_bytes (b0,
+ (word) config->push_bytes -
+ (word) config->pop_bytes);
+
+ /* Update packet len */
+ vlib_buffer_advance (b0,
+ (word) config->pop_bytes - (word) config->push_bytes);
+
+ return 0;
+}
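+
+/*
+ * Configuration sketch (illustrative, not a public API): building a
+ * single-tag dot1q push config by hand. The single tag lives in tags[1],
+ * not tags[0], so l2_vtr_process above can always write raw_tags with one
+ * 8-byte store; tags[0] is zeroed so the EFP filter comparison against
+ * raw_tags (below) still works. vlan_id is assumed to carry no
+ * priority/cfi bits.
+ */
+always_inline void
+example_vtr_config_push_1 (vtr_config_t * config, u16 vlan_id)
+{
+  config->raw_tags = 0;		/* clear the unused tags[0] slot */
+  config->push_bytes = 4;	/* one 4-byte vlan tag to push */
+  config->pop_bytes = 0;	/* nothing to pop */
+  config->tags[1].type = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
+  config->tags[1].priority_cfi_and_id = clib_host_to_net_u16 (vlan_id);
+}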
+
+
+/*
+ * Perform the egress pre-vlan tag rewrite EFP Filter check.
+ * The post-vlan tag rewrite check is a separate graph node.
+ *
+ * This check ensures that a packet being output to an interface
+ * (before output vtr is performed) has vlan tags that match those
+ * on a packet received from that interface (after vtr has been performed).
+ * This means verifying that any tags pushed by input vtr are present
+ * on the packet.
+ *
+ * Return 0 if ok, 1 if packet should be dropped.
+ * This function should be passed the input vtr config for the interface.
+ */
+always_inline u8
+l2_efp_filter_process (vlib_buffer_t * b0, vtr_config_t * in_config)
+{
+ u8 *eth;
+ u64 packet_tags;
+ u64 tag_mask;
+
+ eth = vlib_buffer_get_current (b0);
+
+ /*
+ * If there are 2 tags pushed, they must match config->tags[0] and
+ * config->tags[1].
+ * If there is one tag pushed, it must match config->tags[1].
+ * If there are 0 tags pushed, the check passes.
+ */
+
+  /* mask for two vlan ids and ethertypes, no cos bits */
+ tag_mask = clib_net_to_host_u64 (0xFFFF0FFFFFFF0FFF);
+ /* mask for one vlan id and ethertype, no cos bits */
+ tag_mask =
+ (in_config->push_bytes ==
+ 4) ? clib_net_to_host_u64 (0xFFFF0FFF) : tag_mask;
+ /* mask for always match */
+ tag_mask = (in_config->push_bytes == 0) ? 0 : tag_mask;
+
+ /*
+ * Read 8B from the packet, getting the proper set of vlan tags
+ * For 0 push bytes, the address doesn't matter since the mask
+ * clears the data to 0.
+ */
+ packet_tags = *((u64 *) (eth + 4 + in_config->push_bytes));
+
+ /* Check if the packet tags match the configured tags */
+ return (packet_tags & tag_mask) != in_config->raw_tags;
+}
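+
+/*
+ * Worked example (illustrative): with a one-tag input push of dot1q
+ * vlan 100, push_bytes is 4, so tag_mask keeps only tags[1]'s ethertype
+ * and 12-bit vlan id. A packet carrying dot1q 100 then passes regardless
+ * of its 802.1p priority bits, while any other vlan id (or a missing
+ * tag) fails the comparison and the packet is dropped.
+ */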
+
+typedef struct
+{
+ union
+ {
+ ethernet_pbb_header_t macs_tags;
+ struct
+ {
+ u64 data1;
+ u64 data2;
+ u16 data3;
+ u32 data4;
+ } raw_data;
+ };
+ union
+ {
+ struct
+ {
+ u8 push_bytes; /* number of bytes to push pbb tags */
+ u8 pop_bytes; /* number of bytes to pop pbb tags */
+ };
+ u16 push_and_pop_bytes; /* if 0 then the feature is disabled */
+ };
+} ptr_config_t;
+
+always_inline u32
+l2_pbb_process (vlib_buffer_t * b0, ptr_config_t * config)
+{
+ u8 *eth = vlib_buffer_get_current (b0);
+
+ if (config->pop_bytes > 0)
+ {
+ ethernet_pbb_header_packed_t *ph = (ethernet_pbb_header_packed_t *) eth;
+
+      /* drop packets without a PBB header or with a wrong I-tag or B-tag */
+ if (clib_net_to_host_u16 (ph->priority_dei_id) !=
+ clib_net_to_host_u16 (config->macs_tags.priority_dei_id)
+ || clib_net_to_host_u32 (ph->priority_dei_uca_res_sid) !=
+ clib_net_to_host_u32 (config->macs_tags.priority_dei_uca_res_sid))
+ return 1;
+
+ eth += config->pop_bytes;
+ }
+
+ if (config->push_bytes > 0)
+ {
+ eth -= config->push_bytes;
+      /* copy the B-DA (6B), B-SA (6B), B-TAG (4B), I-TAG (6B) */
+ *((u64 *) eth) = config->raw_data.data1;
+ *((u64 *) (eth + 8)) = config->raw_data.data2;
+ *((u16 *) (eth + 16)) = config->raw_data.data3;
+ *((u32 *) (eth + 18)) = config->raw_data.data4;
+ }
+
+ /* Update l2_len */
+ vnet_buffer (b0)->l2.l2_len +=
+ (word) config->push_bytes - (word) config->pop_bytes;
+ /* Update packet len */
+ vlib_buffer_advance (b0,
+ (word) config->pop_bytes - (word) config->push_bytes);
+
+ return 0;
+}
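+
+/*
+ * Byte-accounting note: the four stores in the push branch above lay
+ * down the 22-byte PBB encapsulation (B-DA 6 + B-SA 6 + B-TAG 4 +
+ * I-TAG 6 = 22), written as 8 + 8 + 2 + 4 bytes from the raw_data view
+ * of the config.
+ */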
+
+u32 l2pbb_configure (vlib_main_t * vlib_main,
+ vnet_main_t * vnet_main, u32 sw_if_index, u32 vtr_op,
+ u8 * b_dmac, u8 * b_smac,
+ u16 b_vlanid, u32 i_sid, u16 vlan_outer_tag);
+
+/**
+ * Configure vlan tag rewrite on the given interface.
+ * Return 1 if there is an error, 0 if ok
+ */
+u32 l2vtr_configure (vlib_main_t * vlib_main,
+ vnet_main_t * vnet_main,
+ u32 sw_if_index,
+ u32 vtr_op, u32 push_dot1q, u32 vtr_tag1, u32 vtr_tag2);
+
+/**
+ * Get the vlan tag rewrite configuration on the given interface.
+ * Return 1 if there is an error, 0 if ok
+ */
+u32 l2vtr_get (vlib_main_t * vlib_main,
+ vnet_main_t * vnet_main,
+ u32 sw_if_index,
+ u32 * vtr_op,
+ u32 * push_dot1q, u32 * vtr_tag1, u32 * vtr_tag2);
+
+#endif /* included_vnet_l2_vtr_h */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_xcrw.c b/src/vnet/l2/l2_xcrw.c
new file mode 100644
index 00000000000..70610a853d3
--- /dev/null
+++ b/src/vnet/l2/l2_xcrw.c
@@ -0,0 +1,591 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/l2/l2_xcrw.h>
+
+/**
+ * @file
+ * General L2 / L3 cross-connect, used to set up
+ * "L2 interface <--> your-favorite-tunnel-encap" tunnels.
+ *
+ * We set up a typical L2 cross-connect or (future) bridge
+ * to hook L2 interface(s) up to the L3 stack in arbitrary ways.
+ *
+ * Each l2_xcrw adjacency specifies 3 things:
+ *
+ * 1. The next graph node (presumably in the L3 stack) to
+ * process the (L2 -> L3) packet
+ *
+ * 2. A new value for vnet_buffer(b)->sw_if_index[VLIB_TX]
+ * (i.e. a lookup FIB index),
+ *
+ * 3. A rewrite string to apply.
+ *
+ * Example: to cross-connect an L2 interface or (future) bridge
+ * to an mpls-o-gre tunnel, set up the L2 rewrite string as shown in
+ * mpls_gre_rewrite, and use "mpls-post-rewrite" to fix the
+ * GRE IP header checksum and length fields.
+ */
+
+typedef struct
+{
+ u32 next_index;
+ u32 tx_fib_index;
+} l2_xcrw_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_l2_xcrw_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2_xcrw_trace_t *t = va_arg (*args, l2_xcrw_trace_t *);
+
+ s = format (s, "L2_XCRW: next index %d tx_fib_index %d",
+ t->next_index, t->tx_fib_index);
+ return s;
+}
+
+l2_xcrw_main_t l2_xcrw_main;
+
+static vlib_node_registration_t l2_xcrw_node;
+
+static char *l2_xcrw_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2_xcrw_error
+#undef _
+};
+
+static uword
+l2_xcrw_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ l2_xcrw_next_t next_index;
+ l2_xcrw_main_t *xcm = &l2_xcrw_main;
+ vlib_node_t *n = vlib_get_node (vm, l2_xcrw_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t *em = &vm->error_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1;
+ l2_xcrw_adjacency_t *adj0, *adj1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+ adj0 = vec_elt_at_index (xcm->adj_by_sw_if_index, sw_if_index0);
+ adj1 = vec_elt_at_index (xcm->adj_by_sw_if_index, sw_if_index1);
+
+ next0 = adj0->rewrite_header.next_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] =
+ adj0->rewrite_header.sw_if_index;
+
+ next1 = adj1->rewrite_header.next_index;
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] =
+ adj1->rewrite_header.sw_if_index;
+
+ if (PREDICT_TRUE (next0 > 0))
+ {
+ u8 *h0 = vlib_buffer_get_current (b0);
+ vnet_rewrite_one_header (adj0[0], h0,
+ adj0->rewrite_header.data_bytes);
+ vlib_buffer_advance (b0, -adj0->rewrite_header.data_bytes);
+ em->counters[node_counter_base_index + L2_XCRW_ERROR_FWD]++;
+ }
+
+ if (PREDICT_TRUE (next1 > 0))
+ {
+ u8 *h1 = vlib_buffer_get_current (b1);
+ vnet_rewrite_one_header (adj1[0], h1,
+ adj1->rewrite_header.data_bytes);
+ vlib_buffer_advance (b1, -adj1->rewrite_header.data_bytes);
+ em->counters[node_counter_base_index + L2_XCRW_ERROR_FWD]++;
+ }
+
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+	    {
+	      if (b0->flags & VLIB_BUFFER_IS_TRACED)
+		{
+		  l2_xcrw_trace_t *t =
+		    vlib_add_trace (vm, node, b0, sizeof (*t));
+		  t->next_index = next0;
+		  t->tx_fib_index = adj0->rewrite_header.sw_if_index;
+		}
+	      if (b1->flags & VLIB_BUFFER_IS_TRACED)
+		{
+		  l2_xcrw_trace_t *t =
+		    vlib_add_trace (vm, node, b1, sizeof (*t));
+		  t->next_index = next1;
+		  t->tx_fib_index = adj1->rewrite_header.sw_if_index;
+		}
+	    }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u32 sw_if_index0;
+ l2_xcrw_adjacency_t *adj0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ adj0 = vec_elt_at_index (xcm->adj_by_sw_if_index, sw_if_index0);
+
+ next0 = adj0->rewrite_header.next_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] =
+ adj0->rewrite_header.sw_if_index;
+
+ if (PREDICT_TRUE (next0 > 0))
+ {
+ u8 *h0 = vlib_buffer_get_current (b0);
+ vnet_rewrite_one_header (adj0[0], h0,
+ adj0->rewrite_header.data_bytes);
+ vlib_buffer_advance (b0, -adj0->rewrite_header.data_bytes);
+ em->counters[node_counter_base_index + L2_XCRW_ERROR_FWD]++;
+ }
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ l2_xcrw_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->next_index = next0;
+ t->tx_fib_index = adj0->rewrite_header.sw_if_index;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2_xcrw_node, static) = {
+ .function = l2_xcrw_node_fn,
+ .name = "l2-xcrw",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2_xcrw_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2_xcrw_error_strings),
+ .error_strings = l2_xcrw_error_strings,
+
+ .n_next_nodes = L2_XCRW_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [L2_XCRW_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2_xcrw_node, l2_xcrw_node_fn);
+
+clib_error_t *
+l2_xcrw_init (vlib_main_t * vm)
+{
+ l2_xcrw_main_t *mp = &l2_xcrw_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = &vnet_main;
+ mp->tunnel_index_by_l2_sw_if_index = hash_create (0, sizeof (uword));
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2_xcrw_init);
+
+static uword
+dummy_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ clib_warning ("you shouldn't be here, leaking buffers...");
+ return frame->n_vectors;
+}
+
+static u8 *
+format_xcrw_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, "xcrw%d", dev_instance);
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (xcrw_device_class,static) = {
+ .name = "Xcrw",
+ .format_device_name = format_xcrw_name,
+ .tx_function = dummy_interface_tx,
+};
+/* *INDENT-ON* */
+
+/* Create a sham tunnel interface and return its sw_if_index */
+static u32
+create_xcrw_interface (vlib_main_t * vm)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ static u32 instance;
+ u8 address[6];
+ u32 hw_if_index;
+ vnet_hw_interface_t *hi;
+ u32 sw_if_index;
+
+ /* mac address doesn't really matter */
+ memset (address, 0, sizeof (address));
+ address[2] = 0x12;
+
+  /* can return an error only if phy != 0, which is not the case here */
+ (void) ethernet_register_interface
+ (vnm, xcrw_device_class.index, instance++, address, &hw_if_index,
+ /* flag change */ 0);
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ sw_if_index = hi->sw_if_index;
+ vnet_sw_interface_set_flags (vnm, sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+
+ /* Output to the sham tunnel invokes the encap node */
+ hi->output_node_index = l2_xcrw_node.index;
+
+ return sw_if_index;
+}
+
+int
+vnet_configure_l2_xcrw (vlib_main_t * vm, vnet_main_t * vnm,
+ u32 l2_sw_if_index, u32 tx_fib_index,
+ u8 * rewrite, u32 next_node_index, int is_add)
+{
+ l2_xcrw_main_t *xcm = &l2_xcrw_main;
+ l2_xcrw_adjacency_t *a;
+ l2_xcrw_tunnel_t *t;
+ uword *p;
+
+ if (is_add)
+ {
+
+ pool_get (xcm->tunnels, t);
+
+ /* No interface allocated? Do it. Otherwise, set admin up */
+ if (t->tunnel_sw_if_index == 0)
+ t->tunnel_sw_if_index = create_xcrw_interface (vm);
+ else
+ vnet_sw_interface_set_flags (vnm, t->tunnel_sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+
+ t->l2_sw_if_index = l2_sw_if_index;
+
+ vec_validate (xcm->adj_by_sw_if_index, t->l2_sw_if_index);
+
+ a = vec_elt_at_index (xcm->adj_by_sw_if_index, t->l2_sw_if_index);
+ memset (a, 0, sizeof (*a));
+
+ a->rewrite_header.sw_if_index = tx_fib_index;
+
+ /*
+ * Add or find a dynamic disposition for the successor node,
+ * e.g. so we can ship pkts to mpls_post_rewrite...
+ */
+ a->rewrite_header.next_index =
+ vlib_node_add_next (vm, l2_xcrw_node.index, next_node_index);
+
+ if (vec_len (rewrite))
+ vnet_rewrite_set_data (a[0], rewrite, vec_len (rewrite));
+
+ set_int_l2_mode (vm, vnm, MODE_L2_XC, t->l2_sw_if_index, 0, 0, 0,
+ t->tunnel_sw_if_index);
+ hash_set (xcm->tunnel_index_by_l2_sw_if_index,
+ t->l2_sw_if_index, t - xcm->tunnels);
+ return 0;
+ }
+ else
+ {
+ p = hash_get (xcm->tunnel_index_by_l2_sw_if_index, l2_sw_if_index);
+ if (p == 0)
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+
+ t = pool_elt_at_index (xcm->tunnels, p[0]);
+
+ a = vec_elt_at_index (xcm->adj_by_sw_if_index, t->l2_sw_if_index);
+ /* Reset adj to drop traffic */
+ memset (a, 0, sizeof (*a));
+
+ set_int_l2_mode (vm, vnm, MODE_L3, t->l2_sw_if_index, 0, 0, 0, 0);
+
+ vnet_sw_interface_set_flags (vnm, t->tunnel_sw_if_index, 0 /* down */ );
+
+ hash_unset (xcm->tunnel_index_by_l2_sw_if_index, l2_sw_if_index);
+ pool_put (xcm->tunnels, t);
+ }
+ return 0;
+}
+
+
+static clib_error_t *
+set_l2_xcrw_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ int is_add = 1;
+ int is_ipv6 = 0; /* for fib id -> fib index mapping */
+ u32 tx_fib_id = ~0;
+ u32 tx_fib_index = ~0;
+ u32 next_node_index = ~0;
+ u32 l2_sw_if_index;
+ u8 *rw = 0;
+ vnet_main_t *vnm = vnet_get_main ();
+ int rv;
+
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ if (!unformat (line_input, "%U",
+ unformat_vnet_sw_interface, vnm, &l2_sw_if_index))
+ return clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, line_input);
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "next %U",
+ unformat_vlib_node, vm, &next_node_index))
+ ;
+ else if (unformat (line_input, "tx-fib-id %d", &tx_fib_id))
+ ;
+ else if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "ipv6"))
+ is_ipv6 = 1;
+      else if (unformat (line_input, "rw %U", unformat_hex_string, &rw))
+	;
+ else
+ break;
+ }
+
+ if (next_node_index == ~0)
+ return clib_error_return (0, "next node not specified");
+
+ if (tx_fib_id != ~0)
+ {
+ uword *p;
+
+ if (is_ipv6)
+ p = hash_get (ip6_main.fib_index_by_table_id, tx_fib_id);
+ else
+ p = hash_get (ip4_main.fib_index_by_table_id, tx_fib_id);
+
+ if (p == 0)
+ return clib_error_return (0, "nonexistent tx_fib_id %d", tx_fib_id);
+
+ tx_fib_index = p[0];
+ }
+
+ rv = vnet_configure_l2_xcrw (vm, vnm, l2_sw_if_index, tx_fib_index,
+ rw, next_node_index, is_add);
+
+ switch (rv)
+ {
+
+ case 0:
+ break;
+
+ case VNET_API_ERROR_INVALID_SW_IF_INDEX:
+ return clib_error_return (0, "%U not cross-connected",
+ format_vnet_sw_if_index_name,
+ vnm, l2_sw_if_index);
+ default:
+ return clib_error_return (0, "vnet_configure_l2_xcrw returned %d", rv);
+ }
+
+ vec_free (rw);
+
+ return 0;
+}
+
+/*?
+ * Add or delete a Layer 2 to Layer 3 rewrite cross-connect. This is
+ * used to hook Layer 2 interface(s) up to the Layer 3 stack in
+ * arbitrary ways. For example, cross-connect an L2 interface or
+ * (future) bridge to an mpls-o-gre tunnel. Set up the L2 rewrite
+ * string as shown in mpls_gre_rewrite, and use \"mpls-post-rewrite\"
+ * to fix the GRE IP header checksum and length fields.
+ *
+ * @cliexpar
+ * @todo This is incomplete. This needs a detailed description and a
+ * practical example.
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_l2_xcrw_command, static) = {
+ .path = "set interface l2 xcrw",
+ .short_help =
+ "set interface l2 xcrw <interface> next <node-name>\n"
+ " [del] [tx-fib-id <id>] [ipv6] rw <hex-bytes>",
+ .function = set_l2_xcrw_command_fn,
+};
+/* *INDENT-ON* */
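+
+/*
+ * Hypothetical usage sketch (the interface name, next node and rewrite
+ * bytes are illustrative, not a tested configuration):
+ *
+ *   set interface l2 xcrw GigabitEthernet2/0/0 next ip4-lookup
+ *       tx-fib-id 0 rw 4500...
+ *
+ * The rw bytes are the raw encap header prepended to each packet;
+ * "4500..." stands in for a complete hex string.
+ */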
+
+static u8 *
+format_l2xcrw (u8 * s, va_list * args)
+{
+ vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+ l2_xcrw_tunnel_t *t = va_arg (*args, l2_xcrw_tunnel_t *);
+ l2_xcrw_main_t *xcm = &l2_xcrw_main;
+ vlib_main_t *vm = vlib_get_main ();
+ l2_xcrw_adjacency_t *a;
+ u8 *rewrite_string;
+
+ if (t == 0)
+ {
+ s = format (s, "%-25s%s", "L2 interface", "Tunnel Details");
+ return s;
+ }
+
+ s = format (s, "%-25U %U ",
+ format_vnet_sw_if_index_name, vnm, t->l2_sw_if_index,
+ format_vnet_sw_if_index_name, vnm, t->tunnel_sw_if_index);
+
+ a = vec_elt_at_index (xcm->adj_by_sw_if_index, t->l2_sw_if_index);
+
+ s = format (s, "next %U ",
+ format_vlib_next_node_name, vm, l2_xcrw_node.index,
+ a->rewrite_header.next_index);
+
+ if (a->rewrite_header.sw_if_index != ~0)
+ s = format (s, "tx fib index %d ", a->rewrite_header.sw_if_index);
+
+ if (a->rewrite_header.data_bytes)
+ {
+ rewrite_string = (u8 *) (a + 1);
+ rewrite_string -= a->rewrite_header.data_bytes;
+ s = format (s, "rewrite data: %U ",
+ format_hex_bytes, rewrite_string,
+ a->rewrite_header.data_bytes);
+ }
+
+ s = format (s, "\n");
+
+ return s;
+}
+
+
+static clib_error_t *
+show_l2xcrw_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ l2_xcrw_main_t *xcm = &l2_xcrw_main;
+ l2_xcrw_tunnel_t *t;
+
+ if (pool_elts (xcm->tunnels) == 0)
+ {
+ vlib_cli_output (vm, "No L2 / L3 rewrite cross-connects configured");
+ return 0;
+ }
+
+ vlib_cli_output (vm, "%U", format_l2xcrw, 0, 0);
+
+ /* *INDENT-OFF* */
+ pool_foreach (t, xcm->tunnels,
+ ({
+ vlib_cli_output (vm, "%U", format_l2xcrw, vnm, t);
+ }));
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
+/*?
+ * Display a Layer 2 to Layer 3 rewrite cross-connect. This is used to
+ * hook Layer 2 interface(s) up to the Layer 3 stack in arbitrary ways.
+ *
+ * @todo This is incomplete. This needs a detailed description and a
+ * practical example.
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_l2xcrw_command, static) = {
+ .path = "show l2xcrw",
+ .short_help = "show l2xcrw",
+ .function = show_l2xcrw_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2/l2_xcrw.h b/src/vnet/l2/l2_xcrw.h
new file mode 100644
index 00000000000..ca80aae9929
--- /dev/null
+++ b/src/vnet/l2/l2_xcrw.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_l2_xcrw_h__
+#define __included_l2_xcrw_h__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/ip.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_output.h>
+#include <vnet/api_errno.h>
+#include <vnet/ethernet/ethernet.h>
+
+typedef struct
+{
+ /*
+ * Let: rewrite_header.sw_if_index = tx_fib_index or ~0.
+ * rewrite_header.next_index = L2_XCRW_NEXT_XXX
+ */
+ vnet_declare_rewrite (VLIB_BUFFER_PRE_DATA_SIZE);
+} l2_xcrw_adjacency_t;
+
+typedef struct
+{
+ /* L2 interface */
+ u32 l2_sw_if_index;
+
+ /* Tunnel interface */
+ u32 tunnel_sw_if_index; /* This field remains set in freed pool elts */
+
+} l2_xcrw_tunnel_t;
+
+typedef struct
+{
+ u32 cached_next_index;
+
+ /* Vector of cross-connect rewrites */
+ l2_xcrw_adjacency_t *adj_by_sw_if_index;
+
+ /* Pool of xcrw tunnels */
+ l2_xcrw_tunnel_t *tunnels;
+
+ /* Tunnel index by tunnel sw_if_index */
+ uword *tunnel_index_by_l2_sw_if_index;
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} l2_xcrw_main_t;
+
+typedef enum
+{
+ L2_XCRW_NEXT_DROP,
+ L2_XCRW_N_NEXT,
+} l2_xcrw_next_t;
+
+#define foreach_l2_xcrw_error \
+_(DROP, "Packets dropped") \
+_(FWD, "Packets forwarded")
+
+typedef enum
+{
+#define _(sym,str) L2_XCRW_ERROR_##sym,
+ foreach_l2_xcrw_error
+#undef _
+ L2_XCRW_N_ERROR,
+} l2_xcrw_error_t;
+
+#endif /* __included_l2_xcrw_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2tp/decap.c b/src/vnet/l2tp/decap.c
new file mode 100644
index 00000000000..e8986935e93
--- /dev/null
+++ b/src/vnet/l2tp/decap.c
@@ -0,0 +1,309 @@
+/*
+ * decap.c : L2TPv3 tunnel decapsulation
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/l2tp/l2tp.h>
+
+/* Statistics (not really errors) */
+#define foreach_l2t_decap_error \
+_(USER_TO_NETWORK, "L2TP user (ip6) to L2 network pkts") \
+_(SESSION_ID_MISMATCH, "l2tpv3 local session id mismatches") \
+_(COOKIE_MISMATCH, "l2tpv3 local cookie mismatches") \
+_(NO_SESSION, "l2tpv3 session not found") \
+_(ADMIN_DOWN, "l2tpv3 tunnel is down")
+
+static char *l2t_decap_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2t_decap_error
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) L2T_DECAP_ERROR_##sym,
+ foreach_l2t_decap_error
+#undef _
+ L2T_DECAP_N_ERROR,
+} l2t_decap_error_t;
+
+typedef enum
+{
+ L2T_DECAP_NEXT_DROP,
+ L2T_DECAP_NEXT_L2_INPUT,
+ L2T_DECAP_N_NEXT,
+ /* Pseudo next index */
+ L2T_DECAP_NEXT_NO_INTERCEPT = L2T_DECAP_N_NEXT,
+} l2t_decap_next_t;
+
+#define NSTAGES 3
+
+static inline void
+stage0 (vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
+ vlib_prefetch_buffer_header (b, STORE);
+ /* l2tpv3 header is a long way away, need 2 cache lines */
+ CLIB_PREFETCH (b->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
+}
+
+static inline void
+stage1 (vlib_main_t * vm, vlib_node_runtime_t * node, u32 bi)
+{
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ l2t_main_t *lm = &l2t_main;
+ ip6_header_t *ip6 = vlib_buffer_get_current (b);
+ u32 session_index;
+ uword *p = 0;
+ l2tpv3_header_t *l2t;
+
+  /* Not L2TPv3 (protocol 0x73, decimal 115)? Use the normal path. */
+ if (PREDICT_FALSE (ip6->protocol != IP_PROTOCOL_L2TP))
+ {
+ vnet_buffer (b)->l2t.next_index = L2T_DECAP_NEXT_NO_INTERCEPT;
+ return;
+ }
+
+ /* Make up your minds, people... */
+ switch (lm->lookup_type)
+ {
+ case L2T_LOOKUP_SRC_ADDRESS:
+ p = hash_get_mem (lm->session_by_src_address, &ip6->src_address);
+ break;
+ case L2T_LOOKUP_DST_ADDRESS:
+ p = hash_get_mem (lm->session_by_dst_address, &ip6->dst_address);
+ break;
+ case L2T_LOOKUP_SESSION_ID:
+ l2t = (l2tpv3_header_t *) (ip6 + 1);
+ p = hash_get (lm->session_by_session_id, l2t->session_id);
+ break;
+ default:
+ ASSERT (0);
+ }
+
+ if (PREDICT_FALSE (p == 0))
+ {
+ vnet_buffer (b)->l2t.next_index = L2T_DECAP_NEXT_NO_INTERCEPT;
+ return;
+ }
+ else
+ {
+ session_index = p[0];
+ }
+
+ /* Remember mapping index, prefetch the mini counter */
+ vnet_buffer (b)->l2t.next_index = L2T_DECAP_NEXT_L2_INPUT;
+ vnet_buffer (b)->l2t.session_index = session_index;
+
+ /* $$$$$ prefetch counter */
+}
+
+static inline u32
+last_stage (vlib_main_t * vm, vlib_node_runtime_t * node, u32 bi)
+{
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ l2t_main_t *lm = &l2t_main;
+ ip6_header_t *ip6 = vlib_buffer_get_current (b);
+ vlib_node_t *n = vlib_get_node (vm, node->node_index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t *em = &vm->error_main;
+ l2tpv3_header_t *l2tp;
+ u32 counter_index;
+ l2t_session_t *session = 0;
+ u32 session_index;
+ u32 next_index;
+ u8 l2tp_decap_local = (l2t_decap_local_node.index == n->index);
+
+ /* Other-than-output pkt? We're done... */
+ if (vnet_buffer (b)->l2t.next_index != L2T_DECAP_NEXT_L2_INPUT)
+ {
+ next_index = vnet_buffer (b)->l2t.next_index;
+ goto done;
+ }
+
+ em->counters[node_counter_base_index + L2T_DECAP_ERROR_USER_TO_NETWORK] +=
+ 1;
+
+ session_index = vnet_buffer (b)->l2t.session_index;
+
+ counter_index =
+ session_index_to_counter_index (session_index,
+ SESSION_COUNTER_USER_TO_NETWORK);
+
+ /* per-mapping byte stats include the ethernet header */
+ vlib_increment_combined_counter (&lm->counter_main,
+ os_get_cpu_number (),
+ counter_index, 1 /* packet_increment */ ,
+ vlib_buffer_length_in_chain (vm, b) +
+ sizeof (ethernet_header_t));
+
+ session = pool_elt_at_index (lm->sessions, session_index);
+
+ l2tp = vlib_buffer_get_current (b) + sizeof (*ip6);
+
+ if (PREDICT_FALSE (l2tp->session_id != session->local_session_id))
+ {
+ /* Key matched but session id does not. Assume packet is not for us. */
+ em->counters[node_counter_base_index +
+ L2T_DECAP_ERROR_SESSION_ID_MISMATCH] += 1;
+ next_index = L2T_DECAP_NEXT_NO_INTERCEPT;
+ goto done;
+ }
+
+ if (PREDICT_FALSE (l2tp->cookie != session->local_cookie[0]))
+ {
+ if (l2tp->cookie != session->local_cookie[1])
+ {
+ /* Key and session ID matched, but cookie doesn't. Drop this packet. */
+ b->error = node->errors[L2T_DECAP_ERROR_COOKIE_MISMATCH];
+ next_index = L2T_DECAP_NEXT_DROP;
+ goto done;
+ }
+ }
+
+ vnet_buffer (b)->sw_if_index[VLIB_RX] = session->sw_if_index;
+
+ if (PREDICT_FALSE (!(session->admin_up)))
+ {
+ b->error = node->errors[L2T_DECAP_ERROR_ADMIN_DOWN];
+ next_index = L2T_DECAP_NEXT_DROP;
+ goto done;
+ }
+
+ /* strip the ip6 and L2TP header */
+ vlib_buffer_advance (b, sizeof (*ip6) + session->l2tp_hdr_size);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b);
+
+ if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ l2t_trace_t *t = vlib_add_trace (vm, node, b, sizeof (*t));
+ t->is_user_to_network = 1;
+ t->our_address.as_u64[0] = ip6->dst_address.as_u64[0];
+ t->our_address.as_u64[1] = ip6->dst_address.as_u64[1];
+ t->client_address.as_u64[0] = ip6->src_address.as_u64[0];
+ t->client_address.as_u64[1] = ip6->src_address.as_u64[1];
+ t->session_index = session_index;
+ }
+
+ return L2T_DECAP_NEXT_L2_INPUT;
+
+done:
+ if (next_index == L2T_DECAP_NEXT_NO_INTERCEPT)
+ {
+      /* Small behavioral difference between l2tp-decap and l2tp-decap-local */
+ if (l2tp_decap_local)
+ {
+ b->error = node->errors[L2T_DECAP_ERROR_NO_SESSION];
+ next_index = L2T_DECAP_NEXT_DROP;
+ }
+ else
+ {
+ /* Go to next node on the ip6 configuration chain */
+ if (PREDICT_TRUE (session != 0))
+ vnet_feature_next (session->sw_if_index, &next_index, b);
+ }
+ }
+
+ if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ l2t_trace_t *t = vlib_add_trace (vm, node, b, sizeof (*t));
+ t->is_user_to_network = 1;
+ t->our_address.as_u64[0] = ip6->dst_address.as_u64[0];
+ t->our_address.as_u64[1] = ip6->dst_address.as_u64[1];
+ t->client_address.as_u64[0] = ip6->src_address.as_u64[0];
+ t->client_address.as_u64[1] = ip6->src_address.as_u64[1];
+ t->session_index = ~0;
+ }
+ return next_index;
+}
+
+#include <vnet/pipeline.h>
+
+static uword
+l2t_decap_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return dispatch_pipeline (vm, node, frame);
+}
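+
+/*
+ * Note: dispatch_pipeline () is generated by <vnet/pipeline.h> from the
+ * NSTAGES / stage0 / stage1 / last_stage definitions above. In effect the
+ * stages run as a software pipeline over the frame: stage0 prefetches a
+ * newly admitted buffer while stage1 classifies the one admitted before
+ * it and last_stage disposes of the oldest, hiding buffer-header and
+ * packet-data cache-miss latency.
+ */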
+
+/*
+ * l2tp-decap and l2tp-decap-local have slightly different behavior.
+ * When a packet has no associated session, l2tp-decap lets it continue
+ * down the ip6 feature chain, while l2tp-decap-local drops it.
+ */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2t_decap_node) = {
+ .function = l2t_decap_node_fn,
+ .name = "l2tp-decap",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2t_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2t_decap_error_strings),
+ .error_strings = l2t_decap_error_strings,
+
+ .n_next_nodes = L2T_DECAP_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [L2T_DECAP_NEXT_L2_INPUT] = "l2-input",
+ [L2T_DECAP_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2t_decap_node, l2t_decap_node_fn);
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2t_decap_local_node) = {
+ .function = l2t_decap_node_fn,
+ .name = "l2tp-decap-local",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2t_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2t_decap_error_strings),
+ .error_strings = l2t_decap_error_strings,
+
+ .n_next_nodes = L2T_DECAP_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [L2T_DECAP_NEXT_L2_INPUT] = "l2-input",
+ [L2T_DECAP_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+void
+l2tp_decap_init (void)
+{
+ ip6_register_protocol (IP_PROTOCOL_L2TP, l2t_decap_local_node.index);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2tp/encap.c b/src/vnet/l2tp/encap.c
new file mode 100644
index 00000000000..ed7a9580de1
--- /dev/null
+++ b/src/vnet/l2tp/encap.c
@@ -0,0 +1,238 @@
+/*
+ * encap.c : L2TPv3 tunnel encapsulation
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/l2tp/l2tp.h>
+
+/* Statistics (not really errors) */
+#define foreach_l2t_encap_error \
+_(NETWORK_TO_USER, "L2TP L2 network to user (ip6) pkts") \
+_(LOOKUP_FAIL_TO_L3, "L2TP L2 session lookup failed pkts") \
+_(ADMIN_DOWN, "L2TP tunnel is down")
+
+static char *l2t_encap_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2t_encap_error
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) L2T_ENCAP_ERROR_##sym,
+ foreach_l2t_encap_error
+#undef _
+ L2T_ENCAP_N_ERROR,
+} l2t_encap_error_t;
+
+
+typedef enum
+{
+ L2T_ENCAP_NEXT_DROP,
+ L2T_ENCAP_NEXT_IP6_LOOKUP,
+ L2T_ENCAP_N_NEXT,
+} l2t_encap_next_t;
+
+typedef struct
+{
+ u32 cached_session_index;
+ u32 cached_sw_if_index;
+ vnet_main_t *vnet_main;
+} l2tp_encap_runtime_t;
+
+vlib_node_registration_t l2t_encap_node;
+
+#define NSTAGES 3
+
+static inline void
+stage0 (vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
+ vlib_prefetch_buffer_header (b, STORE);
+ CLIB_PREFETCH (b->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
+}
+
+static inline void
+stage1 (vlib_main_t * vm, vlib_node_runtime_t * node, u32 bi)
+{
+ l2tp_encap_runtime_t *rt = (void *) node->runtime_data;
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ vnet_hw_interface_t *hi;
+
+ u32 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
+ u32 session_index = rt->cached_session_index;
+
+ if (PREDICT_FALSE (rt->cached_sw_if_index != sw_if_index))
+ {
+ hi = vnet_get_sup_hw_interface (rt->vnet_main, sw_if_index);
+ session_index = rt->cached_session_index = hi->dev_instance;
+ rt->cached_sw_if_index = sw_if_index;
+ }
+
+ /* Remember mapping index, prefetch the mini counter */
+ vnet_buffer (b)->l2t.next_index = L2T_ENCAP_NEXT_IP6_LOOKUP;
+ vnet_buffer (b)->l2t.session_index = session_index;
+
+ /* $$$$ prefetch counter... */
+}
+
+static inline u32
+last_stage (vlib_main_t * vm, vlib_node_runtime_t * node, u32 bi)
+{
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ l2t_main_t *lm = &l2t_main;
+ vlib_node_t *n = vlib_get_node (vm, l2t_encap_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t *em = &vm->error_main;
+ l2tpv3_header_t *l2tp;
+ u32 session_index;
+ u32 counter_index;
+ l2t_session_t *s;
+ ip6_header_t *ip6;
+ u16 payload_length;
+ u32 next_index = L2T_ENCAP_NEXT_IP6_LOOKUP;
+
+ /* Other-than-output pkt? We're done... */
+ if (vnet_buffer (b)->l2t.next_index != L2T_ENCAP_NEXT_IP6_LOOKUP)
+ return vnet_buffer (b)->l2t.next_index;
+
+ em->counters[node_counter_base_index + L2T_ENCAP_ERROR_NETWORK_TO_USER] +=
+ 1;
+
+ session_index = vnet_buffer (b)->l2t.session_index;
+
+ counter_index =
+ session_index_to_counter_index (session_index,
+ SESSION_COUNTER_NETWORK_TO_USER);
+
+ /* per-mapping byte stats include the ethernet header */
+ vlib_increment_combined_counter (&lm->counter_main,
+ os_get_cpu_number (),
+ counter_index, 1 /* packet_increment */ ,
+ vlib_buffer_length_in_chain (vm, b));
+
+ s = pool_elt_at_index (lm->sessions, session_index);
+
+ vnet_buffer (b)->sw_if_index[VLIB_TX] = s->encap_fib_index;
+
+ /* Paint on an l2tpv3 hdr */
+ vlib_buffer_advance (b, -(s->l2tp_hdr_size));
+ l2tp = vlib_buffer_get_current (b);
+
+ l2tp->session_id = s->remote_session_id;
+ l2tp->cookie = s->remote_cookie;
+ if (PREDICT_FALSE (s->l2_sublayer_present))
+ {
+ l2tp->l2_specific_sublayer = 0;
+ }
+
+ /* Paint on an ip6 header */
+ vlib_buffer_advance (b, -(sizeof (*ip6)));
+ ip6 = vlib_buffer_get_current (b);
+
+ if (PREDICT_FALSE (!(s->admin_up)))
+ {
+ b->error = node->errors[L2T_ENCAP_ERROR_ADMIN_DOWN];
+ next_index = L2T_ENCAP_NEXT_DROP;
+ goto done;
+ }
+
+ ip6->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 (0x6 << 28);
+
+ /* calculate ip6 payload length */
+ payload_length = vlib_buffer_length_in_chain (vm, b);
+ payload_length -= sizeof (*ip6);
+
+ ip6->payload_length = clib_host_to_net_u16 (payload_length);
+ ip6->protocol = IP_PROTOCOL_L2TP;
+ ip6->hop_limit = 0xff;
+ ip6->src_address.as_u64[0] = s->our_address.as_u64[0];
+ ip6->src_address.as_u64[1] = s->our_address.as_u64[1];
+ ip6->dst_address.as_u64[0] = s->client_address.as_u64[0];
+ ip6->dst_address.as_u64[1] = s->client_address.as_u64[1];
+
+
+done:
+ if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ l2t_trace_t *t = vlib_add_trace (vm, node, b, sizeof (*t));
+ t->is_user_to_network = 0;
+ t->our_address.as_u64[0] = ip6->src_address.as_u64[0];
+ t->our_address.as_u64[1] = ip6->src_address.as_u64[1];
+ t->client_address.as_u64[0] = ip6->dst_address.as_u64[0];
+ t->client_address.as_u64[1] = ip6->dst_address.as_u64[1];
+ t->session_index = session_index;
+ }
+
+ return next_index;
+}
+
+#include <vnet/pipeline.h>
+
+uword
+l2t_encap_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return dispatch_pipeline (vm, node, frame);
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2t_encap_node) = {
+ .function = l2t_encap_node_fn,
+ .name = "l2tp-encap",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2t_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .runtime_data_bytes = sizeof (l2tp_encap_runtime_t),
+
+ .n_errors = ARRAY_LEN(l2t_encap_error_strings),
+ .error_strings = l2t_encap_error_strings,
+
+ .n_next_nodes = L2T_ENCAP_N_NEXT,
+
+ /* add dispositions here */
+ .next_nodes = {
+ [L2T_ENCAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
+ [L2T_ENCAP_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2t_encap_node, l2t_encap_node_fn);
+
+void
+l2tp_encap_init (vlib_main_t * vm)
+{
+ l2tp_encap_runtime_t *rt;
+
+ rt = vlib_node_get_runtime_data (vm, l2t_encap_node.index);
+ rt->vnet_main = vnet_get_main ();
+ rt->cached_sw_if_index = (u32) ~ 0;
+ rt->cached_session_index = (u32) ~ 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2tp/l2tp.api b/src/vnet/l2tp/l2tp.api
new file mode 100644
index 00000000000..5a5a5a48a68
--- /dev/null
+++ b/src/vnet/l2tp/l2tp.api
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \brief l2tpv3 tunnel interface create request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+    @param client_address - remote client tunnel ip address
+    @param our_address - local tunnel ip address
+    @param is_ipv6 - ipv6 if non-zero, else ipv4
+    @param local_session_id - local tunnel session id
+    @param remote_session_id - remote tunnel session id
+    @param local_cookie - local tunnel cookie
+    @param remote_cookie - remote tunnel cookie
+    @param l2_sublayer_present - l2 sublayer is present in packets if non-zero
+    @param encap_vrf_id - fib identifier used for outgoing encapsulated packets
+*/
+define l2tpv3_create_tunnel
+{
+ u32 client_index;
+ u32 context;
+ u8 client_address[16];
+ u8 our_address[16];
+ u8 is_ipv6;
+ u32 local_session_id;
+ u32 remote_session_id;
+ u64 local_cookie;
+ u64 remote_cookie;
+ u8 l2_sublayer_present;
+ u32 encap_vrf_id;
+};
+
+/** \brief l2tpv3 tunnel interface create response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param sw_if_index - index of the new tunnel interface
+*/
+define l2tpv3_create_tunnel_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
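+/** \brief l2tpv3 tunnel set cookies request
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param sw_if_index - index of the tunnel interface
+    @param new_local_cookie - new local cookie value
+    @param new_remote_cookie - new remote cookie value
+*/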
+define l2tpv3_set_tunnel_cookies
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u64 new_local_cookie;
+ u64 new_remote_cookie;
+};
+
+/** \brief L2TP tunnel set cookies response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define l2tpv3_set_tunnel_cookies_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+define sw_if_l2tpv3_tunnel_details
+{
+ u32 context;
+ u32 sw_if_index;
+ u8 interface_name[64];
+ u8 client_address[16];
+ u8 our_address[16];
+ u32 local_session_id;
+ u32 remote_session_id;
+ u64 local_cookie[2];
+ u64 remote_cookie;
+ u8 l2_sublayer_present;
+};
+
+define sw_if_l2tpv3_tunnel_dump
+{
+ u32 client_index;
+ u32 context;
+};
+
+define l2tpv3_interface_enable_disable
+{
+ u32 client_index;
+ u32 context;
+ u8 enable_disable;
+ u32 sw_if_index;
+};
+
+define l2tpv3_interface_enable_disable_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+define l2tpv3_set_lookup_key
+{
+ u32 client_index;
+ u32 context;
+ /* 0 = ip6 src_address, 1 = ip6 dst_address, 2 = session_id */
+ u8 key;
+};
+
+define l2tpv3_set_lookup_key_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2tp/l2tp.c b/src/vnet/l2tp/l2tp.c
new file mode 100644
index 00000000000..a4531dab85c
--- /dev/null
+++ b/src/vnet/l2tp/l2tp.c
@@ -0,0 +1,739 @@
+/*
+ * l2tp.c : L2TPv3 tunnel support
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/l2tp/l2tp.h>
+
+l2t_main_t l2t_main;
+
+/* packet trace format function */
+u8 *
+format_l2t_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2t_trace_t *t = va_arg (*args, l2t_trace_t *);
+
+ if (t->is_user_to_network)
+ s = format (s, "L2T: %U (client) -> %U (our) session %d",
+ format_ip6_address, &t->client_address,
+ format_ip6_address, &t->our_address, t->session_index);
+ else
+    s = format (s, "L2T: %U (our) -> %U (client) session %d",
+ format_ip6_address, &t->our_address,
+ format_ip6_address, &t->client_address, t->session_index);
+ return s;
+}
+
+u8 *
+format_l2t_session (u8 * s, va_list * args)
+{
+ l2t_session_t *session = va_arg (*args, l2t_session_t *);
+ l2t_main_t *lm = &l2t_main;
+ u32 counter_index;
+ vlib_counter_t v;
+
+ s = format (s, "[%d] %U (our) %U (client) %U (sw_if_index %d)\n",
+ session - lm->sessions,
+ format_ip6_address, &session->our_address,
+ format_ip6_address, &session->client_address,
+ format_vnet_sw_interface_name, lm->vnet_main,
+ vnet_get_sw_interface (lm->vnet_main, session->sw_if_index),
+ session->sw_if_index);
+
+ s = format (s, " local cookies %016llx %016llx remote cookie %016llx\n",
+ clib_net_to_host_u64 (session->local_cookie[0]),
+ clib_net_to_host_u64 (session->local_cookie[1]),
+ clib_net_to_host_u64 (session->remote_cookie));
+
+ s = format (s, " local session-id %d remote session-id %d\n",
+ clib_net_to_host_u32 (session->local_session_id),
+ clib_net_to_host_u32 (session->remote_session_id));
+
+ s = format (s, " l2 specific sublayer %s\n",
+	      session->l2_sublayer_present ? "present" : "absent");
+
+ counter_index =
+ session_index_to_counter_index (session - lm->sessions,
+ SESSION_COUNTER_USER_TO_NETWORK);
+
+ vlib_get_combined_counter (&lm->counter_main, counter_index, &v);
+ if (v.packets != 0)
+ s = format (s, " user-to-net: %llu pkts %llu bytes\n",
+ v.packets, v.bytes);
+
+ vlib_get_combined_counter (&lm->counter_main, counter_index + 1, &v);
+
+ if (v.packets != 0)
+ s = format (s, " net-to-user: %llu pkts %llu bytes\n",
+ v.packets, v.bytes);
+ return s;
+}
+
+static clib_error_t *
+show_l2tp_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ l2t_session_t *session;
+ l2t_main_t *lm = &l2t_main;
+ char *keystr = 0;
+ int verbose = 0;
+
+ if (unformat (input, "verbose") || unformat (input, "v"))
+ verbose = 1;
+
+ if (pool_elts (lm->sessions) == 0)
+ vlib_cli_output (vm, "No l2tp sessions...");
+ else
+ vlib_cli_output (vm, "%u l2tp sessions...", pool_elts (lm->sessions));
+
+ if (verbose)
+ {
+ switch (lm->lookup_type)
+ {
+ case L2T_LOOKUP_SRC_ADDRESS:
+ keystr = "src address";
+ break;
+
+ case L2T_LOOKUP_DST_ADDRESS:
+ keystr = "dst address";
+ break;
+
+ case L2T_LOOKUP_SESSION_ID:
+ keystr = "session id";
+ break;
+
+ default:
+ keystr = "BOGUS!";
+ break;
+ }
+
+ vlib_cli_output (vm, "L2tp session lookup on %s", keystr);
+
+ /* *INDENT-OFF* */
+ pool_foreach (session, lm->sessions,
+ ({
+ vlib_cli_output (vm, "%U", format_l2t_session, session);
+ }));
+ /* *INDENT-ON* */
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_session_detail_command, static) = {
+ .path = "show l2tpv3",
+ .short_help = "show l2tpv3 [verbose]",
+ .function = show_l2tp_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+test_counters_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ l2t_session_t *session;
+ l2t_main_t *lm = &l2t_main;
+ u32 session_index;
+ u32 counter_index;
+ u32 nincr = 0;
+ u32 cpu_index = os_get_cpu_number ();
+
+ /* *INDENT-OFF* */
+ pool_foreach (session, lm->sessions,
+ ({
+ session_index = session - lm->sessions;
+ counter_index =
+ session_index_to_counter_index (session_index,
+ SESSION_COUNTER_USER_TO_NETWORK);
+ vlib_increment_combined_counter (&lm->counter_main,
+ cpu_index,
+ counter_index,
+ 1/*pkt*/, 1111 /*bytes*/);
+ vlib_increment_combined_counter (&lm->counter_main,
+ cpu_index,
+ counter_index+1,
+ 1/*pkt*/, 2222 /*bytes*/);
+ nincr++;
+
+ }));
+ /* *INDENT-ON* */
+ vlib_cli_output (vm, "Incremented %d active counters\n", nincr);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (test_counters_command, static) = {
+ .path = "test counters",
+ .short_help = "increment all active counters",
+ .function = test_counters_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+clear_counters_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ l2t_session_t *session;
+ l2t_main_t *lm = &l2t_main;
+ u32 session_index;
+ u32 counter_index;
+ u32 ncleared = 0;
+
+ /* *INDENT-OFF* */
+ pool_foreach (session, lm->sessions,
+ ({
+ session_index = session - lm->sessions;
+ counter_index =
+ session_index_to_counter_index (session_index,
+ SESSION_COUNTER_USER_TO_NETWORK);
+ vlib_zero_combined_counter (&lm->counter_main, counter_index);
+ vlib_zero_combined_counter (&lm->counter_main, counter_index+1);
+ ncleared++;
+ }));
+ /* *INDENT-ON* */
+ vlib_cli_output (vm, "Cleared %d active counters\n", nincr);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (clear_counters_command, static) = {
+ .path = "clear counters",
+ .short_help = "clear all active counters",
+ .function = clear_counters_command_fn,
+};
+/* *INDENT-ON* */
+
+static u8 *
+format_l2tpv3_name (u8 * s, va_list * args)
+{
+ l2t_main_t *lm = &l2t_main;
+ u32 i = va_arg (*args, u32);
+ u32 show_dev_instance = ~0;
+
+ if (i < vec_len (lm->dev_inst_by_real))
+ show_dev_instance = lm->dev_inst_by_real[i];
+
+ if (show_dev_instance != ~0)
+ i = show_dev_instance;
+
+ return format (s, "l2tpv3_tunnel%d", i);
+}
+
+static int
+l2tpv3_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
+{
+ l2t_main_t *lm = &l2t_main;
+
+ vec_validate_init_empty (lm->dev_inst_by_real, hi->dev_instance, ~0);
+
+ lm->dev_inst_by_real[hi->dev_instance] = new_dev_instance;
+
+ return 0;
+}
+
+static uword
+dummy_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ clib_warning ("you shouldn't be here, leaking buffers...");
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (l2tpv3_device_class,static) = {
+ .name = "L2TPv3",
+ .format_device_name = format_l2tpv3_name,
+ .name_renumber = l2tpv3_name_renumber,
+ .tx_function = dummy_interface_tx,
+};
+/* *INDENT-ON* */
+
+static u8 *
+format_l2tp_header_with_length (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ s = format (s, "unimplemented dev %u", dev_instance);
+ return s;
+}
+
+/* *INDENT-OFF* */
+VNET_HW_INTERFACE_CLASS (l2tpv3_hw_class) = {
+ .name = "L2TPV3",
+ .format_header = format_l2tp_header_with_length,
+ .build_rewrite = default_build_rewrite,
+ .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
+};
+/* *INDENT-ON* */
+
+int
+create_l2tpv3_ipv6_tunnel (l2t_main_t * lm,
+ ip6_address_t * client_address,
+ ip6_address_t * our_address,
+ u32 local_session_id,
+ u32 remote_session_id,
+ u64 local_cookie,
+ u64 remote_cookie,
+ int l2_sublayer_present,
+ u32 encap_fib_index, u32 * sw_if_index)
+{
+ l2t_session_t *s = 0;
+ vnet_main_t *vnm = lm->vnet_main;
+ vnet_hw_interface_t *hi;
+ uword *p = (uword *) ~ 0;
+ u32 hw_if_index;
+ l2tpv3_header_t l2tp_hdr;
+ ip6_address_t *dst_address_copy, *src_address_copy;
+ u32 counter_index;
+
+ remote_session_id = clib_host_to_net_u32 (remote_session_id);
+ local_session_id = clib_host_to_net_u32 (local_session_id);
+
+ switch (lm->lookup_type)
+ {
+ case L2T_LOOKUP_SRC_ADDRESS:
+ p = hash_get_mem (lm->session_by_src_address, client_address);
+ break;
+
+ case L2T_LOOKUP_DST_ADDRESS:
+ p = hash_get_mem (lm->session_by_dst_address, our_address);
+ break;
+
+ case L2T_LOOKUP_SESSION_ID:
+ p = hash_get (lm->session_by_session_id, local_session_id);
+ break;
+
+ default:
+ ASSERT (0);
+ }
+
+ /* adding a session: session must not already exist */
+ if (p)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ pool_get (lm->sessions, s);
+ memset (s, 0, sizeof (*s));
+ clib_memcpy (&s->our_address, our_address, sizeof (s->our_address));
+ clib_memcpy (&s->client_address, client_address,
+ sizeof (s->client_address));
+ s->local_cookie[0] = clib_host_to_net_u64 (local_cookie);
+ s->remote_cookie = clib_host_to_net_u64 (remote_cookie);
+ s->local_session_id = local_session_id;
+ s->remote_session_id = remote_session_id;
+ s->l2_sublayer_present = l2_sublayer_present;
+ /* precompute l2tp header size */
+ s->l2tp_hdr_size = l2_sublayer_present ?
+ sizeof (l2tpv3_header_t) :
+ sizeof (l2tpv3_header_t) - sizeof (l2tp_hdr.l2_specific_sublayer);
+ s->admin_up = 0;
+ s->encap_fib_index = encap_fib_index;
+
+ /* Setup hash table entries */
+ switch (lm->lookup_type)
+ {
+ case L2T_LOOKUP_SRC_ADDRESS:
+ src_address_copy = clib_mem_alloc (sizeof (*src_address_copy));
+ clib_memcpy (src_address_copy, client_address,
+ sizeof (*src_address_copy));
+ hash_set_mem (lm->session_by_src_address, src_address_copy,
+ s - lm->sessions);
+ break;
+ case L2T_LOOKUP_DST_ADDRESS:
+ dst_address_copy = clib_mem_alloc (sizeof (*dst_address_copy));
+ clib_memcpy (dst_address_copy, our_address, sizeof (*dst_address_copy));
+ hash_set_mem (lm->session_by_dst_address, dst_address_copy,
+ s - lm->sessions);
+ break;
+ case L2T_LOOKUP_SESSION_ID:
+ hash_set (lm->session_by_session_id, local_session_id,
+ s - lm->sessions);
+ break;
+
+ default:
+ ASSERT (0);
+ }
+
+ /* validate counters */
+ counter_index =
+ session_index_to_counter_index (s - lm->sessions,
+ SESSION_COUNTER_USER_TO_NETWORK);
+ vlib_validate_combined_counter (&lm->counter_main, counter_index);
+ vlib_validate_combined_counter (&lm->counter_main, counter_index + 1);
+
+ if (vec_len (lm->free_l2tpv3_tunnel_hw_if_indices) > 0)
+ {
+ hw_if_index = lm->free_l2tpv3_tunnel_hw_if_indices
+ [vec_len (lm->free_l2tpv3_tunnel_hw_if_indices) - 1];
+ _vec_len (lm->free_l2tpv3_tunnel_hw_if_indices) -= 1;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ hi->dev_instance = s - lm->sessions;
+ hi->hw_instance = hi->dev_instance;
+ }
+ else
+ {
+ hw_if_index = vnet_register_interface
+ (vnm, l2tpv3_device_class.index, s - lm->sessions,
+ l2tpv3_hw_class.index, s - lm->sessions);
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ hi->output_node_index = l2t_encap_node.index;
+ /* $$$$ initialize custom dispositions, if needed */
+ }
+
+ s->hw_if_index = hw_if_index;
+ s->sw_if_index = hi->sw_if_index;
+
+ if (sw_if_index)
+ *sw_if_index = hi->sw_if_index;
+
+ return 0;
+}
+
+static clib_error_t *
+create_l2tpv3_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ ip6_address_t client_address, our_address;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ l2t_main_t *lm = &l2t_main;
+ u64 local_cookie = (u64) ~ 0, remote_cookie = (u64) ~ 0;
+ u32 local_session_id = 1, remote_session_id = 1;
+ int our_address_set = 0, client_address_set = 0;
+ int l2_sublayer_present = 0;
+ int rv;
+ u32 sw_if_index;
+ u32 encap_fib_id = ~0;
+ u32 encap_fib_index = ~0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "client %U",
+ unformat_ip6_address, &client_address))
+ client_address_set = 1;
+ else if (unformat (line_input, "our %U",
+ unformat_ip6_address, &our_address))
+ our_address_set = 1;
+ else if (unformat (line_input, "local-cookie %llx", &local_cookie))
+ ;
+ else if (unformat (line_input, "remote-cookie %llx", &remote_cookie))
+ ;
+ else if (unformat (line_input, "local-session-id %d",
+ &local_session_id))
+ ;
+ else if (unformat (line_input, "remote-session-id %d",
+ &remote_session_id))
+ ;
+ else if (unformat (line_input, "fib-id %d", &encap_fib_id))
+ ;
+ else if (unformat (line_input, "l2-sublayer-present"))
+ l2_sublayer_present = 1;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (encap_fib_id != ~0)
+ {
+ uword *p;
+ ip6_main_t *im = &ip6_main;
+ if (!(p = hash_get (im->fib_index_by_table_id, encap_fib_id)))
+ return clib_error_return (0, "No fib with id %d", encap_fib_id);
+ encap_fib_index = p[0];
+ }
+ else
+ {
+ encap_fib_index = ~0;
+ }
+
+ if (our_address_set == 0)
+ return clib_error_return (0, "our address not specified");
+ if (client_address_set == 0)
+ return clib_error_return (0, "client address not specified");
+
+ rv = create_l2tpv3_ipv6_tunnel (lm, &client_address, &our_address,
+ local_session_id, remote_session_id,
+ local_cookie, remote_cookie,
+ l2_sublayer_present,
+ encap_fib_index, &sw_if_index);
+ switch (rv)
+ {
+ case 0:
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name,
+ vnet_get_main (), sw_if_index);
+ break;
+ case VNET_API_ERROR_INVALID_VALUE:
+ return clib_error_return (0, "session already exists...");
+
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ return clib_error_return (0, "session does not exist...");
+
+ default:
+ return clib_error_return (0, "l2tp_session_add_del returned %d", rv);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (create_l2tpv3_tunnel_command, static) =
+{
+ .path = "create l2tpv3 tunnel",
+ .short_help =
+ "create l2tpv3 tunnel client <ip6> our <ip6> local-cookie <hex> remote-cookie <hex> local-session-id <dec> remote-session-id <dec> [fib-id <dec>] [l2-sublayer-present]",
+ .function = create_l2tpv3_tunnel_command_fn,
+};
+/* *INDENT-ON* */
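+
+/*
+ * Illustrative invocation, entered as a single line (addresses, cookies
+ * and session ids are examples only):
+ *
+ *   create l2tpv3 tunnel client 2001:db8::1 our 2001:db8::2
+ *       local-cookie 1234 remote-cookie 5678
+ *       local-session-id 1 remote-session-id 1
+ */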
+
+int
+l2tpv3_set_tunnel_cookies (l2t_main_t * lm,
+ u32 sw_if_index,
+ u64 new_local_cookie, u64 new_remote_cookie)
+{
+ l2t_session_t *s;
+ vnet_hw_interface_t *hi;
+ vnet_main_t *vnm = vnet_get_main ();
+ hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+
+ if (pool_is_free_index (lm->sessions, hi->dev_instance))
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ s = pool_elt_at_index (lm->sessions, hi->dev_instance);
+
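+ /* Keep the previous local cookie as a secondary value; packets in
+ flight that still carry the old cookie can then be accepted during
+ the changeover (assumes the decap path checks both local cookies). */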
+ s->local_cookie[1] = s->local_cookie[0];
+ s->local_cookie[0] = clib_host_to_net_u64 (new_local_cookie);
+ s->remote_cookie = clib_host_to_net_u64 (new_remote_cookie);
+
+ return 0;
+}
+
+
+static clib_error_t *
+set_l2tp_tunnel_cookie_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ l2t_main_t *lm = &l2t_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 sw_if_index = ~0;
+ u64 local_cookie = (u64) ~ 0, remote_cookie = (u64) ~ 0;
+
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%U", unformat_vnet_sw_interface, vnm,
+ &sw_if_index))
+ ;
+ else if (unformat (input, "local %llx", &local_cookie))
+ ;
+ else if (unformat (input, "remote %llx", &remote_cookie))
+ ;
+ else
+ break;
+ }
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "unknown interface");
+ if (local_cookie == ~0)
+ return clib_error_return (0, "local cookie required");
+ if (remote_cookie == ~0)
+ return clib_error_return (0, "remote cookie required");
+
+ rv = l2tpv3_set_tunnel_cookies (lm, sw_if_index,
+ local_cookie, remote_cookie);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_INVALID_SW_IF_INDEX:
+ return clib_error_return (0, "invalid interface");
+
+ default:
+ return clib_error_return (0, "l2tp_session_set_cookies returned %d",
+ rv);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_l2tp_tunnel_cookie_command, static) =
+{
+ .path = "set l2tpv3 tunnel cookie",
+ .short_help =
+ "set l2tpv3 tunnel cookie <intfc> local <hex> remote <hex>",
+ .function = set_l2tp_tunnel_cookie_command_fn,
+};
+/* *INDENT-ON* */
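+
+/*
+ * Illustrative invocation (interface name and cookie values are examples
+ * only):
+ *
+ *   set l2tpv3 tunnel cookie l2tpv3_tunnel0 local 1122334455667788
+ *       remote 8877665544332211
+ */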
+
+int
+l2tpv3_interface_enable_disable (vnet_main_t * vnm,
+ u32 sw_if_index, int enable_disable)
+{
+
+ if (pool_is_free_index (vnm->interface_main.sw_interfaces, sw_if_index))
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+
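+ /* Toggle the "l2tp-decap" feature on the interface's "ip6-unicast"
+ feature arc; when enabled, ip6 input on this interface is diverted
+ through the decap node. */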
+ vnet_feature_enable_disable ("ip6-unicast", "l2tp-decap", sw_if_index,
+ enable_disable, 0, 0);
+ return 0;
+}
+
+/* Enable/disable L2TPv3 intercept on the IP6 forwarding path */
+static clib_error_t *
+set_ip6_l2tpv3 (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ u32 sw_if_index = ~0;
+ int is_add = 1;
+ int rv;
+ vnet_main_t *vnm = vnet_get_main ();
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%U", unformat_vnet_sw_interface, vnm,
+ &sw_if_index))
+ ;
+ else if (unformat (input, "del"))
+ is_add = 0;
+ else
+ break;
+ }
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "interface required");
+
+ rv = l2tpv3_interface_enable_disable (vnm, sw_if_index, is_add);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_INVALID_SW_IF_INDEX:
+ return clib_error_return (0, "invalid interface");
+
+ default:
+ return clib_error_return (0,
+ "l2tp_interface_enable_disable returned %d",
+ rv);
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_ip6_l2tpv3, static) =
+{
+ .path = "set interface ip6 l2tpv3",
+ .function = set_ip6_l2tpv3,
+ .short_help = "set interface ip6 l2tpv3 <intfc> [del]",
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+l2tp_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ l2t_main_t *lm = &l2t_main;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "lookup-v6-src"))
+ lm->lookup_type = L2T_LOOKUP_SRC_ADDRESS;
+ else if (unformat (input, "lookup-v6-dst"))
+ lm->lookup_type = L2T_LOOKUP_DST_ADDRESS;
+ else if (unformat (input, "lookup-session-id"))
+ lm->lookup_type = L2T_LOOKUP_SESSION_ID;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ return 0;
+}
+
+VLIB_CONFIG_FUNCTION (l2tp_config, "l2tp");
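+
+/*
+ * Illustrative startup.conf stanza consumed by l2tp_config:
+ *
+ *   l2tp {
+ *     lookup-v6-dst
+ *   }
+ */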
+
+
+clib_error_t *
+l2tp_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
+{
+ l2t_main_t *lm = &l2t_main;
+ vnet_hw_interface_t *hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ if (hi->hw_class_index != l2tpv3_hw_class.index)
+ return 0;
+
+ u32 session_index = hi->dev_instance;
+ l2t_session_t *s = pool_elt_at_index (lm->sessions, session_index);
+ s->admin_up = ! !(flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ return 0;
+}
+
+VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (l2tp_sw_interface_up_down);
+
+clib_error_t *
+l2tp_init (vlib_main_t * vm)
+{
+ l2t_main_t *lm = &l2t_main;
+ ip_main_t *im = &ip_main;
+ ip_protocol_info_t *pi;
+
+ lm->vnet_main = vnet_get_main ();
+ lm->vlib_main = vm;
+ lm->lookup_type = L2T_LOOKUP_DST_ADDRESS;
+
+ lm->session_by_src_address = hash_create_mem
+ (0, sizeof (ip6_address_t) /* key bytes */ ,
+ sizeof (u32) /* value bytes */ );
+ lm->session_by_dst_address = hash_create_mem
+ (0, sizeof (ip6_address_t) /* key bytes */ ,
+ sizeof (u32) /* value bytes */ );
+ lm->session_by_session_id = hash_create (0, sizeof (uword));
+
+ pi = ip_get_protocol_info (im, IP_PROTOCOL_L2TP);
+ pi->unformat_pg_edit = unformat_pg_l2tp_header;
+
+ /* ensure these nodes are included in the build */
+ l2tp_encap_init (vm);
+ l2tp_decap_init ();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2tp_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2tp/l2tp.h b/src/vnet/l2tp/l2tp.h
new file mode 100644
index 00000000000..e7d2892cbdf
--- /dev/null
+++ b/src/vnet/l2tp/l2tp.h
@@ -0,0 +1,147 @@
+/*
+ * l2tp.h : L2TPv3 tunnel support
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __included_l2tp_h__
+#define __included_l2tp_h__
+
+#include <vlib/vlib.h>
+#include <vnet/ip/ip.h>
+#include <vnet/l2tp/packet.h>
+
+typedef struct
+{
+ /* ip6 addresses */
+ ip6_address_t our_address;
+ ip6_address_t client_address;
+
+ /* l2tpv3 header parameters */
+ u64 local_cookie[2];
+ u64 remote_cookie;
+ u32 local_session_id;
+ u32 remote_session_id;
+
+ /* tunnel interface */
+ u32 hw_if_index;
+ u32 sw_if_index;
+
+ /* fib index used for outgoing encapsulated packets */
+ u32 encap_fib_index;
+
+ u8 l2tp_hdr_size;
+ u8 l2_sublayer_present;
+ u8 cookie_flags; /* in host byte order */
+
+ u8 admin_up;
+} l2t_session_t;
+
+typedef enum
+{
+ L2T_LOOKUP_SRC_ADDRESS = 0,
+ L2T_LOOKUP_DST_ADDRESS,
+ L2T_LOOKUP_SESSION_ID,
+} ip6_to_l2_lookup_t;
+
+typedef struct
+{
+ /* session pool */
+ l2t_session_t *sessions;
+
+ /* ip6 -> l2 session lookup tables; only the table selected by
+ lookup_type is populated. Make up your minds, people... */
+ uword *session_by_src_address;
+ uword *session_by_dst_address;
+ uword *session_by_session_id;
+
+ ip6_to_l2_lookup_t lookup_type;
+
+ /* Counters */
+ vlib_combined_counter_main_t counter_main;
+
+ /* vector of free l2tpv3 tunnel interfaces */
+ u32 *free_l2tpv3_tunnel_hw_if_indices;
+
+ /* show device instance by real device instance */
+ u32 *dev_inst_by_real;
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+
+} l2t_main_t;
+
+/* Packet trace structure */
+typedef struct
+{
+ int is_user_to_network;
+ u32 session_index;
+ ip6_address_t our_address;
+ ip6_address_t client_address;
+} l2t_trace_t;
+
+l2t_main_t l2t_main;
+extern vlib_node_registration_t l2t_encap_node;
+extern vlib_node_registration_t l2t_decap_node;
+extern vlib_node_registration_t l2t_decap_local_node;
+
+enum
+{
+ SESSION_COUNTER_USER_TO_NETWORK = 0,
+ SESSION_COUNTER_NETWORK_TO_USER,
+};
+
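+/*
+ * Each session owns two adjacent combined counters: the even index is the
+ * user-to-network counter and the odd index immediately after it is the
+ * network-to-user counter. For example, session 3 uses counters 6 and 7.
+ */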
+static inline u32
+session_index_to_counter_index (u32 session_index, u32 counter_id)
+{
+ return ((session_index << 1) + counter_id);
+}
+
+u8 *format_l2t_trace (u8 * s, va_list * args);
+
+typedef struct
+{
+ /* Any per-interface config would go here */
+} ip6_l2tpv3_config_t;
+
+uword unformat_pg_l2tp_header (unformat_input_t * input, va_list * args);
+
+void l2tp_encap_init (vlib_main_t * vm);
+void l2tp_decap_init (void);
+int create_l2tpv3_ipv6_tunnel (l2t_main_t * lm,
+ ip6_address_t * client_address,
+ ip6_address_t * our_address,
+ u32 local_session_id,
+ u32 remote_session_id,
+ u64 local_cookie,
+ u64 remote_cookie,
+ int l2_sublayer_present,
+ u32 encap_fib_index, u32 * sw_if_index);
+
+int l2tpv3_set_tunnel_cookies (l2t_main_t * lm,
+ u32 sw_if_index,
+ u64 new_local_cookie, u64 new_remote_cookie);
+
+int l2tpv3_interface_enable_disable (vnet_main_t * vnm,
+ u32 sw_if_index, int enable_disable);
+
+#endif /* __included_l2tp_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2tp/l2tp_api.c b/src/vnet/l2tp/l2tp_api.c
new file mode 100644
index 00000000000..88d758c9ff0
--- /dev/null
+++ b/src/vnet/l2tp/l2tp_api.c
@@ -0,0 +1,267 @@
+/*
+ *------------------------------------------------------------------
+ * l2tp_api.c - l2tpv3 api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/l2tp/l2tp.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(L2TPV3_CREATE_TUNNEL, l2tpv3_create_tunnel) \
+_(L2TPV3_SET_TUNNEL_COOKIES, l2tpv3_set_tunnel_cookies) \
+_(L2TPV3_INTERFACE_ENABLE_DISABLE, l2tpv3_interface_enable_disable) \
+_(L2TPV3_SET_LOOKUP_KEY, l2tpv3_set_lookup_key) \
+_(SW_IF_L2TPV3_TUNNEL_DUMP, sw_if_l2tpv3_tunnel_dump)
+
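+/*
+ * A dump request is answered with one _details message per session; each
+ * reply carries the caller's context so the client can pair replies with
+ * the original request.
+ */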
+static void
+send_sw_if_l2tpv3_tunnel_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t * q,
+ l2t_session_t * s,
+ l2t_main_t * lm, u32 context)
+{
+ vl_api_sw_if_l2tpv3_tunnel_details_t *mp;
+ u8 *if_name = NULL;
+ vnet_sw_interface_t *si = NULL;
+
+ si = vnet_get_hw_sw_interface (lm->vnet_main, s->hw_if_index);
+
+ if_name = format (if_name, "%U",
+ format_vnet_sw_interface_name, lm->vnet_main, si);
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_IF_L2TPV3_TUNNEL_DETAILS);
+ strncpy ((char *) mp->interface_name,
+ (char *) if_name, ARRAY_LEN (mp->interface_name) - 1);
+ mp->sw_if_index = ntohl (si->sw_if_index);
+ mp->local_session_id = s->local_session_id;
+ mp->remote_session_id = s->remote_session_id;
+ mp->local_cookie[0] = s->local_cookie[0];
+ mp->local_cookie[1] = s->local_cookie[1];
+ mp->remote_cookie = s->remote_cookie;
+ clib_memcpy (mp->client_address, &s->client_address,
+ sizeof (s->client_address));
+ clib_memcpy (mp->our_address, &s->our_address, sizeof (s->our_address));
+ mp->l2_sublayer_present = s->l2_sublayer_present;
+ mp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+
+static void
+vl_api_sw_if_l2tpv3_tunnel_dump_t_handler (vl_api_sw_if_l2tpv3_tunnel_dump_t *
+ mp)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+ l2t_main_t *lm = &l2t_main;
+ unix_shared_memory_queue_t *q;
+ l2t_session_t *session;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ /* *INDENT-OFF* */
+ pool_foreach (session, lm->sessions,
+ ({
+ send_sw_if_l2tpv3_tunnel_details (am, q, session, lm, mp->context);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void vl_api_l2tpv3_create_tunnel_t_handler
+ (vl_api_l2tpv3_create_tunnel_t * mp)
+{
+ vl_api_l2tpv3_create_tunnel_reply_t *rmp;
+ l2t_main_t *lm = &l2t_main;
+ u32 sw_if_index = (u32) ~ 0;
+ int rv;
+
+ if (mp->is_ipv6 != 1)
+ {
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+ }
+
+ u32 encap_fib_index;
+
+ if (mp->encap_vrf_id != ~0)
+ {
+ uword *p;
+ ip6_main_t *im = &ip6_main;
+ if (!(p = hash_get (im->fib_index_by_table_id, ntohl (mp->encap_vrf_id))))
+ {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ goto out;
+ }
+ encap_fib_index = p[0];
+ }
+ else
+ {
+ encap_fib_index = ~0;
+ }
+
+ rv = create_l2tpv3_ipv6_tunnel (lm,
+ (ip6_address_t *) mp->client_address,
+ (ip6_address_t *) mp->our_address,
+ ntohl (mp->local_session_id),
+ ntohl (mp->remote_session_id),
+ clib_net_to_host_u64 (mp->local_cookie),
+ clib_net_to_host_u64 (mp->remote_cookie),
+ mp->l2_sublayer_present,
+ encap_fib_index, &sw_if_index);
+
+out:
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_L2TPV3_CREATE_TUNNEL_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void vl_api_l2tpv3_set_tunnel_cookies_t_handler
+ (vl_api_l2tpv3_set_tunnel_cookies_t * mp)
+{
+ vl_api_l2tpv3_set_tunnel_cookies_reply_t *rmp;
+ l2t_main_t *lm = &l2t_main;
+ int rv;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ rv = l2tpv3_set_tunnel_cookies (lm, ntohl (mp->sw_if_index),
+ clib_net_to_host_u64 (mp->new_local_cookie),
+ clib_net_to_host_u64 (mp->new_remote_cookie));
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_L2TPV3_SET_TUNNEL_COOKIES_REPLY);
+}
+
+static void vl_api_l2tpv3_interface_enable_disable_t_handler
+ (vl_api_l2tpv3_interface_enable_disable_t * mp)
+{
+ int rv;
+ vnet_main_t *vnm = vnet_get_main ();
+ vl_api_l2tpv3_interface_enable_disable_reply_t *rmp;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ rv = l2tpv3_interface_enable_disable
+ (vnm, ntohl (mp->sw_if_index), mp->enable_disable);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_L2TPV3_INTERFACE_ENABLE_DISABLE_REPLY);
+}
+
+static void vl_api_l2tpv3_set_lookup_key_t_handler
+ (vl_api_l2tpv3_set_lookup_key_t * mp)
+{
+ int rv = 0;
+ l2t_main_t *lm = &l2t_main;
+ vl_api_l2tpv3_set_lookup_key_reply_t *rmp;
+
+ if (mp->key > L2T_LOOKUP_SESSION_ID)
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto out;
+ }
+
+ lm->lookup_type = mp->key;
+
+out:
+ REPLY_MACRO (VL_API_L2TPV3_SET_LOOKUP_KEY_REPLY);
+}
+
+/*
+ * l2tp_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_l2tp;
+#undef _
+}
+
+static clib_error_t *
+l2tp_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (l2tp_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2tp/packet.h b/src/vnet/l2tp/packet.h
new file mode 100644
index 00000000000..66dfea2194c
--- /dev/null
+++ b/src/vnet/l2tp/packet.h
@@ -0,0 +1,44 @@
+/*
+ * packet.h : L2TPv3 packet header format
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __included_l2tp_packet_h__
+#define __included_l2tp_packet_h__
+
+/*
+ * See RFC4719 for packet format.
+ * Note: the l2_specific_sublayer is present in current Linux l2tpv3
+ * tunnels. It is not present in IOS XR l2tpv3 tunnels.
+ * The Linux implementation is almost certainly wrong.
+ */
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct
+{
+ u32 session_id;
+ u64 cookie;
+ u32 l2_specific_sublayer; /* set to 0 (if present) */
+}) l2tpv3_header_t;
+/* *INDENT-ON* */
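+
+/*
+ * On the wire this header is 12 bytes (session id + cookie), or 16 bytes
+ * when the l2-specific sublayer is present; l2tp.c precomputes the size
+ * per session in l2tp_hdr_size.
+ */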
+
+#endif /* __included_l2tp_packet_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l2tp/pg.c b/src/vnet/l2tp/pg.c
new file mode 100644
index 00000000000..1e523d3bbb0
--- /dev/null
+++ b/src/vnet/l2tp/pg.c
@@ -0,0 +1,106 @@
+/*
+ * pg.c: packet generator for L2TPv3 header
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/l2tp/l2tp.h>
+
+typedef struct
+{
+ pg_edit_t session_id;
+ pg_edit_t cookie;
+} pg_l2tp_header_t;
+
+typedef struct
+{
+ pg_edit_t l2_sublayer;
+} pg_l2tp_header_l2_sublayer_t;
+
+static inline void
+pg_l2tp_header_init (pg_l2tp_header_t * e)
+{
+ pg_edit_init (&e->session_id, l2tpv3_header_t, session_id);
+ pg_edit_init (&e->cookie, l2tpv3_header_t, cookie);
+}
+
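+/*
+ * Parse a packet-generator edit spec for the L2TPv3 header. An
+ * illustrative stream fragment this accepts (values are examples only):
+ *
+ *   L2TP: session_id 0x11 cookie 0x1122334455667788 l2_sublayer 0
+ */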
+uword
+unformat_pg_l2tp_header (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t *s = va_arg (*args, pg_stream_t *);
+ pg_l2tp_header_t *h;
+ u32 group_index, error;
+ vlib_main_t *vm = vlib_get_main ();
+
+ h = pg_create_edit_group (s, sizeof (h[0]),
+ sizeof (l2tpv3_header_t) - sizeof (u32),
+ &group_index);
+ pg_l2tp_header_init (h);
+
+ error = 1;
+
+ /* session id and cookie are required */
+ if (!unformat (input, "L2TP: session_id %U cookie %U",
+ unformat_pg_edit, unformat_pg_number, &h->session_id,
+ unformat_pg_edit, unformat_pg_number, &h->cookie))
+ {
+ goto done;
+ }
+
+ /* "l2_sublayer <value>" is optional */
+ if (unformat (input, "l2_sublayer"))
+ {
+ pg_l2tp_header_l2_sublayer_t *h2;
+
+ h2 = pg_add_edits (s, sizeof (h2[0]), sizeof (u32), group_index);
+ pg_edit_init (&h2->l2_sublayer, l2tpv3_header_t, l2_specific_sublayer);
+ if (!unformat_user (input, unformat_pg_edit,
+ unformat_pg_number, &h2->l2_sublayer))
+ {
+ goto done;
+ }
+ }
+
+ /* Parse an ethernet header if it is present */
+ {
+ pg_node_t *pg_node = 0;
+ vlib_node_t *eth_lookup_node;
+
+ eth_lookup_node = vlib_get_node_by_name (vm, (u8 *) "ethernet-input");
+ ASSERT (eth_lookup_node);
+
+ pg_node = pg_get_node (eth_lookup_node->index);
+
+ if (pg_node && pg_node->unformat_edit
+ && unformat_user (input, pg_node->unformat_edit, s))
+ ;
+ }
+
+ error = 0;
+
+done:
+ if (error)
+ pg_free_edit_group (s);
+ return error == 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/l3_types.h b/src/vnet/l3_types.h
new file mode 100644
index 00000000000..28b0891e715
--- /dev/null
+++ b/src/vnet/l3_types.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * l3_types.h: layer 3 packet types
+ *
+ * Copyright (c) 2010 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_vnet_l3_types_h
+#define included_vnet_l3_types_h
+
+/* Inherit generic L3 packet types from ethernet. */
+typedef enum
+{
+#define ethernet_type(n,f) VNET_L3_PACKET_TYPE_##f,
+#include <vnet/ethernet/types.def>
+#undef ethernet_type
+} vnet_l3_packet_type_t;
+
+#endif /* included_vnet_l3_types_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lawful-intercept/lawful_intercept.c b/src/vnet/lawful-intercept/lawful_intercept.c
new file mode 100644
index 00000000000..ef07a339201
--- /dev/null
+++ b/src/vnet/lawful-intercept/lawful_intercept.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/lawful-intercept/lawful_intercept.h>
+
+static clib_error_t *
+set_li_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ li_main_t * lm = &li_main;
+ ip4_address_t collector;
+ u8 collector_set = 0;
+ ip4_address_t src;
+ u8 src_set = 0;
+ u32 tmp;
+ u16 udp_port = 0;
+ u8 is_add = 1;
+ int i;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "collector %U", unformat_ip4_address, &collector))
+ collector_set = 1;
+ if (unformat (input, "src %U", unformat_ip4_address, &src))
+ src_set = 1;
+ else if (unformat (input, "udp-port %d", &tmp))
+ udp_port = tmp;
+ else if (unformat (input, "del"))
+ is_add = 0;
+ else
+ break;
+ }
+
+ if (collector_set == 0)
+ return clib_error_return (0, "collector must be set...");
+ if (src_set == 0)
+ return clib_error_return (0, "src must be set...");
+ if (udp_port == 0)
+ return clib_error_return (0, "udp-port must be set...");
+
+ if (is_add == 1)
+ {
+ for (i = 0; i < vec_len (lm->collectors); i++)
+ {
+ if (lm->collectors[i].as_u32 == collector.as_u32)
+ {
+ if (lm->ports[i] == udp_port)
+ return clib_error_return
+ (0, "collector %U:%d already configured",
+ format_ip4_address, &collector, udp_port);
+ else
+ return clib_error_return
+ (0, "collector %U already configured with port %d",
+ format_ip4_address, &collector, (int)(lm->ports[i]));
+ }
+ }
+ vec_add1 (lm->collectors, collector);
+ vec_add1 (lm->ports, udp_port);
+ vec_add1 (lm->src_addrs, src);
+ return 0;
+ }
+ else
+ {
+ for (i = 0; i < vec_len (lm->collectors); i++)
+ {
+ if ((lm->collectors[i].as_u32 == collector.as_u32)
+ && lm->ports[i] == udp_port)
+ {
+ vec_delete (lm->collectors, 1, i);
+ vec_delete (lm->ports, 1, i);
+ vec_delete (lm->src_addrs, 1, i);
+ return 0;
+ }
+ }
+ return clib_error_return (0, "collector %U:%d not configured",
+ format_ip4_address, &collector, udp_port);
+ }
+ return 0;
+}
+
+VLIB_CLI_COMMAND (set_li_command, static) = {
+ .path = "set li",
+ .short_help =
+ "set li src <ip4-address> collector <ip4-address> udp-port <nnnn>",
+ .function = set_li_command_fn,
+};
+
+static clib_error_t *
+li_init (vlib_main_t * vm)
+{
+ li_main_t * lm = &li_main;
+
+ lm->vlib_main = vm;
+ lm->vnet_main = vnet_get_main();
+ lm->hit_node_index = li_hit_node.index;
+ return 0;
+}
+
+VLIB_INIT_FUNCTION(li_init);
+
diff --git a/src/vnet/lawful-intercept/lawful_intercept.h b/src/vnet/lawful-intercept/lawful_intercept.h
new file mode 100644
index 00000000000..89e699f51a0
--- /dev/null
+++ b/src/vnet/lawful-intercept/lawful_intercept.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __lawful_intercept_h__
+#define __lawful_intercept_h__
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+
+typedef struct {
+ /* LI collector info */
+ ip4_address_t * src_addrs;
+ ip4_address_t * collectors;
+ u16 * ports;
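+ /* The three vectors above are parallel: element i of each describes
+ one collector tuple (source address, collector address, UDP port). */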
+
+ /* Hit node index */
+ u32 hit_node_index;
+
+ /* convenience */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} li_main_t;
+
+li_main_t li_main;
+
+typedef CLIB_PACKED(struct {
+ ip4_header_t ip4;
+ udp_header_t udp;
+}) ip4_udp_header_t;
+
+extern vlib_node_registration_t li_hit_node;
+
+#endif /* __lawful_intercept_h__ */
diff --git a/src/vnet/lawful-intercept/node.c b/src/vnet/lawful-intercept/node.c
new file mode 100644
index 00000000000..ea0cd8efcdd
--- /dev/null
+++ b/src/vnet/lawful-intercept/node.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+
+#include <vnet/lawful-intercept/lawful_intercept.h>
+
+#include <vppinfra/error.h>
+#include <vppinfra/elog.h>
+
+vlib_node_registration_t li_hit_node;
+
+typedef struct {
+ u32 next_index;
+} li_hit_trace_t;
+
+/* packet trace format function */
+static u8 * format_li_hit_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ li_hit_trace_t * t = va_arg (*args, li_hit_trace_t *);
+
+ s = format (s, "LI_HIT: next index %d", t->next_index);
+
+ return s;
+}
+
+#define foreach_li_hit_error \
+_(HITS, "LI packets processed") \
+_(NO_COLLECTOR, "No collector configured")
+
+typedef enum {
+#define _(sym,str) LI_HIT_ERROR_##sym,
+ foreach_li_hit_error
+#undef _
+ LI_HIT_N_ERROR,
+} li_hit_error_t;
+
+static char * li_hit_error_strings[] = {
+#define _(sym,string) string,
+ foreach_li_hit_error
+#undef _
+};
+
+typedef enum {
+ LI_HIT_NEXT_ETHERNET,
+ LI_HIT_N_NEXT,
+} li_hit_next_t;
+
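+/*
+ * Per packet: make a copy, prepend an IPv4/UDP header addressed to the
+ * first configured collector, and send the copies to ip4-lookup in a
+ * separate frame; the original packets continue to ethernet-input-not-l2
+ * unmodified.
+ */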
+static uword
+li_hit_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, * from, * to_next;
+ li_hit_next_t next_index;
+ vlib_frame_t * int_frame = 0;
+ u32 * to_int_next = 0;
+ li_main_t * lm = &li_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ if (PREDICT_FALSE (vec_len (lm->collectors) == 0))
+ {
+ vlib_node_increment_counter (vm, li_hit_node.index,
+ LI_HIT_ERROR_NO_COLLECTOR,
+ n_left_from);
+ }
+ else
+ {
+ /* The intercept frame... */
+ int_frame = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
+ to_int_next = vlib_frame_vector_args (int_frame);
+ }
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+#if 0
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 next0 = LI_HIT_NEXT_INTERFACE_OUTPUT;
+ u32 next1 = LI_HIT_NEXT_INTERFACE_OUTPUT;
+ u32 sw_if_index0, sw_if_index1;
+ u8 tmp0[6], tmp1[6];
+ ethernet_header_t *en0, *en1;
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* $$$$$ Dual loop: process 2 x packets here $$$$$ */
+ ASSERT (b0->current_data == 0);
+ ASSERT (b1->current_data == 0);
+
+ en0 = vlib_buffer_get_current (b0);
+ en1 = vlib_buffer_get_current (b1);
+
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
+
+ /* Send pkt back out the RX interface */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = sw_if_index0;
+ vnet_buffer(b1)->sw_if_index[VLIB_TX] = sw_if_index1;
+
+ /* $$$$$ End of processing 2 x packets $$$$$ */
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ li_hit_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ li_hit_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ }
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+#endif /* $$$ dual-loop off */
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ vlib_buffer_t * c0;
+ ip4_udp_header_t * iu0;
+ ip4_header_t * ip0;
+ udp_header_t * udp0;
+ u32 next0 = LI_HIT_NEXT_ETHERNET;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ if (PREDICT_TRUE(to_int_next != 0))
+ {
+ /* Make an intercept copy */
+ c0 = vlib_buffer_copy (vm, b0);
+
+ vlib_buffer_advance(c0, -sizeof(*iu0));
+
+ iu0 = vlib_buffer_get_current(c0);
+ ip0 = &iu0->ip4;
+
+ ip0->ip_version_and_header_length = 0x45;
+ ip0->ttl = 254;
+ ip0->protocol = IP_PROTOCOL_UDP;
+
+ ip0->src_address.as_u32 = lm->src_addrs[0].as_u32;
+ ip0->dst_address.as_u32 = lm->collectors[0].as_u32;
+ /* ip4 total length is big endian on the wire */
+ ip0->length = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, c0));
+ ip0->checksum = ip4_header_checksum (ip0);
+
+ udp0 = &iu0->udp;
+ udp0->src_port = udp0->dst_port =
+ clib_host_to_net_u16(lm->ports[0]);
+ udp0->checksum = 0;
+ /* UDP length covers the UDP header plus the copied packet */
+ udp0->length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
+ + sizeof (*udp0));
+
+ to_int_next [0] = vlib_get_buffer_index (vm, c0);
+ to_int_next++;
+ }
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ li_hit_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->next_index = next0;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ if (int_frame)
+ {
+ int_frame->n_vectors = frame->n_vectors;
+ vlib_put_frame_to_node (vm, ip4_lookup_node.index, int_frame);
+ }
+
+ vlib_node_increment_counter (vm, li_hit_node.index,
+ LI_HIT_ERROR_HITS, frame->n_vectors);
+ return frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (li_hit_node) = {
+ .function = li_hit_node_fn,
+ .name = "li-hit",
+ .vector_size = sizeof (u32),
+ .format_trace = format_li_hit_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(li_hit_error_strings),
+ .error_strings = li_hit_error_strings,
+
+ .n_next_nodes = LI_HIT_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [LI_HIT_NEXT_ETHERNET] = "ethernet-input-not-l2",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (li_hit_node, li_hit_node_fn)
+
diff --git a/src/vnet/lisp-cp/control.c b/src/vnet/lisp-cp/control.c
new file mode 100644
index 00000000000..de048a4144a
--- /dev/null
+++ b/src/vnet/lisp-cp/control.c
@@ -0,0 +1,4950 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlibmemory/api.h>
+#include <vnet/lisp-cp/control.h>
+#include <vnet/lisp-cp/packets.h>
+#include <vnet/lisp-cp/lisp_msg_serdes.h>
+#include <vnet/lisp-gpe/lisp_gpe.h>
+#include <vnet/lisp-gpe/lisp_gpe_fwd_entry.h>
+#include <vnet/lisp-gpe/lisp_gpe_tenant.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_table.h>
+
+#include <openssl/evp.h>
+#include <openssl/hmac.h>
+
+typedef struct
+{
+ u8 is_resend;
+ gid_address_t seid;
+ gid_address_t deid;
+ u8 smr_invoked;
+} map_request_args_t;
+
+typedef struct
+{
+ u64 nonce;
+ u8 is_rloc_probe;
+ mapping_t *mappings;
+} map_records_arg_t;
+
+static int
+lisp_add_del_adjacency (lisp_cp_main_t * lcm, gid_address_t * local_eid,
+ gid_address_t * remote_eid, u8 is_add);
+
+u8
+vnet_lisp_get_map_request_mode (void)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ return lcm->map_request_mode;
+}
+
+static u16
+auth_data_len_by_key_id (lisp_key_type_t key_id)
+{
+ switch (key_id)
+ {
+ case HMAC_SHA_1_96:
+ return SHA1_AUTH_DATA_LEN;
+ case HMAC_SHA_256_128:
+ return SHA256_AUTH_DATA_LEN;
+ default:
+ clib_warning ("unsupported key type: %d!", key_id);
+ return (u16) ~ 0;
+ }
+ return (u16) ~ 0;
+}
+
+static const EVP_MD *
+get_encrypt_fcn (lisp_key_type_t key_id)
+{
+ switch (key_id)
+ {
+ case HMAC_SHA_1_96:
+ return EVP_sha1 ();
+ case HMAC_SHA_256_128:
+ return EVP_sha256 ();
+ default:
+ clib_warning ("unsupported encryption key type: %d!", key_id);
+ break;
+ }
+ return 0;
+}
+
+static int
+queue_map_request (gid_address_t * seid, gid_address_t * deid,
+ u8 smr_invoked, u8 is_resend);
+
+ip_interface_address_t *
+ip_interface_get_first_interface_address (ip_lookup_main_t * lm,
+ u32 sw_if_index, u8 loop)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_sw_interface_t *swif = vnet_get_sw_interface (vnm, sw_if_index);
+ if (loop && swif->flags & VNET_SW_INTERFACE_FLAG_UNNUMBERED)
+ sw_if_index = swif->unnumbered_sw_if_index;
+ u32 ia =
+ (vec_len ((lm)->if_address_pool_index_by_sw_if_index) > (sw_if_index)) ?
+ vec_elt ((lm)->if_address_pool_index_by_sw_if_index, (sw_if_index)) :
+ (u32) ~ 0;
+ if ((u32) ~ 0 == ia)
+ return 0;
+ return pool_elt_at_index ((lm)->if_address_pool, ia);
+}
+
+void *
+ip_interface_get_first_address (ip_lookup_main_t * lm, u32 sw_if_index,
+ u8 version)
+{
+ ip_interface_address_t *ia;
+
+ ia = ip_interface_get_first_interface_address (lm, sw_if_index, 1);
+ if (!ia)
+ return 0;
+ return ip_interface_address_get_address (lm, ia);
+}
+
+int
+ip_interface_get_first_ip_address (lisp_cp_main_t * lcm, u32 sw_if_index,
+ u8 version, ip_address_t * result)
+{
+ ip_lookup_main_t *lm;
+ void *addr;
+
+ lm = (version == IP4) ? &lcm->im4->lookup_main : &lcm->im6->lookup_main;
+ addr = ip_interface_get_first_address (lm, sw_if_index, version);
+ if (!addr)
+ return 0;
+
+ ip_address_set (result, addr, version);
+ return 1;
+}
+
+/**
+ * convert from a LISP address to a FIB prefix
+ */
+void
+ip_address_to_fib_prefix (const ip_address_t * addr, fib_prefix_t * prefix)
+{
+ if (addr->version == IP4)
+ {
+ prefix->fp_len = 32;
+ prefix->fp_proto = FIB_PROTOCOL_IP4;
+ memset (&prefix->fp_addr.pad, 0, sizeof (prefix->fp_addr.pad));
+ memcpy (&prefix->fp_addr.ip4, &addr->ip, sizeof (prefix->fp_addr.ip4));
+ }
+ else
+ {
+ prefix->fp_len = 128;
+ prefix->fp_proto = FIB_PROTOCOL_IP6;
+ memcpy (&prefix->fp_addr.ip6, &addr->ip, sizeof (prefix->fp_addr.ip6));
+ }
+}
+
+/**
+ * convert from a LISP to a FIB prefix
+ */
+void
+ip_prefix_to_fib_prefix (const ip_prefix_t * ip_prefix,
+ fib_prefix_t * fib_prefix)
+{
+ ip_address_to_fib_prefix (&ip_prefix->addr, fib_prefix);
+ fib_prefix->fp_len = ip_prefix->len;
+}
+
+/**
+ * Find the sw_if_index of the interface that would be used to egress towards
+ * dst.
+ */
+u32
+ip_fib_get_egress_iface_for_dst (lisp_cp_main_t * lcm, ip_address_t * dst)
+{
+ fib_node_index_t fei;
+ fib_prefix_t prefix;
+
+ ip_address_to_fib_prefix (dst, &prefix);
+
+ fei = fib_table_lookup (0, &prefix);
+
+ return (fib_entry_get_resolving_interface (fei));
+}
+
+/**
+ * Find first IP of the interface that would be used to egress towards dst.
+ * Returns 1 if the address is found, 0 otherwise.
+ */
+int
+ip_fib_get_first_egress_ip_for_dst (lisp_cp_main_t * lcm, ip_address_t * dst,
+ ip_address_t * result)
+{
+ u32 si;
+ ip_lookup_main_t *lm;
+ void *addr = 0;
+ u8 ipver;
+
+ ASSERT (result != 0);
+
+ ipver = ip_addr_version (dst);
+
+ lm = (ipver == IP4) ? &lcm->im4->lookup_main : &lcm->im6->lookup_main;
+ si = ip_fib_get_egress_iface_for_dst (lcm, dst);
+
+ if ((u32) ~ 0 == si)
+ return 0;
+
+ /* find the first ip address */
+ addr = ip_interface_get_first_address (lm, si, ipver);
+ if (0 == addr)
+ return 0;
+
+ ip_address_set (result, addr, ipver);
+ return 1;
+}
+
+static int
+dp_add_del_iface (lisp_cp_main_t * lcm, u32 vni, u8 is_l2, u8 is_add)
+{
+ uword *dp_table;
+
+ if (!is_l2)
+ {
+ dp_table = hash_get (lcm->table_id_by_vni, vni);
+
+ if (!dp_table)
+ {
+ clib_warning ("vni %d not associated to a vrf!", vni);
+ return VNET_API_ERROR_INVALID_VALUE;
+ }
+ }
+ else
+ {
+ dp_table = hash_get (lcm->bd_id_by_vni, vni);
+ if (!dp_table)
+ {
+ clib_warning ("vni %d not associated to a bridge domain!", vni);
+ return VNET_API_ERROR_INVALID_VALUE;
+ }
+ }
+
+ /* enable/disable data-plane interface */
+ if (is_add)
+ {
+ if (is_l2)
+ lisp_gpe_tenant_l2_iface_add_or_lock (vni, dp_table[0]);
+ else
+ lisp_gpe_tenant_l3_iface_add_or_lock (vni, dp_table[0]);
+ }
+ else
+ {
+ if (is_l2)
+ lisp_gpe_tenant_l2_iface_unlock (vni);
+ else
+ lisp_gpe_tenant_l3_iface_unlock (vni);
+ }
+
+ return 0;
+}
+
+static void
+dp_del_fwd_entry (lisp_cp_main_t * lcm, u32 src_map_index, u32 dst_map_index)
+{
+ vnet_lisp_gpe_add_del_fwd_entry_args_t _a, *a = &_a;
+ fwd_entry_t *fe = 0;
+ uword *feip = 0;
+ memset (a, 0, sizeof (*a));
+
+ feip = hash_get (lcm->fwd_entry_by_mapping_index, dst_map_index);
+ if (!feip)
+ return;
+
+ fe = pool_elt_at_index (lcm->fwd_entry_pool, feip[0]);
+
+ /* delete dp fwd entry */
+ u32 sw_if_index;
+ a->is_add = 0;
+ a->locator_pairs = fe->locator_pairs;
+ a->vni = gid_address_vni (&fe->reid);
+ gid_address_copy (&a->rmt_eid, &fe->reid);
+ if (fe->is_src_dst)
+ gid_address_copy (&a->lcl_eid, &fe->leid);
+
+ vnet_lisp_gpe_add_del_fwd_entry (a, &sw_if_index);
+
+ /* delete entry in fwd table */
+ hash_unset (lcm->fwd_entry_by_mapping_index, dst_map_index);
+ vec_free (fe->locator_pairs);
+ pool_put (lcm->fwd_entry_pool, fe);
+}
+
+/**
+ * Finds the remote locators with best (lowest) priority that have a local
+ * peer locator with an underlying route to them and collects the resulting
+ * local/remote pairs in locator_pairs.
+ *
+ * Returns 1 if at least one pair was found, 0 otherwise.
+ */
+static u32
+get_locator_pairs (lisp_cp_main_t * lcm, mapping_t * lcl_map,
+ mapping_t * rmt_map, locator_pair_t ** locator_pairs)
+{
+ u32 i, limitp = 0, li, found = 0, esi;
+ locator_set_t *rmt_ls, *lcl_ls;
+ ip_address_t _lcl_addr, *lcl_addr = &_lcl_addr;
+ locator_t *lp, *rmt = 0;
+ uword *checked = 0;
+ locator_pair_t pair;
+
+ rmt_ls =
+ pool_elt_at_index (lcm->locator_set_pool, rmt_map->locator_set_index);
+ lcl_ls =
+ pool_elt_at_index (lcm->locator_set_pool, lcl_map->locator_set_index);
+
+ if (!rmt_ls || vec_len (rmt_ls->locator_indices) == 0)
+ return 0;
+
+ while (1)
+ {
+ rmt = 0;
+
+ /* find unvisited remote locator with best priority */
+ for (i = 0; i < vec_len (rmt_ls->locator_indices); i++)
+ {
+ if (0 != hash_get (checked, i))
+ continue;
+
+ li = vec_elt (rmt_ls->locator_indices, i);
+ lp = pool_elt_at_index (lcm->locator_pool, li);
+
+ /* we don't support non-IP locators for now */
+ if (gid_address_type (&lp->address) != GID_ADDR_IP_PREFIX)
+ continue;
+
+ if ((found && lp->priority == limitp)
+ || (!found && lp->priority >= limitp))
+ {
+ rmt = lp;
+
+ /* don't search for locators with lower priority and don't
+ * check this locator again*/
+ limitp = lp->priority;
+ hash_set (checked, i, 1);
+ break;
+ }
+ }
+ /* check if a local locator with a route to remote locator exists */
+ if (rmt != 0)
+ {
+ /* find egress sw_if_index for rmt locator */
+ esi =
+ ip_fib_get_egress_iface_for_dst (lcm,
+ &gid_address_ip (&rmt->address));
+ if ((u32) ~ 0 == esi)
+ continue;
+
+ for (i = 0; i < vec_len (lcl_ls->locator_indices); i++)
+ {
+ li = vec_elt (lcl_ls->locator_indices, i);
+ locator_t *sl = pool_elt_at_index (lcm->locator_pool, li);
+
+ /* found local locator with the needed sw_if_index */
+ if (sl->sw_if_index == esi)
+ {
+ /* and it has an address */
+ if (0 == ip_interface_get_first_ip_address (lcm,
+ sl->sw_if_index,
+ gid_address_ip_version
+ (&rmt->address),
+ lcl_addr))
+ continue;
+
+ memset (&pair, 0, sizeof (pair));
+ ip_address_copy (&pair.rmt_loc,
+ &gid_address_ip (&rmt->address));
+ ip_address_copy (&pair.lcl_loc, lcl_addr);
+ pair.weight = rmt->weight;
+ pair.priority = rmt->priority;
+ vec_add1 (locator_pairs[0], pair);
+ found = 1;
+ }
+ }
+ }
+ else
+ break;
+ }
+
+ hash_free (checked);
+ return found;
+}
+
+static void
+gid_address_sd_to_flat (gid_address_t * dst, gid_address_t * src,
+ fid_address_t * fid)
+{
+ ASSERT (GID_ADDR_SRC_DST == gid_address_type (src));
+
+ dst[0] = src[0];
+
+ switch (fid_addr_type (fid))
+ {
+ case FID_ADDR_IP_PREF:
+ gid_address_type (dst) = GID_ADDR_IP_PREFIX;
+ gid_address_ippref (dst) = fid_addr_ippref (fid);
+ break;
+ case FID_ADDR_MAC:
+ gid_address_type (dst) = GID_ADDR_MAC;
+ mac_copy (gid_address_mac (dst), fid_addr_mac (fid));
+ break;
+ default:
+ clib_warning ("Unsupported fid type %d!", fid_addr_type (fid));
+ break;
+ }
+}
+
+u8
+vnet_lisp_map_register_state_get (void)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ return lcm->map_registering;
+}
+
+u8
+vnet_lisp_rloc_probe_state_get (void)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ return lcm->rloc_probing;
+}
+
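+/*
+ * Program a data-plane forwarding entry towards dst_map_index: any
+ * existing entry is replaced first, locator pairs are computed from the
+ * local and remote mappings, and a negative entry carrying the mapping's
+ * action is installed when no usable locator pair exists.
+ */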
+static void
+dp_add_fwd_entry (lisp_cp_main_t * lcm, u32 src_map_index, u32 dst_map_index)
+{
+ vnet_lisp_gpe_add_del_fwd_entry_args_t _a, *a = &_a;
+ mapping_t *src_map, *dst_map;
+ u32 sw_if_index;
+ uword *feip = 0, *dpid;
+ fwd_entry_t *fe;
+ u8 type, is_src_dst = 0;
+
+ memset (a, 0, sizeof (*a));
+
+ /* remove entry if it already exists */
+ feip = hash_get (lcm->fwd_entry_by_mapping_index, dst_map_index);
+ if (feip)
+ dp_del_fwd_entry (lcm, src_map_index, dst_map_index);
+
+ if (lcm->lisp_pitr)
+ src_map = pool_elt_at_index (lcm->mapping_pool, lcm->pitr_map_index);
+ else
+ src_map = pool_elt_at_index (lcm->mapping_pool, src_map_index);
+ dst_map = pool_elt_at_index (lcm->mapping_pool, dst_map_index);
+
+ /* insert data plane forwarding entry */
+ a->is_add = 1;
+
+ if (MR_MODE_SRC_DST == lcm->map_request_mode)
+ {
+ if (GID_ADDR_SRC_DST == gid_address_type (&dst_map->eid))
+ {
+ gid_address_sd_to_flat (&a->rmt_eid, &dst_map->eid,
+ &gid_address_sd_dst (&dst_map->eid));
+ gid_address_sd_to_flat (&a->lcl_eid, &dst_map->eid,
+ &gid_address_sd_src (&dst_map->eid));
+ }
+ else
+ {
+ gid_address_copy (&a->rmt_eid, &dst_map->eid);
+ gid_address_copy (&a->lcl_eid, &src_map->eid);
+ }
+ is_src_dst = 1;
+ }
+ else
+ gid_address_copy (&a->rmt_eid, &dst_map->eid);
+
+ a->vni = gid_address_vni (&a->rmt_eid);
+
+ /* get vrf or bd_index associated to vni */
+ type = gid_address_type (&a->rmt_eid);
+ if (GID_ADDR_IP_PREFIX == type)
+ {
+ dpid = hash_get (lcm->table_id_by_vni, a->vni);
+ if (!dpid)
+ {
+ clib_warning ("vni %d not associated to a vrf!", a->vni);
+ return;
+ }
+ a->table_id = dpid[0];
+ }
+ else if (GID_ADDR_MAC == type)
+ {
+ dpid = hash_get (lcm->bd_id_by_vni, a->vni);
+ if (!dpid)
+ {
+ clib_warning ("vni %d not associated to a bridge domain !", a->vni);
+ return;
+ }
+ a->bd_id = dpid[0];
+ }
+
+ /* find best locator pair that 1) verifies LISP policy 2) are connected */
+ if (0 == get_locator_pairs (lcm, src_map, dst_map, &a->locator_pairs))
+ {
+ /* negative entry */
+ a->is_negative = 1;
+ a->action = dst_map->action;
+ }
+
+ /* TODO remove */
+ u8 ipver = ip_prefix_version (&gid_address_ippref (&a->rmt_eid));
+ a->decap_next_index = (ipver == IP4) ?
+ LISP_GPE_INPUT_NEXT_IP4_INPUT : LISP_GPE_INPUT_NEXT_IP6_INPUT;
+
+ vnet_lisp_gpe_add_del_fwd_entry (a, &sw_if_index);
+
+ /* add tunnel to fwd entry table XXX check return value from DP insertion */
+ pool_get (lcm->fwd_entry_pool, fe);
+ fe->locator_pairs = a->locator_pairs;
+ gid_address_copy (&fe->reid, &a->rmt_eid);
+ gid_address_copy (&fe->leid, &src_map->eid);
+ fe->is_src_dst = is_src_dst;
+ hash_set (lcm->fwd_entry_by_mapping_index, dst_map_index,
+ fe - lcm->fwd_entry_pool);
+}
+
+/**
+ * Returns vector of adjacencies.
+ *
+ * The caller must free the vector returned by this function.
+ *
+ * @param vni virtual network identifier
+ * @return vector of adjacencies
+ */
+lisp_adjacency_t *
+vnet_lisp_adjacencies_get_by_vni (u32 vni)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ fwd_entry_t *fwd;
+ lisp_adjacency_t *adjs = 0, adj;
+
+ /* *INDENT-OFF* */
+ pool_foreach(fwd, lcm->fwd_entry_pool,
+ ({
+ if (gid_address_vni (&fwd->reid) != vni)
+ continue;
+
+ gid_address_copy (&adj.reid, &fwd->reid);
+ gid_address_copy (&adj.leid, &fwd->leid);
+ vec_add1 (adjs, adj);
+ }));
+ /* *INDENT-ON* */
+
+ return adjs;
+}
+
+static clib_error_t *
+lisp_show_adjacencies_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ lisp_adjacency_t *adjs, *adj;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 vni = ~0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "vni %d", &vni))
+ ;
+ else
+ {
+ vlib_cli_output (vm, "parse error: '%U'",
+ format_unformat_error, line_input);
+ return 0;
+ }
+ }
+
+ if (~0 == vni)
+ {
+ vlib_cli_output (vm, "error: no vni specified!");
+ return 0;
+ }
+
+ vlib_cli_output (vm, "%s %40s\n", "leid", "reid");
+ adjs = vnet_lisp_adjacencies_get_by_vni (vni);
+
+ vec_foreach (adj, adjs)
+ {
+ vlib_cli_output (vm, "%U %40U\n", format_gid_address, &adj->leid,
+ format_gid_address, &adj->reid);
+ }
+ vec_free (adjs);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_show_adjacencies_command) = {
+ .path = "show lisp adjacencies",
+ .short_help = "show lisp adjacencies",
+ .function = lisp_show_adjacencies_command_fn,
+};
+/* *INDENT-ON* */
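+
+/**
+ * Illustrative invocation of the command above; the vni value is an
+ * example assumption, not taken from this patch:
+ *
+ *   vpp# show lisp adjacencies vni 0
+ */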
+
+static lisp_msmr_t *
+get_map_server (ip_address_t * a)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ lisp_msmr_t *m;
+
+ vec_foreach (m, lcm->map_servers)
+ {
+ if (!ip_address_cmp (&m->address, a))
+ {
+ return m;
+ }
+ }
+ return 0;
+}
+
+static lisp_msmr_t *
+get_map_resolver (ip_address_t * a)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ lisp_msmr_t *m;
+
+ vec_foreach (m, lcm->map_resolvers)
+ {
+ if (!ip_address_cmp (&m->address, a))
+ {
+ return m;
+ }
+ }
+ return 0;
+}
+
+int
+vnet_lisp_add_del_map_server (ip_address_t * addr, u8 is_add)
+{
+ u32 i;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ lisp_msmr_t _ms, *ms = &_ms;
+
+ if (vnet_lisp_enable_disable_status () == 0)
+ {
+ clib_warning ("LISP is disabled!");
+ return VNET_API_ERROR_LISP_DISABLED;
+ }
+
+ if (is_add)
+ {
+ if (get_map_server (addr))
+ {
+ clib_warning ("map-server %U already exists!", format_ip_address,
+ addr);
+ return -1;
+ }
+
+ memset (ms, 0, sizeof (*ms));
+ ip_address_copy (&ms->address, addr);
+ vec_add1 (lcm->map_servers, ms[0]);
+ }
+ else
+ {
+ for (i = 0; i < vec_len (lcm->map_servers); i++)
+ {
+ ms = vec_elt_at_index (lcm->map_servers, i);
+ if (!ip_address_cmp (&ms->address, addr))
+ {
+ vec_del1 (lcm->map_servers, i);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+lisp_add_del_map_server_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ int rv = 0;
+ u8 is_add = 1, ip_set = 0;
+ ip_address_t ip;
+ unformat_input_t _line_input, *line_input = &_line_input;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "add"))
+ is_add = 1;
+ else if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "%U", unformat_ip_address, &ip))
+ ip_set = 1;
+ else
+ {
+ vlib_cli_output (vm, "parse error: '%U'",
+ format_unformat_error, line_input);
+ return 0;
+ }
+ }
+
+ if (!ip_set)
+ {
+ vlib_cli_output (vm, "map-server ip address not set!");
+ return 0;
+ }
+
+ rv = vnet_lisp_add_del_map_server (&ip, is_add);
+ if (0 != rv)
+ vlib_cli_output (vm, "failed to %s map-server!",
+ is_add ? "add" : "delete");
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_add_del_map_server_command) = {
+ .path = "lisp map-server",
+ .short_help = "lisp map-server add|del <ip>",
+ .function = lisp_add_del_map_server_command_fn,
+};
+/* *INDENT-ON* */
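+
+/**
+ * Illustrative usage of the command above (the address is an example
+ * assumption):
+ *
+ *   vpp# lisp map-server add 10.0.0.1
+ *   vpp# lisp map-server del 10.0.0.1
+ */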
+
+/**
+ * Add/remove mapping to/from map-cache. Overwriting not allowed.
+ */
+int
+vnet_lisp_map_cache_add_del (vnet_lisp_add_del_mapping_args_t * a,
+ u32 * map_index_result)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ u32 mi, *map_indexp, map_index, i;
+ mapping_t *m, *old_map;
+ u32 **eid_indexes;
+
+ mi = gid_dictionary_lookup (&lcm->mapping_index_by_gid, &a->eid);
+ old_map = mi != ~0 ? pool_elt_at_index (lcm->mapping_pool, mi) : 0;
+ if (a->is_add)
+ {
+ /* TODO check if overwriting and take appropriate actions */
+ if (mi != GID_LOOKUP_MISS && !gid_address_cmp (&old_map->eid, &a->eid))
+ {
+ clib_warning ("eid %U found in the eid-table", format_gid_address,
+ &a->eid);
+ return VNET_API_ERROR_VALUE_EXIST;
+ }
+
+ pool_get (lcm->mapping_pool, m);
+ gid_address_copy (&m->eid, &a->eid);
+ m->locator_set_index = a->locator_set_index;
+ m->ttl = a->ttl;
+ m->action = a->action;
+ m->local = a->local;
+ m->is_static = a->is_static;
+ m->key = vec_dup (a->key);
+ m->key_id = a->key_id;
+
+ map_index = m - lcm->mapping_pool;
+ gid_dictionary_add_del (&lcm->mapping_index_by_gid, &a->eid, map_index,
+ 1);
+
+ if (pool_is_free_index (lcm->locator_set_pool, a->locator_set_index))
+ {
+ clib_warning ("Locator set with index %d doesn't exist",
+ a->locator_set_index);
+ return VNET_API_ERROR_INVALID_VALUE;
+ }
+
+ /* add eid to list of eids supported by locator-set */
+ vec_validate (lcm->locator_set_to_eids, a->locator_set_index);
+ eid_indexes = vec_elt_at_index (lcm->locator_set_to_eids,
+ a->locator_set_index);
+ vec_add1 (eid_indexes[0], map_index);
+
+ if (a->local)
+ {
+ /* mark as local */
+ vec_add1 (lcm->local_mappings_indexes, map_index);
+ }
+ map_index_result[0] = map_index;
+ }
+ else
+ {
+ if (mi == GID_LOOKUP_MISS)
+ {
+ clib_warning ("eid %U not found in the eid-table",
+ format_gid_address, &a->eid);
+ return VNET_API_ERROR_INVALID_VALUE;
+ }
+
+ /* clear locator-set to eids binding */
+ eid_indexes = vec_elt_at_index (lcm->locator_set_to_eids,
+ a->locator_set_index);
+ for (i = 0; i < vec_len (eid_indexes[0]); i++)
+ {
+ map_indexp = vec_elt_at_index (eid_indexes[0], i);
+ if (map_indexp[0] == mi)
+ break;
+ }
+ vec_del1 (eid_indexes[0], i);
+
+ /* remove local mark if needed */
+ m = pool_elt_at_index (lcm->mapping_pool, mi);
+ if (m->local)
+ {
+ u32 k, *lm_indexp;
+ for (k = 0; k < vec_len (lcm->local_mappings_indexes); k++)
+ {
+ lm_indexp = vec_elt_at_index (lcm->local_mappings_indexes, k);
+ if (lm_indexp[0] == mi)
+ break;
+ }
+ vec_del1 (lcm->local_mappings_indexes, k);
+ }
+
+ /* remove mapping from dictionary */
+ gid_dictionary_add_del (&lcm->mapping_index_by_gid, &a->eid, 0, 0);
+ gid_address_free (&m->eid);
+ pool_put_index (lcm->mapping_pool, mi);
+ }
+
+ return 0;
+}
+
+/**
+ * Add/update/delete mapping to/in/from map-cache.
+ */
+int
+vnet_lisp_add_del_local_mapping (vnet_lisp_add_del_mapping_args_t * a,
+ u32 * map_index_result)
+{
+ uword *dp_table = 0;
+ u32 vni;
+ u8 type;
+
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+
+ if (vnet_lisp_enable_disable_status () == 0)
+ {
+ clib_warning ("LISP is disabled!");
+ return VNET_API_ERROR_LISP_DISABLED;
+ }
+
+ vni = gid_address_vni (&a->eid);
+ type = gid_address_type (&a->eid);
+ if (GID_ADDR_IP_PREFIX == type)
+ dp_table = hash_get (lcm->table_id_by_vni, vni);
+ else if (GID_ADDR_MAC == type)
+ dp_table = hash_get (lcm->bd_id_by_vni, vni);
+
+ if (!dp_table)
+ {
+ clib_warning ("vni %d not associated to a %s!", vni,
+ GID_ADDR_IP_PREFIX == type ? "vrf" : "bd");
+ return VNET_API_ERROR_INVALID_VALUE;
+ }
+
+ /* store/remove mapping from map-cache */
+ return vnet_lisp_map_cache_add_del (a, map_index_result);
+}
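+
+/**
+ * Minimal sketch (not part of this patch) of registering a local EID
+ * programmatically, mirroring the CLI handler below; 'eid' is assumed
+ * to hold a parsed gid address and 'ls_index' a valid locator-set index:
+ *
+ *   vnet_lisp_add_del_mapping_args_t _a, *a = &_a;
+ *   u32 map_index = 0;
+ *
+ *   memset (a, 0, sizeof (a[0]));
+ *   gid_address_copy (&a->eid, &eid);
+ *   a->is_add = 1;
+ *   a->local = 1;
+ *   a->locator_set_index = ls_index;
+ *   vnet_lisp_add_del_local_mapping (a, &map_index);
+ */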
+
+static clib_error_t *
+lisp_add_del_local_eid_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 is_add = 1;
+ gid_address_t eid;
+ gid_address_t *eids = 0;
+ clib_error_t *error = 0;
+ u8 *locator_set_name = 0;
+ u32 locator_set_index = 0, map_index = 0;
+ uword *p;
+ vnet_lisp_add_del_mapping_args_t _a, *a = &_a;
+ int rv = 0;
+ u32 vni = 0;
+ u8 *key = 0;
+ u32 key_id = 0;
+
+ memset (&eid, 0, sizeof (eid));
+ memset (a, 0, sizeof (*a));
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "add"))
+ is_add = 1;
+ else if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "eid %U", unformat_gid_address, &eid))
+ ;
+ else if (unformat (line_input, "vni %d", &vni))
+ gid_address_vni (&eid) = vni;
+ else if (unformat (line_input, "secret-key %_%v%_", &key))
+ ;
+ else if (unformat (line_input, "key-id %U", unformat_hmac_key_id,
+ &key_id))
+ ;
+ else if (unformat (line_input, "locator-set %_%v%_", &locator_set_name))
+ {
+ p = hash_get_mem (lcm->locator_set_index_by_name, locator_set_name);
+ if (!p)
+ {
+ error = clib_error_return (0, "locator-set %s doesn't exist",
+ locator_set_name);
+ goto done;
+ }
+ locator_set_index = p[0];
+ }
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+ /* XXX treat batch configuration */
+
+ if (GID_ADDR_SRC_DST == gid_address_type (&eid))
+ {
+ error =
+ clib_error_return (0, "src/dst is not supported for local EIDs!");
+ goto done;
+ }
+
+ if (key && (0 == key_id))
+ {
+ vlib_cli_output (vm, "invalid key_id!");
+ return 0;
+ }
+
+ gid_address_copy (&a->eid, &eid);
+ a->is_add = is_add;
+ a->locator_set_index = locator_set_index;
+ a->local = 1;
+ a->key = key;
+ a->key_id = key_id;
+
+ rv = vnet_lisp_add_del_local_mapping (a, &map_index);
+ if (0 != rv)
+ {
+ error = clib_error_return (0, "failed to %s local mapping!",
+ is_add ? "add" : "delete");
+ }
+done:
+ vec_free (eids);
+ if (locator_set_name)
+ vec_free (locator_set_name);
+ gid_address_free (&a->eid);
+ vec_free (a->key);
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_add_del_local_eid_command) = {
+ .path = "lisp eid-table",
+ .short_help = "lisp eid-table add/del [vni <vni>] eid <eid> "
+ "locator-set <locator-set> [key <secret-key> key-id sha1|sha256 ]",
+ .function = lisp_add_del_local_eid_command_fn,
+};
+/* *INDENT-ON* */
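+
+/**
+ * Illustrative usage of the command above (EID prefix and locator-set
+ * name are example assumptions):
+ *
+ *   vpp# lisp eid-table add vni 0 eid 6.0.1.0/24 locator-set ls1
+ */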
+
+int
+vnet_lisp_eid_table_map (u32 vni, u32 dp_id, u8 is_l2, u8 is_add)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ uword *dp_idp, *vnip, **dp_table_by_vni, **vni_by_dp_table;
+
+ if (vnet_lisp_enable_disable_status () == 0)
+ {
+ clib_warning ("LISP is disabled!");
+ return -1;
+ }
+
+ dp_table_by_vni = is_l2 ? &lcm->bd_id_by_vni : &lcm->table_id_by_vni;
+ vni_by_dp_table = is_l2 ? &lcm->vni_by_bd_id : &lcm->vni_by_table_id;
+
+ if (!is_l2 && (vni == 0 || dp_id == 0))
+ {
+ clib_warning ("can't add/del default vni-vrf mapping!");
+ return -1;
+ }
+
+ dp_idp = hash_get (dp_table_by_vni[0], vni);
+ vnip = hash_get (vni_by_dp_table[0], dp_id);
+
+ if (is_add)
+ {
+ if (dp_idp || vnip)
+ {
+ clib_warning ("vni %d or vrf %d already used in vrf/vni "
+ "mapping!", vni, dp_id);
+ return -1;
+ }
+ hash_set (dp_table_by_vni[0], vni, dp_id);
+ hash_set (vni_by_dp_table[0], dp_id, vni);
+
+ /* create dp iface */
+ dp_add_del_iface (lcm, vni, is_l2, 1);
+ }
+ else
+ {
+ if (!dp_idp || !vnip)
+ {
+ clib_warning ("vni %d or vrf %d not used in any vrf/vni! "
+ "mapping!", vni, dp_id);
+ return -1;
+ }
+ hash_unset (dp_table_by_vni[0], vni);
+ hash_unset (vni_by_dp_table[0], dp_id);
+
+ /* remove dp iface */
+ dp_add_del_iface (lcm, vni, is_l2, 0);
+ }
+ return 0;
+}
+
+static clib_error_t *
+lisp_eid_table_map_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ u8 is_add = 1, is_l2 = 0;
+ u32 vni = 0, dp_id = 0;
+ unformat_input_t _line_input, *line_input = &_line_input;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "vni %d", &vni))
+ ;
+ else if (unformat (line_input, "vrf %d", &dp_id))
+ ;
+ else if (unformat (line_input, "bd %d", &dp_id))
+ is_l2 = 1;
+ else
+ {
+ return unformat_parse_error (line_input);
+ }
+ }
+ vnet_lisp_eid_table_map (vni, dp_id, is_l2, is_add);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_eid_table_map_command) = {
+ .path = "lisp eid-table map",
+ .short_help = "lisp eid-table map [del] vni <vni> vrf <vrf> | bd <bdi>",
+ .function = lisp_eid_table_map_command_fn,
+};
+/* *INDENT-ON* */
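+
+/**
+ * Illustrative usage of the command above (vni and vrf values are
+ * example assumptions):
+ *
+ *   vpp# lisp eid-table map vni 1 vrf 7
+ *   vpp# lisp eid-table map del vni 1 vrf 7
+ */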
+
+/* return 0 if the two locator sets are identical, 1 otherwise */
+static u8
+compare_locators (lisp_cp_main_t * lcm, u32 * old_ls_indexes,
+ locator_t * new_locators)
+{
+ u32 i, old_li;
+ locator_t *old_loc, *new_loc;
+
+ if (vec_len (old_ls_indexes) != vec_len (new_locators))
+ return 1;
+
+ for (i = 0; i < vec_len (new_locators); i++)
+ {
+ old_li = vec_elt (old_ls_indexes, i);
+ old_loc = pool_elt_at_index (lcm->locator_pool, old_li);
+
+ new_loc = vec_elt_at_index (new_locators, i);
+
+ if (locator_cmp (old_loc, new_loc))
+ return 1;
+ }
+ return 0;
+}
+
+typedef struct
+{
+ u8 is_negative;
+ void *lcm;
+ gid_address_t *eids_to_be_deleted;
+} remove_mapping_args_t;
+
+/**
+ * Callback invoked when a sub-prefix is found
+ */
+static void
+remove_mapping_if_needed (u32 mi, void *arg)
+{
+ u8 delete = 0;
+ remove_mapping_args_t *a = arg;
+ lisp_cp_main_t *lcm = a->lcm;
+ mapping_t *m;
+ locator_set_t *ls;
+
+ m = pool_elt_at_index (lcm->mapping_pool, mi);
+ if (!m)
+ return;
+
+ ls = pool_elt_at_index (lcm->locator_set_pool, m->locator_set_index);
+
+ if (a->is_negative)
+ {
+ if (0 != vec_len (ls->locator_indices))
+ delete = 1;
+ }
+ else
+ {
+ if (0 == vec_len (ls->locator_indices))
+ delete = 1;
+ }
+
+ if (delete)
+ vec_add1 (a->eids_to_be_deleted, m->eid);
+}
+
+/**
+ * This function searches the map cache for IP prefixes that are a subset
+ * of the provided one. If such a prefix is found then, depending on
+ * 'is_negative', it does the following:
+ *
+ * 1) if is_negative is true and the found prefix points to a positive
+ * mapping, then the mapping is removed
+ * 2) if is_negative is false and the found prefix points to a negative
+ * mapping, then the mapping is removed
+ */
+static void
+remove_overlapping_sub_prefixes (lisp_cp_main_t * lcm, gid_address_t * eid,
+ u8 is_negative)
+{
+ gid_address_t *e;
+ remove_mapping_args_t a;
+ memset (&a, 0, sizeof (a));
+
+ /* do this only in src/dst mode ... */
+ if (MR_MODE_SRC_DST != lcm->map_request_mode)
+ return;
+
+ /* ... and only for IP prefix */
+ if (GID_ADDR_SRC_DST != gid_address_type (eid)
+ || (FID_ADDR_IP_PREF != gid_address_sd_dst_type (eid)))
+ return;
+
+ a.is_negative = is_negative;
+ a.lcm = lcm;
+
+ gid_dict_foreach_subprefix (&lcm->mapping_index_by_gid, eid,
+ remove_mapping_if_needed, &a);
+
+ vec_foreach (e, a.eids_to_be_deleted)
+ {
+ lisp_add_del_adjacency (lcm, 0, e, 0 /* is_add */ );
+ vnet_lisp_add_del_mapping (e, 0, 0, 0, 0, 0 /* is add */ , 0, 0);
+ }
+
+ vec_free (a.eids_to_be_deleted);
+}
+
+static void
+mapping_delete_timer (lisp_cp_main_t * lcm, u32 mi)
+{
+ timing_wheel_delete (&lcm->wheel, mi);
+}
+
+/**
+ * Adds/removes/updates mapping. Does not program forwarding.
+ *
+ * @param eid end-host identifier
+ * @param rlocs vector of remote locators
+ * @param action action for negative map-reply
+ * @param is_add add mapping if non-zero, delete otherwise
+ * @param res_map_index the map-index that was created/updated/removed. It is
+ * set to ~0 if no action is taken.
+ * @param is_static used for distinguishing between statically learned
+ * remote mappings and mappings obtained from MR
+ * @return return code
+ */
+int
+vnet_lisp_add_del_mapping (gid_address_t * eid, locator_t * rlocs, u8 action,
+ u8 authoritative, u32 ttl, u8 is_add, u8 is_static,
+ u32 * res_map_index)
+{
+ vnet_lisp_add_del_mapping_args_t _m_args, *m_args = &_m_args;
+ vnet_lisp_add_del_locator_set_args_t _ls_args, *ls_args = &_ls_args;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ u32 mi, ls_index = 0, dst_map_index;
+ mapping_t *old_map;
+
+ if (vnet_lisp_enable_disable_status () == 0)
+ {
+ clib_warning ("LISP is disabled!");
+ return VNET_API_ERROR_LISP_DISABLED;
+ }
+
+ if (res_map_index)
+ res_map_index[0] = ~0;
+
+ memset (m_args, 0, sizeof (m_args[0]));
+ memset (ls_args, 0, sizeof (ls_args[0]));
+
+ ls_args->locators = rlocs;
+
+ mi = gid_dictionary_lookup (&lcm->mapping_index_by_gid, eid);
+ old_map = ((u32) ~ 0 != mi) ? pool_elt_at_index (lcm->mapping_pool, mi) : 0;
+
+ if (is_add)
+ {
+ /* overwrite: if mapping already exists, decide if locators should be
+ * updated and be done */
+ if (old_map && gid_address_cmp (&old_map->eid, eid) == 0)
+ {
+ if (!is_static && (old_map->is_static || old_map->local))
+ {
+ /* do not overwrite local or static remote mappings */
+ clib_warning ("mapping %U rejected due to collision with local "
+ "or static remote mapping!", format_gid_address,
+ eid);
+ return 0;
+ }
+
+ locator_set_t *old_ls;
+
+ /* update mapping attributes */
+ old_map->action = action;
+ old_map->authoritative = authoritative;
+ old_map->ttl = ttl;
+
+ old_ls = pool_elt_at_index (lcm->locator_set_pool,
+ old_map->locator_set_index);
+ if (compare_locators (lcm, old_ls->locator_indices,
+ ls_args->locators))
+ {
+ /* set locator-set index to overwrite */
+ ls_args->is_add = 1;
+ ls_args->index = old_map->locator_set_index;
+ vnet_lisp_add_del_locator_set (ls_args, 0);
+ if (res_map_index)
+ res_map_index[0] = mi;
+ }
+ }
+ /* new mapping */
+ else
+ {
+ remove_overlapping_sub_prefixes (lcm, eid, 0 == ls_args->locators);
+
+ ls_args->is_add = 1;
+ ls_args->index = ~0;
+
+ vnet_lisp_add_del_locator_set (ls_args, &ls_index);
+
+ /* add mapping */
+ gid_address_copy (&m_args->eid, eid);
+ m_args->is_add = 1;
+ m_args->action = action;
+ m_args->locator_set_index = ls_index;
+ m_args->is_static = is_static;
+ m_args->ttl = ttl;
+ vnet_lisp_map_cache_add_del (m_args, &dst_map_index);
+
+ if (res_map_index)
+ res_map_index[0] = dst_map_index;
+ }
+ }
+ else
+ {
+ if (old_map == 0 || gid_address_cmp (&old_map->eid, eid) != 0)
+ {
+ clib_warning ("cannot delete mapping for eid %U",
+ format_gid_address, eid);
+ return -1;
+ }
+
+ m_args->is_add = 0;
+ gid_address_copy (&m_args->eid, eid);
+ m_args->locator_set_index = old_map->locator_set_index;
+
+ /* delete mapping associated from map-cache */
+ vnet_lisp_map_cache_add_del (m_args, 0);
+
+ ls_args->is_add = 0;
+ ls_args->index = old_map->locator_set_index;
+ /* delete locator set */
+ vnet_lisp_add_del_locator_set (ls_args, 0);
+
+ /* delete timer associated to the mapping if any */
+ if (old_map->timer_set)
+ mapping_delete_timer (lcm, mi);
+
+ /* return old mapping index */
+ if (res_map_index)
+ res_map_index[0] = mi;
+ }
+
+ /* success */
+ return 0;
+}
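+
+/**
+ * Minimal sketch (not part of this patch) of adding a static remote
+ * mapping with infinite ttl, as the remote-mapping CLI handler below
+ * does; 'eid' and the 'rlocs' vector are assumed to be already built:
+ *
+ *   u32 map_index = ~0;
+ *   int rv = vnet_lisp_add_del_mapping (&eid, rlocs, 0, 0, ~0,
+ *                                       1, 1, &map_index);
+ *
+ * (arguments: eid, rlocs, action, authoritative, ttl, is_add,
+ * is_static, res_map_index)
+ */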
+
+int
+vnet_lisp_clear_all_remote_adjacencies (void)
+{
+ int rv = 0;
+ u32 mi, *map_indices = 0, *map_indexp;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ vnet_lisp_add_del_mapping_args_t _dm_args, *dm_args = &_dm_args;
+ vnet_lisp_add_del_locator_set_args_t _ls, *ls = &_ls;
+
+ /* *INDENT-OFF* */
+ pool_foreach_index (mi, lcm->mapping_pool,
+ ({
+ vec_add1 (map_indices, mi);
+ }));
+ /* *INDENT-ON* */
+
+ vec_foreach (map_indexp, map_indices)
+ {
+ mapping_t *map = pool_elt_at_index (lcm->mapping_pool, map_indexp[0]);
+ if (!map->local)
+ {
+ dp_del_fwd_entry (lcm, 0, map_indexp[0]);
+
+ dm_args->is_add = 0;
+ gid_address_copy (&dm_args->eid, &map->eid);
+ dm_args->locator_set_index = map->locator_set_index;
+
+ /* delete mapping associated to fwd entry */
+ vnet_lisp_map_cache_add_del (dm_args, 0);
+
+ ls->is_add = 0;
+ ls->local = 0;
+ ls->index = map->locator_set_index;
+ /* delete locator set */
+ rv = vnet_lisp_add_del_locator_set (ls, 0);
+ if (rv != 0)
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ if (map_indices)
+ vec_free (map_indices);
+ return rv;
+}
+
+/**
+ * Adds adjacency or removes forwarding entry associated to remote mapping.
+ * Note that adjacencies are not stored, they only result in forwarding entries
+ * being created.
+ */
+static int
+lisp_add_del_adjacency (lisp_cp_main_t * lcm, gid_address_t * local_eid,
+ gid_address_t * remote_eid, u8 is_add)
+{
+ u32 local_mi, remote_mi = ~0;
+
+ if (vnet_lisp_enable_disable_status () == 0)
+ {
+ clib_warning ("LISP is disabled!");
+ return VNET_API_ERROR_LISP_DISABLED;
+ }
+
+ remote_mi = gid_dictionary_sd_lookup (&lcm->mapping_index_by_gid,
+ remote_eid, local_eid);
+ if (GID_LOOKUP_MISS == remote_mi)
+ {
+ clib_warning ("Remote eid %U not found. Cannot add adjacency!",
+ format_gid_address, remote_eid);
+
+ return -1;
+ }
+
+ if (is_add)
+ {
+ /* TODO 1) check if src/dst 2) once we have src/dst working, use it in
+ * delete*/
+
+ /* check if source eid has an associated mapping. If pitr mode is on,
+ * just use the pitr's mapping */
+ local_mi = lcm->lisp_pitr ? lcm->pitr_map_index :
+ gid_dictionary_lookup (&lcm->mapping_index_by_gid, local_eid);
+
+ if (GID_LOOKUP_MISS == local_mi)
+ {
+ clib_warning ("Local eid %U not found. Cannot add adjacency!",
+ format_gid_address, local_eid);
+
+ return -1;
+ }
+
+ /* update forwarding */
+ dp_add_fwd_entry (lcm, local_mi, remote_mi);
+ }
+ else
+ dp_del_fwd_entry (lcm, 0, remote_mi);
+
+ return 0;
+}
+
+int
+vnet_lisp_add_del_adjacency (vnet_lisp_add_del_adjacency_args_t * a)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ return lisp_add_del_adjacency (lcm, &a->leid, &a->reid, a->is_add);
+}
+
+/**
+ * Handler for add/del remote mapping CLI.
+ *
+ * @param vm vlib context
+ * @param input input from user
+ * @param cmd cmd
+ * @return pointer to clib error structure
+ */
+static clib_error_t *
+lisp_add_del_remote_mapping_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *error = 0;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 is_add = 1, del_all = 0;
+ locator_t rloc, *rlocs = 0, *curr_rloc = 0;
+ gid_address_t eid;
+ u8 eid_set = 0;
+ u32 vni, action = ~0, p, w;
+ int rv;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ memset (&eid, 0, sizeof (eid));
+ memset (&rloc, 0, sizeof (rloc));
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del-all"))
+ del_all = 1;
+ else if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "add"))
+ ;
+ else if (unformat (line_input, "eid %U", unformat_gid_address, &eid))
+ eid_set = 1;
+ else if (unformat (line_input, "vni %u", &vni))
+ {
+ gid_address_vni (&eid) = vni;
+ }
+ else if (unformat (line_input, "p %d w %d", &p, &w))
+ {
+ if (!curr_rloc)
+ {
+ clib_warning
+ ("No RLOC configured for setting priority/weight!");
+ goto done;
+ }
+ curr_rloc->priority = p;
+ curr_rloc->weight = w;
+ }
+ else if (unformat (line_input, "rloc %U", unformat_ip_address,
+ &gid_address_ip (&rloc.address)))
+ {
+ /* since rloc is stored in ip prefix we need to set prefix length */
+ ip_prefix_t *pref = &gid_address_ippref (&rloc.address);
+
+ u8 version = gid_address_ip_version (&rloc.address);
+ ip_prefix_len (pref) = ip_address_max_len (version);
+
+ vec_add1 (rlocs, rloc);
+ curr_rloc = &rlocs[vec_len (rlocs) - 1];
+ }
+ else if (unformat (line_input, "action %U",
+ unformat_negative_mapping_action, &action))
+ ;
+ else
+ {
+ clib_warning ("parse error");
+ goto done;
+ }
+ }
+
+ if (!eid_set)
+ {
+ clib_warning ("missing eid!");
+ goto done;
+ }
+
+ if (!del_all)
+ {
+ if (is_add && (~0 == action) && 0 == vec_len (rlocs))
+ {
+ clib_warning ("no action set for negative map-reply!");
+ goto done;
+ }
+ }
+ else
+ {
+ vnet_lisp_clear_all_remote_adjacencies ();
+ goto done;
+ }
+
+ /* TODO build src/dst with seid */
+
+ /* if it's a delete, clean forwarding */
+ if (!is_add)
+ {
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ rv = lisp_add_del_adjacency (lcm, 0, &eid, /* is_add */ 0);
+ if (rv)
+ {
+ goto done;
+ }
+ }
+
+ /* add as static remote mapping, i.e., not authoritative and infinite
+ * ttl */
+ rv = vnet_lisp_add_del_mapping (&eid, rlocs, action, 0, ~0, is_add,
+ 1 /* is_static */ , 0);
+
+ if (rv)
+ clib_warning ("failed to %s remote mapping!", is_add ? "add" : "delete");
+
+done:
+ vec_free (rlocs);
+ unformat_free (line_input);
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_add_del_remote_mapping_command) = {
+ .path = "lisp remote-mapping",
+ .short_help = "lisp remote-mapping add|del [del-all] vni <vni> "
+ "eid <dst-eid> [action <no-action|natively-forward|"
+ "send-map-request|drop>] rloc <dst-locator> p <prio> w <weight> "
+ "[rloc <dst-locator> ... ]",
+ .function = lisp_add_del_remote_mapping_command_fn,
+};
+/* *INDENT-ON* */
+
+/**
+ * Handler for add/del adjacency CLI.
+ */
+static clib_error_t *
+lisp_add_del_adjacency_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *error = 0;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ vnet_lisp_add_del_adjacency_args_t _a, *a = &_a;
+ u8 is_add = 1;
+ ip_prefix_t *reid_ippref, *leid_ippref;
+ gid_address_t leid, reid;
+ u8 *dmac = gid_address_mac (&reid);
+ u8 *smac = gid_address_mac (&leid);
+ u8 reid_set = 0, leid_set = 0;
+ u32 vni;
+ int rv;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ memset (&reid, 0, sizeof (reid));
+ memset (&leid, 0, sizeof (leid));
+
+ leid_ippref = &gid_address_ippref (&leid);
+ reid_ippref = &gid_address_ippref (&reid);
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "add"))
+ ;
+ else if (unformat (line_input, "reid %U",
+ unformat_ip_prefix, reid_ippref))
+ {
+ gid_address_type (&reid) = GID_ADDR_IP_PREFIX;
+ reid_set = 1;
+ }
+ else if (unformat (line_input, "reid %U", unformat_mac_address, dmac))
+ {
+ gid_address_type (&reid) = GID_ADDR_MAC;
+ reid_set = 1;
+ }
+ else if (unformat (line_input, "vni %u", &vni))
+ {
+ gid_address_vni (&leid) = vni;
+ gid_address_vni (&reid) = vni;
+ }
+ else if (unformat (line_input, "leid %U",
+ unformat_ip_prefix, leid_ippref))
+ {
+ gid_address_type (&leid) = GID_ADDR_IP_PREFIX;
+ leid_set = 1;
+ }
+ else if (unformat (line_input, "leid %U", unformat_mac_address, smac))
+ {
+ gid_address_type (&leid) = GID_ADDR_MAC;
+ leid_set = 1;
+ }
+ else
+ {
+ clib_warning ("parse error");
+ goto done;
+ }
+ }
+
+ if (!reid_set || !leid_set)
+ {
+ clib_warning ("missing remote or local eid!");
+ goto done;
+ }
+
+ if ((gid_address_type (&leid) != gid_address_type (&reid))
+ || (gid_address_type (&reid) == GID_ADDR_IP_PREFIX
+ && ip_prefix_version (reid_ippref)
+ != ip_prefix_version (leid_ippref)))
+ {
+ clib_warning ("remote and local EIDs are of different types!");
+ goto done;
+ }
+
+ memset (a, 0, sizeof (a[0]));
+ gid_address_copy (&a->leid, &leid);
+ gid_address_copy (&a->reid, &reid);
+
+ a->is_add = is_add;
+ rv = vnet_lisp_add_del_adjacency (a);
+
+ if (rv)
+ clib_warning ("failed to %s adjacency!", is_add ? "add" : "delete");
+
+done:
+ unformat_free (line_input);
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_add_del_adjacency_command) = {
+ .path = "lisp adjacency",
+ .short_help = "lisp adjacency add|del vni <vni> reid <remote-eid> "
+ "leid <local-eid>",
+ .function = lisp_add_del_adjacency_command_fn,
+};
+/* *INDENT-ON* */
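+
+/**
+ * Illustrative usage of the command above (EID prefixes are example
+ * assumptions):
+ *
+ *   vpp# lisp adjacency add vni 0 reid 6.0.1.0/24 leid 6.0.2.0/24
+ */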
+
+int
+vnet_lisp_set_map_request_mode (u8 mode)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+
+ if (vnet_lisp_enable_disable_status () == 0)
+ {
+ clib_warning ("LISP is disabled!");
+ return VNET_API_ERROR_LISP_DISABLED;
+ }
+
+ if (mode >= _MR_MODE_MAX)
+ {
+ clib_warning ("Invalid LISP map request mode %d!", mode);
+ return VNET_API_ERROR_INVALID_ARGUMENT;
+ }
+
+ lcm->map_request_mode = mode;
+ return 0;
+}
+
+static clib_error_t *
+lisp_map_request_mode_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _i, *i = &_i;
+ map_request_mode_t mr_mode = _MR_MODE_MAX;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, i))
+ return 0;
+
+ while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (i, "dst-only"))
+ mr_mode = MR_MODE_DST_ONLY;
+ else if (unformat (i, "src-dst"))
+ mr_mode = MR_MODE_SRC_DST;
+ else
+ {
+ clib_warning ("parse error '%U'", format_unformat_error, i);
+ goto done;
+ }
+ }
+
+ if (_MR_MODE_MAX == mr_mode)
+ {
+ clib_warning ("No LISP map request mode entered!");
+ return 0;
+ }
+
+ vnet_lisp_set_map_request_mode (mr_mode);
+done:
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_map_request_mode_command) = {
+ .path = "lisp map-request mode",
+ .short_help = "lisp map-request mode dst-only|src-dst",
+ .function = lisp_map_request_mode_command_fn,
+};
+/* *INDENT-ON* */
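+
+/**
+ * Illustrative usage of the command above:
+ *
+ *   vpp# lisp map-request mode src-dst
+ */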
+
+static u8 *
+format_lisp_map_request_mode (u8 * s, va_list * args)
+{
+ u32 mode = va_arg (*args, u32);
+
+ switch (mode)
+ {
+ case 0:
+ return format (s, "dst-only");
+ case 1:
+ return format (s, "src-dst");
+ }
+ return s;
+}
+
+static clib_error_t *
+lisp_show_map_request_mode_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vlib_cli_output (vm, "map-request mode: %U", format_lisp_map_request_mode,
+ vnet_lisp_get_map_request_mode ());
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_show_map_request_mode_command) = {
+ .path = "show lisp map-request mode",
+ .short_help = "show lisp map-request mode",
+ .function = lisp_show_map_request_mode_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+lisp_show_map_resolvers_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ lisp_msmr_t *mr;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+
+ vec_foreach (mr, lcm->map_resolvers)
+ {
+ vlib_cli_output (vm, "%U", format_ip_address, &mr->address);
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_show_map_resolvers_command) = {
+ .path = "show lisp map-resolvers",
+ .short_help = "show lisp map-resolvers",
+ .function = lisp_show_map_resolvers_command_fn,
+};
+/* *INDENT-ON* */
+
+int
+vnet_lisp_pitr_set_locator_set (u8 * locator_set_name, u8 is_add)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ u32 locator_set_index = ~0;
+ mapping_t *m;
+ uword *p;
+
+ if (vnet_lisp_enable_disable_status () == 0)
+ {
+ clib_warning ("LISP is disabled!");
+ return VNET_API_ERROR_LISP_DISABLED;
+ }
+
+ p = hash_get_mem (lcm->locator_set_index_by_name, locator_set_name);
+ if (!p)
+ {
+ clib_warning ("locator-set %v doesn't exist", locator_set_name);
+ return -1;
+ }
+ locator_set_index = p[0];
+
+ if (is_add)
+ {
+ pool_get (lcm->mapping_pool, m);
+ m->locator_set_index = locator_set_index;
+ m->local = 1;
+ lcm->pitr_map_index = m - lcm->mapping_pool;
+
+ /* enable pitr mode */
+ lcm->lisp_pitr = 1;
+ }
+ else
+ {
+ /* remove pitr mapping */
+ pool_put_index (lcm->mapping_pool, lcm->pitr_map_index);
+
+ /* disable pitr mode */
+ lcm->lisp_pitr = 0;
+ }
+ return 0;
+}
+
+static clib_error_t *
+lisp_pitr_set_locator_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ u8 locator_name_set = 0;
+ u8 *locator_set_name = 0;
+ u8 is_add = 1;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ clib_error_t *error = 0;
+ int rv = 0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "ls %_%v%_", &locator_set_name))
+ locator_name_set = 1;
+ else if (unformat (line_input, "disable"))
+ is_add = 0;
+ else
+ return clib_error_return (0, "parse error");
+ }
+
+ if (!locator_name_set)
+ {
+ clib_warning ("No locator set specified!");
+ goto done;
+ }
+ rv = vnet_lisp_pitr_set_locator_set (locator_set_name, is_add);
+ if (0 != rv)
+ {
+ error = clib_error_return (0, "failed to %s pitr!",
+ is_add ? "add" : "delete");
+ }
+
+done:
+ if (locator_set_name)
+ vec_free (locator_set_name);
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_pitr_set_locator_set_command) = {
+ .path = "lisp pitr",
+ .short_help = "lisp pitr [disable] ls <locator-set-name>",
+ .function = lisp_pitr_set_locator_set_command_fn,
+};
+/* *INDENT-ON* */
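+
+/**
+ * Illustrative usage of the command above (the locator-set name is an
+ * example assumption; note the parser requires 'ls <name>' in both the
+ * enable and disable cases):
+ *
+ *   vpp# lisp pitr ls ls1
+ *   vpp# lisp pitr disable ls ls1
+ */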
+
+static clib_error_t *
+lisp_show_pitr_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ mapping_t *m;
+ locator_set_t *ls;
+ u8 *tmp_str = 0;
+
+ vlib_cli_output (vm, "%=20s%=16s",
+ "pitr", lcm->lisp_pitr ? "locator-set" : "");
+
+ if (!lcm->lisp_pitr)
+ {
+ vlib_cli_output (vm, "%=20s", "disable");
+ return 0;
+ }
+
+ if (~0 == lcm->pitr_map_index)
+ {
+ tmp_str = format (0, "N/A");
+ }
+ else
+ {
+ m = pool_elt_at_index (lcm->mapping_pool, lcm->pitr_map_index);
+ if (~0 != m->locator_set_index)
+ {
+ ls =
+ pool_elt_at_index (lcm->locator_set_pool, m->locator_set_index);
+ tmp_str = format (0, "%s", ls->name);
+ }
+ else
+ {
+ tmp_str = format (0, "N/A");
+ }
+ }
+ vec_add1 (tmp_str, 0);
+
+ vlib_cli_output (vm, "%=20s%=16s", "enable", tmp_str);
+
+ vec_free (tmp_str);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_show_pitr_command) = {
+ .path = "show lisp pitr",
+ .short_help = "Show pitr",
+ .function = lisp_show_pitr_command_fn,
+};
+/* *INDENT-ON* */
+
+static u8 *
+format_eid_entry (u8 * s, va_list * args)
+{
+ vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+ lisp_cp_main_t *lcm = va_arg (*args, lisp_cp_main_t *);
+ mapping_t *mapit = va_arg (*args, mapping_t *);
+ locator_set_t *ls = va_arg (*args, locator_set_t *);
+ gid_address_t *gid = &mapit->eid;
+ u32 ttl = mapit->ttl;
+ u8 aut = mapit->authoritative;
+ u32 *loc_index;
+ u8 first_line = 1;
+ u8 *loc;
+
+ u8 *type = ls->local ? format (0, "local(%s)", ls->name)
+ : format (0, "remote");
+
+ if (vec_len (ls->locator_indices) == 0)
+ {
+ s = format (s, "%-35U%-30s%-20u%-u", format_gid_address, gid,
+ type, ttl, aut);
+ }
+ else
+ {
+ vec_foreach (loc_index, ls->locator_indices)
+ {
+ locator_t *l = pool_elt_at_index (lcm->locator_pool, loc_index[0]);
+ if (l->local)
+ loc = format (0, "%U", format_vnet_sw_if_index_name, vnm,
+ l->sw_if_index);
+ else
+ loc = format (0, "%U", format_ip_address,
+ &gid_address_ip (&l->address));
+
+ if (first_line)
+ {
+ s = format (s, "%-35U%-20s%-30v%-20u%-u\n", format_gid_address,
+ gid, type, loc, ttl, aut);
+ first_line = 0;
+ }
+ else
+ s = format (s, "%55s%v\n", "", loc);
+ }
+ }
+ return s;
+}
+
+static clib_error_t *
+lisp_show_eid_table_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ mapping_t *mapit;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 mi;
+ gid_address_t eid;
+ u8 print_all = 1;
+ u8 filter = 0;
+
+ memset (&eid, 0, sizeof (eid));
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "eid %U", unformat_gid_address, &eid))
+ print_all = 0;
+ else if (unformat (line_input, "local"))
+ filter = 1;
+ else if (unformat (line_input, "remote"))
+ filter = 2;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ vlib_cli_output (vm, "%-35s%-20s%-30s%-20s%-s",
+ "EID", "type", "locators", "ttl", "autoritative");
+
+ if (print_all)
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (mapit, lcm->mapping_pool,
+ ({
+ locator_set_t * ls = pool_elt_at_index (lcm->locator_set_pool,
+ mapit->locator_set_index);
+ if (filter && !((1 == filter && ls->local) ||
+ (2 == filter && !ls->local)))
+ {
+ continue;
+ }
+ vlib_cli_output (vm, "%U", format_eid_entry, lcm->vnet_main,
+ lcm, mapit, ls);
+ }));
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ mi = gid_dictionary_lookup (&lcm->mapping_index_by_gid, &eid);
+ if ((u32) ~ 0 == mi)
+ return 0;
+
+ mapit = pool_elt_at_index (lcm->mapping_pool, mi);
+ locator_set_t *ls = pool_elt_at_index (lcm->locator_set_pool,
+ mapit->locator_set_index);
+
+ if (filter && !((1 == filter && ls->local) ||
+ (2 == filter && !ls->local)))
+ {
+ return 0;
+ }
+
+ vlib_cli_output (vm, "%U,", format_eid_entry, lcm->vnet_main,
+ lcm, mapit, ls);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_cp_show_eid_table_command) = {
+ .path = "show lisp eid-table",
+ .short_help = "Shows EID table",
+ .function = lisp_show_eid_table_command_fn,
+};
+/* *INDENT-ON* */
+
+/* cleans locator to locator-set data and removes locators not part of
+ * any locator-set */
+static void
+clean_locator_to_locator_set (lisp_cp_main_t * lcm, u32 lsi)
+{
+ u32 i, j, *loc_indexp, *ls_indexp, **ls_indexes, *to_be_deleted = 0;
+ locator_set_t *ls = pool_elt_at_index (lcm->locator_set_pool, lsi);
+ for (i = 0; i < vec_len (ls->locator_indices); i++)
+ {
+ loc_indexp = vec_elt_at_index (ls->locator_indices, i);
+ ls_indexes = vec_elt_at_index (lcm->locator_to_locator_sets,
+ loc_indexp[0]);
+ for (j = 0; j < vec_len (ls_indexes[0]); j++)
+ {
+ ls_indexp = vec_elt_at_index (ls_indexes[0], j);
+ if (ls_indexp[0] == lsi)
+ break;
+ }
+
+ /* delete index for removed locator-set */
+ vec_del1 (ls_indexes[0], j);
+
+ /* delete locator if it's part of no locator-set */
+ if (vec_len (ls_indexes[0]) == 0)
+ {
+ pool_put_index (lcm->locator_pool, loc_indexp[0]);
+ vec_add1 (to_be_deleted, i);
+ }
+ }
+
+ if (to_be_deleted)
+ {
+ for (i = 0; i < vec_len (to_be_deleted); i++)
+ {
+ loc_indexp = vec_elt_at_index (to_be_deleted, i);
+ vec_del1 (ls->locator_indices, loc_indexp[0]);
+ }
+ vec_free (to_be_deleted);
+ }
+}
+
+static inline uword *
+get_locator_set_index (vnet_lisp_add_del_locator_set_args_t * a, uword * p)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+
+ ASSERT (a != NULL);
+ ASSERT (p != NULL);
+
+ /* find locator-set */
+ if (a->local)
+ {
+ p = hash_get_mem (lcm->locator_set_index_by_name, a->name);
+ }
+ else
+ {
+ *p = a->index;
+ }
+
+ return p;
+}
+
+static inline int
+is_locator_in_locator_set (lisp_cp_main_t * lcm, locator_set_t * ls,
+ locator_t * loc)
+{
+ locator_t *itloc;
+ u32 *locit;
+
+ ASSERT (ls != NULL);
+ ASSERT (loc != NULL);
+
+ vec_foreach (locit, ls->locator_indices)
+ {
+ itloc = pool_elt_at_index (lcm->locator_pool, locit[0]);
+ if ((ls->local && itloc->sw_if_index == loc->sw_if_index) ||
+ (!ls->local && !gid_address_cmp (&itloc->address, &loc->address)))
+ {
+ clib_warning ("Duplicate locator");
+ return VNET_API_ERROR_VALUE_EXIST;
+ }
+ }
+
+ return 0;
+}
+
+static inline void
+remove_locator_from_locator_set (locator_set_t * ls, u32 * locit,
+ u32 ls_index, u32 loc_id)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ u32 **ls_indexes = NULL;
+
+ ASSERT (ls != NULL);
+ ASSERT (locit != NULL);
+
+ ls_indexes = vec_elt_at_index (lcm->locator_to_locator_sets, locit[0]);
+ pool_put_index (lcm->locator_pool, locit[0]);
+ vec_del1 (ls->locator_indices, loc_id);
+ vec_del1 (ls_indexes[0], ls_index);
+}
+
+int
+vnet_lisp_add_del_locator (vnet_lisp_add_del_locator_set_args_t * a,
+ locator_set_t * ls, u32 * ls_result)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ locator_t *loc = NULL, *itloc = NULL;
+ uword _p = (u32) ~ 0, *p = &_p;
+ u32 loc_index = ~0, ls_index = ~0, *locit = NULL, **ls_indexes = NULL;
+ u32 loc_id = ~0;
+ int ret = 0;
+
+ ASSERT (a != NULL);
+
+ if (vnet_lisp_enable_disable_status () == 0)
+ {
+ clib_warning ("LISP is disabled!");
+ return VNET_API_ERROR_LISP_DISABLED;
+ }
+
+ p = get_locator_set_index (a, p);
+ if (!p)
+ {
+ clib_warning ("locator-set %v doesn't exist", a->name);
+ return VNET_API_ERROR_INVALID_ARGUMENT;
+ }
+
+ if (ls == 0)
+ {
+ ls = pool_elt_at_index (lcm->locator_set_pool, p[0]);
+ if (!ls)
+ {
+ clib_warning ("locator-set %d to be overwritten doesn't exist!",
+ p[0]);
+ return VNET_API_ERROR_INVALID_ARGUMENT;
+ }
+ }
+
+ if (a->is_add)
+ {
+ if (ls_result)
+ ls_result[0] = p[0];
+
+ /* allocate locators */
+ vec_foreach (itloc, a->locators)
+ {
+ ret = is_locator_in_locator_set (lcm, ls, itloc);
+ if (0 != ret)
+ {
+ return ret;
+ }
+
+ pool_get (lcm->locator_pool, loc);
+ loc[0] = itloc[0];
+ loc_index = loc - lcm->locator_pool;
+
+ vec_add1 (ls->locator_indices, loc_index);
+
+ vec_validate (lcm->locator_to_locator_sets, loc_index);
+ ls_indexes = vec_elt_at_index (lcm->locator_to_locator_sets,
+ loc_index);
+ vec_add1 (ls_indexes[0], p[0]);
+ }
+ }
+ else
+ {
+ ls_index = p[0];
+
+ itloc = a->locators;
+ loc_id = 0;
+ vec_foreach (locit, ls->locator_indices)
+ {
+ loc = pool_elt_at_index (lcm->locator_pool, locit[0]);
+
+ if (loc->local && loc->sw_if_index == itloc->sw_if_index)
+ {
+ remove_locator_from_locator_set (ls, locit, ls_index, loc_id);
+ }
+ if (0 == loc->local &&
+ !gid_address_cmp (&loc->address, &itloc->address))
+ {
+ remove_locator_from_locator_set (ls, locit, ls_index, loc_id);
+ }
+
+ loc_id++;
+ }
+ }
+
+ return 0;
+}
+
+int
+vnet_lisp_add_del_locator_set (vnet_lisp_add_del_locator_set_args_t * a,
+ u32 * ls_result)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ locator_set_t *ls;
+ uword _p = (u32) ~ 0, *p = &_p;
+ u32 ls_index;
+ u32 **eid_indexes;
+ int ret = 0;
+
+ if (vnet_lisp_enable_disable_status () == 0)
+ {
+ clib_warning ("LISP is disabled!");
+ return VNET_API_ERROR_LISP_DISABLED;
+ }
+
+ if (a->is_add)
+ {
+ p = get_locator_set_index (a, p);
+
+ /* overwrite */
+ if (p && p[0] != (u32) ~ 0)
+ {
+ ls = pool_elt_at_index (lcm->locator_set_pool, p[0]);
+ if (!ls)
+ {
+ clib_warning ("locator-set %d to be overwritten doesn't exist!",
+ p[0]);
+ return -1;
+ }
+
+ /* clean locator to locator-set vectors and remove locators if
+ * they're not part of another locator-set */
+ clean_locator_to_locator_set (lcm, p[0]);
+
+ /* remove locator indices from locator set */
+ vec_free (ls->locator_indices);
+
+ ls_index = p[0];
+
+ if (ls_result)
+ ls_result[0] = p[0];
+ }
+ /* new locator-set */
+ else
+ {
+ pool_get (lcm->locator_set_pool, ls);
+ memset (ls, 0, sizeof (*ls));
+ ls_index = ls - lcm->locator_set_pool;
+
+ if (a->local)
+ {
+ ls->name = vec_dup (a->name);
+
+ if (!lcm->locator_set_index_by_name)
+ lcm->locator_set_index_by_name =
+ hash_create_vec (/* size */ 0, sizeof (ls->name[0]), sizeof (uword));
+ hash_set_mem (lcm->locator_set_index_by_name, ls->name,
+ ls_index);
+
+ /* mark as local locator-set */
+ vec_add1 (lcm->local_locator_set_indexes, ls_index);
+ }
+ ls->local = a->local;
+ if (ls_result)
+ ls_result[0] = ls_index;
+ }
+
+ ret = vnet_lisp_add_del_locator (a, ls, NULL);
+ if (0 != ret)
+ {
+ return ret;
+ }
+ }
+ else
+ {
+ p = get_locator_set_index (a, p);
+ if (!p)
+ {
+ clib_warning ("locator-set %v doesn't exists", a->name);
+ return -1;
+ }
+
+ ls = pool_elt_at_index (lcm->locator_set_pool, p[0]);
+ if (!ls)
+ {
+ clib_warning ("locator-set with index %d doesn't exists", p[0]);
+ return -1;
+ }
+
+ if (lcm->mreq_itr_rlocs == p[0])
+ {
+ clib_warning ("Can't delete the locator-set used to constrain "
+ "the itr-rlocs in map-requests!");
+ return -1;
+ }
+
+ if (vec_len (lcm->locator_set_to_eids) != 0)
+ {
+ eid_indexes = vec_elt_at_index (lcm->locator_set_to_eids, p[0]);
+ if (vec_len (eid_indexes[0]) != 0)
+ {
+ clib_warning
+ ("Can't delete a locator that supports a mapping!");
+ return -1;
+ }
+ }
+
+ /* clean locator to locator-sets data */
+ clean_locator_to_locator_set (lcm, p[0]);
+
+ if (ls->local)
+ {
+ u32 it, lsi;
+
+ vec_foreach_index (it, lcm->local_locator_set_indexes)
+ {
+ lsi = vec_elt (lcm->local_locator_set_indexes, it);
+ if (lsi == p[0])
+ {
+ vec_del1 (lcm->local_locator_set_indexes, it);
+ break;
+ }
+ }
+ hash_unset_mem (lcm->locator_set_index_by_name, ls->name);
+ }
+ vec_free (ls->name);
+ vec_free (ls->locator_indices);
+ pool_put (lcm->locator_set_pool, ls);
+ }
+ return 0;
+}
+
+int
+vnet_lisp_rloc_probe_enable_disable (u8 is_enable)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+
+ lcm->rloc_probing = is_enable;
+ return 0;
+}
+
+int
+vnet_lisp_map_register_enable_disable (u8 is_enable)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+
+ lcm->map_registering = is_enable;
+ return 0;
+}
+
+clib_error_t *
+vnet_lisp_enable_disable (u8 is_enable)
+{
+ u32 vni, dp_table;
+ clib_error_t *error = 0;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ vnet_lisp_gpe_enable_disable_args_t _a, *a = &_a;
+
+ a->is_en = is_enable;
+ error = vnet_lisp_gpe_enable_disable (a);
+ if (error)
+ {
+ return clib_error_return (0, "failed to %s data-plane!",
+ a->is_en ? "enable" : "disable");
+ }
+
+ if (is_enable)
+ {
+ /* enable all l2 and l3 ifaces */
+
+ /* *INDENT-OFF* */
+ hash_foreach(vni, dp_table, lcm->table_id_by_vni, ({
+ dp_add_del_iface(lcm, vni, 0, 1);
+ }));
+ hash_foreach(vni, dp_table, lcm->bd_id_by_vni, ({
+ dp_add_del_iface(lcm, vni, /* is_l2 */ 1, 1);
+ }));
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ /* clear interface table */
+ hash_free (lcm->fwd_entry_by_mapping_index);
+ pool_free (lcm->fwd_entry_pool);
+ }
+
+ /* update global flag */
+ lcm->is_enabled = is_enable;
+
+ return 0;
+}
+
+static clib_error_t *
+lisp_enable_disable_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 is_enabled = 0;
+ u8 is_set = 0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "enable"))
+ {
+ is_set = 1;
+ is_enabled = 1;
+ }
+ else if (unformat (line_input, "disable"))
+ is_set = 1;
+ else
+ {
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+ }
+
+ if (!is_set)
+ return clib_error_return (0, "state not set");
+
+ vnet_lisp_enable_disable (is_enabled);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_cp_enable_disable_command) = {
+ .path = "lisp",
+ .short_help = "lisp [enable|disable]",
+ .function = lisp_enable_disable_command_fn,
+};
+/* *INDENT-ON* */
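+
+/**
+ * Illustrative usage of the command above:
+ *
+ *   vpp# lisp enable
+ *   vpp# lisp disable
+ */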
+
+static clib_error_t *
+lisp_map_register_enable_disable_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 is_enabled = 0;
+ u8 is_set = 0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "enable"))
+ {
+ is_set = 1;
+ is_enabled = 1;
+ }
+ else if (unformat (line_input, "disable"))
+ is_set = 1;
+ else
+ {
+ vlib_cli_output (vm, "parse error: '%U'", format_unformat_error,
+ line_input);
+ return 0;
+ }
+ }
+
+ if (!is_set)
+ {
+ vlib_cli_output (vm, "state not set!");
+ return 0;
+ }
+
+ vnet_lisp_map_register_enable_disable (is_enabled);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_map_register_enable_disable_command) = {
+ .path = "lisp map-register",
+ .short_help = "lisp map-register [enable|disable]",
+ .function = lisp_map_register_enable_disable_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+lisp_rloc_probe_enable_disable_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 is_enabled = 0;
+ u8 is_set = 0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "enable"))
+ {
+ is_set = 1;
+ is_enabled = 1;
+ }
+ else if (unformat (line_input, "disable"))
+ is_set = 1;
+ else
+ {
+ vlib_cli_output (vm, "parse error: '%U'", format_unformat_error,
+ line_input);
+ return 0;
+ }
+ }
+
+ if (!is_set)
+ {
+ vlib_cli_output (vm, "state not set!");
+ return 0;
+ }
+
+ vnet_lisp_rloc_probe_enable_disable (is_enabled);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_rloc_probe_enable_disable_command) = {
+ .path = "lisp rloc-probe",
+ .short_help = "lisp rloc-probe [enable|disable]",
+ .function = lisp_rloc_probe_enable_disable_command_fn,
+};
+/* *INDENT-ON* */
+
+u8
+vnet_lisp_enable_disable_status (void)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ return lcm->is_enabled;
+}
+
+static u8 *
+format_lisp_status (u8 * s, va_list * args)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ return format (s, "%s", lcm->is_enabled ? "enabled" : "disabled");
+}
+
+static clib_error_t *
+lisp_show_status_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ u8 *msg = 0;
+ msg = format (msg, "feature: %U\ngpe: %U\n",
+ format_lisp_status, format_vnet_lisp_gpe_status);
+ vlib_cli_output (vm, "%v", msg);
+ vec_free (msg);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_show_status_command) = {
+ .path = "show lisp status",
+ .short_help = "show lisp status",
+ .function = lisp_show_status_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+lisp_show_eid_table_map_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ hash_pair_t *p;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ uword *vni_table = 0;
+ u8 is_l2 = 0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "l2"))
+ {
+ vni_table = lcm->bd_id_by_vni;
+ is_l2 = 1;
+ }
+ else if (unformat (line_input, "l3"))
+ {
+ vni_table = lcm->table_id_by_vni;
+ is_l2 = 0;
+ }
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ if (!vni_table)
+ {
+ vlib_cli_output (vm, "Error: expected l2|l3 param!\n");
+ return 0;
+ }
+
+ vlib_cli_output (vm, "%=10s%=10s", "VNI", is_l2 ? "BD" : "VRF");
+
+ /* *INDENT-OFF* */
+ hash_foreach_pair (p, vni_table,
+ ({
+ vlib_cli_output (vm, "%=10d%=10d", p->key, p->value[0]);
+ }));
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_show_eid_table_map_command) = {
+ .path = "show lisp eid-table map",
+ .short_help = "show lisp eid-table l2|l3",
+ .function = lisp_show_eid_table_map_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+lisp_add_del_locator_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+ vnet_main_t *vnm = lgm->vnet_main;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 is_add = 1;
+ clib_error_t *error = 0;
+ u8 *locator_set_name = 0;
+ locator_t locator, *locators = 0;
+ vnet_lisp_add_del_locator_set_args_t _a, *a = &_a;
+ u32 ls_index = 0;
+ int rv = 0;
+
+ memset (&locator, 0, sizeof (locator));
+ memset (a, 0, sizeof (a[0]));
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "add %_%v%_", &locator_set_name))
+ is_add = 1;
+ else if (unformat (line_input, "del %_%v%_", &locator_set_name))
+ is_add = 0;
+ else if (unformat (line_input, "iface %U p %d w %d",
+ unformat_vnet_sw_interface, vnm,
+ &locator.sw_if_index, &locator.priority,
+ &locator.weight))
+ {
+ locator.local = 1;
+ vec_add1 (locators, locator);
+ }
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+
+ a->name = locator_set_name;
+ a->locators = locators;
+ a->is_add = is_add;
+ a->local = 1;
+
+ rv = vnet_lisp_add_del_locator_set (a, &ls_index);
+ if (0 != rv)
+ {
+ error = clib_error_return (0, "failed to %s locator-set!",
+ is_add ? "add" : "delete");
+ }
+
+done:
+ vec_free (locators);
+ if (locator_set_name)
+ vec_free (locator_set_name);
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_cp_add_del_locator_set_command) = {
+ .path = "lisp locator-set",
+ .short_help = "lisp locator-set add/del <name> [iface <iface-name> "
+ "p <priority> w <weight>]",
+ .function = lisp_add_del_locator_set_command_fn,
+};
+/* *INDENT-ON* */
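+
+/**
+ * Illustrative usage of the command above (interface name is an example
+ * assumption):
+ *
+ *   vpp# lisp locator-set add ls1 iface GigabitEthernet0/8/0 p 1 w 1
+ */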
+
+static clib_error_t *
+lisp_add_del_locator_in_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+ vnet_main_t *vnm = lgm->vnet_main;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 is_add = 1;
+ clib_error_t *error = 0;
+ u8 *locator_set_name = 0;
+ u8 locator_set_name_set = 0;
+ locator_t locator, *locators = 0;
+ vnet_lisp_add_del_locator_set_args_t _a, *a = &_a;
+ u32 ls_index = 0;
+
+ memset (&locator, 0, sizeof (locator));
+ memset (a, 0, sizeof (a[0]));
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "add"))
+ is_add = 1;
+ else if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "locator-set %_%v%_", &locator_set_name))
+ locator_set_name_set = 1;
+ else if (unformat (line_input, "iface %U p %d w %d",
+ unformat_vnet_sw_interface, vnm,
+ &locator.sw_if_index, &locator.priority,
+ &locator.weight))
+ {
+ locator.local = 1;
+ vec_add1 (locators, locator);
+ }
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+
+ if (!locator_set_name_set)
+ {
+ error = clib_error_return (0, "locator_set name not set!");
+ goto done;
+ }
+
+ a->name = locator_set_name;
+ a->locators = locators;
+ a->is_add = is_add;
+ a->local = 1;
+
+ vnet_lisp_add_del_locator (a, 0, &ls_index);
+
+done:
+ vec_free (locators);
+ vec_free (locator_set_name);
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_cp_add_del_locator_in_set_command) = {
+ .path = "lisp locator",
+ .short_help = "lisp locator add/del locator-set <name> iface <iface-name> "
+ "p <priority> w <weight>",
+ .function = lisp_add_del_locator_in_set_command_fn,
+};
+/* *INDENT-ON* */
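+
+/**
+ * Illustrative usage of the command above (locator-set and interface
+ * names are example assumptions):
+ *
+ *   vpp# lisp locator add locator-set ls1 iface GigabitEthernet0/8/0 p 2 w 1
+ */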
+
+static clib_error_t *
+lisp_cp_show_locator_sets_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ locator_set_t *lsit;
+ locator_t *loc;
+ u32 *locit;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+
+ vlib_cli_output (vm, "%s%=16s%=16s%=16s", "Locator-set", "Locator",
+ "Priority", "Weight");
+
+ /* *INDENT-OFF* */
+ pool_foreach (lsit, lcm->locator_set_pool,
+ ({
+ u8 * msg = 0;
+ int next_line = 0;
+ if (lsit->local)
+ {
+ msg = format (msg, "%v", lsit->name);
+ }
+ else
+ {
+ msg = format (msg, "<%s-%d>", "remote", lsit - lcm->locator_set_pool);
+ }
+ vec_foreach (locit, lsit->locator_indices)
+ {
+ if (next_line)
+ {
+ msg = format (msg, "%16s", " ");
+ }
+ loc = pool_elt_at_index (lcm->locator_pool, locit[0]);
+ if (loc->local)
+ msg = format (msg, "%16d%16d%16d\n", loc->sw_if_index, loc->priority,
+ loc->weight);
+ else
+ msg = format (msg, "%16U%16d%16d\n", format_ip_address,
+ &gid_address_ip(&loc->address), loc->priority,
+ loc->weight);
+ next_line = 1;
+ }
+ vlib_cli_output (vm, "%v", msg);
+ vec_free (msg);
+ }));
+ /* *INDENT-ON* */
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_cp_show_locator_sets_command) = {
+ .path = "show lisp locator-set",
+ .short_help = "Shows locator-sets",
+ .function = lisp_cp_show_locator_sets_command_fn,
+};
+/* *INDENT-ON* */
+
+int
+vnet_lisp_add_del_map_resolver (vnet_lisp_add_del_map_resolver_args_t * a)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ u32 i;
+ lisp_msmr_t _mr, *mr = &_mr;
+
+ if (vnet_lisp_enable_disable_status () == 0)
+ {
+ clib_warning ("LISP is disabled!");
+ return VNET_API_ERROR_LISP_DISABLED;
+ }
+
+ if (a->is_add)
+ {
+ if (get_map_resolver (&a->address))
+ {
+ clib_warning ("map-resolver %U already exists!", format_ip_address,
+ &a->address);
+ return -1;
+ }
+
+ memset (mr, 0, sizeof (*mr));
+ ip_address_copy (&mr->address, &a->address);
+ vec_add1 (lcm->map_resolvers, *mr);
+
+ if (vec_len (lcm->map_resolvers) == 1)
+ lcm->do_map_resolver_election = 1;
+ }
+ else
+ {
+ for (i = 0; i < vec_len (lcm->map_resolvers); i++)
+ {
+ mr = vec_elt_at_index (lcm->map_resolvers, i);
+ if (!ip_address_cmp (&mr->address, &a->address))
+ {
+ if (!ip_address_cmp (&mr->address, &lcm->active_map_resolver))
+ lcm->do_map_resolver_election = 1;
+
+ vec_del1 (lcm->map_resolvers, i);
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static clib_error_t *
+lisp_add_del_map_resolver_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 is_add = 1, addr_set = 0;
+ ip_address_t ip_addr;
+ clib_error_t *error = 0;
+ int rv = 0;
+ vnet_lisp_add_del_map_resolver_args_t _a, *a = &_a;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "add"))
+ is_add = 1;
+ else if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "%U", unformat_ip_address, &ip_addr))
+ addr_set = 1;
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+
+ if (!addr_set)
+ {
+ error = clib_error_return (0, "Map-resolver address must be set!");
+ goto done;
+ }
+
+ a->is_add = is_add;
+ a->address = ip_addr;
+ rv = vnet_lisp_add_del_map_resolver (a);
+ if (0 != rv)
+ {
+ error = clib_error_return (0, "failed to %s map-resolver!",
+ is_add ? "add" : "delete");
+ }
+
+done:
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_add_del_map_resolver_command) = {
+ .path = "lisp map-resolver",
+ .short_help = "lisp map-resolver add/del <ip_address>",
+ .function = lisp_add_del_map_resolver_command_fn,
+};
+/* *INDENT-ON* */
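+
+/* Illustrative usage (the address is hypothetical):
+ *   lisp map-resolver add 192.0.2.1
+ */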
+
+int
+vnet_lisp_add_del_mreq_itr_rlocs (vnet_lisp_add_del_mreq_itr_rloc_args_t * a)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ uword *p = 0;
+
+ if (vnet_lisp_enable_disable_status () == 0)
+ {
+ clib_warning ("LISP is disabled!");
+ return VNET_API_ERROR_LISP_DISABLED;
+ }
+
+ if (a->is_add)
+ {
+ p = hash_get_mem (lcm->locator_set_index_by_name, a->locator_set_name);
+ if (!p)
+ {
+ clib_warning ("locator-set %v doesn't exist", a->locator_set_name);
+ return VNET_API_ERROR_INVALID_ARGUMENT;
+ }
+
+ lcm->mreq_itr_rlocs = p[0];
+ }
+ else
+ {
+ lcm->mreq_itr_rlocs = ~0;
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+lisp_add_del_mreq_itr_rlocs_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 is_add = 1;
+ u8 *locator_set_name = 0;
+ clib_error_t *error = 0;
+ int rv = 0;
+ vnet_lisp_add_del_mreq_itr_rloc_args_t _a, *a = &_a;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "add %_%v%_", &locator_set_name))
+ is_add = 1;
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+
+ a->is_add = is_add;
+ a->locator_set_name = locator_set_name;
+ rv = vnet_lisp_add_del_mreq_itr_rlocs (a);
+ if (0 != rv)
+ {
+ error = clib_error_return (0, "failed to %s map-request itr-rlocs!",
+ is_add ? "add" : "delete");
+ }
+
+ vec_free (locator_set_name);
+
+done:
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_add_del_map_request_command) = {
+ .path = "lisp map-request itr-rlocs",
+ .short_help = "lisp map-request itr-rlocs add/del <locator_set_name>",
+ .function = lisp_add_del_mreq_itr_rlocs_command_fn,
+};
+/* *INDENT-ON* */
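+
+/* Illustrative usage (the locator-set name is hypothetical):
+ *   lisp map-request itr-rlocs add ls1
+ *   lisp map-request itr-rlocs del
+ */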
+
+static clib_error_t *
+lisp_show_mreq_itr_rlocs_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ locator_set_t *loc_set;
+
+ vlib_cli_output (vm, "%=20s", "itr-rlocs");
+
+ if (~0 == lcm->mreq_itr_rlocs)
+ {
+ return 0;
+ }
+
+ loc_set = pool_elt_at_index (lcm->locator_set_pool, lcm->mreq_itr_rlocs);
+
+ vlib_cli_output (vm, "%=20s", loc_set->name);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_show_map_request_command) = {
+ .path = "show lisp map-request itr-rlocs",
+ .short_help = "Shows map-request itr-rlocs",
+ .function = lisp_show_mreq_itr_rlocs_command_fn,
+};
+/* *INDENT-ON* */
+
+/* Statistics (not really errors) */
+#define foreach_lisp_cp_lookup_error \
+_(DROP, "drop") \
+_(MAP_REQUESTS_SENT, "map-request sent")
+
+static char *lisp_cp_lookup_error_strings[] = {
+#define _(sym,string) string,
+ foreach_lisp_cp_lookup_error
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) LISP_CP_LOOKUP_ERROR_##sym,
+ foreach_lisp_cp_lookup_error
+#undef _
+ LISP_CP_LOOKUP_N_ERROR,
+} lisp_cp_lookup_error_t;
+
+typedef enum
+{
+ LISP_CP_LOOKUP_NEXT_DROP,
+ LISP_CP_LOOKUP_N_NEXT,
+} lisp_cp_lookup_next_t;
+
+typedef struct
+{
+ gid_address_t dst_eid;
+ ip_address_t map_resolver_ip;
+} lisp_cp_lookup_trace_t;
+
+u8 *
+format_lisp_cp_lookup_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ lisp_cp_lookup_trace_t *t = va_arg (*args, lisp_cp_lookup_trace_t *);
+
+ s = format (s, "LISP-CP-LOOKUP: map-resolver: %U destination eid %U",
+ format_ip_address, &t->map_resolver_ip, format_gid_address,
+ &t->dst_eid);
+ return s;
+}
+
+int
+get_mr_and_local_iface_ip (lisp_cp_main_t * lcm, ip_address_t * mr_ip,
+ ip_address_t * sloc)
+{
+ lisp_msmr_t *mrit;
+ ip_address_t *a;
+
+ if (vec_len (lcm->map_resolvers) == 0)
+ {
+ clib_warning ("No map-resolver configured");
+ return 0;
+ }
+
+ /* find the first mr ip we have a route to and the ip of the
+ * iface that has a route to it */
+ vec_foreach (mrit, lcm->map_resolvers)
+ {
+ a = &mrit->address;
+ if (0 != ip_fib_get_first_egress_ip_for_dst (lcm, a, sloc))
+ {
+ ip_address_copy (mr_ip, a);
+
+ return 1;
+ }
+ }
+
+ clib_warning ("Can't find map-resolver and local interface ip!");
+ return 0;
+}
+
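+/* Collect ITR RLOCs for a locator-set: one /32 (ip4) or /128 (ip6) gid is
+ * added for every address configured on each locator's interface. */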
+static gid_address_t *
+build_itr_rloc_list (lisp_cp_main_t * lcm, locator_set_t * loc_set)
+{
+ void *addr;
+ u32 i;
+ locator_t *loc;
+ u32 *loc_indexp;
+ ip_interface_address_t *ia = 0;
+ gid_address_t gid_data, *gid = &gid_data;
+ gid_address_t *rlocs = 0;
+ ip_prefix_t *ippref = &gid_address_ippref (gid);
+ ip_address_t *rloc = &ip_prefix_addr (ippref);
+
+ memset (gid, 0, sizeof (gid[0]));
+ gid_address_type (gid) = GID_ADDR_IP_PREFIX;
+ for (i = 0; i < vec_len (loc_set->locator_indices); i++)
+ {
+ loc_indexp = vec_elt_at_index (loc_set->locator_indices, i);
+ loc = pool_elt_at_index (lcm->locator_pool, loc_indexp[0]);
+
+      /* Add ipv4 locators first. TODO: sort them */
+
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (&lcm->im4->lookup_main, ia,
+ loc->sw_if_index, 1 /* unnumbered */,
+ ({
+ addr = ip_interface_address_get_address (&lcm->im4->lookup_main, ia);
+ ip_address_set (rloc, addr, IP4);
+ ip_prefix_len (ippref) = 32;
+ ip_prefix_normalize (ippref);
+ vec_add1 (rlocs, gid[0]);
+ }));
+
+ /* Add ipv6 locators */
+ foreach_ip_interface_address (&lcm->im6->lookup_main, ia,
+ loc->sw_if_index, 1 /* unnumbered */,
+ ({
+ addr = ip_interface_address_get_address (&lcm->im6->lookup_main, ia);
+ ip_address_set (rloc, addr, IP6);
+ ip_prefix_len (ippref) = 128;
+ ip_prefix_normalize (ippref);
+ vec_add1 (rlocs, gid[0]);
+ }));
+ /* *INDENT-ON* */
+
+ }
+ return rlocs;
+}
+
+static vlib_buffer_t *
+build_map_request (lisp_cp_main_t * lcm, gid_address_t * deid,
+ ip_address_t * sloc, ip_address_t * rloc,
+ gid_address_t * itr_rlocs, u64 * nonce_res, u32 * bi_res)
+{
+ vlib_buffer_t *b;
+ u32 bi;
+ vlib_main_t *vm = lcm->vlib_main;
+
+ if (vlib_buffer_alloc (vm, &bi, 1) != 1)
+ {
+ clib_warning ("Can't allocate buffer for Map-Request!");
+ return 0;
+ }
+
+ b = vlib_get_buffer (vm, bi);
+
+ /* leave some space for the encap headers */
+ vlib_buffer_make_headroom (b, MAX_LISP_MSG_ENCAP_LEN);
+
+ /* put lisp msg */
+ lisp_msg_put_mreq (lcm, b, NULL, deid, itr_rlocs, 0 /* smr invoked */ ,
+ 1 /* rloc probe */ , nonce_res);
+
+ /* push outer ip header */
+ pkt_push_udp_and_ip (vm, b, LISP_CONTROL_PORT, LISP_CONTROL_PORT, sloc,
+ rloc);
+
+ bi_res[0] = bi;
+
+ return b;
+}
+
+static vlib_buffer_t *
+build_encapsulated_map_request (lisp_cp_main_t * lcm,
+ gid_address_t * seid, gid_address_t * deid,
+ locator_set_t * loc_set, ip_address_t * mr_ip,
+ ip_address_t * sloc, u8 is_smr_invoked,
+ u64 * nonce_res, u32 * bi_res)
+{
+ vlib_buffer_t *b;
+ u32 bi;
+ gid_address_t *rlocs = 0;
+ vlib_main_t *vm = lcm->vlib_main;
+
+ if (vlib_buffer_alloc (vm, &bi, 1) != 1)
+ {
+ clib_warning ("Can't allocate buffer for Map-Request!");
+ return 0;
+ }
+
+ b = vlib_get_buffer (vm, bi);
+
+ /* leave some space for the encap headers */
+ vlib_buffer_make_headroom (b, MAX_LISP_MSG_ENCAP_LEN);
+
+ /* get rlocs */
+ rlocs = build_itr_rloc_list (lcm, loc_set);
+
+ if (MR_MODE_SRC_DST == lcm->map_request_mode
+ && GID_ADDR_SRC_DST != gid_address_type (deid))
+ {
+ gid_address_t sd;
+ memset (&sd, 0, sizeof (sd));
+ build_src_dst (&sd, seid, deid);
+ lisp_msg_put_mreq (lcm, b, seid, &sd, rlocs, is_smr_invoked,
+ 0 /* rloc probe */ , nonce_res);
+ }
+ else
+ {
+ /* put lisp msg */
+ lisp_msg_put_mreq (lcm, b, seid, deid, rlocs, is_smr_invoked,
+ 0 /* rloc probe */ , nonce_res);
+ }
+
+ /* push ecm: udp-ip-lisp */
+ lisp_msg_push_ecm (vm, b, LISP_CONTROL_PORT, LISP_CONTROL_PORT, seid, deid);
+
+ /* push outer ip header */
+ pkt_push_udp_and_ip (vm, b, LISP_CONTROL_PORT, LISP_CONTROL_PORT, sloc,
+ mr_ip);
+
+ bi_res[0] = bi;
+
+ vec_free (rlocs);
+ return b;
+}
+
+static void
+reset_pending_mr_counters (pending_map_request_t * r)
+{
+ r->time_to_expire = PENDING_MREQ_EXPIRATION_TIME;
+ r->retries_num = 0;
+}
+
+static int
+elect_map_resolver (lisp_cp_main_t * lcm)
+{
+ lisp_msmr_t *mr;
+
+ vec_foreach (mr, lcm->map_resolvers)
+ {
+ if (!mr->is_down)
+ {
+ ip_address_copy (&lcm->active_map_resolver, &mr->address);
+ lcm->do_map_resolver_election = 0;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void
+free_map_register_records (mapping_t * maps)
+{
+ mapping_t *map;
+ vec_foreach (map, maps) vec_free (map->locators);
+
+ vec_free (maps);
+}
+
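+/* Expand a locator-set into concrete locators for a mapping record. For
+ * local locators, the last address found on the interface (per address
+ * family) is used, and the locator matching probed_loc, if any, gets its
+ * probed flag set. */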
+static void
+add_locators (lisp_cp_main_t * lcm, mapping_t * m, u32 locator_set_index,
+ ip_address_t * probed_loc)
+{
+ u32 *li;
+ locator_t *loc, new;
+ ip_interface_address_t *ia = 0;
+ void *addr;
+ ip_address_t *new_ip = &gid_address_ip (&new.address);
+
+ m->locators = 0;
+ locator_set_t *ls = pool_elt_at_index (lcm->locator_set_pool,
+ locator_set_index);
+ vec_foreach (li, ls->locator_indices)
+ {
+ loc = pool_elt_at_index (lcm->locator_pool, li[0]);
+ new = loc[0];
+ if (loc->local)
+ {
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (&lcm->im4->lookup_main, ia,
+ loc->sw_if_index, 1 /* unnumbered */,
+ ({
+ addr = ip_interface_address_get_address (&lcm->im4->lookup_main,
+ ia);
+ ip_address_set (new_ip, addr, IP4);
+ }));
+
+ /* Add ipv6 locators */
+ foreach_ip_interface_address (&lcm->im6->lookup_main, ia,
+ loc->sw_if_index, 1 /* unnumbered */,
+ ({
+ addr = ip_interface_address_get_address (&lcm->im6->lookup_main,
+ ia);
+ ip_address_set (new_ip, addr, IP6);
+ }));
+ /* *INDENT-ON* */
+
+ if (probed_loc && ip_address_cmp (probed_loc, new_ip) == 0)
+ new.probed = 1;
+ }
+ vec_add1 (m->locators, new);
+ }
+}
+
+static mapping_t *
+build_map_register_record_list (lisp_cp_main_t * lcm)
+{
+ mapping_t *recs = 0, rec, *m;
+
+ /* *INDENT-OFF* */
+ pool_foreach(m, lcm->mapping_pool,
+ {
+ /* for now build only local mappings */
+ if (!m->local)
+ continue;
+
+ rec = m[0];
+ add_locators (lcm, &rec, m->locator_set_index, NULL);
+ vec_add1 (recs, rec);
+ });
+ /* *INDENT-ON* */
+
+ return recs;
+}
+
+static int
+update_map_register_auth_data (map_register_hdr_t * map_reg_hdr,
+ lisp_key_type_t key_id, u8 * key,
+ u16 auth_data_len, u32 msg_len)
+{
+ MREG_KEY_ID (map_reg_hdr) = clib_host_to_net_u16 (key_id);
+ MREG_AUTH_DATA_LEN (map_reg_hdr) = clib_host_to_net_u16 (auth_data_len);
+
+ unsigned char *result = HMAC (get_encrypt_fcn (key_id), key, vec_len (key),
+ (unsigned char *) map_reg_hdr, msg_len, NULL,
+ NULL);
+ clib_memcpy (MREG_DATA (map_reg_hdr), result, auth_data_len);
+
+ return 0;
+}
+
+static vlib_buffer_t *
+build_map_register (lisp_cp_main_t * lcm, ip_address_t * sloc,
+ ip_address_t * ms_ip, u64 * nonce_res, u8 want_map_notif,
+ mapping_t * records, lisp_key_type_t key_id, u8 * key,
+ u32 * bi_res)
+{
+ void *map_reg_hdr;
+ vlib_buffer_t *b;
+ u32 bi, auth_data_len = 0, msg_len = 0;
+ vlib_main_t *vm = lcm->vlib_main;
+
+ if (vlib_buffer_alloc (vm, &bi, 1) != 1)
+ {
+ clib_warning ("Can't allocate buffer for Map-Register!");
+ return 0;
+ }
+
+ b = vlib_get_buffer (vm, bi);
+
+ /* leave some space for the encap headers */
+ vlib_buffer_make_headroom (b, MAX_LISP_MSG_ENCAP_LEN);
+
+ auth_data_len = auth_data_len_by_key_id (key_id);
+ map_reg_hdr = lisp_msg_put_map_register (b, records, want_map_notif,
+ auth_data_len, nonce_res,
+ &msg_len);
+
+ update_map_register_auth_data (map_reg_hdr, key_id, key, auth_data_len,
+ msg_len);
+
+ /* push outer ip header */
+ pkt_push_udp_and_ip (vm, b, LISP_CONTROL_PORT, LISP_CONTROL_PORT, sloc,
+ ms_ip);
+
+ bi_res[0] = bi;
+ return b;
+}
+
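+/* Find an egress ip towards the active map-resolver, electing the next live
+ * resolver when needed; if all resolvers are down, mark them all up again so
+ * probing can restart, and return -1. */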
+static int
+get_egress_map_resolver_ip (lisp_cp_main_t * lcm, ip_address_t * ip)
+{
+ lisp_msmr_t *mr;
+  while (lcm->do_map_resolver_election
+         || (0 == ip_fib_get_first_egress_ip_for_dst (lcm,
+                                                      &lcm->active_map_resolver,
+                                                      ip)))
+ {
+ if (0 == elect_map_resolver (lcm))
+ /* all map resolvers are down */
+ {
+ /* restart MR checking by marking all of them up */
+ vec_foreach (mr, lcm->map_resolvers) mr->is_down = 0;
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int
+send_rloc_probe (lisp_cp_main_t * lcm, gid_address_t * deid,
+ u32 local_locator_set_index, ip_address_t * sloc,
+ ip_address_t * rloc)
+{
+ locator_set_t *ls;
+ u32 bi;
+ vlib_buffer_t *b;
+ vlib_frame_t *f;
+ u64 nonce = 0;
+ u32 next_index, *to_next;
+ gid_address_t *itr_rlocs;
+
+ ls = pool_elt_at_index (lcm->locator_set_pool, local_locator_set_index);
+ itr_rlocs = build_itr_rloc_list (lcm, ls);
+
+ b = build_map_request (lcm, deid, sloc, rloc, itr_rlocs, &nonce, &bi);
+ vec_free (itr_rlocs);
+ if (!b)
+ return -1;
+
+ vnet_buffer (b)->sw_if_index[VLIB_TX] = 0;
+
+  next_index = (ip_addr_version (rloc) == IP4) ?
+    ip4_lookup_node.index : ip6_lookup_node.index;
+
+ f = vlib_get_frame_to_node (lcm->vlib_main, next_index);
+
+ /* Enqueue the packet */
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi;
+ f->n_vectors = 1;
+ vlib_put_frame_to_node (lcm->vlib_main, next_index, f);
+
+ hash_set (lcm->map_register_messages_by_nonce, nonce, 0);
+ return 0;
+}
+
+static int
+send_rloc_probes (lisp_cp_main_t * lcm)
+{
+ u8 lprio = 0;
+ mapping_t *lm;
+ fwd_entry_t *e;
+ locator_pair_t *lp;
+ u32 si;
+
+ /* *INDENT-OFF* */
+ pool_foreach (e, lcm->fwd_entry_pool,
+ {
+ if (vec_len (e->locator_pairs) == 0)
+ continue;
+
+ si = gid_dictionary_lookup (&lcm->mapping_index_by_gid, &e->leid);
+ if (~0 == si)
+ {
+ clib_warning ("internal error: cannot find local eid %U in "
+ "map-cache!", format_gid_address, &e->leid);
+ continue;
+ }
+ lm = pool_elt_at_index (lcm->mapping_pool, si);
+
+ /* get the best (lowest) priority */
+ lprio = e->locator_pairs[0].priority;
+
+ /* send rloc-probe for pair(s) with the best remote locator priority */
+ vec_foreach (lp, e->locator_pairs)
+ {
+ if (lp->priority != lprio)
+ break;
+
+ /* get first remote locator */
+ send_rloc_probe (lcm, &e->reid, lm->locator_set_index, &lp->lcl_loc,
+ &lp->rmt_loc);
+ }
+ });
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
+static int
+send_map_register (lisp_cp_main_t * lcm, u8 want_map_notif)
+{
+ u32 bi;
+ vlib_buffer_t *b;
+ ip_address_t sloc;
+ vlib_frame_t *f;
+ u64 nonce = 0;
+ u32 next_index, *to_next;
+ ip_address_t *ms = 0;
+ mapping_t *records, *r, *g;
+
+ // TODO: support multiple map servers and do election
+ if (0 == vec_len (lcm->map_servers))
+ return -1;
+
+ ms = &lcm->map_servers[0].address;
+
+ if (0 == ip_fib_get_first_egress_ip_for_dst (lcm, ms, &sloc))
+ {
+ clib_warning ("no eligible interface address found for %U!",
+ format_ip_address, &lcm->map_servers[0]);
+ return -1;
+ }
+
+ records = build_map_register_record_list (lcm);
+ if (!records)
+ return -1;
+
+ vec_foreach (r, records)
+ {
+ u8 *key = r->key;
+ u8 key_id = r->key_id;
+
+ if (!key)
+ continue; /* no secret key -> map-register cannot be sent */
+
+ g = 0;
+ // TODO: group mappings that share common key
+ vec_add1 (g, r[0]);
+ b = build_map_register (lcm, &sloc, ms, &nonce, want_map_notif, g,
+ key_id, key, &bi);
+ vec_free (g);
+ if (!b)
+ continue;
+
+ vnet_buffer (b)->sw_if_index[VLIB_TX] = 0;
+
+      next_index = (ip_addr_version (ms) == IP4) ?
+        ip4_lookup_node.index : ip6_lookup_node.index;
+
+ f = vlib_get_frame_to_node (lcm->vlib_main, next_index);
+
+ /* Enqueue the packet */
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi;
+ f->n_vectors = 1;
+ vlib_put_frame_to_node (lcm->vlib_main, next_index, f);
+
+ hash_set (lcm->map_register_messages_by_nonce, nonce, 0);
+ }
+ free_map_register_records (records);
+
+ return 0;
+}
+
+#define send_encapsulated_map_request(lcm, seid, deid, smr) \
+ _send_encapsulated_map_request(lcm, seid, deid, smr, 0)
+
+#define resend_encapsulated_map_request(lcm, seid, deid, smr) \
+ _send_encapsulated_map_request(lcm, seid, deid, smr, 1)
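+
+/* is_resend distinguishes retries, which refresh the existing pending
+ * request entry, from fresh requests, which are suppressed while a
+ * duplicate request is still pending. */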
+
+static int
+_send_encapsulated_map_request (lisp_cp_main_t * lcm,
+ gid_address_t * seid, gid_address_t * deid,
+ u8 is_smr_invoked, u8 is_resend)
+{
+ u32 next_index, bi = 0, *to_next, map_index;
+ vlib_buffer_t *b;
+ vlib_frame_t *f;
+ u64 nonce = 0;
+ locator_set_t *loc_set;
+ mapping_t *map;
+ pending_map_request_t *pmr, *duplicate_pmr = 0;
+ ip_address_t sloc;
+ u32 ls_index;
+
+ /* if there is already a pending request remember it */
+
+ /* *INDENT-OFF* */
+ pool_foreach(pmr, lcm->pending_map_requests_pool,
+ ({
+ if (!gid_address_cmp (&pmr->src, seid)
+ && !gid_address_cmp (&pmr->dst, deid))
+ {
+ duplicate_pmr = pmr;
+ break;
+ }
+ }));
+ /* *INDENT-ON* */
+
+ if (!is_resend && duplicate_pmr)
+ {
+ /* don't send the request if there is a pending map request already */
+ return 0;
+ }
+
+ /* get locator-set for seid */
+ if (!lcm->lisp_pitr)
+ {
+ map_index = gid_dictionary_lookup (&lcm->mapping_index_by_gid, seid);
+ if (map_index == ~0)
+ {
+ clib_warning ("No local mapping found in eid-table for %U!",
+ format_gid_address, seid);
+ return -1;
+ }
+
+ map = pool_elt_at_index (lcm->mapping_pool, map_index);
+
+ if (!map->local)
+ {
+ clib_warning
+ ("Mapping found for src eid %U is not marked as local!",
+ format_gid_address, seid);
+ return -1;
+ }
+ ls_index = map->locator_set_index;
+ }
+ else
+ {
+ map_index = lcm->pitr_map_index;
+ map = pool_elt_at_index (lcm->mapping_pool, lcm->pitr_map_index);
+ ls_index = map->locator_set_index;
+ }
+
+ /* overwrite locator set if map-request itr-rlocs configured */
+ if (~0 != lcm->mreq_itr_rlocs)
+ {
+ ls_index = lcm->mreq_itr_rlocs;
+ }
+
+ loc_set = pool_elt_at_index (lcm->locator_set_pool, ls_index);
+
+ if (get_egress_map_resolver_ip (lcm, &sloc) < 0)
+ {
+ if (duplicate_pmr)
+ duplicate_pmr->to_be_removed = 1;
+ return -1;
+ }
+
+ /* build the encapsulated map request */
+ b = build_encapsulated_map_request (lcm, seid, deid, loc_set,
+ &lcm->active_map_resolver,
+ &sloc, is_smr_invoked, &nonce, &bi);
+
+ if (!b)
+ return -1;
+
+ /* set fib index to default and lookup node */
+ vnet_buffer (b)->sw_if_index[VLIB_TX] = 0;
+ next_index = (ip_addr_version (&lcm->active_map_resolver) == IP4) ?
+ ip4_lookup_node.index : ip6_lookup_node.index;
+
+ f = vlib_get_frame_to_node (lcm->vlib_main, next_index);
+
+ /* Enqueue the packet */
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi;
+ f->n_vectors = 1;
+ vlib_put_frame_to_node (lcm->vlib_main, next_index, f);
+
+ if (duplicate_pmr)
+ /* if there is a pending request already update it */
+ {
+ if (clib_fifo_elts (duplicate_pmr->nonces) >= PENDING_MREQ_QUEUE_LEN)
+ {
+ /* remove the oldest nonce */
+ u64 CLIB_UNUSED (tmp), *nonce_del;
+ nonce_del = clib_fifo_head (duplicate_pmr->nonces);
+ hash_unset (lcm->pending_map_requests_by_nonce, nonce_del[0]);
+ clib_fifo_sub1 (duplicate_pmr->nonces, tmp);
+ }
+
+ clib_fifo_add1 (duplicate_pmr->nonces, nonce);
+ hash_set (lcm->pending_map_requests_by_nonce, nonce,
+ duplicate_pmr - lcm->pending_map_requests_pool);
+ }
+ else
+ {
+ /* add map-request to pending requests table */
+ pool_get (lcm->pending_map_requests_pool, pmr);
+ memset (pmr, 0, sizeof (*pmr));
+ gid_address_copy (&pmr->src, seid);
+ gid_address_copy (&pmr->dst, deid);
+ clib_fifo_add1 (pmr->nonces, nonce);
+ pmr->is_smr_invoked = is_smr_invoked;
+ reset_pending_mr_counters (pmr);
+ hash_set (lcm->pending_map_requests_by_nonce, nonce,
+ pmr - lcm->pending_map_requests_pool);
+ }
+
+ return 0;
+}
+
+static void
+get_src_and_dst_ip (void *hdr, ip_address_t * src, ip_address_t * dst)
+{
+ ip4_header_t *ip4 = hdr;
+ ip6_header_t *ip6;
+
+ if ((ip4->ip_version_and_header_length & 0xF0) == 0x40)
+ {
+ ip_address_set (src, &ip4->src_address, IP4);
+ ip_address_set (dst, &ip4->dst_address, IP4);
+ }
+ else
+ {
+ ip6 = hdr;
+ ip_address_set (src, &ip6->src_address, IP6);
+ ip_address_set (dst, &ip6->dst_address, IP6);
+ }
+}
+
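+/* Map the buffer's RX interface to its FIB table and look up the vni bound
+ * to that table id; returns ~0 when the vrf is not mapped to a vni. */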
+static u32
+lisp_get_vni_from_buffer_ip (lisp_cp_main_t * lcm, vlib_buffer_t * b,
+ u8 version)
+{
+ uword *vnip;
+ u32 vni = ~0, table_id = ~0;
+
+ table_id = fib_table_get_table_id_for_sw_if_index ((version ==
+ IP4 ? FIB_PROTOCOL_IP4 :
+ FIB_PROTOCOL_IP6),
+ vnet_buffer
+ (b)->sw_if_index
+ [VLIB_RX]);
+
+ vnip = hash_get (lcm->vni_by_table_id, table_id);
+ if (vnip)
+ vni = vnip[0];
+ else
+ clib_warning ("vrf %d is not mapped to any vni!", table_id);
+
+ return vni;
+}
+
+always_inline u32
+lisp_get_vni_from_buffer_eth (lisp_cp_main_t * lcm, vlib_buffer_t * b)
+{
+ uword *vnip;
+ u32 vni = ~0;
+ u32 sw_if_index0;
+
+ l2input_main_t *l2im = &l2input_main;
+ l2_input_config_t *config;
+ l2_bridge_domain_t *bd_config;
+
+ sw_if_index0 = vnet_buffer (b)->sw_if_index[VLIB_RX];
+ config = vec_elt_at_index (l2im->configs, sw_if_index0);
+ bd_config = vec_elt_at_index (l2im->bd_configs, config->bd_index);
+
+ vnip = hash_get (lcm->vni_by_bd_id, bd_config->bd_id);
+ if (vnip)
+ vni = vnip[0];
+ else
+ clib_warning ("bridge domain %d is not mapped to any vni!",
+ config->bd_index);
+
+ return vni;
+}
+
+always_inline void
+get_src_and_dst_eids_from_buffer (lisp_cp_main_t * lcm, vlib_buffer_t * b,
+ gid_address_t * src, gid_address_t * dst)
+{
+ u32 vni = 0;
+ u16 type;
+
+ memset (src, 0, sizeof (*src));
+ memset (dst, 0, sizeof (*dst));
+ type = vnet_buffer (b)->lisp.overlay_afi;
+
+ if (LISP_AFI_IP == type || LISP_AFI_IP6 == type)
+ {
+ ip4_header_t *ip;
+ u8 version, preflen;
+
+ gid_address_type (src) = GID_ADDR_IP_PREFIX;
+ gid_address_type (dst) = GID_ADDR_IP_PREFIX;
+
+ ip = vlib_buffer_get_current (b);
+ get_src_and_dst_ip (ip, &gid_address_ip (src), &gid_address_ip (dst));
+
+ version = gid_address_ip_version (src);
+ preflen = ip_address_max_len (version);
+ gid_address_ippref_len (src) = preflen;
+ gid_address_ippref_len (dst) = preflen;
+
+ vni = lisp_get_vni_from_buffer_ip (lcm, b, version);
+ gid_address_vni (dst) = vni;
+ gid_address_vni (src) = vni;
+ }
+ else if (LISP_AFI_MAC == type)
+ {
+ ethernet_header_t *eh;
+
+ eh = vlib_buffer_get_current (b);
+
+ gid_address_type (src) = GID_ADDR_MAC;
+ gid_address_type (dst) = GID_ADDR_MAC;
+ mac_copy (&gid_address_mac (src), eh->src_address);
+ mac_copy (&gid_address_mac (dst), eh->dst_address);
+
+ /* get vni */
+ vni = lisp_get_vni_from_buffer_eth (lcm, b);
+
+ gid_address_vni (dst) = vni;
+ gid_address_vni (src) = vni;
+ }
+}
+
+static uword
+lisp_cp_lookup_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame, int overlay)
+{
+ u32 *from, *to_next_drop, di, si;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ u32 pkts_mapped = 0;
+ uword n_left_from, n_left_to_next_drop;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, LISP_CP_LOOKUP_NEXT_DROP,
+ to_next_drop, n_left_to_next_drop);
+
+ while (n_left_from > 0 && n_left_to_next_drop > 0)
+ {
+ u32 pi0;
+ vlib_buffer_t *b0;
+ gid_address_t src, dst;
+
+ pi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next_drop[0] = pi0;
+ to_next_drop += 1;
+ n_left_to_next_drop -= 1;
+
+ b0 = vlib_get_buffer (vm, pi0);
+ b0->error = node->errors[LISP_CP_LOOKUP_ERROR_DROP];
+ vnet_buffer (b0)->lisp.overlay_afi = overlay;
+
+ /* src/dst eid pair */
+ get_src_and_dst_eids_from_buffer (lcm, b0, &src, &dst);
+
+	  /* if we already have a remote mapping for the destination in the
+	   * map-cache, add a forwarding tunnel directly; if not, send a
+	   * map-request */
+ di = gid_dictionary_sd_lookup (&lcm->mapping_index_by_gid, &dst,
+ &src);
+ if (~0 != di)
+ {
+ mapping_t *m = vec_elt_at_index (lcm->mapping_pool, di);
+ /* send a map-request also in case of negative mapping entry
+ with corresponding action */
+ if (m->action == LISP_SEND_MAP_REQUEST)
+ {
+ /* send map-request */
+ queue_map_request (&src, &dst, 0 /* smr_invoked */ ,
+ 0 /* is_resend */ );
+ pkts_mapped++;
+ }
+ else
+ {
+ si = gid_dictionary_lookup (&lcm->mapping_index_by_gid,
+ &src);
+ if (~0 != si)
+ {
+ dp_add_fwd_entry (lcm, si, di);
+ }
+ }
+ }
+ else
+ {
+ /* send map-request */
+ queue_map_request (&src, &dst, 0 /* smr_invoked */ ,
+ 0 /* is_resend */ );
+ pkts_mapped++;
+ }
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ lisp_cp_lookup_trace_t *tr = vlib_add_trace (vm, node, b0,
+ sizeof (*tr));
+
+ memset (tr, 0, sizeof (*tr));
+ gid_address_copy (&tr->dst_eid, &dst);
+ ip_address_copy (&tr->map_resolver_ip,
+ &lcm->active_map_resolver);
+ }
+ gid_address_free (&dst);
+ gid_address_free (&src);
+ }
+
+ vlib_put_next_frame (vm, node, LISP_CP_LOOKUP_NEXT_DROP,
+ n_left_to_next_drop);
+ }
+ vlib_node_increment_counter (vm, node->node_index,
+ LISP_CP_LOOKUP_ERROR_MAP_REQUESTS_SENT,
+ pkts_mapped);
+ return from_frame->n_vectors;
+}
+
+static uword
+lisp_cp_lookup_ip4 (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ return (lisp_cp_lookup_inline (vm, node, from_frame, LISP_AFI_IP));
+}
+
+static uword
+lisp_cp_lookup_ip6 (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ return (lisp_cp_lookup_inline (vm, node, from_frame, LISP_AFI_IP6));
+}
+
+static uword
+lisp_cp_lookup_l2 (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ return (lisp_cp_lookup_inline (vm, node, from_frame, LISP_AFI_MAC));
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (lisp_cp_lookup_ip4_node) = {
+ .function = lisp_cp_lookup_ip4,
+ .name = "lisp-cp-lookup-ip4",
+ .vector_size = sizeof (u32),
+ .format_trace = format_lisp_cp_lookup_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = LISP_CP_LOOKUP_N_ERROR,
+ .error_strings = lisp_cp_lookup_error_strings,
+
+ .n_next_nodes = LISP_CP_LOOKUP_N_NEXT,
+
+ .next_nodes = {
+ [LISP_CP_LOOKUP_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (lisp_cp_lookup_ip6_node) = {
+ .function = lisp_cp_lookup_ip6,
+ .name = "lisp-cp-lookup-ip6",
+ .vector_size = sizeof (u32),
+ .format_trace = format_lisp_cp_lookup_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = LISP_CP_LOOKUP_N_ERROR,
+ .error_strings = lisp_cp_lookup_error_strings,
+
+ .n_next_nodes = LISP_CP_LOOKUP_N_NEXT,
+
+ .next_nodes = {
+ [LISP_CP_LOOKUP_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (lisp_cp_lookup_l2_node) = {
+ .function = lisp_cp_lookup_l2,
+ .name = "lisp-cp-lookup-l2",
+ .vector_size = sizeof (u32),
+ .format_trace = format_lisp_cp_lookup_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = LISP_CP_LOOKUP_N_ERROR,
+ .error_strings = lisp_cp_lookup_error_strings,
+
+ .n_next_nodes = LISP_CP_LOOKUP_N_NEXT,
+
+ .next_nodes = {
+ [LISP_CP_LOOKUP_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/* lisp_cp_input statistics */
+#define foreach_lisp_cp_input_error \
+_(DROP, "drop") \
+_(MAP_REPLIES_RECEIVED, "map-replies received")
+
+static char *lisp_cp_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_lisp_cp_input_error
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) LISP_CP_INPUT_ERROR_##sym,
+ foreach_lisp_cp_input_error
+#undef _
+ LISP_CP_INPUT_N_ERROR,
+} lisp_cp_input_error_t;
+
+typedef enum
+{
+ LISP_CP_INPUT_NEXT_DROP,
+ LISP_CP_INPUT_N_NEXT,
+} lisp_cp_input_next_t;
+
+typedef struct
+{
+ gid_address_t dst_eid;
+ ip4_address_t map_resolver_ip;
+} lisp_cp_input_trace_t;
+
+u8 *
+format_lisp_cp_input_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ CLIB_UNUSED (lisp_cp_input_trace_t * t) =
+ va_arg (*args, lisp_cp_input_trace_t *);
+
+ s = format (s, "LISP-CP-INPUT: TODO");
+ return s;
+}
+
+static void
+remove_expired_mapping (lisp_cp_main_t * lcm, u32 mi)
+{
+ mapping_t *m;
+
+ m = pool_elt_at_index (lcm->mapping_pool, mi);
+ lisp_add_del_adjacency (lcm, 0, &m->eid, 0 /* is_add */ );
+ vnet_lisp_add_del_mapping (&m->eid, 0, 0, 0, ~0, 0 /* is_add */ ,
+ 0 /* is_static */ , 0);
+ mapping_delete_timer (lcm, mi);
+}
+
+static void
+mapping_start_expiration_timer (lisp_cp_main_t * lcm, u32 mi,
+ f64 expiration_time)
+{
+ mapping_t *m;
+ u64 now = clib_cpu_time_now ();
+ u64 cpu_cps = lcm->vlib_main->clib_time.clocks_per_second;
+ u64 exp_clock_time = now + expiration_time * cpu_cps;
+
+ m = pool_elt_at_index (lcm->mapping_pool, mi);
+
+ m->timer_set = 1;
+ timing_wheel_insert (&lcm->wheel, exp_clock_time, mi);
+}
+
+static void
+map_records_arg_free (map_records_arg_t * a)
+{
+ mapping_t *m;
+ vec_foreach (m, a->mappings)
+ {
+ vec_free (m->locators);
+ gid_address_free (&m->eid);
+ }
+
+ clib_mem_free (a);
+}
+
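+/* Runs on the main thread (queued via RPC): match the reply nonce against
+ * pending map-requests, cache the received mappings, program forwarding and
+ * start ttl expiration timers, then retire the pending request entry. */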
+void *
+process_map_reply (map_records_arg_t * a)
+{
+ mapping_t *m;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ u32 dst_map_index = 0;
+ pending_map_request_t *pmr;
+ u64 *noncep;
+ uword *pmr_index;
+
+ if (a->is_rloc_probe)
+ goto done;
+
+ /* Check pending requests table and nonce */
+ pmr_index = hash_get (lcm->pending_map_requests_by_nonce, a->nonce);
+ if (!pmr_index)
+ {
+ clib_warning ("No pending map-request entry with nonce %lu!", a->nonce);
+ goto done;
+ }
+ pmr = pool_elt_at_index (lcm->pending_map_requests_pool, pmr_index[0]);
+
+ vec_foreach (m, a->mappings)
+ {
+ /* insert/update mappings cache */
+ vnet_lisp_add_del_mapping (&m->eid, m->locators, m->action,
+ m->authoritative, m->ttl,
+ 1, 0 /* is_static */ , &dst_map_index);
+
+ /* try to program forwarding only if mapping saved or updated */
+ if ((u32) ~ 0 != dst_map_index)
+ {
+ lisp_add_del_adjacency (lcm, &pmr->src, &m->eid, 1);
+ if ((u32) ~ 0 != m->ttl)
+ mapping_start_expiration_timer (lcm, dst_map_index, m->ttl * 60);
+ }
+ }
+
+ /* remove pending map request entry */
+
+ /* *INDENT-OFF* */
+ clib_fifo_foreach (noncep, pmr->nonces, ({
+ hash_unset(lcm->pending_map_requests_by_nonce, noncep[0]);
+ }));
+ /* *INDENT-ON* */
+
+ clib_fifo_free (pmr->nonces);
+ pool_put (lcm->pending_map_requests_pool, pmr);
+
+done:
+ map_records_arg_free (a);
+ return 0;
+}
+
+static int
+is_auth_data_valid (map_notify_hdr_t * h, u32 msg_len,
+ lisp_key_type_t key_id, u8 * key)
+{
+ u8 *auth_data = 0;
+ u16 auth_data_len;
+ int result;
+
+ auth_data_len = auth_data_len_by_key_id (key_id);
+ if ((u16) ~ 0 == auth_data_len)
+ {
+ clib_warning ("invalid length for key_id %d!", key_id);
+ return 0;
+ }
+
+ /* save auth data */
+ vec_validate (auth_data, auth_data_len - 1);
+ clib_memcpy (auth_data, MNOTIFY_DATA (h), auth_data_len);
+
+ /* clear auth data */
+ memset (MNOTIFY_DATA (h), 0, auth_data_len);
+
+ /* get hash of the message */
+ unsigned char *code = HMAC (get_encrypt_fcn (key_id), key, vec_len (key),
+ (unsigned char *) h, msg_len, NULL, NULL);
+
+ result = memcmp (code, auth_data, auth_data_len);
+
+ vec_free (auth_data);
+
+ return !result;
+}
+
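+/* A map-notify acknowledges a previously sent map-register: if its nonce
+ * matches a recorded message, drop the bookkeeping entry; otherwise warn
+ * and ignore it. */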
+static void
+process_map_notify (map_records_arg_t * a)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ uword *pmr_index;
+
+ pmr_index = hash_get (lcm->map_register_messages_by_nonce, a->nonce);
+ if (!pmr_index)
+ {
+ clib_warning ("No pending map-register entry with nonce %lu!",
+ a->nonce);
+ return;
+ }
+
+ map_records_arg_free (a);
+ hash_unset (lcm->map_register_messages_by_nonce, a->nonce);
+}
+
+static mapping_t *
+get_mapping (lisp_cp_main_t * lcm, gid_address_t * e)
+{
+ u32 mi;
+
+ mi = gid_dictionary_lookup (&lcm->mapping_index_by_gid, e);
+ if (~0 == mi)
+ {
+ clib_warning ("eid %U not found in map-cache!", unformat_gid_address,
+ e);
+ return 0;
+ }
+ return pool_elt_at_index (lcm->mapping_pool, mi);
+}
+
+/**
+ * When a map-notify is received, all EIDs in the record list must share a
+ * common key. The key is then used to verify the authentication data in
+ * the map-notify message.
+ */
+static int
+map_record_integrity_check (lisp_cp_main_t * lcm, mapping_t * maps,
+ u32 key_id, u8 ** key_out)
+{
+ u32 i, len = vec_len (maps);
+ mapping_t *m;
+
+ /* get key of the first mapping */
+ m = get_mapping (lcm, &maps[0].eid);
+ if (!m || !m->key)
+ return -1;
+
+ key_out[0] = m->key;
+
+ for (i = 1; i < len; i++)
+ {
+ m = get_mapping (lcm, &maps[i].eid);
+ if (!m || !m->key)
+ return -1;
+
+ if (key_id != m->key_id || vec_cmp (m->key, key_out[0]))
+ {
+ clib_warning ("keys does not match! %v, %v", key_out[0], m->key);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int
+parse_map_records (vlib_buffer_t * b, map_records_arg_t * a, u8 count)
+{
+ locator_t *locators = 0;
+ u32 i, len;
+ gid_address_t deid;
+ mapping_t m;
+ locator_t *loc;
+
+ /* parse record eid */
+ for (i = 0; i < count; i++)
+ {
+ len = lisp_msg_parse_mapping_record (b, &deid, &locators, NULL);
+ if (len == ~0)
+ {
+ clib_warning ("Failed to parse mapping record!");
+ vec_foreach (loc, locators) locator_free (loc);
+ vec_free (locators);
+ return -1;
+ }
+
+ m.locators = locators;
+ gid_address_copy (&m.eid, &deid);
+ vec_add1 (a->mappings, m);
+ }
+
+ return 0;
+}
+
+static map_records_arg_t *
+parse_map_notify (vlib_buffer_t * b)
+{
+ int rc = 0;
+ map_notify_hdr_t *mnotif_hdr;
+ lisp_key_type_t key_id;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ u8 *key = 0;
+ gid_address_t deid;
+ u16 auth_data_len = 0;
+ u8 record_count;
+ map_records_arg_t *a = clib_mem_alloc (sizeof (*a));
+
+ memset (a, 0, sizeof (*a));
+ mnotif_hdr = vlib_buffer_get_current (b);
+ vlib_buffer_pull (b, sizeof (*mnotif_hdr));
+ memset (&deid, 0, sizeof (deid));
+
+ a->nonce = MNOTIFY_NONCE (mnotif_hdr);
+ key_id = clib_net_to_host_u16 (MNOTIFY_KEY_ID (mnotif_hdr));
+ auth_data_len = auth_data_len_by_key_id (key_id);
+
+ /* advance buffer by authentication data */
+ vlib_buffer_pull (b, auth_data_len);
+
+ record_count = MNOTIFY_REC_COUNT (mnotif_hdr);
+ rc = parse_map_records (b, a, record_count);
+ if (rc != 0)
+ {
+ map_records_arg_free (a);
+ return 0;
+ }
+
+ rc = map_record_integrity_check (lcm, a->mappings, key_id, &key);
+ if (rc != 0)
+ {
+ map_records_arg_free (a);
+ return 0;
+ }
+
+ /* verify authentication data */
+ if (!is_auth_data_valid (mnotif_hdr, vlib_buffer_get_tail (b)
+ - (u8 *) mnotif_hdr, key_id, key))
+ {
+ clib_warning ("Map-notify auth data verification failed for nonce %lu!",
+ a->nonce);
+ map_records_arg_free (a);
+ return 0;
+ }
+ return a;
+}
+
+static vlib_buffer_t *
+build_map_reply (lisp_cp_main_t * lcm, ip_address_t * sloc,
+ ip_address_t * dst, u64 nonce, u8 probe_bit,
+ mapping_t * records, u16 dst_port, u32 * bi_res)
+{
+ vlib_buffer_t *b;
+ u32 bi;
+ vlib_main_t *vm = lcm->vlib_main;
+
+ if (vlib_buffer_alloc (vm, &bi, 1) != 1)
+ {
+ clib_warning ("Can't allocate buffer for Map-Register!");
+ return 0;
+ }
+
+ b = vlib_get_buffer (vm, bi);
+
+ /* leave some space for the encap headers */
+ vlib_buffer_make_headroom (b, MAX_LISP_MSG_ENCAP_LEN);
+
+ lisp_msg_put_map_reply (b, records, nonce, probe_bit);
+
+ /* push outer ip header */
+ pkt_push_udp_and_ip (vm, b, LISP_CONTROL_PORT, dst_port, sloc, dst);
+
+ bi_res[0] = bi;
+ return b;
+}
+
+static int
+send_map_reply (lisp_cp_main_t * lcm, u32 mi, ip_address_t * dst,
+ u8 probe_bit, u64 nonce, u16 dst_port,
+ ip_address_t * probed_loc)
+{
+ ip_address_t src;
+ u32 bi;
+ vlib_buffer_t *b;
+ vlib_frame_t *f;
+ u32 next_index, *to_next;
+ mapping_t *records = 0, *m;
+
+ m = pool_elt_at_index (lcm->mapping_pool, mi);
+ if (!m)
+ return -1;
+
+ vec_add1 (records, m[0]);
+ add_locators (lcm, &records[0], m->locator_set_index, probed_loc);
+ memset (&src, 0, sizeof (src));
+
+ if (!ip_fib_get_first_egress_ip_for_dst (lcm, dst, &src))
+ {
+ clib_warning ("can't find inteface address for %U", format_ip_address,
+ dst);
+ return -1;
+ }
+
+ b = build_map_reply (lcm, &src, dst, nonce, probe_bit, records, dst_port,
+ &bi);
+ if (!b)
+ return -1;
+ free_map_register_records (records);
+
+ vnet_buffer (b)->sw_if_index[VLIB_TX] = 0;
+  next_index = (ip_addr_version (dst) == IP4) ?
+    ip4_lookup_node.index : ip6_lookup_node.index;
+
+ f = vlib_get_frame_to_node (lcm->vlib_main, next_index);
+
+ /* Enqueue the packet */
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi;
+ f->n_vectors = 1;
+ vlib_put_frame_to_node (lcm->vlib_main, next_index, f);
+ return 0;
+}
+
+void
+process_map_request (vlib_main_t * vm, lisp_cp_main_t * lcm,
+ vlib_buffer_t * b)
+{
+ u8 *ip_hdr = 0, *udp_hdr;
+ ip4_header_t *ip4;
+ ip6_header_t *ip6;
+ ip_address_t *dst_loc = 0, probed_loc, src_loc;
+ mapping_t m;
+ map_request_hdr_t *mreq_hdr;
+ gid_address_t src, dst;
+ u64 nonce;
+ u32 i, len = 0;
+ gid_address_t *itr_rlocs = 0;
+
+ mreq_hdr = vlib_buffer_get_current (b);
+
+  // TODO: ugly workaround to find out whether LISP is carried over ip4 or
+  // ip6; needs to be fixed
+ udp_hdr = (u8 *) vlib_buffer_get_current (b) - sizeof (udp_header_t);
+ ip4 = (ip4_header_t *) (udp_hdr - sizeof (ip4_header_t));
+ ip6 = (ip6_header_t *) (udp_hdr - sizeof (ip6_header_t));
+
+ if ((ip4->ip_version_and_header_length & 0xF0) == 0x40)
+ ip_hdr = (u8 *) ip4;
+ else
+ {
+ u32 flags = clib_net_to_host_u32
+ (ip6->ip_version_traffic_class_and_flow_label);
+ if ((flags & 0xF0000000) == 0x60000000)
+ ip_hdr = (u8 *) ip6;
+ else
+ {
+ clib_warning ("internal error: cannot determine whether packet "
+ "is ip4 or 6!");
+ return;
+ }
+ }
+
+ vlib_buffer_pull (b, sizeof (*mreq_hdr));
+
+ nonce = MREQ_NONCE (mreq_hdr);
+
+ if (!MREQ_SMR (mreq_hdr) && !MREQ_RLOC_PROBE (mreq_hdr))
+ {
+      clib_warning
+	("Only SMR Map-Requests and RLOC probes are supported for now!");
+ return;
+ }
+
+ /* parse src eid */
+ len = lisp_msg_parse_addr (b, &src);
+ if (len == ~0)
+ return;
+
+ len = lisp_msg_parse_itr_rlocs (b, &itr_rlocs,
+ MREQ_ITR_RLOC_COUNT (mreq_hdr) + 1);
+ if (len == ~0)
+ return;
+
+ /* parse eid records and send SMR-invoked map-requests */
+ for (i = 0; i < MREQ_REC_COUNT (mreq_hdr); i++)
+ {
+ memset (&dst, 0, sizeof (dst));
+ len = lisp_msg_parse_eid_rec (b, &dst);
+ if (len == ~0)
+ {
+ clib_warning ("Can't parse map-request EID-record");
+ goto done;
+ }
+
+ if (MREQ_SMR (mreq_hdr))
+ {
+ /* send SMR-invoked map-requests */
+ queue_map_request (&dst, &src, 1 /* invoked */ , 0 /* resend */ );
+ }
+ else if (MREQ_RLOC_PROBE (mreq_hdr))
+ {
+ memset (&m, 0, sizeof (m));
+	  u32 mi = gid_dictionary_lookup (&lcm->mapping_index_by_gid, &dst);
+	  if (~0 == mi)
+	    {
+	      clib_warning ("No mapping found for %U!", format_gid_address,
+			    &dst);
+	      goto done;
+	    }
+
+ // TODO: select best locator; for now use the first one
+ dst_loc = &gid_address_ip (&itr_rlocs[0]);
+
+ /* get src/dst IP addresses */
+ get_src_and_dst_ip (ip_hdr, &src_loc, &probed_loc);
+
+ // TODO get source port from buffer
+ u16 src_port = LISP_CONTROL_PORT;
+
+ send_map_reply (lcm, mi, dst_loc, 1 /* probe-bit */ , nonce,
+ src_port, &probed_loc);
+ }
+ }
+
+done:
+ vec_free (itr_rlocs);
+}
+
+static map_records_arg_t *
+parse_map_reply (vlib_buffer_t * b)
+{
+ locator_t probed;
+ gid_address_t deid;
+ void *h;
+ u32 i, len = 0;
+ mapping_t m;
+ map_reply_hdr_t *mrep_hdr;
+ map_records_arg_t *a = clib_mem_alloc (sizeof (*a));
+ memset (a, 0, sizeof (*a));
+ locator_t *locators;
+
+ mrep_hdr = vlib_buffer_get_current (b);
+ a->nonce = MREP_NONCE (mrep_hdr);
+ a->is_rloc_probe = MREP_RLOC_PROBE (mrep_hdr);
+ vlib_buffer_pull (b, sizeof (*mrep_hdr));
+
+ for (i = 0; i < MREP_REC_COUNT (mrep_hdr); i++)
+ {
+ memset (&m, 0, sizeof (m));
+ locators = 0;
+ h = vlib_buffer_get_current (b);
+
+ m.ttl = clib_net_to_host_u32 (MAP_REC_TTL (h));
+ m.action = MAP_REC_ACTION (h);
+ m.authoritative = MAP_REC_AUTH (h);
+
+ len = lisp_msg_parse_mapping_record (b, &deid, &locators, &probed);
+ if (len == ~0)
+ {
+ clib_warning ("Failed to parse mapping record!");
+ map_records_arg_free (a);
+ return 0;
+ }
+
+ m.locators = locators;
+ gid_address_copy (&m.eid, &deid);
+ vec_add1 (a->mappings, m);
+ }
+ return a;
+}
+
+static void
+queue_map_reply_for_processing (map_records_arg_t * a)
+{
+  vl_api_rpc_call_main_thread (process_map_reply, (u8 *) a, sizeof (a[0]));
+}
+
+static void
+queue_map_notify_for_processing (map_records_arg_t * a)
+{
+ vl_api_rpc_call_main_thread (process_map_notify, (u8 *) a, sizeof (a[0]));
+}
+
+static uword
+lisp_cp_input (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, *from, *to_next_drop;
+ lisp_msg_type_e type;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ map_records_arg_t *a;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next_drop;
+
+ vlib_get_next_frame (vm, node, LISP_CP_INPUT_NEXT_DROP,
+ to_next_drop, n_left_to_next_drop);
+ while (n_left_from > 0 && n_left_to_next_drop > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+
+ bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next_drop[0] = bi0;
+ to_next_drop += 1;
+ n_left_to_next_drop -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ type = lisp_msg_type (vlib_buffer_get_current (b0));
+ switch (type)
+ {
+ case LISP_MAP_REPLY:
+ a = parse_map_reply (b0);
+ if (a)
+ queue_map_reply_for_processing (a);
+ break;
+ case LISP_MAP_REQUEST:
+ process_map_request (vm, lcm, b0);
+ break;
+ case LISP_MAP_NOTIFY:
+ a = parse_map_notify (b0);
+ if (a)
+ queue_map_notify_for_processing (a);
+ break;
+ default:
+ clib_warning ("Unsupported LISP message type %d", type);
+ break;
+ }
+
+ b0->error = node->errors[LISP_CP_INPUT_ERROR_DROP];
+
+	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+	    {
+	      /* TODO: lisp-cp-input packet tracing not implemented yet */
+	    }
+ }
+
+ vlib_put_next_frame (vm, node, LISP_CP_INPUT_NEXT_DROP,
+ n_left_to_next_drop);
+ }
+ return from_frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (lisp_cp_input_node) = {
+ .function = lisp_cp_input,
+ .name = "lisp-cp-input",
+ .vector_size = sizeof (u32),
+ .format_trace = format_lisp_cp_input_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = LISP_CP_INPUT_N_ERROR,
+ .error_strings = lisp_cp_input_error_strings,
+
+ .n_next_nodes = LISP_CP_INPUT_N_NEXT,
+
+ .next_nodes = {
+ [LISP_CP_INPUT_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+lisp_cp_init (vlib_main_t * vm)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ clib_error_t *error = 0;
+
+ if ((error = vlib_call_init_function (vm, lisp_gpe_init)))
+ return error;
+
+ lcm->im4 = &ip4_main;
+ lcm->im6 = &ip6_main;
+ lcm->vlib_main = vm;
+ lcm->vnet_main = vnet_get_main ();
+ lcm->mreq_itr_rlocs = ~0;
+ lcm->lisp_pitr = 0;
+ memset (&lcm->active_map_resolver, 0, sizeof (lcm->active_map_resolver));
+
+ gid_dictionary_init (&lcm->mapping_index_by_gid);
+ lcm->do_map_resolver_election = 1;
+ lcm->map_request_mode = MR_MODE_DST_ONLY;
+
+ /* default vrf mapped to vni 0 */
+ hash_set (lcm->table_id_by_vni, 0, 0);
+ hash_set (lcm->vni_by_table_id, 0, 0);
+
+ udp_register_dst_port (vm, UDP_DST_PORT_lisp_cp,
+ lisp_cp_input_node.index, 1 /* is_ip4 */ );
+ udp_register_dst_port (vm, UDP_DST_PORT_lisp_cp6,
+ lisp_cp_input_node.index, 0 /* is_ip4 */ );
+
+ u64 now = clib_cpu_time_now ();
+ timing_wheel_init (&lcm->wheel, now, vm->clib_time.clocks_per_second);
+ return 0;
+}
+
+static void *
+send_map_request_thread_fn (void *arg)
+{
+ map_request_args_t *a = arg;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+
+ if (a->is_resend)
+ resend_encapsulated_map_request (lcm, &a->seid, &a->deid, a->smr_invoked);
+ else
+ send_encapsulated_map_request (lcm, &a->seid, &a->deid, a->smr_invoked);
+
+ return 0;
+}
+
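+/* Map-requests are built and sent on the main thread: queue_map_request
+ * hands the (seid, deid) pair over via vl_api_rpc_call_main_thread rather
+ * than sending inline. */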
+static int
+queue_map_request (gid_address_t * seid, gid_address_t * deid,
+ u8 smr_invoked, u8 is_resend)
+{
+ map_request_args_t a;
+
+ a.is_resend = is_resend;
+ gid_address_copy (&a.seid, seid);
+ gid_address_copy (&a.deid, deid);
+ a.smr_invoked = smr_invoked;
+
+ vl_api_rpc_call_main_thread (send_map_request_thread_fn,
+ (u8 *) & a, sizeof (a));
+ return 0;
+}
+
+/**
+ * Take action on a pending map-request, depending on its expiration time
+ * and retry counter.
+ */
+static void
+update_pending_request (pending_map_request_t * r, f64 dt)
+{
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ lisp_msmr_t *mr;
+
+ if (r->time_to_expire - dt < 0)
+ /* it's time to decide what to do with this pending request */
+ {
+ if (r->retries_num >= NUMBER_OF_RETRIES)
+ /* too many retries -> assume current map resolver is not available */
+ {
+ mr = get_map_resolver (&lcm->active_map_resolver);
+ if (!mr)
+ {
+ clib_warning ("Map resolver %U not found - probably deleted "
+ "by the user recently.", format_ip_address,
+ &lcm->active_map_resolver);
+ }
+ else
+ {
+ clib_warning ("map resolver %U is unreachable, ignoring",
+ format_ip_address, &lcm->active_map_resolver);
+
+ /* mark current map resolver unavailable so it won't be
+ * selected next time */
+ mr->is_down = 1;
+ mr->last_update = vlib_time_now (lcm->vlib_main);
+ }
+
+ reset_pending_mr_counters (r);
+ elect_map_resolver (lcm);
+
+ /* try to find a next eligible map resolver and re-send */
+ queue_map_request (&r->src, &r->dst, r->is_smr_invoked,
+ 1 /* resend */ );
+ }
+ else
+ {
+ /* try again */
+ queue_map_request (&r->src, &r->dst, r->is_smr_invoked,
+ 1 /* resend */ );
+ r->retries_num++;
+ r->time_to_expire = PENDING_MREQ_EXPIRATION_TIME;
+ }
+ }
+ else
+ r->time_to_expire -= dt;
+}
+
+static void
+remove_dead_pending_map_requests (lisp_cp_main_t * lcm)
+{
+ u64 *nonce;
+ pending_map_request_t *pmr;
+ u32 *to_be_removed = 0, *pmr_index;
+
+ /* *INDENT-OFF* */
+ pool_foreach (pmr, lcm->pending_map_requests_pool,
+ ({
+ if (pmr->to_be_removed)
+ {
+ clib_fifo_foreach (nonce, pmr->nonces, ({
+ hash_unset (lcm->pending_map_requests_by_nonce, nonce[0]);
+ }));
+
+ vec_add1 (to_be_removed, pmr - lcm->pending_map_requests_pool);
+ }
+ }));
+ /* *INDENT-ON* */
+
+  vec_foreach (pmr_index, to_be_removed)
+    pool_put_index (lcm->pending_map_requests_pool, pmr_index[0]);
+
+ vec_free (to_be_removed);
+}
+
+static void
+update_rloc_probing (lisp_cp_main_t * lcm, f64 dt)
+{
+ static f64 time_left = RLOC_PROBING_INTERVAL;
+
+ if (!lcm->is_enabled || !lcm->rloc_probing)
+ return;
+
+ time_left -= dt;
+ if (time_left <= 0)
+ {
+ time_left = RLOC_PROBING_INTERVAL;
+ send_rloc_probes (lcm);
+ }
+}
+
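+/* With the default timers, the first QUICK_MAP_REGISTER_MSG_COUNT
+ * map-registers go out roughly every QUICK_MAP_REGISTER_INTERVAL seconds,
+ * after which the period stretches to MAP_REGISTER_INTERVAL (timing
+ * granularity is the process wakeup period). */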
+static void
+update_map_register (lisp_cp_main_t * lcm, f64 dt)
+{
+ static f64 time_left = QUICK_MAP_REGISTER_INTERVAL;
+ static u64 mreg_sent_counter = 0;
+
+ if (!lcm->is_enabled || !lcm->map_registering)
+ return;
+
+ time_left -= dt;
+ if (time_left <= 0)
+ {
+ if (mreg_sent_counter >= QUICK_MAP_REGISTER_MSG_COUNT)
+ time_left = MAP_REGISTER_INTERVAL;
+ else
+ {
+ mreg_sent_counter++;
+ time_left = QUICK_MAP_REGISTER_INTERVAL;
+ }
+ send_map_register (lcm, 1 /* want map notify */ );
+ }
+}
+
+static uword
+send_map_resolver_service (vlib_main_t * vm,
+ vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ u32 *expired = 0;
+ f64 period = 2.0;
+ pending_map_request_t *pmr;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+
+ while (1)
+ {
+ vlib_process_wait_for_event_or_clock (vm, period);
+
+ /* currently no signals are expected - just wait for clock */
+ (void) vlib_process_get_events (vm, 0);
+
+ /* *INDENT-OFF* */
+ pool_foreach (pmr, lcm->pending_map_requests_pool,
+ ({
+ if (!pmr->to_be_removed)
+ update_pending_request (pmr, period);
+ }));
+ /* *INDENT-ON* */
+
+ remove_dead_pending_map_requests (lcm);
+
+ update_map_register (lcm, period);
+ update_rloc_probing (lcm, period);
+
+ u64 now = clib_cpu_time_now ();
+
+ expired = timing_wheel_advance (&lcm->wheel, now, expired, 0);
+ if (vec_len (expired) > 0)
+ {
+ u32 *mi = 0;
+ vec_foreach (mi, expired)
+ {
+ remove_expired_mapping (lcm, mi[0]);
+ }
+ _vec_len (expired) = 0;
+ }
+ }
+
+ /* unreachable */
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (lisp_retry_service_node, static) = {
+ .function = send_map_resolver_service,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "lisp-retry-service",
+ .process_log2_n_stack_bytes = 16,
+};
+/* *INDENT-ON* */
+
+VLIB_INIT_FUNCTION (lisp_cp_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-cp/control.h b/src/vnet/lisp-cp/control.h
new file mode 100644
index 00000000000..e89c6fd6e8d
--- /dev/null
+++ b/src/vnet/lisp-cp/control.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VNET_CONTROL_H_
+#define VNET_CONTROL_H_
+
+#include <vnet/vnet.h>
+#include <vnet/lisp-cp/gid_dictionary.h>
+#include <vnet/lisp-cp/lisp_types.h>
+
+#define NUMBER_OF_RETRIES 1
+#define PENDING_MREQ_EXPIRATION_TIME 3.0 /* seconds */
+#define PENDING_MREQ_QUEUE_LEN 5
+
+#define PENDING_MREG_EXPIRATION_TIME 3.0 /* seconds */
+#define RLOC_PROBING_INTERVAL 60.0
+
+/* when map-registration is enabled, "quick registration" takes place first.
+   In this mode the ETR sends map-register messages at an increased frequency
+   until the specified message count is reached */
+#define QUICK_MAP_REGISTER_MSG_COUNT 3
+#define QUICK_MAP_REGISTER_INTERVAL 3.0
+
+/* normal map-register period */
+#define MAP_REGISTER_INTERVAL 60.0
+
+/* 15 minutes */
+#define MAP_REGISTER_DEFAULT_TTL 900
+
+typedef struct
+{
+ gid_address_t src;
+ gid_address_t dst;
+ u32 retries_num;
+ f64 time_to_expire;
+ u8 is_smr_invoked;
+ u64 *nonces;
+ u8 to_be_removed;
+} pending_map_request_t;
+
+typedef struct
+{
+ gid_address_t leid;
+ gid_address_t reid;
+ u8 is_src_dst;
+ locator_pair_t *locator_pairs;
+} fwd_entry_t;
+
+typedef struct
+{
+ gid_address_t leid;
+ gid_address_t reid;
+} lisp_adjacency_t;
+
+typedef enum
+{
+ IP4_MISS_PACKET,
+ IP6_MISS_PACKET
+} miss_packet_type_t;
+
+/* map-server/map-resolver structure */
+typedef struct
+{
+ u8 is_down;
+ f64 last_update;
+ ip_address_t address;
+ char *key;
+} lisp_msmr_t;
+
+typedef struct
+{
+ /* headers */
+ u8 data[100];
+ u32 length;
+ miss_packet_type_t type;
+} miss_packet_t;
+
+typedef enum
+{
+ MR_MODE_DST_ONLY = 0,
+ MR_MODE_SRC_DST,
+ _MR_MODE_MAX
+} map_request_mode_t;
+
+typedef struct
+{
+ /* LISP feature status */
+ u8 is_enabled;
+
+ /* eid table */
+ gid_dictionary_t mapping_index_by_gid;
+
+ /* pool of mappings */
+ mapping_t *mapping_pool;
+
+ /* hash map of secret keys by mapping index */
+ u8 *key_by_mapping_index;
+
+ /* pool of locators */
+ locator_t *locator_pool;
+
+ /* pool of locator-sets */
+ locator_set_t *locator_set_pool;
+
+  /* vectors of the locator-set indices that contain each locator,
+   * indexed by locator index */
+ u32 **locator_to_locator_sets;
+
+ /* hash map of locators by name */
+ uword *locator_set_index_by_name;
+
+  /* vectors of the eid indices served by each locator-set,
+   * indexed by locator-set index */
+ u32 **locator_set_to_eids;
+
+ /* vectors of indexes for local locator-sets and mappings */
+ u32 *local_mappings_indexes;
+ u32 *local_locator_set_indexes;
+
+ /* hash map of forwarding entries by mapping index */
+ u32 *fwd_entry_by_mapping_index;
+
+ /* forwarding entries pool */
+ fwd_entry_t *fwd_entry_pool;
+
+ /* hash map keyed by nonce of pending map-requests */
+ uword *pending_map_requests_by_nonce;
+
+ /* pool of pending map requests */
+ pending_map_request_t *pending_map_requests_pool;
+
+ /* hash map of sent map register messages */
+ uword *map_register_messages_by_nonce;
+
+ /* vector of map-resolvers */
+ lisp_msmr_t *map_resolvers;
+
+ /* vector of map-servers */
+ lisp_msmr_t *map_servers;
+
+  /* map-resolver address currently used for sending requests.
+   * This has to be an actual address and not an index into the
+   * map_resolvers vector, since the vector may be modified during the
+   * request resend/retry procedure */
+ ip_address_t active_map_resolver;
+
+ u8 do_map_resolver_election;
+
+ /* map-request locator set index */
+ u32 mreq_itr_rlocs;
+
+ /* vni to vrf hash tables */
+ uword *table_id_by_vni;
+ uword *vni_by_table_id;
+
+ /* vni to bd-index hash tables */
+ uword *bd_id_by_vni;
+ uword *vni_by_bd_id;
+
+ /* track l2 and l3 interfaces that have been created for vni */
+ uword *l2_dp_intf_by_vni;
+
+  /* Proxy-ITR map index */
+ u32 pitr_map_index;
+
+ /* LISP PITR mode */
+ u8 lisp_pitr;
+
+ /* map request mode */
+ u8 map_request_mode;
+
+ /* enable/disable map registering */
+ u8 map_registering;
+
+ /* enable/disable rloc-probing */
+ u8 rloc_probing;
+
+  /* timing wheel for mapping timeouts */
+ timing_wheel_t wheel;
+
+ /* commodity */
+ ip4_main_t *im4;
+ ip6_main_t *im6;
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} lisp_cp_main_t;
+
+/* lisp-gpe control plane */
+lisp_cp_main_t lisp_control_main;
+
+extern vlib_node_registration_t lisp_cp_input_node;
+extern vlib_node_registration_t lisp_cp_lookup_ip4_node;
+extern vlib_node_registration_t lisp_cp_lookup_ip6_node;
+
+clib_error_t *lisp_cp_init (vlib_main_t * vm);
+
+always_inline lisp_cp_main_t *
+vnet_lisp_cp_get_main ()
+{
+ return &lisp_control_main;
+}
+
+typedef struct
+{
+ u8 is_add;
+ union
+ {
+ u8 *name;
+ u32 index;
+ };
+ locator_t *locators;
+ u8 local;
+} vnet_lisp_add_del_locator_set_args_t;
+
+int
+vnet_lisp_add_del_locator_set (vnet_lisp_add_del_locator_set_args_t * a,
+ u32 * ls_index);
+int
+vnet_lisp_add_del_locator (vnet_lisp_add_del_locator_set_args_t * a,
+ locator_set_t * ls, u32 * ls_index);
+
+typedef struct
+{
+ u8 is_add;
+ gid_address_t eid;
+ u32 locator_set_index;
+
+ u32 ttl;
+ u8 action;
+ u8 authoritative;
+
+ u8 local;
+ u8 is_static;
+ u8 *key;
+ u8 key_id;
+} vnet_lisp_add_del_mapping_args_t;
+
+int
+vnet_lisp_map_cache_add_del (vnet_lisp_add_del_mapping_args_t * a,
+ u32 * map_index);
+int
+vnet_lisp_add_del_local_mapping (vnet_lisp_add_del_mapping_args_t * a,
+ u32 * map_index_result);
+
+int
+vnet_lisp_add_del_mapping (gid_address_t * deid, locator_t * dlocs, u8 action,
+ u8 authoritative, u32 ttl, u8 is_add, u8 is_static,
+ u32 * res_map_index);
+
+typedef struct
+{
+ gid_address_t reid;
+ gid_address_t leid;
+ u8 is_add;
+} vnet_lisp_add_del_adjacency_args_t;
+
+int vnet_lisp_add_del_adjacency (vnet_lisp_add_del_adjacency_args_t * a);
+
+typedef struct
+{
+ u8 is_add;
+ ip_address_t address;
+} vnet_lisp_add_del_map_resolver_args_t;
+
+int
+vnet_lisp_add_del_map_resolver (vnet_lisp_add_del_map_resolver_args_t * a);
+int vnet_lisp_add_del_map_server (ip_address_t * addr, u8 is_add);
+
+clib_error_t *vnet_lisp_enable_disable (u8 is_enabled);
+u8 vnet_lisp_enable_disable_status (void);
+
+int vnet_lisp_pitr_set_locator_set (u8 * locator_set_name, u8 is_add);
+
+typedef struct
+{
+ u8 is_add;
+ u8 *locator_set_name;
+} vnet_lisp_add_del_mreq_itr_rloc_args_t;
+
+int
+vnet_lisp_add_del_mreq_itr_rlocs (vnet_lisp_add_del_mreq_itr_rloc_args_t * a);
+
+int vnet_lisp_clear_all_remote_adjacencies (void);
+
+int vnet_lisp_eid_table_map (u32 vni, u32 vrf, u8 is_l2, u8 is_add);
+int vnet_lisp_add_del_map_table_key (gid_address_t * eid, char *key,
+ u8 is_add);
+int vnet_lisp_set_map_request_mode (u8 mode);
+u8 vnet_lisp_get_map_request_mode (void);
+lisp_adjacency_t *vnet_lisp_adjacencies_get_by_vni (u32 vni);
+int vnet_lisp_rloc_probe_enable_disable (u8 is_enable);
+int vnet_lisp_map_register_enable_disable (u8 is_enable);
+u8 vnet_lisp_map_register_state_get (void);
+u8 vnet_lisp_rloc_probe_state_get (void);
+
+#endif /* VNET_CONTROL_H_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-cp/gid_dictionary.c b/src/vnet/lisp-cp/gid_dictionary.c
new file mode 100644
index 00000000000..d238124ecd8
--- /dev/null
+++ b/src/vnet/lisp-cp/gid_dictionary.c
@@ -0,0 +1,865 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/lisp-cp/gid_dictionary.h>
+
+typedef struct
+{
+ void *arg;
+ ip_prefix_t src;
+ foreach_subprefix_match_cb_t cb;
+ union
+ {
+ gid_ip4_table_t *ip4_table;
+ gid_ip6_table_t *ip6_table;
+ };
+} sfib_entry_arg_t;
+
+static u32 ip4_lookup (gid_ip4_table_t * db, u32 vni, ip_prefix_t * key);
+
+static u32 ip6_lookup (gid_ip6_table_t * db, u32 vni, ip_prefix_t * key);
+
+static void
+foreach_sfib4_subprefix (BVT (clib_bihash_kv) * kvp, void *arg)
+{
+ sfib_entry_arg_t *a = arg;
+ u32 ip = (u32) kvp->key[0];
+ ip4_address_t *mask;
+ u8 plen = ip_prefix_len (&a->src);
+
+ ASSERT (plen <= 32);
+ mask = &a->ip4_table->ip4_fib_masks[plen];
+
+ u32 src_ip = ip_prefix_v4 (&a->src).as_u32;
+ src_ip &= mask->as_u32;
+ ip &= mask->as_u32;
+
+ if (src_ip == ip)
+ {
+ /* found sub-prefix of src prefix */
+ (a->cb) (kvp->value, a->arg);
+ }
+}
+
+static void
+gid_dict_foreach_ip4_subprefix (gid_dictionary_t * db, u32 vni,
+ ip_prefix_t * src, ip_prefix_t * dst,
+ foreach_subprefix_match_cb_t cb, void *arg)
+{
+ u32 sfi;
+ gid_ip4_table_t *sfib4;
+ sfib_entry_arg_t a;
+
+ sfi = ip4_lookup (&db->dst_ip4_table, vni, dst);
+ if (GID_LOOKUP_MISS == sfi)
+ return;
+
+ sfib4 = pool_elt_at_index (db->src_ip4_table_pool, sfi);
+
+ a.arg = arg;
+ a.cb = cb;
+ a.src = src[0];
+ a.ip4_table = sfib4;
+
+ BV (clib_bihash_foreach_key_value_pair) (&sfib4->ip4_lookup_table,
+ foreach_sfib4_subprefix, &a);
+}
+
+static void
+foreach_sfib6_subprefix (BVT (clib_bihash_kv) * kvp, void *arg)
+{
+ sfib_entry_arg_t *a = arg;
+ ip6_address_t ip;
+ ip6_address_t *mask;
+ u8 plen = ip_prefix_len (&a->src);
+
+ mask = &a->ip6_table->ip6_fib_masks[plen];
+ ip.as_u64[0] = kvp->key[0];
+ ip.as_u64[1] = kvp->key[1];
+
+ if (ip6_address_is_equal_masked (&ip_prefix_v6 (&a->src), &ip, mask))
+ {
+ /* found sub-prefix of src prefix */
+ (a->cb) (kvp->value, a->arg);
+ }
+}
+
+static void
+gid_dict_foreach_ip6_subprefix (gid_dictionary_t * db, u32 vni,
+ ip_prefix_t * src, ip_prefix_t * dst,
+ foreach_subprefix_match_cb_t cb, void *arg)
+{
+ u32 sfi;
+ gid_ip6_table_t *sfib6;
+ sfib_entry_arg_t a;
+
+ sfi = ip6_lookup (&db->dst_ip6_table, vni, dst);
+ if (GID_LOOKUP_MISS == sfi)
+ return;
+
+ sfib6 = pool_elt_at_index (db->src_ip6_table_pool, sfi);
+
+ a.arg = arg;
+ a.cb = cb;
+ a.src = src[0];
+ a.ip6_table = sfib6;
+
+ BV (clib_bihash_foreach_key_value_pair) (&sfib6->ip6_lookup_table,
+ foreach_sfib6_subprefix, &a);
+}
+
+void
+gid_dict_foreach_subprefix (gid_dictionary_t * db, gid_address_t * eid,
+ foreach_subprefix_match_cb_t cb, void *arg)
+{
+ ip_prefix_t *ippref = &gid_address_sd_dst_ippref (eid);
+
+ if (IP4 == ip_prefix_version (ippref))
+ gid_dict_foreach_ip4_subprefix (db, gid_address_vni (eid),
+ &gid_address_sd_src_ippref (eid),
+ &gid_address_sd_dst_ippref (eid), cb,
+ arg);
+ else
+ gid_dict_foreach_ip6_subprefix (db, gid_address_vni (eid),
+ &gid_address_sd_src_ippref (eid),
+ &gid_address_sd_dst_ippref (eid), cb,
+ arg);
+}
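+
+/* Usage sketch (illustrative; collect_cb and its caller are
+ * hypothetical, not part of this file): gather the stored values
+ * (mapping indices) of all source-prefix entries that fall under the
+ * source prefix of a src/dst EID.
+ *
+ *   static void
+ *   collect_cb (u32 mapping_index, void *arg)
+ *   {
+ *     u32 **acc = arg;
+ *     vec_add1 (*acc, mapping_index);
+ *   }
+ *
+ *   u32 *indices = 0;
+ *   gid_dict_foreach_subprefix (db, &sd_eid, collect_cb, &indices);
+ */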
+
+static void
+make_mac_sd_key (BVT (clib_bihash_kv) * kv, u32 vni, u8 src_mac[6],
+ u8 dst_mac[6])
+{
+ kv->key[0] = (u64) vni;
+ kv->key[1] = mac_to_u64 (dst_mac);
+ kv->key[2] = src_mac ? mac_to_u64 (src_mac) : (u64) 0;
+}
+
+static u32
+mac_sd_lookup (gid_mac_table_t * db, u32 vni, u8 * dst, u8 * src)
+{
+ int rv;
+ BVT (clib_bihash_kv) kv, value;
+
+ make_mac_sd_key (&kv, vni, src, dst);
+ rv = BV (clib_bihash_search_inline_2) (&db->mac_lookup_table, &kv, &value);
+
+ /* no match, try with src 0, catch all for dst */
+ if (rv != 0)
+ {
+ kv.key[2] = 0;
+ rv = BV (clib_bihash_search_inline_2) (&db->mac_lookup_table, &kv,
+ &value);
+ if (rv == 0)
+ return value.value;
+ }
+ else
+ return value.value;
+
+ return GID_LOOKUP_MISS;
+}
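+
+/* Fallback semantics sketch (illustrative): the exact (dst, src) key is
+ * probed first; on a miss the source half of the key is zeroed and the
+ * table is probed again, so an entry installed with a zero source MAC
+ * acts as the per-destination catch-all:
+ *
+ *   u32 mi = mac_sd_lookup (&db->sd_mac_table, vni, dst_mac, src_mac);
+ */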
+
+static u32
+ip4_lookup_exact_match (gid_ip4_table_t * db, u32 vni, ip_prefix_t * key)
+{
+ int rv;
+ BVT (clib_bihash_kv) kv, value;
+
+ ip4_address_t *mask;
+
+ mask = &db->ip4_fib_masks[ip_prefix_len (key)];
+
+ kv.key[0] = ((u64) vni << 32) | (ip_prefix_v4 (key).as_u32 & mask->as_u32);
+ kv.key[1] = 0;
+ kv.key[2] = 0;
+
+ rv = BV (clib_bihash_search_inline_2) (&db->ip4_lookup_table, &kv, &value);
+ if (rv == 0)
+ return value.value;
+
+ return GID_LOOKUP_MISS;
+}
+
+static u32
+ip4_lookup (gid_ip4_table_t * db, u32 vni, ip_prefix_t * key)
+{
+ int i, len;
+ int rv;
+ BVT (clib_bihash_kv) kv, value;
+
+ len = vec_len (db->ip4_prefix_lengths_in_search_order);
+
+ for (i = 0; i < len; i++)
+ {
+ int dst_address_length = db->ip4_prefix_lengths_in_search_order[i];
+ ip4_address_t *mask;
+
+ ASSERT (dst_address_length >= 0 && dst_address_length <= 32);
+
+ mask = &db->ip4_fib_masks[dst_address_length];
+
+ kv.key[0] =
+ ((u64) vni << 32) | (ip_prefix_v4 (key).as_u32 & mask->as_u32);
+ kv.key[1] = 0;
+ kv.key[2] = 0;
+
+ rv =
+ BV (clib_bihash_search_inline_2) (&db->ip4_lookup_table, &kv, &value);
+ if (rv == 0)
+ return value.value;
+ }
+
+ return GID_LOOKUP_MISS;
+}
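+
+/* Worked example (illustrative): with only /24 and /16 entries present,
+ * ip4_prefix_lengths_in_search_order is [24, 16]. A query for 10.1.1.1
+ * is masked to 10.1.1.0 and probed under /24 first; only if that bihash
+ * search misses is it re-masked to 10.1.0.0 and probed under /16, i.e.
+ * one search per populated mask width, longest prefix first.
+ */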
+
+static u32
+ip6_lookup_exact_match (gid_ip6_table_t * db, u32 vni, ip_prefix_t * key)
+{
+ int rv;
+ BVT (clib_bihash_kv) kv, value;
+
+ ip6_address_t *mask;
+ mask = &db->ip6_fib_masks[ip_prefix_len (key)];
+
+ kv.key[0] = ip_prefix_v6 (key).as_u64[0] & mask->as_u64[0];
+ kv.key[1] = ip_prefix_v6 (key).as_u64[1] & mask->as_u64[1];
+ kv.key[2] = (u64) vni;
+
+ rv = BV (clib_bihash_search_inline_2) (&db->ip6_lookup_table, &kv, &value);
+ if (rv == 0)
+ return value.value;
+
+ return GID_LOOKUP_MISS;
+}
+
+static u32
+ip6_lookup (gid_ip6_table_t * db, u32 vni, ip_prefix_t * key)
+{
+ int i, len;
+ int rv;
+ BVT (clib_bihash_kv) kv, value;
+
+ len = vec_len (db->ip6_prefix_lengths_in_search_order);
+
+ for (i = 0; i < len; i++)
+ {
+ int dst_address_length = db->ip6_prefix_lengths_in_search_order[i];
+ ip6_address_t *mask;
+
+ ASSERT (dst_address_length >= 0 && dst_address_length <= 128);
+
+ mask = &db->ip6_fib_masks[dst_address_length];
+
+ kv.key[0] = ip_prefix_v6 (key).as_u64[0] & mask->as_u64[0];
+ kv.key[1] = ip_prefix_v6 (key).as_u64[1] & mask->as_u64[1];
+ kv.key[2] = (u64) vni;
+
+ rv =
+ BV (clib_bihash_search_inline_2) (&db->ip6_lookup_table, &kv, &value);
+ if (rv == 0)
+ return value.value;
+ }
+
+ return GID_LOOKUP_MISS;
+}
+
+static u32
+ip_sd_lookup (gid_dictionary_t * db, u32 vni, ip_prefix_t * dst,
+ ip_prefix_t * src)
+{
+ u32 sfi;
+ gid_ip4_table_t *sfib4;
+ gid_ip6_table_t *sfib6;
+
+ switch (ip_prefix_version (dst))
+ {
+ case IP4:
+ sfi = ip4_lookup (&db->dst_ip4_table, vni, dst);
+ if (GID_LOOKUP_MISS != sfi)
+ sfib4 = pool_elt_at_index (db->src_ip4_table_pool, sfi);
+ else
+ return GID_LOOKUP_MISS;
+
+ if (!src)
+ {
+ ip_prefix_t sp;
+ memset (&sp, 0, sizeof (sp));
+ return ip4_lookup_exact_match (sfib4, 0, &sp);
+ }
+ else
+ return ip4_lookup (sfib4, 0, src);
+
+ break;
+ case IP6:
+ sfi = ip6_lookup (&db->dst_ip6_table, vni, dst);
+ if (GID_LOOKUP_MISS != sfi)
+ sfib6 = pool_elt_at_index (db->src_ip6_table_pool, sfi);
+ else
+ return GID_LOOKUP_MISS;
+
+ if (!src)
+ {
+ ip_prefix_t sp;
+ memset (&sp, 0, sizeof (sp));
+ ip_prefix_version (&sp) = IP6;
+ return ip6_lookup_exact_match (sfib6, 0, &sp);
+ }
+ else
+ return ip6_lookup (sfib6, 0, src);
+
+ break;
+ default:
+ clib_warning ("address type %d not supported!",
+ ip_prefix_version (dst));
+ break;
+ }
+ return GID_LOOKUP_MISS;
+}
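+
+/* Two-stage lookup sketch (illustrative): the destination LPM returns an
+ * index into the pool of per-destination source tables, and the source
+ * prefix is then matched inside that table with vni 0. A dst-only query
+ * passes src = 0, so the zeroed /0 catch-all installed at add time hits:
+ *
+ *   u32 mi = ip_sd_lookup (db, vni, &dst_pref, 0);
+ */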
+
+u32
+gid_dictionary_lookup (gid_dictionary_t * db, gid_address_t * key)
+{
+ switch (gid_address_type (key))
+ {
+ case GID_ADDR_IP_PREFIX:
+ return ip_sd_lookup (db, gid_address_vni (key),
+ &gid_address_ippref (key), 0);
+ case GID_ADDR_MAC:
+ return mac_sd_lookup (&db->sd_mac_table, gid_address_vni (key),
+ gid_address_mac (key), 0);
+ case GID_ADDR_SRC_DST:
+ switch (gid_address_sd_dst_type (key))
+ {
+ case FID_ADDR_IP_PREF:
+ return ip_sd_lookup (db, gid_address_vni (key),
+ &gid_address_sd_dst_ippref (key),
+ &gid_address_sd_src_ippref (key));
+ break;
+ case FID_ADDR_MAC:
+ return mac_sd_lookup (&db->sd_mac_table, gid_address_vni (key),
+ gid_address_sd_dst_mac (key),
+ gid_address_sd_src_mac (key));
+ break;
+ default:
+ clib_warning ("Source/Dest address type %d not supported!",
+ gid_address_sd_dst_type (key));
+ break;
+ }
+ break;
+ default:
+ clib_warning ("address type %d not supported!", gid_address_type (key));
+ break;
+ }
+ return GID_LOOKUP_MISS;
+}
+
+u32
+gid_dictionary_sd_lookup (gid_dictionary_t * db, gid_address_t * dst,
+ gid_address_t * src)
+{
+ switch (gid_address_type (dst))
+ {
+ case GID_ADDR_IP_PREFIX:
+ return ip_sd_lookup (db, gid_address_vni (dst),
+ &gid_address_ippref (dst),
+ &gid_address_ippref (src));
+ case GID_ADDR_MAC:
+ return mac_sd_lookup (&db->sd_mac_table, gid_address_vni (dst),
+ gid_address_mac (dst), gid_address_mac (src));
+ case GID_ADDR_SRC_DST:
+ switch (gid_address_sd_dst_type (dst))
+ {
+ case FID_ADDR_IP_PREF:
+ return ip_sd_lookup (db, gid_address_vni (dst),
+ &gid_address_sd_dst_ippref (dst),
+ &gid_address_sd_src_ippref (dst));
+ break;
+ case FID_ADDR_MAC:
+ return mac_sd_lookup (&db->sd_mac_table, gid_address_vni (dst),
+ gid_address_sd_dst_mac (dst),
+ gid_address_sd_src_mac (dst));
+ break;
+ default:
+ clib_warning ("Source/Dest address type %d not supported!",
+ gid_address_sd_dst_type (dst));
+ break;
+ }
+ break;
+ default:
+ clib_warning ("address type %d not supported!", gid_address_type (dst));
+ break;
+ }
+ return GID_LOOKUP_MISS;
+}
+
+static void
+ip4_compute_prefix_lengths_in_search_order (gid_ip4_table_t * db)
+{
+ int i;
+ vec_reset_length (db->ip4_prefix_lengths_in_search_order);
+ /* Note: bitmap reversed so this is in fact a longest prefix match */
+
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (i, db->ip4_non_empty_dst_address_length_bitmap,
+ ({
+ int dst_address_length = 32 - i;
+ vec_add1 (db->ip4_prefix_lengths_in_search_order, dst_address_length);
+ }));
+ /* *INDENT-ON* */
+
+}
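+
+/* Example of the reversed-bitmap ordering (illustrative): a /24 entry
+ * sets bit 32 - 24 = 8 and a /16 entry sets bit 16. clib_bitmap_foreach
+ * visits bit indices in ascending order (8, then 16), so the resulting
+ * vector is [24, 16] -- longest prefix first, matching the probe order
+ * used by ip4_lookup above.
+ */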
+
+static u32
+add_del_ip4_key (gid_ip4_table_t * db, u32 vni, ip_prefix_t * pref, u32 val,
+ u8 is_add)
+{
+ BVT (clib_bihash_kv) kv, value;
+ u32 old_val = ~0;
+ ip4_address_t key;
+ u8 plen = ip_prefix_len (pref);
+
+ clib_memcpy (&key, &ip_prefix_v4 (pref), sizeof (key));
+ key.as_u32 &= db->ip4_fib_masks[plen].as_u32;
+ if (is_add)
+ {
+ db->ip4_non_empty_dst_address_length_bitmap =
+ clib_bitmap_set (db->ip4_non_empty_dst_address_length_bitmap,
+ 32 - plen, 1);
+ ip4_compute_prefix_lengths_in_search_order (db);
+
+ db->ip4_prefix_len_refcount[plen]++;
+ }
+ else
+ {
+ ASSERT (db->ip4_prefix_len_refcount[plen] != 0);
+
+ db->ip4_prefix_len_refcount[plen]--;
+
+ if (db->ip4_prefix_len_refcount[plen] == 0)
+ {
+ db->ip4_non_empty_dst_address_length_bitmap =
+ clib_bitmap_set (db->ip4_non_empty_dst_address_length_bitmap,
+ 32 - plen, 0);
+ ip4_compute_prefix_lengths_in_search_order (db);
+ }
+ }
+
+ kv.key[0] = ((u64) vni << 32) | key.as_u32;
+ kv.key[1] = 0;
+ kv.key[2] = 0;
+
+ if (BV (clib_bihash_search) (&db->ip4_lookup_table, &kv, &value) == 0)
+ old_val = value.value;
+
+ if (!is_add)
+ BV (clib_bihash_add_del) (&db->ip4_lookup_table, &kv, 0 /* is_add */ );
+ else
+ {
+ kv.value = val;
+ BV (clib_bihash_add_del) (&db->ip4_lookup_table, &kv, 1 /* is_add */ );
+ }
+ return old_val;
+}
+
+static void
+ip4_lookup_init (gid_ip4_table_t * db)
+{
+ uword i;
+
+ memset (db->ip4_prefix_len_refcount, 0,
+ sizeof (db->ip4_prefix_len_refcount));
+
+ for (i = 0; i < ARRAY_LEN (db->ip4_fib_masks); i++)
+ {
+ u32 m;
+
+ if (i < 32)
+ m = pow2_mask (i) << (32 - i);
+ else
+ m = ~0;
+ db->ip4_fib_masks[i].as_u32 = clib_host_to_net_u32 (m);
+ }
+ if (db->ip4_lookup_table_nbuckets == 0)
+ db->ip4_lookup_table_nbuckets = IP4_LOOKUP_DEFAULT_HASH_NUM_BUCKETS;
+
+ db->ip4_lookup_table_nbuckets =
+ 1 << max_log2 (db->ip4_lookup_table_nbuckets);
+
+ if (db->ip4_lookup_table_size == 0)
+ db->ip4_lookup_table_size = IP4_LOOKUP_DEFAULT_HASH_MEMORY_SIZE;
+
+ BV (clib_bihash_init) (&db->ip4_lookup_table, "ip4 lookup table",
+ db->ip4_lookup_table_nbuckets,
+ db->ip4_lookup_table_size);
+}
+
+static u32
+add_del_sd_ip4_key (gid_dictionary_t * db, u32 vni, ip_prefix_t * dst_pref,
+ ip_prefix_t * src_pref, u32 val, u8 is_add)
+{
+ u32 sfi, old_val = ~0;
+ gid_ip4_table_t *sfib;
+
+ sfi = ip4_lookup_exact_match (&db->dst_ip4_table, vni, dst_pref);
+
+ if (is_add)
+ {
+ if (GID_LOOKUP_MISS == sfi)
+ {
+ pool_get (db->src_ip4_table_pool, sfib);
+ ip4_lookup_init (sfib);
+ add_del_ip4_key (&db->dst_ip4_table, vni, dst_pref,
+ sfib - db->src_ip4_table_pool, is_add);
+ if (src_pref)
+ add_del_ip4_key (sfib, 0 /* vni */ , src_pref, val, is_add);
+ else
+ {
+ ip_prefix_t sp;
+ memset (&sp, 0, sizeof (sp));
+ add_del_ip4_key (sfib, 0 /* vni */ , &sp, val, is_add);
+ }
+ }
+ else
+ {
+ ASSERT (!pool_is_free_index (db->src_ip4_table_pool, sfi));
+ sfib = pool_elt_at_index (db->src_ip4_table_pool, sfi);
+ if (src_pref)
+ {
+ old_val = ip4_lookup_exact_match (sfib, 0, src_pref);
+ add_del_ip4_key (sfib, 0 /* vni */ , src_pref, val, is_add);
+ }
+ else
+ {
+ ip_prefix_t sp;
+ memset (&sp, 0, sizeof (sp));
+ old_val =
+ add_del_ip4_key (sfib, 0 /* vni */ , &sp, val, is_add);
+ }
+ }
+ }
+ else
+ {
+ if (GID_LOOKUP_MISS != sfi)
+ {
+ add_del_ip4_key (&db->dst_ip4_table, vni, dst_pref, 0, is_add);
+ sfib = pool_elt_at_index (db->src_ip4_table_pool, sfi);
+ if (src_pref)
+ old_val = add_del_ip4_key (sfib, 0, src_pref, 0, is_add);
+ else
+ {
+ ip_prefix_t sp;
+ memset (&sp, 0, sizeof (sp));
+ old_val = add_del_ip4_key (sfib, 0, &sp, 0, is_add);
+ }
+ }
+ else
+ clib_warning ("cannot delete dst mapping %U!", format_ip_prefix,
+ dst_pref);
+ }
+ return old_val;
+}
+
+static void
+ip6_compute_prefix_lengths_in_search_order (gid_ip6_table_t * db)
+{
+ int i;
+ vec_reset_length (db->ip6_prefix_lengths_in_search_order);
+ /* Note: bitmap reversed so this is in fact a longest prefix match */
+
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (i, db->ip6_non_empty_dst_address_length_bitmap,
+ ({
+ int dst_address_length = 128 - i;
+ vec_add1 (db->ip6_prefix_lengths_in_search_order, dst_address_length);
+ }));
+ /* *INDENT-ON* */
+}
+
+static u32
+add_del_ip6_key (gid_ip6_table_t * db, u32 vni, ip_prefix_t * pref, u32 val,
+ u8 is_add)
+{
+ BVT (clib_bihash_kv) kv, value;
+ u32 old_val = ~0;
+ ip6_address_t key;
+ u8 plen = ip_prefix_len (pref);
+
+ clib_memcpy (&key, &ip_prefix_v6 (pref), sizeof (key));
+ ip6_address_mask (&key, &db->ip6_fib_masks[plen]);
+ if (is_add)
+ {
+ db->ip6_non_empty_dst_address_length_bitmap =
+ clib_bitmap_set (db->ip6_non_empty_dst_address_length_bitmap,
+ 128 - plen, 1);
+ ip6_compute_prefix_lengths_in_search_order (db);
+ db->ip6_prefix_len_refcount[plen]++;
+ }
+ else
+ {
+ ASSERT (db->ip6_prefix_len_refcount[plen] != 0);
+
+ db->ip6_prefix_len_refcount[plen]--;
+
+ if (db->ip6_prefix_len_refcount[plen] == 0)
+ {
+ db->ip6_non_empty_dst_address_length_bitmap =
+ clib_bitmap_set (db->ip6_non_empty_dst_address_length_bitmap,
+ 128 - plen, 0);
+ ip6_compute_prefix_lengths_in_search_order (db);
+ }
+ }
+
+ kv.key[0] = key.as_u64[0];
+ kv.key[1] = key.as_u64[1];
+ kv.key[2] = (u64) vni;
+
+ if (BV (clib_bihash_search) (&db->ip6_lookup_table, &kv, &value) == 0)
+ old_val = value.value;
+
+ if (!is_add)
+ BV (clib_bihash_add_del) (&db->ip6_lookup_table, &kv, 0 /* is_add */ );
+ else
+ {
+ kv.value = val;
+ BV (clib_bihash_add_del) (&db->ip6_lookup_table, &kv, 1 /* is_add */ );
+ }
+ return old_val;
+}
+
+static u32
+add_del_mac (gid_mac_table_t * db, u32 vni, u8 * dst_mac, u8 * src_mac,
+ u32 val, u8 is_add)
+{
+ BVT (clib_bihash_kv) kv, value;
+ u32 old_val = ~0;
+
+ make_mac_sd_key (&kv, vni, src_mac, dst_mac);
+
+ if (BV (clib_bihash_search) (&db->mac_lookup_table, &kv, &value) == 0)
+ old_val = value.value;
+
+ if (!is_add)
+ BV (clib_bihash_add_del) (&db->mac_lookup_table, &kv, 0 /* is_add */ );
+ else
+ {
+ kv.value = val;
+ BV (clib_bihash_add_del) (&db->mac_lookup_table, &kv, 1 /* is_add */ );
+ }
+ return old_val;
+}
+
+static void
+ip6_lookup_init (gid_ip6_table_t * db)
+{
+ uword i;
+
+ memset (db->ip6_prefix_len_refcount, 0,
+ sizeof (db->ip6_prefix_len_refcount));
+
+ for (i = 0; i < ARRAY_LEN (db->ip6_fib_masks); i++)
+ {
+ u32 j, i0, i1;
+
+ i0 = i / 32;
+ i1 = i % 32;
+
+ for (j = 0; j < i0; j++)
+ db->ip6_fib_masks[i].as_u32[j] = ~0;
+
+ if (i1)
+ db->ip6_fib_masks[i].as_u32[i0] =
+ clib_host_to_net_u32 (pow2_mask (i1) << (32 - i1));
+ }
+
+ if (db->ip6_lookup_table_nbuckets == 0)
+ db->ip6_lookup_table_nbuckets = IP6_LOOKUP_DEFAULT_HASH_NUM_BUCKETS;
+
+ db->ip6_lookup_table_nbuckets =
+ 1 << max_log2 (db->ip6_lookup_table_nbuckets);
+
+ if (db->ip6_lookup_table_size == 0)
+ db->ip6_lookup_table_size = IP6_LOOKUP_DEFAULT_HASH_MEMORY_SIZE;
+
+ BV (clib_bihash_init) (&db->ip6_lookup_table, "ip6 lookup table",
+ db->ip6_lookup_table_nbuckets,
+ db->ip6_lookup_table_size);
+}
+
+static u32
+add_del_sd_ip6_key (gid_dictionary_t * db, u32 vni, ip_prefix_t * dst_pref,
+ ip_prefix_t * src_pref, u32 val, u8 is_add)
+{
+ u32 sfi, old_val = ~0;
+ gid_ip6_table_t *sfib;
+
+ sfi = ip6_lookup_exact_match (&db->dst_ip6_table, vni, dst_pref);
+
+ if (is_add)
+ {
+ if (GID_LOOKUP_MISS == sfi)
+ {
+ pool_get (db->src_ip6_table_pool, sfib);
+ ip6_lookup_init (sfib);
+ add_del_ip6_key (&db->dst_ip6_table, vni, dst_pref,
+ sfib - db->src_ip6_table_pool, is_add);
+ if (src_pref)
+ add_del_ip6_key (sfib, 0 /* vni */ , src_pref, val, is_add);
+ else
+ {
+ ip_prefix_t sp;
+ memset (&sp, 0, sizeof (sp));
+ ip_prefix_version (&sp) = IP6;
+ add_del_ip6_key (sfib, 0 /* vni */ , &sp, val, is_add);
+ }
+ }
+ else
+ {
+ ASSERT (!pool_is_free_index (db->src_ip6_table_pool, sfi));
+ sfib = pool_elt_at_index (db->src_ip6_table_pool, sfi);
+ if (src_pref)
+ {
+ old_val = ip6_lookup_exact_match (sfib, 0, src_pref);
+ add_del_ip6_key (sfib, 0 /* vni */ , src_pref, val, is_add);
+ }
+ else
+ {
+ ip_prefix_t sp;
+ memset (&sp, 0, sizeof (sp));
+ ip_prefix_version (&sp) = IP6;
+ old_val =
+ add_del_ip6_key (sfib, 0 /* vni */ , &sp, val, is_add);
+ }
+ }
+ }
+ else
+ {
+ if (GID_LOOKUP_MISS != sfi)
+ {
+ add_del_ip6_key (&db->dst_ip6_table, vni, dst_pref, 0, is_add);
+ sfib = pool_elt_at_index (db->src_ip6_table_pool, sfi);
+ if (src_pref)
+ old_val = add_del_ip6_key (sfib, 0, src_pref, 0, is_add);
+ else
+ {
+ ip_prefix_t sp;
+ memset (&sp, 0, sizeof (sp));
+ ip_prefix_version (&sp) = IP6;
+ old_val = add_del_ip6_key (sfib, 0, &sp, 0, is_add);
+ }
+ }
+ else
+ clib_warning ("cannot delete dst mapping %U!", format_ip_prefix,
+ dst_pref);
+ }
+ return old_val;
+}
+
+static u32
+add_del_ip (gid_dictionary_t * db, u32 vni, ip_prefix_t * dst_key,
+ ip_prefix_t * src_key, u32 value, u8 is_add)
+{
+ switch (ip_prefix_version (dst_key))
+ {
+ case IP4:
+ return add_del_sd_ip4_key (db, vni, dst_key, src_key, value, is_add);
+ break;
+ case IP6:
+ return add_del_sd_ip6_key (db, vni, dst_key, src_key, value, is_add);
+ break;
+ default:
+ clib_warning ("address type %d not supported!",
+ ip_prefix_version (dst_key));
+ break;
+ }
+ return ~0;
+}
+
+static u32
+add_del_sd (gid_dictionary_t * db, u32 vni, source_dest_t * key, u32 value,
+ u8 is_add)
+{
+ switch (sd_dst_type (key))
+ {
+ case FID_ADDR_IP_PREF:
+ return add_del_ip (db, vni, &sd_dst_ippref (key), &sd_src_ippref (key),
+ value, is_add);
+
+ case FID_ADDR_MAC:
+ return add_del_mac (&db->sd_mac_table, vni, sd_dst_mac (key),
+ sd_src_mac (key), value, is_add);
+
+ default:
+ clib_warning ("SD address type %d not supprted!", sd_dst_type (key));
+ break;
+ }
+
+ return ~0;
+}
+
+u32
+gid_dictionary_add_del (gid_dictionary_t * db, gid_address_t * key, u32 value,
+ u8 is_add)
+{
+ switch (gid_address_type (key))
+ {
+ case GID_ADDR_IP_PREFIX:
+ return add_del_ip (db, gid_address_vni (key), &gid_address_ippref (key),
+ 0, value, is_add);
+ case GID_ADDR_MAC:
+ return add_del_mac (&db->sd_mac_table, gid_address_vni (key),
+ gid_address_mac (key), 0, value, is_add);
+ case GID_ADDR_SRC_DST:
+ return add_del_sd (db, gid_address_vni (key), &gid_address_sd (key),
+ value, is_add);
+ default:
+ clib_warning ("address type %d not supported!", gid_address_type (key));
+ break;
+ }
+ return ~0;
+}
+
+static void
+mac_lookup_init (gid_mac_table_t * db)
+{
+ if (db->mac_lookup_table_nbuckets == 0)
+ db->mac_lookup_table_nbuckets = MAC_LOOKUP_DEFAULT_HASH_NUM_BUCKETS;
+
+ db->mac_lookup_table_nbuckets =
+ 1 << max_log2 (db->mac_lookup_table_nbuckets);
+
+ if (db->mac_lookup_table_size == 0)
+ db->mac_lookup_table_size = MAC_LOOKUP_DEFAULT_HASH_MEMORY_SIZE;
+
+ BV (clib_bihash_init) (&db->mac_lookup_table, "mac lookup table",
+ db->mac_lookup_table_nbuckets,
+ db->mac_lookup_table_size);
+}
+
+void
+gid_dictionary_init (gid_dictionary_t * db)
+{
+ ip4_lookup_init (&db->dst_ip4_table);
+ ip6_lookup_init (&db->dst_ip6_table);
+ mac_lookup_init (&db->sd_mac_table);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-cp/gid_dictionary.h b/src/vnet/lisp-cp/gid_dictionary.h
new file mode 100644
index 00000000000..c5aaf8cb30d
--- /dev/null
+++ b/src/vnet/lisp-cp/gid_dictionary.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VNET_LISP_GPE_GID_DICTIONARY_H_
+#define VNET_LISP_GPE_GID_DICTIONARY_H_
+
+#include <vnet/vnet.h>
+#include <vnet/lisp-cp/lisp_types.h>
+#include <vppinfra/bihash_24_8.h>
+#include <vppinfra/bihash_template.h>
+
+#define GID_LOOKUP_MISS ((u32)~0)
+
+/* Default size of the ip4 hash table */
+#define IP4_LOOKUP_DEFAULT_HASH_NUM_BUCKETS (64 * 1024)
+#define IP4_LOOKUP_DEFAULT_HASH_MEMORY_SIZE (32<<20)
+
+/* Default size of the ip6 hash table */
+#define IP6_LOOKUP_DEFAULT_HASH_NUM_BUCKETS (64 * 1024)
+#define IP6_LOOKUP_DEFAULT_HASH_MEMORY_SIZE (32<<20)
+
+/* Default size of the MAC hash table */
+#define MAC_LOOKUP_DEFAULT_HASH_NUM_BUCKETS (64 * 1024)
+#define MAC_LOOKUP_DEFAULT_HASH_MEMORY_SIZE (32<<20)
+
+typedef void (*foreach_subprefix_match_cb_t) (u32, void *);
+
+typedef struct
+{
+ BVT (clib_bihash) ip4_lookup_table;
+
+ /* bitmap/vector of mask widths to search */
+ uword *ip4_non_empty_dst_address_length_bitmap;
+ u8 *ip4_prefix_lengths_in_search_order;
+ ip4_address_t ip4_fib_masks[33];
+ u32 ip4_prefix_len_refcount[33];
+
+ /* ip4 lookup table config parameters */
+ u32 ip4_lookup_table_nbuckets;
+ uword ip4_lookup_table_size;
+} gid_ip4_table_t;
+
+typedef struct
+{
+ BVT (clib_bihash) ip6_lookup_table;
+
+ /* bitmap/vector of mask widths to search */
+ uword *ip6_non_empty_dst_address_length_bitmap;
+ u8 *ip6_prefix_lengths_in_search_order;
+ ip6_address_t ip6_fib_masks[129];
+ u64 ip6_prefix_len_refcount[129];
+
+ /* ip6 lookup table config parameters */
+ u32 ip6_lookup_table_nbuckets;
+ uword ip6_lookup_table_size;
+} gid_ip6_table_t;
+
+typedef struct gid_mac_table
+{
+ BVT (clib_bihash) mac_lookup_table;
+
+ /* mac lookup table config parameters */
+ u32 mac_lookup_table_nbuckets;
+ uword mac_lookup_table_size;
+} gid_mac_table_t;
+
+typedef struct
+{
+ /** destination IP LPM ip4 lookup table */
+ gid_ip4_table_t dst_ip4_table;
+
+ /** pool of source IP LPM ip4 lookup tables */
+ gid_ip4_table_t *src_ip4_table_pool;
+
+ /** destination IP LPM ip6 lookup table */
+ gid_ip6_table_t dst_ip6_table;
+
+ /** pool of source IP LPM ip6 lookup tables */
+ gid_ip6_table_t *src_ip6_table_pool;
+
+ /** flat source/dest mac lookup table */
+ gid_mac_table_t sd_mac_table;
+
+} gid_dictionary_t;
+
+u32
+gid_dictionary_add_del (gid_dictionary_t * db, gid_address_t * key, u32 value,
+ u8 is_add);
+
+u32 gid_dictionary_lookup (gid_dictionary_t * db, gid_address_t * key);
+u32 gid_dictionary_sd_lookup (gid_dictionary_t * db, gid_address_t * dst,
+ gid_address_t * src);
+
+void gid_dictionary_init (gid_dictionary_t * db);
+
+void
+gid_dict_foreach_subprefix (gid_dictionary_t * db, gid_address_t * eid,
+ foreach_subprefix_match_cb_t cb, void *arg);
+
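+/* Typical usage (a minimal sketch; db is caller-owned and eid and
+ * mapping_index are hypothetical inputs):
+ *
+ *   gid_dictionary_t db;
+ *   memset (&db, 0, sizeof (db));
+ *   gid_dictionary_init (&db);
+ *   gid_dictionary_add_del (&db, &eid, mapping_index, 1);
+ *   u32 mi = gid_dictionary_lookup (&db, &eid);
+ *   if (GID_LOOKUP_MISS != mi)
+ *     ;
+ */
+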
+#endif /* VNET_LISP_GPE_GID_DICTIONARY_H_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-cp/lisp.api b/src/vnet/lisp-cp/lisp.api
new file mode 100644
index 00000000000..20c17aa39b1
--- /dev/null
+++ b/src/vnet/lisp-cp/lisp.api
@@ -0,0 +1,835 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \brief add or delete locator_set
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add address if non-zero, else delete
+ @param locator_set_name - locator name
+ @param locator_num - number of locators
+ @param locators - LISP locator records
+ Structure of one locator record is as follows:
+
+ define locator_t {
+ u32 sw_if_index;
+ u8 priority;
+ u8 weight;
+ }
+*/
+define lisp_add_del_locator_set
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 locator_set_name[64];
+ u32 locator_num;
+ u8 locators[0];
+};
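+
+/** \brief Client-side packing sketch (illustrative; n and locs are
+    hypothetical variables and message allocation is elided): the
+    locators array carries locator_num packed records laid out back to
+    back, each matching the locator record documented above.
+
+    mp->locator_num = clib_host_to_net_u32 (n);
+    clib_memcpy (mp->locators, locs, n * sizeof (locs[0]));
+*/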
+
+/** \brief Reply for locator_set add/del
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param ls_index - locator set index
+*/
+define lisp_add_del_locator_set_reply
+{
+ u32 context;
+ i32 retval;
+ u32 ls_index;
+};
+
+/** \brief add or delete locator for locator_set
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add address if non-zero, else delete
+ @param locator_set_name - name of locator_set to add/del locator
+ @param sw_if_index - index of the interface
+ @param priority - priority of the lisp locator
+ @param weight - weight of the lisp locator
+*/
+define lisp_add_del_locator
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 locator_set_name[64];
+ u32 sw_if_index;
+ u8 priority;
+ u8 weight;
+};
+
+/** \brief Reply for locator add/del
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_add_del_locator_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief add or delete lisp eid-table
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add address if non-zero, else delete
+ @param eid_type:
+ 0 : ipv4
+ 1 : ipv6
+ 2 : mac
+ @param eid - EID can be ip4, ip6 or mac
+ @param prefix_len - prefix len
+ @param locator_set_name - name of locator_set to add/del eid-table
+ @param vni - virtual network instance
+ @param key_id
+ HMAC_NO_KEY 0
+ HMAC_SHA_1_96 1
+ HMAC_SHA_256_128 2
+ @param key - secret key
+*/
+define lisp_add_del_local_eid
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 eid_type;
+ u8 eid[16];
+ u8 prefix_len;
+ u8 locator_set_name[64];
+ u32 vni;
+ u16 key_id;
+ u8 key[64];
+};
+
+/** \brief Reply for local_eid add/del
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_add_del_local_eid_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Add/delete map server
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add address if non-zero; delete otherwise
+ @param is_ipv6 - if non-zero the address is ipv6, else ipv4
+ @param ip_address - map server IP address
+*/
+define lisp_add_del_map_server
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 is_ipv6;
+ u8 ip_address[16];
+};
+
+/** \brief Reply for lisp_add_del_map_server
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_add_del_map_server_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief add or delete map-resolver
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add address if non-zero, else delete
+ @param is_ipv6 - if non-zero the address is ipv6, else ipv4
+ @param ip_address - array of address bytes
+*/
+define lisp_add_del_map_resolver
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 is_ipv6;
+ u8 ip_address[16];
+};
+
+/** \brief Reply for map_resolver add/del
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_add_del_map_resolver_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief enable or disable LISP feature
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_en - enable protocol if non-zero, else disable
+*/
+define lisp_enable_disable
+{
+ u32 client_index;
+ u32 context;
+ u8 is_en;
+};
+
+/** \brief Reply for lisp enable/disable
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_enable_disable_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief configure or disable LISP PITR node
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param ls_name - locator set name
+ @param is_add - add locator set if non-zero, else disable pitr
+*/
+define lisp_pitr_set_locator_set
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 ls_name[64];
+};
+
+/** \brief Reply for lisp_pitr_set_locator_set
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_pitr_set_locator_set_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Get state of LISP RLOC probing
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define show_lisp_rloc_probe_state
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply for show_lisp_rloc_probe_state
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param is_enabled - state of RLOC probing
+*/
+define show_lisp_rloc_probe_state_reply
+{
+ u32 context;
+ i32 retval;
+ u8 is_enabled;
+};
+
+/** \brief enable/disable LISP RLOC probing
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_enabled - enable if non-zero; disable otherwise
+*/
+define lisp_rloc_probe_enable_disable
+{
+ u32 client_index;
+ u32 context;
+ u8 is_enabled;
+};
+
+/** \brief Reply for lisp_rloc_probe_enable_disable
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_rloc_probe_enable_disable_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief enable/disable LISP map-register
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_enabled - enable if non-zero; disable otherwise
+*/
+define lisp_map_register_enable_disable
+{
+ u32 client_index;
+ u32 context;
+ u8 is_enabled;
+};
+
+/** \brief Reply for lisp_map_register_enable_disable
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_map_register_enable_disable_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Get state of LISP map-register
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define show_lisp_map_register_state
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply for show_lisp_map_register_state
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param is_enabled - state of map-register
+*/
+define show_lisp_map_register_state_reply
+{
+ u32 context;
+ i32 retval;
+ u8 is_enabled;
+};
+
+/** \brief set LISP map-request mode. Depending on this configuration
+ VPP sends either source/destination or regular destination-only
+ map requests.
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param mode - new map-request mode. Supported values are:
+ 0 - destination only
+ 1 - source/destination
+*/
+define lisp_map_request_mode
+{
+ u32 client_index;
+ u32 context;
+ u8 mode;
+};
+
+/** \brief Reply for lisp_map_request_mode
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_map_request_mode_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Request for LISP map-request mode
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define show_lisp_map_request_mode
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply for show_lisp_map_request_mode
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param mode - map-request mode
+*/
+define show_lisp_map_request_mode_reply
+{
+ u32 context;
+ i32 retval;
+ u8 mode;
+};
+
+/** \brief add or delete remote static mapping
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add address if non-zero, else delete
+ @param is_src_dst - flag indicating src/dst based routing policy
+ @param del_all - if set, delete all remote mappings
+ @param vni - virtual network instance
+ @param action - negative map-reply action
+ @param eid_type -
+ 0 : ipv4
+ 1 : ipv6
+ 2 : mac
+ @param eid - dst EID
+ @param eid_len - dst EID prefix length
+ @param seid - src EID, valid only if is_src_dst is enabled
+ @param seid_len - src EID prefix length
+ @param rloc_num - number of remote locators
+ @param rlocs - remote locator records
+ Structure of remote locator:
+
+ define rloc_t {
+ u8 is_ip4;
+ u8 priority;
+ u8 weight;
+ u8 addr[16];
+ }
+*/
+define lisp_add_del_remote_mapping
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 is_src_dst;
+ u8 del_all;
+ u32 vni;
+ u8 action;
+ u8 eid_type;
+ u8 eid[16];
+ u8 eid_len;
+ u8 seid[16];
+ u8 seid_len;
+ u32 rloc_num;
+ u8 rlocs[0];
+};
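+
+/** \brief Client-side packing sketch (illustrative; n and rlocs are
+    hypothetical variables): the rlocs array carries rloc_num packed
+    records laid out back to back, each matching the rloc record
+    documented above.
+
+    mp->rloc_num = clib_host_to_net_u32 (n);
+    clib_memcpy (mp->rlocs, rlocs, n * sizeof (rlocs[0]));
+*/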
+
+/** \brief Reply for lisp_add_del_remote_mapping
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_add_del_remote_mapping_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief add or delete LISP adjacency
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add address if non-zero, else delete
+ @param vni - virtual network instance
+ @param eid_type -
+ 0 : ipv4
+ 1 : ipv6
+ 2 : mac
+ @param reid - remote EID
+ @param leid - local EID
+*/
+define lisp_add_del_adjacency
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u32 vni;
+ u8 eid_type;
+ u8 reid[16];
+ u8 leid[16];
+ u8 reid_len;
+ u8 leid_len;
+};
+
+/** \brief Reply for lisp_add_del_adjacency
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_add_del_adjacency_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief add or delete map request itr rlocs
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add address if non-zero, else delete
+ @param locator_set_name - locator set name
+*/
+define lisp_add_del_map_request_itr_rlocs
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 locator_set_name[64];
+};
+
+/** \brief Reply for lisp_add_del_map_request_itr_rlocs
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define lisp_add_del_map_request_itr_rlocs_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief map/unmap vni/bd_index to vrf
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add or delete mapping
+ @param vni - virtual network instance
+ @param dp_table - vrf id or bridge domain index
+ @param is_l2 - if non-zero dp_table is a bridge domain index, else a vrf id
+*/
+define lisp_eid_table_add_del_map
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u32 vni;
+ u32 dp_table;
+ u8 is_l2;
+};
+
+/** \brief Reply for lisp_eid_table_add_del_map
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_eid_table_add_del_map_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Request for LISP locator status
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param ls_index - locator set index
+ @param ls_name - locator set name
+ @param is_index_set - flag indicating whether ls_name or ls_index is set
+ */
+define lisp_locator_dump
+{
+ u32 client_index;
+ u32 context;
+ u32 ls_index;
+ u8 ls_name[64];
+ u8 is_index_set;
+};
+
+/** \brief LISP locator status
+ @param context - returned sender context, to match reply w/ request
+ @param local - if non-zero the locator is local
+ @param sw_if_index - sw_if_index of the locator
+ @param is_ipv6 - if non-zero the address is ipv6, else ipv4
+ @param ip_address - locator address
+ @param priority - locator priority
+ @param weight - locator weight
+ */
+define lisp_locator_details
+{
+ u32 context;
+ u8 local;
+ u32 sw_if_index;
+ u8 is_ipv6;
+ u8 ip_address[16];
+ u8 priority;
+ u8 weight;
+};
+
+/** \brief LISP locator_set status
+ @param context - sender context, to match reply w/ request
+ @param ls_index - locator set index
+ @param ls_name - name of the locator set
+ */
+define lisp_locator_set_details
+{
+ u32 context;
+ u32 ls_index;
+ u8 ls_name[64];
+};
+
+/** \brief Request for locator_set summary status
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param filter - filter type
+ Supported values:
+ 0: all locator sets
+ 1: local locator sets
+ 2: remote locator sets
+ */
+define lisp_locator_set_dump
+{
+ u32 client_index;
+ u32 context;
+ u8 filter;
+};
+
+/** \brief Dump lisp eid-table
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param locator_set_index - index of locator_set, if ~0 then the mapping
+ is negative
+ @param action - negative map request action
+ @param is_local - local if non-zero, else remote
+ @param eid_type:
+ 0 : ipv4
+ 1 : ipv6
+ 2 : mac
+ @param is_src_dst - EID is type of source/destination
+ @param eid - EID can be ip4, ip6 or mac
+ @param eid_prefix_len - prefix length
+ @param seid - source EID can be ip4, ip6 or mac
+ @param seid_prefix_len - source prefix length
+ @param vni - virtual network instance
+ @param ttl - time to live
+ @param authoritative - authoritative
+ @param key_id
+ HMAC_NO_KEY 0
+ HMAC_SHA_1_96 1
+ HMAC_SHA_256_128 2
+ @param key - secret key
+*/
+
+define lisp_eid_table_details
+{
+ u32 context;
+ u32 locator_set_index;
+ u8 action;
+ u8 is_local;
+ u8 eid_type;
+ u8 is_src_dst;
+ u32 vni;
+ u8 eid[16];
+ u8 eid_prefix_len;
+ u8 seid[16];
+ u8 seid_prefix_len;
+ u32 ttl;
+ u8 authoritative;
+ u16 key_id;
+ u8 key[64];
+};
+
+/** \brief Request for eid table summary status
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param eid_set - if non-zero request info about specific mapping
+ @param vni - virtual network instance; valid only if eid_set != 0
+ @param prefix_length - prefix length if EID is IP address;
+ valid only if eid_set != 0
+ @param eid_type - EID type; valid only if eid_set != 0
+ Supported values:
+ 0: EID is IPv4
+ 1: EID is IPv6
+ 2: EID is ethernet address
+ @param eid - endpoint identifier
+ @param filter - filter type;
+ Supported values:
+ 0: all eid
+ 1: local eid
+ 2: remote eid
+ */
+define lisp_eid_table_dump
+{
+ u32 client_index;
+ u32 context;
+ u8 eid_set;
+ u8 prefix_length;
+ u32 vni;
+ u8 eid_type;
+ u8 eid[16];
+ u8 filter;
+};
+
+/** \brief LISP adjacency
+ @param eid_type -
+ 0 : ipv4
+ 1 : ipv6
+ 2 : mac
+ @param reid - remote EID
+ @param leid - local EID
+ @param reid_prefix_len - remote EID IP prefix length
+ @param leid_prefix_len - local EID IP prefix length
+ */
+typeonly manual_print manual_endian define lisp_adjacency
+{
+ u8 eid_type;
+ u8 reid[16];
+ u8 leid[16];
+ u8 reid_prefix_len;
+ u8 leid_prefix_len;
+};
+
+/** \brief LISP adjacency reply
+ @param count - number of adjacencies
+ @param adjacencies - array of adjacencies
+ */
+manual_endian manual_print define lisp_adjacencies_get_reply
+{
+ u32 context;
+ i32 retval;
+ u32 count;
+ vl_api_lisp_adjacency_t adjacencies[count];
+};
+
+/** \brief Request for LISP adjacencies
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param vni - filter adjacencies by VNI
+ */
+define lisp_adjacencies_get
+{
+ u32 client_index;
+ u32 context;
+ u32 vni;
+};
+
+/** \brief Shows relationship between vni and vrf/bd
+ @param dp_table - VRF index or bridge domain index
+ @param vni - virtual network instance
+ */
+define lisp_eid_table_map_details
+{
+ u32 context;
+ u32 vni;
+ u32 dp_table;
+};
+
+/** \brief Request for lisp_eid_table_map_details
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_l2 - if set dump vni/bd mappings else vni/vrf
+ */
+define lisp_eid_table_map_dump
+{
+ u32 client_index;
+ u32 context;
+ u8 is_l2;
+};
+
+/** \brief Dumps all VNIs used in mappings
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ */
+define lisp_eid_table_vni_dump
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief reply to lisp_eid_table_vni_dump
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param vni - virtual network instance
+ */
+define lisp_eid_table_vni_details
+{
+ u32 client_index;
+ u32 context;
+ u32 vni;
+};
+
+/** \brief LISP map resolver status
+ @param is_ipv6 - if non-zero the address is ipv6, else ipv4
+ @param ip_address - array of address bytes
+ */
+define lisp_map_resolver_details
+{
+ u32 context;
+ u8 is_ipv6;
+ u8 ip_address[16];
+};
+
+/** \brief Request for map resolver summary status
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ */
+define lisp_map_resolver_dump
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief LISP map server details
+ @param is_ipv6 - if non-zero the address is ipv6, else ipv4
+ @param ip_address - array of address bytes
+ */
+define lisp_map_server_details
+{
+ u32 context;
+ u8 is_ipv6;
+ u8 ip_address[16];
+};
+
+/** \brief Request for map server summary status
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ */
+define lisp_map_server_dump
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Request for lisp-gpe protocol status
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define show_lisp_status
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Status of lisp, enable or disable
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param feature_status - lisp enabled if non-zero, else disabled
+ @param gpe_status - gpe enabled if non-zero, else disabled
+*/
+define show_lisp_status_reply
+{
+ u32 context;
+ i32 retval;
+ u8 feature_status;
+ u8 gpe_status;
+};
+
+/** \brief Get LISP map request itr rlocs status
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ */
+define lisp_get_map_request_itr_rlocs
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply for lisp_get_map_request_itr_rlocs
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param locator_set_name - name of the locator_set
+ */
+define lisp_get_map_request_itr_rlocs_reply
+{
+ u32 context;
+ i32 retval;
+ u8 locator_set_name[64];
+};
+
+/** \brief Request for lisp pitr status
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define show_lisp_pitr
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Status of lisp pitr, enable or disable
+ @param context - sender context, to match reply w/ request
+ @param status - lisp pitr enable if non-zero, else disable
+ @param locator_set_name - name of the locator_set
+*/
+define show_lisp_pitr_reply
+{
+ u32 context;
+ i32 retval;
+ u8 status;
+ u8 locator_set_name[64];
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
+ \ No newline at end of file
diff --git a/src/vnet/lisp-cp/lisp_api.c b/src/vnet/lisp-cp/lisp_api.c
new file mode 100644
index 00000000000..d3fc4627c5f
--- /dev/null
+++ b/src/vnet/lisp-cp/lisp_api.c
@@ -0,0 +1,1257 @@
+/*
+ *------------------------------------------------------------------
+ * lisp_api.c - lisp api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/lisp-cp/control.h>
+#include <vnet/lisp-gpe/lisp_gpe.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(LISP_ADD_DEL_LOCATOR_SET, lisp_add_del_locator_set) \
+_(LISP_ADD_DEL_LOCATOR, lisp_add_del_locator) \
+_(LISP_ADD_DEL_LOCAL_EID, lisp_add_del_local_eid) \
+_(LISP_ADD_DEL_MAP_RESOLVER, lisp_add_del_map_resolver) \
+_(LISP_ADD_DEL_MAP_SERVER, lisp_add_del_map_server) \
+_(LISP_ENABLE_DISABLE, lisp_enable_disable) \
+_(LISP_RLOC_PROBE_ENABLE_DISABLE, lisp_rloc_probe_enable_disable) \
+_(LISP_MAP_REGISTER_ENABLE_DISABLE, lisp_map_register_enable_disable) \
+_(LISP_ADD_DEL_REMOTE_MAPPING, lisp_add_del_remote_mapping) \
+_(LISP_ADD_DEL_ADJACENCY, lisp_add_del_adjacency) \
+_(LISP_PITR_SET_LOCATOR_SET, lisp_pitr_set_locator_set) \
+_(LISP_MAP_REQUEST_MODE, lisp_map_request_mode) \
+_(LISP_EID_TABLE_ADD_DEL_MAP, lisp_eid_table_add_del_map) \
+_(LISP_LOCATOR_SET_DUMP, lisp_locator_set_dump) \
+_(LISP_LOCATOR_DUMP, lisp_locator_dump) \
+_(LISP_EID_TABLE_DUMP, lisp_eid_table_dump) \
+_(LISP_MAP_RESOLVER_DUMP, lisp_map_resolver_dump) \
+_(LISP_MAP_SERVER_DUMP, lisp_map_server_dump) \
+_(LISP_EID_TABLE_MAP_DUMP, lisp_eid_table_map_dump) \
+_(LISP_EID_TABLE_VNI_DUMP, lisp_eid_table_vni_dump) \
+_(LISP_ADJACENCIES_GET, lisp_adjacencies_get) \
+_(SHOW_LISP_RLOC_PROBE_STATE, show_lisp_rloc_probe_state) \
+_(SHOW_LISP_MAP_REGISTER_STATE, show_lisp_map_register_state) \
+_(SHOW_LISP_STATUS, show_lisp_status) \
+_(LISP_ADD_DEL_MAP_REQUEST_ITR_RLOCS, \
+ lisp_add_del_map_request_itr_rlocs) \
+_(LISP_GET_MAP_REQUEST_ITR_RLOCS, lisp_get_map_request_itr_rlocs) \
+_(SHOW_LISP_PITR, show_lisp_pitr) \
+_(SHOW_LISP_MAP_REQUEST_MODE, show_lisp_map_request_mode) \
+
+/** Used for transferring locators via VPP API */
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u8 is_ip4; /**< is locator an IPv4 address */
+ u8 priority; /**< locator priority */
+ u8 weight; /**< locator weight */
+ u8 addr[16]; /**< IPv4/IPv6 address */
+}) rloc_t;
+/* *INDENT-ON* */
+
+/** Used for transferring locators via VPP API */
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u32 sw_if_index; /**< locator sw_if_index */
+ u8 priority; /**< locator priority */
+ u8 weight; /**< locator weight */
+}) ls_locator_t;
+/* *INDENT-ON* */
+
+static locator_t *
+unformat_lisp_locs (void *rmt_locs, u32 rloc_num)
+{
+ u32 i;
+ locator_t *locs = 0, loc;
+ rloc_t *r;
+
+ for (i = 0; i < rloc_num; i++)
+ {
+ /* remote locators */
+ r = &((rloc_t *) rmt_locs)[i];
+ memset (&loc, 0, sizeof (loc));
+ gid_address_ip_set (&loc.address, &r->addr, r->is_ip4 ? IP4 : IP6);
+
+ loc.priority = r->priority;
+ loc.weight = r->weight;
+
+ vec_add1 (locs, loc);
+ }
+ return locs;
+}
+
+static void
+vl_api_lisp_add_del_locator_set_t_handler (vl_api_lisp_add_del_locator_set_t *
+ mp)
+{
+ vl_api_lisp_add_del_locator_set_reply_t *rmp;
+ int rv = 0;
+ vnet_lisp_add_del_locator_set_args_t _a, *a = &_a;
+ locator_t locator;
+ ls_locator_t *ls_loc;
+ u32 ls_index = ~0, locator_num;
+ u8 *locator_name = NULL;
+ int i;
+
+ memset (a, 0, sizeof (a[0]));
+
+ locator_name = format (0, "%s", mp->locator_set_name);
+
+ a->name = locator_name;
+ a->is_add = mp->is_add;
+ a->local = 1;
+ locator_num = clib_net_to_host_u32 (mp->locator_num);
+
+ memset (&locator, 0, sizeof (locator));
+ for (i = 0; i < locator_num; i++)
+ {
+ ls_loc = &((ls_locator_t *) mp->locators)[i];
+ VALIDATE_SW_IF_INDEX (ls_loc);
+
+ locator.sw_if_index = ntohl (ls_loc->sw_if_index);
+ locator.priority = ls_loc->priority;
+ locator.weight = ls_loc->weight;
+ locator.local = 1;
+ vec_add1 (a->locators, locator);
+ }
+
+ rv = vnet_lisp_add_del_locator_set (a, &ls_index);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ vec_free (locator_name);
+ vec_free (a->locators);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_LISP_ADD_DEL_LOCATOR_SET_REPLY,
+ ({
+ rmp->ls_index = clib_host_to_net_u32 (ls_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_lisp_add_del_locator_t_handler (vl_api_lisp_add_del_locator_t * mp)
+{
+ vl_api_lisp_add_del_locator_reply_t *rmp;
+ int rv = 0;
+ locator_t locator, *locators = NULL;
+ vnet_lisp_add_del_locator_set_args_t _a, *a = &_a;
+ u32 ls_index = ~0;
+ u8 *locator_name = NULL;
+
+ memset (&locator, 0, sizeof (locator));
+ memset (a, 0, sizeof (a[0]));
+
+ locator.sw_if_index = ntohl (mp->sw_if_index);
+ locator.priority = mp->priority;
+ locator.weight = mp->weight;
+ locator.local = 1;
+ vec_add1 (locators, locator);
+
+ locator_name = format (0, "%s", mp->locator_set_name);
+
+ a->name = locator_name;
+ a->locators = locators;
+ a->is_add = mp->is_add;
+ a->local = 1;
+
+ rv = vnet_lisp_add_del_locator (a, NULL, &ls_index);
+
+ vec_free (locators);
+ vec_free (locator_name);
+
+ REPLY_MACRO (VL_API_LISP_ADD_DEL_LOCATOR_REPLY);
+}
+
+static int
+unformat_lisp_eid_api (gid_address_t * dst, u32 vni, u8 type, void *src,
+ u8 len)
+{
+ switch (type)
+ {
+ case 0: /* ipv4 */
+ gid_address_type (dst) = GID_ADDR_IP_PREFIX;
+ gid_address_ip_set (dst, src, IP4);
+ gid_address_ippref_len (dst) = len;
+ ip_prefix_normalize (&gid_address_ippref (dst));
+ break;
+ case 1: /* ipv6 */
+ gid_address_type (dst) = GID_ADDR_IP_PREFIX;
+ gid_address_ip_set (dst, src, IP6);
+ gid_address_ippref_len (dst) = len;
+ ip_prefix_normalize (&gid_address_ippref (dst));
+ break;
+ case 2: /* l2 mac */
+ gid_address_type (dst) = GID_ADDR_MAC;
+ clib_memcpy (&gid_address_mac (dst), src, 6);
+ break;
+ default:
+ /* unknown type */
+ return VNET_API_ERROR_INVALID_VALUE;
+ }
+
+ gid_address_vni (dst) = vni;
+
+ return 0;
+}
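+
+/* Decoding example (illustrative): eid_type 0 with eid = 10.0.0.0 and
+ * len = 8 yields a GID_ADDR_IP_PREFIX gid for 10.0.0.0/8 in the given
+ * vni; eid_type 2 ignores len and copies six MAC bytes instead. */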
+
+static void
+vl_api_lisp_add_del_local_eid_t_handler (vl_api_lisp_add_del_local_eid_t * mp)
+{
+ vl_api_lisp_add_del_local_eid_reply_t *rmp;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ int rv = 0;
+ gid_address_t _eid, *eid = &_eid;
+ uword *p = NULL;
+ u32 locator_set_index = ~0, map_index = ~0;
+ vnet_lisp_add_del_mapping_args_t _a, *a = &_a;
+ u8 *name = NULL, *key = NULL;
+ memset (a, 0, sizeof (a[0]));
+ memset (eid, 0, sizeof (eid[0]));
+
+ rv = unformat_lisp_eid_api (eid, clib_net_to_host_u32 (mp->vni),
+ mp->eid_type, mp->eid, mp->prefix_len);
+ if (rv)
+ goto out;
+
+ name = format (0, "%s", mp->locator_set_name);
+ p = hash_get_mem (lcm->locator_set_index_by_name, name);
+ if (!p)
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto out;
+ }
+ locator_set_index = p[0];
+
+ if (*mp->key)
+ key = format (0, "%s", mp->key);
+
+ /* XXX treat batch configuration */
+ a->is_add = mp->is_add;
+ gid_address_copy (&a->eid, eid);
+ a->locator_set_index = locator_set_index;
+ a->local = 1;
+ a->key = key;
+ a->key_id = clib_net_to_host_u16 (mp->key_id);
+
+ rv = vnet_lisp_add_del_local_mapping (a, &map_index);
+
+out:
+ vec_free (name);
+ vec_free (key);
+ gid_address_free (&a->eid);
+
+ REPLY_MACRO (VL_API_LISP_ADD_DEL_LOCAL_EID_REPLY);
+}
+
+static void
+ vl_api_lisp_eid_table_add_del_map_t_handler
+ (vl_api_lisp_eid_table_add_del_map_t * mp)
+{
+ vl_api_lisp_eid_table_add_del_map_reply_t *rmp;
+ int rv = 0;
+ rv = vnet_lisp_eid_table_map (clib_net_to_host_u32 (mp->vni),
+ clib_net_to_host_u32 (mp->dp_table),
+ mp->is_l2, mp->is_add);
+ REPLY_MACRO (VL_API_LISP_EID_TABLE_ADD_DEL_MAP_REPLY);
+}
+
+static void
+vl_api_lisp_add_del_map_server_t_handler (vl_api_lisp_add_del_map_server_t
+ * mp)
+{
+ vl_api_lisp_add_del_map_server_reply_t *rmp;
+ int rv = 0;
+ ip_address_t addr;
+
+ memset (&addr, 0, sizeof (addr));
+
+ ip_address_set (&addr, mp->ip_address, mp->is_ipv6 ? IP6 : IP4);
+ rv = vnet_lisp_add_del_map_server (&addr, mp->is_add);
+
+ REPLY_MACRO (VL_API_LISP_ADD_DEL_MAP_SERVER_REPLY);
+}
+
+static void
+vl_api_lisp_add_del_map_resolver_t_handler (vl_api_lisp_add_del_map_resolver_t
+ * mp)
+{
+ vl_api_lisp_add_del_map_resolver_reply_t *rmp;
+ int rv = 0;
+ vnet_lisp_add_del_map_resolver_args_t _a, *a = &_a;
+
+ memset (a, 0, sizeof (a[0]));
+
+ a->is_add = mp->is_add;
+ ip_address_set (&a->address, mp->ip_address, mp->is_ipv6 ? IP6 : IP4);
+
+ rv = vnet_lisp_add_del_map_resolver (a);
+
+ REPLY_MACRO (VL_API_LISP_ADD_DEL_MAP_RESOLVER_REPLY);
+}
+
+static void
+ vl_api_lisp_map_register_enable_disable_t_handler
+ (vl_api_lisp_map_register_enable_disable_t * mp)
+{
+ vl_api_lisp_map_register_enable_disable_reply_t *rmp;
+ int rv = 0;
+
+ rv = vnet_lisp_map_register_enable_disable (mp->is_enabled);
+ REPLY_MACRO (VL_API_LISP_MAP_REGISTER_ENABLE_DISABLE_REPLY);
+}
+
+static void
+ vl_api_lisp_rloc_probe_enable_disable_t_handler
+ (vl_api_lisp_rloc_probe_enable_disable_t * mp)
+{
+ vl_api_lisp_rloc_probe_enable_disable_reply_t *rmp;
+ int rv = 0;
+
+ rv = vnet_lisp_rloc_probe_enable_disable (mp->is_enabled);
+ REPLY_MACRO (VL_API_LISP_RLOC_PROBE_ENABLE_DISABLE_REPLY);
+}
+
+static void
+vl_api_lisp_enable_disable_t_handler (vl_api_lisp_enable_disable_t * mp)
+{
+ vl_api_lisp_enable_disable_reply_t *rmp;
+ int rv = 0;
+
+ vnet_lisp_enable_disable (mp->is_en);
+ REPLY_MACRO (VL_API_LISP_ENABLE_DISABLE_REPLY);
+}
+
+static void
+ vl_api_show_lisp_map_request_mode_t_handler
+ (vl_api_show_lisp_map_request_mode_t * mp)
+{
+ int rv = 0;
+ vl_api_show_lisp_map_request_mode_reply_t *rmp;
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_SHOW_LISP_MAP_REQUEST_MODE_REPLY,
+ ({
+ rmp->mode = vnet_lisp_get_map_request_mode ();
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_lisp_map_request_mode_t_handler (vl_api_lisp_map_request_mode_t * mp)
+{
+ vl_api_lisp_map_request_mode_reply_t *rmp;
+ int rv = 0;
+
+ rv = vnet_lisp_set_map_request_mode (mp->mode);
+
+ REPLY_MACRO (VL_API_LISP_MAP_REQUEST_MODE_REPLY);
+}
+
+static void
+vl_api_lisp_pitr_set_locator_set_t_handler (vl_api_lisp_pitr_set_locator_set_t
+ * mp)
+{
+ vl_api_lisp_pitr_set_locator_set_reply_t *rmp;
+ int rv = 0;
+ u8 *ls_name = 0;
+
+  /* make sure we get a proper C-string */
+  mp->ls_name[sizeof (mp->ls_name) - 1] = 0;
+  ls_name = format (0, "%s", mp->ls_name);
+ rv = vnet_lisp_pitr_set_locator_set (ls_name, mp->is_add);
+ vec_free (ls_name);
+
+ REPLY_MACRO (VL_API_LISP_PITR_SET_LOCATOR_SET_REPLY);
+}
+
+static void
+ vl_api_lisp_add_del_map_request_itr_rlocs_t_handler
+ (vl_api_lisp_add_del_map_request_itr_rlocs_t * mp)
+{
+ vl_api_lisp_add_del_map_request_itr_rlocs_reply_t *rmp;
+ int rv = 0;
+ u8 *locator_set_name = NULL;
+ vnet_lisp_add_del_mreq_itr_rloc_args_t _a, *a = &_a;
+
+  /* make sure we get a proper C-string */
+  mp->locator_set_name[sizeof (mp->locator_set_name) - 1] = 0;
+  locator_set_name = format (0, "%s", mp->locator_set_name);
+
+ a->is_add = mp->is_add;
+ a->locator_set_name = locator_set_name;
+
+ rv = vnet_lisp_add_del_mreq_itr_rlocs (a);
+
+ vec_free (locator_set_name);
+
+ REPLY_MACRO (VL_API_LISP_ADD_DEL_MAP_REQUEST_ITR_RLOCS_REPLY);
+}
+
+static void
+ vl_api_lisp_add_del_remote_mapping_t_handler
+ (vl_api_lisp_add_del_remote_mapping_t * mp)
+{
+ locator_t *rlocs = 0;
+ vl_api_lisp_add_del_remote_mapping_reply_t *rmp;
+ int rv = 0;
+ gid_address_t _eid, *eid = &_eid;
+ u32 rloc_num = clib_net_to_host_u32 (mp->rloc_num);
+
+ memset (eid, 0, sizeof (eid[0]));
+
+ rv = unformat_lisp_eid_api (eid, clib_net_to_host_u32 (mp->vni),
+ mp->eid_type, mp->eid, mp->eid_len);
+ if (rv)
+ goto send_reply;
+
+ rlocs = unformat_lisp_locs (mp->rlocs, rloc_num);
+
+ if (!mp->is_add)
+ {
+      vnet_lisp_add_del_adjacency_args_t _a, *a = &_a;
+      memset (a, 0, sizeof (a[0]));
+      gid_address_copy (&a->reid, eid);
+ a->is_add = 0;
+ rv = vnet_lisp_add_del_adjacency (a);
+ if (rv)
+ {
+ goto out;
+ }
+ }
+
+ /* NOTE: for now this works as a static remote mapping, i.e.,
+ * not authoritative and ttl infinite. */
+ rv = vnet_lisp_add_del_mapping (eid, rlocs, mp->action, 0, ~0,
+ mp->is_add, 1 /* is_static */ , 0);
+
+ if (mp->del_all)
+ vnet_lisp_clear_all_remote_adjacencies ();
+
+out:
+ vec_free (rlocs);
+send_reply:
+ REPLY_MACRO (VL_API_LISP_ADD_DEL_REMOTE_MAPPING_REPLY);
+}
+
+static void
+vl_api_lisp_add_del_adjacency_t_handler (vl_api_lisp_add_del_adjacency_t * mp)
+{
+ vl_api_lisp_add_del_adjacency_reply_t *rmp;
+ vnet_lisp_add_del_adjacency_args_t _a, *a = &_a;
+
+ int rv = 0;
+ memset (a, 0, sizeof (a[0]));
+
+ rv = unformat_lisp_eid_api (&a->leid, clib_net_to_host_u32 (mp->vni),
+ mp->eid_type, mp->leid, mp->leid_len);
+ rv |= unformat_lisp_eid_api (&a->reid, clib_net_to_host_u32 (mp->vni),
+ mp->eid_type, mp->reid, mp->reid_len);
+
+ if (rv)
+ goto send_reply;
+
+ a->is_add = mp->is_add;
+ rv = vnet_lisp_add_del_adjacency (a);
+
+send_reply:
+ REPLY_MACRO (VL_API_LISP_ADD_DEL_ADJACENCY_REPLY);
+}
+
+static void
+send_lisp_locator_details (lisp_cp_main_t * lcm,
+ locator_t * loc,
+ unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_lisp_locator_details_t *rmp;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_LISP_LOCATOR_DETAILS);
+ rmp->context = context;
+
+ rmp->local = loc->local;
+ if (loc->local)
+ {
+      rmp->sw_if_index = htonl (loc->sw_if_index);
+ }
+ else
+ {
+ rmp->is_ipv6 = gid_address_ip_version (&loc->address);
+ ip_address_copy_addr (rmp->ip_address, &gid_address_ip (&loc->address));
+ }
+ rmp->priority = loc->priority;
+ rmp->weight = loc->weight;
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+vl_api_lisp_locator_dump_t_handler (vl_api_lisp_locator_dump_t * mp)
+{
+ u8 *ls_name = 0;
+ unix_shared_memory_queue_t *q = 0;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ locator_set_t *lsit = 0;
+ locator_t *loc = 0;
+ u32 ls_index = ~0, *locit = 0;
+ uword *p = 0;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ if (mp->is_index_set)
+    ls_index = ntohl (mp->ls_index);
+ else
+ {
+ /* make sure we get a proper C-string */
+ mp->ls_name[sizeof (mp->ls_name) - 1] = 0;
+ ls_name = format (0, "%s", mp->ls_name);
+ p = hash_get_mem (lcm->locator_set_index_by_name, ls_name);
+ if (!p)
+ goto out;
+ ls_index = p[0];
+ }
+
+ if (pool_is_free_index (lcm->locator_set_pool, ls_index))
+    goto out;
+
+ lsit = pool_elt_at_index (lcm->locator_set_pool, ls_index);
+
+ vec_foreach (locit, lsit->locator_indices)
+ {
+ loc = pool_elt_at_index (lcm->locator_pool, locit[0]);
+ send_lisp_locator_details (lcm, loc, q, mp->context);
+ };
+out:
+ vec_free (ls_name);
+}
+
+static void
+send_lisp_locator_set_details (lisp_cp_main_t * lcm,
+ locator_set_t * lsit,
+ unix_shared_memory_queue_t * q,
+ u32 context, u32 ls_index)
+{
+ vl_api_lisp_locator_set_details_t *rmp;
+ u8 *str = 0;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_LISP_LOCATOR_SET_DETAILS);
+ rmp->context = context;
+
+ rmp->ls_index = htonl (ls_index);
+ if (lsit->local)
+ {
+ ASSERT (lsit->name != NULL);
+      strncpy ((char *) rmp->ls_name, (char *) lsit->name,
+               clib_min (vec_len (lsit->name), ARRAY_LEN (rmp->ls_name) - 1));
+ }
+ else
+ {
+ str = format (0, "<remote-%d>", ls_index);
+ strncpy ((char *) rmp->ls_name, (char *) str, vec_len (str));
+ vec_free (str);
+ }
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+vl_api_lisp_locator_set_dump_t_handler (vl_api_lisp_locator_set_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q = NULL;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ locator_set_t *lsit = NULL;
+ u8 filter;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
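+  /* filter: 0 => all sets, 1 => local sets only, 2 => remote sets only */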
+ filter = mp->filter;
+ /* *INDENT-OFF* */
+ pool_foreach (lsit, lcm->locator_set_pool,
+ ({
+ if (filter && !((1 == filter && lsit->local) ||
+ (2 == filter && !lsit->local)))
+ {
+ continue;
+ }
+ send_lisp_locator_set_details (lcm, lsit, q, mp->context,
+ lsit - lcm->locator_set_pool);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+lisp_fid_put_api (u8 * dst, fid_address_t * src, u8 * prefix_length)
+{
+ ASSERT (prefix_length);
+ ip_prefix_t *ippref = &fid_addr_ippref (src);
+
+ switch (fid_addr_type (src))
+ {
+ case FID_ADDR_IP_PREF:
+ if (ip_prefix_version (ippref) == IP4)
+ clib_memcpy (dst, &ip_prefix_v4 (ippref), 4);
+ else
+ clib_memcpy (dst, &ip_prefix_v6 (ippref), 16);
+ prefix_length[0] = ip_prefix_len (ippref);
+ break;
+
+ case FID_ADDR_MAC:
+ prefix_length[0] = 0;
+ clib_memcpy (dst, fid_addr_mac (src), 6);
+ break;
+
+ default:
+ clib_warning ("Unknown FID type %d!", fid_addr_type (src));
+ break;
+ }
+}
+
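+/* map a FID type onto the API eid_type encoding:
+ * 0 => ipv4 prefix, 1 => ipv6 prefix, 2 => l2 mac */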
+static u8
+fid_type_to_api_type (fid_address_t * fid)
+{
+ ip_prefix_t *ippref;
+
+ switch (fid_addr_type (fid))
+ {
+ case FID_ADDR_IP_PREF:
+ ippref = &fid_addr_ippref (fid);
+ if (ip_prefix_version (ippref) == IP4)
+ return 0;
+ else if (ip_prefix_version (ippref) == IP6)
+ return 1;
+ else
+ return ~0;
+
+ case FID_ADDR_MAC:
+ return 2;
+ }
+
+ return ~0;
+}
+
+static void
+send_lisp_eid_table_details (mapping_t * mapit,
+ unix_shared_memory_queue_t * q,
+ u32 context, u8 filter)
+{
+ fid_address_t *fid;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ locator_set_t *ls = 0;
+ vl_api_lisp_eid_table_details_t *rmp = NULL;
+ gid_address_t *gid = NULL;
+ u8 *mac = 0;
+ ip_prefix_t *ip_prefix = NULL;
+
+ switch (filter)
+ {
+ case 0: /* all mappings */
+ break;
+
+ case 1: /* local only */
+ if (!mapit->local)
+ return;
+ break;
+ case 2: /* remote only */
+ if (mapit->local)
+ return;
+ break;
+ default:
+ clib_warning ("Filter error, unknown filter: %d", filter);
+ return;
+ }
+
+ gid = &mapit->eid;
+ ip_prefix = &gid_address_ippref (gid);
+ mac = gid_address_mac (gid);
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_LISP_EID_TABLE_DETAILS);
+
+ ls = pool_elt_at_index (lcm->locator_set_pool, mapit->locator_set_index);
+ if (vec_len (ls->locator_indices) == 0)
+ rmp->locator_set_index = ~0;
+ else
+ rmp->locator_set_index = clib_host_to_net_u32 (mapit->locator_set_index);
+
+ rmp->is_local = mapit->local;
+ rmp->ttl = clib_host_to_net_u32 (mapit->ttl);
+ rmp->action = mapit->action;
+ rmp->authoritative = mapit->authoritative;
+
+ switch (gid_address_type (gid))
+ {
+ case GID_ADDR_SRC_DST:
+ rmp->is_src_dst = 1;
+ fid = &gid_address_sd_src (gid);
+ rmp->eid_type = fid_type_to_api_type (fid);
+ lisp_fid_put_api (rmp->seid, &gid_address_sd_src (gid),
+ &rmp->seid_prefix_len);
+ lisp_fid_put_api (rmp->eid, &gid_address_sd_dst (gid),
+ &rmp->eid_prefix_len);
+ break;
+ case GID_ADDR_IP_PREFIX:
+ rmp->eid_prefix_len = ip_prefix_len (ip_prefix);
+ if (ip_prefix_version (ip_prefix) == IP4)
+ {
+ rmp->eid_type = 0; /* ipv4 type */
+ clib_memcpy (rmp->eid, &ip_prefix_v4 (ip_prefix),
+ sizeof (ip_prefix_v4 (ip_prefix)));
+ }
+ else
+ {
+ rmp->eid_type = 1; /* ipv6 type */
+ clib_memcpy (rmp->eid, &ip_prefix_v6 (ip_prefix),
+ sizeof (ip_prefix_v6 (ip_prefix)));
+ }
+ break;
+ case GID_ADDR_MAC:
+ rmp->eid_type = 2; /* l2 mac type */
+ clib_memcpy (rmp->eid, mac, 6);
+ break;
+ default:
+ ASSERT (0);
+ }
+ rmp->context = context;
+ rmp->vni = clib_host_to_net_u32 (gid_address_vni (gid));
+ rmp->key_id = clib_host_to_net_u16 (mapit->key_id);
+  clib_memcpy (rmp->key, mapit->key, vec_len (mapit->key));
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+vl_api_lisp_eid_table_dump_t_handler (vl_api_lisp_eid_table_dump_t * mp)
+{
+ u32 mi;
+ unix_shared_memory_queue_t *q = NULL;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ mapping_t *mapit = NULL;
+ gid_address_t _eid, *eid = &_eid;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ if (mp->eid_set)
+ {
+ memset (eid, 0, sizeof (*eid));
+
+      if (unformat_lisp_eid_api (eid, clib_net_to_host_u32 (mp->vni),
+                                 mp->eid_type, mp->eid, mp->prefix_length))
+        return;
+
+ mi = gid_dictionary_lookup (&lcm->mapping_index_by_gid, eid);
+ if ((u32) ~ 0 == mi)
+ return;
+
+ mapit = pool_elt_at_index (lcm->mapping_pool, mi);
+ send_lisp_eid_table_details (mapit, q, mp->context,
+ 0 /* ignore filter */ );
+ }
+ else
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (mapit, lcm->mapping_pool,
+ ({
+ send_lisp_eid_table_details(mapit, q, mp->context,
+ mp->filter);
+ }));
+ /* *INDENT-ON* */
+ }
+}
+
+static void
+send_lisp_map_server_details (ip_address_t * ip,
+ unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_lisp_map_server_details_t *rmp = NULL;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_LISP_MAP_SERVER_DETAILS);
+
+ switch (ip_addr_version (ip))
+ {
+ case IP4:
+ rmp->is_ipv6 = 0;
+ clib_memcpy (rmp->ip_address, &ip_addr_v4 (ip),
+ sizeof (ip_addr_v4 (ip)));
+ break;
+
+ case IP6:
+ rmp->is_ipv6 = 1;
+ clib_memcpy (rmp->ip_address, &ip_addr_v6 (ip),
+ sizeof (ip_addr_v6 (ip)));
+ break;
+
+ default:
+ ASSERT (0);
+ }
+ rmp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+vl_api_lisp_map_server_dump_t_handler (vl_api_lisp_map_server_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q = NULL;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ lisp_msmr_t *mr;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ vec_foreach (mr, lcm->map_servers)
+ {
+ send_lisp_map_server_details (&mr->address, q, mp->context);
+ }
+}
+
+static void
+send_lisp_map_resolver_details (ip_address_t * ip,
+ unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_lisp_map_resolver_details_t *rmp = NULL;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_LISP_MAP_RESOLVER_DETAILS);
+
+ switch (ip_addr_version (ip))
+ {
+ case IP4:
+ rmp->is_ipv6 = 0;
+ clib_memcpy (rmp->ip_address, &ip_addr_v4 (ip),
+ sizeof (ip_addr_v4 (ip)));
+ break;
+
+ case IP6:
+ rmp->is_ipv6 = 1;
+ clib_memcpy (rmp->ip_address, &ip_addr_v6 (ip),
+ sizeof (ip_addr_v6 (ip)));
+ break;
+
+ default:
+ ASSERT (0);
+ }
+ rmp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+vl_api_lisp_map_resolver_dump_t_handler (vl_api_lisp_map_resolver_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q = NULL;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ lisp_msmr_t *mr;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ vec_foreach (mr, lcm->map_resolvers)
+ {
+ send_lisp_map_resolver_details (&mr->address, q, mp->context);
+ }
+}
+
+static void
+send_eid_table_map_pair (hash_pair_t * p,
+ unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_lisp_eid_table_map_details_t *rmp = NULL;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_LISP_EID_TABLE_MAP_DETAILS);
+
+ rmp->vni = clib_host_to_net_u32 (p->key);
+ rmp->dp_table = clib_host_to_net_u32 (p->value[0]);
+ rmp->context = context;
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+vl_api_lisp_eid_table_map_dump_t_handler (vl_api_lisp_eid_table_map_dump_t *
+ mp)
+{
+ unix_shared_memory_queue_t *q = NULL;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ hash_pair_t *p;
+ uword *vni_table = 0;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ if (mp->is_l2)
+ {
+ vni_table = lcm->bd_id_by_vni;
+ }
+ else
+ {
+ vni_table = lcm->table_id_by_vni;
+ }
+
+ /* *INDENT-OFF* */
+ hash_foreach_pair (p, vni_table,
+ ({
+ send_eid_table_map_pair (p, q, mp->context);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+send_eid_table_vni (u32 vni, unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_lisp_eid_table_vni_details_t *rmp = 0;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_LISP_EID_TABLE_VNI_DETAILS);
+ rmp->context = context;
+ rmp->vni = clib_host_to_net_u32 (vni);
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+lisp_adjacency_copy (vl_api_lisp_adjacency_t * dst, lisp_adjacency_t * adjs)
+{
+ lisp_adjacency_t *adj;
+ vl_api_lisp_adjacency_t a;
+ u32 i, n = vec_len (adjs);
+
+ for (i = 0; i < n; i++)
+ {
+ adj = vec_elt_at_index (adjs, i);
+ memset (&a, 0, sizeof (a));
+
+ switch (gid_address_type (&adj->reid))
+ {
+ case GID_ADDR_IP_PREFIX:
+ a.reid_prefix_len = gid_address_ippref_len (&adj->reid);
+ a.leid_prefix_len = gid_address_ippref_len (&adj->leid);
+ if (gid_address_ip_version (&adj->reid) == IP4)
+ {
+ a.eid_type = 0; /* ipv4 type */
+ clib_memcpy (a.reid, &gid_address_ip (&adj->reid), 4);
+ clib_memcpy (a.leid, &gid_address_ip (&adj->leid), 4);
+ }
+ else
+ {
+ a.eid_type = 1; /* ipv6 type */
+ clib_memcpy (a.reid, &gid_address_ip (&adj->reid), 16);
+ clib_memcpy (a.leid, &gid_address_ip (&adj->leid), 16);
+ }
+ break;
+ case GID_ADDR_MAC:
+ a.eid_type = 2; /* l2 mac type */
+ mac_copy (a.reid, gid_address_mac (&adj->reid));
+ mac_copy (a.leid, gid_address_mac (&adj->leid));
+ break;
+ default:
+ ASSERT (0);
+ }
+ dst[i] = a;
+ }
+}
+
+static void
+ vl_api_show_lisp_rloc_probe_state_t_handler
+ (vl_api_show_lisp_rloc_probe_state_t * mp)
+{
+ vl_api_show_lisp_rloc_probe_state_reply_t *rmp = 0;
+ int rv = 0;
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_SHOW_LISP_RLOC_PROBE_STATE_REPLY,
+ {
+ rmp->is_enabled = vnet_lisp_rloc_probe_state_get ();
+ });
+ /* *INDENT-ON* */
+}
+
+static void
+ vl_api_show_lisp_map_register_state_t_handler
+ (vl_api_show_lisp_map_register_state_t * mp)
+{
+ vl_api_show_lisp_map_register_state_reply_t *rmp = 0;
+ int rv = 0;
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_SHOW_LISP_MAP_REGISTER_STATE_REPLY,
+ {
+ rmp->is_enabled = vnet_lisp_map_register_state_get ();
+ });
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_lisp_adjacencies_get_t_handler (vl_api_lisp_adjacencies_get_t * mp)
+{
+ vl_api_lisp_adjacencies_get_reply_t *rmp = 0;
+ lisp_adjacency_t *adjs = 0;
+ int rv = 0;
+ vl_api_lisp_adjacency_t a;
+ u32 size = ~0;
+ u32 vni = clib_net_to_host_u32 (mp->vni);
+
+ adjs = vnet_lisp_adjacencies_get_by_vni (vni);
+ size = vec_len (adjs) * sizeof (a);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO4 (VL_API_LISP_ADJACENCIES_GET_REPLY, size,
+ {
+ rmp->count = clib_host_to_net_u32 (vec_len (adjs));
+ lisp_adjacency_copy (rmp->adjacencies, adjs);
+ });
+ /* *INDENT-ON* */
+
+ vec_free (adjs);
+}
+
+static void
+vl_api_lisp_eid_table_vni_dump_t_handler (vl_api_lisp_eid_table_vni_dump_t *
+ mp)
+{
+ hash_pair_t *p;
+ u32 *vnis = 0;
+ unix_shared_memory_queue_t *q = 0;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ /* *INDENT-OFF* */
+ hash_foreach_pair (p, lcm->table_id_by_vni,
+ ({
+ hash_set (vnis, p->key, 0);
+ }));
+
+ hash_foreach_pair (p, lcm->bd_id_by_vni,
+ ({
+ hash_set (vnis, p->key, 0);
+ }));
+
+ hash_foreach_pair (p, vnis,
+ ({
+ send_eid_table_vni (p->key, q, mp->context);
+ }));
+ /* *INDENT-ON* */
+
+ hash_free (vnis);
+}
+
+static void
+vl_api_show_lisp_status_t_handler (vl_api_show_lisp_status_t * mp)
+{
+ unix_shared_memory_queue_t *q = NULL;
+ vl_api_show_lisp_status_reply_t *rmp = NULL;
+ int rv = 0;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_SHOW_LISP_STATUS_REPLY,
+ ({
+ rmp->gpe_status = vnet_lisp_gpe_enable_disable_status ();
+ rmp->feature_status = vnet_lisp_enable_disable_status ();
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+ vl_api_lisp_get_map_request_itr_rlocs_t_handler
+ (vl_api_lisp_get_map_request_itr_rlocs_t * mp)
+{
+ unix_shared_memory_queue_t *q = NULL;
+ vl_api_lisp_get_map_request_itr_rlocs_reply_t *rmp = NULL;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ locator_set_t *loc_set = 0;
+ u8 *tmp_str = 0;
+ int rv = 0;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ if (~0 == lcm->mreq_itr_rlocs)
+ {
+ tmp_str = format (0, " ");
+ }
+ else
+ {
+ loc_set =
+ pool_elt_at_index (lcm->locator_set_pool, lcm->mreq_itr_rlocs);
+ tmp_str = format (0, "%s", loc_set->name);
+ }
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_LISP_GET_MAP_REQUEST_ITR_RLOCS_REPLY,
+ ({
+ strncpy((char *) rmp->locator_set_name, (char *) tmp_str,
+ ARRAY_LEN(rmp->locator_set_name) - 1);
+ }));
+ /* *INDENT-ON* */
+
+ vec_free (tmp_str);
+}
+
+static void
+vl_api_show_lisp_pitr_t_handler (vl_api_show_lisp_pitr_t * mp)
+{
+ unix_shared_memory_queue_t *q = NULL;
+ vl_api_show_lisp_pitr_reply_t *rmp = NULL;
+ lisp_cp_main_t *lcm = vnet_lisp_cp_get_main ();
+ mapping_t *m;
+ locator_set_t *ls = 0;
+ u8 *tmp_str = 0;
+ int rv = 0;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ if (!lcm->lisp_pitr)
+ {
+ tmp_str = format (0, "N/A");
+ }
+ else
+ {
+ m = pool_elt_at_index (lcm->mapping_pool, lcm->pitr_map_index);
+ if (~0 != m->locator_set_index)
+ {
+ ls =
+ pool_elt_at_index (lcm->locator_set_pool, m->locator_set_index);
+ tmp_str = format (0, "%s", ls->name);
+ }
+ else
+ {
+ tmp_str = format (0, "N/A");
+ }
+ }
+ vec_add1 (tmp_str, 0);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_SHOW_LISP_PITR_REPLY,
+ ({
+ rmp->status = lcm->lisp_pitr;
+ strncpy((char *) rmp->locator_set_name, (char *) tmp_str,
+ ARRAY_LEN(rmp->locator_set_name) - 1);
+ }));
+ /* *INDENT-ON* */
+}
+
+/*
+ * lisp_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_lisp;
+#undef _
+}
+
+static clib_error_t *
+lisp_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (lisp_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-cp/lisp_cp_dpo.c b/src/vnet/lisp-cp/lisp_cp_dpo.c
new file mode 100644
index 00000000000..185b07a2c1b
--- /dev/null
+++ b/src/vnet/lisp-cp/lisp_cp_dpo.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/dpo/dpo.h>
+#include <vnet/lisp-gpe/lisp_gpe.h>
+#include <vnet/lisp-cp/control.h>
+
+/**
+ * The static array of LISP punt DPOs
+ */
+static dpo_id_t lisp_cp_dpos[DPO_PROTO_NUM];
+
+const dpo_id_t *
+lisp_cp_dpo_get (dpo_proto_t proto)
+{
+ /*
+   * there is one instance of this DPO type per DPO protocol, so
+   * we can use the protocol as the index
+ */
+ return (&lisp_cp_dpos[proto]);
+}
+
+static u8 *
+format_lisp_cp_dpo (u8 * s, va_list * args)
+{
+ index_t index = va_arg (*args, index_t);
+ CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
+
+ return (format (s, "lisp-cp-punt-%U", format_dpo_proto, index));
+}
+
+static void
+lisp_cp_dpo_lock (dpo_id_t * dpo)
+{
+}
+
+static void
+lisp_cp_dpo_unlock (dpo_id_t * dpo)
+{
+}
+
+const static dpo_vft_t lisp_cp_vft = {
+ .dv_lock = lisp_cp_dpo_lock,
+ .dv_unlock = lisp_cp_dpo_unlock,
+ .dv_format = format_lisp_cp_dpo,
+};
+
+/**
+ * @brief The per-protocol VLIB graph nodes that are assigned to a LISP-CP
+ * object.
+ *
+ * this means that these graph nodes are ones from which a LISP-CP is the
+ * parent object in the DPO-graph.
+ */
+const static char *const lisp_cp_ip4_nodes[] = {
+ "lisp-cp-lookup-ip4",
+ NULL,
+};
+
+const static char *const lisp_cp_ip6_nodes[] = {
+ "lisp-cp-lookup-ip6",
+ NULL,
+};
+
+const static char *const lisp_cp_ethernet_nodes[] = {
+ "lisp-cp-lookup-l2",
+ NULL,
+};
+
+
+const static char *const *const lisp_cp_nodes[DPO_PROTO_NUM] = {
+ [DPO_PROTO_IP4] = lisp_cp_ip4_nodes,
+ [DPO_PROTO_IP6] = lisp_cp_ip6_nodes,
+ [DPO_PROTO_ETHERNET] = lisp_cp_ethernet_nodes,
+ [DPO_PROTO_MPLS] = NULL,
+};
+
+clib_error_t *
+lisp_cp_dpo_module_init (vlib_main_t * vm)
+{
+ dpo_proto_t dproto;
+
+ /*
+   * register the DPO type together with the per-protocol VLIB graph
+   * nodes (lisp_cp_nodes) from which a LISP-CP object is reachable
+ */
+ dpo_register (DPO_LISP_CP, &lisp_cp_vft, lisp_cp_nodes);
+
+ FOR_EACH_DPO_PROTO (dproto)
+ {
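+    /* one punt DPO per payload protocol, indexed by that protocol */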
+ dpo_set (&lisp_cp_dpos[dproto], DPO_LISP_CP, dproto, dproto);
+ }
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (lisp_cp_dpo_module_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-cp/lisp_cp_dpo.h b/src/vnet/lisp-cp/lisp_cp_dpo.h
new file mode 100644
index 00000000000..f0f3fae81a4
--- /dev/null
+++ b/src/vnet/lisp-cp/lisp_cp_dpo.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LISP_CP_DPO_H__
+#define __LISP_CP_DPO_H__
+
+#include <vnet/vnet.h>
+#include <vnet/dpo/dpo.h>
+
+/**
+ * A representation of punt to the LISP control plane.
+ */
+typedef struct lisp_cp_dpo_t
+{
+ /**
+ * The transport payload type.
+ */
+ dpo_proto_t lcd_proto;
+} lisp_cp_dpo_t;
+
+extern const dpo_id_t *lisp_cp_dpo_get (dpo_proto_t proto);
+
+extern clib_error_t *lisp_cp_dpo_module_init (vlib_main_t * vm);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-cp/lisp_cp_messages.h b/src/vnet/lisp-cp/lisp_cp_messages.h
new file mode 100644
index 00000000000..278f60e1726
--- /dev/null
+++ b/src/vnet/lisp-cp/lisp_cp_messages.h
@@ -0,0 +1,613 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VNET_LISP_GPE_LISP_CP_MESSAGES_H_
+#define VNET_LISP_GPE_LISP_CP_MESSAGES_H_
+
+#include <vnet/vnet.h>
+
+#define MAX_IP_PKT_LEN 4096
+#define MAX_IP_HDR_LEN 40 /* without options or IPv6 hdr extensions */
+#define UDP_HDR_LEN 8
+#define LISP_DATA_HDR_LEN 8
+#define LISP_ECM_HDR_LEN 4
+#define MAX_LISP_MSG_ENCAP_LEN (2 * (MAX_IP_HDR_LEN + UDP_HDR_LEN) + LISP_ECM_HDR_LEN)
+#define MAX_LISP_PKT_ENCAP_LEN (MAX_IP_HDR_LEN + UDP_HDR_LEN + LISP_DATA_HDR_LEN)
+
+#define LISP_CONTROL_PORT 4342
+
+/*
+ * EID RECORD FIELD
+ */
+
+/*
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * / | Reserved | EID mask-len | EID-prefix-AFI |
+ * Rec +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \ | EID-prefix ... |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+
+typedef struct _eid_prefix_record_hdr
+{
+ u8 reserved;
+ u8 eid_prefix_length;
+} __attribute__ ((__packed__)) eid_record_hdr_t;
+
+void eid_rec_hdr_init (eid_record_hdr_t * ptr);
+
+#define EID_REC_CAST(h_) ((eid_record_hdr_t *)(h_))
+#define EID_REC_MLEN(h_) EID_REC_CAST((h_))->eid_prefix_length
+#define EID_REC_ADDR(h) ((u8 *)(h) + sizeof(eid_record_hdr_t))
+
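+/* Hypothetical helper, for illustration only (not used elsewhere in
+ * this patch): decode the fixed EID record header into its mask length
+ * and a pointer to the variable-length EID address that follows it. */
+always_inline u8 *
+eid_rec_example_addr (void *rec, u8 * mask_len)
+{
+  mask_len[0] = EID_REC_MLEN (rec);
+  return EID_REC_ADDR (rec);
+}
+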
+/* LISP Types */
+typedef enum
+{
+ NOT_LISP_MSG,
+ LISP_MAP_REQUEST = 1,
+ LISP_MAP_REPLY,
+ LISP_MAP_REGISTER,
+ LISP_MAP_NOTIFY,
+ LISP_INFO_NAT = 7,
+ LISP_ENCAP_CONTROL_TYPE = 8,
+ LISP_MSG_TYPES
+} lisp_msg_type_e;
+
+/*
+ * ENCAPSULATED CONTROL MESSAGE
+ */
+
+/*
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * / | IPv4 or IPv6 Header |
+ * OH | (uses RLOC addresses) |
+ * \ | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * / | Source Port = xxxx | Dest Port = 4342 |
+ * UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \ | UDP Length | UDP Checksum |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * LH |Type=8 |S| Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * / | IPv4 or IPv6 Header |
+ * IH | (uses RLOC or EID addresses) |
+ * \ | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * / | Source Port = xxxx | Dest Port = yyyy |
+ * UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \ | UDP Length | UDP Checksum |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * LCM | LISP Control Message |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+
+/*
+ * Encapsulated control message header. This is followed by the IP
+ * header of the encapsulated LISP control message.
+ *
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Type=8 |S| Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+typedef struct
+{
+#if CLIB_ARCH_IS_LITTLE_ENDIAN
+ u8 reserved:3;
+ u8 s_bit:1;
+ u8 type:4;
+#else
+ u8 type:4;
+ u8 s_bit:1;
+ u8 reserved:3;
+#endif
+ u8 reserved2[3];
+} ecm_hdr_t;
+
+char *ecm_hdr_to_char (ecm_hdr_t * h);
+
+#define ECM_TYPE(h_) ((ecm_hdr_t *)(h_))->type
+
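+/* Hypothetical helper, for illustration only: stamp a freshly zeroed
+ * ECM header with the Type=8 code from the diagram above. */
+always_inline void
+ecm_hdr_example_init (ecm_hdr_t * h)
+{
+  memset (h, 0, sizeof (h[0]));
+  h->type = LISP_ENCAP_CONTROL_TYPE;
+}
+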
+/*
+ * MAP-REQUEST MESSAGE
+ */
+
+/*
+ * Map-Request Message Format
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Type=1 |A|M|P|S|p|s| Reserved | IRC | Record Count |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Nonce . . . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . . . Nonce |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Source-EID-AFI | Source EID Address ... |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ITR-RLOC-AFI 1 | ITR-RLOC Address 1 ... |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ... |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ITR-RLOC-AFI n | ITR-RLOC Address n ... |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * / | Reserved | EID mask-len | EID-prefix-AFI |
+ * Rec +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \ | EID-prefix ... |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Map-Reply Record ... |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Mapping Protocol Data |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+
+/*
+ * Fixed size portion of the map request. Variable size source EID
+ * address, originating ITR RLOC AFIs and addresses and then map
+ * request records follow.
+ */
+typedef struct
+{
+#if CLIB_ARCH_IS_LITTLE_ENDIAN
+ u8 solicit_map_request:1;
+ u8 rloc_probe:1;
+ u8 map_data_present:1;
+ u8 authoritative:1;
+ u8 type:4;
+#else
+ u8 type:4;
+ u8 authoritative:1;
+ u8 map_data_present:1;
+ u8 rloc_probe:1;
+ u8 solicit_map_request:1;
+#endif
+#if CLIB_ARCH_IS_LITTLE_ENDIAN
+ u8 reserved1:6;
+ u8 smr_invoked:1;
+ u8 pitr:1;
+#else
+ u8 pitr:1;
+ u8 smr_invoked:1;
+ u8 reserved1:6;
+#endif
+#if CLIB_ARCH_IS_LITTLE_ENDIAN
+ u8 additional_itr_rloc_count:5;
+ u8 reserved2:3;
+#else
+ u8 reserved2:3;
+ u8 additional_itr_rloc_count:5;
+#endif
+ u8 record_count;
+ u64 nonce;
+} __attribute__ ((__packed__)) map_request_hdr_t;
+
+void map_request_hdr_init (void *ptr);
+char *map_request_hdr_to_char (map_request_hdr_t * h);
+
+#define MREQ_TYPE(h_) (h_)->type
+#define MREQ_HDR_CAST(h_) ((map_request_hdr_t *)(h_))
+#define MREQ_REC_COUNT(h_) (MREQ_HDR_CAST(h_))->record_count
+#define MREQ_RLOC_PROBE(h_) (MREQ_HDR_CAST(h_))->rloc_probe
+#define MREQ_ITR_RLOC_COUNT(h_) (MREQ_HDR_CAST(h_))->additional_itr_rloc_count
+#define MREQ_NONCE(h_) (MREQ_HDR_CAST(h_))->nonce
+#define MREQ_SMR(h_) (MREQ_HDR_CAST(h_))->solicit_map_request
+#define MREQ_SMR_INVOKED(h_) (MREQ_HDR_CAST(h_))->smr_invoked
+
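+/* Hypothetical helper, for illustration only: the map-request header
+ * stores the number of *additional* ITR-RLOCs, so the total is the
+ * field value plus one. */
+always_inline u8
+mreq_example_itr_rloc_total (void *pkt)
+{
+  return MREQ_ITR_RLOC_COUNT (pkt) + 1;
+}
+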
+/*
+ * MAP-REPLY MESSAGE
+ */
+
+ /*
+ * Map-Reply Message Format
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Type=2 |P|E|S| Reserved | Record Count |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Nonce . . . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . . . Nonce |
+ * +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | Record TTL |
+ * | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * R | Locator Count | EID mask-len | ACT |A| Reserved |
+ * e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * c | Rsvd | Map-Version Number | EID-AFI |
+ * o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * r | EID-prefix |
+ * d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | /| Priority | Weight | M Priority | M Weight |
+ * | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | o | Unused Flags |L|p|R| Loc-AFI |
+ * | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | \| Locator |
+ * +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Mapping Protocol Data |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+ /*
+ * Fixed size portion of the map reply.
+ */
+typedef struct
+{
+#if CLIB_ARCH_IS_LITTLE_ENDIAN
+ u8 reserved1:1;
+ u8 security:1;
+ u8 echo_nonce:1;
+ u8 rloc_probe:1;
+ u8 type:4;
+#else
+ u8 type:4;
+ u8 rloc_probe:1;
+ u8 echo_nonce:1;
+ u8 security:1;
+ u8 reserved1:1;
+#endif
+ u8 reserved2;
+ u8 reserved3;
+ u8 record_count;
+ u64 nonce;
+} __attribute__ ((__packed__)) map_reply_hdr_t;
+
+void map_reply_hdr_init (void *ptr);
+char *map_reply_hdr_to_char (map_reply_hdr_t * h);
+
+#define MREP_TYPE(h_) MREP_HDR_CAST(h_)->type
+#define MREP_HDR_CAST(h_) ((map_reply_hdr_t *)(h_))
+#define MREP_REC_COUNT(h_) MREP_HDR_CAST(h_)->record_count
+#define MREP_RLOC_PROBE(h_) MREP_HDR_CAST(h_)->rloc_probe
+#define MREP_NONCE(h_) MREP_HDR_CAST(h_)->nonce
+
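+/* Hypothetical helper, for illustration only: match a map-reply to an
+ * outstanding request by nonce; the nonce is stored verbatim on both
+ * sides in this patch, so no byte swapping is needed. */
+always_inline int
+mrep_example_nonce_matches (void *pkt, u64 nonce)
+{
+  return MREP_NONCE (pkt) == nonce;
+}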
+
+always_inline lisp_msg_type_e
+lisp_msg_type (void *b)
+{
+ ecm_hdr_t *hdr = b;
+ if (!hdr)
+ {
+ return (NOT_LISP_MSG);
+ }
+ return (hdr->type);
+}
+
+always_inline void
+increment_record_count (void *b)
+{
+ switch (lisp_msg_type (b))
+ {
+ case LISP_MAP_REQUEST:
+ MREQ_REC_COUNT (b) += 1;
+ break;
+ case LISP_MAP_REPLY:
+ MREP_REC_COUNT (b) += 1;
+ break;
+ default:
+ return;
+ }
+}
+
+
+/*
+ * LOCATOR FIELD
+ *
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * /| Priority | Weight | M Priority | M Weight |
+ * L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * o | Unused Flags |L|p|R| Loc-AFI |
+ * c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \| Locator |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Fixed portion of the mapping record locator. Variable length
+ * locator address follows.
+ */
+typedef struct _locator_hdr
+{
+ u8 priority;
+ u8 weight;
+ u8 mpriority;
+ u8 mweight;
+ u8 unused1;
+#if CLIB_ARCH_IS_LITTLE_ENDIAN
+ u8 reachable:1;
+ u8 probed:1;
+ u8 local:1;
+ u8 unused2:5;
+#else
+ u8 unused2:5;
+ u8 local:1;
+ u8 probed:1;
+ u8 reachable:1;
+#endif
+} __attribute__ ((__packed__)) locator_hdr_t;
+
+#define LOC_CAST(h_) ((locator_hdr_t *)(h_))
+#define LOC_PROBED(h_) LOC_CAST(h_)->probed
+#define LOC_PRIORITY(h_) LOC_CAST(h_)->priority
+#define LOC_WEIGHT(h_) LOC_CAST(h_)->weight
+#define LOC_MPRIORITY(h_) LOC_CAST(h_)->mpriority
+#define LOC_MWEIGHT(h_) LOC_CAST(h_)->mweight
+#define LOC_REACHABLE(h_) LOC_CAST(h_)->reachable
+#define LOC_LOCAL(h_) LOC_CAST(h_)->local
+#define LOC_ADDR(h_) ((u8 *)(h_) + sizeof(locator_hdr_t))
+
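+/* Hypothetical helper, for illustration only; the priority-255
+ * "must not use" rule is an assumption taken from the LISP spec
+ * (RFC 6830), not from this patch. */
+always_inline int
+loc_example_usable (void *loc)
+{
+  return LOC_REACHABLE (loc) && LOC_PRIORITY (loc) != 255;
+}
+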
+/*
+ * MAPPING RECORD
+ *
+ * Mapping record used in all LISP control messages.
+ *
+ * +---> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | Record TTL |
+ * | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * R | Locator Count | EID mask-len | ACT |A| Reserved |
+ * e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * c | Rsvd | Map-Version Number | EID-AFI |
+ * o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * r | EID-prefix |
+ * d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | /| Priority | Weight | M Priority | M Weight |
+ * | / +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Loc | Unused Flags |L|p|R| Loc-AFI |
+ * | \ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | \| Locator |
+ * +---> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+/*
+ * Fixed portion of the mapping record. EID prefix address and
+ * locators follow.
+ */
+
+typedef struct _mapping_record_hdr_t
+{
+ u32 ttl;
+ u8 locator_count;
+ u8 eid_prefix_length;
+#if CLIB_ARCH_IS_LITTLE_ENDIAN
+ u8 reserved1:4;
+ u8 authoritative:1;
+ u8 action:3;
+#else
+ u8 action:3;
+ u8 authoritative:1;
+ u8 reserved1:4;
+#endif
+ u8 reserved2;
+#if CLIB_ARCH_IS_LITTLE_ENDIAN
+ u8 version_hi:4;
+ u8 reserved3:4;
+#else
+ u8 reserved3:4;
+ u8 version_hi:4;
+#endif
+ u8 version_low;
+} __attribute__ ((__packed__)) mapping_record_hdr_t;
+
+void mapping_record_init_hdr (mapping_record_hdr_t * h);
+
+#define MAP_REC_EID_PLEN(h) ((mapping_record_hdr_t *)(h))->eid_prefix_length
+#define MAP_REC_LOC_COUNT(h) ((mapping_record_hdr_t *)(h))->locator_count
+#define MAP_REC_ACTION(h) ((mapping_record_hdr_t *)(h))->action
+#define MAP_REC_AUTH(h) ((mapping_record_hdr_t *)(h))->authoritative
+#define MAP_REC_TTL(h) ((mapping_record_hdr_t *)(h))->ttl
+#define MAP_REC_EID(h) ((u8 *)(h) + sizeof(mapping_record_hdr_t))
+#define MAP_REC_VERSION(h) (((h)->version_hi << 8) | (h)->version_low)
+
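+/* Hypothetical helper, for illustration only: the record TTL travels
+ * in network byte order (see lisp_msg_put_mapping_record), so convert
+ * before use. */
+always_inline u32
+map_rec_example_ttl (void *rec)
+{
+  return clib_net_to_host_u32 (MAP_REC_TTL (rec));
+}
+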
+typedef enum
+{
+ LISP_NO_ACTION,
+ LISP_FORWARD_NATIVE,
+ LISP_SEND_MAP_REQUEST,
+ LISP_DROP
+} lisp_action_e;
+
+typedef enum lisp_authoritative
+{
+ A_NO_AUTHORITATIVE = 0,
+ A_AUTHORITATIVE
+} lisp_authoritative_e;
+
+/*
+ * LISP Canonical Address Format Encodings
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | AFI = 16387 | Rsvd1 | Flags |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type | Rsvd2 | Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+typedef struct _lcaf_hdr_t
+{
+ u8 reserved1;
+ u8 flags;
+ u8 type;
+ u8 reserved2;
+ u16 len;
+} __attribute__ ((__packed__)) lcaf_hdr_t;
+
+#define LCAF_TYPE(h) ((lcaf_hdr_t *)(h))->type
+#define LCAF_LENGTH(h) ((lcaf_hdr_t *)(h))->len
+#define LCAF_RES2(h) ((lcaf_hdr_t *)(h))->reserved2
+#define LCAF_FLAGS(h) ((lcaf_hdr_t *)(h))->flags
+#define LCAF_PAYLOAD(h) ((u8 *)(h) + sizeof(lcaf_hdr_t))
+
+/*
+ * Source/Dest Key Canonical Address Format:
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | Source-ML | Dest-ML |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+typedef struct _lcaf_src_dst_hdr_t
+{
+ u16 reserved;
+ u8 src_mask_len;
+ u8 dst_mask_len;
+} __attribute__ ((__packed__)) lcaf_src_dst_hdr_t;
+
+#define LCAF_SD_SRC_ML(_h) (_h)->src_mask_len
+#define LCAF_SD_DST_ML(_h) (_h)->dst_mask_len
+
+/*
+ * The Map-Register message format is:
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Type=3 |P| Reserved |M| Record Count |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Nonce . . . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . . . Nonce |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Key ID | Authentication Data Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * ~ Authentication Data ~
+ * +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | Record TTL |
+ * | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * R | Locator Count | EID mask-len | ACT |A| Reserved |
+ * e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * c | Rsvd | Map-Version Number | EID-Prefix-AFI |
+ * o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * r | EID-Prefix |
+ * d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | /| Priority | Weight | M Priority | M Weight |
+ * | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | o | Unused Flags |L|p|R| Loc-AFI |
+ * | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | \| Locator |
+ * +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+typedef struct
+{
+#if CLIB_ARCH_IS_LITTLE_ENDIAN
+ u8 res1:3;
+ u8 proxy_map_reply:1;
+ u8 type:4;
+#else
+ u8 type:4;
+ u8 proxy_map_reply:1;
+ u8 res1:3;
+#endif
+
+ u8 res2;
+
+#if CLIB_ARCH_IS_LITTLE_ENDIAN
+ u8 want_map_notify:1;
+ u8 res3:7;
+#else
+ u8 res3:7;
+ u8 want_map_notify:1;
+#endif
+
+ u8 record_count;
+ u64 nonce;
+ u16 key_id;
+ u16 auth_data_len;
+ u8 data[0];
+} __attribute__ ((__packed__)) map_register_hdr_t;
+
+#define MREG_TYPE(h_) (h_)->type
+#define MREG_HDR_CAST(h_) ((map_register_hdr_t *)(h_))
+#define MREG_PROXY_MR(h_) (MREG_HDR_CAST(h_))->proxy_map_reply
+#define MREG_WANT_MAP_NOTIFY(h_) (MREG_HDR_CAST(h_))->want_map_notify
+#define MREG_REC_COUNT(h_) (MREG_HDR_CAST(h_))->record_count
+#define MREG_NONCE(h_) (MREG_HDR_CAST(h_))->nonce
+#define MREG_KEY_ID(h_) (MREG_HDR_CAST(h_))->key_id
+#define MREG_AUTH_DATA_LEN(h_) (MREG_HDR_CAST(h_))->auth_data_len
+#define MREG_DATA(h_) (MREG_HDR_CAST(h_))->data
+
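+/* Hypothetical helper, for illustration only: size of the fixed
+ * map-register header plus its variable-length authentication data;
+ * the length field is assumed to be in network byte order on the
+ * wire. */
+always_inline u32
+mreg_example_hdr_size (void *pkt)
+{
+  return sizeof (map_register_hdr_t)
+    + clib_net_to_host_u16 (MREG_AUTH_DATA_LEN (pkt));
+}
+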
+/*
+ * The Map-Notify message format is:
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Type=4 | Reserved | Record Count |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Nonce . . . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . . . Nonce |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Key ID | Authentication Data Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * ~ Authentication Data ~
+ * +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | Record TTL |
+ * | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * R | Locator Count | EID mask-len | ACT |A| Reserved |
+ * e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * c | Rsvd | Map-Version Number | EID-Prefix-AFI |
+ * o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * r | EID-Prefix |
+ * d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | /| Priority | Weight | M Priority | M Weight |
+ * | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | o | Unused Flags |L|p|R| Loc-AFI |
+ * | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | \| Locator |
+ * +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+
+typedef struct
+{
+#if CLIB_ARCH_IS_LITTLE_ENDIAN
+ u8 res1:4;
+ u8 type:4;
+#else
+ u8 type:4;
+ u8 res1:4;
+#endif
+
+ u16 res2;
+
+ u8 record_count;
+ u64 nonce;
+ u16 key_id;
+ u16 auth_data_len;
+ u8 data[0];
+} __attribute__ ((__packed__)) map_notify_hdr_t;
+
+#define MNOTIFY_TYPE(h_) (h_)->type
+#define MNOTIFY_HDR_CAST(h_) ((map_notify_hdr_t *)(h_))
+#define MNOTIFY_REC_COUNT(h_) (MNOTIFY_HDR_CAST(h_))->record_count
+#define MNOTIFY_NONCE(h_) (MNOTIFY_HDR_CAST(h_))->nonce
+#define MNOTIFY_KEY_ID(h_) (MNOTIFY_HDR_CAST(h_))->key_id
+#define MNOTIFY_AUTH_DATA_LEN(h_) (MNOTIFY_HDR_CAST(h_))->auth_data_len
+#define MNOTIFY_DATA(h_) (MNOTIFY_HDR_CAST(h_))->data
+
+#endif /* VNET_LISP_GPE_LISP_CP_MESSAGES_H_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-cp/lisp_msg_serdes.c b/src/vnet/lisp-cp/lisp_msg_serdes.c
new file mode 100644
index 00000000000..eee1885cd9b
--- /dev/null
+++ b/src/vnet/lisp-cp/lisp_msg_serdes.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/lisp-cp/lisp_msg_serdes.h>
+#include <vnet/lisp-cp/packets.h>
+#include <vppinfra/time.h>
+
+void *lisp_msg_put_gid (vlib_buffer_t * b, gid_address_t * gid);
+
+static void
+lisp_msg_put_locators (vlib_buffer_t * b, locator_t * locators)
+{
+ locator_t *loc;
+
+ vec_foreach (loc, locators)
+ {
+ u8 *p = vlib_buffer_put_uninit (b, sizeof (locator_hdr_t));
+ memset (p, 0, sizeof (locator_hdr_t));
+ LOC_PRIORITY (p) = loc->priority;
+ LOC_MPRIORITY (p) = loc->mpriority;
+ LOC_WEIGHT (p) = loc->weight;
+ LOC_MWEIGHT (p) = loc->mweight;
+ LOC_LOCAL (p) = loc->local;
+ LOC_PROBED (p) = loc->probed ? 1 : 0;
+ lisp_msg_put_gid (b, &loc->address);
+ }
+}
+
+static void
+lisp_msg_put_mapping_record (vlib_buffer_t * b, mapping_t * record)
+{
+ mapping_record_hdr_t *p =
+ vlib_buffer_put_uninit (b, sizeof (mapping_record_hdr_t));
+ gid_address_t *eid = &record->eid;
+
+ memset (p, 0, sizeof (*p));
+ MAP_REC_EID_PLEN (p) = gid_address_len (eid);
+ MAP_REC_TTL (p) = clib_host_to_net_u32 (MAP_REGISTER_DEFAULT_TTL);
+ MAP_REC_AUTH (p) = record->authoritative ? 1 : 0;
+ MAP_REC_LOC_COUNT (p) = vec_len (record->locators);
+
+ lisp_msg_put_gid (b, eid);
+ lisp_msg_put_locators (b, record->locators);
+}
+
+static void
+lisp_msg_put_mreg_records (vlib_buffer_t * b, mapping_t * records)
+{
+ u32 i;
+ for (i = 0; i < vec_len (records); i++)
+ lisp_msg_put_mapping_record (b, &records[i]);
+}
+
+void *
+lisp_msg_put_gid (vlib_buffer_t * b, gid_address_t * gid)
+{
+ u8 *p = 0;
+ if (!gid)
+ {
+ /* insert only src-eid-afi field set to 0 */
+ p = vlib_buffer_put_uninit (b, sizeof (u16));
+ *(u16 *) p = 0;
+ }
+ else
+ {
+ p = vlib_buffer_put_uninit (b, gid_address_size_to_put (gid));
+ gid_address_put (p, gid);
+ }
+ return p;
+}
+
+static void *
+lisp_msg_put_itr_rlocs (lisp_cp_main_t * lcm, vlib_buffer_t * b,
+ gid_address_t * rlocs, u8 * locs_put)
+{
+ u8 *bp, count = 0;
+ u32 i;
+
+ bp = vlib_buffer_get_current (b);
+ for (i = 0; i < vec_len (rlocs); i++)
+ {
+ lisp_msg_put_gid (b, &rlocs[i]);
+ count++;
+ }
+
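+  /* the map-request header carries the "additional" ITR-RLOC count,
+   * i.e. the total number of rlocs minus one */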
+ *locs_put = count - 1;
+ return bp;
+}
+
+void *
+lisp_msg_put_eid_rec (vlib_buffer_t * b, gid_address_t * eid)
+{
+ eid_record_hdr_t *h = vlib_buffer_put_uninit (b, sizeof (*h));
+
+ memset (h, 0, sizeof (*h));
+ EID_REC_MLEN (h) = gid_address_len (eid);
+ lisp_msg_put_gid (b, eid);
+ return h;
+}
+
+u64
+nonce_build (u32 seed)
+{
+ u64 nonce;
+ u32 nonce_lower;
+ u32 nonce_upper;
+ struct timespec ts;
+
+ /* Put nanosecond clock in lower 32-bits and put an XOR of the nanosecond
+ * clock with the second clock in the upper 32-bits. */
+ syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
+ nonce_lower = ts.tv_nsec;
+ nonce_upper = ts.tv_sec ^ clib_host_to_net_u32 (nonce_lower);
+
+ /* OR in a caller provided seed to the low-order 32-bits. */
+ nonce_lower |= seed;
+
+ /* Return 64-bit nonce. */
+ nonce = nonce_upper;
+ nonce = (nonce << 32) | nonce_lower;
+ return nonce;
+}
+
+void *
+lisp_msg_put_map_reply (vlib_buffer_t * b, mapping_t * records, u64 nonce,
+ u8 probe_bit)
+{
+ map_reply_hdr_t *h = vlib_buffer_put_uninit (b, sizeof (h[0]));
+
+ memset (h, 0, sizeof (h[0]));
+ MREP_TYPE (h) = LISP_MAP_REPLY;
+ MREP_NONCE (h) = nonce;
+ MREP_REC_COUNT (h) = 1;
+ MREP_RLOC_PROBE (h) = probe_bit;
+
+ lisp_msg_put_mreg_records (b, records);
+ return h;
+}
+
+void *
+lisp_msg_put_map_register (vlib_buffer_t * b, mapping_t * records,
+ u8 want_map_notify, u16 auth_data_len, u64 * nonce,
+ u32 * msg_len)
+{
+ u8 *auth_data = 0;
+
+ /* Basic header init */
+ map_register_hdr_t *h = vlib_buffer_put_uninit (b, sizeof (h[0]));
+
+ memset (h, 0, sizeof (h[0]));
+ MREG_TYPE (h) = LISP_MAP_REGISTER;
+ MREG_NONCE (h) = nonce_build (0);
+ MREG_WANT_MAP_NOTIFY (h) = want_map_notify ? 1 : 0;
+ MREG_REC_COUNT (h) = vec_len (records);
+
+ auth_data = vlib_buffer_put_uninit (b, auth_data_len);
+ memset (auth_data, 0, auth_data_len);
+
+ /* Put map register records */
+ lisp_msg_put_mreg_records (b, records);
+
+ nonce[0] = MREG_NONCE (h);
+ msg_len[0] = vlib_buffer_get_tail (b) - (u8 *) h;
+ return h;
+}
+
+void *
+lisp_msg_put_mreq (lisp_cp_main_t * lcm, vlib_buffer_t * b,
+ gid_address_t * seid, gid_address_t * deid,
+ gid_address_t * rlocs, u8 is_smr_invoked,
+ u8 rloc_probe_set, u64 * nonce)
+{
+ u8 loc_count = 0;
+
+ /* Basic header init */
+ map_request_hdr_t *h = vlib_buffer_put_uninit (b, sizeof (h[0]));
+
+ memset (h, 0, sizeof (h[0]));
+ MREQ_TYPE (h) = LISP_MAP_REQUEST;
+ MREQ_NONCE (h) = nonce_build (0);
+ MREQ_SMR_INVOKED (h) = is_smr_invoked ? 1 : 0;
+ MREQ_RLOC_PROBE (h) = rloc_probe_set ? 1 : 0;
+
+ /* We're adding one eid record */
+ increment_record_count (h);
+
+ /* Fill source eid */
+ lisp_msg_put_gid (b, seid);
+
+ /* Put itr rlocs */
+ lisp_msg_put_itr_rlocs (lcm, b, rlocs, &loc_count);
+ MREQ_ITR_RLOC_COUNT (h) = loc_count;
+
+ /* Put eid record */
+ lisp_msg_put_eid_rec (b, deid);
+
+ nonce[0] = MREQ_NONCE (h);
+ return h;
+}
+
+void *
+lisp_msg_push_ecm (vlib_main_t * vm, vlib_buffer_t * b, int lp, int rp,
+ gid_address_t * la, gid_address_t * ra)
+{
+ ecm_hdr_t *h;
+ ip_address_t _src_ip, *src_ip = &_src_ip, _dst_ip, *dst_ip = &_dst_ip;
+ if (gid_address_type (la) != GID_ADDR_IP_PREFIX)
+ {
+ /* empty ip4 */
+ memset (src_ip, 0, sizeof (src_ip[0]));
+ memset (dst_ip, 0, sizeof (dst_ip[0]));
+ }
+ else
+ {
+ src_ip = &gid_address_ip (la);
+ dst_ip = &gid_address_ip (ra);
+ }
+
+ /* Push inner ip and udp */
+ pkt_push_udp_and_ip (vm, b, lp, rp, src_ip, dst_ip);
+
+ /* Push lisp ecm hdr */
+ h = pkt_push_ecm_hdr (b);
+
+ return h;
+}
+
+static u32
+msg_type_to_hdr_len (lisp_msg_type_e type)
+{
+ switch (type)
+ {
+ case LISP_MAP_REQUEST:
+ return (sizeof (map_request_hdr_t));
+ case LISP_MAP_REPLY:
+ return (sizeof (map_reply_hdr_t));
+ default:
+ return (0);
+ }
+}
+
+void *
+lisp_msg_pull_hdr (vlib_buffer_t * b, lisp_msg_type_e type)
+{
+ return vlib_buffer_pull (b, msg_type_to_hdr_len (type));
+}
+
+u32
+lisp_msg_parse_addr (vlib_buffer_t * b, gid_address_t * eid)
+{
+ u32 len;
+ memset (eid, 0, sizeof (*eid));
+ len = gid_address_parse (vlib_buffer_get_current (b), eid);
+ if (len != ~0)
+ vlib_buffer_pull (b, len);
+ return len;
+}
+
+u32
+lisp_msg_parse_eid_rec (vlib_buffer_t * b, gid_address_t * eid)
+{
+ eid_record_hdr_t *h = vlib_buffer_get_current (b);
+ u32 len;
+ memset (eid, 0, sizeof (*eid));
+ len = gid_address_parse (EID_REC_ADDR (h), eid);
+ if (len == ~0)
+ return len;
+
+ gid_address_ippref_len (eid) = EID_REC_MLEN (h);
+ vlib_buffer_pull (b, len + sizeof (eid_record_hdr_t));
+
+ return len + sizeof (eid_record_hdr_t);
+}
+
+u32
+lisp_msg_parse_itr_rlocs (vlib_buffer_t * b, gid_address_t ** rlocs,
+ u8 rloc_count)
+{
+ gid_address_t tloc;
+ u32 i, len = 0, tlen = 0;
+
+  /* callers pass MREQ_ITR_RLOC_COUNT(hdr) + 1, i.e. the total rloc count */
+ for (i = 0; i < rloc_count; i++)
+ {
+ len = lisp_msg_parse_addr (b, &tloc);
+ if (len == ~0)
+ return len;
+ vec_add1 (*rlocs, tloc);
+ tlen += len;
+ }
+ return tlen;
+}
+
+u32
+lisp_msg_parse_loc (vlib_buffer_t * b, locator_t * loc)
+{
+ int len;
+
+ len = locator_parse (vlib_buffer_get_current (b), loc);
+ if (len == ~0)
+ return ~0;
+
+ vlib_buffer_pull (b, len);
+
+ return len;
+}
+
+u32
+lisp_msg_parse_mapping_record (vlib_buffer_t * b, gid_address_t * eid,
+ locator_t ** locs, locator_t * probed_)
+{
+ void *h = 0, *loc_hdr = 0;
+ locator_t loc, *probed = 0;
+ int i = 0, len = 0, llen = 0;
+
+ h = vlib_buffer_get_current (b);
+ vlib_buffer_pull (b, sizeof (mapping_record_hdr_t));
+
+ memset (eid, 0, sizeof (*eid));
+ len = gid_address_parse (vlib_buffer_get_current (b), eid);
+ if (len == ~0)
+ return len;
+
+ vlib_buffer_pull (b, len);
+ if (GID_ADDR_IP_PREFIX == gid_address_type (eid))
+ gid_address_ippref_len (eid) = MAP_REC_EID_PLEN (h);
+
+ for (i = 0; i < MAP_REC_LOC_COUNT (h); i++)
+ {
+ loc_hdr = vlib_buffer_get_current (b);
+
+ llen = lisp_msg_parse_loc (b, &loc);
+ if (llen == ~0)
+ return llen;
+ vec_add1 (*locs, loc);
+ len += llen;
+
+ if (LOC_PROBED (loc_hdr))
+ {
+ if (probed != 0)
+ clib_warning
+ ("Multiple locators probed! Probing only the first!");
+ else
+ probed = &loc;
+ }
+ }
+ /* XXX */
+ if (probed_ != 0 && probed)
+ *probed_ = *probed;
+
+  return len + sizeof (mapping_record_hdr_t);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-cp/lisp_msg_serdes.h b/src/vnet/lisp-cp/lisp_msg_serdes.h
new file mode 100644
index 00000000000..d794eff6340
--- /dev/null
+++ b/src/vnet/lisp-cp/lisp_msg_serdes.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VNET_LISP_GPE_LISP_MSG_BUILDER_H_
+#define VNET_LISP_GPE_LISP_MSG_BUILDER_H_
+
+#include <vnet/vnet.h>
+#include <vnet/lisp-cp/lisp_cp_messages.h>
+#include <vnet/lisp-cp/control.h>
+
+void *lisp_msg_put_mreq (lisp_cp_main_t * lcm, vlib_buffer_t * b,
+ gid_address_t * seid, gid_address_t * deid,
+ gid_address_t * rlocs, u8 is_smr_invoked,
+ u8 rloc_probe_set, u64 * nonce);
+
+void *lisp_msg_put_map_register (vlib_buffer_t * b, mapping_t * records,
+ u8 want_map_notify, u16 auth_data_len,
+ u64 * nonce, u32 * msg_len);
+
+void *lisp_msg_push_ecm (vlib_main_t * vm, vlib_buffer_t * b, int lp, int rp,
+ gid_address_t * la, gid_address_t * ra);
+
+void *lisp_msg_put_map_reply (vlib_buffer_t * b, mapping_t * record,
+ u64 nonce, u8 probe_bit);
+
+u32
+lisp_msg_parse_mapping_record (vlib_buffer_t * b, gid_address_t * eid,
+ locator_t ** locs, locator_t * probed_);
+
+u32 lisp_msg_parse_addr (vlib_buffer_t * b, gid_address_t * eid);
+
+u32 lisp_msg_parse_eid_rec (vlib_buffer_t * b, gid_address_t * eid);
+
+u32
+lisp_msg_parse_itr_rlocs (vlib_buffer_t * b, gid_address_t ** rlocs,
+ u8 rloc_count);
+
+#endif /* VNET_LISP_GPE_LISP_MSG_BUILDER_H_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-cp/lisp_types.c b/src/vnet/lisp-cp/lisp_types.c
new file mode 100644
index 00000000000..5ab4a5eb449
--- /dev/null
+++ b/src/vnet/lisp-cp/lisp_types.c
@@ -0,0 +1,1574 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/lisp-cp/lisp_types.h>
+
+static u16 gid_address_put_no_vni (u8 * b, gid_address_t * gid);
+static u16 gid_address_size_to_put_no_vni (gid_address_t * gid);
+static u16 fid_addr_size_to_write (fid_address_t * a);
+
+u32 mac_parse (u8 * offset, u8 * a);
+
+typedef u16 (*size_to_write_fct) (void *);
+typedef void *(*cast_fct) (gid_address_t *);
+typedef u16 (*serdes_fct) (u8 *, void *);
+typedef u8 (*addr_len_fct) (void *);
+typedef void (*copy_fct) (void *, void *);
+typedef void (*free_fct) (void *);
+typedef int (*cmp_fct) (void *, void *);
+
+size_to_write_fct size_to_write_fcts[GID_ADDR_TYPES] =
+ { ip_prefix_size_to_write, lcaf_size_to_write, mac_size_to_write,
+ sd_size_to_write
+};
+serdes_fct write_fcts[GID_ADDR_TYPES] =
+ { ip_prefix_write, lcaf_write, mac_write, sd_write };
+cast_fct cast_fcts[GID_ADDR_TYPES] =
+ { ip_prefix_cast, lcaf_cast, mac_cast, sd_cast };
+addr_len_fct addr_len_fcts[GID_ADDR_TYPES] =
+ { ip_prefix_length, lcaf_length, mac_length, sd_length };
+copy_fct copy_fcts[GID_ADDR_TYPES] =
+ { ip_prefix_copy, lcaf_copy, mac_copy, sd_copy };
+
+#define foreach_lcaf_type \
+ _(1, no_addr) \
+ _(0, NULL) \
+ _(1, vni) \
+ _(0, NULL) \
+ _(0, NULL) \
+ _(0, NULL) \
+ _(0, NULL) \
+ _(0, NULL) \
+ _(0, NULL) \
+ _(0, NULL) \
+ _(0, NULL) \
+ _(0, NULL) \
+ _(1, sd)
+
+#define _(cond, name) \
+ u16 name ## _write (u8 * p, void * a); \
+ u16 name ## _parse (u8 * p, void * a); \
+ u16 name ## _size_to_write (void * a); \
+ void name ## _free (void * a); \
+ void name ## _copy (void * dst, void * src); \
+ u8 name ## _length (void * a); \
+ int name ## _cmp (void *, void *);
+foreach_lcaf_type
+#undef _
+#define CONCAT(a,b) a##_##b
+#define IF(c, t, e) CONCAT(IF, c)(t, e)
+#define IF_0(t, e) e
+#define IF_1(t, e) t
+#define EXPAND_FCN(cond, fcn) \
+ IF(cond, fcn, NULL)
+ cmp_fct lcaf_cmp_fcts[LCAF_TYPES] =
+{
+#define _(cond, name) \
+ EXPAND_FCN(cond, name##_cmp),
+ foreach_lcaf_type
+#undef _
+};
+
+addr_len_fct lcaf_body_length_fcts[LCAF_TYPES] = {
+#define _(cond, name) \
+ EXPAND_FCN(cond, name##_length),
+ foreach_lcaf_type
+#undef _
+};
+
+copy_fct lcaf_copy_fcts[LCAF_TYPES] = {
+#define _(cond, name) \
+ EXPAND_FCN(cond, name##_copy),
+ foreach_lcaf_type
+#undef _
+};
+
+free_fct lcaf_free_fcts[LCAF_TYPES] = {
+#define _(cond, name) \
+ EXPAND_FCN(cond, name##_free),
+ foreach_lcaf_type
+#undef _
+};
+
+size_to_write_fct lcaf_size_to_write_fcts[LCAF_TYPES] = {
+#define _(cond, name) \
+ EXPAND_FCN(cond, name##_size_to_write),
+ foreach_lcaf_type
+#undef _
+};
+
+serdes_fct lcaf_write_fcts[LCAF_TYPES] = {
+#define _(cond, name) \
+ EXPAND_FCN(cond, name##_write),
+ foreach_lcaf_type
+#undef _
+};
+
+serdes_fct lcaf_parse_fcts[LCAF_TYPES] = {
+#define _(cond, name) \
+ EXPAND_FCN(cond, name##_parse),
+ foreach_lcaf_type
+#undef _
+};
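+
+/* Illustrative note: the IF/CONCAT machinery above compiles
+ * foreach_lcaf_type into per-type dispatch tables, substituting NULL for
+ * LCAF types that are not implemented. For example, lcaf_parse_fcts
+ * expands roughly to:
+ *
+ *   serdes_fct lcaf_parse_fcts[LCAF_TYPES] = {
+ *     no_addr_parse, NULL, vni_parse,
+ *     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ *     sd_parse
+ *   };
+ *
+ * so an entry must be NULL-checked before it is invoked, as lcaf_parse()
+ * does below.
+ */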
+
+u8 *
+format_ip_address (u8 * s, va_list * args)
+{
+ ip_address_t *a = va_arg (*args, ip_address_t *);
+ u8 ver = ip_addr_version (a);
+ if (ver == IP4)
+ {
+ return format (s, "%U", format_ip4_address, &ip_addr_v4 (a));
+ }
+ else if (ver == IP6)
+ {
+ return format (s, "%U", format_ip6_address, &ip_addr_v6 (a));
+ }
+ else
+ {
+ clib_warning ("Can't format IP version %d!", ver);
+ return 0;
+ }
+}
+
+uword
+unformat_ip_address (unformat_input_t * input, va_list * args)
+{
+ ip_address_t *a = va_arg (*args, ip_address_t *);
+
+ memset (a, 0, sizeof (*a));
+ if (unformat (input, "%U", unformat_ip4_address, &ip_addr_v4 (a)))
+ ip_addr_version (a) = IP4;
+ else if (unformat_user (input, unformat_ip6_address, &ip_addr_v6 (a)))
+ ip_addr_version (a) = IP6;
+ else
+ return 0;
+ return 1;
+}
+
+u8 *
+format_ip_prefix (u8 * s, va_list * args)
+{
+ ip_prefix_t *a = va_arg (*args, ip_prefix_t *);
+ return format (s, "%U/%d", format_ip_address, &ip_prefix_addr (a),
+ ip_prefix_len (a));
+}
+
+uword
+unformat_ip_prefix (unformat_input_t * input, va_list * args)
+{
+ ip_prefix_t *a = va_arg (*args, ip_prefix_t *);
+ if (unformat (input, "%U/%d", unformat_ip_address, &ip_prefix_addr (a),
+ &ip_prefix_len (a)))
+ {
+      if ((ip_prefix_version (a) == IP4 && 32 < ip_prefix_len (a)) ||
+	  (ip_prefix_version (a) == IP6 && 128 < ip_prefix_len (a)))
+	{
+	  clib_warning ("Prefix length too big: %d!", ip_prefix_len (a));
+ return 0;
+ }
+ ip_prefix_normalize (a);
+ }
+ else
+ return 0;
+ return 1;
+}
+
+uword
+unformat_mac_address (unformat_input_t * input, va_list * args)
+{
+ u8 *a = va_arg (*args, u8 *);
+ return unformat (input, "%x:%x:%x:%x:%x:%x", &a[0], &a[1], &a[2], &a[3],
+ &a[4], &a[5]);
+}
+
+u8 *
+format_mac_address (u8 * s, va_list * args)
+{
+ u8 *a = va_arg (*args, u8 *);
+ return format (s, "%02x:%02x:%02x:%02x:%02x:%02x",
+ a[0], a[1], a[2], a[3], a[4], a[5]);
+}
+
+u8 *
+format_fid_address (u8 * s, va_list * args)
+{
+ fid_address_t *a = va_arg (*args, fid_address_t *);
+
+ switch (fid_addr_type (a))
+ {
+ case FID_ADDR_IP_PREF:
+ return format (s, "%U", format_ip_prefix, &fid_addr_ippref (a));
+
+ case FID_ADDR_MAC:
+ return format (s, "%U", format_mac_address, &fid_addr_mac (a));
+
+ default:
+ clib_warning ("Can't format fid address type %d!", fid_addr_type (a));
+ return 0;
+ }
+ return 0;
+}
+
+u8 *
+format_gid_address (u8 * s, va_list * args)
+{
+ gid_address_t *a = va_arg (*args, gid_address_t *);
+ u8 type = gid_address_type (a);
+ switch (type)
+ {
+ case GID_ADDR_IP_PREFIX:
+ return format (s, "[%d] %U", gid_address_vni (a), format_ip_prefix,
+ &gid_address_ippref (a));
+ case GID_ADDR_SRC_DST:
+ return format (s, "[%d] %U|%U", gid_address_vni (a),
+ format_fid_address, &gid_address_sd_src (a),
+ format_fid_address, &gid_address_sd_dst (a));
+ case GID_ADDR_MAC:
+ return format (s, "[%d] %U", gid_address_vni (a), format_mac_address,
+ &gid_address_mac (a));
+ default:
+ clib_warning ("Can't format gid type %d", type);
+ return 0;
+ }
+ return 0;
+}
+
+uword
+unformat_fid_address (unformat_input_t * i, va_list * args)
+{
+ fid_address_t *a = va_arg (*args, fid_address_t *);
+ ip_prefix_t ippref;
+ u8 mac[6] = { 0 };
+
+ if (unformat (i, "%U", unformat_ip_prefix, &ippref))
+ {
+ fid_addr_type (a) = FID_ADDR_IP_PREF;
+ ip_prefix_copy (&fid_addr_ippref (a), &ippref);
+ }
+ else if (unformat (i, "%U", unformat_mac_address, mac))
+ {
+ fid_addr_type (a) = FID_ADDR_MAC;
+ mac_copy (fid_addr_mac (a), mac);
+ }
+ else
+ return 0;
+
+ return 1;
+}
+
+uword
+unformat_hmac_key_id (unformat_input_t * input, va_list * args)
+{
+ u32 *key_id = va_arg (*args, u32 *);
+ u8 *s = 0;
+
+ if (unformat (input, "%s", &s))
+ {
+ if (!strcmp ((char *) s, "sha1"))
+ key_id[0] = HMAC_SHA_1_96;
+ else if (!strcmp ((char *) s, "sha256"))
+ key_id[0] = HMAC_SHA_256_128;
+ else
+ {
+ clib_warning ("invalid key_id: '%s'", s);
+ key_id[0] = HMAC_NO_KEY;
+ }
+ }
+ else
+ return 0;
+
+ vec_free (s);
+ return 1;
+}
+
+uword
+unformat_gid_address (unformat_input_t * input, va_list * args)
+{
+ gid_address_t *a = va_arg (*args, gid_address_t *);
+ u8 mac[6] = { 0 };
+ ip_prefix_t ippref;
+ fid_address_t sim1, sim2;
+
+ memset (&ippref, 0, sizeof (ippref));
+ memset (&sim1, 0, sizeof (sim1));
+ memset (&sim2, 0, sizeof (sim2));
+
+ if (unformat (input, "%U|%U", unformat_fid_address, &sim1,
+ unformat_fid_address, &sim2))
+ {
+ gid_address_sd_src (a) = sim1;
+ gid_address_sd_dst (a) = sim2;
+ gid_address_type (a) = GID_ADDR_SRC_DST;
+ }
+ else if (unformat (input, "%U", unformat_ip_prefix, &ippref))
+ {
+ ip_prefix_copy (&gid_address_ippref (a), &ippref);
+ gid_address_type (a) = GID_ADDR_IP_PREFIX;
+ }
+ else if (unformat (input, "%U", unformat_mac_address, mac))
+ {
+ mac_copy (gid_address_mac (a), mac);
+ gid_address_type (a) = GID_ADDR_MAC;
+ }
+ else
+ return 0;
+
+ return 1;
+}
+
+uword
+unformat_negative_mapping_action (unformat_input_t * input, va_list * args)
+{
+ u32 *action = va_arg (*args, u32 *);
+ u8 *s = 0;
+
+ if (unformat (input, "%s", &s))
+ {
+ if (!strcmp ((char *) s, "no-action"))
+ action[0] = LISP_NO_ACTION;
+ else if (!strcmp ((char *) s, "natively-forward"))
+ action[0] = LISP_FORWARD_NATIVE;
+ else if (!strcmp ((char *) s, "send-map-request"))
+ action[0] = LISP_SEND_MAP_REQUEST;
+ else if (!strcmp ((char *) s, "drop"))
+ action[0] = LISP_DROP;
+ else
+ {
+ clib_warning ("invalid action: '%s'", s);
+ action[0] = LISP_DROP;
+ }
+ }
+ else
+ return 0;
+
+ vec_free (s);
+ return 1;
+}
+
+u8 *
+format_hmac_key_id (u8 * s, va_list * args)
+{
+ lisp_key_type_t key_id = va_arg (*args, lisp_key_type_t);
+
+ switch (key_id)
+ {
+    case HMAC_SHA_1_96:
+      return format (s, "sha1");
+    case HMAC_SHA_256_128:
+      return format (s, "sha256");
+    default:
+      return s;
+ }
+
+ return 0;
+}
+
+u8 *
+format_negative_mapping_action (u8 * s, va_list * args)
+{
+ lisp_action_e action = va_arg (*args, lisp_action_e);
+
+ switch (action)
+ {
+ case LISP_NO_ACTION:
+ s = format (s, "no-action");
+ break;
+ case LISP_FORWARD_NATIVE:
+ s = format (s, "natively-forward");
+ break;
+ case LISP_SEND_MAP_REQUEST:
+ s = format (s, "send-map-request");
+ break;
+ case LISP_DROP:
+ default:
+ s = format (s, "drop");
+ break;
+ }
+ return (s);
+}
+
+u16
+ip_address_size (const ip_address_t * a)
+{
+ switch (ip_addr_version (a))
+ {
+ case IP4:
+ return sizeof (ip4_address_t);
+ break;
+ case IP6:
+ return sizeof (ip6_address_t);
+ break;
+ }
+ return 0;
+}
+
+u16
+ip_version_to_size (u8 ver)
+{
+ switch (ver)
+ {
+ case IP4:
+ return sizeof (ip4_address_t);
+ break;
+ case IP6:
+ return sizeof (ip6_address_t);
+ break;
+ }
+ return 0;
+}
+
+u8
+ip_version_to_max_plen (u8 ver)
+{
+ switch (ver)
+ {
+ case IP4:
+ return 32;
+ break;
+ case IP6:
+ return 128;
+ break;
+ }
+ return 0;
+}
+
+always_inline lisp_afi_e
+ip_version_to_iana_afi (u16 version)
+{
+ switch (version)
+ {
+ case IP4:
+ return LISP_AFI_IP;
+ case IP6:
+ return LISP_AFI_IP6;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
+always_inline u8
+ip_iana_afi_to_version (lisp_afi_e afi)
+{
+ switch (afi)
+ {
+ case LISP_AFI_IP:
+ return IP4;
+ case LISP_AFI_IP6:
+ return IP6;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
+u16
+ip_address_size_to_write (ip_address_t * a)
+{
+ return ip_address_size (a) + sizeof (u16);
+}
+
+u16
+ip_address_iana_afi (ip_address_t * a)
+{
+ return ip_version_to_iana_afi (ip_addr_version (a));
+}
+
+u8
+ip_address_max_len (u8 version)
+{
+ return version == IP4 ? 32 : 128;
+}
+
+u16
+ip4_address_size_to_put ()
+{
+  /* AFI field + IPv4 address */
+  return sizeof (u16) + sizeof (ip4_address_t);
+}
+
+u16
+ip6_address_size_to_put ()
+{
+  /* AFI field + IPv6 address */
+  return sizeof (u16) + sizeof (ip6_address_t);
+}
+
+u32
+ip4_address_put (u8 * b, ip4_address_t * a)
+{
+ *(u16 *) b = clib_host_to_net_u16 (ip_version_to_iana_afi (IP4));
+ u8 *p = b + sizeof (u16);
+ clib_memcpy (p, a, sizeof (*a));
+ return ip4_address_size_to_put ();
+}
+
+u32
+ip6_address_put (u8 * b, ip6_address_t * a)
+{
+ *(u16 *) b = clib_host_to_net_u16 (ip_version_to_iana_afi (IP6));
+ u8 *p = b + sizeof (u16);
+ clib_memcpy (p, a, sizeof (*a));
+ return ip6_address_size_to_put ();
+}
+
+u32
+ip_address_put (u8 * b, ip_address_t * a)
+{
+ u32 len = ip_address_size (a);
+ *(u16 *) b = clib_host_to_net_u16 (ip_address_iana_afi (a));
+ u8 *p = b + sizeof (u16);
+ clib_memcpy (p, &ip_addr_addr (a), len);
+ return (len + sizeof (u16));
+}
+
+u32
+ip_address_parse (void *offset, u16 iana_afi, ip_address_t * dst)
+{
+ ip_addr_version (dst) = ip_iana_afi_to_version (iana_afi);
+ u8 size = ip_version_to_size (ip_addr_version (dst));
+ clib_memcpy (&ip_addr_addr (dst), offset + sizeof (u16), size);
+ return (sizeof (u16) + size);
+}
+
+u32
+lcaf_hdr_parse (void *offset, lcaf_t * lcaf)
+{
+ lcaf_hdr_t *lh = offset;
+ lcaf->type = lh->type;
+
+  /* this is a bit of a hack: since the LCAF Instance ID is the
+     only type that uses the reserved2 field, we can set it here.
+     If any other LCAF format starts using the reserved2 field as well,
+     this needs to be moved elsewhere */
+ lcaf_vni_len (lcaf) = lh->reserved2;
+
+ return sizeof (lh[0]);
+}
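+
+/* For orientation, a simplified sketch of the LCAF header parsed above
+ * (after the 2-byte AFI), as defined by the LISP Canonical Address Format
+ * specification:
+ *
+ *   +---------------+---------------+---------------+---------------+
+ *   |   Reserved1   |     Flags     |     Type      |   Reserved2   |
+ *   +---------------+---------------+---------------+---------------+
+ *   |            Length             |   ... type-specific body ...
+ *   +---------------+---------------+
+ *
+ * For the Instance ID type, Reserved2 carries the IID mask length, which
+ * is why lcaf_hdr_parse() stashes it via lcaf_vni_len().
+ */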
+
+static u8
+iana_afi_to_fid_addr_type (u16 type)
+{
+ switch (type)
+ {
+ case LISP_AFI_IP:
+ case LISP_AFI_IP6:
+ return FID_ADDR_IP_PREF;
+
+ case LISP_AFI_MAC:
+ return FID_ADDR_MAC;
+ }
+ return ~0;
+}
+
+static u16
+fid_addr_parse (u8 * p, fid_address_t * a)
+{
+ u16 afi = clib_net_to_host_u16 (*(u16 *) p);
+ fid_addr_type (a) = iana_afi_to_fid_addr_type (afi);
+ ip_address_t *ip_addr = &ip_prefix_addr (&fid_addr_ippref (a));
+
+ switch (fid_addr_type (a))
+ {
+ case FID_ADDR_MAC:
+ return mac_parse (p, fid_addr_mac (a));
+
+ case FID_ADDR_IP_PREF:
+ return ip_address_parse (p, afi, ip_addr);
+ }
+ return ~0;
+}
+
+u16
+sd_parse (u8 * p, void *a)
+{
+ lcaf_src_dst_hdr_t *sd_hdr;
+ gid_address_t *g = a;
+ u16 size = 0;
+ fid_address_t *src = &gid_address_sd_src (g);
+ fid_address_t *dst = &gid_address_sd_dst (g);
+
+ gid_address_type (g) = GID_ADDR_SRC_DST;
+
+ sd_hdr = (lcaf_src_dst_hdr_t *) (p + size);
+ size += sizeof (sd_hdr[0]);
+
+ size += fid_addr_parse (p + size, src);
+ size += fid_addr_parse (p + size, dst);
+
+ if (fid_addr_type (src) == FID_ADDR_IP_PREF)
+ {
+ ip_prefix_t *ippref = &fid_addr_ippref (src);
+ ip_prefix_len (ippref) = LCAF_SD_SRC_ML (sd_hdr);
+ }
+ if (fid_addr_type (dst) == FID_ADDR_IP_PREF)
+ {
+ ip_prefix_t *ippref = &fid_addr_ippref (dst);
+ ip_prefix_len (ippref) = LCAF_SD_DST_ML (sd_hdr);
+ }
+ return size;
+}
+
+u16
+try_parse_src_dst_lcaf (u8 * p, gid_address_t * a)
+{
+ lcaf_t lcaf;
+ u16 size = sizeof (u16); /* skip AFI */
+
+ size += lcaf_hdr_parse (p + size, &lcaf);
+
+ if (LCAF_SOURCE_DEST != lcaf_type (&lcaf))
+ return ~0;
+
+ size += sd_parse (p + size, a);
+ return size;
+}
+
+u16
+vni_parse (u8 * p, void *a)
+{
+ lcaf_t *lcaf = a;
+ gid_address_t *g = a;
+ u16 size = 0;
+
+ gid_address_vni (g) = clib_net_to_host_u32 (*(u32 *) p);
+ size += sizeof (u32);
+ gid_address_vni_mask (g) = lcaf_vni_len (lcaf);
+
+  /* nested LCAFs are not supported except for src/dst inside vni - to
+   * handle that case, look at the next AFI and process the src/dst LCAF
+   * separately */
+ u16 afi = clib_net_to_host_u16 (*((u16 *) (p + size)));
+ if (LISP_AFI_LCAF == afi)
+ {
+ u16 len = try_parse_src_dst_lcaf (p + size, g);
+ if ((u16) ~ 0 == len)
+ return ~0;
+ size += len;
+ }
+ else
+ size += gid_address_parse (p + size, g);
+
+ return size;
+}
+
+u16
+no_addr_parse (u8 * p, void *a)
+{
+ /* do nothing */
+ return 0;
+}
+
+u32
+lcaf_parse (void *offset, gid_address_t * addr)
+{
+ /* skip AFI type */
+ offset += sizeof (u16);
+ lcaf_t *lcaf = &gid_address_lcaf (addr);
+
+ u32 size = lcaf_hdr_parse (offset, lcaf);
+ u8 type = lcaf_type (lcaf);
+
+ if (!lcaf_parse_fcts[type])
+ {
+ clib_warning ("Unsupported LCAF type: %u", type);
+ return ~0;
+ }
+ size += (*lcaf_parse_fcts[type]) (offset + size, lcaf);
+ return sizeof (u16) + size;
+}
+
+void
+vni_free (void *a)
+{
+ vni_t *v = a;
+ gid_address_free (vni_gid (v));
+ clib_mem_free (vni_gid (v));
+}
+
+void
+no_addr_free (void *a)
+{
+ /* nothing to do */
+}
+
+void
+sd_free (void *a)
+{
+ /* nothing */
+}
+
+void
+gid_address_free (gid_address_t * a)
+{
+ if (gid_address_type (a) != GID_ADDR_LCAF)
+ return;
+
+ lcaf_t *lcaf = &gid_address_lcaf (a);
+ u8 lcaf_type = lcaf_type (lcaf);
+ (*lcaf_free_fcts[lcaf_type]) (lcaf);
+}
+
+void
+gid_address_from_ip (gid_address_t * g, ip_address_t * ip)
+{
+ memset (g, 0, sizeof (g[0]));
+ ip_address_set (&gid_address_ip (g), ip, ip_addr_version (ip));
+ gid_address_ippref_len (g) = 32;
+}
+
+int
+ip_address_cmp (const ip_address_t * ip1, const ip_address_t * ip2)
+{
+ int res = 0;
+ if (ip_addr_version (ip1) != ip_addr_version (ip2))
+ return -1;
+ res =
+ memcmp (&ip_addr_addr (ip1), &ip_addr_addr (ip2), ip_address_size (ip1));
+
+ if (res < 0)
+ res = 2;
+ else if (res > 0)
+ res = 1;
+
+ return res;
+}
+
+void
+ip_address_copy (ip_address_t * dst, const ip_address_t * src)
+{
+ if (IP4 == ip_addr_version (src))
+ {
+      /* don't copy any garbage from the union */
+ memset (dst, 0, sizeof (*dst));
+ dst->ip.v4 = src->ip.v4;
+ dst->version = IP4;
+ }
+ else
+ {
+ clib_memcpy (dst, src, sizeof (ip_address_t));
+ }
+}
+
+void
+ip_address_copy_addr (void *dst, const ip_address_t * src)
+{
+ clib_memcpy (dst, src, ip_address_size (src));
+}
+
+void
+ip_address_set (ip_address_t * dst, const void *src, u8 version)
+{
+ clib_memcpy (dst, src, ip_version_to_size (version));
+ ip_addr_version (dst) = version;
+}
+
+void
+ip_address_to_46 (const ip_address_t * addr,
+ ip46_address_t * a, fib_protocol_t * proto)
+{
+ *proto = (IP4 == ip_addr_version (addr) ?
+ FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6);
+ switch (*proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ ip46_address_set_ip4 (a, &addr->ip.v4);
+ break;
+ case FIB_PROTOCOL_IP6:
+ a->ip6 = addr->ip.v6;
+ break;
+ default:
+ ASSERT (0);
+ break;
+ }
+}
+
+static void
+ip_prefix_normalize_ip4 (ip4_address_t * ip4, u8 preflen)
+{
+ u32 mask = ~0;
+
+ ASSERT (ip4);
+
+ if (32 <= preflen)
+ {
+ return;
+ }
+
+ mask = pow2_mask (preflen) << (32 - preflen);
+ mask = clib_host_to_net_u32 (mask);
+ ip4->data_u32 &= mask;
+}
+
+static void
+ip_prefix_normalize_ip6 (ip6_address_t * ip6, u8 preflen)
+{
+ u8 mask_6[16];
+ u32 *m;
+ u8 j, i0, i1;
+
+ ASSERT (ip6);
+
+ memset (mask_6, 0, sizeof (mask_6));
+
+ if (128 <= preflen)
+ {
+ return;
+ }
+
+ i1 = preflen % 32;
+ i0 = preflen / 32;
+ m = (u32 *) & mask_6[0];
+
+ for (j = 0; j < i0; j++)
+ {
+ m[j] = ~0;
+ }
+
+ if (i1)
+ {
+ m[i0] = clib_host_to_net_u32 (pow2_mask (i1) << (32 - i1));
+ }
+
+ for (j = 0; j < sizeof (mask_6); j++)
+ {
+ ip6->as_u8[j] &= mask_6[j];
+ }
+}
+
+void
+ip_prefix_normalize (ip_prefix_t * a)
+{
+ u8 preflen = ip_prefix_len (a);
+
+ switch (ip_prefix_version (a))
+ {
+ case IP4:
+ ip_prefix_normalize_ip4 (&ip_prefix_v4 (a), preflen);
+ break;
+
+ case IP6:
+ ip_prefix_normalize_ip6 (&ip_prefix_v6 (a), preflen);
+ break;
+
+ default:
+ ASSERT (0);
+ }
+}
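+
+/* A worked example of the normalization above: for the prefix 10.1.2.3/8,
+ * pow2_mask (8) << 24 gives 0xff000000, so the host bits are cleared and
+ * the stored address becomes 10.0.0.0. This is what lets ip_prefix_cmp()
+ * below rely on a plain memcmp of the addresses. */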
+
+void *
+ip_prefix_cast (gid_address_t * a)
+{
+ return &gid_address_ippref (a);
+}
+
+u16
+ip_prefix_size_to_write (void *pref)
+{
+ ip_prefix_t *a = (ip_prefix_t *) pref;
+ return ip_address_size_to_write (&ip_prefix_addr (a));
+}
+
+u16
+ip_prefix_write (u8 * p, void *gid)
+{
+ gid_address_t *g = gid;
+ ip_prefix_t *a = &gid_address_ippref (g);
+
+ switch (ip_prefix_version (a))
+ {
+ case IP4:
+ return ip4_address_put (p, &ip_prefix_v4 (a));
+ break;
+ case IP6:
+ return ip6_address_put (p, &ip_prefix_v6 (a));
+ break;
+ }
+ return 0;
+}
+
+u8
+ip_prefix_length (void *a)
+{
+ return ip_prefix_len ((ip_prefix_t *) a);
+}
+
+void
+ip_prefix_copy (void *dst, void *src)
+{
+ clib_memcpy (dst, src, sizeof (ip_prefix_t));
+}
+
+void
+mac_copy (void *dst, void *src)
+{
+ clib_memcpy (dst, src, 6);
+}
+
+void
+sd_copy (void *dst, void *src)
+{
+ clib_memcpy (dst, src, sizeof (source_dest_t));
+}
+
+int
+ip_prefix_cmp (ip_prefix_t * p1, ip_prefix_t * p2)
+{
+ int cmp = 0;
+
+ ip_prefix_normalize (p1);
+ ip_prefix_normalize (p2);
+
+ cmp = ip_address_cmp (&ip_prefix_addr (p1), &ip_prefix_addr (p2));
+ if (cmp == 0)
+ {
+ if (ip_prefix_len (p1) < ip_prefix_len (p2))
+ {
+ cmp = 1;
+ }
+ else
+ {
+ if (ip_prefix_len (p1) > ip_prefix_len (p2))
+ cmp = 2;
+ }
+ }
+ return cmp;
+}
+
+void
+no_addr_copy (void *dst, void *src)
+{
+ /* nothing to do */
+}
+
+void
+vni_copy (void *dst, void *src)
+{
+ vni_t *vd = dst;
+ vni_t *vs = src;
+
+ clib_memcpy (vd, vs, sizeof (vd[0]));
+ vni_gid (vd) = clib_mem_alloc (sizeof (gid_address_t));
+ gid_address_copy (vni_gid (vd), vni_gid (vs));
+}
+
+void
+lcaf_copy (void *dst, void *src)
+{
+ lcaf_t *lcaf_dst = dst;
+ lcaf_t *lcaf_src = src;
+
+ lcaf_type (lcaf_dst) = lcaf_type (lcaf_src);
+ (*lcaf_copy_fcts[lcaf_type (lcaf_src)]) (dst, src);
+}
+
+u8
+lcaf_length (void *a)
+{
+ return 0;
+}
+
+u8
+mac_length (void *a)
+{
+ return 0;
+}
+
+u8
+sd_length (void *a)
+{
+ return 0;
+}
+
+void *
+lcaf_cast (gid_address_t * a)
+{
+ return &gid_address_lcaf (a);
+}
+
+void *
+mac_cast (gid_address_t * a)
+{
+ return &gid_address_mac (a);
+}
+
+void *
+sd_cast (gid_address_t * a)
+{
+ return &gid_address_sd (a);
+}
+
+u8
+no_addr_length (void *a)
+{
+ return 0;
+}
+
+u8
+vni_length (void *a)
+{
+ vni_t *v = a;
+ return (sizeof (u32) /* VNI size */
+ + gid_address_size_to_put (vni_gid (v)) /* vni body size */ );
+}
+
+u16
+lcaf_write (u8 * p, void *a)
+{
+ u16 size = 0, len;
+ lcaf_t *lcaf = a;
+ u8 type = lcaf_type (lcaf);
+ lcaf_hdr_t _h, *h = &_h;
+
+ *(u16 *) p = clib_host_to_net_u16 (LISP_AFI_LCAF);
+ size += sizeof (u16);
+ memset (h, 0, sizeof (h[0]));
+ LCAF_TYPE (h) = type;
+ u16 lcaf_len = (*lcaf_body_length_fcts[type]) (lcaf);
+ LCAF_LENGTH (h) = clib_host_to_net_u16 (lcaf_len);
+
+ clib_memcpy (p + size, h, sizeof (h[0]));
+ size += sizeof (h[0]);
+ len = (*lcaf_write_fcts[type]) (p + size, lcaf);
+
+ if ((u16) ~ 0 == len)
+ return ~0;
+
+ return size + len;
+}
+
+u16
+mac_write (u8 * p, void *a)
+{
+ *(u16 *) p = clib_host_to_net_u16 (LISP_AFI_MAC);
+ clib_memcpy (p + sizeof (u16), a, 6);
+ return mac_size_to_write (a);
+}
+
+static u16
+fid_addr_write (u8 * p, fid_address_t * a)
+{
+ switch (fid_addr_type (a))
+ {
+ case FID_ADDR_IP_PREF:
+ return ip_prefix_write (p, &fid_addr_ippref (a));
+
+ case FID_ADDR_MAC:
+ return mac_write (p, &fid_addr_mac (a));
+
+ default:
+ return ~0;
+ }
+ return ~0;
+}
+
+static u8
+fid_address_length (fid_address_t * a)
+{
+ switch (fid_addr_type (a))
+ {
+ case FID_ADDR_IP_PREF:
+ return ip_prefix_length (&fid_addr_ippref (a));
+ case FID_ADDR_MAC:
+ return 0;
+ }
+ return 0;
+}
+
+u16
+sd_write (u8 * p, void *a)
+{
+ source_dest_t *sd = a;
+ u16 size = 0;
+ lcaf_hdr_t _h, *h = &_h;
+ lcaf_src_dst_hdr_t sd_hdr;
+
+ *(u16 *) p = clib_host_to_net_u16 (LISP_AFI_LCAF);
+ size += sizeof (u16);
+ memset (h, 0, sizeof (h[0]));
+ LCAF_TYPE (h) = LCAF_SOURCE_DEST;
+ u16 lcaf_len = sizeof (lcaf_src_dst_hdr_t)
+ + fid_addr_size_to_write (&sd_src (sd))
+ + fid_addr_size_to_write (&sd_dst (sd));
+ LCAF_LENGTH (h) = clib_host_to_net_u16 (lcaf_len);
+
+ clib_memcpy (p + size, h, sizeof (h[0]));
+ size += sizeof (h[0]);
+
+ memset (&sd_hdr, 0, sizeof (sd_hdr));
+ LCAF_SD_SRC_ML (&sd_hdr) = fid_address_length (&sd_src (sd));
+ LCAF_SD_DST_ML (&sd_hdr) = fid_address_length (&sd_dst (sd));
+ clib_memcpy (p + size, &sd_hdr, sizeof (sd_hdr));
+ size += sizeof (sd_hdr);
+
+ u16 len = fid_addr_write (p + size, &sd_src (sd));
+ if ((u16) ~ 0 == len)
+ return ~0;
+ size += len;
+
+ len = fid_addr_write (p + size, &sd_dst (sd));
+ if ((u16) ~ 0 == len)
+ return ~0;
+ size += len;
+
+ return size;
+}
+
+u16
+vni_write (u8 * p, void *a)
+{
+ lcaf_hdr_t _h, *h = &_h;
+ gid_address_t *g = a;
+ u16 size = 0, len;
+
+ /* put lcaf header */
+ *(u16 *) p = clib_host_to_net_u16 (LISP_AFI_LCAF);
+ size += sizeof (u16);
+ memset (h, 0, sizeof (h[0]));
+ LCAF_TYPE (h) = LCAF_INSTANCE_ID;
+ u16 lcaf_len = sizeof (u32) /* Instance ID size */
+ + gid_address_size_to_put_no_vni (g);
+ LCAF_LENGTH (h) = clib_host_to_net_u16 (lcaf_len);
+ LCAF_RES2 (h) = gid_address_vni_mask (g);
+
+ /* put vni header */
+ clib_memcpy (p + size, h, sizeof (h[0]));
+ size += sizeof (h[0]);
+
+ u32 *afip = (u32 *) (p + size);
+ afip[0] = clib_host_to_net_u32 (gid_address_vni (g));
+ size += sizeof (u32);
+
+ if (GID_ADDR_SRC_DST == gid_address_type (g))
+ /* write src/dst LCAF */
+ {
+ len = sd_write (p + size, g);
+ if ((u16) ~ 0 == len)
+ return ~0;
+ }
+ else
+ /* write the actual address */
+ len = gid_address_put_no_vni (p + size, g);
+
+ if ((u16) ~ 0 == len)
+ return ~0;
+
+ return size + len;
+}
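+
+/* Sketch of the bytes vni_write() emits, for orientation (assuming an
+ * inner IPv4 EID):
+ *
+ *   AFI = LISP_AFI_LCAF                          2 bytes
+ *   lcaf_hdr_t (type = LCAF_INSTANCE_ID,
+ *               reserved2 = vni mask, length)    6 bytes
+ *   Instance ID / vni                            4 bytes
+ *   inner address with its own AFI               e.g. 2 + 4 bytes for ip4
+ *
+ * which is exactly the layout vni_size_to_write() accounts for below.
+ */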
+
+u16
+no_addr_write (u8 * p, void *a)
+{
+ /* do nothing; return AFI field size */
+ return sizeof (u16);
+}
+
+u16
+no_addr_size_to_write (void *a)
+{
+ return sizeof (u16); /* AFI field length */
+}
+
+static u16
+fid_addr_size_to_write (fid_address_t * a)
+{
+ switch (fid_addr_type (a))
+ {
+ case FID_ADDR_IP_PREF:
+ return ip_prefix_size_to_write (a);
+
+ case FID_ADDR_MAC:
+ return mac_size_to_write (a);
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+u16
+vni_size_to_write (void *a)
+{
+ gid_address_t *g = a;
+
+ u16 lcaf_size = sizeof (u32) + sizeof (u16) /* LCAF AFI field size */
+ + sizeof (lcaf_hdr_t);
+
+ if (gid_address_type (g) == GID_ADDR_SRC_DST)
+ /* special case where nested LCAF is supported */
+ return lcaf_size + sd_size_to_write (g);
+ else
+ return lcaf_size + gid_address_size_to_put_no_vni (g);
+}
+
+u16
+lcaf_size_to_write (void *a)
+{
+ lcaf_t *lcaf = (lcaf_t *) a;
+ u32 size = 0, len;
+ u8 type = lcaf_type (lcaf);
+
+ size += sizeof (u16); /* AFI size */
+
+ len = (*lcaf_size_to_write_fcts[type]) (lcaf);
+ if (~0 == len)
+ return ~0;
+
+ return size + len;
+}
+
+u16
+sd_size_to_write (void *a)
+{
+ source_dest_t *sd = a;
+ return sizeof (u16)
+ + sizeof (lcaf_hdr_t)
+ + sizeof (lcaf_src_dst_hdr_t)
+ + fid_addr_size_to_write (&sd_src (sd))
+ + fid_addr_size_to_write (&sd_dst (sd));
+}
+
+u16
+mac_size_to_write (void *a)
+{
+ return sizeof (u16) + 6;
+}
+
+u8
+gid_address_len (gid_address_t * a)
+{
+ gid_address_type_t type = gid_address_type (a);
+ return (*addr_len_fcts[type]) ((*cast_fcts[type]) (a));
+}
+
+static u16
+gid_address_put_no_vni (u8 * b, gid_address_t * gid)
+{
+ gid_address_type_t type = gid_address_type (gid);
+ return (*write_fcts[type]) (b, (*cast_fcts[type]) (gid));
+}
+
+u16
+gid_address_put (u8 * b, gid_address_t * gid)
+{
+ if (0 != gid_address_vni (gid))
+ return vni_write (b, gid);
+
+ return gid_address_put_no_vni (b, gid);
+}
+
+static u16
+gid_address_size_to_put_no_vni (gid_address_t * gid)
+{
+ gid_address_type_t type = gid_address_type (gid);
+ return (*size_to_write_fcts[type]) ((*cast_fcts[type]) (gid));
+}
+
+u16
+gid_address_size_to_put (gid_address_t * gid)
+{
+ if (0 != gid_address_vni (gid))
+ return vni_size_to_write (gid);
+
+ return gid_address_size_to_put_no_vni (gid);
+}
+
+void *
+gid_address_cast (gid_address_t * gid, gid_address_type_t type)
+{
+ return (*cast_fcts[type]) (gid);
+}
+
+void
+gid_address_copy (gid_address_t * dst, gid_address_t * src)
+{
+ gid_address_type_t type = gid_address_type (src);
+ (*copy_fcts[type]) ((*cast_fcts[type]) (dst), (*cast_fcts[type]) (src));
+ gid_address_type (dst) = type;
+ gid_address_vni (dst) = gid_address_vni (src);
+ gid_address_vni_mask (dst) = gid_address_vni_mask (src);
+}
+
+u32
+mac_parse (u8 * offset, u8 * a)
+{
+ /* skip AFI field */
+ offset += sizeof (u16);
+
+ clib_memcpy (a, offset, 6);
+ return sizeof (u16) + 6;
+}
+
+u32
+gid_address_parse (u8 * offset, gid_address_t * a)
+{
+ lisp_afi_e afi;
+ int len = 0;
+
+ if (!a)
+ return 0;
+
+  /* NOTE: since gid_address_parse may be called by vni_parse, we can't
+   * zero out the gid address here */
+ afi = clib_net_to_host_u16 (*((u16 *) offset));
+
+ switch (afi)
+ {
+ case LISP_AFI_NO_ADDR:
+ len = sizeof (u16);
+ gid_address_type (a) = GID_ADDR_NO_ADDRESS;
+ break;
+ case LISP_AFI_IP:
+ len = ip_address_parse (offset, afi, &gid_address_ip (a));
+ gid_address_type (a) = GID_ADDR_IP_PREFIX;
+ /* this should be modified outside if needed */
+ gid_address_ippref_len (a) = 32;
+ break;
+ case LISP_AFI_IP6:
+ len = ip_address_parse (offset, afi, &gid_address_ip (a));
+ gid_address_type (a) = GID_ADDR_IP_PREFIX;
+ /* this should be modified outside if needed */
+ gid_address_ippref_len (a) = 128;
+ break;
+ case LISP_AFI_LCAF:
+ gid_address_type (a) = GID_ADDR_LCAF;
+ len = lcaf_parse (offset, a);
+ break;
+ case LISP_AFI_MAC:
+ len = mac_parse (offset, gid_address_mac (a));
+ gid_address_type (a) = GID_ADDR_MAC;
+ break;
+ default:
+ clib_warning ("LISP AFI %d not supported!", afi);
+ return ~0;
+ }
+ return len;
+}
+
+void
+gid_address_ip_set (gid_address_t * dst, void *src, u8 version)
+{
+ gid_address_ippref_len (dst) = ip_address_max_len (version);
+ ip_address_set (&gid_address_ip (dst), src, version);
+}
+
+int
+no_addr_cmp (void *a1, void *a2)
+{
+ return 0;
+}
+
+int
+vni_cmp (void *a1, void *a2)
+{
+ vni_t *v1 = a1;
+ vni_t *v2 = a2;
+
+ if (vni_mask_len (v1) != vni_mask_len (v2))
+ return -1;
+ if (vni_vni (v1) != vni_vni (v2))
+ return -1;
+ return gid_address_cmp (vni_gid (v1), vni_gid (v2));
+}
+
+static int
+mac_cmp (void *a1, void *a2)
+{
+ return memcmp (a1, a2, 6);
+}
+
+static int
+fid_addr_cmp (fid_address_t * a1, fid_address_t * a2)
+{
+ if (fid_addr_type (a1) != fid_addr_type (a2))
+ return -1;
+
+ switch (fid_addr_type (a1))
+ {
+ case FID_ADDR_IP_PREF:
+ return ip_prefix_cmp (&fid_addr_ippref (a1), &fid_addr_ippref (a2));
+
+ case FID_ADDR_MAC:
+ return mac_cmp (fid_addr_mac (a1), fid_addr_mac (a2));
+
+ default:
+ return -1;
+ }
+ return -1;
+}
+
+int
+sd_cmp (void *a1, void *a2)
+{
+ source_dest_t *sd1 = a1;
+ source_dest_t *sd2 = a2;
+
+ if (fid_addr_cmp (&sd_dst (sd1), &sd_dst (sd2)))
+ return -1;
+ if (fid_addr_cmp (&sd_src (sd1), &sd_src (sd2)))
+ return -1;
+ return 0;
+}
+
+/* Compare two gid_address_t.
+ * Returns:
+ *  -1: the addresses are incomparable (different type, vni or AFI)
+ *   0: both addresses are the same
+ *   1: addr1 is bigger than addr2
+ *   2: addr2 is bigger than addr1
+ */
+int
+gid_address_cmp (gid_address_t * a1, gid_address_t * a2)
+{
+ lcaf_t *lcaf1, *lcaf2;
+ int cmp = -1;
+ if (!a1 || !a2)
+ return -1;
+ if (gid_address_type (a1) != gid_address_type (a2))
+ return -1;
+ if (gid_address_vni (a1) != gid_address_vni (a2))
+ return -1;
+ if (gid_address_vni_mask (a1) != gid_address_vni_mask (a2))
+ return -1;
+
+ switch (gid_address_type (a1))
+ {
+ case GID_ADDR_NO_ADDRESS:
+ if (a1 == a2)
+ cmp = 0;
+ else
+ cmp = 2;
+ break;
+ case GID_ADDR_IP_PREFIX:
+ cmp =
+ ip_prefix_cmp (&gid_address_ippref (a1), &gid_address_ippref (a2));
+ break;
+ case GID_ADDR_LCAF:
+ lcaf1 = &gid_address_lcaf (a1);
+ lcaf2 = &gid_address_lcaf (a2);
+ if (lcaf_type (lcaf1) == lcaf_type (lcaf2))
+ cmp = (*lcaf_cmp_fcts[lcaf_type (lcaf1)]) (lcaf1, lcaf2);
+ break;
+ case GID_ADDR_MAC:
+ cmp = mac_cmp (gid_address_mac (a1), gid_address_mac (a2));
+ break;
+
+ case GID_ADDR_SRC_DST:
+ cmp = sd_cmp (&gid_address_sd (a1), &gid_address_sd (a2));
+ break;
+ default:
+ break;
+ }
+
+ return cmp;
+}
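+
+/* A minimal usage sketch of the convention above (eid1, eid2 and
+ * handle_match are hypothetical):
+ *
+ *   if (0 == gid_address_cmp (&eid1, &eid2))
+ *     handle_match ();
+ *
+ * i.e. callers typically test for equality; the 1/2 results only matter
+ * where prefixes need ordering. */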
+
+u32
+locator_parse (void *b, locator_t * loc)
+{
+ locator_hdr_t *h;
+ u8 status = 1; /* locator up */
+ int len;
+
+ h = b;
+ if (!LOC_REACHABLE (h) && LOC_LOCAL (h))
+ status = 0;
+
+ len = gid_address_parse (LOC_ADDR (h), &loc->address);
+ if (len == ~0)
+ return len;
+
+ loc->state = status;
+ loc->local = 0;
+ loc->priority = LOC_PRIORITY (h);
+ loc->weight = LOC_WEIGHT (h);
+ loc->mpriority = LOC_MPRIORITY (h);
+ loc->mweight = LOC_MWEIGHT (h);
+
+ return sizeof (locator_hdr_t) + len;
+}
+
+void
+locator_copy (locator_t * dst, locator_t * src)
+{
+  /* TODO if gids become more complex, this will need to be changed! */
+ clib_memcpy (dst, src, sizeof (*dst));
+ if (!src->local)
+ gid_address_copy (&dst->address, &src->address);
+}
+
+u32
+locator_cmp (locator_t * l1, locator_t * l2)
+{
+ u32 ret = 0;
+ if ((ret = gid_address_cmp (&l1->address, &l2->address)) != 0)
+ return 1;
+
+ if (l1->priority != l2->priority)
+ return 1;
+ if (l1->weight != l2->weight)
+ return 1;
+ if (l1->mpriority != l2->mpriority)
+ return 1;
+ if (l1->mweight != l2->mweight)
+ return 1;
+ return 0;
+}
+
+void
+locator_free (locator_t * l)
+{
+ if (!l->local)
+ gid_address_free (&l->address);
+}
+
+void
+build_src_dst (gid_address_t * sd, gid_address_t * src, gid_address_t * dst)
+{
+ memset (sd, 0, sizeof (*sd));
+ gid_address_type (sd) = GID_ADDR_SRC_DST;
+ gid_address_vni (sd) = gid_address_vni (dst);
+ gid_address_vni_mask (sd) = gid_address_vni_mask (dst);
+
+ switch (gid_address_type (dst))
+ {
+ case GID_ADDR_IP_PREFIX:
+ gid_address_sd_src_type (sd) = FID_ADDR_IP_PREF;
+ gid_address_sd_dst_type (sd) = FID_ADDR_IP_PREF;
+ ip_prefix_copy (&gid_address_sd_src_ippref (sd),
+ &gid_address_ippref (src));
+ ip_prefix_copy (&gid_address_sd_dst_ippref (sd),
+ &gid_address_ippref (dst));
+ break;
+ case GID_ADDR_MAC:
+ gid_address_sd_src_type (sd) = FID_ADDR_MAC;
+ gid_address_sd_dst_type (sd) = FID_ADDR_MAC;
+ mac_copy (gid_address_sd_src_mac (sd), gid_address_mac (src));
+ mac_copy (gid_address_sd_dst_mac (sd), gid_address_mac (dst));
+ break;
+ default:
+      clib_warning ("Unsupported gid type %d during conversion!",
+		    gid_address_type (dst));
+ break;
+ }
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-cp/lisp_types.h b/src/vnet/lisp-cp/lisp_types.h
new file mode 100644
index 00000000000..ac58b894c2d
--- /dev/null
+++ b/src/vnet/lisp-cp/lisp_types.h
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VNET_LISP_GPE_LISP_TYPES_H_
+#define VNET_LISP_GPE_LISP_TYPES_H_
+
+#include <vnet/ip/ip.h>
+#include <vnet/lisp-cp/lisp_cp_messages.h>
+
+#define SHA1_AUTH_DATA_LEN 20
+#define SHA256_AUTH_DATA_LEN 32
+
+typedef enum
+{
+ HMAC_NO_KEY = 0,
+ HMAC_SHA_1_96,
+ HMAC_SHA_256_128
+} lisp_key_type_t;
+
+uword unformat_hmac_key_id (unformat_input_t * input, va_list * args);
+u8 *format_hmac_key_id (u8 * s, va_list * args);
+
+typedef enum
+{
+ IP4,
+ IP6
+} ip_address_type_t;
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED(struct ip_address
+{
+ union
+ {
+ ip4_address_t v4;
+ ip6_address_t v6;
+ } ip;
+ u8 version;
+}) ip_address_t;
+/* *INDENT-ON* */
+
+#define ip_addr_addr(_a) (_a)->ip
+#define ip_addr_v4(_a) (_a)->ip.v4
+#define ip_addr_v6(_a) (_a)->ip.v6
+#define ip_addr_version(_a) (_a)->version
+
+int ip_address_cmp (const ip_address_t * ip1, const ip_address_t * ip2);
+void ip_address_copy (ip_address_t * dst, const ip_address_t * src);
+void ip_address_copy_addr (void *dst, const ip_address_t * src);
+void ip_address_set (ip_address_t * dst, const void *src, u8 version);
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED(struct ip_prefix
+{
+ ip_address_t addr;
+ u8 len;
+}) ip_prefix_t;
+/* *INDENT-ON* */
+
+#define ip_prefix_addr(_a) (_a)->addr
+#define ip_prefix_version(_a) ip_addr_version(&ip_prefix_addr(_a))
+#define ip_prefix_len(_a) (_a)->len
+#define ip_prefix_v4(_a) ip_addr_v4(&ip_prefix_addr(_a))
+#define ip_prefix_v6(_a) ip_addr_v6(&ip_prefix_addr(_a))
+
+void ip_prefix_normalize (ip_prefix_t * a);
+
+extern void ip_address_to_fib_prefix (const ip_address_t * addr,
+ fib_prefix_t * prefix);
+extern void ip_prefix_to_fib_prefix (const ip_prefix_t * ipp,
+ fib_prefix_t * fibp);
+
+typedef enum
+{
+  /* NOTE: ip addresses are left out on purpose. Use maximally masked
+   * ip-prefixes instead */
+ GID_ADDR_IP_PREFIX,
+ GID_ADDR_LCAF,
+ GID_ADDR_MAC,
+ GID_ADDR_SRC_DST,
+ GID_ADDR_NO_ADDRESS,
+ GID_ADDR_TYPES
+} gid_address_type_t;
+
+typedef enum
+{
+  /* make sure the values correspond with the RFC */
+ LCAF_NULL_BODY = 0,
+ LCAF_AFI_LIST_TYPE,
+ LCAF_INSTANCE_ID,
+ LCAF_SOURCE_DEST = 12,
+ LCAF_TYPES
+} lcaf_type_t;
+
+typedef enum fid_addr_type_t_
+{
+ FID_ADDR_IP_PREF,
+ FID_ADDR_MAC
+} __attribute__ ((packed)) fid_addr_type_t;
+
+/* flat address type */
+typedef struct
+{
+ union
+ {
+ ip_prefix_t ippref;
+ u8 mac[6];
+ };
+ fid_addr_type_t type;
+} fid_address_t;
+
+typedef fid_address_t dp_address_t;
+
+#define fid_addr_ippref(_a) (_a)->ippref
+#define fid_addr_mac(_a) (_a)->mac
+#define fid_addr_type(_a) (_a)->type
+u8 *format_fid_address (u8 * s, va_list * args);
+
+typedef struct
+{
+ fid_address_t src;
+ fid_address_t dst;
+} source_dest_t;
+
+#define sd_dst(_a) (_a)->dst
+#define sd_src(_a) (_a)->src
+#define sd_src_ippref(_a) fid_addr_ippref(&sd_src(_a))
+#define sd_dst_ippref(_a) fid_addr_ippref(&sd_dst(_a))
+#define sd_src_mac(_a) fid_addr_mac(&sd_src(_a))
+#define sd_dst_mac(_a) fid_addr_mac(&sd_dst(_a))
+#define sd_src_type(_a) fid_addr_type(&sd_src(_a))
+#define sd_dst_type(_a) fid_addr_type(&sd_dst(_a))
+
+typedef struct
+{
+ u8 vni_mask_len;
+ u32 vni;
+ struct _gid_address_t *gid_addr;
+} vni_t;
+
+#define vni_vni(_a) (_a)->vni
+#define vni_mask_len(_a) (_a)->vni_mask_len
+#define vni_gid(_a) (_a)->gid_addr
+
+typedef struct
+{
+ /* the union needs to be at the beginning! */
+ union
+ {
+ source_dest_t sd;
+ vni_t uni;
+ };
+ u8 type;
+} lcaf_t;
+
+#define lcaf_type(_a) (_a)->type
+#define lcaf_vni(_a) vni_vni(& (_a)->uni)
+#define lcaf_vni_len(_a) vni_mask_len(& (_a)->uni)
+
+/* might want to expand this in the future :) */
+typedef struct _gid_address_t
+{
+ union
+ {
+ ip_prefix_t ippref;
+ lcaf_t lcaf;
+ u8 mac[6];
+ source_dest_t sd;
+ };
+ u8 type;
+ u32 vni;
+ u8 vni_mask;
+} gid_address_t;
+
+u8 *format_ip_address (u8 * s, va_list * args);
+uword unformat_ip_address (unformat_input_t * input, va_list * args);
+u8 *format_ip_prefix (u8 * s, va_list * args);
+uword unformat_ip_prefix (unformat_input_t * input, va_list * args);
+u8 *format_mac_address (u8 * s, va_list * args);
+uword unformat_mac_address (unformat_input_t * input, va_list * args);
+
+u16 ip4_address_size_to_put ();
+u16 ip6_address_size_to_put ();
+u32 ip4_address_put (u8 * b, ip4_address_t * a);
+u32 ip6_address_put (u8 * b, ip6_address_t * a);
+
+u16 ip_address_size_to_write (ip_address_t * a);
+u16 ip_address_iana_afi (ip_address_t * a);
+u8 ip_address_max_len (u8 ver);
+u32 ip_address_put (u8 * b, ip_address_t * a);
+void ip_address_to_46 (const ip_address_t * addr,
+ ip46_address_t * a, fib_protocol_t * proto);
+
+/* LISP AFI codes */
+typedef enum
+{
+ LISP_AFI_NO_ADDR,
+ LISP_AFI_IP,
+ LISP_AFI_IP6,
+ LISP_AFI_LCAF = 16387,
+ LISP_AFI_MAC = 16389
+} lisp_afi_e;
+
+u8 *format_gid_address (u8 * s, va_list * args);
+uword unformat_gid_address (unformat_input_t * input, va_list * args);
+int gid_address_cmp (gid_address_t * a1, gid_address_t * a2);
+void gid_address_free (gid_address_t * a);
+
+u16 gid_address_size_to_put (gid_address_t * a);
+u16 gid_address_put (u8 * b, gid_address_t * gid);
+u8 gid_address_len (gid_address_t * a);
+void *gid_address_cast (gid_address_t * gid, gid_address_type_t type);
+void gid_address_copy (gid_address_t * dst, gid_address_t * src);
+u32 gid_address_parse (u8 * offset, gid_address_t * a);
+void gid_address_ip_set (gid_address_t * dst, void *src, u8 version);
+
+#define gid_address_type(_a) (_a)->type
+#define gid_address_ippref(_a) (_a)->ippref
+#define gid_address_ippref_len(_a) (_a)->ippref.len
+#define gid_address_ip(_a) ip_prefix_addr(&gid_address_ippref(_a))
+#define gid_address_ip_version(_a) ip_addr_version(&gid_address_ip(_a))
+#define gid_address_lcaf(_a) (_a)->lcaf
+#define gid_address_mac(_a) (_a)->mac
+#define gid_address_vni(_a) (_a)->vni
+#define gid_address_vni_mask(_a) (_a)->vni_mask
+#define gid_address_sd_dst_ippref(_a) sd_dst_ippref(&(_a)->sd)
+#define gid_address_sd_src_ippref(_a) sd_src_ippref(&(_a)->sd)
+#define gid_address_sd_dst_mac(_a) sd_dst_mac(&(_a)->sd)
+#define gid_address_sd_src_mac(_a) sd_src_mac(&(_a)->sd)
+#define gid_address_sd(_a) (_a)->sd
+#define gid_address_sd_src(_a) sd_src(&gid_address_sd(_a))
+#define gid_address_sd_dst(_a) sd_dst(&gid_address_sd(_a))
+#define gid_address_sd_src_type(_a) sd_src_type(&gid_address_sd(_a))
+#define gid_address_sd_dst_type(_a) sd_dst_type(&gid_address_sd(_a))
+
+/* 'sub'address functions */
+#define foreach_gid_address_type_fcns \
+ _(ip_prefix) \
+ _(lcaf) \
+ _(mac) \
+ _(sd)
+
+/* *INDENT-OFF* */
+#define _(_n) \
+u16 _n ## _size_to_write (void * pref); \
+u16 _n ## _write (u8 * p, void * pref); \
+u8 _n ## _length (void *a); \
+void * _n ## _cast (gid_address_t * a); \
+void _n ## _copy (void * dst , void * src);
+
+foreach_gid_address_type_fcns
+#undef _
+/* *INDENT-ON* */
+
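+/* Note: the helper below reads a full (possibly unaligned) u64 starting at
+ * the 6-byte MAC and masks off the top 16 bits, so it assumes the two
+ * bytes past the address are readable and, for a byte-exact value, a
+ * little-endian host. */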
+always_inline u64
+mac_to_u64 (u8 * m)
+{
+ return (*((u64 *) m) & 0xffffffffffff);
+}
+
+typedef struct
+{
+ /* mark locator as local as opposed to remote */
+ u8 local;
+ u8 state;
+ union
+ {
+ u32 sw_if_index;
+ gid_address_t address;
+ };
+ u8 priority;
+ u8 weight;
+ u8 mpriority;
+ u8 mweight;
+ u8 probed;
+} locator_t;
+
+u32 locator_parse (void *ptr, locator_t * loc);
+void locator_copy (locator_t * dst, locator_t * src);
+u32 locator_cmp (locator_t * l1, locator_t * l2);
+void locator_free (locator_t * l);
+
+typedef struct
+{
+ /* locator-set name */
+ u8 *name;
+
+ /* vector of locator indices */
+ u32 *locator_indices;
+ u8 local;
+} locator_set_t;
+
+typedef struct
+{
+ gid_address_t eid;
+
+ /* index of local locator set */
+ union
+ {
+ u32 locator_set_index;
+ locator_t *locators; /* used for map register message */
+ };
+
+ u32 ttl;
+ u8 action;
+ u8 authoritative;
+
+ u8 local;
+ /* valid only for remote mappings */
+ u8 is_static;
+ u8 *key;
+ lisp_key_type_t key_id;
+ u8 timer_set;
+} mapping_t;
+
+uword
+unformat_negative_mapping_action (unformat_input_t * input, va_list * args);
+u8 *format_negative_mapping_action (u8 *, va_list * args);
+
+typedef struct locator_pair
+{
+ /* local and remote locators (underlay attachment points) */
+ ip_address_t lcl_loc;
+ ip_address_t rmt_loc;
+
+ u8 priority;
+ u8 weight;
+} locator_pair_t;
+
+void
+build_src_dst (gid_address_t * sd, gid_address_t * src, gid_address_t * dst);
+
+void gid_address_from_ip (gid_address_t * g, ip_address_t * ip);
+
+#endif /* VNET_LISP_GPE_LISP_TYPES_H_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-cp/packets.c b/src/vnet/lisp-cp/packets.c
new file mode 100644
index 00000000000..3a4f421b02a
--- /dev/null
+++ b/src/vnet/lisp-cp/packets.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/lisp-cp/packets.h>
+#include <vnet/lisp-cp/lisp_cp_messages.h>
+#include <vnet/ip/udp_packet.h>
+
+/* Returns IP ID for the packet */
+/* static u16 ip_id = 0;
+static inline u16
+get_IP_ID()
+{
+ ip_id++;
+ return (ip_id);
+} */
+
+u16
+udp_ip4_checksum (const void *b, u32 len, u8 * src, u8 * dst)
+{
+ const u16 *buf = b;
+ u16 *ip_src = (u16 *) src;
+ u16 *ip_dst = (u16 *) dst;
+ u32 length = len;
+ u32 sum = 0;
+
+ while (len > 1)
+ {
+ sum += *buf++;
+ if (sum & 0x80000000)
+ sum = (sum & 0xFFFF) + (sum >> 16);
+ len -= 2;
+ }
+
+ /* Add the padding if the packet length is odd */
+ if (len & 1)
+ sum += *((u8 *) buf);
+
+ /* Add the pseudo-header */
+ sum += *(ip_src++);
+ sum += *ip_src;
+
+ sum += *(ip_dst++);
+ sum += *ip_dst;
+
+ sum += clib_host_to_net_u16 (IP_PROTOCOL_UDP);
+ sum += clib_host_to_net_u16 (length);
+
+ /* Add the carries */
+ while (sum >> 16)
+ sum = (sum & 0xFFFF) + (sum >> 16);
+
+ /* Return the one's complement of sum */
+ return ((u16) (~sum));
+}
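+
+/* For orientation: the folds above implement one's-complement addition,
+ * e.g. a running sum of 0x2345f folds to 0x345f + 0x2 = 0x3461, and the
+ * final return is its bitwise complement as RFC 768 requires. */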
+
+u16
+udp_ip6_checksum (ip6_header_t * ip6, udp_header_t * up, u32 len)
+{
+ size_t i;
+ register const u16 *sp;
+ u32 sum;
+ union
+ {
+ struct
+ {
+ ip6_address_t ph_src;
+ ip6_address_t ph_dst;
+ u32 ph_len;
+ u8 ph_zero[3];
+ u8 ph_nxt;
+ } ph;
+ u16 pa[20];
+ } phu;
+
+ /* pseudo-header */
+ memset (&phu, 0, sizeof (phu));
+ phu.ph.ph_src = ip6->src_address;
+ phu.ph.ph_dst = ip6->dst_address;
+ phu.ph.ph_len = clib_host_to_net_u32 (len);
+ phu.ph.ph_nxt = IP_PROTOCOL_UDP;
+
+ sum = 0;
+ for (i = 0; i < sizeof (phu.pa) / sizeof (phu.pa[0]); i++)
+ sum += phu.pa[i];
+
+ sp = (const u16 *) up;
+
+ for (i = 0; i < (len & ~1); i += 2)
+ sum += *sp++;
+
+ if (len & 1)
+ sum += clib_host_to_net_u16 ((*(const u8 *) sp) << 8);
+
+ while (sum > 0xffff)
+ sum = (sum & 0xffff) + (sum >> 16);
+ sum = ~sum & 0xffff;
+
+ return (sum);
+}
+
+u16
+udp_checksum (udp_header_t * uh, u32 udp_len, void *ih, u8 version)
+{
+ switch (version)
+ {
+ case IP4:
+ return (udp_ip4_checksum (uh, udp_len,
+ ((ip4_header_t *) ih)->src_address.as_u8,
+ ((ip4_header_t *) ih)->dst_address.as_u8));
+ case IP6:
+ return (udp_ip6_checksum (ih, uh, udp_len));
+ default:
+ return ~0;
+ }
+}
+
+void *
+pkt_push_udp (vlib_main_t * vm, vlib_buffer_t * b, u16 sp, u16 dp)
+{
+ udp_header_t *uh;
+ u16 udp_len = sizeof (udp_header_t) + vlib_buffer_length_in_chain (vm, b);
+
+ uh = vlib_buffer_push_uninit (b, sizeof (*uh));
+
+ uh->src_port = clib_host_to_net_u16 (sp);
+ uh->dst_port = clib_host_to_net_u16 (dp);
+ uh->length = clib_host_to_net_u16 (udp_len);
+ uh->checksum = 0;
+ return uh;
+}
+
+void *
+pkt_push_ipv4 (vlib_main_t * vm, vlib_buffer_t * b, ip4_address_t * src,
+ ip4_address_t * dst, int proto)
+{
+ ip4_header_t *ih;
+
+ /* make some room */
+ ih = vlib_buffer_push_uninit (b, sizeof (ip4_header_t));
+
+ ih->ip_version_and_header_length = 0x45;
+ ih->tos = 0;
+ ih->length = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b));
+
+ /* iph->fragment_id = clib_host_to_net_u16(get_IP_ID ()); */
+
+  /* TODO: decide if we allow fragments for control packets */
+ ih->flags_and_fragment_offset = clib_host_to_net_u16 (IP_DF);
+ ih->ttl = 255;
+ ih->protocol = proto;
+ ih->src_address.as_u32 = src->as_u32;
+ ih->dst_address.as_u32 = dst->as_u32;
+
+ ih->checksum = ip4_header_checksum (ih);
+ return ih;
+}
+
+void *
+pkt_push_ipv6 (vlib_main_t * vm, vlib_buffer_t * b, ip6_address_t * src,
+ ip6_address_t * dst, int proto)
+{
+ ip6_header_t *ip6h;
+ u16 payload_length;
+
+ /* make some room */
+ ip6h = vlib_buffer_push_uninit (b, sizeof (ip6_header_t));
+
+ ip6h->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 (0x6 << 28);
+
+ /* calculate ip6 payload length */
+ payload_length = vlib_buffer_length_in_chain (vm, b);
+ payload_length -= sizeof (*ip6h);
+
+ ip6h->payload_length = clib_host_to_net_u16 (payload_length);
+
+ ip6h->hop_limit = 0xff;
+ ip6h->protocol = proto;
+ clib_memcpy (ip6h->src_address.as_u8, src->as_u8,
+ sizeof (ip6h->src_address));
+ clib_memcpy (ip6h->dst_address.as_u8, dst->as_u8,
+ sizeof (ip6h->src_address));
+
+ return ip6h;
+}
+
+void *
+pkt_push_ip (vlib_main_t * vm, vlib_buffer_t * b, ip_address_t * src,
+ ip_address_t * dst, u32 proto)
+{
+ if (ip_addr_version (src) != ip_addr_version (dst))
+ {
+ clib_warning ("src %U and dst %U IP have different AFI! Discarding!",
+ format_ip_address, src, format_ip_address, dst);
+ return 0;
+ }
+
+ switch (ip_addr_version (src))
+ {
+ case IP4:
+ return pkt_push_ipv4 (vm, b, &ip_addr_v4 (src), &ip_addr_v4 (dst),
+ proto);
+ break;
+ case IP6:
+ return pkt_push_ipv6 (vm, b, &ip_addr_v6 (src), &ip_addr_v6 (dst),
+ proto);
+ break;
+ }
+
+ return 0;
+}
+
+void *
+pkt_push_udp_and_ip (vlib_main_t * vm, vlib_buffer_t * b, u16 sp, u16 dp,
+ ip_address_t * sip, ip_address_t * dip)
+{
+ u16 udpsum;
+ udp_header_t *uh;
+ void *ih;
+
+ uh = pkt_push_udp (vm, b, sp, dp);
+
+ ih = pkt_push_ip (vm, b, sip, dip, IP_PROTOCOL_UDP);
+
+ udpsum = udp_checksum (uh, clib_net_to_host_u16 (uh->length), ih,
+ ip_addr_version (sip));
+ if (udpsum == (u16) ~ 0)
+ {
+ clib_warning ("Failed UDP checksum! Discarding");
+ return 0;
+ }
+ uh->checksum = udpsum;
+ return ih;
+}
+
+void *
+pkt_push_ecm_hdr (vlib_buffer_t * b)
+{
+ ecm_hdr_t *h;
+ h = vlib_buffer_push_uninit (b, sizeof (h[0]));
+
+ memset (h, 0, sizeof (h[0]));
+ h->type = LISP_ENCAP_CONTROL_TYPE;
+ memset (h->reserved2, 0, sizeof (h->reserved2));
+
+ return h;
+}
+
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-cp/packets.h b/src/vnet/lisp-cp/packets.h
new file mode 100644
index 00000000000..212a1d78163
--- /dev/null
+++ b/src/vnet/lisp-cp/packets.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/lisp-cp/lisp_types.h>
+
+#define IP_DF 0x4000 /* don't fragment */
+
+void *pkt_push_ip (vlib_main_t * vm, vlib_buffer_t * b, ip_address_t * src,
+ ip_address_t * dst, u32 proto);
+
+void *pkt_push_udp_and_ip (vlib_main_t * vm, vlib_buffer_t * b, u16 sp,
+ u16 dp, ip_address_t * sip, ip_address_t * dip);
+
+void *pkt_push_ecm_hdr (vlib_buffer_t * b);
+
+always_inline u8 *
+vlib_buffer_get_tail (vlib_buffer_t * b)
+{
+ return b->data + b->current_data + b->current_length;
+}
+
+always_inline void *
+vlib_buffer_put_uninit (vlib_buffer_t * b, u8 size)
+{
+ /* XXX should make sure there's enough space! */
+ void *p = vlib_buffer_get_tail (b);
+ b->current_length += size;
+ return p;
+}
+
+always_inline void *
+vlib_buffer_push_uninit (vlib_buffer_t * b, u8 size)
+{
+ /* XXX should make sure there's enough space! */
+ ASSERT (b->current_data >= size);
+ b->current_data -= size;
+ b->current_length += size;
+
+ return vlib_buffer_get_current (b);
+}
+
+always_inline void *
+vlib_buffer_make_headroom (vlib_buffer_t * b, u8 size)
+{
+ /* XXX should make sure there's enough space! */
+ b->current_data += size;
+ return vlib_buffer_get_current (b);
+}
+
+always_inline void *
+vlib_buffer_pull (vlib_buffer_t * b, u8 size)
+{
+ if (b->current_length < size)
+ return 0;
+
+ void *data = vlib_buffer_get_current (b);
+ vlib_buffer_advance (b, size);
+ return data;
+}
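+
+/* A minimal sketch of how these helpers compose (b is assumed to carry
+ * the LISP payload and to have enough buffer headroom): headers are
+ * pushed innermost-out, roughly
+ *
+ *   pkt_push_ecm_hdr (b);
+ *   pkt_push_udp_and_ip (vm, b, sp, dp, sip, dip);
+ *
+ * each push moves current_data back and grows current_length, while
+ * vlib_buffer_pull() is the inverse operation used on receive. */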
+
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-gpe/decap.c b/src/vnet/lisp-gpe/decap.c
new file mode 100644
index 00000000000..637d4a740c8
--- /dev/null
+++ b/src/vnet/lisp-gpe/decap.c
@@ -0,0 +1,501 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief L2 LISP-GPE decap code.
+ *
+ */
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/lisp-gpe/lisp_gpe.h>
+
+typedef struct
+{
+ u32 next_index;
+ u32 tunnel_index;
+ u32 error;
+ lisp_gpe_header_t h;
+} lisp_gpe_rx_trace_t;
+
+static u8 *
+format_lisp_gpe_rx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ lisp_gpe_rx_trace_t *t = va_arg (*args, lisp_gpe_rx_trace_t *);
+
+ if (t->tunnel_index != ~0)
+ {
+ s = format (s, "LISP-GPE: tunnel %d next %d error %d", t->tunnel_index,
+ t->next_index, t->error);
+ }
+ else
+ {
+ s = format (s, "LISP-GPE: no tunnel next %d error %d\n", t->next_index,
+ t->error);
+ }
+ s = format (s, "\n %U", format_lisp_gpe_header_with_length, &t->h,
+ (u32) sizeof (t->h) /* max size */ );
+ return s;
+}
+
+static u32 next_proto_to_next_index[LISP_GPE_NEXT_PROTOS] = {
+ LISP_GPE_INPUT_NEXT_DROP,
+ LISP_GPE_INPUT_NEXT_IP4_INPUT,
+ LISP_GPE_INPUT_NEXT_IP6_INPUT,
+ LISP_GPE_INPUT_NEXT_L2_INPUT,
+ LISP_GPE_INPUT_NEXT_DROP
+};
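+
+/* Index i in this table is the GPE next-protocol value (1 = IPv4,
+ * 2 = IPv6, 3 = Ethernet per the LISP-GPE draft); 0 is reserved and
+ * values we do not handle fall through to drop. */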
+
+always_inline u32
+next_protocol_to_next_index (lisp_gpe_header_t * lgh, u8 * next_header)
+{
+ /* lisp-gpe router */
+ if (PREDICT_TRUE ((lgh->flags & LISP_GPE_FLAGS_P)
+ && lgh->next_protocol < LISP_GPE_NEXT_PROTOS))
+ return next_proto_to_next_index[lgh->next_protocol];
+ /* legacy lisp router */
+ else if ((lgh->flags & LISP_GPE_FLAGS_P) == 0)
+ {
+ ip4_header_t *iph = (ip4_header_t *) next_header;
+ if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
+ return LISP_GPE_INPUT_NEXT_IP4_INPUT;
+ else if ((iph->ip_version_and_header_length & 0xF0) == 0x60)
+ return LISP_GPE_INPUT_NEXT_IP6_INPUT;
+ else
+ return LISP_GPE_INPUT_NEXT_DROP;
+ }
+ else
+ return LISP_GPE_INPUT_NEXT_DROP;
+}
+
+always_inline tunnel_lookup_t *
+next_index_to_iface (lisp_gpe_main_t * lgm, u32 next_index)
+{
+ if (LISP_GPE_INPUT_NEXT_IP4_INPUT == next_index
+ || LISP_GPE_INPUT_NEXT_IP6_INPUT == next_index)
+ return &lgm->l3_ifaces;
+ else if (LISP_GPE_INPUT_NEXT_L2_INPUT == next_index)
+ return &lgm->l2_ifaces;
+ clib_warning ("next_index not associated to an interface!");
+ return 0;
+}
+
+static_always_inline void
+incr_decap_stats (vnet_main_t * vnm, u32 cpu_index, u32 length,
+ u32 sw_if_index, u32 * last_sw_if_index, u32 * n_packets,
+ u32 * n_bytes)
+{
+ vnet_interface_main_t *im;
+
+ if (PREDICT_TRUE (sw_if_index == *last_sw_if_index))
+ {
+ *n_packets += 1;
+ *n_bytes += length;
+ }
+ else
+ {
+ if (PREDICT_TRUE (*last_sw_if_index != ~0))
+ {
+ im = &vnm->interface_main;
+
+ vlib_increment_combined_counter (im->combined_sw_if_counters +
+ VNET_INTERFACE_COUNTER_RX,
+ cpu_index, *last_sw_if_index,
+ *n_packets, *n_bytes);
+ }
+ *last_sw_if_index = sw_if_index;
+ *n_packets = 1;
+ *n_bytes = length;
+ }
+}
+
+/**
+ * @brief LISP-GPE decap dispatcher.
+ * @node lisp_gpe_input_inline
+ *
+ * LISP-GPE decap dispatcher.
+ *
+ * Decaps the IP-UDP-LISP-GPE header and, based on the next-protocol field
+ * in the GPE header and on the vni, decides the next node to forward the
+ * packet to.
+ *
+ * @param[in] vm vlib_main_t corresponding to current thread.
+ * @param[in] node vlib_node_runtime_t data for this node.
+ * @param[in] frame vlib_frame_t whose contents should be dispatched.
+ *
+ * @return number of vectors in frame.
+ */
+static uword
+lisp_gpe_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame, u8 is_v4)
+{
+ u32 n_left_from, next_index, *from, *to_next, cpu_index;
+ u32 n_bytes = 0, n_packets = 0, last_sw_if_index = ~0, drops = 0;
+ lisp_gpe_main_t *lgm = vnet_lisp_gpe_get_main ();
+
+ cpu_index = os_get_cpu_number ();
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ ip4_udp_lisp_gpe_header_t *iul4_0, *iul4_1;
+ ip6_udp_lisp_gpe_header_t *iul6_0, *iul6_1;
+ lisp_gpe_header_t *lh0, *lh1;
+ u32 next0, next1, error0, error1;
+ uword *si0, *si1;
+ tunnel_lookup_t *tl0, *tl1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* udp leaves current_data pointing at the lisp header */
+ if (is_v4)
+ {
+ vlib_buffer_advance (b0,
+ -(word) (sizeof (udp_header_t) +
+ sizeof (ip4_header_t)));
+ vlib_buffer_advance (b1,
+ -(word) (sizeof (udp_header_t) +
+ sizeof (ip4_header_t)));
+
+ iul4_0 = vlib_buffer_get_current (b0);
+ iul4_1 = vlib_buffer_get_current (b1);
+
+ /* pop (ip, udp, lisp-gpe) */
+ vlib_buffer_advance (b0, sizeof (*iul4_0));
+ vlib_buffer_advance (b1, sizeof (*iul4_1));
+
+ lh0 = &iul4_0->lisp;
+ lh1 = &iul4_1->lisp;
+ }
+ else
+ {
+ vlib_buffer_advance (b0,
+ -(word) (sizeof (udp_header_t) +
+ sizeof (ip6_header_t)));
+ vlib_buffer_advance (b1,
+ -(word) (sizeof (udp_header_t) +
+ sizeof (ip6_header_t)));
+
+ iul6_0 = vlib_buffer_get_current (b0);
+ iul6_1 = vlib_buffer_get_current (b1);
+
+ /* pop (ip, udp, lisp-gpe) */
+ vlib_buffer_advance (b0, sizeof (*iul6_0));
+ vlib_buffer_advance (b1, sizeof (*iul6_1));
+
+ lh0 = &iul6_0->lisp;
+ lh1 = &iul6_1->lisp;
+ }
+
+ /* determine next_index from lisp-gpe header */
+ next0 = next_protocol_to_next_index (lh0,
+ vlib_buffer_get_current (b0));
+ next1 = next_protocol_to_next_index (lh1,
+ vlib_buffer_get_current (b1));
+
+ /* determine if tunnel is l2 or l3 */
+ tl0 = next_index_to_iface (lgm, next0);
+ tl1 = next_index_to_iface (lgm, next1);
+
+	  /* map iid/vni to lisp-gpe sw_if_index which is used by ipx_input to
+	   * decide the rx vrf and the input features to be applied.
+	   * NOTE: vni uses only the first 24 bits, as in the single-buffer
+	   * loop below */
+	  si0 = hash_get (tl0->sw_if_index_by_vni,
+			  clib_net_to_host_u32 (lh0->iid << 8));
+	  si1 = hash_get (tl1->sw_if_index_by_vni,
+			  clib_net_to_host_u32 (lh1->iid << 8));
+
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b0);
+ vnet_update_l2_len (b1);
+
+ if (si0)
+ {
+ incr_decap_stats (lgm->vnet_main, cpu_index,
+ vlib_buffer_length_in_chain (vm, b0), si0[0],
+ &last_sw_if_index, &n_packets, &n_bytes);
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = si0[0];
+ error0 = 0;
+ }
+ else
+ {
+ next0 = LISP_GPE_INPUT_NEXT_DROP;
+ error0 = LISP_GPE_ERROR_NO_TUNNEL;
+ drops++;
+ }
+
+ if (si1)
+ {
+ incr_decap_stats (lgm->vnet_main, cpu_index,
+ vlib_buffer_length_in_chain (vm, b1), si1[0],
+ &last_sw_if_index, &n_packets, &n_bytes);
+ vnet_buffer (b1)->sw_if_index[VLIB_RX] = si1[0];
+ error1 = 0;
+ }
+ else
+ {
+ next1 = LISP_GPE_INPUT_NEXT_DROP;
+ error1 = LISP_GPE_ERROR_NO_TUNNEL;
+ drops++;
+ }
+
+ b0->error = error0 ? node->errors[error0] : 0;
+ b1->error = error1 ? node->errors[error1] : 0;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ lisp_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b0,
+ sizeof (*tr));
+ tr->next_index = next0;
+ tr->error = error0;
+ tr->h = lh0[0];
+ }
+
+ if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ lisp_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b1,
+ sizeof (*tr));
+ tr->next_index = next1;
+ tr->error = error1;
+ tr->h = lh1[0];
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, next0,
+ next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ ip4_udp_lisp_gpe_header_t *iul4_0;
+ ip6_udp_lisp_gpe_header_t *iul6_0;
+ lisp_gpe_header_t *lh0;
+ u32 error0;
+ uword *si0;
+ tunnel_lookup_t *tl0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /* udp leaves current_data pointing at the lisp header
+ * TODO: there's no difference in processing between v4 and v6
+ * encapsulated packets so the code should be simplified if ip header
+ * info is not going to be used for dp smrs/dpsec */
+ if (is_v4)
+ {
+ vlib_buffer_advance (b0,
+ -(word) (sizeof (udp_header_t) +
+ sizeof (ip4_header_t)));
+
+ iul4_0 = vlib_buffer_get_current (b0);
+
+ /* pop (ip, udp, lisp-gpe) */
+ vlib_buffer_advance (b0, sizeof (*iul4_0));
+
+ lh0 = &iul4_0->lisp;
+ }
+ else
+ {
+ vlib_buffer_advance (b0,
+ -(word) (sizeof (udp_header_t) +
+ sizeof (ip6_header_t)));
+
+ iul6_0 = vlib_buffer_get_current (b0);
+
+ /* pop (ip, udp, lisp-gpe) */
+ vlib_buffer_advance (b0, sizeof (*iul6_0));
+
+ lh0 = &iul6_0->lisp;
+ }
+
+ /* TODO if security is to be implemented, something similar to RPF,
+ * probably we'd like to check that the peer is allowed to send us
+ * packets. For this, we should use the tunnel table OR check that
+ * we have a mapping for the source eid and that the outer source of
+ * the packet is one of its locators */
+
+ /* determine next_index from lisp-gpe header */
+ next0 = next_protocol_to_next_index (lh0,
+ vlib_buffer_get_current (b0));
+
+ /* determine if tunnel is l2 or l3 */
+ tl0 = next_index_to_iface (lgm, next0);
+
+ /* map iid/vni to lisp-gpe sw_if_index which is used by ipx_input to
+ * decide the rx vrf and the input features to be applied.
+ * NOTE: vni uses only the first 24 bits */
+ si0 = hash_get (tl0->sw_if_index_by_vni,
+ clib_net_to_host_u32 (lh0->iid << 8));
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b0);
+
+ if (si0)
+ {
+ incr_decap_stats (lgm->vnet_main, cpu_index,
+ vlib_buffer_length_in_chain (vm, b0), si0[0],
+ &last_sw_if_index, &n_packets, &n_bytes);
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = si0[0];
+ error0 = 0;
+ }
+ else
+ {
+ next0 = LISP_GPE_INPUT_NEXT_DROP;
+ error0 = LISP_GPE_ERROR_NO_TUNNEL;
+ drops++;
+ }
+
+ /* TODO error handling if security is implemented */
+ b0->error = error0 ? node->errors[error0] : 0;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ lisp_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b0,
+ sizeof (*tr));
+ tr->next_index = next0;
+ tr->error = error0;
+ tr->h = lh0[0];
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ /* flush iface stats */
+ incr_decap_stats (lgm->vnet_main, cpu_index, 0, ~0, &last_sw_if_index,
+ &n_packets, &n_bytes);
+ /* this inline services both the ip4 and ip6 input nodes, so attribute
+ * the drops to the node that is actually running */
+ vlib_node_increment_counter (vm, node->node_index,
+ LISP_GPE_ERROR_NO_TUNNEL, drops);
+ return from_frame->n_vectors;
+}
+
+static uword
+lisp_gpe_ip4_input (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return lisp_gpe_input_inline (vm, node, from_frame, 1);
+}
+
+static uword
+lisp_gpe_ip6_input (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return lisp_gpe_input_inline (vm, node, from_frame, 0);
+}
+
+static char *lisp_gpe_ip4_input_error_strings[] = {
+#define lisp_gpe_error(n,s) s,
+#include <vnet/lisp-gpe/lisp_gpe_error.def>
+#undef lisp_gpe_error
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (lisp_gpe_ip4_input_node) = {
+ .function = lisp_gpe_ip4_input,
+ .name = "lisp-gpe-ip4-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .n_next_nodes = LISP_GPE_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [LISP_GPE_INPUT_NEXT_##s] = n,
+ foreach_lisp_gpe_ip_input_next
+#undef _
+ },
+
+ .n_errors = ARRAY_LEN (lisp_gpe_ip4_input_error_strings),
+ .error_strings = lisp_gpe_ip4_input_error_strings,
+
+ .format_buffer = format_lisp_gpe_header_with_length,
+ .format_trace = format_lisp_gpe_rx_trace,
+ // $$$$ .unformat_buffer = unformat_lisp_gpe_header,
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (lisp_gpe_ip6_input_node) = {
+ .function = lisp_gpe_ip6_input,
+ .name = "lisp-gpe-ip6-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .n_next_nodes = LISP_GPE_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [LISP_GPE_INPUT_NEXT_##s] = n,
+ foreach_lisp_gpe_ip_input_next
+#undef _
+ },
+
+ .n_errors = ARRAY_LEN (lisp_gpe_ip4_input_error_strings),
+ .error_strings = lisp_gpe_ip4_input_error_strings,
+
+ .format_buffer = format_lisp_gpe_header_with_length,
+ .format_trace = format_lisp_gpe_rx_trace,
+ // $$$$ .unformat_buffer = unformat_lisp_gpe_header,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-gpe/dir.dox b/src/vnet/lisp-gpe/dir.dox
new file mode 100644
index 00000000000..afa6da9ab2c
--- /dev/null
+++ b/src/vnet/lisp-gpe/dir.dox
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ @dir
+ @brief LISP-GPE code.
+
+ An implementation of LISP-GPE as per:
+ rfc-6830
+ draft-lewis-lisp-gpe-02
+
+ See file: rfc.txt
+
+*/ \ No newline at end of file
diff --git a/src/vnet/lisp-gpe/interface.c b/src/vnet/lisp-gpe/interface.c
new file mode 100644
index 00000000000..3288b2414b7
--- /dev/null
+++ b/src/vnet/lisp-gpe/interface.c
@@ -0,0 +1,709 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ * @brief Common utility functions for LISP-GPE interfaces.
+ *
+ */
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ip/udp.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/lisp-gpe/lisp_gpe.h>
+#include <vnet/lisp-gpe/lisp_gpe_fwd_entry.h>
+#include <vnet/lisp-gpe/lisp_gpe_tenant.h>
+#include <vnet/lisp-gpe/lisp_gpe_adjacency.h>
+#include <vnet/adj/adj.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/lisp-cp/lisp_cp_dpo.h>
+
+/**
+ * @brief The VLIB node arc/edge from the interface's TX node to the L2
+ * load-balancing node, which is where all packets are sent.
+ */
+static uword l2_arc_to_lb;
+
+#define foreach_lisp_gpe_tx_next \
+ _(DROP, "error-drop") \
+ _(IP4_LOOKUP, "ip4-lookup") \
+ _(IP6_LOOKUP, "ip6-lookup")
+
+typedef enum
+{
+#define _(sym,str) LISP_GPE_TX_NEXT_##sym,
+ foreach_lisp_gpe_tx_next
+#undef _
+ LISP_GPE_TX_N_NEXT,
+} lisp_gpe_tx_next_t;
+
+typedef struct
+{
+ u32 tunnel_index;
+} lisp_gpe_tx_trace_t;
+
+u8 *
+format_lisp_gpe_tx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ lisp_gpe_tx_trace_t *t = va_arg (*args, lisp_gpe_tx_trace_t *);
+
+ s = format (s, "LISP-GPE-TX: tunnel %d", t->tunnel_index);
+ return s;
+}
+
+/* test the IP version nibble of the first header byte */
+#define is_v4_packet(_h) (((*(u8 *) _h) & 0xF0) == 0x40)
+
+/**
+ * @brief LISP-GPE interface TX (encap) function.
+ * @node lisp_gpe_interface_tx
+ *
+ * The LISP-GPE interface TX (encap) function.
+ *
+ * Looks up the associated tunnel based on the adjacency hit in the SD FIB
+ * and, if the tunnel is multihomed, uses the flow hash to select the
+ * sub-tunnel, and hence the rewrite string, used to encapsulate the packet.
+ *
+ * @param[in] vm vlib_main_t corresponding to the current thread.
+ * @param[in] node vlib_node_runtime_t data for this node.
+ * @param[in] frame vlib_frame_t whose contents should be dispatched.
+ *
+ * @return number of vectors in frame.
+ */
+static uword
+lisp_gpe_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, *from, *to_next;
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, adj_index0, next0;
+ const ip_adjacency_t *adj0;
+ const dpo_id_t *dpo0;
+ vlib_buffer_t *b0;
+ u8 is_v4_0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /* Fixup the checksum and len fields in the LISP tunnel encap
+ * that was applied at the midchain node */
+ is_v4_0 = is_v4_packet (vlib_buffer_get_current (b0));
+ ip_udp_fixup_one (lgm->vlib_main, b0, is_v4_0);
+
+ /* Follow the DPO on which the midchain is stacked */
+ adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+ adj0 = adj_get (adj_index0);
+ dpo0 = &adj0->sub_type.midchain.next_dpo;
+ next0 = dpo0->dpoi_next_node;
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
+ sizeof (*tr));
+ tr->tunnel_index = adj_index0;
+ }
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return from_frame->n_vectors;
+}
+
+static u8 *
+format_lisp_gpe_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, "lisp_gpe%d", dev_instance);
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (lisp_gpe_device_class) = {
+ .name = "LISP_GPE",
+ .format_device_name = format_lisp_gpe_name,
+ .format_tx_trace = format_lisp_gpe_tx_trace,
+ .tx_function = lisp_gpe_interface_tx,
+};
+/* *INDENT-ON* */
+
+u8 *
+format_lisp_gpe_header_with_length (u8 * s, va_list * args)
+{
+ lisp_gpe_header_t *h = va_arg (*args, lisp_gpe_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+ u32 header_bytes;
+
+ header_bytes = sizeof (h[0]);
+ if (max_header_bytes != 0 && header_bytes > max_header_bytes)
+ return format (s, "lisp-gpe header truncated");
+
+ s = format (s, "flags: ");
+#define _(n,v) if (h->flags & v) s = format (s, "%s ", #n);
+ foreach_lisp_gpe_flag_bit;
+#undef _
+
+ s = format (s, "\n ver_res %d res %d next_protocol %d iid %d(%x)",
+ h->ver_res, h->res, h->next_protocol,
+ clib_net_to_host_u32 (h->iid), clib_net_to_host_u32 (h->iid));
+ return s;
+}
+
+/* *INDENT-OFF* */
+VNET_HW_INTERFACE_CLASS (lisp_gpe_hw_class) = {
+ .name = "LISP_GPE",
+ .format_header = format_lisp_gpe_header_with_length,
+ .build_rewrite = lisp_gpe_build_rewrite,
+ .update_adjacency = lisp_gpe_update_adjacency,
+};
+/* *INDENT-ON* */
+
+
+typedef struct
+{
+ u32 lb_index;
+} l2_lisp_gpe_tx_trace_t;
+
+static u8 *
+format_l2_lisp_gpe_tx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2_lisp_gpe_tx_trace_t *t = va_arg (*args, l2_lisp_gpe_tx_trace_t *);
+
+ s = format (s, "L2-LISP-GPE-TX: load-balance %d", t->lb_index);
+ return s;
+}
+
+/**
+ * @brief LISP-GPE interface TX (encap) function for L2 overlays.
+ * @node l2_lisp_gpe_interface_tx
+ *
+ * The L2 LISP-GPE interface TX (encap) function.
+ *
+ * Uses the bridge domain index and the source and destination ethernet
+ * addresses to look up the tunnel. If the tunnel is multihomed, a flow hash
+ * is used to determine the sub-tunnel and therefore the rewrite string to
+ * be used to encapsulate the packets.
+ *
+ * @param[in] vm vlib_main_t corresponding to the current thread.
+ * @param[in] node vlib_node_runtime_t data for this node.
+ * @param[in] frame vlib_frame_t whose contents should be dispatched.
+ *
+ * @return number of vectors in frame.
+ */
+static uword
+l2_lisp_gpe_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, *from, *to_next;
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *b0;
+ u32 bi0, lbi0;
+ ethernet_header_t *e0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ e0 = vlib_buffer_get_current (b0);
+
+ vnet_buffer (b0)->lisp.overlay_afi = LISP_AFI_MAC;
+
+ /* lookup dst + src mac */
+ lbi0 = lisp_l2_fib_lookup (lgm, vnet_buffer (b0)->l2.bd_index,
+ e0->src_address, e0->dst_address);
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = lbi0;
+
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ l2_lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
+ sizeof (*tr));
+ tr->lb_index = lbi0;
+ }
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, l2_arc_to_lb);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return from_frame->n_vectors;
+}
+
+static u8 *
+format_l2_lisp_gpe_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, "l2_lisp_gpe%d", dev_instance);
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (l2_lisp_gpe_device_class,static) = {
+ .name = "L2_LISP_GPE",
+ .format_device_name = format_l2_lisp_gpe_name,
+ .format_tx_trace = format_l2_lisp_gpe_tx_trace,
+ .tx_function = l2_lisp_gpe_interface_tx,
+};
+/* *INDENT-ON* */
+
+static vnet_hw_interface_t *
+lisp_gpe_create_iface (lisp_gpe_main_t * lgm, u32 vni, u32 dp_table,
+ vnet_device_class_t * dev_class,
+ tunnel_lookup_t * tuns)
+{
+ u32 flen;
+ u32 hw_if_index = ~0;
+ u8 *new_name;
+ vnet_hw_interface_t *hi;
+ vnet_main_t *vnm = lgm->vnet_main;
+
+ /* create hw lisp_gpeX iface if needed, otherwise reuse existing */
+ flen = vec_len (lgm->free_tunnel_hw_if_indices);
+ if (flen > 0)
+ {
+ hw_if_index = lgm->free_tunnel_hw_if_indices[flen - 1];
+ _vec_len (lgm->free_tunnel_hw_if_indices) -= 1;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ /* rename interface */
+ new_name = format (0, "%U", dev_class->format_device_name, vni);
+
+ vec_add1 (new_name, 0);
+ vnet_rename_interface (vnm, hw_if_index, (char *) new_name);
+ vec_free (new_name);
+
+ /* clear old stats of freed interface before reuse */
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_interface_counter_lock (im);
+ vlib_zero_combined_counter (&im->combined_sw_if_counters
+ [VNET_INTERFACE_COUNTER_TX],
+ hi->sw_if_index);
+ vlib_zero_combined_counter (&im->combined_sw_if_counters
+ [VNET_INTERFACE_COUNTER_RX],
+ hi->sw_if_index);
+ vlib_zero_simple_counter (&im->sw_if_counters
+ [VNET_INTERFACE_COUNTER_DROP],
+ hi->sw_if_index);
+ vnet_interface_counter_unlock (im);
+ }
+ else
+ {
+ hw_if_index = vnet_register_interface (vnm, dev_class->index, vni,
+ lisp_gpe_hw_class.index, 0);
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ }
+
+ hash_set (tuns->hw_if_index_by_dp_table, dp_table, hw_if_index);
+
+ /* set tunnel termination: post decap, packets are tagged as having been
+ * originated by the lisp-gpe interface */
+ hash_set (tuns->sw_if_index_by_vni, vni, hi->sw_if_index);
+ hash_set (tuns->vni_by_sw_if_index, hi->sw_if_index, vni);
+
+ return hi;
+}
+
+static void
+lisp_gpe_remove_iface (lisp_gpe_main_t * lgm, u32 hi_index, u32 dp_table,
+ tunnel_lookup_t * tuns)
+{
+ vnet_main_t *vnm = lgm->vnet_main;
+ vnet_hw_interface_t *hi;
+ uword *vnip;
+
+ hi = vnet_get_hw_interface (vnm, hi_index);
+
+ /* disable interface */
+ vnet_sw_interface_set_flags (vnm, hi->sw_if_index, 0 /* down */ );
+ vnet_hw_interface_set_flags (vnm, hi->hw_if_index, 0 /* down */ );
+ hash_unset (tuns->hw_if_index_by_dp_table, dp_table);
+ vec_add1 (lgm->free_tunnel_hw_if_indices, hi->hw_if_index);
+
+ /* clean tunnel termination and vni to sw_if_index binding */
+ vnip = hash_get (tuns->vni_by_sw_if_index, hi->sw_if_index);
+ if (0 == vnip)
+ {
+ clib_warning ("No vni associated to interface %d", hi->sw_if_index);
+ return;
+ }
+ hash_unset (tuns->sw_if_index_by_vni, vnip[0]);
+ hash_unset (tuns->vni_by_sw_if_index, hi->sw_if_index);
+}
+
+static void
+lisp_gpe_iface_set_table (u32 sw_if_index, u32 table_id)
+{
+ fib_node_index_t fib_index;
+
+ fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, table_id);
+ vec_validate (ip4_main.fib_index_by_sw_if_index, sw_if_index);
+ ip4_main.fib_index_by_sw_if_index[sw_if_index] = fib_index;
+ ip4_sw_interface_enable_disable (sw_if_index, 1);
+
+ fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, table_id);
+ vec_validate (ip6_main.fib_index_by_sw_if_index, sw_if_index);
+ ip6_main.fib_index_by_sw_if_index[sw_if_index] = fib_index;
+ ip6_sw_interface_enable_disable (sw_if_index, 1);
+}
+
+static void
+lisp_gpe_tenant_del_default_routes (u32 table_id)
+{
+ fib_protocol_t proto;
+
+ FOR_EACH_FIB_IP_PROTOCOL (proto)
+ {
+ fib_prefix_t prefix = {
+ .fp_proto = proto,
+ };
+ u32 fib_index;
+
+ fib_index = fib_table_find (prefix.fp_proto, table_id);
+ fib_table_entry_special_remove (fib_index, &prefix, FIB_SOURCE_LISP);
+ fib_table_unlock (fib_index, prefix.fp_proto);
+ }
+}
+
+static void
+lisp_gpe_tenant_add_default_routes (u32 table_id)
+{
+ fib_protocol_t proto;
+
+ FOR_EACH_FIB_IP_PROTOCOL (proto)
+ {
+ fib_prefix_t prefix = {
+ .fp_proto = proto,
+ };
+ u32 fib_index;
+
+ /*
+ * Add a default route that results in a control plane punt DPO
+ */
+ fib_index = fib_table_find_or_create_and_lock (prefix.fp_proto, table_id);
+ fib_table_entry_special_dpo_add (fib_index, &prefix, FIB_SOURCE_LISP,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ lisp_cp_dpo_get (fib_proto_to_dpo
+ (proto)));
+ }
+}
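+/*
+ * Note: the fib_prefix_t above is zero but for fp_proto, so the entry
+ * added/removed is the tenant table's default route (0.0.0.0/0 or ::/0).
+ * At add time it is bound to the lisp-cp punt DPO, so traffic with no more
+ * specific route is handed to the LISP control plane.
+ */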
+
+
+/**
+ * @brief Add LISP-GPE L3 interface.
+ *
+ * Creates the LISP-GPE interface, installs default routes that attract all
+ * traffic with no more specific routes to the lisp-cp lookup, sets the
+ * interface in the right vrf and enables it.
+ *
+ * @param[in] lgm Reference to @ref lisp_gpe_main_t.
+ * @param[in] vni VNI to map to the interface.
+ * @param[in] table_id VRF (overlay table) for the interface.
+ *
+ * @return sw_if_index of the interface on success, ~0 otherwise.
+ */
+u32
+lisp_gpe_add_l3_iface (lisp_gpe_main_t * lgm, u32 vni, u32 table_id)
+{
+ vnet_main_t *vnm = lgm->vnet_main;
+ tunnel_lookup_t *l3_ifaces = &lgm->l3_ifaces;
+ vnet_hw_interface_t *hi;
+ uword *hip, *si;
+
+ hip = hash_get (l3_ifaces->hw_if_index_by_dp_table, table_id);
+
+ if (hip)
+ {
+ clib_warning ("vrf %d already mapped to a vni", table_id);
+ return ~0;
+ }
+
+ si = hash_get (l3_ifaces->sw_if_index_by_vni, vni);
+
+ if (si)
+ {
+ clib_warning ("Interface for vni %d already exists", vni);
+ }
+
+ /* create lisp iface and populate tunnel tables */
+ hi = lisp_gpe_create_iface (lgm, vni, table_id,
+ &lisp_gpe_device_class, l3_ifaces);
+
+ /* insert default routes that point to lisp-cp lookup */
+ lisp_gpe_iface_set_table (hi->sw_if_index, table_id);
+ lisp_gpe_tenant_add_default_routes (table_id);
+
+ /* enable interface */
+ vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ vnet_hw_interface_set_flags (vnm, hi->hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+
+ return (hi->sw_if_index);
+}
+
+void
+lisp_gpe_del_l3_iface (lisp_gpe_main_t * lgm, u32 vni, u32 table_id)
+{
+ vnet_main_t *vnm = lgm->vnet_main;
+ tunnel_lookup_t *l3_ifaces = &lgm->l3_ifaces;
+ vnet_hw_interface_t *hi;
+ uword *hip;
+
+ hip = hash_get (l3_ifaces->hw_if_index_by_dp_table, table_id);
+
+ if (hip == 0)
+ {
+ clib_warning ("The interface for vrf %d doesn't exist", table_id);
+ return;
+ }
+
+ hi = vnet_get_hw_interface (vnm, hip[0]);
+
+ lisp_gpe_remove_iface (lgm, hip[0], table_id, &lgm->l3_ifaces);
+
+ /* unset default routes */
+ ip4_sw_interface_enable_disable (hi->sw_if_index, 0);
+ ip6_sw_interface_enable_disable (hi->sw_if_index, 0);
+ lisp_gpe_tenant_del_default_routes (table_id);
+}
+
+/**
+ * @brief Add LISP-GPE L2 interface.
+ *
+ * Creates the LISP-GPE interface, sets it in L2 mode in the appropriate
+ * bridge domain, sets egress arcs and enables it.
+ *
+ * @param[in] lgm Reference to @ref lisp_gpe_main_t.
+ * @param[in] vni VNI to map to the interface.
+ * @param[in] bd_id Bridge domain id for the interface.
+ *
+ * @return sw_if_index of the interface on success, ~0 otherwise.
+ */
+u32
+lisp_gpe_add_l2_iface (lisp_gpe_main_t * lgm, u32 vni, u32 bd_id)
+{
+ vnet_main_t *vnm = lgm->vnet_main;
+ tunnel_lookup_t *l2_ifaces = &lgm->l2_ifaces;
+ vnet_hw_interface_t *hi;
+ uword *hip, *si;
+ u16 bd_index;
+
+ bd_index = bd_find_or_add_bd_index (&bd_main, bd_id);
+ hip = hash_get (l2_ifaces->hw_if_index_by_dp_table, bd_index);
+
+ if (hip)
+ {
+ clib_warning ("bridge domain %d already mapped to a vni", bd_id);
+ return ~0;
+ }
+
+ si = hash_get (l2_ifaces->sw_if_index_by_vni, vni);
+ if (si)
+ {
+ clib_warning ("Interface for vni %d already exists", vni);
+ return ~0;
+ }
+
+ /* create lisp iface and populate tunnel tables */
+ hi = lisp_gpe_create_iface (lgm, vni, bd_index,
+ &l2_lisp_gpe_device_class, &lgm->l2_ifaces);
+
+ /* enable interface */
+ vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ vnet_hw_interface_set_flags (vnm, hi->hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+
+ l2_arc_to_lb = vlib_node_add_named_next (vlib_get_main (),
+ hi->tx_node_index,
+ "l2-load-balance");
+
+ /* we're ready. add iface to l2 bridge domain */
+ set_int_l2_mode (lgm->vlib_main, vnm, MODE_L2_BRIDGE, hi->sw_if_index,
+ bd_index, 0, 0, 0);
+
+ return (hi->sw_if_index);
+}
+
+/**
+ * @brief Delete LISP-GPE L2 interface.
+ *
+ * Removes the LISP-GPE interface from its bridge domain and returns it to
+ * the free pool for reuse.
+ *
+ * @param[in] lgm Reference to @ref lisp_gpe_main_t.
+ * @param[in] vni VNI mapped to the interface.
+ * @param[in] bd_id Bridge domain id of the interface.
+ */
+void
+lisp_gpe_del_l2_iface (lisp_gpe_main_t * lgm, u32 vni, u32 bd_id)
+{
+ tunnel_lookup_t *l2_ifaces = &lgm->l2_ifaces;
+ u16 bd_index;
+ uword *hip;
+
+ bd_index = bd_find_or_add_bd_index (&bd_main, bd_id);
+ hip = hash_get (l2_ifaces->hw_if_index_by_dp_table, bd_index);
+
+ if (hip == 0)
+ {
+ clib_warning ("The interface for bridge domain %d doesn't exist",
+ bd_id);
+ return;
+ }
+ lisp_gpe_remove_iface (lgm, hip[0], bd_index, &lgm->l2_ifaces);
+}
+
+static clib_error_t *
+lisp_gpe_add_del_iface_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 is_add = 1;
+ u32 table_id, vni, bd_id;
+ u8 vni_is_set = 0, vrf_is_set = 0, bd_index_is_set = 0;
+
+ if (vnet_lisp_gpe_enable_disable_status () == 0)
+ {
+ return clib_error_return (0, "LISP is disabled");
+ }
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "add"))
+ is_add = 1;
+ else if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "vrf %d", &table_id))
+ {
+ vrf_is_set = 1;
+ }
+ else if (unformat (line_input, "vni %d", &vni))
+ {
+ vni_is_set = 1;
+ }
+ else if (unformat (line_input, "bd %d", &bd_id))
+ {
+ bd_index_is_set = 1;
+ }
+ else
+ {
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+ }
+
+ if (vrf_is_set && bd_index_is_set)
+ return clib_error_return (0,
+ "Cannot set both vrf and bridge domain index!");
+
+ if (!vni_is_set)
+ return clib_error_return (0, "vni must be set!");
+
+ if (!vrf_is_set && !bd_index_is_set)
+ return clib_error_return (0, "vrf or bridge domain index must be set!");
+
+ if (bd_index_is_set)
+ {
+ if (is_add)
+ {
+ if (~0 == lisp_gpe_tenant_l2_iface_add_or_lock (vni, bd_id))
+ return clib_error_return (0, "L2 interface not created");
+ }
+ else
+ lisp_gpe_tenant_l2_iface_unlock (vni);
+ }
+ else
+ {
+ if (is_add)
+ {
+ if (~0 == lisp_gpe_tenant_l3_iface_add_or_lock (vni, table_id))
+ return clib_error_return (0, "L3 interface not created");
+ }
+ else
+ lisp_gpe_tenant_l3_iface_unlock (vni);
+ }
+
+ return (NULL);
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (add_del_lisp_gpe_iface_command, static) = {
+ .path = "lisp gpe iface",
+ .short_help = "lisp gpe iface add/del vni <vni> vrf <vrf> | bd <bd-id>",
+ .function = lisp_gpe_add_del_iface_command_fn,
+};
+/* *INDENT-ON* */
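+/*
+ * Example invocations accepted by the parser above (the vni, vrf and bd
+ * values are illustrative):
+ *
+ *   lisp gpe iface add vni 100 vrf 10   - add an L3 interface
+ *   lisp gpe iface add vni 200 bd 5     - add an L2 interface
+ *   lisp gpe iface del vni 100 vrf 10   - remove the L3 interface
+ */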
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-gpe/lisp_gpe.api b/src/vnet/lisp-gpe/lisp_gpe.api
new file mode 100644
index 00000000000..3956b97d6a1
--- /dev/null
+++ b/src/vnet/lisp-gpe/lisp_gpe.api
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \brief add or delete lisp gpe forwarding entry
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add forwarding entry if non-zero, else delete
+ @param eid_type -
+ 0 : ipv4
+ 1 : ipv6
+ 2 : mac
+ @param rmt_eid - remote eid
+ @param lcl_eid - local eid
+ @param rmt_len - remote prefix len
+ @param lcl_len - local prefix len
+ @param vni - virtual network identifier
+ @param dp_table - vrf/bridge domain id
+ @param loc_num - number of locators
+ @param lcl_locs - array of local locators
+ @param rmt_locs - array of remote locators
+ @param action - negative action when 0 locators configured
+*/
+define lisp_gpe_add_del_fwd_entry
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 eid_type;
+ u8 rmt_eid[16];
+ u8 lcl_eid[16];
+ u8 rmt_len;
+ u8 lcl_len;
+ u32 vni;
+ u32 dp_table;
+ u32 loc_num;
+ u8 lcl_locs[loc_num];
+ u8 rmt_locs[loc_num];
+ u8 action;
+};
+
+/** \brief Reply for gpe_fwd_entry add/del
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_gpe_add_del_fwd_entry_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief enable or disable lisp-gpe protocol
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_en - enable protocol if non-zero, else disable
+*/
+define lisp_gpe_enable_disable
+{
+ u32 client_index;
+ u32 context;
+ u8 is_en;
+};
+
+/** \brief Reply for gpe enable/disable
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_gpe_enable_disable_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief add or delete gpe_iface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add interface if non-zero, else delete
+ @param is_l2 - interface is L2 if non-zero, else L3
+ @param dp_table - vrf id (L3) or bridge domain id (L2)
+ @param vni - virtual network identifier
+*/
+define lisp_gpe_add_del_iface
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 is_l2;
+ u32 dp_table;
+ u32 vni;
+};
+
+/** \brief Reply for gpe_iface add/del
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_gpe_add_del_iface_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+define lisp_gpe_tunnel_details
+{
+ u32 context;
+ u32 tunnels;
+ u8 is_ipv6;
+ u8 source_ip[16];
+ u8 destination_ip[16];
+ u32 encap_fib_id;
+ u32 decap_fib_id;
+ u32 dcap_next;
+ u8 lisp_ver;
+ u8 next_protocol;
+ u8 flags;
+ u8 ver_res;
+ u8 res;
+ u32 iid;
+};
+
+/** \brief Request for gpe tunnel summary status
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ */
+define lisp_gpe_tunnel_dump
+{
+ u32 client_index;
+ u32 context;
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
+ \ No newline at end of file
diff --git a/src/vnet/lisp-gpe/lisp_gpe.c b/src/vnet/lisp-gpe/lisp_gpe.c
new file mode 100644
index 00000000000..fbda8687c3b
--- /dev/null
+++ b/src/vnet/lisp-gpe/lisp_gpe.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief Common utility functions for IPv4, IPv6 and L2 LISP-GPE tunnels.
+ *
+ */
+
+#include <vnet/lisp-gpe/lisp_gpe.h>
+#include <vnet/lisp-gpe/lisp_gpe_fwd_entry.h>
+#include <vnet/lisp-gpe/lisp_gpe_adjacency.h>
+#include <vnet/lisp-gpe/lisp_gpe_tenant.h>
+
+/** LISP-GPE global state */
+lisp_gpe_main_t lisp_gpe_main;
+
+
+/** CLI command to add/del forwarding entry. */
+static clib_error_t *
+lisp_gpe_add_del_fwd_entry_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 is_add = 1;
+ ip_address_t lloc, rloc;
+ clib_error_t *error = 0;
+ gid_address_t _reid, *reid = &_reid, _leid, *leid = &_leid;
+ u8 reid_set = 0, leid_set = 0, is_negative = 0, vrf_set = 0, vni_set = 0;
+ u32 vni, vrf, action = ~0, p, w;
+ locator_pair_t pair, *pairs = 0;
+ int rv;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "add"))
+ is_add = 1;
+ else if (unformat (line_input, "leid %U", unformat_gid_address, leid))
+ {
+ leid_set = 1;
+ }
+ else if (unformat (line_input, "reid %U", unformat_gid_address, reid))
+ {
+ reid_set = 1;
+ }
+ else if (unformat (line_input, "vni %u", &vni))
+ {
+ gid_address_vni (leid) = vni;
+ gid_address_vni (reid) = vni;
+ vni_set = 1;
+ }
+ else if (unformat (line_input, "vrf %u", &vrf))
+ {
+ vrf_set = 1;
+ }
+ else if (unformat (line_input, "bd %u", &vrf))
+ {
+ vrf_set = 1;
+ }
+ else if (unformat (line_input, "negative action %U",
+ unformat_negative_mapping_action, &action))
+ {
+ is_negative = 1;
+ }
+ else if (unformat (line_input, "loc-pair %U %U p %d w %d",
+ unformat_ip_address, &lloc,
+ unformat_ip_address, &rloc, &p, &w))
+ {
+ pair.lcl_loc = lloc;
+ pair.rmt_loc = rloc;
+ pair.priority = p;
+ pair.weight = w;
+ vec_add1 (pairs, pair);
+ }
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+ unformat_free (line_input);
+
+ if (!vni_set || !vrf_set)
+ {
+ error = clib_error_return (0, "vni and vrf must be set!");
+ goto done;
+ }
+
+ if (!reid_set)
+ {
+ error = clib_error_return (0, "remote eid must be set!");
+ goto done;
+ }
+
+ if (is_negative)
+ {
+ if (~0 == action)
+ {
+ error = clib_error_return (0, "no action set for negative tunnel!");
+ goto done;
+ }
+ }
+ else
+ {
+ if (vec_len (pairs) == 0)
+ {
+ error = clib_error_return (0, "expected ip4/ip6 locators.");
+ goto done;
+ }
+ }
+
+ if (!leid_set)
+ {
+ /* if leid not set, make sure it's the same AFI like reid */
+ gid_address_type (leid) = gid_address_type (reid);
+ if (GID_ADDR_IP_PREFIX == gid_address_type (reid))
+ gid_address_ip_version (leid) = gid_address_ip_version (reid);
+ }
+
+ /* add fwd entry */
+ vnet_lisp_gpe_add_del_fwd_entry_args_t _a, *a = &_a;
+ memset (a, 0, sizeof (a[0]));
+
+ a->is_add = is_add;
+ a->is_negative = is_negative;
+ a->vni = vni;
+ a->table_id = vrf;
+ gid_address_copy (&a->lcl_eid, leid);
+ gid_address_copy (&a->rmt_eid, reid);
+ a->locator_pairs = pairs;
+
+ rv = vnet_lisp_gpe_add_del_fwd_entry (a, 0);
+ if (0 != rv)
+ {
+ error = clib_error_return (0, "failed to %s gpe tunnel!",
+ is_add ? "add" : "delete");
+ }
+
+done:
+ vec_free (pairs);
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_gpe_add_del_fwd_entry_command, static) = {
+ .path = "lisp gpe entry",
+ .short_help = "lisp gpe entry add/del vni <vni> vrf <vrf> [leid <leid>] "
+ "reid <reid> [loc-pair <lloc> <rloc> p <priority> w <weight>] "
+ "[negative action <action>]",
+ .function = lisp_gpe_add_del_fwd_entry_command_fn,
+};
+/* *INDENT-ON* */
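+/*
+ * Example invocation accepted by the parser above (addresses, ids, priority
+ * and weight are illustrative):
+ *
+ *   lisp gpe entry add vni 1 vrf 0 reid 10.1.0.0/16
+ *     loc-pair 192.168.0.1 192.168.0.2 p 1 w 1
+ */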
+
+/** Check if LISP-GPE is enabled. */
+u8
+vnet_lisp_gpe_enable_disable_status (void)
+{
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+
+ return lgm->is_en;
+}
+
+/** Enable/disable LISP-GPE. */
+clib_error_t *
+vnet_lisp_gpe_enable_disable (vnet_lisp_gpe_enable_disable_args_t * a)
+{
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+
+ if (a->is_en)
+ {
+ lgm->is_en = 1;
+ }
+ else
+ {
+ /* remove all entries */
+ vnet_lisp_gpe_fwd_entry_flush ();
+
+ /* disable all l3 ifaces */
+ lisp_gpe_tenant_flush ();
+
+ lgm->is_en = 0;
+ }
+
+ return 0;
+}
+
+/** CLI command to enable/disable LISP-GPE. */
+static clib_error_t *
+lisp_gpe_enable_disable_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 is_en = 1;
+ vnet_lisp_gpe_enable_disable_args_t _a, *a = &_a;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "enable"))
+ is_en = 1;
+ else if (unformat (line_input, "disable"))
+ is_en = 0;
+ else
+ {
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+ }
+ a->is_en = is_en;
+ return vnet_lisp_gpe_enable_disable (a);
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (enable_disable_lisp_gpe_command, static) = {
+ .path = "lisp gpe",
+ .short_help = "lisp gpe [enable|disable]",
+ .function = lisp_gpe_enable_disable_command_fn,
+};
+/* *INDENT-ON* */
+
+/** CLI command to show LISP-GPE interfaces. */
+static clib_error_t *
+lisp_show_iface_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+ hash_pair_t *p;
+
+ vlib_cli_output (vm, "%=10s%=12s", "vrf", "hw_if_index");
+
+ /* *INDENT-OFF* */
+ hash_foreach_pair (p, lgm->l3_ifaces.hw_if_index_by_dp_table, ({
+ vlib_cli_output (vm, "%=10d%=12d", p->key, p->value[0]);
+ }));
+ /* *INDENT-ON* */
+
+ if (0 != lgm->l2_ifaces.hw_if_index_by_dp_table)
+ {
+ vlib_cli_output (vm, "%=10s%=12s", "bd_id", "hw_if_index");
+ /* *INDENT-OFF* */
+ hash_foreach_pair (p, lgm->l2_ifaces.hw_if_index_by_dp_table, ({
+ vlib_cli_output (vm, "%=10d%=12d", p->key, p->value[0]);
+ }));
+ /* *INDENT-ON* */
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_show_iface_command) = {
+ .path = "show lisp gpe interface",
+ .short_help = "show lisp gpe interface",
+ .function = lisp_show_iface_command_fn,
+};
+/* *INDENT-ON* */
+
+/** Format LISP-GPE status. */
+u8 *
+format_vnet_lisp_gpe_status (u8 * s, va_list * args)
+{
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+ return format (s, "%s", lgm->is_en ? "enabled" : "disabled");
+}
+
+
+/** LISP-GPE init function. */
+clib_error_t *
+lisp_gpe_init (vlib_main_t * vm)
+{
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+ clib_error_t *error = 0;
+
+ if ((error = vlib_call_init_function (vm, ip_main_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, ip4_lookup_init)))
+ return error;
+
+ lgm->vnet_main = vnet_get_main ();
+ lgm->vlib_main = vm;
+ lgm->im4 = &ip4_main;
+ lgm->im6 = &ip6_main;
+ lgm->lm4 = &ip4_main.lookup_main;
+ lgm->lm6 = &ip6_main.lookup_main;
+
+ lgm->lisp_gpe_fwd_entries =
+ hash_create_mem (0, sizeof (lisp_gpe_fwd_entry_key_t), sizeof (uword));
+
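+ /* register the decap nodes to receive the LISP data UDP destination
+ * port (4341, per RFC 6830) on both the ip4 and ip6 udp-local paths */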
+ udp_register_dst_port (vm, UDP_DST_PORT_lisp_gpe,
+ lisp_gpe_ip4_input_node.index, 1 /* is_ip4 */ );
+ udp_register_dst_port (vm, UDP_DST_PORT_lisp_gpe6,
+ lisp_gpe_ip6_input_node.index, 0 /* is_ip4 */ );
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (lisp_gpe_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-gpe/lisp_gpe.h b/src/vnet/lisp-gpe/lisp_gpe.h
new file mode 100644
index 00000000000..bb0f788b197
--- /dev/null
+++ b/src/vnet/lisp-gpe/lisp_gpe.h
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief LISP-GPE definitions.
+ */
+
+#ifndef included_vnet_lisp_gpe_h
+#define included_vnet_lisp_gpe_h
+
+#include <vppinfra/error.h>
+#include <vppinfra/mhash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/udp.h>
+#include <vnet/lisp-cp/lisp_types.h>
+#include <vnet/lisp-gpe/lisp_gpe_packet.h>
+#include <vnet/adj/adj_types.h>
+
+/** IP4-UDP-LISP encap header */
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ ip4_header_t ip4; /* 20 bytes */
+ udp_header_t udp; /* 8 bytes */
+ lisp_gpe_header_t lisp; /* 8 bytes */
+}) ip4_udp_lisp_gpe_header_t;
+/* *INDENT-ON* */
+
+/** IP6-UDP-LISP encap header */
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ ip6_header_t ip6; /* 40 bytes */
+ udp_header_t udp; /* 8 bytes */
+ lisp_gpe_header_t lisp; /* 8 bytes */
+}) ip6_udp_lisp_gpe_header_t;
+/* *INDENT-ON* */
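+/*
+ * Total encap overhead implied by the structs above:
+ * IP4-UDP-LISP: 20 + 8 + 8 = 36 bytes; IP6-UDP-LISP: 40 + 8 + 8 = 56 bytes.
+ */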
+
+#define foreach_lisp_gpe_ip_input_next \
+_(DROP, "error-drop") \
+_(IP4_INPUT, "ip4-input") \
+_(IP6_INPUT, "ip6-input") \
+_(L2_INPUT, "l2-input")
+
+/** Enum of possible next nodes post LISP-GPE decap */
+typedef enum
+{
+#define _(s,n) LISP_GPE_INPUT_NEXT_##s,
+ foreach_lisp_gpe_ip_input_next
+#undef _
+ LISP_GPE_INPUT_N_NEXT,
+} lisp_gpe_input_next_t;
+
+typedef enum
+{
+#define lisp_gpe_error(n,s) LISP_GPE_ERROR_##n,
+#include <vnet/lisp-gpe/lisp_gpe_error.def>
+#undef lisp_gpe_error
+ LISP_GPE_N_ERROR,
+} lisp_gpe_error_t;
+
+typedef struct tunnel_lookup
+{
+ /** Lookup lisp-gpe interfaces by dp table (e.g. vrf/bridge index) */
+ uword *hw_if_index_by_dp_table;
+
+ /** lookup decap tunnel termination sw_if_index by vni and vice versa */
+ uword *sw_if_index_by_vni;
+
+ // FIXME - Need this?
+ uword *vni_by_sw_if_index;
+} tunnel_lookup_t;
+
+/** LISP-GPE global state*/
+typedef struct lisp_gpe_main
+{
+ /**
+ * @brief DB of all forwarding entries. The Key is:{l-EID,r-EID,vni}
+ * where the EID encodes L2 or L3
+ */
+ uword *lisp_gpe_fwd_entries;
+
+ /**
+ * @brief A Pool of all LISP forwarding entries
+ */
+ struct lisp_gpe_fwd_entry_t_ *lisp_fwd_entry_pool;
+
+ /** Free vlib hw_if_indices */
+ u32 *free_tunnel_hw_if_indices;
+
+ u8 is_en;
+
+ /* L3 data structures
+ * ================== */
+ tunnel_lookup_t l3_ifaces;
+
+ /* L2 data structures
+ * ================== */
+
+ /** L2 LISP FIB */
+ BVT (clib_bihash) l2_fib;
+
+ tunnel_lookup_t l2_ifaces;
+
+ /** Load-balance for a miss in the table */
+ dpo_id_t l2_lb_cp_lkup;
+
+ /** convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+ ip4_main_t *im4;
+ ip6_main_t *im6;
+ ip_lookup_main_t *lm4;
+ ip_lookup_main_t *lm6;
+} lisp_gpe_main_t;
+
+/** LISP-GPE global state*/
+extern lisp_gpe_main_t lisp_gpe_main;
+
+always_inline lisp_gpe_main_t *
+vnet_lisp_gpe_get_main ()
+{
+ return &lisp_gpe_main;
+}
+
+
+extern vlib_node_registration_t lisp_gpe_ip4_input_node;
+extern vlib_node_registration_t lisp_gpe_ip6_input_node;
+extern vnet_hw_interface_class_t lisp_gpe_hw_class;
+
+u8 *format_lisp_gpe_header_with_length (u8 * s, va_list * args);
+
+/** Read LISP-GPE status */
+u8 vnet_lisp_gpe_enable_disable_status (void);
+
+u32
+lisp_gpe_l3_iface_find_or_create (lisp_gpe_main_t * lgm,
+ u32 overlay_table_id, u32 vni);
+
+/** Add/del LISP-GPE interface. */
+extern void lisp_gpe_del_l2_iface (lisp_gpe_main_t * lgm, u32 vni, u32 bd_id);
+extern u32 lisp_gpe_add_l2_iface (lisp_gpe_main_t * lgm, u32 vni, u32 bd_id);
+extern void lisp_gpe_del_l3_iface (lisp_gpe_main_t * lgm, u32 vni,
+ u32 table_id);
+extern u32 lisp_gpe_add_l3_iface (lisp_gpe_main_t * lgm, u32 vni,
+ u32 table_id);
+
+
+typedef struct
+{
+ u8 is_en;
+} vnet_lisp_gpe_enable_disable_args_t;
+
+clib_error_t
+ * vnet_lisp_gpe_enable_disable (vnet_lisp_gpe_enable_disable_args_t * a);
+
+typedef enum
+{
+ NO_ACTION,
+ FORWARD_NATIVE,
+ SEND_MAP_REQUEST,
+ DROP
+} negative_fwd_actions_e;
+
+/** */
+typedef struct
+{
+ u8 is_add;
+
+ /** type of mapping */
+ u8 is_negative;
+
+ /** action for negative mappings */
+ negative_fwd_actions_e action;
+
+ /** local eid */
+ gid_address_t lcl_eid;
+
+ /** remote eid */
+ gid_address_t rmt_eid;
+
+ /** vector of locator pairs */
+ locator_pair_t *locator_pairs;
+
+ /** FIB index to lookup remote locator at encap */
+ u32 encap_fib_index;
+
+ /** FIB index to lookup inner IP at decap */
+ u32 decap_fib_index;
+
+ /* TODO remove */
+ u32 decap_next_index;
+
+ /** VNI/tenant id in HOST byte order */
+ u32 vni;
+
+ /** vrf or bd where fwd entry should be inserted */
+ union
+ {
+ /** table (vrf) id */
+ u32 table_id;
+
+ /** bridge domain id */
+ u16 bd_id;
+
+ /** generic access */
+ u32 dp_table;
+ };
+} vnet_lisp_gpe_add_del_fwd_entry_args_t;
+
+#define foreach_lgpe_ip4_lookup_next \
+ _(DROP, "error-drop") \
+ _(LISP_CP_LOOKUP, "lisp-cp-lookup")
+
+typedef enum lgpe_ip4_lookup_next
+{
+#define _(sym,str) LGPE_IP4_LOOKUP_NEXT_##sym,
+ foreach_lgpe_ip4_lookup_next
+#undef _
+ LGPE_IP4_LOOKUP_N_NEXT,
+} lgpe_ip4_lookup_next_t;
+
+#define foreach_lgpe_ip6_lookup_next \
+ _(DROP, "error-drop") \
+ _(LISP_CP_LOOKUP, "lisp-cp-lookup")
+
+typedef enum lgpe_ip6_lookup_next
+{
+#define _(sym,str) LGPE_IP6_LOOKUP_NEXT_##sym,
+ foreach_lgpe_ip6_lookup_next
+#undef _
+ LGPE_IP6_LOOKUP_N_NEXT,
+} lgpe_ip6_lookup_next_t;
+
+u8 *format_vnet_lisp_gpe_status (u8 * s, va_list * args);
+
+#endif /* included_vnet_lisp_gpe_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-gpe/lisp_gpe_adjacency.c b/src/vnet/lisp-gpe/lisp_gpe_adjacency.c
new file mode 100644
index 00000000000..8c96a25cc5d
--- /dev/null
+++ b/src/vnet/lisp-gpe/lisp_gpe_adjacency.c
@@ -0,0 +1,542 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief Common utility functions for IPv4, IPv6 and L2 LISP-GPE adjacencies.
+ *
+ */
+
+#include <vnet/dpo/load_balance.h>
+#include <vnet/lisp-cp/lisp_types.h>
+#include <vnet/lisp-gpe/lisp_gpe_sub_interface.h>
+#include <vnet/lisp-gpe/lisp_gpe_adjacency.h>
+#include <vnet/lisp-gpe/lisp_gpe_tunnel.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/adj/adj_midchain.h>
+
+/**
+ * Memory pool of all adjacencies
+ */
+static lisp_gpe_adjacency_t *lisp_adj_pool;
+
+/**
+ * Hash table of all adjacencies. key:{nh, itf}
+ * We never have an all zeros address since the interfaces are multi-access,
+ * therefore there is no ambiguity between a v4 and v6 next-hop, so we don't
+ * need to add the protocol to the key.
+ */
+static BVT (clib_bihash) lisp_adj_db;
+
+#define LISP_ADJ_SET_KEY(_key, _itf, _nh) \
+{ \
+ _key.key[0] = (_nh)->ip.v6.as_u64[0]; \
+ _key.key[1] = (_nh)->ip.v6.as_u64[1]; \
+ _key.key[2] = (_itf); \
+}
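+/*
+ * The key is the 128-bit address as held in the ip_address_t union (for a
+ * v4 next-hop the remaining bytes are zeroed, see ip46_address_to_ip_address
+ * below) plus the sw_if_index; kv.value holds the index of the adjacency in
+ * the pool.
+ */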
+
+static index_t
+lisp_adj_find (const ip_address_t * addr, u32 sw_if_index)
+{
+ BVT (clib_bihash_kv) kv;
+
+ LISP_ADJ_SET_KEY (kv, sw_if_index, addr);
+
+ if (BV (clib_bihash_search) (&lisp_adj_db, &kv, &kv) < 0)
+ {
+ return (INDEX_INVALID);
+ }
+ else
+ {
+ return (kv.value);
+ }
+}
+
+static void
+lisp_adj_insert (const ip_address_t * addr, u32 sw_if_index, index_t ai)
+{
+ BVT (clib_bihash_kv) kv;
+
+ LISP_ADJ_SET_KEY (kv, sw_if_index, addr);
+ kv.value = ai;
+
+ BV (clib_bihash_add_del) (&lisp_adj_db, &kv, 1);
+}
+
+static void
+lisp_adj_remove (const ip_address_t * addr, u32 sw_if_index)
+{
+ BVT (clib_bihash_kv) kv;
+
+ LISP_ADJ_SET_KEY (kv, sw_if_index, addr);
+
+ BV (clib_bihash_add_del) (&lisp_adj_db, &kv, 0);
+}
+
+static lisp_gpe_adjacency_t *
+lisp_gpe_adjacency_get_i (index_t lai)
+{
+ return (pool_elt_at_index (lisp_adj_pool, lai));
+}
+
+fib_forward_chain_type_t
+lisp_gpe_adj_get_fib_chain_type (const lisp_gpe_adjacency_t * ladj)
+{
+ switch (ip_addr_version (&ladj->remote_rloc))
+ {
+ case IP4:
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+ case IP6:
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6);
+ default:
+ ASSERT (0);
+ break;
+ }
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+}
+
+static void
+ip46_address_to_ip_address (const ip46_address_t * a, ip_address_t * b)
+{
+ if (ip46_address_is_ip4 (a))
+ {
+ memset (b, 0, sizeof (*b));
+ ip_address_set (b, &a->ip4, IP4);
+ }
+ else
+ {
+ ip_address_set (b, &a->ip6, IP6);
+ }
+}
+
+/**
+ * @brief Stack the tunnel's midchain on the IP forwarding chain of the via
+ */
+static void
+lisp_gpe_adj_stack_one (lisp_gpe_adjacency_t * ladj, adj_index_t ai)
+{
+ const lisp_gpe_tunnel_t *lgt;
+ dpo_id_t tmp = DPO_INVALID;
+
+ lgt = lisp_gpe_tunnel_get (ladj->tunnel_index);
+ fib_entry_contribute_forwarding (lgt->fib_entry_index,
+ lisp_gpe_adj_get_fib_chain_type (ladj),
+ &tmp);
+
+ if (DPO_LOAD_BALANCE == tmp.dpoi_type)
+ {
+ /*
+ * post LISP rewrite we will load-balance. However, the LISP encap
+ * is always the same for this adjacency/tunnel and hence the IP/UDP src,dst
+ * hash is always the same result too. So we do that hash now and
+ * stack on the choice.
+ * If the choice is an incomplete adj then we will need a poke when
+ * it becomes complete. This happens since the adj update walk propagates
+ * as far as recursive paths.
+ */
+ const dpo_id_t *choice;
+ load_balance_t *lb;
+ int hash;
+
+ lb = load_balance_get (tmp.dpoi_index);
+
+ if (IP4 == ip_addr_version (&ladj->remote_rloc))
+ {
+ hash = ip4_compute_flow_hash ((ip4_header_t *) adj_get_rewrite (ai),
+ lb->lb_hash_config);
+ }
+ else
+ {
+ hash = ip6_compute_flow_hash ((ip6_header_t *) adj_get_rewrite (ai),
+ lb->lb_hash_config);
+ }
+
+ choice =
+ load_balance_get_bucket_i (lb, hash & lb->lb_n_buckets_minus_1);
+ dpo_copy (&tmp, choice);
+ }
+
+ adj_nbr_midchain_stack (ai, &tmp);
+ dpo_reset (&tmp);
+}
+
+/**
+ * @brief Callback for restacking all adjacencies on a LISP-GPE interface
+ */
+static adj_walk_rc_t
+lisp_gpe_adj_walk_cb (adj_index_t ai, void *ctx)
+{
+ lisp_gpe_adjacency_t *ladj = ctx;
+
+ lisp_gpe_adj_stack_one (ladj, ai);
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+static void
+lisp_gpe_adj_stack (lisp_gpe_adjacency_t * ladj)
+{
+ fib_protocol_t nh_proto;
+ ip46_address_t nh;
+
+ ip_address_to_46 (&ladj->remote_rloc, &nh, &nh_proto);
+
+ /*
+ * walk all the adjacencies on the lisp interface and restack them
+ */
+ adj_nbr_walk_nh (ladj->sw_if_index,
+ nh_proto, &nh, lisp_gpe_adj_walk_cb, ladj);
+}
+
+static lisp_gpe_next_protocol_e
+lisp_gpe_adj_proto_from_vnet_link_type (vnet_link_t linkt)
+{
+ switch (linkt)
+ {
+ case VNET_LINK_IP4:
+ return (LISP_GPE_NEXT_PROTO_IP4);
+ case VNET_LINK_IP6:
+ return (LISP_GPE_NEXT_PROTO_IP6);
+ case VNET_LINK_ETHERNET:
+ return (LISP_GPE_NEXT_PROTO_ETHERNET);
+ default:
+ ASSERT (0);
+ }
+ return (LISP_GPE_NEXT_PROTO_IP4);
+}
+
+#define is_v4_packet(_h) (((*(u8 *) _h) & 0xF0) == 0x40)
+
+static void
+lisp_gpe_fixup (vlib_main_t * vm, ip_adjacency_t * adj, vlib_buffer_t * b)
+{
+ /* Fixup the checksum and len fields in the LISP tunnel encap
+ * that was applied at the midchain node */
+ ip_udp_fixup_one (vm, b, is_v4_packet (vlib_buffer_get_current (b)));
+}
+
+/**
+ * @brief The LISP-GPE interface's registered function to update, i.e.
+ * provide a rewrite string for, an adjacency.
+ */
+void
+lisp_gpe_update_adjacency (vnet_main_t * vnm, u32 sw_if_index, adj_index_t ai)
+{
+ const lisp_gpe_tunnel_t *lgt;
+ lisp_gpe_adjacency_t *ladj;
+ ip_adjacency_t *adj;
+ ip_address_t rloc;
+ vnet_link_t linkt;
+ index_t lai;
+
+ adj = adj_get (ai);
+ ip46_address_to_ip_address (&adj->sub_type.nbr.next_hop, &rloc);
+
+ /*
+ * find an existing or create a new adj
+ */
+ lai = lisp_adj_find (&rloc, sw_if_index);
+
+ ASSERT (INDEX_INVALID != lai);
+
+ ladj = pool_elt_at_index (lisp_adj_pool, lai);
+ lgt = lisp_gpe_tunnel_get (ladj->tunnel_index);
+ linkt = adj_get_link_type (ai);
+
+ adj_nbr_midchain_update_rewrite
+ (ai, lisp_gpe_fixup,
+ (VNET_LINK_ETHERNET == linkt ?
+ ADJ_MIDCHAIN_FLAG_NO_COUNT :
+ ADJ_MIDCHAIN_FLAG_NONE),
+ lisp_gpe_tunnel_build_rewrite
+ (lgt, ladj, lisp_gpe_adj_proto_from_vnet_link_type (linkt)));
+
+ lisp_gpe_adj_stack_one (ladj, ai);
+}
+
+u8 *
+lisp_gpe_build_rewrite (vnet_main_t * vnm,
+ u32 sw_if_index,
+ vnet_link_t link_type, const void *dst_address)
+{
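+ /* never called: LISP-GPE rewrites are built per-adjacency in
+ * lisp_gpe_update_adjacency above rather than from a fixed dst_address,
+ * hence the hard assert */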
+ ASSERT (0);
+ return (NULL);
+}
+
+index_t
+lisp_gpe_adjacency_find_or_create_and_lock (const locator_pair_t * pair,
+ u32 overlay_table_id, u32 vni)
+{
+ const lisp_gpe_sub_interface_t *l3s;
+ const lisp_gpe_tunnel_t *lgt;
+ lisp_gpe_adjacency_t *ladj;
+ index_t lai, l3si;
+
+ /*
+ * first find the L3 sub-interface that corresponds to the local RLOC and vni
+ */
+ l3si = lisp_gpe_sub_interface_find_or_create_and_lock (&pair->lcl_loc,
+ overlay_table_id,
+ vni);
+ l3s = lisp_gpe_sub_interface_get (l3si);
+
+ /*
+ * find an existing or create a new adj
+ */
+ lai = lisp_adj_find (&pair->rmt_loc, l3s->sw_if_index);
+
+ if (INDEX_INVALID == lai)
+ {
+
+ pool_get (lisp_adj_pool, ladj);
+ memset (ladj, 0, sizeof (*ladj));
+ lai = (ladj - lisp_adj_pool);
+
+ ip_address_copy (&ladj->remote_rloc, &pair->rmt_loc);
+ ladj->vni = vni;
+ /* transfer the lock to the adj */
+ ladj->lisp_l3_sub_index = l3si;
+ ladj->sw_if_index = l3s->sw_if_index;
+
+ /* if vni is non-default */
+ if (ladj->vni)
+ ladj->flags = LISP_GPE_FLAGS_I;
+
+ /* work in lisp-gpe not legacy mode */
+ ladj->flags |= LISP_GPE_FLAGS_P;
+
+ /*
+ * find the tunnel that will provide the underlying transport
+ * and hence the rewrite.
+ * The RLOC FIB index is default table - always.
+ */
+ ladj->tunnel_index = lisp_gpe_tunnel_find_or_create_and_lock (pair, 0);
+
+ lgt = lisp_gpe_tunnel_get (ladj->tunnel_index);
+
+ /*
+ * become a child of the RLOC FIB entry so we are updated when
+ * its reachability changes, allowing us to re-stack the midchains
+ */
+ ladj->fib_entry_child_index = fib_entry_child_add (lgt->fib_entry_index,
+ FIB_NODE_TYPE_LISP_ADJ,
+ lai);
+
+ lisp_adj_insert (&ladj->remote_rloc, ladj->sw_if_index, lai);
+ }
+ else
+ {
+ /* unlock the interface from the find. */
+ lisp_gpe_sub_interface_unlock (l3si);
+ ladj = lisp_gpe_adjacency_get_i (lai);
+ }
+
+ ladj->locks++;
+
+ return (lai);
+}
+
+/**
+ * @brief Get a pointer to a tunnel from a pointer to a FIB node
+ */
+static lisp_gpe_adjacency_t *
+lisp_gpe_adjacency_from_fib_node (const fib_node_t * node)
+{
+ return ((lisp_gpe_adjacency_t *)
+ ((char *) node -
+ STRUCT_OFFSET_OF (lisp_gpe_adjacency_t, fib_node)));
+}
+
+static void
+lisp_gpe_adjacency_last_lock_gone (lisp_gpe_adjacency_t * ladj)
+{
+ const lisp_gpe_tunnel_t *lgt;
+
+ /*
+ * the last lock has gone: remove the adjacency from the DB and release
+ * the resources it holds
+ */
+ lisp_adj_remove (&ladj->remote_rloc, ladj->sw_if_index);
+
+ /*
+ * unlock the resources this adj holds
+ */
+ lgt = lisp_gpe_tunnel_get (ladj->tunnel_index);
+
+ fib_entry_child_remove (lgt->fib_entry_index, ladj->fib_entry_child_index);
+
+ lisp_gpe_tunnel_unlock (ladj->tunnel_index);
+ lisp_gpe_sub_interface_unlock (ladj->lisp_l3_sub_index);
+
+ pool_put (lisp_adj_pool, ladj);
+}
+
+void
+lisp_gpe_adjacency_unlock (index_t lai)
+{
+ lisp_gpe_adjacency_t *ladj;
+
+ ladj = lisp_gpe_adjacency_get_i (lai);
+
+ ladj->locks--;
+
+ if (0 == ladj->locks)
+ {
+ lisp_gpe_adjacency_last_lock_gone (ladj);
+ }
+}
+
+const lisp_gpe_adjacency_t *
+lisp_gpe_adjacency_get (index_t lai)
+{
+ return (lisp_gpe_adjacency_get_i (lai));
+}
+
+
+/**
+ * @brief LISP GPE tunnel back walk
+ *
+ * The FIB entry through which this tunnel resolves has been updated.
+ * re-stack the midchain on the new forwarding.
+ */
+static fib_node_back_walk_rc_t
+lisp_gpe_adjacency_back_walk (fib_node_t * node,
+ fib_node_back_walk_ctx_t * ctx)
+{
+ lisp_gpe_adj_stack (lisp_gpe_adjacency_from_fib_node (node));
+
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+static fib_node_t *
+lisp_gpe_adjacency_get_fib_node (fib_node_index_t index)
+{
+ lisp_gpe_adjacency_t *ladj;
+
+ ladj = pool_elt_at_index (lisp_adj_pool, index);
+ return (&ladj->fib_node);
+}
+
+static void
+lisp_gpe_adjacency_last_fib_lock_gone (fib_node_t * node)
+{
+ lisp_gpe_adjacency_last_lock_gone (lisp_gpe_adjacency_from_fib_node (node));
+}
+
+static const fib_node_vft_t lisp_gpe_adj_vft = {
+ .fnv_get = lisp_gpe_adjacency_get_fib_node,
+ .fnv_back_walk = lisp_gpe_adjacency_back_walk,
+ .fnv_last_lock = lisp_gpe_adjacency_last_fib_lock_gone,
+};
+
+u8 *
+format_lisp_gpe_adjacency (u8 * s, va_list * args)
+{
+ lisp_gpe_adjacency_t *ladj = va_arg (*args, lisp_gpe_adjacency_t *);
+ lisp_gpe_adjacency_format_flags_t flags =
+ va_arg (*args, lisp_gpe_adjacency_format_flags_t);
+
+ if (flags & LISP_GPE_ADJ_FORMAT_FLAG_DETAIL)
+ {
+ s =
+ format (s, "index %d locks:%d\n", ladj - lisp_adj_pool, ladj->locks);
+ }
+
+ s = format (s, " vni: %d,", ladj->vni);
+ s = format (s, " remote-RLOC: %U,", format_ip_address, &ladj->remote_rloc);
+
+ if (flags & LISP_GPE_ADJ_FORMAT_FLAG_DETAIL)
+ {
+ s = format (s, " %U\n",
+ format_lisp_gpe_sub_interface,
+ lisp_gpe_sub_interface_get (ladj->lisp_l3_sub_index));
+ s = format (s, " %U\n",
+ format_lisp_gpe_tunnel,
+ lisp_gpe_tunnel_get (ladj->tunnel_index));
+ }
+ else
+ {
+ s = format (s, " LISP L3 sub-interface index: %d,",
+ ladj->lisp_l3_sub_index);
+ s = format (s, " LISP tunnel index: %d", ladj->tunnel_index);
+ }
+
+
+ return (s);
+}
+
+static clib_error_t *
+lisp_gpe_adjacency_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ lisp_gpe_adjacency_t *ladj;
+ index_t index;
+
+ if (pool_elts (lisp_adj_pool) == 0)
+ vlib_cli_output (vm, "No lisp-gpe Adjacencies");
+
+ if (unformat (input, "%d", &index))
+ {
+ ladj = lisp_gpe_adjacency_get_i (index);
+ vlib_cli_output (vm, "%U", format_lisp_gpe_adjacency, ladj,
+ LISP_GPE_ADJ_FORMAT_FLAG_DETAIL);
+ }
+ else
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (ladj, lisp_adj_pool,
+ ({
+ vlib_cli_output (vm, "[%d] %U\n",
+ ladj - lisp_adj_pool,
+ format_lisp_gpe_adjacency, ladj,
+ LISP_GPE_ADJ_FORMAT_FLAG_NONE);
+ }));
+ /* *INDENT-ON* */
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_lisp_gpe_adjacency_command, static) =
+{
+ .path = "show lisp gpe adjacency",
+ .function = lisp_gpe_adjacency_show,
+};
+/* *INDENT-ON* */
+
+#define LISP_ADJ_NBR_DEFAULT_HASH_NUM_BUCKETS (256)
+#define LISP_ADJ_NBR_DEFAULT_HASH_MEMORY_SIZE (1<<20)
+
+static clib_error_t *
+lisp_gpe_adj_module_init (vlib_main_t * vm)
+{
+ BV (clib_bihash_init) (&lisp_adj_db,
+ "Adjacency Neighbour table",
+ LISP_ADJ_NBR_DEFAULT_HASH_NUM_BUCKETS,
+ LISP_ADJ_NBR_DEFAULT_HASH_MEMORY_SIZE);
+
+  fib_node_register_type (FIB_NODE_TYPE_LISP_ADJ, &lisp_gpe_tunnel_vft);
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (lisp_gpe_adj_module_init);
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-gpe/lisp_gpe_adjacency.h b/src/vnet/lisp-gpe/lisp_gpe_adjacency.h
new file mode 100644
index 00000000000..adc3acaee3c
--- /dev/null
+++ b/src/vnet/lisp-gpe/lisp_gpe_adjacency.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief Common utility functions for IPv4, IPv6 and L2 LISP-GPE adjacencies.
+ *
+ */
+
+#ifndef LISP_GPE_ADJACENCY_H__
+#define LISP_GPE_ADJACENCY_H__
+
+#include <vnet/fib/fib_node.h>
+#include <vnet/lisp-gpe/lisp_gpe.h>
+
+/**
+ * @brief A LISP GPE Adjacency.
+ *
+ * An adjacency represents a peer on an L3 sub-interface to which to send
+ * traffic; adjacencies are thus present in the EID space.
+ * The peer is identified by the key {remote-rloc, sub-interface}, which is
+ * equivalent to the usual adjacency key {next-hop, interface}. So, curiously,
+ * the RLOC address from the underlay is used as a next-hop address in the
+ * overlay. This is OK because:
+ * 1 - the RLOC is unique in the underlay AND there is only one underlay VRF per
+ * overlay
+ * 2 - the RLOC may overlap with an address in the overlay, but we do not create
+ * an adj-fib (i.e. a route in the overlay FIB for the rloc)
+ *
+ */
+typedef struct lisp_gpe_adjacency_t_
+{
+ /**
+ * The LISP adj is a part of the FIB control plane graph.
+ */
+ fib_node_t fib_node;
+
+ /**
+ * remote RLOC. The adjacency's next-hop
+ */
+ ip_address_t remote_rloc;
+
+ /**
+ * The VNI. Used in combination with the local-rloc to get the sub-interface
+ */
+ u32 vni;
+
+ /**
+ * The number of locks/reference counts on the adjacency.
+ */
+ u32 locks;
+
+ /**
+ * The index of the LISP L3 subinterface
+ */
+ u32 lisp_l3_sub_index;
+
+ /**
+ * The SW IF index of the sub-interface this adjacency uses.
+ * Cached for convenience from the LISP L3 sub-interface
+ */
+ u32 sw_if_index;
+
+ /**
+ * The index of the LISP GPE tunnel that provides the transport
+ * in the underlay.
+ */
+ u32 tunnel_index;
+
+ /**
+ * This adjacency is a child of the FIB entry to reach the RLOC.
+ * This is so when the reachability of that RLOC changes, we can restack
+   * the FIB adjacencies.
+ */
+ u32 fib_entry_child_index;
+
+ /**
+ * LISP header fields in HOST byte order
+ */
+ u8 flags;
+ u8 ver_res;
+ u8 res;
+ u8 next_protocol;
+
+} lisp_gpe_adjacency_t;
+
+extern index_t lisp_gpe_adjacency_find_or_create_and_lock (const
+ locator_pair_t *
+ pair,
+ u32 rloc_fib_index,
+ u32 vni);
+
+extern void lisp_gpe_adjacency_unlock (index_t lai);
+
+extern const lisp_gpe_adjacency_t *lisp_gpe_adjacency_get (index_t lai);
+
+extern void lisp_gpe_update_adjacency (vnet_main_t * vnm,
+ u32 sw_if_index, adj_index_t ai);
+extern u8 *lisp_gpe_build_rewrite (vnet_main_t * vnm,
+ u32 sw_if_index,
+ vnet_link_t link_type,
+ const void *dst_address);
+
+
+/**
+ * @brief Flags for displaying the adjacency
+ */
+typedef enum lisp_gpe_adjacency_format_flags_t_
+{
+ LISP_GPE_ADJ_FORMAT_FLAG_NONE,
+ LISP_GPE_ADJ_FORMAT_FLAG_DETAIL,
+} lisp_gpe_adjacency_format_flags_t;
+
+extern u8 *format_lisp_gpe_adjacency (u8 * s, va_list * args);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-gpe/lisp_gpe_api.c b/src/vnet/lisp-gpe/lisp_gpe_api.c
new file mode 100644
index 00000000000..176ded501cd
--- /dev/null
+++ b/src/vnet/lisp-gpe/lisp_gpe_api.c
@@ -0,0 +1,304 @@
+/*
+ *------------------------------------------------------------------
+ * lisp_gpe_api.c - lisp_gpe api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/lisp-gpe/lisp_gpe.h>
+#include <vnet/lisp-gpe/lisp_gpe_fwd_entry.h>
+#include <vnet/lisp-gpe/lisp_gpe_tenant.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(LISP_GPE_ADD_DEL_FWD_ENTRY, lisp_gpe_add_del_fwd_entry) \
+_(LISP_GPE_ENABLE_DISABLE, lisp_gpe_enable_disable) \
+_(LISP_GPE_ADD_DEL_IFACE, lisp_gpe_add_del_iface) \
+_(LISP_GPE_TUNNEL_DUMP, lisp_gpe_tunnel_dump)
+
+/** Used for transferring locators via VPP API */
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u8 is_ip4; /**< is locator an IPv4 address */
+ u8 priority; /**< locator priority */
+ u8 weight; /**< locator weight */
+ u8 addr[16]; /**< IPv4/IPv6 address */
+}) rloc_t;
+/* *INDENT-ON* */
+
+static locator_pair_t *
+unformat_lisp_loc_pairs (void *lcl_locs, void *rmt_locs, u32 rloc_num)
+{
+ u32 i;
+ locator_pair_t *pairs = 0, pair;
+ rloc_t *r;
+
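+  /*
+   * lcl_locs and rmt_locs are parallel arrays of rloc_t, one element
+   * per pair; priority and weight are taken from the remote locator
+   */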
+ for (i = 0; i < rloc_num; i++)
+ {
+ /* local locator */
+ r = &((rloc_t *) lcl_locs)[i];
+ memset (&pair.lcl_loc, 0, sizeof (pair.lcl_loc));
+ ip_address_set (&pair.lcl_loc, &r->addr, r->is_ip4 ? IP4 : IP6);
+
+ /* remote locators */
+ r = &((rloc_t *) rmt_locs)[i];
+ memset (&pair.rmt_loc, 0, sizeof (pair.rmt_loc));
+ ip_address_set (&pair.rmt_loc, &r->addr, r->is_ip4 ? IP4 : IP6);
+
+ pair.priority = r->priority;
+ pair.weight = r->weight;
+
+ vec_add1 (pairs, pair);
+ }
+ return pairs;
+}
+
+static int
+unformat_lisp_eid_api (gid_address_t * dst, u32 vni, u8 type, void *src,
+ u8 len)
+{
+ switch (type)
+ {
+ case 0: /* ipv4 */
+ gid_address_type (dst) = GID_ADDR_IP_PREFIX;
+ gid_address_ip_set (dst, src, IP4);
+ gid_address_ippref_len (dst) = len;
+ ip_prefix_normalize (&gid_address_ippref (dst));
+ break;
+ case 1: /* ipv6 */
+ gid_address_type (dst) = GID_ADDR_IP_PREFIX;
+ gid_address_ip_set (dst, src, IP6);
+ gid_address_ippref_len (dst) = len;
+ ip_prefix_normalize (&gid_address_ippref (dst));
+ break;
+ case 2: /* l2 mac */
+ gid_address_type (dst) = GID_ADDR_MAC;
+ clib_memcpy (&gid_address_mac (dst), src, 6);
+ break;
+ default:
+ /* unknown type */
+ return VNET_API_ERROR_INVALID_VALUE;
+ }
+
+ gid_address_vni (dst) = vni;
+
+ return 0;
+}
+
+static void
+ vl_api_lisp_gpe_add_del_fwd_entry_t_handler
+ (vl_api_lisp_gpe_add_del_fwd_entry_t * mp)
+{
+ vl_api_lisp_gpe_add_del_fwd_entry_reply_t *rmp;
+ vnet_lisp_gpe_add_del_fwd_entry_args_t _a, *a = &_a;
+ locator_pair_t *pairs = 0;
+ int rv = 0;
+
+ memset (a, 0, sizeof (a[0]));
+
+ rv = unformat_lisp_eid_api (&a->rmt_eid, mp->vni, mp->eid_type,
+ mp->rmt_eid, mp->rmt_len);
+ rv |= unformat_lisp_eid_api (&a->lcl_eid, mp->vni, mp->eid_type,
+ mp->lcl_eid, mp->lcl_len);
+
+ pairs = unformat_lisp_loc_pairs (mp->lcl_locs, mp->rmt_locs, mp->loc_num);
+
+ if (rv || 0 == pairs)
+ goto send_reply;
+
+ a->is_add = mp->is_add;
+ a->locator_pairs = pairs;
+ a->dp_table = mp->dp_table;
+ a->vni = mp->vni;
+ a->action = mp->action;
+
+ rv = vnet_lisp_gpe_add_del_fwd_entry (a, 0);
+ vec_free (pairs);
+send_reply:
+ REPLY_MACRO (VL_API_LISP_GPE_ADD_DEL_FWD_ENTRY_REPLY);
+}
+
+static void
+vl_api_lisp_gpe_enable_disable_t_handler (vl_api_lisp_gpe_enable_disable_t *
+ mp)
+{
+ vl_api_lisp_gpe_enable_disable_reply_t *rmp;
+ int rv = 0;
+ vnet_lisp_gpe_enable_disable_args_t _a, *a = &_a;
+
+ a->is_en = mp->is_en;
+ vnet_lisp_gpe_enable_disable (a);
+
+ REPLY_MACRO (VL_API_LISP_GPE_ENABLE_DISABLE_REPLY);
+}
+
+static void
+vl_api_lisp_gpe_add_del_iface_t_handler (vl_api_lisp_gpe_add_del_iface_t * mp)
+{
+ vl_api_lisp_gpe_add_del_iface_reply_t *rmp;
+ int rv = 0;
+
+ if (mp->is_l2)
+ {
+ if (mp->is_add)
+ {
+ if (~0 ==
+ lisp_gpe_tenant_l2_iface_add_or_lock (mp->vni, mp->dp_table))
+ rv = 1;
+ }
+ else
+ lisp_gpe_tenant_l2_iface_unlock (mp->vni);
+ }
+ else
+ {
+ if (mp->is_add)
+ {
+ if (~0 ==
+ lisp_gpe_tenant_l3_iface_add_or_lock (mp->vni, mp->dp_table))
+ rv = 1;
+ }
+ else
+ lisp_gpe_tenant_l3_iface_unlock (mp->vni);
+ }
+
+ REPLY_MACRO (VL_API_LISP_GPE_ADD_DEL_IFACE_REPLY);
+}
+
+static void
+send_lisp_gpe_fwd_entry_details (lisp_gpe_fwd_entry_t * lfe,
+ unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_lisp_gpe_tunnel_details_t *rmp;
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_LISP_GPE_TUNNEL_DETAILS);
+
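+  /* the reported tunnel index is the fwd entry's pool index */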
+ rmp->tunnels = lfe - lgm->lisp_fwd_entry_pool;
+
+ rmp->is_ipv6 = ip_prefix_version (&(lfe->key->rmt.ippref)) == IP6 ? 1 : 0;
+ ip_address_copy_addr (rmp->source_ip,
+			&ip_prefix_addr (&(lfe->key->lcl.ippref)));
+ ip_address_copy_addr (rmp->destination_ip,
+ &ip_prefix_addr (&(lfe->key->rmt.ippref)));
+
+ rmp->encap_fib_id = htonl (0);
+ rmp->decap_fib_id = htonl (lfe->eid_fib_index);
+ rmp->iid = htonl (lfe->key->vni);
+ rmp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+vl_api_lisp_gpe_tunnel_dump_t_handler (vl_api_lisp_gpe_tunnel_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q = NULL;
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+ lisp_gpe_fwd_entry_t *lfe = NULL;
+
+ if (pool_elts (lgm->lisp_fwd_entry_pool) == 0)
+ {
+ return;
+ }
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ /* *INDENT-OFF* */
+ pool_foreach(lfe, lgm->lisp_fwd_entry_pool,
+ ({
+ send_lisp_gpe_fwd_entry_details(lfe, q, mp->context);
+ }));
+ /* *INDENT-ON* */
+}
+
+/*
+ * lisp_gpe_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_lisp_gpe;
+#undef _
+}
+
+static clib_error_t *
+lisp_gpe_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (lisp_gpe_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-gpe/lisp_gpe_error.def b/src/vnet/lisp-gpe/lisp_gpe_error.def
new file mode 100644
index 00000000000..415fada73d2
--- /dev/null
+++ b/src/vnet/lisp-gpe/lisp_gpe_error.def
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+lisp_gpe_error (ENCAPSULATED, "good packets encapsulated")
+lisp_gpe_error (DECAPSULATED, "good packets decapsulated")
+lisp_gpe_error (NO_TUNNEL, "tunnel does not exist")
diff --git a/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c b/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c
new file mode 100644
index 00000000000..26a93a87b9d
--- /dev/null
+++ b/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c
@@ -0,0 +1,1053 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/lisp-gpe/lisp_gpe_fwd_entry.h>
+#include <vnet/lisp-gpe/lisp_gpe_adjacency.h>
+#include <vnet/lisp-gpe/lisp_gpe_tenant.h>
+#include <vnet/lisp-cp/lisp_cp_dpo.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_path_list.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/dpo/drop_dpo.h>
+#include <vnet/dpo/lookup_dpo.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/adj/adj_midchain.h>
+
+/**
+ * @brief Add route to IP4 or IP6 Destination FIB.
+ *
+ * Add a route to the destination FIB that results in the lookup
+ * in the SRC FIB. The SRC FIB is created if it does not yet exist.
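+ *
+ * Packets first match this route in the destination (EID) FIB; its DPO
+ * then looks up the packet's source address in the per-destination SRC
+ * FIB, and that second lookup result provides the forwarding.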
+ *
+ * @param[in]	dst_fib_index	Destination FIB index.
+ * @param[in] dst_prefix Destination IP prefix.
+ *
+ * @return src_fib_index The index/ID of the SRC FIB created.
+ */
+static u32
+ip_dst_fib_add_route (u32 dst_fib_index, const ip_prefix_t * dst_prefix)
+{
+ fib_node_index_t src_fib_index;
+ fib_prefix_t dst_fib_prefix;
+ fib_node_index_t dst_fei;
+
+ ASSERT (NULL != dst_prefix);
+
+ ip_prefix_to_fib_prefix (dst_prefix, &dst_fib_prefix);
+
+ /*
+ * lookup the destination prefix in the VRF table and retrieve the
+ * LISP associated data
+ */
+ dst_fei = fib_table_lookup_exact_match (dst_fib_index, &dst_fib_prefix);
+
+ /*
+ * If the FIB entry is not present, or not LISP sourced, add it
+ */
+ if (dst_fei == FIB_NODE_INDEX_INVALID ||
+ NULL == fib_entry_get_source_data (dst_fei, FIB_SOURCE_LISP))
+ {
+ dpo_id_t src_lkup_dpo = DPO_INVALID;
+
+ /* create a new src FIB. */
+ src_fib_index =
+ fib_table_create_and_lock (dst_fib_prefix.fp_proto,
+ "LISP-src for [%d,%U]",
+ dst_fib_index,
+ format_fib_prefix, &dst_fib_prefix);
+ /*
+ * add src fib default route
+ */
+ fib_prefix_t prefix = {
+ .fp_proto = dst_fib_prefix.fp_proto,
+ };
+ fib_table_entry_special_dpo_add (src_fib_index, &prefix,
+ FIB_SOURCE_LISP,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ lisp_cp_dpo_get (fib_proto_to_dpo
+ (dst_fib_prefix.fp_proto)));
+ /*
+ * create a data-path object to perform the source address lookup
+ * in the SRC FIB
+ */
+ lookup_dpo_add_or_lock_w_fib_index (src_fib_index,
+ (ip_prefix_version (dst_prefix) ==
+ IP6 ? DPO_PROTO_IP6 :
+ DPO_PROTO_IP4),
+ LOOKUP_INPUT_SRC_ADDR,
+ LOOKUP_TABLE_FROM_CONFIG,
+ &src_lkup_dpo);
+
+ /*
+ * add the entry to the destination FIB that uses the lookup DPO
+ */
+ dst_fei = fib_table_entry_special_dpo_add (dst_fib_index,
+ &dst_fib_prefix,
+ FIB_SOURCE_LISP,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ &src_lkup_dpo);
+
+ /*
+ * the DPO is locked by the FIB entry, and we have no further
+ * need for it.
+ */
+ dpo_unlock (&src_lkup_dpo);
+
+ /*
+ * save the SRC FIB index on the entry so we can retrieve it for
+ * subsequent routes.
+ */
+ fib_entry_set_source_data (dst_fei, FIB_SOURCE_LISP, &src_fib_index);
+ }
+ else
+ {
+ /*
+ * destination FIB entry already present
+ */
+ src_fib_index = *(u32 *) fib_entry_get_source_data (dst_fei,
+ FIB_SOURCE_LISP);
+ }
+
+ return (src_fib_index);
+}
+
+/**
+ * @brief Del route to IP4 or IP6 SD FIB.
+ *
+ * Remove routes from both destination and source FIBs.
+ *
+ * @param[in] src_fib_index The index/ID of the SRC FIB
+ * @param[in] src_prefix Source IP prefix.
+ * @param[in] dst_fib_index The index/ID of the DST FIB
+ * @param[in] dst_prefix Destination IP prefix.
+ */
+static void
+ip_src_dst_fib_del_route (u32 src_fib_index,
+ const ip_prefix_t * src_prefix,
+ u32 dst_fib_index, const ip_prefix_t * dst_prefix)
+{
+ fib_prefix_t dst_fib_prefix, src_fib_prefix;
+ u8 have_default = 0;
+ u32 n_entries;
+
+ ASSERT (NULL != dst_prefix);
+ ASSERT (NULL != src_prefix);
+
+ ip_prefix_to_fib_prefix (dst_prefix, &dst_fib_prefix);
+ ip_prefix_to_fib_prefix (src_prefix, &src_fib_prefix);
+
+ fib_table_entry_delete (src_fib_index, &src_fib_prefix, FIB_SOURCE_LISP);
+
+ /* check if only default left or empty */
+ fib_prefix_t default_pref = {
+ .fp_proto = dst_fib_prefix.fp_proto
+ };
+
+ if (fib_table_lookup_exact_match (src_fib_index,
+ &default_pref) != FIB_NODE_INDEX_INVALID)
+ have_default = 1;
+
+ n_entries = fib_table_get_num_entries (src_fib_index,
+ src_fib_prefix.fp_proto,
+ FIB_SOURCE_LISP);
+ if (n_entries == 0 || (have_default && n_entries == 1))
+ {
+ /*
+ * remove src FIB default route
+ */
+ if (have_default)
+ fib_table_entry_special_remove (src_fib_index, &default_pref,
+ FIB_SOURCE_LISP);
+
+ /*
+ * there's nothing left now, unlock the source FIB and the
+ * destination route
+ */
+ fib_table_entry_special_remove (dst_fib_index,
+ &dst_fib_prefix, FIB_SOURCE_LISP);
+ fib_table_unlock (src_fib_index, src_fib_prefix.fp_proto);
+ }
+}
+
+/**
+ * @brief Add route to IP4 or IP6 SRC FIB.
+ *
+ * Adds a route to the LISP SRC FIB with the result of the route
+ * being the DPO passed.
+ *
+ * @param[in] src_fib_index The index/ID of the SRC FIB
+ * @param[in] src_prefix Source IP prefix.
+ * @param[in] src_dpo The DPO the route will link to.
+ */
+static void
+ip_src_fib_add_route_w_dpo (u32 src_fib_index,
+ const ip_prefix_t * src_prefix,
+ const dpo_id_t * src_dpo)
+{
+ fib_prefix_t src_fib_prefix;
+
+ ip_prefix_to_fib_prefix (src_prefix, &src_fib_prefix);
+
+ /*
+ * add the entry into the source fib.
+ */
+ fib_node_index_t src_fei;
+
+ src_fei = fib_table_lookup_exact_match (src_fib_index, &src_fib_prefix);
+
+ if (FIB_NODE_INDEX_INVALID == src_fei ||
+ !fib_entry_is_sourced (src_fei, FIB_SOURCE_LISP))
+ {
+ fib_table_entry_special_dpo_add (src_fib_index,
+ &src_fib_prefix,
+ FIB_SOURCE_LISP,
+ FIB_ENTRY_FLAG_EXCLUSIVE, src_dpo);
+ }
+}
+
+static fib_route_path_t *
+lisp_gpe_mk_fib_paths (const lisp_fwd_path_t * paths)
+{
+ const lisp_gpe_adjacency_t *ladj;
+ fib_route_path_t *rpaths = NULL;
+ u8 best_priority;
+ u32 ii;
+
+ vec_validate (rpaths, vec_len (paths) - 1);
+
+ best_priority = paths[0].priority;
+
+ vec_foreach_index (ii, paths)
+ {
+    if (paths[ii].priority != best_priority)
+ break;
+
+ ladj = lisp_gpe_adjacency_get (paths[ii].lisp_adj);
+
+ ip_address_to_46 (&ladj->remote_rloc,
+ &rpaths[ii].frp_addr, &rpaths[ii].frp_proto);
+
+ rpaths[ii].frp_sw_if_index = ladj->sw_if_index;
+ rpaths[ii].frp_weight = (paths[ii].weight ? paths[ii].weight : 1);
+ }
+
+ ASSERT (0 != vec_len (rpaths));
+
+ return (rpaths);
+}
+
+/**
+ * @brief Add route to IP4 or IP6 SRC FIB.
+ *
+ * Adds a route to the LISP SRC FIB for the tunnel.
+ *
+ * @param[in] src_fib_index The index/ID of the SRC FIB
+ * @param[in] src_prefix Source IP prefix.
+ * @param[in] paths The paths from which to construct the
+ * load balance
+ */
+static void
+ip_src_fib_add_route (u32 src_fib_index,
+ const ip_prefix_t * src_prefix,
+ const lisp_fwd_path_t * paths)
+{
+ fib_prefix_t src_fib_prefix;
+ fib_route_path_t *rpaths;
+
+ ip_prefix_to_fib_prefix (src_prefix, &src_fib_prefix);
+
+ rpaths = lisp_gpe_mk_fib_paths (paths);
+
+ fib_table_entry_update (src_fib_index,
+ &src_fib_prefix,
+ FIB_SOURCE_LISP, FIB_ENTRY_FLAG_NONE, rpaths);
+ vec_free (rpaths);
+}
+
+
+static void
+create_fib_entries (lisp_gpe_fwd_entry_t * lfe)
+{
+ dpo_proto_t dproto;
+
+ dproto = (ip_prefix_version (&lfe->key->rmt.ippref) == IP4 ?
+ DPO_PROTO_IP4 : DPO_PROTO_IP6);
+
+ lfe->src_fib_index = ip_dst_fib_add_route (lfe->eid_fib_index,
+ &lfe->key->rmt.ippref);
+
+ if (LISP_GPE_FWD_ENTRY_TYPE_NEGATIVE == lfe->type)
+ {
+ dpo_id_t dpo = DPO_INVALID;
+
+ switch (lfe->action)
+ {
+ case LISP_NO_ACTION:
+ /* TODO update timers? */
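+	  /* fall through */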
+ case LISP_FORWARD_NATIVE:
+ /* TODO check if route/next-hop for eid exists in fib and add
+ * more specific for the eid with the next-hop found */
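+	  /* fall through */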
+ case LISP_SEND_MAP_REQUEST:
+ /* insert tunnel that always sends map-request */
+ dpo_copy (&dpo, lisp_cp_dpo_get (dproto));
+ break;
+ case LISP_DROP:
+ /* for drop fwd entries, just add route, no need to add encap tunnel */
+ dpo_copy (&dpo, drop_dpo_get (dproto));
+ break;
+ }
+ ip_src_fib_add_route_w_dpo (lfe->src_fib_index,
+ &lfe->key->lcl.ippref, &dpo);
+ dpo_reset (&dpo);
+ }
+ else
+ {
+ ip_src_fib_add_route (lfe->src_fib_index,
+ &lfe->key->lcl.ippref, lfe->paths);
+ }
+}
+
+static void
+delete_fib_entries (lisp_gpe_fwd_entry_t * lfe)
+{
+ ip_src_dst_fib_del_route (lfe->src_fib_index,
+ &lfe->key->lcl.ippref,
+ lfe->eid_fib_index, &lfe->key->rmt.ippref);
+}
+
+static void
+gid_to_dp_address (gid_address_t * g, dp_address_t * d)
+{
+ switch (gid_address_type (g))
+ {
+ case GID_ADDR_IP_PREFIX:
+ case GID_ADDR_SRC_DST:
+ ip_prefix_copy (&d->ippref, &gid_address_ippref (g));
+ d->type = FID_ADDR_IP_PREF;
+ break;
+ case GID_ADDR_MAC:
+ default:
+ mac_copy (&d->mac, &gid_address_mac (g));
+ d->type = FID_ADDR_MAC;
+ break;
+ }
+}
+
+static lisp_gpe_fwd_entry_t *
+find_fwd_entry (lisp_gpe_main_t * lgm,
+ vnet_lisp_gpe_add_del_fwd_entry_args_t * a,
+ lisp_gpe_fwd_entry_key_t * key)
+{
+ uword *p;
+
+ memset (key, 0, sizeof (*key));
+
+ if (GID_ADDR_IP_PREFIX == gid_address_type (&a->rmt_eid))
+ {
+ /*
+       * the IP version of the source is not set when the source is
+       * all zeros; force it to match that of the remote EID.
+ */
+ ip_prefix_version (&gid_address_ippref (&a->lcl_eid)) =
+ ip_prefix_version (&gid_address_ippref (&a->rmt_eid));
+ }
+
+ gid_to_dp_address (&a->rmt_eid, &key->rmt);
+ gid_to_dp_address (&a->lcl_eid, &key->lcl);
+ key->vni = a->vni;
+
+ p = hash_get_mem (lgm->lisp_gpe_fwd_entries, key);
+
+ if (NULL != p)
+ {
+ return (pool_elt_at_index (lgm->lisp_fwd_entry_pool, p[0]));
+ }
+ return (NULL);
+}
+
+static int
+lisp_gpe_fwd_entry_path_sort (void *a1, void *a2)
+{
+ lisp_fwd_path_t *p1 = a1, *p2 = a2;
+
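+  /* ascending sort: numerically lower priority values (better paths) first */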
+ return (p1->priority - p2->priority);
+}
+
+static void
+lisp_gpe_fwd_entry_mk_paths (lisp_gpe_fwd_entry_t * lfe,
+ vnet_lisp_gpe_add_del_fwd_entry_args_t * a)
+{
+ lisp_fwd_path_t *path;
+ u32 index;
+
+ vec_validate (lfe->paths, vec_len (a->locator_pairs) - 1);
+
+ vec_foreach_index (index, a->locator_pairs)
+ {
+ path = &lfe->paths[index];
+
+ path->priority = a->locator_pairs[index].priority;
+ path->weight = a->locator_pairs[index].weight;
+
+ path->lisp_adj =
+ lisp_gpe_adjacency_find_or_create_and_lock (&a->locator_pairs
+ [index],
+ a->dp_table, lfe->key->vni);
+ }
+ vec_sort_with_function (lfe->paths, lisp_gpe_fwd_entry_path_sort);
+}
+
+/**
+ * @brief Add LISP IP forwarding entry.
+ *
+ * Creates forwarding entries for the IP LISP overlay.
+ *
+ * @param[in] lgm Reference to @ref lisp_gpe_main_t.
+ * @param[in] a Parameters for building the forwarding entry.
+ *
+ * @return 0 on success.
+ */
+static int
+add_ip_fwd_entry (lisp_gpe_main_t * lgm,
+ vnet_lisp_gpe_add_del_fwd_entry_args_t * a)
+{
+ lisp_gpe_fwd_entry_key_t key;
+ lisp_gpe_fwd_entry_t *lfe;
+ fib_protocol_t fproto;
+
+ lfe = find_fwd_entry (lgm, a, &key);
+
+ if (NULL != lfe)
+ /* don't support updates */
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ pool_get (lgm->lisp_fwd_entry_pool, lfe);
+ memset (lfe, 0, sizeof (*lfe));
+ lfe->key = clib_mem_alloc (sizeof (key));
+ memcpy (lfe->key, &key, sizeof (key));
+
+ hash_set_mem (lgm->lisp_gpe_fwd_entries, lfe->key,
+ lfe - lgm->lisp_fwd_entry_pool);
+
+ fproto = (IP4 == ip_prefix_version (&fid_addr_ippref (&lfe->key->rmt)) ?
+ FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6);
+
+ lfe->type = (a->is_negative ?
+ LISP_GPE_FWD_ENTRY_TYPE_NEGATIVE :
+ LISP_GPE_FWD_ENTRY_TYPE_NORMAL);
+ lfe->tenant = lisp_gpe_tenant_find_or_create (lfe->key->vni);
+ lfe->eid_table_id = a->table_id;
+ lfe->eid_fib_index = fib_table_find_or_create_and_lock (fproto,
+ lfe->eid_table_id);
+
+ if (LISP_GPE_FWD_ENTRY_TYPE_NEGATIVE != lfe->type)
+ {
+ lisp_gpe_fwd_entry_mk_paths (lfe, a);
+ }
+
+ create_fib_entries (lfe);
+
+ return (0);
+}
+
+static void
+del_ip_fwd_entry_i (lisp_gpe_main_t * lgm, lisp_gpe_fwd_entry_t * lfe)
+{
+ lisp_fwd_path_t *path;
+ fib_protocol_t fproto;
+
+ vec_foreach (path, lfe->paths)
+ {
+ lisp_gpe_adjacency_unlock (path->lisp_adj);
+ }
+
+ delete_fib_entries (lfe);
+
+ fproto = (IP4 == ip_prefix_version (&fid_addr_ippref (&lfe->key->rmt)) ?
+ FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6);
+ fib_table_unlock (lfe->eid_fib_index, fproto);
+
+ hash_unset_mem (lgm->lisp_gpe_fwd_entries, lfe->key);
+ clib_mem_free (lfe->key);
+ pool_put (lgm->lisp_fwd_entry_pool, lfe);
+}
+
+/**
+ * @brief Delete LISP IP forwarding entry.
+ *
+ * Removes forwarding entries for the IP LISP overlay.
+ *
+ * @param[in] lgm Reference to @ref lisp_gpe_main_t.
+ * @param[in] a Parameters for building the forwarding entry.
+ *
+ * @return 0 on success.
+ */
+static int
+del_ip_fwd_entry (lisp_gpe_main_t * lgm,
+ vnet_lisp_gpe_add_del_fwd_entry_args_t * a)
+{
+ lisp_gpe_fwd_entry_key_t key;
+ lisp_gpe_fwd_entry_t *lfe;
+
+ lfe = find_fwd_entry (lgm, a, &key);
+
+ if (NULL == lfe)
+ /* no such entry */
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ del_ip_fwd_entry_i (lgm, lfe);
+
+ return (0);
+}
+
+static void
+make_mac_fib_key (BVT (clib_bihash_kv) * kv, u16 bd_index, u8 src_mac[6],
+ u8 dst_mac[6])
+{
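+  /*
+   * key layout: word0 = bd_index (top 16 bits) | dst_mac (low 48 bits),
+   * word1 = src_mac, word2 = 0
+   */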
+ kv->key[0] = (((u64) bd_index) << 48) | mac_to_u64 (dst_mac);
+ kv->key[1] = mac_to_u64 (src_mac);
+ kv->key[2] = 0;
+}
+
+/**
+ * @brief Lookup L2 SD FIB entry
+ *
+ * Does a vni + dest + source lookup in the L2 LISP FIB. If the lookup fails
+ * it tries a second time with source set to 0 (i.e., a simple dest lookup).
+ *
+ * @param[in] lgm Reference to @ref lisp_gpe_main_t.
+ * @param[in] bd_index Bridge domain index.
+ * @param[in] src_mac Source mac address.
+ * @param[in] dst_mac Destination mac address.
+ *
+ * @return index of mapping matching the lookup key.
+ */
+index_t
+lisp_l2_fib_lookup (lisp_gpe_main_t * lgm, u16 bd_index, u8 src_mac[6],
+ u8 dst_mac[6])
+{
+ int rv;
+ BVT (clib_bihash_kv) kv, value;
+
+ make_mac_fib_key (&kv, bd_index, src_mac, dst_mac);
+ rv = BV (clib_bihash_search_inline_2) (&lgm->l2_fib, &kv, &value);
+
+ /* no match, try with src 0, catch all for dst */
+ if (rv != 0)
+ {
+ kv.key[1] = 0;
+ rv = BV (clib_bihash_search_inline_2) (&lgm->l2_fib, &kv, &value);
+ if (rv == 0)
+ return value.value;
+ }
+ else
+ return value.value;
+
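+  /* total miss: return the load-balance that punts to the control-plane */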
+ return lisp_gpe_main.l2_lb_cp_lkup.dpoi_index;
+}
+
+/**
+ * @brief Add/del L2 SD FIB entry
+ *
+ * Inserts value in L2 FIB keyed by vni + dest + source. If entry is
+ * overwritten the associated value is returned.
+ *
+ * @param[in]   bd_index        Bridge domain index.
+ * @param[in]   src_mac         Source mac address.
+ * @param[in]   dst_mac         Destination mac address.
+ * @param[in]   dpo             DPO whose index is stored as the value.
+ * @param[in]   is_add          Add/del flag.
+ *
+ * @return ~0 or value of overwritten entry.
+ */
+static u32
+lisp_l2_fib_add_del_entry (u16 bd_index, u8 src_mac[6],
+ u8 dst_mac[6], const dpo_id_t * dpo, u8 is_add)
+{
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+ BVT (clib_bihash_kv) kv, value;
+ u32 old_val = ~0;
+
+ make_mac_fib_key (&kv, bd_index, src_mac, dst_mac);
+
+ if (BV (clib_bihash_search) (&lgm->l2_fib, &kv, &value) == 0)
+ old_val = value.value;
+
+ if (!is_add)
+ BV (clib_bihash_add_del) (&lgm->l2_fib, &kv, 0 /* is_add */ );
+ else
+ {
+ kv.value = dpo->dpoi_index;
+ BV (clib_bihash_add_del) (&lgm->l2_fib, &kv, 1 /* is_add */ );
+ }
+ return old_val;
+}
+
+#define L2_FIB_DEFAULT_HASH_NUM_BUCKETS (64 * 1024)
+#define L2_FIB_DEFAULT_HASH_MEMORY_SIZE (32<<20)
+
+static void
+l2_fib_init (lisp_gpe_main_t * lgm)
+{
+ index_t lbi;
+
+ BV (clib_bihash_init) (&lgm->l2_fib, "l2 fib",
+ 1 << max_log2 (L2_FIB_DEFAULT_HASH_NUM_BUCKETS),
+ L2_FIB_DEFAULT_HASH_MEMORY_SIZE);
+
+ /*
+ * the result from a 'miss' in a L2 Table
+ */
+ lbi = load_balance_create (1, DPO_PROTO_ETHERNET, 0);
+ load_balance_set_bucket (lbi, 0, lisp_cp_dpo_get (DPO_PROTO_ETHERNET));
+
+ dpo_set (&lgm->l2_lb_cp_lkup, DPO_LOAD_BALANCE, DPO_PROTO_ETHERNET, lbi);
+}
+
+static void
+del_l2_fwd_entry_i (lisp_gpe_main_t * lgm, lisp_gpe_fwd_entry_t * lfe)
+{
+ lisp_fwd_path_t *path;
+
+ if (LISP_GPE_FWD_ENTRY_TYPE_NEGATIVE != lfe->type)
+ {
+ vec_foreach (path, lfe->paths)
+ {
+ lisp_gpe_adjacency_unlock (path->lisp_adj);
+ }
+ fib_path_list_child_remove (lfe->l2.path_list_index,
+ lfe->l2.child_index);
+ }
+
+ lisp_l2_fib_add_del_entry (lfe->l2.eid_bd_index,
+ fid_addr_mac (&lfe->key->lcl),
+ fid_addr_mac (&lfe->key->rmt), NULL, 0);
+
+ hash_unset_mem (lgm->lisp_gpe_fwd_entries, lfe->key);
+ clib_mem_free (lfe->key);
+ pool_put (lgm->lisp_fwd_entry_pool, lfe);
+}
+
+/**
+ * @brief Delete LISP L2 forwarding entry.
+ *
+ * Coordinates the removal of forwarding entries for the L2 LISP overlay.
+ *
+ * @param[in] lgm Reference to @ref lisp_gpe_main_t.
+ * @param[in] a Parameters for building the forwarding entry.
+ *
+ * @return 0 on success.
+ */
+static int
+del_l2_fwd_entry (lisp_gpe_main_t * lgm,
+ vnet_lisp_gpe_add_del_fwd_entry_args_t * a)
+{
+ lisp_gpe_fwd_entry_key_t key;
+ lisp_gpe_fwd_entry_t *lfe;
+
+ lfe = find_fwd_entry (lgm, a, &key);
+
+ if (NULL == lfe)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ del_l2_fwd_entry_i (lgm, lfe);
+
+ return (0);
+}
+
+/**
+ * @brief Construct and insert the forwarding information used by an L2 entry
+ */
+static void
+lisp_gpe_l2_update_fwding (lisp_gpe_fwd_entry_t * lfe)
+{
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+ dpo_id_t dpo = DPO_INVALID;
+
+ if (LISP_GPE_FWD_ENTRY_TYPE_NEGATIVE != lfe->type)
+ {
+ fib_path_list_contribute_forwarding (lfe->l2.path_list_index,
+ FIB_FORW_CHAIN_TYPE_ETHERNET,
+ &lfe->l2.dpo);
+ dpo_copy (&dpo, &lfe->l2.dpo);
+ }
+ else
+ {
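+      /* negative entries forward via the control-plane punt load-balance */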
+ dpo_copy (&dpo, &lgm->l2_lb_cp_lkup);
+ }
+
+ /* add entry to l2 lisp fib */
+ lisp_l2_fib_add_del_entry (lfe->l2.eid_bd_index,
+ fid_addr_mac (&lfe->key->lcl),
+ fid_addr_mac (&lfe->key->rmt), &dpo, 1);
+
+ dpo_reset (&dpo);
+}
+
+/**
+ * @brief Add LISP L2 forwarding entry.
+ *
+ * Coordinates the creation of forwarding entries for L2 LISP overlay:
+ * creates lisp-gpe tunnel and injects new entry in Source/Dest L2 FIB.
+ *
+ * @param[in] lgm Reference to @ref lisp_gpe_main_t.
+ * @param[in] a Parameters for building the forwarding entry.
+ *
+ * @return 0 on success.
+ */
+static int
+add_l2_fwd_entry (lisp_gpe_main_t * lgm,
+ vnet_lisp_gpe_add_del_fwd_entry_args_t * a)
+{
+ lisp_gpe_fwd_entry_key_t key;
+ bd_main_t *bdm = &bd_main;
+ lisp_gpe_fwd_entry_t *lfe;
+ uword *bd_indexp;
+
+ bd_indexp = hash_get (bdm->bd_index_by_bd_id, a->bd_id);
+ if (!bd_indexp)
+ {
+ clib_warning ("bridge domain %d doesn't exist", a->bd_id);
+ return -1;
+ }
+
+ lfe = find_fwd_entry (lgm, a, &key);
+
+ if (NULL != lfe)
+ /* don't support updates */
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ pool_get (lgm->lisp_fwd_entry_pool, lfe);
+ memset (lfe, 0, sizeof (*lfe));
+ lfe->key = clib_mem_alloc (sizeof (key));
+ memcpy (lfe->key, &key, sizeof (key));
+
+ hash_set_mem (lgm->lisp_gpe_fwd_entries, lfe->key,
+ lfe - lgm->lisp_fwd_entry_pool);
+
+ lfe->type = (a->is_negative ?
+ LISP_GPE_FWD_ENTRY_TYPE_NEGATIVE :
+ LISP_GPE_FWD_ENTRY_TYPE_NORMAL);
+ lfe->l2.eid_bd_id = a->bd_id;
+ lfe->l2.eid_bd_index = bd_indexp[0];
+ lfe->tenant = lisp_gpe_tenant_find_or_create (lfe->key->vni);
+
+ if (LISP_GPE_FWD_ENTRY_TYPE_NEGATIVE != lfe->type)
+ {
+ fib_route_path_t *rpaths;
+
+ /*
+ * Make the sorted array of LISP paths with their resp. adjacency
+ */
+ lisp_gpe_fwd_entry_mk_paths (lfe, a);
+
+ /*
+ * From the LISP paths, construct a FIB path list that will
+ * contribute a load-balance.
+ */
+ rpaths = lisp_gpe_mk_fib_paths (lfe->paths);
+
+ lfe->l2.path_list_index =
+ fib_path_list_create (FIB_PATH_LIST_FLAG_NONE, rpaths);
+
+ /*
+ * become a child of the path-list so we receive updates when
+ * its forwarding state changes. this includes an implicit lock.
+ */
+ lfe->l2.child_index =
+ fib_path_list_child_add (lfe->l2.path_list_index,
+ FIB_NODE_TYPE_LISP_GPE_FWD_ENTRY,
+ lfe - lgm->lisp_fwd_entry_pool);
+ }
+ else
+ {
+ lfe->action = a->action;
+ }
+
+ lisp_gpe_l2_update_fwding (lfe);
+
+ return 0;
+}
+
+/**
+ * @brief Convert from the embedded fib_node_t struct to the LISP entry
+ */
+static lisp_gpe_fwd_entry_t *
+lisp_gpe_fwd_entry_from_fib_node (fib_node_t * node)
+{
+ return ((lisp_gpe_fwd_entry_t *) (((char *) node) -
+ STRUCT_OFFSET_OF (lisp_gpe_fwd_entry_t,
+ node)));
+}
+
+/**
+ * @brief Function invoked during a backwalk of the FIB graph
+ */
+static fib_node_back_walk_rc_t
+lisp_gpe_fib_node_back_walk (fib_node_t * node,
+ fib_node_back_walk_ctx_t * ctx)
+{
+ lisp_gpe_l2_update_fwding (lisp_gpe_fwd_entry_from_fib_node (node));
+
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/**
+ * @brief Get a fib_node_t struct from the index of a LISP fwd entry
+ */
+static fib_node_t *
+lisp_gpe_fwd_entry_get_fib_node (fib_node_index_t index)
+{
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+ lisp_gpe_fwd_entry_t *lfe;
+
+ lfe = pool_elt_at_index (lgm->lisp_fwd_entry_pool, index);
+
+ return (&(lfe->node));
+}
+
+/**
+ * @brief An indication from the graph that the last lock has gone
+ */
+static void
+lisp_gpe_fwd_entry_fib_node_last_lock_gone (fib_node_t * node)
+{
+ /* We don't manage the locks of the LISP objects via the graph, since
+   * this object has no children, so this is a no-op. */
+}
+
+/**
+ * @brief Virtual function table to register with FIB for the LISP type
+ */
+const static fib_node_vft_t lisp_fwd_vft = {
+ .fnv_get = lisp_gpe_fwd_entry_get_fib_node,
+ .fnv_last_lock = lisp_gpe_fwd_entry_fib_node_last_lock_gone,
+ .fnv_back_walk = lisp_gpe_fib_node_back_walk,
+};
+
+/**
+ * @brief Forwarding entry create/remove dispatcher.
+ *
+ * Calls l2 or l3 forwarding entry add/del function based on input data.
+ *
+ * @param[in] a Forwarding entry parameters.
+ * @param[out] hw_if_indexp NOT USED
+ *
+ * @return 0 on success.
+ */
+int
+vnet_lisp_gpe_add_del_fwd_entry (vnet_lisp_gpe_add_del_fwd_entry_args_t * a,
+ u32 * hw_if_indexp)
+{
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+ u8 type;
+
+ if (vnet_lisp_gpe_enable_disable_status () == 0)
+ {
+ clib_warning ("LISP is disabled!");
+ return VNET_API_ERROR_LISP_DISABLED;
+ }
+
+ type = gid_address_type (&a->rmt_eid);
+ switch (type)
+ {
+ case GID_ADDR_IP_PREFIX:
+ if (a->is_add)
+ return add_ip_fwd_entry (lgm, a);
+ else
+ return del_ip_fwd_entry (lgm, a);
+ break;
+ case GID_ADDR_MAC:
+ if (a->is_add)
+ return add_l2_fwd_entry (lgm, a);
+ else
+ return del_l2_fwd_entry (lgm, a);
+ default:
+ clib_warning ("Forwarding entries for type %d not supported!", type);
+ return -1;
+ }
+}
+
+/**
+ * @brief Flush all the forwarding entries
+ */
+void
+vnet_lisp_gpe_fwd_entry_flush (void)
+{
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+ lisp_gpe_fwd_entry_t *lfe;
+
+ /* *INDENT-OFF* */
+ pool_foreach (lfe, lgm->lisp_fwd_entry_pool,
+ ({
+ switch (fid_addr_type(&lfe->key->rmt))
+ {
+ case FID_ADDR_MAC:
+ del_l2_fwd_entry_i (lgm, lfe);
+ break;
+ case FID_ADDR_IP_PREF:
+ del_ip_fwd_entry_i (lgm, lfe);
+ break;
+ }
+ }));
+ /* *INDENT-ON* */
+}
+
+static u8 *
+format_lisp_fwd_path (u8 * s, va_list ap)
+{
+ lisp_fwd_path_t *lfp = va_arg (ap, lisp_fwd_path_t *);
+
+ s = format (s, "priority:%d weight:%d ", lfp->priority, lfp->weight);
+ s = format (s, "adj:[%U]\n",
+ format_lisp_gpe_adjacency,
+ lisp_gpe_adjacency_get (lfp->lisp_adj),
+ LISP_GPE_ADJ_FORMAT_FLAG_NONE);
+
+ return (s);
+}
+
+typedef enum lisp_gpe_fwd_entry_format_flag_t_
+{
+ LISP_GPE_FWD_ENTRY_FORMAT_NONE = (0 << 0),
+ LISP_GPE_FWD_ENTRY_FORMAT_DETAIL = (1 << 1),
+} lisp_gpe_fwd_entry_format_flag_t;
+
+
+static u8 *
+format_lisp_gpe_fwd_entry (u8 * s, va_list ap)
+{
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+ lisp_gpe_fwd_entry_t *lfe = va_arg (ap, lisp_gpe_fwd_entry_t *);
+ lisp_gpe_fwd_entry_format_flag_t flags =
+ va_arg (ap, lisp_gpe_fwd_entry_format_flag_t);
+
+ s = format (s, "VNI:%d VRF:%d EID: %U -> %U [index:%d]",
+ lfe->key->vni, lfe->eid_table_id,
+ format_fid_address, &lfe->key->lcl,
+ format_fid_address, &lfe->key->rmt,
+ lfe - lgm->lisp_fwd_entry_pool);
+
+ if (LISP_GPE_FWD_ENTRY_TYPE_NEGATIVE == lfe->type)
+ {
+ s = format (s, "\n Negative - action:%U",
+ format_negative_mapping_action, lfe->action);
+ }
+ else
+ {
+ lisp_fwd_path_t *path;
+
+ s = format (s, "\n via:");
+ vec_foreach (path, lfe->paths)
+ {
+ s = format (s, "\n %U", format_lisp_fwd_path, path);
+ }
+ }
+
+ if (flags & LISP_GPE_FWD_ENTRY_FORMAT_DETAIL)
+ {
+ switch (fid_addr_type (&lfe->key->rmt))
+ {
+ case FID_ADDR_MAC:
+ s = format (s, " fib-path-list:%d\n", lfe->l2.path_list_index);
+ s = format (s, " dpo:%U\n", format_dpo_id, &lfe->l2.dpo, 0);
+ break;
+ case FID_ADDR_IP_PREF:
+ break;
+ }
+ }
+
+ return (s);
+}
+
+static clib_error_t *
+lisp_gpe_fwd_entry_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+ lisp_gpe_fwd_entry_t *lfe;
+ index_t index;
+ u32 vni = ~0;
+
+ if (unformat (input, "vni %d", &vni))
+ ;
+ else if (unformat (input, "%d", &index))
+ {
+ if (!pool_is_free_index (lgm->lisp_fwd_entry_pool, index))
+ {
+ lfe = pool_elt_at_index (lgm->lisp_fwd_entry_pool, index);
+
+ vlib_cli_output (vm, "[%d@] %U",
+ index,
+ format_lisp_gpe_fwd_entry, lfe,
+ LISP_GPE_FWD_ENTRY_FORMAT_DETAIL);
+ }
+ else
+ {
+ vlib_cli_output (vm, "entry %d invalid", index);
+ }
+
+ return (NULL);
+ }
+
+ /* *INDENT-OFF* */
+ pool_foreach (lfe, lgm->lisp_fwd_entry_pool,
+ ({
+ if ((vni == ~0) ||
+ (lfe->key->vni == vni))
+ vlib_cli_output (vm, "%U", format_lisp_gpe_fwd_entry, lfe,
+ LISP_GPE_FWD_ENTRY_FORMAT_NONE);
+ }));
+ /* *INDENT-ON* */
+
+ return (NULL);
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_gpe_fwd_entry_show_command, static) = {
+ .path = "show lisp gpe entry",
+  .short_help = "show lisp gpe entry [vni <vni>] [<index>]",
+ .function = lisp_gpe_fwd_entry_show,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+lisp_gpe_fwd_entry_init (vlib_main_t * vm)
+{
+ lisp_gpe_main_t *lgm = &lisp_gpe_main;
+ clib_error_t *error = NULL;
+
+ if ((error = vlib_call_init_function (vm, lisp_cp_dpo_module_init)))
+ return (error);
+
+ l2_fib_init (lgm);
+
+ fib_node_register_type (FIB_NODE_TYPE_LISP_GPE_FWD_ENTRY, &lisp_fwd_vft);
+
+ return (error);
+}
+
+VLIB_INIT_FUNCTION (lisp_gpe_fwd_entry_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.h b/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.h
new file mode 100644
index 00000000000..f79236711ea
--- /dev/null
+++ b/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief LISP-GPE definitions.
+ */
+
+#ifndef __LISP_GPE_FWD_ENTRY_H__
+#define __LISP_GPE_FWD_ENTRY_H__
+
+#include <vnet/lisp-gpe/lisp_gpe.h>
+
+/**
+ * @brief A path on which to forward lisp traffic
+ */
+typedef struct lisp_fwd_path_t_
+{
+ /**
+ * The adjacency constructed for the locator pair
+ */
+ index_t lisp_adj;
+
+ /**
+ * Priority. Only the paths with the best priority will be installed in FIB
+ */
+ u8 priority;
+
+ /**
+   * [UE]CMP weight for the path
+ */
+ u8 weight;
+
+} lisp_fwd_path_t;
+
+/**
+ * @brief A Forwarding entry can be 'normal' or 'negative'
+ * Negative implies we deliberately want to add a FIB entry for an EID
+ * that results in 'special' behaviour determined by an 'action'.
+ * @normal means send it down some tunnels.
+ */
+typedef enum lisp_gpe_fwd_entry_type_t_
+{
+ LISP_GPE_FWD_ENTRY_TYPE_NORMAL,
+ LISP_GPE_FWD_ENTRY_TYPE_NEGATIVE,
+} lisp_gpe_fwd_entry_type_t;
+
+
+/**
+ * LISP-GPE fwd entry key
+ */
+typedef struct lisp_gpe_fwd_entry_key_t_
+{
+ dp_address_t rmt;
+ dp_address_t lcl;
+ u32 vni;
+} lisp_gpe_fwd_entry_key_t;
+
+/**
+ * @brief A LISP Forwarding Entry
+ *
+ * A forwarding entry maps a local EID to a remote EID over a set of RLOC pairs.
+ */
+typedef struct lisp_gpe_fwd_entry_t_
+{
+ /**
+   * This object joins the FIB control plane graph to receive updates
+   * for changes to the graph.
+ */
+ fib_node_t node;
+
+ /**
+   * The entry's key: {l-EID, r-EID, VNI}
+ */
+ lisp_gpe_fwd_entry_key_t *key;
+
+ /**
+ * The forwarding entry type
+ */
+ lisp_gpe_fwd_entry_type_t type;
+
+ /**
+ * The tenant the entry belongs to
+ */
+ u32 tenant;
+
+ /**
+ * The VRF (in the case of L3) or Bridge-Domain (for L2) index
+ */
+ union
+ {
+ /**
+     * Fields relevant to an L3 entry
+ */
+ struct
+ {
+ /**
+ * The VRF ID
+ */
+ u32 eid_table_id;
+
+ /**
+ * The FIB index for the overlay, i.e. the FIB in which the EIDs
+ * are present
+ */
+ u32 eid_fib_index;
+ /**
+       * The SRC-FIB index created for adding source-route entries
+ */
+ u32 src_fib_index;
+ };
+ /**
+ * Fields relevant to an L2 entry
+ */
+ struct
+ {
+ /**
+ * The Bridge-Domain (for L2) index
+ */
+ u32 eid_bd_id;
+
+ /**
+ * The Bridge-domain index for the overlay EIDs
+ */
+ u32 eid_bd_index;
+
+ /**
+ * The path-list created for the forwarding
+ */
+ fib_node_index_t path_list_index;
+
+ /**
+ * Child index of this entry on the path-list
+ */
+ u32 child_index;
+
+ /**
+ * The DPO used to forward
+ */
+ dpo_id_t dpo;
+ } l2;
+ };
+
+ union
+ {
+ /**
+ * @brief When the type is 'normal'
+ * The RLOC pair that form the route's paths. i.e. where to send
+ * packets for this route.
+ */
+ lisp_fwd_path_t *paths;
+
+ /**
+ * @brief When the type is negative. The action to take.
+ */
+ negative_fwd_actions_e action;
+ };
+} lisp_gpe_fwd_entry_t;
+
+extern int
+vnet_lisp_gpe_add_del_fwd_entry (vnet_lisp_gpe_add_del_fwd_entry_args_t * a,
+ u32 * hw_if_indexp);
+
+extern void vnet_lisp_gpe_fwd_entry_flush (void);
+
+extern index_t lisp_l2_fib_lookup (lisp_gpe_main_t * lgm,
+				   u16 bd_index, u8 src_mac[6],
+				   u8 dst_mac[6]);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-gpe/lisp_gpe_packet.h b/src/vnet/lisp-gpe/lisp_gpe_packet.h
new file mode 100644
index 00000000000..62ac9bd7015
--- /dev/null
+++ b/src/vnet/lisp-gpe/lisp_gpe_packet.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief LISP-GPE packet header structure
+ *
+ */
+
+#ifndef included_lisp_gpe_packet_h
+#define included_lisp_gpe_packet_h
+
+/*
+ * From draft-lewis-lisp-gpe-02.txt
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |N|L|E|V|I|P|R|O|Ver| Reserved | Next Protocol |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Instance ID/Locator-Status-Bits |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * N: The N-bit is the nonce-present bit. When this bit is set to 1,
+ * the low-order 24 bits of the first 32 bits of the LISP header
+ * contain a Nonce. See Section 6.3.1 for details. Both N- and
+ * V-bits MUST NOT be set in the same packet. If they are, a
+ * decapsulating ETR MUST treat the 'Nonce/Map-Version' field as
+ * having a Nonce value present.
+ *
+ * L: The L-bit is the 'Locator-Status-Bits' field enabled bit. When
+ * this bit is set to 1, the Locator-Status-Bits in the second
+ * 32 bits of the LISP header are in use.
+ *
+ * E: The E-bit is the echo-nonce-request bit. This bit MUST be ignored
+ * and has no meaning when the N-bit is set to 0. When the N-bit is
+ * set to 1 and this bit is set to 1, an ITR is requesting that the
+ * nonce value in the 'Nonce' field be echoed back in LISP-
+ * encapsulated packets when the ITR is also an ETR. See
+ * Section 6.3.1 for details.
+ *
+ * V: The V-bit is the Map-Version present bit. When this bit is set to
+ * 1, the N-bit MUST be 0. Refer to Section 6.6.3 for more details.
+ *
+ * I: The I-bit is the Instance ID bit. See Section 5.5 for more
+ * details. When this bit is set to 1, the 'Locator-Status-Bits'
+ * field is reduced to 8 bits and the high-order 24 bits are used as
+ * an Instance ID. If the L-bit is set to 0, then the low-order
+ * 8 bits are transmitted as zero and ignored on receipt.
+ *
+ * P Bit: Flag bit 5 is defined as the Next Protocol bit. The P bit
+ * MUST be set to 1 to indicate the presence of the 8 bit next
+ * protocol field.
+ *
+ * P = 0 indicates that the payload MUST conform to LISP as defined
+ * in [RFC6830].
+ *
+ * Flag bit 5 was chosen as the P bit because this flag bit is
+ * currently unallocated in LISP [RFC6830].
+ *
+ * O: Flag bit 7 is defined as the O bit. When the O bit is set to 1, the
+ * packet is an OAM packet and OAM processing MUST occur. The OAM
+ * protocol details are out of scope for this document. As with the
+ * P-bit, bit 7 is currently a reserved flag in [RFC6830].
+ *
+ * Next Protocol Field: The lower 8 bits of the first word are used to
+ * carry a next protocol. This next protocol field contains the
+ * protocol of the encapsulated payload packet.
+ *
+ * LISP [RFC6830] uses the lower 16 bits of the first word for either
+ * a nonce, an echo-nonce ([RFC6830]) or to support map-versioning
+ * ([RFC6834]). These are all optional capabilities that are
+ * indicated by setting the N, E, and the V bit respectively.
+ *
+ * To maintain the desired data plane compatibility, when the P bit
+ * is set, the N, E, and V bits MUST be set to zero.
+ *
+ * A new protocol registry will be requested from IANA for the Next
+ * Protocol field. This draft defines the following Next Protocol
+ * values:
+ *
+ * 0x1 : IPv4
+ * 0x2 : IPv6
+ * 0x3 : Ethernet
+ * 0x4: Network Service Header
+ */
+
+/** LISP-GPE header */
+typedef struct
+{
+ u8 flags;
+ u8 ver_res;
+ u8 res;
+ u8 next_protocol;
+ u32 iid;
+} lisp_gpe_header_t;
+
+#define foreach_lisp_gpe_flag_bit \
+_(N, 0x80) \
+_(L, 0x40) \
+_(E, 0x20) \
+_(V, 0x10) \
+_(I, 0x08) \
+_(P, 0x04) \
+_(O, 0x01)
+
+typedef enum
+{
+#define _(n,v) LISP_GPE_FLAGS_##n = v,
+ foreach_lisp_gpe_flag_bit
+#undef _
+} vnet_lisp_gpe_flag_bit_t;
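+
+/*
+ * For example, the adjacency code always sets LISP_GPE_FLAGS_P (GPE, not
+ * legacy LISP, mode) and additionally sets LISP_GPE_FLAGS_I when a
+ * non-default VNI is carried in the Instance ID field.
+ */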
+
+#define LISP_GPE_VERSION 0x0
+
+#define LISP_GPE_NEXT_PROTOCOL_IP4 0x1
+#define LISP_GPE_NEXT_PROTOCOL_IP6 0x2
+#define LISP_GPE_NEXT_PROTOCOL_ETHERNET 0x3
+#define LISP_GPE_NEXT_PROTOCOL_NSH 0x4
+
+typedef enum
+{
+ LISP_GPE_NEXT_PROTO_IP4 = 1,
+ LISP_GPE_NEXT_PROTO_IP6,
+ LISP_GPE_NEXT_PROTO_ETHERNET,
+ LISP_GPE_NEXT_PROTO_NSH,
+ LISP_GPE_NEXT_PROTOS
+} lisp_gpe_next_protocol_e;
+
+#endif /* included_lisp_gpe_packet_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-gpe/lisp_gpe_sub_interface.c b/src/vnet/lisp-gpe/lisp_gpe_sub_interface.c
new file mode 100644
index 00000000000..5b69bd157b8
--- /dev/null
+++ b/src/vnet/lisp-gpe/lisp_gpe_sub_interface.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief LISP sub-interfaces.
+ *
+ */
+#include <vnet/lisp-gpe/lisp_gpe_tenant.h>
+#include <vnet/lisp-gpe/lisp_gpe_sub_interface.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/interface.h>
+
+/**
+ * @brief Pool of all l3-sub-interfaces
+ */
+static lisp_gpe_sub_interface_t *lisp_gpe_sub_interface_pool;
+
+/**
+ * A DB of all LISP L3 sub-interfaces. The key is:{VNI,l-RLOC}
+ */
+static uword *lisp_gpe_sub_interfaces;
+
+/**
+ * A DB of all VNET L3 sub-interfaces. The key is:{VNI,l-RLOC}
+ * Used in the data-plane for interface lookup on decap.
+ */
+uword *lisp_gpe_sub_interfaces_sw_if_index;
+
+/**
+ * The next available sub-interface ID. FIXME
+ */
+static u32 lisp_gpe_sub_interface_id;
+
+
+static index_t
+lisp_gpe_sub_interface_db_find (const ip_address_t * lrloc, u32 vni)
+{
+ uword *p;
+
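+  /* the DB key holds the VNI in network byte order; callers pass host order */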
+ lisp_gpe_sub_interface_key_t key = {
+ .local_rloc = *lrloc,
+ .vni = clib_host_to_net_u32 (vni),
+ };
+
+ p = hash_get_mem (lisp_gpe_sub_interfaces, &key);
+
+ if (NULL == p)
+ return (INDEX_INVALID);
+ else
+ return (p[0]);
+}
+
+static void
+lisp_gpe_sub_interface_db_insert (const lisp_gpe_sub_interface_t * l3s)
+{
+ hash_set_mem (lisp_gpe_sub_interfaces,
+ &l3s->key, l3s - lisp_gpe_sub_interface_pool);
+ hash_set_mem (lisp_gpe_sub_interfaces_sw_if_index,
+ &l3s->key, l3s->sw_if_index);
+}
+
+static void
+lisp_gpe_sub_interface_db_remove (const lisp_gpe_sub_interface_t * l3s)
+{
+ hash_unset_mem (lisp_gpe_sub_interfaces, &l3s->key);
+ hash_unset_mem (lisp_gpe_sub_interfaces_sw_if_index, &l3s->key);
+}
+
+lisp_gpe_sub_interface_t *
+lisp_gpe_sub_interface_get_i (index_t l3si)
+{
+ return (pool_elt_at_index (lisp_gpe_sub_interface_pool, l3si));
+}
+
+static void
+lisp_gpe_sub_interface_set_table (u32 sw_if_index, u32 table_id)
+{
+ fib_node_index_t fib_index;
+
+ fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, table_id);
+ ASSERT (FIB_NODE_INDEX_INVALID != fib_index);
+
+ vec_validate (ip4_main.fib_index_by_sw_if_index, sw_if_index);
+ ip4_main.fib_index_by_sw_if_index[sw_if_index] = fib_index;
+
+ fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, table_id);
+ ASSERT (FIB_NODE_INDEX_INVALID != fib_index);
+
+ vec_validate (ip6_main.fib_index_by_sw_if_index, sw_if_index);
+ ip6_main.fib_index_by_sw_if_index[sw_if_index] = fib_index;
+}
+
+static void
+lisp_gpe_sub_interface_unset_table (u32 sw_if_index, u32 table_id)
+{
+ ip4_main.fib_index_by_sw_if_index[sw_if_index] = 0;
+ ip4_sw_interface_enable_disable (sw_if_index, 0);
+
+ ip6_main.fib_index_by_sw_if_index[sw_if_index] = 0;
+ ip6_sw_interface_enable_disable (sw_if_index, 0);
+}
+
+index_t
+lisp_gpe_sub_interface_find_or_create_and_lock (const ip_address_t * lrloc,
+ u32 overlay_table_id, u32 vni)
+{
+ lisp_gpe_sub_interface_t *l3s;
+ index_t l3si;
+
+  l3si = lisp_gpe_sub_interface_db_find (lrloc, vni);
+
+ if (INDEX_INVALID == l3si)
+ {
+ u32 main_sw_if_index, sub_sw_if_index;
+
+ /*
+ * find the main interface from the VNI
+ */
+ main_sw_if_index =
+ lisp_gpe_tenant_l3_iface_add_or_lock (vni, overlay_table_id);
+
+ vnet_sw_interface_t sub_itf_template = {
+ .type = VNET_SW_INTERFACE_TYPE_SUB,
+ .flood_class = VNET_FLOOD_CLASS_NORMAL,
+ .sup_sw_if_index = main_sw_if_index,
+ .sub.id = lisp_gpe_sub_interface_id++,
+ };
+
+ if (NULL != vnet_create_sw_interface (vnet_get_main (),
+ &sub_itf_template,
+ &sub_sw_if_index))
+ return (INDEX_INVALID);
+
+ pool_get (lisp_gpe_sub_interface_pool, l3s);
+ memset (l3s, 0, sizeof (*l3s));
+ l3s->key = clib_mem_alloc (sizeof (*l3s->key));
+ memset (l3s->key, 0, sizeof (*l3s->key));
+
+ l3s->key->local_rloc = *lrloc;
+ l3s->key->vni = clib_host_to_net_u32 (vni);
+ l3s->main_sw_if_index = main_sw_if_index;
+ l3s->sw_if_index = sub_sw_if_index;
+ l3s->eid_table_id = overlay_table_id;
+
+ l3si = (l3s - lisp_gpe_sub_interface_pool);
+
+      /* FIXME: enable when we get an adj */
+ ip6_sw_interface_enable_disable (l3s->sw_if_index, 1);
+ ip4_sw_interface_enable_disable (l3s->sw_if_index, 1);
+
+ vnet_sw_interface_set_flags (vnet_get_main (),
+ l3s->sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+
+ lisp_gpe_sub_interface_db_insert (l3s);
+ }
+ else
+ {
+ l3s = lisp_gpe_sub_interface_get_i (l3si);
+ l3s->eid_table_id = overlay_table_id;
+ }
+
+ lisp_gpe_sub_interface_set_table (l3s->sw_if_index, l3s->eid_table_id);
+ l3s->locks++;
+
+ return (l3si);
+}
+
+void
+lisp_gpe_sub_interface_unlock (index_t l3si)
+{
+ lisp_gpe_sub_interface_t *l3s;
+
+ l3s = lisp_gpe_sub_interface_get_i (l3si);
+
+ l3s->locks--;
+
+ if (0 == l3s->locks)
+ {
+ lisp_gpe_sub_interface_unset_table (l3s->sw_if_index,
+ l3s->eid_table_id);
+
+ lisp_gpe_tenant_l3_iface_unlock (clib_net_to_host_u32 (l3s->key->vni));
+ vnet_sw_interface_set_flags (vnet_get_main (), l3s->sw_if_index, 0);
+ vnet_delete_sub_interface (l3s->sw_if_index);
+
+ lisp_gpe_sub_interface_db_remove (l3s);
+
+ clib_mem_free (l3s->key);
+ pool_put (lisp_gpe_sub_interface_pool, l3s);
+ }
+}
+
+const lisp_gpe_sub_interface_t *
+lisp_gpe_sub_interface_get (index_t l3si)
+{
+ return (lisp_gpe_sub_interface_get_i (l3si));
+}
+
+u8 *
+format_lisp_gpe_sub_interface (u8 * s, va_list ap)
+{
+ lisp_gpe_sub_interface_t *l3s = va_arg (ap, lisp_gpe_sub_interface_t *);
+ vnet_main_t *vnm = vnet_get_main ();
+
+ s = format (s, "%=16U",
+ format_vnet_sw_interface_name,
+ vnm, vnet_get_sw_interface (vnm, l3s->sw_if_index));
+ s = format (s, "%=10d", clib_net_to_host_u32 (l3s->key->vni));
+ s = format (s, "%=12d", l3s->sw_if_index);
+ s = format (s, "%U", format_ip_address, &l3s->key->local_rloc);
+
+ return (s);
+}
+
+/** CLI command to show LISP-GPE interfaces. */
+static clib_error_t *
+lisp_gpe_sub_interface_show (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ lisp_gpe_sub_interface_t *l3s;
+
+ vlib_cli_output (vm, "%=16s%=10s%=12s%s", "Name", "VNI", "SW IF Index",
+ "local RLOC");
+
+ /* *INDENT-OFF* */
+ pool_foreach (l3s, lisp_gpe_sub_interface_pool,
+ ({
+ vlib_cli_output (vm, "%U", format_lisp_gpe_sub_interface, l3s);
+ }));
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_gpe_sub_interface_command) = {
+ .path = "show lisp gpe sub-interface",
+ .short_help = "show lisp gpe sub-interface",
+ .function = lisp_gpe_sub_interface_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+lisp_gpe_sub_interface_module_init (vlib_main_t * vm)
+{
+ lisp_gpe_sub_interfaces =
+ hash_create_mem (0,
+ sizeof (lisp_gpe_sub_interface_key_t), sizeof (uword));
+ lisp_gpe_sub_interfaces_sw_if_index =
+ hash_create_mem (0,
+ sizeof (lisp_gpe_sub_interface_key_t), sizeof (uword));
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (lisp_gpe_sub_interface_module_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-gpe/lisp_gpe_sub_interface.h b/src/vnet/lisp-gpe/lisp_gpe_sub_interface.h
new file mode 100644
index 00000000000..ad942f415d1
--- /dev/null
+++ b/src/vnet/lisp-gpe/lisp_gpe_sub_interface.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief LISP sub-interfaces.
+ *
+ */
+
+#ifndef __LISP_GPE_SUB_INTERFACE_H__
+#define __LISP_GPE_SUB_INTERFACE_H__
+
+#include <vnet/lisp-gpe/lisp_gpe.h>
+
+/**
+ * A key for lookup in the L3 sub-interface DB
+ */
+typedef struct lisp_gpe_sub_interface_key_t_
+{
+ /**
+ * The local-RLOC. This is the interface's 'source' address.
+ */
+ ip_address_t local_rloc;
+
+ /**
+ * The VNI. In network byte order!
+ */
+ u32 vni;
+} lisp_gpe_sub_interface_key_t;
+
+/**
+ * @brief A LISP L3 sub-interface
+ *
+ * A LISP sub-interface is a multi-access interface whose local address is a
+ * single local-RLOC. Adjacencies that form on this sub-interface represent
+ * remote RLOCs. This is analogous to an Ethernet interface.
+ * As with all interface types it can only be present in one VRF, hence a
+ * LISP sub-interface is per-local-RLOC and per-VNI.
+ */
+typedef struct lisp_gpe_sub_interface_t_
+{
+ /**
+   * The interface's key in the DB: {local-RLOC, VNI}.
+   * The key is allocated from the heap so its address is stable and can live
+   * in the hash-table. If it were embedded in this object, it would be
+   * subject to realloc as the pool grows, which would corrupt the DB.
+ */
+ lisp_gpe_sub_interface_key_t *key;
+
+ /**
+ * The Table-ID in the overlay that this interface is bound to.
+ */
+ u32 eid_table_id;
+
+ /**
+ * A reference counting lock on the number of users of this interface.
+ * When this count drops to 0 the interface is deleted.
+ */
+ u32 locks;
+
+ /**
+ * The SW if index assigned to this sub-interface
+ */
+ u32 sw_if_index;
+
+ /**
+ * The SW IF index assigned to the main interface of which this is a sub.
+ */
+ u32 main_sw_if_index;
+} lisp_gpe_sub_interface_t;
+
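+/*
+ * A sketch of the hazard the heap-allocated key avoids (hypothetical code,
+ * not part of this file): if the key were embedded in the pool element, the
+ * pointer stored in the hash would dangle after a pool realloc:
+ *
+ *   pool_get (lisp_gpe_sub_interface_pool, l3s);    // may realloc the pool
+ *   hash_set_mem (db, &l3s->inline_key, value);     // hash stores this address
+ *   pool_get (lisp_gpe_sub_interface_pool, other);  // realloc moves l3s;
+ *                                                   // the stored key dangles
+ */
+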
+extern index_t lisp_gpe_sub_interface_find_or_create_and_lock (const
+ ip_address_t *
+ lrloc,
+ u32
+ eid_table_id,
+ u32 vni);
+
+extern u8 *format_lisp_gpe_sub_interface (u8 * s, va_list ap);
+
+extern void lisp_gpe_sub_interface_unlock (index_t itf);
+
+extern const lisp_gpe_sub_interface_t *lisp_gpe_sub_interface_get (index_t
+ itf);
+
+/**
+ * A DB of all L3 sub-interfaces, keyed on {local-RLOC, VNI}, mapping to the
+ * sub-interface's sw_if_index.
+ */
+extern uword *lisp_gpe_sub_interfaces_sw_if_index;
+
+/**
+ * @brief
+ * Get the VNET L3 interface matching the local-RLOC and VNI.
+ * Called from the data-plane; the VNI must be in network byte order.
+ */
+always_inline u32
+lisp_gpe_sub_interface_find_ip6 (const ip6_address_t * addr, u32 vni)
+{
+  lisp_gpe_sub_interface_key_t key;
+  const uword *p;
+
+  /* zero the key: the hash covers the whole struct, padding included */
+  memset (&key, 0, sizeof (key));
+  key.local_rloc.ip.v6.as_u64[0] = addr->as_u64[0];
+  key.local_rloc.ip.v6.as_u64[1] = addr->as_u64[1];
+  key.local_rloc.version = IP6;
+  key.vni = vni;
+
+  p = hash_get_mem (lisp_gpe_sub_interfaces_sw_if_index, &key);
+
+ if (NULL != p)
+ return p[0];
+
+ return (INDEX_INVALID);
+}
+
+/**
+ * @brief
+ * Get the VNET L3 interface matching the local-RLOC and VNI.
+ * Called from the data-plane; the VNI must be in network byte order.
+ */
+always_inline index_t
+lisp_gpe_sub_interface_find_ip4 (const ip4_address_t * addr, u32 vni)
+{
+  lisp_gpe_sub_interface_key_t key;
+  const uword *p;
+
+  /* zero the key: the unused bytes of the v4/v6 union are hashed too */
+  memset (&key, 0, sizeof (key));
+  key.local_rloc.ip.v4.as_u32 = addr->as_u32;
+  key.local_rloc.version = IP4;
+  key.vni = vni;
+
+  p = hash_get_mem (lisp_gpe_sub_interfaces_sw_if_index, &key);
+
+ if (NULL != p)
+ return p[0];
+
+ return (INDEX_INVALID);
+}
+
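+/*
+ * Example (hypothetical decap path): map the outer destination address and
+ * the VNI, already in network byte order, to the tenant's L3 sub-interface:
+ *
+ *   u32 sw_if_index;
+ *
+ *   sw_if_index = lisp_gpe_sub_interface_find_ip4 (&ip4_hdr->dst_address,
+ *                                                  vni_net_order);
+ *   if (INDEX_INVALID != sw_if_index)
+ *     vnet_buffer (b)->sw_if_index[VLIB_RX] = sw_if_index;
+ */
+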
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
+
+#endif
diff --git a/src/vnet/lisp-gpe/lisp_gpe_tenant.c b/src/vnet/lisp-gpe/lisp_gpe_tenant.c
new file mode 100644
index 00000000000..6abb7731830
--- /dev/null
+++ b/src/vnet/lisp-gpe/lisp_gpe_tenant.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/lisp-gpe/lisp_gpe_tenant.h>
+
+/**
+ * The pool of all tenants
+ */
+static lisp_gpe_tenant_t *lisp_gpe_tenant_pool;
+
+/**
+ * The hash table of all tenants, keyed on VNI.
+ */
+uword *lisp_gpe_tenant_db;
+
+static lisp_gpe_tenant_t *
+lisp_gpe_tenant_find (u32 vni)
+{
+ uword *p;
+
+ p = hash_get (lisp_gpe_tenant_db, vni);
+
+ if (NULL == p)
+ return (NULL);
+
+ return (pool_elt_at_index (lisp_gpe_tenant_pool, p[0]));
+}
+
+static lisp_gpe_tenant_t *
+lisp_gpe_tenant_find_or_create_i (u32 vni)
+{
+ lisp_gpe_tenant_t *lt;
+
+ lt = lisp_gpe_tenant_find (vni);
+
+ if (NULL == lt)
+ {
+ pool_get (lisp_gpe_tenant_pool, lt);
+ memset (lt, 0, sizeof (*lt));
+
+ lt->lt_vni = vni;
+ lt->lt_table_id = ~0;
+ lt->lt_bd_id = ~0;
+
+ hash_set (lisp_gpe_tenant_db, vni, lt - lisp_gpe_tenant_pool);
+ }
+
+ return (lt);
+}
+
+/**
+ * @brief Find or create a tenant for the given VNI
+ */
+u32
+lisp_gpe_tenant_find_or_create (u32 vni)
+{
+  lisp_gpe_tenant_t *lt;
+
+  lt = lisp_gpe_tenant_find_or_create_i (vni);
+
+  return (lt - lisp_gpe_tenant_pool);
+}
+
+/**
+ * @brief If there are no more locks/users of the tenant, then delete it
+ */
+static void
+lisp_gpe_tenant_delete_if_empty (lisp_gpe_tenant_t * lt)
+{
+ int i;
+
+ for (i = 0; i < LISP_GPE_TENANT_LOCK_NUM; i++)
+ {
+ if (lt->lt_locks[i])
+ return;
+ }
+
+ hash_unset (lisp_gpe_tenant_db, lt->lt_vni);
+ pool_put (lisp_gpe_tenant_pool, lt);
+}
+
+/**
+ * @brief Find and lock the tenant's existing L3 interface, or create and
+ * lock a new one
+ *
+ * @param vni The tenant's VNI
+ * @param table_id The tenant's L3 table ID.
+ *
+ * @return the SW IF index of the L3 interface
+ */
+u32
+lisp_gpe_tenant_l3_iface_add_or_lock (u32 vni, u32 table_id)
+{
+ lisp_gpe_tenant_t *lt;
+
+ lt = lisp_gpe_tenant_find_or_create_i (vni);
+
+ if (~0 == lt->lt_table_id)
+ lt->lt_table_id = table_id;
+
+ ASSERT (lt->lt_table_id == table_id);
+
+ if (0 == lt->lt_locks[LISP_GPE_TENANT_LOCK_L3_IFACE])
+ {
+ /* create the l3 interface since there are currently no users of it */
+ lt->lt_l3_sw_if_index =
+ lisp_gpe_add_l3_iface (&lisp_gpe_main, vni, table_id);
+ }
+
+ lt->lt_locks[LISP_GPE_TENANT_LOCK_L3_IFACE]++;
+
+ return (lt->lt_l3_sw_if_index);
+}
+
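+/*
+ * Usage sketch (hypothetical caller): each user of the tenant's L3 service
+ * takes a lock, and the interface exists only while locked:
+ *
+ *   u32 sw_if_index;
+ *
+ *   sw_if_index = lisp_gpe_tenant_l3_iface_add_or_lock (vni, table_id);
+ *   ... forward via sw_if_index ...
+ *   lisp_gpe_tenant_l3_iface_unlock (vni);
+ */
+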
+/**
+ * @brief Release the lock held on the tenant's L3 interface
+ */
+void
+lisp_gpe_tenant_l3_iface_unlock (u32 vni)
+{
+ lisp_gpe_tenant_t *lt;
+
+ lt = lisp_gpe_tenant_find (vni);
+
+ if (NULL == lt)
+ {
+ clib_warning ("No tenant for VNI %d", vni);
+ return;
+ }
+
+ if (0 == lt->lt_locks[LISP_GPE_TENANT_LOCK_L3_IFACE])
+ {
+ clib_warning ("No L3 interface for tenant VNI %d", vni);
+ return;
+ }
+
+ lt->lt_locks[LISP_GPE_TENANT_LOCK_L3_IFACE]--;
+
+ if (0 == lt->lt_locks[LISP_GPE_TENANT_LOCK_L3_IFACE])
+ {
+ /* the last user has gone, so delete the l3 interface */
+ lisp_gpe_del_l3_iface (&lisp_gpe_main, vni, lt->lt_table_id);
+ }
+
+ /*
+ * If there are no more locks on any tenant managed resource, then
+ * this tenant is toast.
+ */
+ lisp_gpe_tenant_delete_if_empty (lt);
+}
+
+/**
+ * @brief Find and lock the tenant's existing L2 interface, or create and
+ * lock a new one
+ *
+ * @param vni The tenant's VNI
+ * @param bd_id The tenant's L2 bridge-domain ID.
+ *
+ * @return the SW IF index of the L2 interface
+ */
+u32
+lisp_gpe_tenant_l2_iface_add_or_lock (u32 vni, u32 bd_id)
+{
+ lisp_gpe_tenant_t *lt;
+
+ lt = lisp_gpe_tenant_find_or_create_i (vni);
+
+ if (NULL == lt)
+ {
+ clib_warning ("No tenant for VNI %d", vni);
+ return ~0;
+ }
+
+ if (~0 == lt->lt_bd_id)
+ lt->lt_bd_id = bd_id;
+
+ ASSERT (lt->lt_bd_id == bd_id);
+
+ if (0 == lt->lt_locks[LISP_GPE_TENANT_LOCK_L2_IFACE])
+ {
+ /* create the l2 interface since there are currently no users of it */
+ lt->lt_l2_sw_if_index =
+ lisp_gpe_add_l2_iface (&lisp_gpe_main, vni, bd_id);
+ }
+
+ lt->lt_locks[LISP_GPE_TENANT_LOCK_L2_IFACE]++;
+
+ return (lt->lt_l2_sw_if_index);
+}
+
+/**
+ * @brief Release the lock held on the tenant's L2 interface
+ */
+void
+lisp_gpe_tenant_l2_iface_unlock (u32 vni)
+{
+ lisp_gpe_tenant_t *lt;
+
+ lt = lisp_gpe_tenant_find (vni);
+
+ if (NULL == lt)
+ {
+ clib_warning ("No tenant for VNI %d", vni);
+ return;
+ }
+
+ if (0 == lt->lt_locks[LISP_GPE_TENANT_LOCK_L2_IFACE])
+ {
+ clib_warning ("No L2 interface for tenant VNI %d", vni);
+ return;
+ }
+
+ lt->lt_locks[LISP_GPE_TENANT_LOCK_L2_IFACE]--;
+
+ if (0 == lt->lt_locks[LISP_GPE_TENANT_LOCK_L2_IFACE])
+ {
+ /* the last user has gone, so delete the l2 interface */
+ lisp_gpe_del_l2_iface (&lisp_gpe_main, vni, lt->lt_bd_id);
+ }
+
+ /*
+ * If there are no more locks on any tenant managed resource, then
+ * this tenant is toast.
+ */
+ lisp_gpe_tenant_delete_if_empty (lt);
+}
+
+/**
+ * @brief get a const pointer to the tenant object
+ */
+const lisp_gpe_tenant_t *
+lisp_gpe_tenant_get (u32 index)
+{
+ return (pool_elt_at_index (lisp_gpe_tenant_pool, index));
+}
+
+/**
+ * @brief Flush/delete ALL the tenants
+ */
+void
+lisp_gpe_tenant_flush (void)
+{
+ lisp_gpe_tenant_t *lt;
+
+ /* *INDENT-OFF* */
+ pool_foreach(lt, lisp_gpe_tenant_pool,
+ ({
+ lisp_gpe_tenant_l2_iface_unlock(lt->lt_vni);
+ lisp_gpe_tenant_l3_iface_unlock(lt->lt_vni);
+ }));
+ /* *INDENT-ON* */
+}
+
+/**
+ * @brief Show/display one tenant
+ */
+static u8 *
+format_lisp_gpe_tenant (u8 * s, va_list ap)
+{
+ const lisp_gpe_tenant_t *lt = va_arg (ap, lisp_gpe_tenant_t *);
+
+ s = format (s, "VNI:%d ", lt->lt_vni);
+
+ if (lt->lt_table_id != ~0)
+ {
+ s = format (s, "VRF:%d ", lt->lt_table_id);
+ s = format (s, "L3-SW-IF:%d ", lt->lt_l3_sw_if_index);
+ }
+
+ if (lt->lt_bd_id != ~0)
+ {
+ s = format (s, "BD-ID:%d ", lt->lt_bd_id);
+ s = format (s, "L2-SW-IF:%d ", lt->lt_l2_sw_if_index);
+ }
+
+ return (s);
+}
+
+/**
+ * @brief CLI command to show LISP-GPE tenant.
+ */
+static clib_error_t *
+lisp_gpe_tenant_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ lisp_gpe_tenant_t *lt;
+
+ /* *INDENT-OFF* */
+ pool_foreach (lt, lisp_gpe_tenant_pool,
+ ({
+ vlib_cli_output (vm, "%U", format_lisp_gpe_tenant, lt);
+ }));
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (lisp_gpe_tenant_command) = {
+ .path = "show lisp gpe tenant",
+ .short_help = "show lisp gpe tenant",
+ .function = lisp_gpe_tenant_show,
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-gpe/lisp_gpe_tenant.h b/src/vnet/lisp-gpe/lisp_gpe_tenant.h
new file mode 100644
index 00000000000..5db7dde833b
--- /dev/null
+++ b/src/vnet/lisp-gpe/lisp_gpe_tenant.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LISP_GPE_TENANT_H__
+#define __LISP_GPE_TENANT_H__
+
+#include <vnet/lisp-gpe/lisp_gpe.h>
+
+/**
+ * Refernece counting lock types on the tenant.
+ * When all of these counters drop to zero, we no longer need the tenant.
+ */
+typedef enum lisp_gpe_tenant_lock_t_
+{
+ LISP_GPE_TENANT_LOCK_L2_IFACE,
+ LISP_GPE_TENANT_LOCK_L3_IFACE,
+ LISP_GPE_TENANT_LOCK_NUM,
+} lisp_gpe_tenant_lock_t;
+
+/**
+ * @brief Representation of the data associated with a LISP overlay tenant
+ *
+ * This object exists to manage the shared resources of the L2 and L3 interfaces
+ * of a given tenant.
+ */
+typedef struct lisp_gpe_tenant_t_
+{
+ /**
+ * The VNI is the identifier of the tenant
+ */
+ u32 lt_vni;
+
+ /**
+ * The tenant can have both L2 and L3 services enabled.
+ */
+ u32 lt_table_id;
+ u32 lt_bd_id;
+
+ /**
+   * Per-resource lock counts; the tenant is deleted when all drop to zero.
+ */
+ u32 lt_locks[LISP_GPE_TENANT_LOCK_NUM];
+
+ /**
+ * The L3 SW interface index
+ */
+ u32 lt_l3_sw_if_index;
+
+ /**
+ * The L2 SW interface index
+ */
+ u32 lt_l2_sw_if_index;
+
+} lisp_gpe_tenant_t;
+
+extern u32 lisp_gpe_tenant_find_or_create (u32 vni);
+
+extern u32 lisp_gpe_tenant_l3_iface_add_or_lock (u32 vni, u32 table_id);
+extern void lisp_gpe_tenant_l3_iface_unlock (u32 vni);
+
+extern u32 lisp_gpe_tenant_l2_iface_add_or_lock (u32 vni, u32 bd_id);
+extern void lisp_gpe_tenant_l2_iface_unlock (u32 vni);
+
+extern const lisp_gpe_tenant_t *lisp_gpe_tenant_get (u32 index);
+
+extern void lisp_gpe_tenant_flush (void);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
+
+#endif
diff --git a/src/vnet/lisp-gpe/lisp_gpe_tunnel.c b/src/vnet/lisp-gpe/lisp_gpe_tunnel.c
new file mode 100644
index 00000000000..e4e59707e8f
--- /dev/null
+++ b/src/vnet/lisp-gpe/lisp_gpe_tunnel.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief Common utility functions for IPv4, IPv6 and L2 LISP-GPE tunnels.
+ *
+ */
+#include <vnet/lisp-gpe/lisp_gpe.h>
+#include <vnet/lisp-gpe/lisp_gpe_tunnel.h>
+#include <vnet/lisp-gpe/lisp_gpe_adjacency.h>
+
+#include <vnet/fib/fib_table.h>
+
+/**
+ * @brief Pool of all LISP tunnels
+ */
+static lisp_gpe_tunnel_t *lisp_gpe_tunnel_pool;
+
+/**
+ * @brief a DB of all tunnels
+ */
+static uword *lisp_gpe_tunnel_db;
+
+/**
+ * @brief Compute a tunnel's IP-UDP-GPE encap/rewrite header.
+ *
+ * @param[in] lgt           Tunnel whose locator pair supplies the outer
+ *                          source and destination addresses.
+ * @param[in] ladj          LISP-GPE adjacency supplying the flags and VNI.
+ * @param[in] payload_proto GPE next-protocol of the encapsulated payload.
+ *
+ * @return vector of rewrite bytes.
+ */
+u8 *
+lisp_gpe_tunnel_build_rewrite (const lisp_gpe_tunnel_t * lgt,
+ const lisp_gpe_adjacency_t * ladj,
+ lisp_gpe_next_protocol_e payload_proto)
+{
+ lisp_gpe_header_t *lisp0;
+ u8 *rw = 0;
+ int len;
+
+ if (IP4 == ip_addr_version (&lgt->key->lcl))
+ {
+ ip4_udp_lisp_gpe_header_t *h0;
+ ip4_header_t *ip0;
+
+ len = sizeof (*h0);
+
+ vec_validate_aligned (rw, len - 1, CLIB_CACHE_LINE_BYTES);
+
+ h0 = (ip4_udp_lisp_gpe_header_t *) rw;
+
+ /* Fixed portion of the (outer) ip4 header */
+ ip0 = &h0->ip4;
+ ip0->ip_version_and_header_length = 0x45;
+ ip0->ttl = 254;
+ ip0->protocol = IP_PROTOCOL_UDP;
+
+ /* we fix up the ip4 header length and checksum after-the-fact */
+ ip_address_copy_addr (&ip0->src_address, &lgt->key->lcl);
+ ip_address_copy_addr (&ip0->dst_address, &lgt->key->rmt);
+ ip0->checksum = ip4_header_checksum (ip0);
+
+      /* UDP header. TODO: derive the source port from a flow hash for ECMP */
+ h0->udp.src_port = clib_host_to_net_u16 (4341);
+ h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_lisp_gpe);
+
+ /* LISP-gpe header */
+ lisp0 = &h0->lisp;
+ }
+ else
+ {
+ ip6_udp_lisp_gpe_header_t *h0;
+ ip6_header_t *ip0;
+
+ len = sizeof (*h0);
+
+ vec_validate_aligned (rw, len - 1, CLIB_CACHE_LINE_BYTES);
+
+ h0 = (ip6_udp_lisp_gpe_header_t *) rw;
+
+ /* Fixed portion of the (outer) ip6 header */
+ ip0 = &h0->ip6;
+ ip0->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 (0x6 << 28);
+ ip0->hop_limit = 254;
+ ip0->protocol = IP_PROTOCOL_UDP;
+
+ /* we fix up the ip6 header length after-the-fact */
+ ip_address_copy_addr (&ip0->src_address, &lgt->key->lcl);
+ ip_address_copy_addr (&ip0->dst_address, &lgt->key->rmt);
+
+      /* UDP header. TODO: derive the source port from a flow hash for ECMP */
+ h0->udp.src_port = clib_host_to_net_u16 (4341);
+ h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_lisp_gpe);
+
+ /* LISP-gpe header */
+ lisp0 = &h0->lisp;
+ }
+
+ lisp0->flags = ladj->flags;
+ lisp0->ver_res = 0;
+ lisp0->res = 0;
+ lisp0->next_protocol = payload_proto;
+ lisp0->iid = clib_host_to_net_u32 (ladj->vni) >> 8; /* first 24 bits only */
+
+ return (rw);
+}
+
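+/*
+ * The vector returned above is the complete encap string that is prepended
+ * to each payload at transmit time; for an IPv4 underlay it is laid out as
+ * ip4_header_t | udp_header_t | lisp_gpe_header_t, with the outer addresses
+ * taken from the tunnel key and the flags/IID from the adjacency.
+ */
+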
+static lisp_gpe_tunnel_t *
+lisp_gpe_tunnel_db_find (const lisp_gpe_tunnel_key_t * key)
+{
+ uword *p;
+
+ p = hash_get_mem (lisp_gpe_tunnel_db, (void *) key);
+
+ if (NULL != p)
+ {
+ return (pool_elt_at_index (lisp_gpe_tunnel_pool, p[0]));
+ }
+ return (NULL);
+}
+
+lisp_gpe_tunnel_t *
+lisp_gpe_tunnel_get_i (index_t lgti)
+{
+ return (pool_elt_at_index (lisp_gpe_tunnel_pool, lgti));
+}
+
+index_t
+lisp_gpe_tunnel_find_or_create_and_lock (const locator_pair_t * pair,
+ u32 rloc_fib_index)
+{
+ lisp_gpe_tunnel_key_t key = {
+ .lcl = pair->lcl_loc,
+ .rmt = pair->rmt_loc,
+ .fib_index = rloc_fib_index,
+ };
+ lisp_gpe_tunnel_t *lgt;
+ fib_prefix_t pfx;
+
+ lgt = lisp_gpe_tunnel_db_find (&key);
+
+ if (NULL == lgt)
+ {
+ pool_get (lisp_gpe_tunnel_pool, lgt);
+ memset (lgt, 0, sizeof (*lgt));
+
+ lgt->key = clib_mem_alloc (sizeof (*lgt->key));
+ memset (lgt->key, 0, sizeof (*lgt->key));
+
+ lgt->key->rmt = pair->rmt_loc;
+ lgt->key->lcl = pair->lcl_loc;
+ lgt->key->fib_index = rloc_fib_index;
+
+ /*
+ * source the FIB entry for the RLOC so we can track its forwarding
+ * chain
+ */
+ ip_address_to_fib_prefix (&lgt->key->rmt, &pfx);
+
+ lgt->fib_entry_index = fib_table_entry_special_add (rloc_fib_index,
+ &pfx,
+ FIB_SOURCE_RR,
+ FIB_ENTRY_FLAG_NONE,
+ ADJ_INDEX_INVALID);
+
+      hash_set_mem (lisp_gpe_tunnel_db, lgt->key,
+		    (lgt - lisp_gpe_tunnel_pool));
+ }
+
+ lgt->locks++;
+
+ return (lgt - lisp_gpe_tunnel_pool);
+}
+
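+/*
+ * Usage sketch (hypothetical caller; the next-protocol enumerator name is
+ * assumed): pair each find_or_create with an unlock, and use the tunnel to
+ * build an adjacency's rewrite:
+ *
+ *   index_t lgti;
+ *   u8 *rw;
+ *
+ *   lgti = lisp_gpe_tunnel_find_or_create_and_lock (&pair, rloc_fib_index);
+ *   rw = lisp_gpe_tunnel_build_rewrite (lisp_gpe_tunnel_get (lgti),
+ *                                       ladj, LISP_GPE_NEXT_PROTO_IP4);
+ *   ...
+ *   lisp_gpe_tunnel_unlock (lgti);
+ */
+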
+void
+lisp_gpe_tunnel_unlock (index_t lgti)
+{
+ lisp_gpe_tunnel_t *lgt;
+
+ lgt = lisp_gpe_tunnel_get_i (lgti);
+ lgt->locks--;
+
+ if (0 == lgt->locks)
+ {
+      hash_unset_mem (lisp_gpe_tunnel_db, lgt->key);
+ clib_mem_free (lgt->key);
+ pool_put (lisp_gpe_tunnel_pool, lgt);
+ }
+}
+
+const lisp_gpe_tunnel_t *
+lisp_gpe_tunnel_get (index_t lgti)
+{
+ return (lisp_gpe_tunnel_get_i (lgti));
+}
+
+/** Format LISP-GPE tunnel. */
+u8 *
+format_lisp_gpe_tunnel (u8 * s, va_list * args)
+{
+ lisp_gpe_tunnel_t *lgt = va_arg (*args, lisp_gpe_tunnel_t *);
+
+ s = format (s, "tunnel %d\n", lgt - lisp_gpe_tunnel_pool);
+ s = format (s, " fib-index: %d, locks:%d \n",
+ lgt->key->fib_index, lgt->locks);
+ s = format (s, " lisp ver 0\n");
+
+ s = format (s, " locator-pair:\n");
+ s = format (s, " local: %U remote: %U\n",
+ format_ip_address, &lgt->key->lcl,
+ format_ip_address, &lgt->key->rmt);
+ s = format (s, " RLOC FIB entry: %d\n", lgt->fib_entry_index);
+
+ return s;
+}
+
+/**
+ * CLI command to show LISP-GPE tunnels.
+ */
+static clib_error_t *
+show_lisp_gpe_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ lisp_gpe_tunnel_t *lgt;
+ index_t index;
+
+ if (pool_elts (lisp_gpe_tunnel_pool) == 0)
+ vlib_cli_output (vm, "No lisp-gpe tunnels configured...");
+
+ if (unformat (input, "%d", &index))
+ {
+ lgt = lisp_gpe_tunnel_get_i (index);
+ vlib_cli_output (vm, "%U", format_lisp_gpe_tunnel, lgt);
+ }
+ else
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (lgt, lisp_gpe_tunnel_pool,
+ ({
+ vlib_cli_output (vm, "%U", format_lisp_gpe_tunnel, lgt);
+ }));
+ /* *INDENT-ON* */
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_lisp_gpe_tunnel_command, static) =
+{
+ .path = "show lisp gpe tunnel",
+ .function = show_lisp_gpe_tunnel_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+lisp_gpe_tunnel_module_init (vlib_main_t * vm)
+{
+ lisp_gpe_tunnel_db = hash_create_mem (0,
+ sizeof (lisp_gpe_tunnel_key_t),
+ sizeof (uword));
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (lisp_gpe_tunnel_module_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-gpe/lisp_gpe_tunnel.h b/src/vnet/lisp-gpe/lisp_gpe_tunnel.h
new file mode 100644
index 00000000000..333d2882883
--- /dev/null
+++ b/src/vnet/lisp-gpe/lisp_gpe_tunnel.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief Common utility functions for IPv4, IPv6 and L2 LISP-GPE tunnels.
+ *
+ */
+
+#ifndef LISP_GPE_TUNNEL_H__
+#define LISP_GPE_TUNNEL_H__
+
+#include <vnet/lisp-gpe/lisp_gpe.h>
+#include <vnet/lisp-gpe/lisp_gpe_packet.h>
+
+/**
+ * Forward declaration
+ */
+struct lisp_gpe_adjacency_t_;
+
+/**
+ * A Key for a tunnel
+ */
+typedef struct lisp_gpe_tunnel_key_t_
+{
+ ip_address_t rmt;
+ ip_address_t lcl;
+ u32 fib_index;
+} lisp_gpe_tunnel_key_t;
+
+/**
+ * @brief A LISP GPE Tunnel.
+ *
+ * A tunnel represents an association between a local and remote RLOC.
+ * As such it represents a unique LISP rewrite.
+ */
+typedef struct lisp_gpe_tunnel_t_
+{
+ /**
+ * RLOC pair and rloc fib_index. This is the tunnel's key.
+ */
+ lisp_gpe_tunnel_key_t *key;
+
+ /**
+ * number of reference counting locks
+ */
+ u32 locks;
+
+ /**
+   * The FIB entry through which the remote RLOC is reachable.
+   */
+ fib_node_index_t fib_entry_index;
+} lisp_gpe_tunnel_t;
+
+extern index_t lisp_gpe_tunnel_find_or_create_and_lock (const locator_pair_t *
+ pair,
+ u32 rloc_fib_index);
+
+extern void lisp_gpe_tunnel_unlock (index_t lgti);
+
+extern const lisp_gpe_tunnel_t *lisp_gpe_tunnel_get (index_t lgti);
+
+extern u8 *lisp_gpe_tunnel_build_rewrite (const lisp_gpe_tunnel_t * lgt,
+ const struct lisp_gpe_adjacency_t_
+ *ladj,
+ lisp_gpe_next_protocol_e
+ payload_proto);
+extern u8 *format_lisp_gpe_tunnel (u8 * s, va_list * args);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lisp-gpe/rfc.txt b/src/vnet/lisp-gpe/rfc.txt
new file mode 100644
index 00000000000..5e3da150c70
--- /dev/null
+++ b/src/vnet/lisp-gpe/rfc.txt
@@ -0,0 +1,826 @@
+Network Working Group D. Lewis
+Internet-Draft Cisco Systems, Inc.
+Intended status: Informational P. Agarwal
+Expires: January 5, 2015 Broadcom
+ L. Kreeger
+ F. Maino
+ P. Quinn
+ M. Smith
+ N. Yadav
+ Cisco Systems, Inc.
+ July 4, 2014
+
+
+ LISP Generic Protocol Extension
+ draft-lewis-lisp-gpe-02.txt
+
+Abstract
+
+ This draft describes extending the Locator/ID Separation Protocol
+ (LISP) [RFC6830], via changes to the LISP header, with three new
+ capabilities: support for multi-protocol encapsulation, operations,
+ administration and management (OAM) signaling, and explicit
+ versioning.
+
+Status of this Memo
+
+ This Internet-Draft is submitted in full conformance with the
+ provisions of BCP 78 and BCP 79.
+
+ Internet-Drafts are working documents of the Internet Engineering
+ Task Force (IETF). Note that other groups may also distribute
+ working documents as Internet-Drafts. The list of current Internet-
+ Drafts is at http://datatracker.ietf.org/drafts/current/.
+
+ Internet-Drafts are draft documents valid for a maximum of six months
+ and may be updated, replaced, or obsoleted by other documents at any
+ time. It is inappropriate to use Internet-Drafts as reference
+ material or to cite them other than as "work in progress."
+
+ This Internet-Draft will expire on January 5, 2015.
+
+Copyright Notice
+
+ Copyright (c) 2014 IETF Trust and the persons identified as the
+ document authors. All rights reserved.
+
+ This document is subject to BCP 78 and the IETF Trust's Legal
+ Provisions Relating to IETF Documents
+
+
+
+Lewis, et al. Expires January 5, 2015 [Page 1]
+
+Internet-Draft LISP Generic Protocol Extension July 2014
+
+
+ (http://trustee.ietf.org/license-info) in effect on the date of
+ publication of this document. Please review these documents
+ carefully, as they describe your rights and restrictions with respect
+ to this document. Code Components extracted from this document must
+ include Simplified BSD License text as described in Section 4.e of
+ the Trust Legal Provisions and are provided without warranty as
+ described in the Simplified BSD License.
+
+
+Table of Contents
+
+ 1. Introduction . . . . . . . . . . . . . . . . . . . . . . . . . 3
+ 2. LISP Header Without Protocol Extensions . . . . . . . . . . . 4
+ 3. Generic Protocol Extension for LISP (LISP-gpe) . . . . . . . . 5
+ 3.1. Multi Protocol Support . . . . . . . . . . . . . . . . . . 5
+ 3.2. OAM Support . . . . . . . . . . . . . . . . . . . . . . . 6
+ 3.3. Version Bits . . . . . . . . . . . . . . . . . . . . . . . 6
+ 4. Backward Compatibility . . . . . . . . . . . . . . . . . . . . 8
+ 4.1. LISP-gpe Routers to (legacy) LISP Routers . . . . . . . . 8
+ 4.2. (legacy) LISP Routers to LISP-gpe Routers . . . . . . . . 8
+ 4.3. Type of Service . . . . . . . . . . . . . . . . . . . . . 8
+ 4.4. VLAN Identifier (VID) . . . . . . . . . . . . . . . . . . 8
+ 5. LISP-gpe Examples . . . . . . . . . . . . . . . . . . . . . . 9
+ 6. Security Considerations . . . . . . . . . . . . . . . . . . . 11
+ 7. Acknowledgments . . . . . . . . . . . . . . . . . . . . . . . 12
+ 8. IANA Considerations . . . . . . . . . . . . . . . . . . . . . 13
+ 9. References . . . . . . . . . . . . . . . . . . . . . . . . . . 14
+ 9.1. Normative References . . . . . . . . . . . . . . . . . . . 14
+ 9.2. Informative References . . . . . . . . . . . . . . . . . . 14
+ Authors' Addresses . . . . . . . . . . . . . . . . . . . . . . . . 15
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Lewis, et al. Expires January 5, 2015 [Page 2]
+
+Internet-Draft LISP Generic Protocol Extension July 2014
+
+
+1. Introduction
+
+ LISP [RFC6830] defines an encapsulation format that carries IPv4 or
+ IPv6 (henceforth referred to as IP) packets in a LISP header and
+ outer UDP/IP transport.
+
+ The LISP header does not specify the protocol being encapsulated and
+ therefore is currently limited to encapsulating only IP packet
+ payloads. Other protocols, most notably VXLAN [VXLAN] (which defines
+ a similar header format to LISP), are used to encapsulate L2
+ protocols such as Ethernet. LISP [RFC6830] can be extended to
+ indicate the inner protocol, enabling the encapsulation of Ethernet,
+ IP or any other desired protocol all the while ensuring compatibility
+ with existing LISP [RFC6830] deployments.
+
+ As LISP is deployed, there's also the need to provide increased
+ visibility and diagnostic capabilities within the overlay.
+
+ This document describes extending LISP ([RFC6830]) via the following
+ changes:
+
+ Next Protocol Bit (P bit): A reserved flag bit is allocated, and set
+ in the LISP-gpe header to indicate that a next protocol field is
+ present.
+
+ OAM Flag Bit (O bit): A reserved flag bit is allocated, and set in
+ the LISP-gpe header, to indicate that the packet is an OAM packet.
+
+ Version: Two reserved bits are allocated, and set in the LISP-gpe
+ header, to indicate LISP-gpe protocol version.
+
+ Next protocol: An 8 bit next protocol field is present in the LISP-
+ gpe header.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Lewis, et al. Expires January 5, 2015 [Page 3]
+
+Internet-Draft LISP Generic Protocol Extension July 2014
+
+
+2. LISP Header Without Protocol Extensions
+
+ As described in the introduction, the LISP header has no protocol
+ identifier that indicates the type of payload being carried by LISP.
+ Because of this, LISP is limited to an IP payload. Furthermore, the
+ LISP header has no mechanism to signal OAM packets.
+
+ The LISP header contains flags (some defined, some reserved), a
+ Nonce/Map-version field and an instance ID/Locator-status-bit field.
+ The flags provide flexibility to define how the reserved bits can be
+ used to change the definition of the LISP header.
+
+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |N|L|E|V|I|flags| Nonce/Map-Version |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Instance ID/Locator-Status-Bits |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+ Figure 1: LISP Header
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Lewis, et al. Expires January 5, 2015 [Page 4]
+
+Internet-Draft LISP Generic Protocol Extension July 2014
+
+
+3. Generic Protocol Extension for LISP (LISP-gpe)
+
+3.1. Multi Protocol Support
+
+ This draft defines the following changes to the LISP header in order
+ to support multi-protocol encapsulation.
+
+ P Bit: Flag bit 5 is defined as the Next Protocol bit. The P bit
+ MUST be set to 1 to indicate the presence of the 8 bit next
+ protocol field.
+
+ P = 0 indicates that the payload MUST conform to LISP as defined
+ in [RFC6830].
+
+ Flag bit 5 was chosen as the P bit because this flag bit is
+ currently unallocated in LISP [RFC6830].
+
+ Next Protocol Field: The lower 8 bits of the first word are used to
+ carry a next protocol. This next protocol field contains the
+ protocol of the encapsulated payload packet.
+
+ LISP [RFC6830] uses the lower 16 bits of the first word for either
+ a nonce, an echo-nonce ([RFC6830]) or to support map-versioning
+ ([RFC6834]). These are all optional capabilities that are
+ indicated by setting the N, E, and the V bit respectively.
+
+ To maintain the desired data plane compatibility, when the P bit
+ is set, the N, E, and V bits MUST be set to zero.
+
+ A new protocol registry will be requested from IANA for the Next
+ Protocol field. This draft defines the following Next Protocol
+ values:
+
+ 0x1 : IPv4
+
+ 0x2 : IPv6
+
+ 0x3 : Ethernet
+
+ 0x4: Network Service Header
+
+
+
+
+
+
+
+
+
+
+
+Lewis, et al. Expires January 5, 2015 [Page 5]
+
+Internet-Draft LISP Generic Protocol Extension July 2014
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |N|L|E|V|I|P|R|R| Reserved | Next Protocol |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Instance ID/Locator-Status-Bits |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+ Figure 2: LISP-gpe Next Protocol (P=1)
+
+3.2. OAM Support
+
+ Flag bit 7 is defined as the O bit. When the O bit is set to 1, the
+ packet is an OAM packet and OAM processing MUST occur. The OAM
+ protocol details are out of scope for this document. As with the
+ P-bit, bit 7 is currently a reserved flag in [RFC6830].
+
+
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |N|L|E|V|I|P|R|O| Reserved | Next Protocol |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Instance ID/Locator-Status-Bits |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+ Figure 3: LISP-gpe OAM bit (P=1)
+
+3.3. Version Bits
+
+   LISP-gpe bits 8 and 9 are defined as version bits.  The version field
+ is used to ensure backward compatibility going forward with future
+ LISP-gpe updates.
+
+ The initial version for LISP-gpe is 0.
+
+
+
+
+
+
+
+
+
+
+Lewis, et al. Expires January 5, 2015 [Page 6]
+
+Internet-Draft LISP Generic Protocol Extension July 2014
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |N|L|E|V|I|P|R|O|Ver| Reserved | Next Protocol |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Instance ID/Locator-Status-Bits |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+ Figure 4: LISP-gpe Version bits (P=1)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Lewis, et al. Expires January 5, 2015 [Page 7]
+
+Internet-Draft LISP Generic Protocol Extension July 2014
+
+
+4. Backward Compatibility
+
+ Undefined (in RFC6830) flag bits 5 and 7, LISP-gpe P and O bits, were
+ selected to ensure compatibility with existing LISP [RFC6830]
+ deployments.
+
+ Similarly, using P = 0 to indicate that the format of the header and
+ payload conforms to [RFC6830] ensures compatibility with existing
+ LISP hardware forwarding platforms.
+
+4.1. LISP-gpe Routers to (legacy) LISP Routers
+
+   A LISP-gpe router MUST NOT encapsulate non-IP packets nor OAM packets
+ to a LISP router. A method for determining the capabilities of a
+ LISP router (gpe or "legacy") is out of the scope of this draft.
+
+ When encapsulating IP packets to a LISP router the P bit SHOULD be
+   set to 1 and the UDP port MUST be set to 4341.  The OAM bit MUST be
+   set to 0.  The Next Protocol field SHOULD be 0x1 (IPv4) or 0x2 (IPv6).
+ The (legacy) LISP router will ignore the P bit and the protocol type
+ field. The (legacy) LISP router will treat the packet as a LISP
+ packet and inspect the first nibble of the payload to determine the
+ IP version.
+
+ When the P bit is set, the N, E, and V bits MUST be set to zero. The
+ receiving (legacy) LISP router will ignore N, E and V bits, when the
+ P bit is set.
+
+4.2. (legacy) LISP Routers to LISP-gpe Routers
+
+ When a LISP-gpe router receives a packet from a (legacy) LISP router,
+   the P bit MUST NOT be set and the UDP port MUST be 4341.  The payload
+ MUST be IP, and the LISP-gpe router will inspect the first nibble of
+ the payload to determine IP version.
+
+4.3. Type of Service
+
+ When a LISP-gpe router performs Ethernet encapsulation, the inner
+ 802.1Q [IEEE8021Q] priority code point (PCP) field MAY be mapped from
+ the encapsulated frame to the Type of Service field in the outer IPv4
+ header, or in the case of IPv6 the 'Traffic Class' field.
+
+4.4. VLAN Identifier (VID)
+
+ When a LISP-gpe router performs Ethernet encapsulation, the inner
+ header 802.1Q [IEEE8021Q] VLAN Identifier (VID) MAY be mapped to, or
+ used to determine the LISP Instance ID field.
+
+
+
+
+Lewis, et al. Expires January 5, 2015 [Page 8]
+
+Internet-Draft LISP Generic Protocol Extension July 2014
+
+
+5. LISP-gpe Examples
+
+   This section provides two examples of IP payloads, and one example of
+   an Ethernet payload, encapsulated with LISP-gpe using the generic
+   extension described in this document.
+
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |N|L|E|V|I|1|0|0|0| Reserved | NP = IPv4 |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Instance ID/Locator-Status-Bits |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Original IPv4 Packet |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+ Figure 5: IPv4 and LISP-gpe
+
+
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |N|L|E|V|I|1|0|0|0| Reserved | NP = IPv6 |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Instance ID/Locator-Status-Bits |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Original IPv6 Packet |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+ Figure 6: IPv6 and LISP-gpe
+
+
+
+
+
+
+
+
+
+
+
+
+
+Lewis, et al. Expires January 5, 2015 [Page 9]
+
+Internet-Draft LISP Generic Protocol Extension July 2014
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |N|L|E|V|I|1|0|0|0| Reserved | NP = Ethernet |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Instance ID/Locator-Status-Bits |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Original Ethernet Frame |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+ Figure 7: Ethernet and LISP-gpe
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Lewis, et al. Expires January 5, 2015 [Page 10]
+
+Internet-Draft LISP Generic Protocol Extension July 2014
+
+
+6. Security Considerations
+
+ LISP-gpe security considerations are similar to the LISP security
+ considerations documented at length in LISP [RFC6830]. With LISP-
+ gpe, issues such as dataplane spoofing, flooding, and traffic
+ redirection are dependent on the particular protocol payload
+ encapsulated.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Lewis, et al. Expires January 5, 2015 [Page 11]
+
+Internet-Draft LISP Generic Protocol Extension July 2014
+
+
+7. Acknowledgments
+
+ A special thank you goes to Dino Farinacci for his guidance and
+ detailed review.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Lewis, et al. Expires January 5, 2015 [Page 12]
+
+Internet-Draft LISP Generic Protocol Extension July 2014
+
+
+8. IANA Considerations
+
+ IANA is requested to set up a registry of "Next Protocol". These are
+ 8-bit values. Next Protocol values 0, 1, 2, 3 and 4 are defined in
+ this draft. New values are assigned via Standards Action [RFC5226].
+
+ +---------------+-------------+---------------+
+ | Next Protocol | Description | Reference |
+ +---------------+-------------+---------------+
+ | 0 | Reserved | This document |
+ | | | |
+ | 1 | IPv4 | This document |
+ | | | |
+ | 2 | IPv6 | This document |
+ | | | |
+ | 3 | Ethernet | This document |
+ | | | |
+ | 4 | NSH | This document |
+ | | | |
+ | 5..253 | Unassigned | |
+ +---------------+-------------+---------------+
+
+ Table 1
+
+ There are ten bits at the beginning of the LISP-gpe header. New
+ bits are assigned via Standards Action [RFC5226].
+
+ Bits 0-3 - Assigned by LISP [RFC6830]
+ Bit 4 - Instance ID (I bit)
+ Bit 5 - Next Protocol (P bit)
+ Bit 6 - Reserved
+ Bit 7 - OAM (O bit)
+ Bits 8-9 - Version
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Lewis, et al. Expires January 5, 2015 [Page 13]
+
+Internet-Draft LISP Generic Protocol Extension July 2014
+
+
+9. References
+
+9.1. Normative References
+
+ [RFC0768] Postel, J., "User Datagram Protocol", STD 6, RFC 768,
+ August 1980.
+
+ [RFC0791] Postel, J., "Internet Protocol", STD 5, RFC 791,
+ September 1981.
+
+ [RFC2119] Bradner, S., "Key words for use in RFCs to Indicate
+ Requirement Levels", BCP 14, RFC 2119, March 1997.
+
+ [RFC5226] Narten, T. and H. Alvestrand, "Guidelines for Writing an
+ IANA Considerations Section in RFCs", BCP 26, RFC 5226,
+ May 2008.
+
+9.2. Informative References
+
+ [ETYPES] The IEEE Registration Authority, "IEEE 802 Numbers", 2012,
+ <http://www.iana.org/assignments/ieee-802-numbers/
+ ieee-802-numbers.xml>.
+
+ [IEEE8021Q]
+ The IEEE Computer Society, "Media Access Control (MAC)
+ Bridges and Virtual Bridge Local Area Networks", August
+ 2012, <http://standards.ieee.org/getieee802/download/
+ 802.1Q-2011.pdf>.
+
+ [RFC1700] Reynolds, J. and J. Postel, "Assigned Numbers", RFC 1700,
+ October 1994.
+
+ [RFC6830] Farinacci, D., Fuller, V., Meyer, D., and D. Lewis, "The
+ Locator/ID Separation Protocol (LISP)", RFC 6830,
+ January 2013.
+
+ [RFC6834] Iannone, L., Saucez, D., and O. Bonaventure, "Locator/ID
+ Separation Protocol (LISP) Map-Versioning", RFC 6834,
+ January 2013.
+
+ [VXLAN] Dutt, D., Mahalingam, M., Duda, K., Agarwal, P., Kreeger,
+ L., Sridhar, T., Bursell, M., and C. Wright, "VXLAN: A
+ Framework for Overlaying Virtualized Layer 2 Networks over
+ Layer 3 Networks", 2013.
+
+
+
+
+
+
+
+Lewis, et al. Expires January 5, 2015 [Page 14]
+
+Internet-Draft LISP Generic Protocol Extension July 2014
+
+
+Authors' Addresses
+
+ Darrel Lewis
+ Cisco Systems, Inc.
+
+ Email: darlewis@cisco.com
+
+
+ Puneet Agarwal
+ Broadcom
+
+ Email: pagarwal@broadcom.com
+
+
+ Larry Kreeger
+ Cisco Systems, Inc.
+
+ Email: kreeger@cisco.com
+
+
+ Fabio Maino
+ Cisco Systems, Inc.
+
+ Email: fmaino@cisco.com
+
+
+ Paul Quinn
+ Cisco Systems, Inc.
+
+ Email: paulq@cisco.com
+
+
+ Michael Smith
+ Cisco Systems, Inc.
+
+ Email: michsmit@cisco.com
+
+
+ Navindra Yadav
+ Cisco Systems, Inc.
+
+ Email: nyadav@cisco.com
diff --git a/src/vnet/llc/llc.c b/src/vnet/llc/llc.c
new file mode 100644
index 00000000000..975207b651d
--- /dev/null
+++ b/src/vnet/llc/llc.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * llc.c: llc support
+ *
+ * Copyright (c) 2010 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/llc/llc.h>
+
+/* Global main structure. */
+llc_main_t llc_main;
+
+u8 *
+format_llc_protocol (u8 * s, va_list * args)
+{
+ llc_protocol_t p = va_arg (*args, u32);
+ llc_main_t *pm = &llc_main;
+ llc_protocol_info_t *pi = llc_get_protocol_info (pm, p);
+
+ if (pi)
+ s = format (s, "%s", pi->name);
+ else
+ s = format (s, "0x%02x", p);
+
+ return s;
+}
+
+u8 *
+format_llc_header_with_length (u8 * s, va_list * args)
+{
+ llc_main_t *pm = &llc_main;
+ llc_header_t *h = va_arg (*args, llc_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+ llc_protocol_t p = h->dst_sap;
+ uword indent, header_bytes;
+
+ header_bytes = llc_header_length (h);
+ if (max_header_bytes != 0 && header_bytes > max_header_bytes)
+ return format (s, "llc header truncated");
+
+ indent = format_get_indent (s);
+
+ s = format (s, "LLC %U -> %U",
+ format_llc_protocol, h->src_sap,
+ format_llc_protocol, h->dst_sap);
+
+ if (h->control != 0x03)
+ s = format (s, ", control 0x%x", llc_header_get_control (h));
+
+  /* Format the encapsulated payload, if any; the truncated case returned
+     above, so only the '<' comparison can ever format anything. */
+  if (max_header_bytes != 0 && header_bytes < max_header_bytes)
+    {
+      llc_protocol_info_t *pi = llc_get_protocol_info (pm, p);
+
+      /* Guard against unknown SAPs and protocols with no input node. */
+      if (pi != NULL && pi->node_index != ~0)
+	{
+	  vlib_node_t *node = vlib_get_node (pm->vlib_main, pi->node_index);
+	  if (node->format_buffer)
+	    s = format (s, "\n%U%U",
+			format_white_space, indent,
+			node->format_buffer, (void *) (h + 1),
+			max_header_bytes - header_bytes);
+	}
+    }
+
+ return s;
+}
+
+u8 *
+format_llc_header (u8 * s, va_list * args)
+{
+ llc_header_t *h = va_arg (*args, llc_header_t *);
+ return format (s, "%U", format_llc_header_with_length, h, 0);
+}
+
+/* Returns llc protocol as an int in host byte order. */
+uword
+unformat_llc_protocol (unformat_input_t * input, va_list * args)
+{
+ u8 *result = va_arg (*args, u8 *);
+ llc_main_t *pm = &llc_main;
+ int p, i;
+
+ /* Numeric type. */
+ if (unformat (input, "0x%x", &p) || unformat (input, "%d", &p))
+ {
+ if (p >= (1 << 8))
+ return 0;
+ *result = p;
+ return 1;
+ }
+
+ /* Named type. */
+ if (unformat_user (input, unformat_vlib_number_by_name,
+ pm->protocol_info_by_name, &i))
+ {
+ llc_protocol_info_t *pi = vec_elt_at_index (pm->protocol_infos, i);
+ *result = pi->protocol;
+ return 1;
+ }
+
+ return 0;
+}
+
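+/*
+ * Example: "snap" and "0xaa" both unformat to LLC_PROTOCOL_snap (0xaa);
+ * plain decimal ("170") is accepted too.
+ */
+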
+uword
+unformat_llc_header (unformat_input_t * input, va_list * args)
+{
+ u8 **result = va_arg (*args, u8 **);
+ llc_header_t _h, *h = &_h;
+ u8 p;
+
+ if (!unformat (input, "%U", unformat_llc_protocol, &p))
+ return 0;
+
+ h->src_sap = h->dst_sap = p;
+ h->control = 0x3;
+
+ /* Add header to result. */
+ {
+ void *p;
+ u32 n_bytes = sizeof (h[0]);
+
+ vec_add2 (*result, p, n_bytes);
+ clib_memcpy (p, h, n_bytes);
+ }
+
+ return 1;
+}
+
+static u8 *
+llc_build_rewrite (vnet_main_t * vnm,
+ u32 sw_if_index,
+ vnet_link_t link_type, const void *dst_address)
+{
+ llc_header_t *h;
+ u8 *rewrite = NULL;
+ llc_protocol_t protocol;
+
+ switch (link_type)
+ {
+#define _(a,b) case VNET_LINK_##a: protocol = LLC_PROTOCOL_##b; break
+ _(IP4, ip4);
+#undef _
+ default:
+ return (NULL);
+ }
+
+ vec_validate (rewrite, sizeof (*h) - 1);
+ h = (llc_header_t *) rewrite;
+ h->src_sap = h->dst_sap = protocol;
+ h->control = 0x3;
+
+ return (rewrite);
+}
+
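+/*
+ * For VNET_LINK_IP4 the rewrite built above is the 3-byte LLC header
+ * 06 06 03: DSAP = SSAP = LLC_PROTOCOL_ip4 (0x6), control = 0x3 (UI frame).
+ */
+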
+/* *INDENT-OFF* */
+VNET_HW_INTERFACE_CLASS (llc_hw_interface_class) = {
+ .name = "LLC",
+ .format_header = format_llc_header_with_length,
+ .unformat_header = unformat_llc_header,
+ .build_rewrite = llc_build_rewrite,
+};
+/* *INDENT-ON* */
+
+static void
+add_protocol (llc_main_t * pm, llc_protocol_t protocol, char *protocol_name)
+{
+ llc_protocol_info_t *pi;
+ u32 i;
+
+ vec_add2 (pm->protocol_infos, pi, 1);
+ i = pi - pm->protocol_infos;
+
+ pi->name = protocol_name;
+ pi->protocol = protocol;
+ pi->next_index = pi->node_index = ~0;
+
+ hash_set (pm->protocol_info_by_protocol, protocol, i);
+ hash_set_mem (pm->protocol_info_by_name, pi->name, i);
+}
+
+static clib_error_t *
+llc_init (vlib_main_t * vm)
+{
+ clib_error_t *error;
+ llc_main_t *pm = &llc_main;
+
+ memset (pm, 0, sizeof (pm[0]));
+ pm->vlib_main = vm;
+
+ pm->protocol_info_by_name = hash_create_string (0, sizeof (uword));
+ pm->protocol_info_by_protocol = hash_create (0, sizeof (uword));
+
+#define _(f,n) add_protocol (pm, LLC_PROTOCOL_##f, #f);
+ foreach_llc_protocol;
+#undef _
+
+ if ((error = vlib_call_init_function (vm, snap_init)))
+ return error;
+
+ return vlib_call_init_function (vm, llc_input_init);
+}
+
+VLIB_INIT_FUNCTION (llc_init);
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/llc/llc.h b/src/vnet/llc/llc.h
new file mode 100644
index 00000000000..0b85f5d8a8b
--- /dev/null
+++ b/src/vnet/llc/llc.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * llc.h: LLC definitions
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_llc_h
+#define included_llc_h
+
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+
+/* Protocol (SSAP/DSAP) types. */
+#define foreach_llc_protocol \
+ _ (null, 0x0) \
+ _ (sublayer, 0x2) \
+ _ (sna_path_control, 0x4) \
+ _ (ip4, 0x6) \
+ _ (sna1, 0x8) \
+ _ (sna2, 0xc) \
+ _ (sna3, 0x40) \
+ _ (proway_lan, 0x0e) \
+ _ (netware1, 0x10) \
+ _ (netware2, 0xe0) \
+ _ (osi_layer1, 0x14) \
+ _ (osi_layer2, 0x20) \
+ _ (osi_layer3, 0x34) \
+ _ (osi_layer4, 0x54) \
+ _ (osi_layer5, 0xfe) \
+ _ (bpdu, 0x42) \
+ _ (arp, 0x98) \
+ _ (snap, 0xaa) \
+ _ (vines1, 0xba) \
+ _ (vines2, 0xbc) \
+ _ (netbios, 0xf0) \
+ _ (global_dsap, 0xff)
+
+typedef enum
+{
+#define _(f,n) LLC_PROTOCOL_##f = n,
+ foreach_llc_protocol
+#undef _
+} llc_protocol_t;
+
+typedef struct
+{
+#define LLC_DST_SAP_IS_GROUP (1 << 0)
+#define LLC_SRC_SAP_IS_RESPONSE (1 << 0)
+ u8 dst_sap, src_sap;
+
+  /* Control byte:
+     bit 0 clear     => information frame,
+     bits [1:0] == 1 => supervisory frame,
+     bits [1:0] == 3 => unnumbered frame. */
+ u8 control;
+
+ /* Only present if (control & 3) != 3. */
+ u8 extended_control[0];
+} llc_header_t;
+
+always_inline u16
+llc_header_get_control (llc_header_t * h)
+{
+ u16 r = h->control;
+ return r | ((((r & 3) != 3) ? h->extended_control[0] : 0) << 8);
+}
+
+always_inline u8
+llc_header_length (llc_header_t * h)
+{
+ return ((h->control & 3) != 3 ? 4 : 3);
+}
+
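+/*
+ * Example: a UI frame has control == 0x03, so (control & 3) == 3 and the
+ * header is 3 bytes with no extended control byte; an I-frame (bit 0 clear)
+ * carries the extended control byte, giving a 4-byte header and a 16-bit
+ * control value from llc_header_get_control().
+ */
+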
+typedef struct
+{
+ /* Name (a c string). */
+ char *name;
+
+ /* LLC protocol (SAP type). */
+ llc_protocol_t protocol;
+
+ /* Node which handles this type. */
+ u32 node_index;
+
+ /* Next index for this type. */
+ u32 next_index;
+} llc_protocol_info_t;
+
+#define foreach_llc_error \
+ _ (NONE, "no error") \
+ _ (UNKNOWN_PROTOCOL, "unknown llc ssap/dsap") \
+ _ (UNKNOWN_CONTROL, "control != 0x3")
+
+typedef enum
+{
+#define _(f,s) LLC_ERROR_##f,
+ foreach_llc_error
+#undef _
+ LLC_N_ERROR,
+} llc_error_t;
+
+typedef struct
+{
+ vlib_main_t *vlib_main;
+
+ llc_protocol_info_t *protocol_infos;
+
+ /* Hash tables mapping name/protocol to protocol info index. */
+ uword *protocol_info_by_name, *protocol_info_by_protocol;
+
+ /* llc-input next index indexed by protocol. */
+ u8 input_next_by_protocol[256];
+} llc_main_t;
+
+always_inline llc_protocol_info_t *
+llc_get_protocol_info (llc_main_t * m, llc_protocol_t protocol)
+{
+ uword *p = hash_get (m->protocol_info_by_protocol, protocol);
+ return p ? vec_elt_at_index (m->protocol_infos, p[0]) : 0;
+}
+
+extern llc_main_t llc_main;
+
+/* Register given node index to take input for given llc type. */
+void
+llc_register_input_protocol (vlib_main_t * vm,
+ llc_protocol_t protocol, u32 node_index);
+
+void llc_set_adjacency (vnet_rewrite_header_t * rw,
+ uword max_data_bytes, llc_protocol_t protocol);
+
+format_function_t format_llc_protocol;
+format_function_t format_llc_header;
+format_function_t format_llc_header_with_length;
+
+/* Parse llc protocol as 0xXXXX or protocol name. */
+unformat_function_t unformat_llc_protocol;
+
+/* Parse llc header. */
+unformat_function_t unformat_llc_header;
+unformat_function_t unformat_pg_llc_header;
+
+always_inline void
+llc_setup_node (vlib_main_t * vm, u32 node_index)
+{
+ vlib_node_t *n = vlib_get_node (vm, node_index);
+ pg_node_t *pn = pg_get_node (node_index);
+
+ n->format_buffer = format_llc_header_with_length;
+ n->unformat_buffer = unformat_llc_header;
+ pn->unformat_edit = unformat_pg_llc_header;
+}
+
+#endif /* included_llc_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/llc/node.c b/src/vnet/llc/node.c
new file mode 100644
index 00000000000..60b5c13420c
--- /dev/null
+++ b/src/vnet/llc/node.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * llc_node.c: llc packet processing
+ *
+ * Copyright (c) 2010 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/llc/llc.h>
+
+#define foreach_llc_input_next \
+ _ (PUNT, "error-punt") \
+ _ (DROP, "error-drop")
+
+typedef enum
+{
+#define _(s,n) LLC_INPUT_NEXT_##s,
+ foreach_llc_input_next
+#undef _
+ LLC_INPUT_N_NEXT,
+} llc_input_next_t;
+
+typedef struct
+{
+ u8 packet_data[32];
+} llc_input_trace_t;
+
+static u8 *
+format_llc_input_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ llc_input_trace_t *t = va_arg (*va, llc_input_trace_t *);
+
+ s = format (s, "%U", format_llc_header, t->packet_data);
+
+ return s;
+}
+
+static uword
+llc_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ llc_main_t *lm = &llc_main;
+ u32 n_left_from, next_index, *from, *to_next;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node,
+ from,
+ n_left_from,
+ sizeof (from[0]),
+ sizeof (llc_input_trace_t));
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ llc_header_t *h0, *h1;
+ u8 next0, next1, len0, len1, enqueue_code;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *b2, *b3;
+
+ b2 = vlib_get_buffer (vm, from[2]);
+ b3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (b2, LOAD);
+ vlib_prefetch_buffer_header (b3, LOAD);
+
+ CLIB_PREFETCH (b2->data, sizeof (h0[0]), LOAD);
+ CLIB_PREFETCH (b3->data, sizeof (h1[0]), LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ h0 = (void *) (b0->data + b0->current_data);
+ h1 = (void *) (b1->data + b1->current_data);
+
+ len0 = llc_header_length (h0);
+ len1 = llc_header_length (h1);
+
+ b0->current_data += len0;
+ b1->current_data += len1;
+
+ b0->current_length -= len0;
+ b1->current_length -= len1;
+
+ next0 = lm->input_next_by_protocol[h0->dst_sap];
+ next1 = lm->input_next_by_protocol[h1->dst_sap];
+
+ b0->error =
+ node->errors[next0 ==
+ LLC_INPUT_NEXT_DROP ? LLC_ERROR_UNKNOWN_PROTOCOL :
+ LLC_ERROR_NONE];
+ b1->error =
+ node->errors[next1 ==
+ LLC_INPUT_NEXT_DROP ? LLC_ERROR_UNKNOWN_PROTOCOL :
+ LLC_ERROR_NONE];
+
+ enqueue_code = (next0 != next_index) + 2 * (next1 != next_index);
+
+ if (PREDICT_FALSE (enqueue_code != 0))
+ {
+ switch (enqueue_code)
+ {
+ case 1:
+ /* A B A */
+ to_next[-2] = bi1;
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node, next0, bi0);
+ break;
+
+ case 2:
+ /* A A B */
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node, next1, bi1);
+ break;
+
+ case 3:
+ /* A B B or A B C */
+ to_next -= 2;
+ n_left_to_next += 2;
+ vlib_set_next_frame_buffer (vm, node, next0, bi0);
+ vlib_set_next_frame_buffer (vm, node, next1, bi1);
+ if (next0 == next1)
+ {
+ vlib_put_next_frame (vm, node, next_index,
+ n_left_to_next);
+ next_index = next1;
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+ }
+ }
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ llc_header_t *h0;
+ u8 next0, len0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ h0 = (void *) (b0->data + b0->current_data);
+
+ len0 = llc_header_length (h0);
+
+ b0->current_data += len0;
+
+ b0->current_length -= len0;
+
+ next0 = lm->input_next_by_protocol[h0->dst_sap];
+
+ b0->error =
+ node->errors[next0 ==
+ LLC_INPUT_NEXT_DROP ? LLC_ERROR_UNKNOWN_PROTOCOL :
+ LLC_ERROR_NONE];
+
+ /* Sent packet to wrong next? */
+ if (PREDICT_FALSE (next0 != next_index))
+ {
+ /* Return old frame; remove incorrectly enqueued packet. */
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1);
+
+ /* Send to correct next. */
+ next_index = next0;
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ to_next[0] = bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return from_frame->n_vectors;
+}
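+
+/* A note on the dual-buffer loop above: enqueue_code packs the two
+ * "did the speculated next_index hold?" tests into two bits, so the
+ * common case costs a single predicted-false branch:
+ *
+ *   enqueue_code = (next0 != next_index) + 2 * (next1 != next_index);
+ *
+ *   0: both packets stay in the current frame (fast path)
+ *   1: packet 0 is re-routed via vlib_set_next_frame_buffer
+ *   2: packet 1 is re-routed
+ *   3: both are re-routed; if they agree on the same next node, the
+ *      loop also switches frames so later packets take the fast path
+ */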
+
+static char *llc_error_strings[] = {
+#define _(f,s) s,
+ foreach_llc_error
+#undef _
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (llc_input_node) = {
+ .function = llc_input,
+ .name = "llc-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_errors = LLC_N_ERROR,
+ .error_strings = llc_error_strings,
+
+ .n_next_nodes = LLC_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [LLC_INPUT_NEXT_##s] = n,
+ foreach_llc_input_next
+#undef _
+ },
+
+ .format_buffer = format_llc_header_with_length,
+ .format_trace = format_llc_input_trace,
+ .unformat_buffer = unformat_llc_header,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+llc_input_init (vlib_main_t * vm)
+{
+ llc_main_t *lm = &llc_main;
+
+ {
+ clib_error_t *error = vlib_call_init_function (vm, llc_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ llc_setup_node (vm, llc_input_node.index);
+
+ {
+ int i;
+ for (i = 0; i < ARRAY_LEN (lm->input_next_by_protocol); i++)
+ lm->input_next_by_protocol[i] = LLC_INPUT_NEXT_DROP;
+ }
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (llc_input_init);
+
+void
+llc_register_input_protocol (vlib_main_t * vm,
+ llc_protocol_t protocol, u32 node_index)
+{
+ llc_main_t *lm = &llc_main;
+ llc_protocol_info_t *pi;
+
+ {
+ clib_error_t *error = vlib_call_init_function (vm, llc_input_init);
+ if (error)
+ clib_error_report (error);
+ /* Otherwise, osi_input_init will wipe out e.g. the snap init */
+ error = vlib_call_init_function (vm, osi_input_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ pi = llc_get_protocol_info (lm, protocol);
+ pi->node_index = node_index;
+ pi->next_index = vlib_node_add_next (vm, llc_input_node.index, node_index);
+
+ lm->input_next_by_protocol[protocol] = pi->next_index;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/llc/pg.c b/src/vnet/llc/pg.c
new file mode 100644
index 00000000000..ad18a4b05c7
--- /dev/null
+++ b/src/vnet/llc/pg.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * llc_pg.c: packet generator llc interface
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/llc/llc.h>
+
+typedef struct
+{
+ pg_edit_t dst_sap;
+ pg_edit_t src_sap;
+ pg_edit_t control;
+} pg_llc_header_t;
+
+static inline void
+pg_llc_header_init (pg_llc_header_t * e)
+{
+ pg_edit_init (&e->dst_sap, llc_header_t, dst_sap);
+ pg_edit_init (&e->src_sap, llc_header_t, src_sap);
+ pg_edit_init (&e->control, llc_header_t, control);
+}
+
+uword
+unformat_pg_llc_header (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t *s = va_arg (*args, pg_stream_t *);
+ pg_llc_header_t *h;
+ u32 group_index, error;
+
+ h = pg_create_edit_group (s, sizeof (h[0]), sizeof (llc_header_t),
+ &group_index);
+ pg_llc_header_init (h);
+
+ pg_edit_set_fixed (&h->control, 0x03);
+
+ error = 1;
+ if (!unformat (input, "%U -> %U",
+ unformat_pg_edit,
+ unformat_llc_protocol, &h->src_sap, &h->dst_sap))
+ goto done;
+
+ {
+ llc_main_t *pm = &llc_main;
+ llc_protocol_info_t *pi = 0;
+ pg_node_t *pg_node = 0;
+
+ if (h->dst_sap.type == PG_EDIT_FIXED)
+ {
+ u8 t = *h->dst_sap.values[PG_EDIT_LO];
+ pi = llc_get_protocol_info (pm, t);
+ if (pi && pi->node_index != ~0)
+ pg_node = pg_get_node (pi->node_index);
+ }
+
+ if (pg_node && pg_node->unformat_edit
+ && unformat_user (input, pg_node->unformat_edit, s))
+ ;
+
+ else if (!unformat_user (input, unformat_pg_payload, s))
+ goto done;
+ }
+
+ error = 0;
+done:
+ if (error)
+ pg_free_edit_group (s);
+ return error == 0;
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lldp/dir.dox b/src/vnet/lldp/dir.dox
new file mode 100644
index 00000000000..6aa45f70f16
--- /dev/null
+++ b/src/vnet/lldp/dir.dox
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ @dir vnet/vnet/lldp
+ @brief Link Layer Discovery Protocol (LLDP) implementation
+*/
diff --git a/src/vnet/lldp/lldp_cli.c b/src/vnet/lldp/lldp_cli.c
new file mode 100644
index 00000000000..45f688c58b4
--- /dev/null
+++ b/src/vnet/lldp/lldp_cli.c
@@ -0,0 +1,646 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ * @brief LLDP CLI handling
+ *
+ */
+#include <vnet/lisp-cp/lisp_types.h>
+#include <vnet/lldp/lldp_node.h>
+
+#ifndef ETHER_ADDR_LEN
+#include <net/ethernet.h>
+#endif
+
+typedef enum lldp_cfg_err
+{
+ lldp_ok,
+ lldp_not_supported,
+ lldp_invalid_arg,
+} lldp_cfg_err_t;
+
+static clib_error_t *
+lldp_cfg_err_to_clib_err (lldp_cfg_err_t e)
+{
+
+ switch (e)
+ {
+ case lldp_ok:
+ return 0;
+ case lldp_not_supported:
+ return clib_error_return (0, "not supported");
+ case lldp_invalid_arg:
+ return clib_error_return (0, "invalid argument");
+ }
+ return 0;
+}
+
+static lldp_cfg_err_t
+lldp_cfg_intf_set (u32 hw_if_index, int enable)
+{
+ lldp_main_t *lm = &lldp_main;
+ vnet_main_t *vnm = lm->vnet_main;
+ ethernet_main_t *em = &ethernet_main;
+ const vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+ const ethernet_interface_t *eif = ethernet_get_interface (em, hw_if_index);
+
+ if (!eif)
+ {
+ return lldp_not_supported;
+ }
+
+ if (enable)
+ {
+ lldp_intf_t *n = lldp_get_intf (lm, hw_if_index);
+ if (n)
+ {
+ /* already enabled */
+ return 0;
+ }
+ n = lldp_create_intf (lm, hw_if_index);
+ const vnet_sw_interface_t *sw =
+ vnet_get_sw_interface (lm->vnet_main, hi->sw_if_index);
+ if (sw->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ {
+ lldp_schedule_intf (lm, n);
+ }
+ }
+ else
+ {
+ lldp_intf_t *n = lldp_get_intf (lm, hw_if_index);
+ lldp_delete_intf (lm, n);
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+lldp_intf_cmd (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ lldp_main_t *lm = &lldp_main;
+ vnet_main_t *vnm = lm->vnet_main;
+ u32 hw_if_index;
+ int enable = 0;
+
+ if (unformat (input, "%U %U", unformat_vnet_hw_interface, vnm, &hw_if_index,
+ unformat_vlib_enable_disable, &enable))
+ {
+ return
+ lldp_cfg_err_to_clib_err (lldp_cfg_intf_set (hw_if_index, enable));
+ }
+ else
+ {
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ return 0;
+}
+
+static lldp_cfg_err_t
+lldp_cfg_set (u8 ** host, int hold_time, int tx_interval)
+{
+ lldp_main_t *lm = &lldp_main;
+ int reschedule = 0;
+ if (host && *host)
+ {
+ vec_free (lm->sys_name);
+ lm->sys_name = *host;
+ *host = NULL;
+ }
+ if (hold_time)
+ {
+ if (hold_time < LLDP_MIN_TX_HOLD || hold_time > LLDP_MAX_TX_HOLD)
+ {
+ return lldp_invalid_arg;
+ }
+ if (lm->msg_tx_hold != hold_time)
+ {
+ lm->msg_tx_hold = hold_time;
+ reschedule = 1;
+ }
+ }
+ if (tx_interval)
+ {
+ if (tx_interval < LLDP_MIN_TX_INTERVAL ||
+ tx_interval > LLDP_MAX_TX_INTERVAL)
+ {
+ return lldp_invalid_arg;
+ }
+ if (lm->msg_tx_interval != tx_interval)
+ {
+ reschedule = 1;
+ lm->msg_tx_interval = tx_interval;
+ }
+ }
+ if (reschedule)
+ {
+ vlib_process_signal_event (lm->vlib_main, lm->lldp_process_node_index,
+ LLDP_EVENT_RESCHEDULE, 0);
+ }
+ return lldp_ok;
+}
+
+static clib_error_t *
+lldp_cfg_cmd (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ int hold_time = 0;
+ int tx_interval = 0;
+ u8 *host = NULL;
+ clib_error_t *ret = NULL;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "system-name %s", &host))
+ {
+ }
+ else if (unformat (input, "tx-hold %d", &hold_time))
+ {
+ if (hold_time < LLDP_MIN_TX_HOLD || hold_time > LLDP_MAX_TX_HOLD)
+ {
+ ret =
+ clib_error_return (0,
+ "invalid tx-hold `%d' (out of range <%d,%d>)",
+ hold_time, LLDP_MIN_TX_HOLD,
+ LLDP_MAX_TX_HOLD);
+ goto out;
+ }
+ }
+ else if (unformat (input, "tx-interval %d", &tx_interval))
+ {
+ if (tx_interval < LLDP_MIN_TX_INTERVAL ||
+ tx_interval > LLDP_MAX_TX_INTERVAL)
+ {
+ ret =
+ clib_error_return (0,
+ "invalid tx-interval `%d' (out of range <%d,%d>)",
+ tx_interval, LLDP_MIN_TX_INTERVAL,
+ LLDP_MAX_TX_INTERVAL);
+ goto out;
+ }
+ }
+ else
+ {
+ ret = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ goto out;
+ }
+ }
+ ret =
+ lldp_cfg_err_to_clib_err (lldp_cfg_set (&host, hold_time, tx_interval));
+out:
+ vec_free (host);
+ return ret;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND(set_interface_lldp_cmd, static) = {
+ .path = "set interface lldp",
+ .short_help = "set interface lldp <interface> (enable | disable) ",
+ .function = lldp_intf_cmd,
+};
+
+VLIB_CLI_COMMAND(set_lldp_cmd, static) = {
+ .path = "set lldp",
+ .short_help = "set lldp [system-name <string>] [tx-hold <value>] "
+ "[tx-interval <value>]",
+ .function = lldp_cfg_cmd,
+};
+/* *INDENT-ON* */
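+
+/* Typical usage of the two commands above (see also lldp_doc.md):
+ *
+ *   set lldp system-name VPP tx-hold 4 tx-interval 30
+ *   set interface lldp TenGigabitEthernet5/0/1 enable
+ */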
+
+static const char *
+lldp_chassis_id_subtype_str (lldp_chassis_id_subtype_t t)
+{
+ switch (t)
+ {
+#define F(num, val, str) \
+ case num: \
+ return str;
+ foreach_chassis_id_subtype (F)
+#undef F
+ }
+ return "unknown chassis subtype";
+}
+
+static const char *
+lldp_port_id_subtype_str (lldp_port_id_subtype_t t)
+{
+ switch (t)
+ {
+#define F(num, val, str) \
+ case num: \
+ return str;
+ foreach_port_id_subtype (F)
+#undef F
+ }
+ return "unknown port subtype";
+}
+
+/*
+ * format port id subtype & value
+ *
+ * @param va - 1st argument - unsigned - port id subtype
+ * @param va - 2nd argument - u8* - port id
+ * @param va - 3rd argument - unsigned - port id length
+ * @param va - 4th argument - int - 1 for detailed output, 0 for simple
+ */
+u8 *
+format_lldp_port_id (u8 * s, va_list * va)
+{
+ const lldp_port_id_subtype_t subtype = va_arg (*va, unsigned);
+ const u8 *id = va_arg (*va, u8 *);
+ const unsigned len = va_arg (*va, unsigned);
+ const int detail = va_arg (*va, int);
+ if (!id)
+ {
+ return s;
+ }
+ switch (subtype)
+ {
+ case LLDP_PORT_ID_SUBTYPE_NAME (intf_alias):
+ /* fallthrough */
+ case LLDP_PORT_ID_SUBTYPE_NAME (port_comp):
+ /* fallthrough */
+ case LLDP_PORT_ID_SUBTYPE_NAME (local):
+ /* fallthrough */
+ case LLDP_PORT_ID_SUBTYPE_NAME (intf_name):
+ if (detail)
+ {
+ s = format (s, "%U(%s)", format_ascii_bytes, id, len,
+ lldp_port_id_subtype_str (subtype));
+ }
+ else
+ {
+ s = format (s, "%U", format_ascii_bytes, id, len);
+ }
+ break;
+ case LLDP_PORT_ID_SUBTYPE_NAME (mac_addr):
+ if (ETHER_ADDR_LEN == len)
+ {
+ if (detail)
+ {
+ s = format (s, "%U(%s)", format_mac_address, id,
+ lldp_port_id_subtype_str (subtype));
+ }
+ else
+ {
+ s = format (s, "%U", format_mac_address, id);
+ }
+ break;
+ }
+ /* fallthrough */
+ case LLDP_PORT_ID_SUBTYPE_NAME (net_addr):
+ /* TODO */
+ /* fallthrough */
+ default:
+ if (detail)
+ {
+ s = format (s, "%U(%s)", format_hex_bytes, id, len,
+ lldp_port_id_subtype_str (subtype));
+ }
+ else
+ {
+ s = format (s, "%U", format_hex_bytes, id, len);
+ }
+ break;
+ }
+ return s;
+}
+
+/*
+ * format chassis id subtype & value
+ *
+ * @param s format string
+ * @param va - 1st argument - unsigned - chassis id subtype
+ * @param va - 2nd argument - u8* - chassis id
+ * @param va - 3rd argument - unsigned - chassis id length
+ * @param va - 4th argument - int - 1 for detailed output, 0 for simple
+ */
+u8 *
+format_lldp_chassis_id (u8 * s, va_list * va)
+{
+ const lldp_chassis_id_subtype_t subtype =
+ va_arg (*va, lldp_chassis_id_subtype_t);
+ const u8 *id = va_arg (*va, u8 *);
+ const unsigned len = va_arg (*va, unsigned);
+ const int detail = va_arg (*va, int);
+ if (!id)
+ {
+ return s;
+ }
+ switch (subtype)
+ {
+ case LLDP_CHASS_ID_SUBTYPE_NAME (chassis_comp):
+ /* fallthrough */
+ case LLDP_CHASS_ID_SUBTYPE_NAME (intf_alias):
+ /* fallthrough */
+ case LLDP_CHASS_ID_SUBTYPE_NAME (port_comp):
+ /* fallthrough */
+ case LLDP_CHASS_ID_SUBTYPE_NAME (local):
+ /* fallthrough */
+ case LLDP_CHASS_ID_SUBTYPE_NAME (intf_name):
+ if (detail)
+ {
+ s = format (s, "%U(%s)", format_ascii_bytes, id, len,
+ lldp_chassis_id_subtype_str (subtype));
+ }
+ else
+ {
+ s = format (s, "%U", format_ascii_bytes, id, len);
+ }
+ break;
+ case LLDP_CHASS_ID_SUBTYPE_NAME (mac_addr):
+ if (ETHER_ADDR_LEN == len)
+ {
+ if (detail)
+ {
+ s = format (s, "%U(%s)", format_mac_address, id,
+ lldp_chassis_id_subtype_str (subtype));
+ }
+ else
+ {
+ s = format (s, "%U", format_mac_address, id);
+ }
+ break;
+ }
+ /* fallthrough */
+ case LLDP_CHASS_ID_SUBTYPE_NAME (net_addr):
+ /* TODO */
+ default:
+ if (detail)
+ {
+ s = format (s, "%U(%s)", format_hex_bytes, id, len,
+ lldp_chassis_id_subtype_str (subtype));
+ }
+ else
+ {
+ s = format (s, "%U", format_hex_bytes, id, len);
+ }
+ break;
+ }
+ return s;
+}
+
+/*
+ * convert a tlv code to human-readable string
+ */
+static const char *
+lldp_tlv_code_str (lldp_tlv_code_t t)
+{
+ switch (t)
+ {
+#define F(n, t, s) \
+ case n: \
+ return s;
+ foreach_lldp_tlv_type (F)
+#undef F
+ }
+ return "unknown lldp tlv";
+}
+
+/*
+ * format a single LLDP TLV
+ *
+ * @param s format string
+ * @param va variable list - pointer to lldp_tlv_t is expected
+ */
+u8 *
+format_lldp_tlv (u8 * s, va_list * va)
+{
+ const lldp_tlv_t *tlv = va_arg (*va, lldp_tlv_t *);
+ if (!tlv)
+ {
+ return s;
+ }
+ u16 l = lldp_tlv_get_length (tlv);
+ switch (lldp_tlv_get_code (tlv))
+ {
+ case LLDP_TLV_NAME (chassis_id):
+ s = format (s, "%U", format_lldp_chassis_id,
+ ((lldp_chassis_id_tlv_t *) tlv)->subtype,
+ ((lldp_chassis_id_tlv_t *) tlv)->id,
+ l - STRUCT_SIZE_OF (lldp_chassis_id_tlv_t, subtype), 1);
+ break;
+ case LLDP_TLV_NAME (port_id):
+ s = format (s, "%U", format_lldp_port_id,
+ ((lldp_port_id_tlv_t *) tlv)->subtype,
+ ((lldp_port_id_tlv_t *) tlv)->id,
+ l - STRUCT_SIZE_OF (lldp_port_id_tlv_t, subtype), 1);
+ break;
+ case LLDP_TLV_NAME (ttl):
+ s = format (s, "%d", ntohs (((lldp_ttl_tlv_t *) tlv)->ttl));
+ break;
+ case LLDP_TLV_NAME (sys_name):
+ /* fallthrough */
+ case LLDP_TLV_NAME (sys_desc):
+ s = format (s, "%U", format_ascii_bytes, tlv->v, l);
+ break;
+ default:
+ s = format (s, "%U", format_hex_bytes, tlv->v, l);
+ }
+
+ return s;
+}
+
+static u8 *
+format_time_ago (u8 * s, va_list * va)
+{
+ f64 ago = va_arg (*va, double);
+ f64 now = va_arg (*va, double);
+ if (ago < 0.01)
+ {
+ return format (s, "never");
+ }
+ return format (s, "%.1fs ago", now - ago);
+}
+
+static u8 *
+format_lldp_intfs_detail (u8 * s, vlib_main_t * vm, const lldp_main_t * lm)
+{
+ vnet_main_t *vnm = &vnet_main;
+ const lldp_intf_t *n;
+ const vnet_hw_interface_t *hw;
+ const vnet_sw_interface_t *sw;
+ s = format (s, "LLDP configuration:\n");
+ if (lm->sys_name)
+ {
+ s = format (s, "Configured system name: %U\n", format_ascii_bytes,
+ lm->sys_name, vec_len (lm->sys_name));
+ }
+ s = format (s, "Configured tx-hold: %d\n", (int) lm->msg_tx_hold);
+ s = format (s, "Configured tx-interval: %d\n", (int) lm->msg_tx_interval);
+ s = format (s, "\nLLDP-enabled interface table:\n");
+ f64 now = vlib_time_now (vm);
+
+ /* *INDENT-OFF* */
+ pool_foreach(
+ n, lm->intfs, ({
+ hw = vnet_get_hw_interface(vnm, n->hw_if_index);
+ sw = vnet_get_sw_interface(lm->vnet_main, hw->sw_if_index);
+ /* Interface shutdown */
+ if (!(sw->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP))
+ {
+ s = format(s, "\nInterface name: %s\nInterface/peer state: "
+ "interface down\nLast packet sent: %U\n",
+ hw->name, format_time_ago, n->last_sent, now);
+ }
+ else if (now < n->last_heard + n->ttl)
+ {
+ s = format(s,
+ "\nInterface name: %s\nInterface/peer state: "
+ "active\nPeer chassis ID: %U\nRemote port ID: %U\nLast "
+ "packet sent: %U\nLast packet received: %U\n",
+ hw->name, format_lldp_chassis_id, n->chassis_id_subtype,
+ n->chassis_id, vec_len(n->chassis_id), 1,
+ format_lldp_port_id, n->port_id_subtype, n->port_id,
+ vec_len(n->port_id), 1, format_time_ago, n->last_sent,
+ now, format_time_ago, n->last_heard, now);
+ }
+ else
+ {
+ s = format(s, "\nInterface name: %s\nInterface/peer state: "
+ "inactive(timeout)\nLast known peer chassis ID: "
+ "%U\nLast known peer port ID: %U\nLast packet sent: "
+ "%U\nLast packet received: %U\n",
+ hw->name, format_lldp_chassis_id, n->chassis_id_subtype,
+ n->chassis_id, vec_len(n->chassis_id), 1,
+ format_lldp_port_id, n->port_id_subtype, n->port_id,
+ vec_len(n->port_id), 1, format_time_ago, n->last_sent,
+ now, format_time_ago, n->last_heard, now);
+ }
+ }));
+ /* *INDENT-ON* */
+ return s;
+}
+
+static u8 *
+format_lldp_intfs (u8 * s, va_list * va)
+{
+ vlib_main_t *vm = va_arg (*va, vlib_main_t *);
+ const lldp_main_t *lm = va_arg (*va, lldp_main_t *);
+ const int detail = va_arg (*va, int);
+ vnet_main_t *vnm = &vnet_main;
+ const lldp_intf_t *n;
+
+ if (detail)
+ {
+ return format_lldp_intfs_detail (s, vm, lm);
+ }
+
+ f64 now = vlib_time_now (vm);
+ s = format (s, "%-25s %-25s %-25s %=15s %=15s %=10s\n", "Local interface",
+ "Peer chassis ID", "Remote port ID", "Last heard", "Last sent",
+ "Status");
+
+ /* *INDENT-OFF* */
+ pool_foreach(
+ n, lm->intfs, ({
+ const vnet_hw_interface_t *hw =
+ vnet_get_hw_interface(vnm, n->hw_if_index);
+ const vnet_sw_interface_t *sw =
+ vnet_get_sw_interface(lm->vnet_main, hw->sw_if_index);
+ /* Interface shutdown */
+ if (!(sw->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP))
+ continue;
+ if (now < n->last_heard + n->ttl)
+ {
+ s = format(s, "%-25s %-25U %-25U %=15U %=15U %=10s\n", hw->name,
+ format_lldp_chassis_id, n->chassis_id_subtype,
+ n->chassis_id, vec_len(n->chassis_id), 0,
+ format_lldp_port_id, n->port_id_subtype, n->port_id,
+ vec_len(n->port_id), 0, format_time_ago, n->last_heard,
+ now, format_time_ago, n->last_sent, now, "active");
+ }
+ else
+ {
+ s = format(s, "%-25s %-25s %-25s %=15U %=15U %=10s\n", hw->name,
+ "", "", format_time_ago, n->last_heard, now,
+ format_time_ago, n->last_sent, now, "inactive");
+ }
+ }));
+ /* *INDENT-ON* */
+ return s;
+}
+
+static clib_error_t *
+show_lldp (vlib_main_t * vm, unformat_input_t * input,
+ CLIB_UNUSED (vlib_cli_command_t * lmd))
+{
+ lldp_main_t *lm = &lldp_main;
+
+ if (unformat (input, "detail"))
+ {
+ vlib_cli_output (vm, "%U\n", format_lldp_intfs, vm, lm, 1);
+ }
+ else
+ {
+ vlib_cli_output (vm, "%U\n", format_lldp_intfs, vm, lm, 0);
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND(show_lldp_command, static) = {
+ .path = "show lldp",
+ .short_help = "show lldp [detail]",
+ .function = show_lldp,
+};
+/* *INDENT-ON* */
+
+/*
+ * packet trace format function, very similar to
+ * lldp_packet_scan except that we call the per TLV format
+ * functions instead of the per TLV processing functions
+ */
+u8 *
+lldp_input_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ const lldp_input_trace_t *t = va_arg (*args, lldp_input_trace_t *);
+ const u8 *cur;
+ const lldp_tlv_t *tlv;
+ cur = t->data;
+ while (((cur + lldp_tlv_get_length ((lldp_tlv_t *) cur)) <
+ t->data + t->len))
+ {
+ tlv = (lldp_tlv_t *) cur;
+ if (cur == t->data)
+ {
+ s = format (s, "TLV #%d(%s): %U\n", lldp_tlv_get_code (tlv),
+ lldp_tlv_code_str (lldp_tlv_get_code (tlv)),
+ format_lldp_tlv, tlv);
+ }
+ else
+ {
+ s = format (s, " TLV #%d(%s): %U\n", lldp_tlv_get_code (tlv),
+ lldp_tlv_code_str (lldp_tlv_get_code (tlv)),
+ format_lldp_tlv, tlv);
+ }
+ cur += STRUCT_SIZE_OF (lldp_tlv_t, head) + lldp_tlv_get_length (tlv);
+ }
+
+ return s;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lldp/lldp_doc.md b/src/vnet/lldp/lldp_doc.md
new file mode 100644
index 00000000000..bac480a51d3
--- /dev/null
+++ b/src/vnet/lldp/lldp_doc.md
@@ -0,0 +1,84 @@
+# VPP Link Layer Discovery Protocol (LLDP) implementation {#lldp_doc}
+
+This is a memo intended to contain documentation of the VPP LLDP implementation.
+Everything that is not directly obvious should be documented here.
+
+
+## LLDP
+LLDP is a link layer protocol to advertise the capabilities and current status of the system.
+
+There are two nodes handling LLDP:
+
+1. input-node, which processes incoming packets and updates the local database
+2. process-node, which sends out LLDP packets from the VPP side
+
+
+### Configuration
+
+LLDP has a global configuration and a per-interface enable setting.
+
+Global configuration is modified using the "set lldp" command:
+
+set lldp [system-name <string>] [tx-hold <value>] [tx-interval <value>]
+
+system-name: the name of the VPP system sent to peers in the system-name TLV
+tx-hold: multiplier of tx-interval used to compute the time-to-live (TTL) value carried in LLDP packets (TTL = tx-hold * tx-interval + 1; if TTL > 65535, then TTL = 65535)
+tx-interval: time interval between sending out LLDP packets
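+
+For example, with the default tx-hold of 4 and the default tx-interval of 30 seconds, the advertised TTL is 4 * 30 + 1 = 121 seconds.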
+
+Per-interface setting is done using the "set interface lldp" command:
+
+set interface lldp <interface> (enable | disable)
+
+interface: the name of the interface for which to enable/disable LLDP
+
+
+### Configuration example
+
+Configure the system name as "VPP" and the transmit interval as 10 seconds:
+
+set lldp system-name VPP tx-interval 10
+
+Enable LLDP on interface TenGigabitEthernet5/0/1:
+
+set interface lldp TenGigabitEthernet5/0/1 enable
+
+
+### Operational data
+
+The list of LLDP-enabled interfaces which are up can be shown using the "show lldp" command.
+
+Example:
+DBGvpp# show lldp
+Local interface           Peer chassis ID           Remote port ID               Last heard      Last sent      Status
+GigabitEthernet2/0/1                                                               never         27.0s ago     inactive
+TenGigabitEthernet5/0/1   8c:60:4f:dd:ca:52         Eth1/3/3                      20.1s ago      18.3s ago      active
+
+All LLDP configuration data with all LLDP-enabled interfaces can be shown using the "show lldp detail" command.
+
+Example:
+DBGvpp# show lldp detail
+LLDP configuration:
+Configured system name: vpp
+Configured tx-hold: 4
+Configured tx-interval: 30
+
+LLDP-enabled interface table:
+
+Interface name: GigabitEthernet2/0/1
+Interface/peer state: inactive(timeout)
+Last known peer chassis ID:
+Last known peer port ID:
+Last packet sent: 12.4s ago
+Last packet received: never
+
+Interface name: GigabitEthernet2/0/2
+Interface/peer state: interface down
+Last packet sent: never
+
+Interface name: TenGigabitEthernet5/0/1
+Interface/peer state: active
+Peer chassis ID: 8c:60:4f:dd:ca:52(MAC address)
+Remote port ID: Eth1/3/3(Locally assigned)
+Last packet sent: 3.6s ago
+Last packet received: 5.5s ago
+
diff --git a/src/vnet/lldp/lldp_input.c b/src/vnet/lldp/lldp_input.c
new file mode 100644
index 00000000000..762743d0d8d
--- /dev/null
+++ b/src/vnet/lldp/lldp_input.c
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief LLDP packet parsing implementation
+ */
+#include <vnet/lldp/lldp_node.h>
+#include <vnet/lldp/lldp_protocol.h>
+#include <vlibmemory/api.h>
+
+typedef struct
+{
+ u32 hw_if_index;
+ u8 chassis_id_len;
+ u8 chassis_id_subtype;
+ u8 portid_len;
+ u8 portid_subtype;
+ u16 ttl;
+ u8 data[0]; /* this contains both chassis id (chassis_id_len bytes) and port
+ id (portid_len bytes) */
+} lldp_intf_update_t;
+
+static void
+lldp_rpc_update_peer_cb (const lldp_intf_update_t * a)
+{
+ ASSERT (os_get_cpu_number () == 0);
+
+ lldp_intf_t *n = lldp_get_intf (&lldp_main, a->hw_if_index);
+ if (!n)
+ {
+ /* LLDP turned off for this interface, ignore the update */
+ return;
+ }
+ const u8 *chassis_id = a->data;
+ const u8 *portid = a->data + a->chassis_id_len;
+
+ if (n->chassis_id)
+ {
+ _vec_len (n->chassis_id) = 0;
+ }
+ vec_add (n->chassis_id, chassis_id, a->chassis_id_len);
+ n->chassis_id_subtype = a->chassis_id_subtype;
+ if (n->port_id)
+ {
+ _vec_len (n->port_id) = 0;
+ }
+ vec_add (n->port_id, portid, a->portid_len);
+ n->port_id_subtype = a->portid_subtype;
+ n->ttl = a->ttl;
+ n->last_heard = vlib_time_now (lldp_main.vlib_main);
+}
+
+static void
+lldp_rpc_update_peer (u32 hw_if_index, const u8 * chid, u8 chid_len,
+ u8 chid_subtype, const u8 * portid,
+ u8 portid_len, u8 portid_subtype, u16 ttl)
+{
+ const size_t data_size =
+ sizeof (lldp_intf_update_t) + chid_len + portid_len;
+ u8 data[data_size];
+ lldp_intf_update_t *u = (lldp_intf_update_t *) data;
+ u->hw_if_index = hw_if_index;
+ u->chassis_id_len = chid_len;
+ u->chassis_id_subtype = chid_subtype;
+ u->ttl = ttl;
+ u->portid_len = portid_len;
+ u->portid_subtype = portid_subtype;
+ clib_memcpy (u->data, chid, chid_len);
+ clib_memcpy (u->data + chid_len, portid, portid_len);
+ vl_api_rpc_call_main_thread (lldp_rpc_update_peer_cb, data, data_size);
+}
+
+lldp_tlv_code_t
+lldp_tlv_get_code (const lldp_tlv_t * tlv)
+{
+ return tlv->head.byte1 >> 1;
+}
+
+void
+lldp_tlv_set_code (lldp_tlv_t * tlv, lldp_tlv_code_t code)
+{
+ tlv->head.byte1 = (tlv->head.byte1 & 1) + (code << 1);
+}
+
+u16
+lldp_tlv_get_length (const lldp_tlv_t * tlv)
+{
+ return (((u16) (tlv->head.byte1 & 1)) << 8) + tlv->head.byte2;
+}
+
+void
+lldp_tlv_set_length (lldp_tlv_t * tlv, u16 length)
+{
+ tlv->head.byte2 = length & ((1 << 8) - 1);
+ if (length > (1 << 8) - 1)
+ {
+ tlv->head.byte1 |= 1;
+ }
+ else
+ {
+ tlv->head.byte1 &= (1 << 8) - 2;
+ }
+}
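+
+/* A quick sketch of the TLV head packing used by the four helpers
+ * above: the 7-bit type code lives in the upper bits of byte1, and the
+ * 9-bit length is split between the LSB of byte1 and all of byte2.
+ * For example, a sys_name TLV (code 5) with a 3-byte value:
+ *
+ *   byte1 = (5 << 1) | (3 >> 8) = 0x0a
+ *   byte2 = 3 & 0xff            = 0x03
+ *
+ * and the getters invert this: code = 0x0a >> 1 = 5,
+ * length = ((0x0a & 1) << 8) | 0x03 = 3.
+ */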
+
+lldp_main_t lldp_main;
+
+static int
+lldp_packet_scan (u32 hw_if_index, const lldp_tlv_t * pkt)
+{
+ const lldp_tlv_t *tlv = pkt;
+
+#define TLV_VIOLATES_PKT_BOUNDARY(pkt, tlv) \
+ (((((u8 *)tlv) + sizeof (lldp_tlv_t)) > ((u8 *)pkt + vec_len (pkt))) || \
+ ((((u8 *)tlv) + lldp_tlv_get_length (tlv)) > ((u8 *)pkt + vec_len (pkt))))
+
+ /* first tlv is always chassis id, followed by port id and ttl tlvs */
+ if (TLV_VIOLATES_PKT_BOUNDARY (pkt, tlv) ||
+ LLDP_TLV_NAME (chassis_id) != lldp_tlv_get_code (tlv))
+ {
+ return LLDP_ERROR_BAD_TLV;
+ }
+
+ u16 l = lldp_tlv_get_length (tlv);
+ if (l < STRUCT_SIZE_OF (lldp_chassis_id_tlv_t, subtype) +
+ LLDP_MIN_CHASS_ID_LEN ||
+ l > STRUCT_SIZE_OF (lldp_chassis_id_tlv_t, subtype) +
+ LLDP_MAX_CHASS_ID_LEN)
+ {
+ return LLDP_ERROR_BAD_TLV;
+ }
+
+ u8 chid_subtype = ((lldp_chassis_id_tlv_t *) tlv)->subtype;
+ u8 *chid = ((lldp_chassis_id_tlv_t *) tlv)->id;
+ u8 chid_len = l - STRUCT_SIZE_OF (lldp_chassis_id_tlv_t, subtype);
+
+ tlv = (lldp_tlv_t *) ((u8 *) tlv + STRUCT_SIZE_OF (lldp_tlv_t, head) + l);
+
+ if (TLV_VIOLATES_PKT_BOUNDARY (pkt, tlv) ||
+ LLDP_TLV_NAME (port_id) != lldp_tlv_get_code (tlv))
+ {
+ return LLDP_ERROR_BAD_TLV;
+ }
+ l = lldp_tlv_get_length (tlv);
+ if (l < STRUCT_SIZE_OF (lldp_port_id_tlv_t, subtype) +
+ LLDP_MIN_PORT_ID_LEN ||
+ l > STRUCT_SIZE_OF (lldp_port_id_tlv_t, subtype) +
+ LLDP_MAX_PORT_ID_LEN)
+ {
+ return LLDP_ERROR_BAD_TLV;
+ }
+
+ u8 portid_subtype = ((lldp_port_id_tlv_t *) tlv)->subtype;
+ u8 *portid = ((lldp_port_id_tlv_t *) tlv)->id;
+ u8 portid_len = l - STRUCT_SIZE_OF (lldp_port_id_tlv_t, subtype);
+
+ tlv = (lldp_tlv_t *) ((u8 *) tlv + STRUCT_SIZE_OF (lldp_tlv_t, head) + l);
+
+ if (TLV_VIOLATES_PKT_BOUNDARY (pkt, tlv) ||
+ LLDP_TLV_NAME (ttl) != lldp_tlv_get_code (tlv))
+ {
+ return LLDP_ERROR_BAD_TLV;
+ }
+ l = lldp_tlv_get_length (tlv);
+ if (l != STRUCT_SIZE_OF (lldp_ttl_tlv_t, ttl))
+ {
+ return LLDP_ERROR_BAD_TLV;
+ }
+ u16 ttl = ntohs (((lldp_ttl_tlv_t *) tlv)->ttl);
+ tlv = (lldp_tlv_t *) ((u8 *) tlv + STRUCT_SIZE_OF (lldp_tlv_t, head) + l);
+ while (!TLV_VIOLATES_PKT_BOUNDARY (pkt, tlv) &&
+ LLDP_TLV_NAME (pdu_end) != lldp_tlv_get_code (tlv))
+ {
+ switch (lldp_tlv_get_code (tlv))
+ {
+#define F(num, type, str) \
+ case LLDP_TLV_NAME (type): \
+ /* ignore optional TLV */ \
+ break;
+ foreach_lldp_optional_tlv_type (F);
+#undef F
+ default:
+ return LLDP_ERROR_BAD_TLV;
+ }
+ tlv = (lldp_tlv_t *) ((u8 *) tlv + STRUCT_SIZE_OF (lldp_tlv_t, head) +
+ lldp_tlv_get_length (tlv));
+ }
+ /* last tlv is pdu_end */
+ if (TLV_VIOLATES_PKT_BOUNDARY (pkt, tlv) ||
+ LLDP_TLV_NAME (pdu_end) != lldp_tlv_get_code (tlv) ||
+ 0 != lldp_tlv_get_length (tlv))
+ {
+ return LLDP_ERROR_BAD_TLV;
+ }
+ lldp_rpc_update_peer (hw_if_index, chid, chid_len, chid_subtype, portid,
+ portid_len, portid_subtype, ttl);
+ return LLDP_ERROR_NONE;
+}
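+
+/* For reference, a minimal LLDPDU that passes the scan above - the
+ * three mandatory TLVs in order, followed by the pdu_end TLV. Byte
+ * values are illustrative:
+ *
+ *   0x02 0x07 0x04 <6-byte MAC>   chassis_id, len 7, subtype mac_addr
+ *   0x04 0x02 0x07 '1'            port_id, len 2, subtype local
+ *   0x06 0x02 0x00 0x78           ttl, len 2, ttl = 120 seconds
+ *   0x00 0x00                     pdu_end, len 0
+ */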
+
+lldp_intf_t *
+lldp_get_intf (lldp_main_t * lm, u32 hw_if_index)
+{
+ uword *p = hash_get (lm->intf_by_hw_if_index, hw_if_index);
+
+ if (p)
+ {
+ return pool_elt_at_index (lm->intfs, p[0]);
+ }
+ return NULL;
+}
+
+lldp_intf_t *
+lldp_create_intf (lldp_main_t * lm, u32 hw_if_index)
+{
+
+ uword *p;
+ lldp_intf_t *n;
+ p = hash_get (lm->intf_by_hw_if_index, hw_if_index);
+
+ if (p == 0)
+ {
+ pool_get (lm->intfs, n);
+ memset (n, 0, sizeof (*n));
+ n->hw_if_index = hw_if_index;
+ hash_set (lm->intf_by_hw_if_index, n->hw_if_index, n - lm->intfs);
+ }
+ else
+ {
+ n = pool_elt_at_index (lm->intfs, p[0]);
+ }
+ return n;
+}
+
+/*
+ * lldp input routine
+ */
+lldp_error_t
+lldp_input (vlib_main_t * vm, vlib_buffer_t * b0, u32 bi0)
+{
+ lldp_main_t *lm = &lldp_main;
+ lldp_error_t e;
+
+ /* find our interface */
+ vnet_sw_interface_t *sw_interface = vnet_get_sw_interface (lm->vnet_main,
+ vnet_buffer
+ (b0)->sw_if_index
+ [VLIB_RX]);
+ lldp_intf_t *n = lldp_get_intf (lm, sw_interface->hw_if_index);
+
+ if (!n)
+ {
+ /* lldp disabled on this interface, we're done */
+ return LLDP_ERROR_DISABLED;
+ }
+
+ /* Actually scan the packet */
+ e = lldp_packet_scan (sw_interface->hw_if_index,
+ vlib_buffer_get_current (b0));
+
+ return e;
+}
+
+/*
+ * setup function
+ */
+static clib_error_t *
+lldp_init (vlib_main_t * vm)
+{
+ clib_error_t *error;
+ lldp_main_t *lm = &lldp_main;
+
+ if ((error = vlib_call_init_function (vm, lldp_template_init)))
+ return error;
+
+ lm->vlib_main = vm;
+ lm->vnet_main = vnet_get_main ();
+ lm->msg_tx_hold = 4; /* default value per IEEE 802.1AB-2009 */
+ lm->msg_tx_interval = 30; /* default value per IEEE 802.1AB-2009 */
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (lldp_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lldp/lldp_node.c b/src/vnet/lldp/lldp_node.c
new file mode 100644
index 00000000000..acaa5e10e36
--- /dev/null
+++ b/src/vnet/lldp/lldp_node.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief LLDP nodes implementation
+ */
+#include <vnet/lldp/lldp_node.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ethernet/packet.h>
+
+/* set this to 1 to turn on debug prints via clib_warning() */
+#define LLDP_DEBUG (0)
+
+static vlib_node_registration_t lldp_process_node;
+
+#define F(sym, string) static char LLDP_ERR_##sym##_STR[] = string;
+foreach_lldp_error (F);
+#undef F
+
+/*
+ * packet counter strings
+ * Dump these counters via the "show error" CLI command
+ */
+static char *lldp_error_strings[] = {
+#define F(sym, string) LLDP_ERR_##sym##_STR,
+ foreach_lldp_error (F)
+#undef F
+};
+
+/*
+ * We actually send all lldp pkts to the "error" node after scanning
+ * them, so the graph node has only one next-index. The "error-drop"
+ * node automatically bumps our per-node packet counters for us.
+ */
+typedef enum
+{
+ LLDP_INPUT_NEXT_NORMAL,
+ LLDP_INPUT_N_NEXT,
+} lldp_next_t;
+
+/*
+ * Process a frame of lldp packets
+ * Expect 1 packet / frame
+ */
+static uword
+lldp_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from;
+ lldp_input_trace_t *t0;
+
+ from = vlib_frame_vector_args (frame); /* array of buffer indices */
+ n_left_from = frame->n_vectors; /* number of buffer indices */
+
+ while (n_left_from > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0, error0;
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+
+ next0 = LLDP_INPUT_NEXT_NORMAL;
+
+ /* scan this lldp pkt. error0 is the counter index to bump */
+ error0 = lldp_input (vm, b0, bi0);
+ b0->error = node->errors[error0];
+
+ /* If this pkt is traced, snapshot the data */
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ int len;
+ t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
+ len = (b0->current_length < sizeof (t0->data)) ? b0->current_length
+ : sizeof (t0->data);
+ t0->len = len;
+ clib_memcpy (t0->data, vlib_buffer_get_current (b0), len);
+ }
+ /* push this pkt to the next graph node, always error-drop */
+ vlib_set_next_frame_buffer (vm, node, next0, bi0);
+
+ from += 1;
+ n_left_from -= 1;
+ }
+
+ return frame->n_vectors;
+}
+
+/*
+ * lldp input graph node declaration
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(lldp_input_node, static) = {
+ .function = lldp_node_fn,
+ .name = "lldp-input",
+ .vector_size = sizeof(u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = LLDP_N_ERROR,
+ .error_strings = lldp_error_strings,
+
+ .format_trace = lldp_input_format_trace,
+
+ .n_next_nodes = LLDP_INPUT_N_NEXT,
+ .next_nodes =
+ {
+ [LLDP_INPUT_NEXT_NORMAL] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * lldp process node function
+ */
+static uword
+lldp_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ lldp_main_t *lm = &lldp_main;
+ f64 timeout = 0;
+ uword event_type, *event_data = 0;
+
+ /* So we can send events to the lldp process */
+ lm->lldp_process_node_index = lldp_process_node.index;
+
+ /* register to receive LLDP ethertype frames from ethernet-input */
+ ethernet_register_input_type (vm, ETHERNET_TYPE_802_1_LLDP /* LLDP */ ,
+ lldp_input_node.index);
+
+ while (1)
+ {
+ if (vec_len (lm->intfs_timeouts))
+ {
+#if LLDP_DEBUG
+ clib_warning ("DEBUG: wait for event with timeout %f", timeout);
+#endif
+ (void) vlib_process_wait_for_event_or_clock (vm, timeout);
+ }
+ else
+ {
+#if LLDP_DEBUG
+ clib_warning ("DEBUG: wait for event without timeout");
+#endif
+ (void) vlib_process_wait_for_event (vm);
+ }
+ event_type = vlib_process_get_events (vm, &event_data);
+ switch (event_type)
+ {
+ case ~0: /* no events => timeout */
+ /* nothing to do here */
+ break;
+ case LLDP_EVENT_RESCHEDULE:
+ /* nothing to do here - reschedule is done automatically after
+ * each event or timeout */
+ break;
+ default:
+ clib_warning ("BUG: event type 0x%wx", event_type);
+ break;
+ }
+ if (!vec_len (lm->intfs_timeouts))
+ {
+ continue;
+ }
+ /* send packet(s) and schedule another timeout */
+ const f64 now = vlib_time_now (lm->vlib_main);
+ while (1)
+ {
+ lldp_intf_t *n = pool_elt_at_index (lm->intfs,
+ lm->intfs_timeouts
+ [lm->intfs_timeouts_idx]);
+ if (n->last_sent < 0.01 || now > n->last_sent + lm->msg_tx_interval)
+ {
+#if LLDP_DEBUG
+ clib_warning ("send packet to lldp %p, if idx %d", n,
+ n->hw_if_index);
+#endif
+ lldp_send_ethernet (lm, n, 0);
+ ++lm->intfs_timeouts_idx;
+ if (lm->intfs_timeouts_idx >= vec_len (lm->intfs_timeouts))
+ {
+ lm->intfs_timeouts_idx = 0;
+ }
+ continue;
+ }
+ else
+ {
+ timeout = n->last_sent + lm->msg_tx_interval - now;
+ break;
+ }
+ }
+#if LLDP_DEBUG
+ clib_warning ("DEBUG: timeout set to %f", timeout);
+ u8 *s = NULL;
+ u32 i;
+ vec_foreach_index (i, lm->intfs_timeouts)
+ {
+ if (i == lm->intfs_timeouts_idx)
+ {
+ s = format (s, " [%d]", lm->intfs_timeouts[i]);
+ }
+ else
+ {
+ s = format (s, " %d", lm->intfs_timeouts[i]);
+ }
+ }
+ clib_warning ("DEBUG: timeout schedule: %s", s);
+ vec_free (s);
+#endif
+ if (event_data)
+ {
+ _vec_len (event_data) = 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * lldp process node declaration
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(lldp_process_node, static) = {
+ .function = lldp_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "lldp-process",
+};
+/* *INDENT-ON* */
+
+void
+lldp_schedule_intf (lldp_main_t * lm, lldp_intf_t * n)
+{
+ const int idx = n - lm->intfs;
+ u32 v;
+ vec_foreach_index (v, lm->intfs_timeouts)
+ {
+ if (lm->intfs_timeouts[v] == idx)
+ {
+ /* already scheduled */
+ return;
+ }
+ }
+ n->last_sent = 0; /* ensure that a packet is sent out immediately */
+ /* put the interface at the current position in the timeouts - it
+ * will time out immediately */
+ vec_insert (lm->intfs_timeouts, 1, lm->intfs_timeouts_idx);
+ lm->intfs_timeouts[lm->intfs_timeouts_idx] = n - lm->intfs;
+ vlib_process_signal_event (lm->vlib_main, lm->lldp_process_node_index,
+ LLDP_EVENT_RESCHEDULE, 0);
+#if LLDP_DEBUG
+ clib_warning ("DEBUG: schedule interface %p, if idx %d", n, n->hw_if_index);
+#endif
+}
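+
+/* The timeouts vector acts as a ring walked by lldp-process:
+ * inserting the new interface at intfs_timeouts_idx (with last_sent
+ * zeroed above) makes it the very next one visited, so its first
+ * packet goes out immediately. E.g. with timeouts [a b c] and idx 1,
+ * scheduling n yields [a n b c] with idx still 1, pointing at n.
+ */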
+
+void
+lldp_unschedule_intf (lldp_main_t * lm, lldp_intf_t * n)
+{
+ if (!n)
+ {
+ return;
+ }
+#if LLDP_DEBUG
+ clib_warning ("DEBUG: unschedule interface %p, if idx %d", n,
+ n->hw_if_index);
+#endif
+ const int idx = n - lm->intfs;
+ u32 v;
+ /* remove intf index from timeouts vector */
+ vec_foreach_index (v, lm->intfs_timeouts)
+ {
+ if (lm->intfs_timeouts[v] == idx)
+ {
+ vec_delete (lm->intfs_timeouts, 1, v);
+ break;
+ }
+ }
+ /* wrap current timeout index to first element if needed */
+ if (lm->intfs_timeouts_idx >= vec_len (lm->intfs_timeouts))
+ {
+ lm->intfs_timeouts_idx = 0;
+ }
+ vlib_process_signal_event (lm->vlib_main, lm->lldp_process_node_index,
+ LLDP_EVENT_RESCHEDULE, 0);
+}
+
+static clib_error_t *
+lldp_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
+{
+ lldp_main_t *lm = &lldp_main;
+ vnet_hw_interface_t *hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ lldp_intf_t *n = lldp_get_intf (lm, hi->hw_if_index);
+ if (n)
+ {
+ if (!(flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP))
+ {
+ /* FIXME - the packet sent here isn't sent properly - need to find a
+ * way to send the packet before the interface goes down */
+ lldp_send_ethernet (lm, n, 1);
+ lldp_unschedule_intf (lm, n);
+ }
+ }
+ return 0;
+}
+
+VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (lldp_sw_interface_up_down);
+
+static clib_error_t *
+lldp_hw_interface_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+{
+ lldp_main_t *lm = &lldp_main;
+ lldp_intf_t *n = lldp_get_intf (lm, hw_if_index);
+ if (n)
+ {
+ if (flags & VNET_HW_INTERFACE_FLAG_LINK_UP)
+ {
+ lldp_schedule_intf (lm, n);
+ }
+ }
+ return 0;
+}
+
+VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION (lldp_hw_interface_up_down);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lldp/lldp_node.h b/src/vnet/lldp/lldp_node.h
new file mode 100644
index 00000000000..477ca7dc691
--- /dev/null
+++ b/src/vnet/lldp/lldp_node.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief LLDP global declarations
+ */
+#ifndef __included_lldp_node_h__
+#define __included_lldp_node_h__
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/snap/snap.h>
+#include <vppinfra/format.h>
+#include <vppinfra/hash.h>
+
+#include <vnet/lldp/lldp_protocol.h>
+
+typedef struct lldp_intf
+{
+ /* hw interface index */
+ u32 hw_if_index;
+
+ /* Timers */
+ f64 last_heard;
+ f64 last_sent;
+
+ /* Info received from peer */
+ u8 *chassis_id;
+ u8 *port_id;
+ u16 ttl;
+ lldp_port_id_subtype_t port_id_subtype;
+ lldp_chassis_id_subtype_t chassis_id_subtype;
+
+} lldp_intf_t;
+
+typedef struct
+{
+ /* pool of lldp-enabled interface context data */
+ lldp_intf_t *intfs;
+
+ /* rapidly find an interface by vlib hw interface index */
+ uword *intf_by_hw_if_index;
+
+ /* Background process node index */
+ u32 lldp_process_node_index;
+
+ /* interface idxs (into intfs pool) in the order of timing out */
+ u32 *intfs_timeouts;
+
+ /* index of the interface which will time out next */
+ u32 intfs_timeouts_idx;
+
+ /* packet template for sending out packets */
+ vlib_packet_template_t packet_template;
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+
+ /* system name advertised over LLDP (default is none) */
+ u8 *sys_name;
+
+ /* IEEE Std 802.1AB-2009:
+ * 9.2.5.6 msgTxHold
+ * This variable is used, as a multiplier of msgTxInterval, to determine the
+ * value of txTTL that is carried in LLDP frames transmitted by the LLDP
+ * agent. The recommended default value of msgTxHold is 4; this value can
+ * be changed by management to any value in the range 1 through 100.
+ */
+ u8 msg_tx_hold;
+
+ /* IEEE Std 802.1AB-2009:
+ * 9.2.5.7 msgTxInterval
+ * This variable defines the time interval in timer ticks between
+ * transmissions during normal transmission periods (i.e., txFast is zero).
+ * The recommended default value for msgTxInterval is 30 s; this value can
+ * be changed by management to any value in the range 1 through 3600.
+ */
+ u16 msg_tx_interval;
+} lldp_main_t;
+
+#define LLDP_MIN_TX_HOLD (1)
+#define LLDP_MAX_TX_HOLD (100)
+#define LLDP_MIN_TX_INTERVAL (1)
+#define LLDP_MAX_TX_INTERVAL (3600)
+
+extern lldp_main_t lldp_main;
+
+/* Packet counters */
+#define foreach_lldp_error(F) \
+ F(NONE, "good lldp packets (processed)") \
+ F(CACHE_HIT, "good lldp packets (cache hit)") \
+ F(BAD_TLV, "lldp packets with bad TLVs") \
+ F(DISABLED, "lldp packets received on disabled interfaces")
+
+typedef enum
+{
+#define F(sym, str) LLDP_ERROR_##sym,
+ foreach_lldp_error (F)
+#undef F
+ LLDP_N_ERROR,
+} lldp_error_t;
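+
+/* The F() X-macro above expands to the enum on the left and, in
+ * lldp_node.c, a parallel string table - sketched here for clarity:
+ *
+ *   LLDP_ERROR_NONE = 0,    "good lldp packets (processed)"
+ *   LLDP_ERROR_CACHE_HIT,   "good lldp packets (cache hit)"
+ *   LLDP_ERROR_BAD_TLV,     "lldp packets with bad TLVs"
+ *   LLDP_ERROR_DISABLED,    "lldp packets received on disabled interfaces"
+ *   LLDP_N_ERROR            == 4
+ */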
+
+/* lldp packet trace capture */
+typedef struct
+{
+ u32 len;
+ u8 data[400];
+} lldp_input_trace_t;
+
+enum
+{
+ LLDP_EVENT_RESCHEDULE = 1,
+} lldp_process_event_t;
+
+lldp_intf_t *lldp_get_intf (lldp_main_t * lm, u32 hw_if_index);
+lldp_intf_t *lldp_create_intf (lldp_main_t * lm, u32 hw_if_index);
+void lldp_delete_intf (lldp_main_t * lm, lldp_intf_t * n);
+lldp_error_t lldp_input (vlib_main_t * vm, vlib_buffer_t * b0, u32 bi0);
+u8 *lldp_input_format_trace (u8 * s, va_list * args);
+void lldp_send_ethernet (lldp_main_t * lm, lldp_intf_t * n, int shutdown);
+void lldp_schedule_intf (lldp_main_t * lm, lldp_intf_t * n);
+void lldp_unschedule_intf (lldp_main_t * lm, lldp_intf_t * n);
+
+#endif /* __included_lldp_node_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lldp/lldp_output.c b/src/vnet/lldp/lldp_output.c
new file mode 100644
index 00000000000..6cb2627098b
--- /dev/null
+++ b/src/vnet/lldp/lldp_output.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief LLDP packet generation implementation
+ */
+#include <vnet/lldp/lldp_node.h>
+
+static void
+lldp_add_chassis_id (const vnet_hw_interface_t * hw, u8 ** t0p)
+{
+ lldp_chassis_id_tlv_t *t = (lldp_chassis_id_tlv_t *) * t0p;
+
+ lldp_tlv_set_code ((lldp_tlv_t *) t, LLDP_TLV_NAME (chassis_id));
+ t->subtype = LLDP_CHASS_ID_SUBTYPE_NAME (mac_addr);
+
+ const size_t addr_len = 6;
+ clib_memcpy (&t->id, hw->hw_address, addr_len);
+ const size_t len =
+ STRUCT_SIZE_OF (lldp_chassis_id_tlv_t, subtype) + addr_len;
+ lldp_tlv_set_length ((lldp_tlv_t *) t, len);
+ *t0p += STRUCT_SIZE_OF (lldp_tlv_t, head) + len;
+}
+
+static void
+lldp_add_port_id (const vnet_hw_interface_t * hw, u8 ** t0p)
+{
+ lldp_port_id_tlv_t *t = (lldp_port_id_tlv_t *) * t0p;
+
+ lldp_tlv_set_code ((lldp_tlv_t *) t, LLDP_TLV_NAME (port_id));
+ t->subtype = LLDP_PORT_ID_SUBTYPE_NAME (intf_name);
+
+ const size_t name_len = vec_len (hw->name);
+ clib_memcpy (&t->id, hw->name, name_len);
+ const size_t len = STRUCT_SIZE_OF (lldp_port_id_tlv_t, subtype) + name_len;
+ lldp_tlv_set_length ((lldp_tlv_t *) t, len);
+ *t0p += STRUCT_SIZE_OF (lldp_tlv_t, head) + len;
+}
+
+static void
+lldp_add_ttl (const lldp_main_t * lm, u8 ** t0p, int shutdown)
+{
+ lldp_ttl_tlv_t *t = (lldp_ttl_tlv_t *) * t0p;
+ lldp_tlv_set_code ((lldp_tlv_t *) t, LLDP_TLV_NAME (ttl));
+ if (shutdown)
+ {
+ t->ttl = 0;
+ }
+ else
+ {
+ if ((size_t) lm->msg_tx_interval * lm->msg_tx_hold + 1 > (1 << 16) - 1)
+ {
+ t->ttl = htons ((1 << 16) - 1);
+ }
+ else
+ {
+ t->ttl = htons (lm->msg_tx_hold * lm->msg_tx_interval + 1);
+ }
+ }
+ const size_t len = STRUCT_SIZE_OF (lldp_ttl_tlv_t, ttl);
+ lldp_tlv_set_length ((lldp_tlv_t *) t, len);
+ *t0p += STRUCT_SIZE_OF (lldp_tlv_t, head) + len;
+}
+
+static void
+lldp_add_sys_name (const lldp_main_t * lm, u8 ** t0p)
+{
+ const size_t len = vec_len (lm->sys_name);
+ if (len)
+ {
+ lldp_tlv_t *t = (lldp_tlv_t *) * t0p;
+ lldp_tlv_set_code (t, LLDP_TLV_NAME (sys_name));
+ lldp_tlv_set_length (t, len);
+ clib_memcpy (t->v, lm->sys_name, len);
+ *t0p += STRUCT_SIZE_OF (lldp_tlv_t, head) + len;
+ }
+}
+
+static void
+lldp_add_pdu_end (u8 ** t0p)
+{
+ lldp_tlv_t *t = (lldp_tlv_t *) * t0p;
+ lldp_tlv_set_code (t, LLDP_TLV_NAME (pdu_end));
+ lldp_tlv_set_length (t, 0);
+ *t0p += STRUCT_SIZE_OF (lldp_tlv_t, head);
+}
+
+static void
+lldp_add_tlvs (lldp_main_t * lm, vnet_hw_interface_t * hw, u8 ** t0p,
+ int shutdown)
+{
+ lldp_add_chassis_id (hw, t0p);
+ lldp_add_port_id (hw, t0p);
+ lldp_add_ttl (lm, t0p, shutdown);
+ lldp_add_sys_name (lm, t0p);
+ lldp_add_pdu_end (t0p);
+}
+
+/*
+ * send a lldp pkt on an ethernet interface
+ */
+void
+lldp_send_ethernet (lldp_main_t * lm, lldp_intf_t * n, int shutdown)
+{
+ u32 *to_next;
+ ethernet_header_t *h0;
+ vnet_hw_interface_t *hw;
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u8 *t0;
+ vlib_frame_t *f;
+ vlib_main_t *vm = lm->vlib_main;
+ vnet_main_t *vnm = lm->vnet_main;
+
+ /*
+ * see lldp_template_init() to understand what's already painted
+ * into the buffer by the packet template mechanism
+ */
+ h0 = vlib_packet_template_get_packet (vm, &lm->packet_template, &bi0);
+
+ /* Add the interface's ethernet source address */
+ hw = vnet_get_hw_interface (vnm, n->hw_if_index);
+
+ clib_memcpy (h0->src_address, hw->hw_address, vec_len (hw->hw_address));
+
+ u8 *data = ((u8 *) h0) + sizeof (*h0);
+ t0 = data;
+
+ /* add TLVs */
+ lldp_add_tlvs (lm, hw, &t0, shutdown);
+
+ /* Set the outbound packet length */
+ b0 = vlib_get_buffer (vm, bi0);
+ b0->current_length = sizeof (*h0) + t0 - data;
+
+ /* And the outbound interface */
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = hw->sw_if_index;
+
+ /* And output the packet on the correct interface */
+ f = vlib_get_frame_to_node (vm, hw->output_node_index);
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi0;
+ f->n_vectors = 1;
+
+ vlib_put_frame_to_node (vm, hw->output_node_index, f);
+ n->last_sent = vlib_time_now (vm);
+}
+
+void
+lldp_delete_intf (lldp_main_t * lm, lldp_intf_t * n)
+{
+ if (n)
+ {
+ lldp_unschedule_intf (lm, n);
+ hash_unset (lm->intf_by_hw_if_index, n->hw_if_index);
+ vec_free (n->chassis_id);
+ vec_free (n->port_id);
+ pool_put (lm->intfs, n);
+ }
+}
+
+static clib_error_t *
+lldp_template_init (vlib_main_t * vm)
+{
+ lldp_main_t *lm = &lldp_main;
+
+ /* Create the ethernet lldp packet template */
+ {
+ ethernet_header_t h;
+
+ memset (&h, 0, sizeof (h));
+
+ /*
+ * Send to 01:80:C2:00:00:0E - propagation constrained to a single
+ * physical link - stopped by all types of bridges
+ */
+ h.dst_address[0] = 0x01;
+ h.dst_address[1] = 0x80;
+ h.dst_address[2] = 0xC2;
+ /* h.dst_address[3] = 0x00; (memset) */
+ /* h.dst_address[4] = 0x00; (memset) */
+ h.dst_address[5] = 0x0E;
+
+ /* leave src address blank (fill in at send time) */
+
+ h.type = htons (ETHERNET_TYPE_802_1_LLDP);
+
+ vlib_packet_template_init (vm, &lm->packet_template,
+ /* data */ &h, sizeof (h),
+ /* alloc chunk size */ 8, "lldp-ethernet");
+ }
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (lldp_template_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/lldp/lldp_protocol.h b/src/vnet/lldp/lldp_protocol.h
new file mode 100644
index 00000000000..e641b26e20d
--- /dev/null
+++ b/src/vnet/lldp/lldp_protocol.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2011-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_lldp_protocol_h__
+#define __included_lldp_protocol_h__
+/**
+ * @file
+ * @brief LLDP protocol declarations
+ */
+#include <vnet/srp/packet.h>
+
+/*
+ * optional TLV codes.
+ */
+#define foreach_lldp_optional_tlv_type(F) \
+ F (4, port_desc, "Port Description") \
+ F (5, sys_name, "System name") \
+ F (6, sys_desc, "System Description") \
+ F (7, sys_caps, "System Capabilities") \
+ F (8, mgmt_addr, "Management Address") \
+ F (127, org_spec, "Organizationally Specific TLV")
+
+/*
+ * all TLV codes.
+ */
+#define foreach_lldp_tlv_type(F) \
+ F (0, pdu_end, "End of LLDPDU") \
+ F (1, chassis_id, "Chassis ID") \
+ F (2, port_id, "Port ID") \
+ F (3, ttl, "Time To Live") \
+ foreach_lldp_optional_tlv_type (F)
+
+#define LLDP_TLV_NAME(t) LLDP_TLV_##t
+
+typedef enum
+{
+#define F(n, t, s) LLDP_TLV_NAME (t) = n,
+ foreach_lldp_tlv_type (F)
+#undef F
+} lldp_tlv_code_t;
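+
+/*
+ * Editor's note: after preprocessing, the enum above expands to
+ * LLDP_TLV_pdu_end = 0, LLDP_TLV_chassis_id = 1, LLDP_TLV_port_id = 2,
+ * LLDP_TLV_ttl = 3, LLDP_TLV_port_desc = 4, ..., LLDP_TLV_org_spec = 127.
+ */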
+
+struct lldp_tlv_head
+{
+ u8 byte1; /* contains TLV code in the upper 7 bits + MSB of length */
+ u8 byte2; /* contains the lower bits of length */
+};
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ struct lldp_tlv_head head;
+ u8 v[0];
+}) lldp_tlv_t;
+/* *INDENT-ON* */
+
+lldp_tlv_code_t lldp_tlv_get_code (const lldp_tlv_t * tlv);
+void lldp_tlv_set_code (lldp_tlv_t * tlv, lldp_tlv_code_t code);
+u16 lldp_tlv_get_length (const lldp_tlv_t * tlv);
+void lldp_tlv_set_length (lldp_tlv_t * tlv, u16 length);
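+
+/*
+ * Editor's sketch (not part of the patch): given the byte1/byte2 split
+ * documented on lldp_tlv_head - a 7-bit code in the upper bits of byte1,
+ * with the 9-bit length's MSB in byte1's LSB - the getters declared above
+ * could plausibly be implemented as:
+ *
+ *   static inline lldp_tlv_code_t
+ *   sketch_lldp_tlv_get_code (const lldp_tlv_t * tlv)
+ *   {
+ *     return tlv->head.byte1 >> 1;
+ *   }
+ *
+ *   static inline u16
+ *   sketch_lldp_tlv_get_length (const lldp_tlv_t * tlv)
+ *   {
+ *     return ((u16) (tlv->head.byte1 & 1) << 8) + tlv->head.byte2;
+ *   }
+ */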
+
+#define foreach_chassis_id_subtype(F) \
+ F (0, reserved, "Reserved") \
+ F (1, chassis_comp, "Chassis component") \
+ F (2, intf_alias, "Interface alias") \
+ F (3, port_comp, "Port component") \
+ F (4, mac_addr, "MAC address") \
+ F (5, net_addr, "Network address") \
+ F (6, intf_name, "Interface name") \
+ F (7, local, "Locally assigned")
+
+#define LLDP_CHASS_ID_SUBTYPE_NAME(t) LLDP_CHASS_ID_SUBTYPE_##t
+#define LLDP_MIN_CHASS_ID_LEN (1)
+#define LLDP_MAX_CHASS_ID_LEN (255)
+
+typedef enum
+{
+#define F(n, t, s) LLDP_CHASS_ID_SUBTYPE_NAME (t) = n,
+ foreach_chassis_id_subtype (F)
+#undef F
+} lldp_chassis_id_subtype_t;
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ struct lldp_tlv_head head;
+ u8 subtype;
+ u8 id[0];
+}) lldp_chassis_id_tlv_t;
+/* *INDENT-ON* */
+
+#define foreach_port_id_subtype(F) \
+ F (0, reserved, "Reserved") \
+ F (1, intf_alias, "Interface alias") \
+ F (2, port_comp, "Port component") \
+ F (3, mac_addr, "MAC address") \
+ F (4, net_addr, "Network address") \
+ F (5, intf_name, "Interface name") \
+ F (6, agent_circuit_id, "Agent circuit ID") \
+ F (7, local, "Locally assigned")
+
+#define LLDP_PORT_ID_SUBTYPE_NAME(t) LLDP_PORT_ID_SUBTYPE_##t
+#define LLDP_MIN_PORT_ID_LEN (1)
+#define LLDP_MAX_PORT_ID_LEN (255)
+
+typedef enum
+{
+#define F(n, t, s) LLDP_PORT_ID_SUBTYPE_NAME (t) = n,
+ foreach_port_id_subtype (F)
+#undef F
+} lldp_port_id_subtype_t;
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ struct lldp_tlv_head head;
+ u8 subtype;
+ u8 id[0];
+}) lldp_port_id_tlv_t;
+
+typedef CLIB_PACKED (struct {
+ struct lldp_tlv_head head;
+ u16 ttl;
+}) lldp_ttl_tlv_t;
+/* *INDENT-ON* */
+
+#endif /* __included_lldp_protocol_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/map/examples/gen-rules.py b/src/vnet/map/examples/gen-rules.py
new file mode 100755
index 00000000000..7964aa9a359
--- /dev/null
+++ b/src/vnet/map/examples/gen-rules.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2015 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import ipaddress
+import argparse
+import sys
+
+# map add domain ip4-pfx <pfx> ip6-pfx ::/0 ip6-src <ip6-src> ea-bits-len 0 psid-offset 6 psid-len 6
+# map add rule index <0> psid <psid> ip6-dst <ip6-dst>
+
+def_ip4_pfx = '192.0.2.0/24'
+def_ip6_pfx = '2001:db8::/32'
+def_ip6_src = '2001:db8::1'
+def_psid_offset = 6
+def_psid_len = 6
+def_ea_bits_len = 0
+
+parser = argparse.ArgumentParser(description='MAP VPP configuration generator')
+parser.add_argument('-t', action="store", dest="mapmode")
+parser.add_argument('-f', action="store", dest="format", default="vpp")
+parser.add_argument('--ip4-prefix', action="store", dest="ip4_pfx", default=def_ip4_pfx)
+parser.add_argument('--ip6-prefix', action="store", dest="ip6_pfx", default=def_ip6_pfx)
+parser.add_argument('--ip6-src', action="store", dest="ip6_src", default=def_ip6_src)
+parser.add_argument('--psid-len', action="store", dest="psid_len", type=int, default=def_psid_len)
+parser.add_argument('--psid-offset', action="store", dest="psid_offset", type=int, default=def_psid_offset)
+parser.add_argument('--ea-bits-len', action="store", dest="ea_bits_len", type=int, default=def_ea_bits_len)
+args = parser.parse_args()
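+
+# Example invocations (editor's illustration; -t names one of the generator
+# functions defined below):
+#   ./gen-rules.py -t algo -f vpp
+#   ./gen-rules.py -t lw46_shared -f xml --psid-len 6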
+
+#
+# Print domain
+#
+def domain_print(i, ip4_pfx, ip6_pfx, ip6_src, eabits_len, psid_offset, psid_len):
+ if format == 'vpp':
+ print("map add domain ip4-pfx " + ip4_pfx + " ip6-pfx", ip6_pfx, "ip6-src " + ip6_src +
+ " ea-bits-len", eabits_len, "psid-offset", psid_offset, "psid-len", psid_len)
+ if format == 'confd':
+ print("vpp softwire softwire-instances softwire-instance", i, "br-ipv6 " + ip6_src +
+ " ipv6-prefix " + ip6_pfx + " ipv4-prefix " + ip4_pfx +
+ " ea-bits-len", eabits_len, "psid-offset", psid_offset, "psid-len", psid_len)
+ if format == 'xml':
+ print("<softwire-instance>")
+ print("<id>", i, "</id>");
+ print(" <br-ipv6>" + ip6_src + "</br-ipv6>")
+ print(" <ipv6-prefix>" + ip6_pfx + "</ipv6-prefix>")
+ print(" <ipv4-prefix>" + ip4_pfx + "</ipv4-prefix>")
+ print(" <ea-len>", eabits_len, "</ea-len>")
+ print(" <psid-len>", psid_len, "</psid-len>")
+ print(" <psid-offset>", psid_offset, "</psid-offset>")
+
+def domain_print_end():
+ if format == 'xml':
+ print("</softwire-instance>")
+
+def rule_print(i, psid, dst):
+ if format == 'vpp':
+ print("map add rule index", i, "psid", psid, "ip6-dst", dst)
+ if format == 'confd':
+ print("binding", psid, "ipv6-addr", dst)
+ if format == 'xml':
+ print(" <binding>")
+ print(" <psid>", psid, "</psid>")
+ print(" <ipv6-addr>", dst, "</ipv6-addr>")
+ print(" </binding>")
+
+#
+# Algorithmic mapping Shared IPv4 address
+#
+def algo(ip4_pfx_str, ip6_pfx_str, ip6_src_str, ea_bits_len, psid_offset, psid_len, ip6_src_ecmp = False):
+ domain_print(0, ip4_pfx_str, ip6_pfx_str, ip6_src_str, ea_bits_len, psid_offset, psid_len)
+ domain_print_end()
+
+#
+# 1:1 Full IPv4 address
+#
+def lw46(ip4_pfx_str, ip6_pfx_str, ip6_src_str, ea_bits_len, psid_offset, psid_len, ip6_src_ecmp = False):
+ ip4_pfx = ipaddress.ip_network(ip4_pfx_str)
+ ip6_src = ipaddress.ip_address(ip6_src_str)
+ ip6_dst = ipaddress.ip_network(ip6_pfx_str)
+ psid_len = 0
+ mod = ip4_pfx.num_addresses // 1024
+
+ for i in range(ip4_pfx.num_addresses):
+ domain_print(i, str(ip4_pfx[i]) + "/32", str(ip6_dst[i]) + "/128", str(ip6_src), 0, 0, 0)
+ domain_print_end()
+ if ip6_src_ecmp and not i % mod:
+ ip6_src = ip6_src + 1
+
+#
+# 1:1 Shared IPv4 address, shared BR (16) VPP CLI
+#
+def lw46_shared(ip4_pfx_str, ip6_pfx_str, ip6_src_str, ea_bits_len, psid_offset, psid_len, ip6_src_ecmp = False):
+ ip4_pfx = ipaddress.ip_network(ip4_pfx_str)
+ ip6_src = ipaddress.ip_address(ip6_src_str)
+ ip6_dst = ipaddress.ip_network(ip6_pfx_str)
+ mod = ip4_pfx.num_addresses // 1024
+
+ for i in range(ip4_pfx.num_addresses):
+ domain_print(i, str(ip4_pfx[i]) + "/32", "::/0", str(ip6_src), 0, 0, psid_len)
+ for psid in range(0x1 << int(psid_len)):
+ rule_print(i, psid, str(ip6_dst[(i * (0x1<<int(psid_len))) + psid]))
+ domain_print_end()
+ if ip6_src_ecmp and not i % mod:
+ ip6_src = ip6_src + 1
+
+
+#
+# 1:1 Shared IPv4 address, shared BR
+#
+def lw46_shared_b(ip4_pfx_str, ip6_pfx_str, ip6_src_str, ea_bits_len, psid_offset, psid_len, ip6_src_ecmp = False):
+ ip4_pfx = ipaddress.ip_network(ip4_pfx_str)
+ ip6_src = ipaddress.ip_address(ip6_src_str)
+ ip6_dst = list(ipaddress.ip_network(ip6_pfx_str).subnets(new_prefix=56))
+ mod = ip4_pfx.num_addresses // 1024
+
+ for i in range(ip4_pfx.num_addresses):
+ domain_print(i, str(ip4_pfx[i]) + "/32", "::/0", str(ip6_src), 0, 0, psid_len)
+ for psid in range(0x1 << psid_len):
+ enduserprefix = list(ip6_dst.pop(0).subnets(new_prefix=64))[255-1]
+ rule_print(i, psid, enduserprefix[(i * (0x1<<psid_len)) + psid])
+ domain_print_end()
+ if ip6_src_ecmp and not i % mod:
+ ip6_src = ip6_src + 1
+
+
+def xml_header_print():
+ print('''
+<?xml version="1.0" encoding="UTF-8"?>
+ <hello xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <capabilities>
+ <capability>urn:ietf:params:netconf:base:1.0</capability>
+ </capabilities>
+ </hello>
+]]>]]>
+
+<?xml version="1.0" encoding="UTF-8"?>
+ <rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="1">
+ <edit-config>
+ <target>
+ <candidate/>
+ </target>
+ <config>
+
+ <vpp xmlns="http://www.cisco.com/yang/cisco-vpp">
+ <softwire>
+ <softwire-instances>
+
+ ''')
+
+def xml_footer_print():
+ print('''
+</softwire-instances>
+</softwire>
+</vpp>
+ </config>
+ </edit-config>
+ </rpc>
+
+]]>]]>
+
+<?xml version="1.0" encoding="UTF-8"?>
+ <rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="2">
+ <close-session/>
+ </rpc>
+
+]]>]]>
+ ''')
+
+
+format = args.format
+if format == 'xml':
+ xml_header_print()
+globals()[args.mapmode](args.ip4_pfx, args.ip6_pfx, args.ip6_src, args.ea_bits_len, args.psid_offset, args.psid_len)
+if format == 'xml':
+ xml_footer_print()
diff --git a/src/vnet/map/examples/health_check.c b/src/vnet/map/examples/health_check.c
new file mode 100644
index 00000000000..5f0d85fec08
--- /dev/null
+++ b/src/vnet/map/examples/health_check.c
@@ -0,0 +1,109 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip_icmp.h>
+#include <arpa/inet.h>
+#include <net/if.h>
+#include <stdbool.h>
+#include <errno.h>
+
+static void
+usage (void) {
+ fprintf(stderr,
+ "Usage: health_check"
+ " -d debug"
+ " -I interface"
+ "\n");
+ exit(2);
+}
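+
+/*
+ * Editor's note - typical build/run (assumed, not spelled out in the patch):
+ *   cc -o health_check health_check.c
+ *   sudo ./health_check -I eth0 -d
+ * Root privileges are needed for the raw socket and SO_BINDTODEVICE.
+ */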
+
+int
+main (int argc, char **argv)
+{
+ int sd, ch;
+ uint8_t *opt, *pkt;
+ struct ifreq ifr;
+ char *interface = NULL;
+ bool debug = false;
+
+ while ((ch = getopt(argc, argv, "h?" "I:" "d")) != EOF) {
+ switch(ch) {
+ case 'I':
+ interface = optarg;
+ break;
+ case 'd':
+ debug = true;
+ break;
+ default:
+ usage();
+ break;
+ }
+ }
+
+ argc -= optind;
+ argv += optind;
+
+ if (!interface)
+ usage();
+
+ /* Request a socket descriptor sd. */
+ if ((sd = socket (AF_INET6, SOCK_RAW, IPPROTO_IPIP)) < 0) {
+ perror ("Failed to get socket descriptor ");
+ exit (EXIT_FAILURE);
+ }
+
+ memset(&ifr, 0, sizeof(ifr));
+ snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", interface);
+
+ /* Bind socket to interface of this node. */
+ if (setsockopt (sd, SOL_SOCKET, SO_BINDTODEVICE, (void *) &ifr, sizeof (ifr)) < 0) {
+ perror ("SO_BINDTODEVICE failed");
+ exit (EXIT_FAILURE);
+ }
+ if (debug) printf("Binding to interface %s\n", interface);
+
+ while (1) {
+ struct sockaddr_in6 src_addr;
+ socklen_t addrlen = sizeof(src_addr);
+ char source[INET6_ADDRSTRLEN+1];
+ int len;
+ uint8_t inpack[IP_MAXPACKET];
+
+ if ((len = recvfrom(sd, inpack, sizeof(inpack), 0, (struct sockaddr *)&src_addr, &addrlen)) < 0) {
+ perror("recvfrom failed ");
+ }
+ if (inet_ntop(AF_INET6, &src_addr.sin6_addr, source, INET6_ADDRSTRLEN) == NULL) {
+ perror("inet_ntop() failed.");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Reply */
+ struct iphdr *ip = (struct iphdr *)inpack;
+ uint32_t saddr;
+ struct icmphdr *icmp;
+
+ saddr = ip->saddr;
+ ip->saddr = ip->daddr;
+ ip->daddr = saddr;
+
+ switch (ip->protocol) {
+ case 1:
+ if (debug) printf ("ICMP Echo request from %s\n", source);
+ icmp = (struct icmphdr *)&ip[1];
+ icmp->type = ICMP_ECHOREPLY;
+ break;
+ default:
+ fprintf(stderr, "Unsupported protocol %d", ip->protocol);
+ }
+ if ((len = sendto(sd, inpack, len, 0, (struct sockaddr *)&src_addr, addrlen)) < 0) {
+ perror("sendto failed ");
+ }
+ }
+
+ close (sd);
+
+ return (EXIT_SUCCESS);
+}
diff --git a/src/vnet/map/examples/test_map.py b/src/vnet/map/examples/test_map.py
new file mode 100755
index 00000000000..21388d49526
--- /dev/null
+++ b/src/vnet/map/examples/test_map.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+
+import time, argparse, sys, cmd, unittest
+from ipaddress import *
+
+parser = argparse.ArgumentParser(description='VPP MAP test')
+parser.add_argument('-i', nargs='*', action="store", dest="inputdir")
+args = parser.parse_args()
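+# Example (editor's illustration): ./test_map.py -i <dir-containing-vpp_papi>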
+
+for dir in args.inputdir:
+ sys.path.append(dir)
+from vpp_papi import *
+
+#
+# 1:1 Shared IPv4 address, shared BR (16) VPP CLI
+#
+def lw46_shared(ip4_pfx_str, ip6_pfx_str, ip6_src_str, ea_bits_len, psid_offset, psid_len, ip6_src_ecmp = False):
+ ip4_pfx = ip_network(ip4_pfx_str)
+ ip6_src = ip_address(ip6_src_str)
+ ip6_dst = ip_network(ip6_pfx_str)
+ ip6_nul = IPv6Address(u'0::0')
+ mod = ip4_pfx.num_addresses / 1024
+
+ for i in range(ip4_pfx.num_addresses):
+ a = time.clock()
+ t = map_add_domain(0, ip6_nul.packed, ip4_pfx[i].packed, ip6_src.packed, 0, 32, 128, ea_bits_len, psid_offset, psid_len, 0, 0)
+ #print "Return from map_add_domain", t
+ if t == None:
+ print "map_add_domain failed"
+ continue
+ if t.retval != 0:
+ print "map_add_domain failed", t
+ continue
+ for psid in range(0x1 << int(psid_len)):
+ r = map_add_del_rule(0, t.index, 1, (ip6_dst[(i * (0x1<<int(psid_len))) + psid]).packed, psid)
+ #print "Return from map_add_del_rule", r
+
+ if ip6_src_ecmp and not i % mod:
+ ip6_src = ip6_src + 1
+
+ print "Running time:", time.clock() - a
+
+class TestMAP(unittest.TestCase):
+ '''
+ def test_delete_all(self):
+ t = map_domain_dump(0)
+ self.assertNotEqual(t, None)
+ print "Number of domains configured: ", len(t)
+ for d in t:
+ ts = map_del_domain(0, d.domainindex)
+ self.assertNotEqual(ts, None)
+ t = map_domain_dump(0)
+ self.assertNotEqual(t, None)
+ print "Number of domains configured: ", len(t)
+ self.assertEqual(len(t), 0)
+
+ '''
+
+ def test_a_million_rules(self):
+ ip4_pfx = u'192.0.2.0/24'
+ ip6_pfx = u'2001:db8::/32'
+ ip6_src = u'2001:db8::1'
+ psid_offset = 6
+ psid_len = 6
+ ea_bits_len = 0
+ lw46_shared(ip4_pfx, ip6_pfx, ip6_src, ea_bits_len, psid_offset, psid_len)
+
+#
+# RX thread that should sit on blocking vpe_api_read()
+#
+
+import threading
+class RXThread (threading.Thread):
+ def __init__(self):
+ threading.Thread.__init__(self)
+
+ def run(self):
+ print "Starting "
+ i = 0
+ while True:
+ msg = vpe_api_read()
+ if msg:
+ #print msg
+ id = unpack('>H', msg[0:2])
+ size = unpack('>H', msg[2:4])
+ print "Received", id, "of size", size
+ i += 1
+ #del msg
+ continue
+
+ #time.sleep(0.001)
+ return
+
+# Create RX thread
+rxthread = RXThread()
+rxthread.setDaemon(True)
+
+print "Connect", connect_to_vpe("client124")
+import timeit
+rxthread.start()
+print "After thread started"
+
+#pneum_kill_thread()
+print "After thread killed"
+
+#t = show_version(0)
+#print "Result from show version", t
+
+print timeit.timeit('t = show_version(0)', number=1000, setup="from __main__ import show_version")
+time.sleep(10)
+#print timeit.timeit('control_ping(0)', number=10, setup="from __main__ import control_ping")
+
+
+disconnect_from_vpe()
+sys.exit()
+
+
+print t.program, t.version,t.builddate,t.builddirectory
+
+'''
+
+t = map_domain_dump(0)
+if not t:
+ print('show map domain failed')
+
+for d in t:
+ print("IP6 prefix:",str(IPv6Address(d.ip6prefix)))
+ print( "IP4 prefix:",str(IPv4Address(d.ip4prefix)))
+'''
+
+suite = unittest.TestLoader().loadTestsFromTestCase(TestMAP)
+unittest.TextTestRunner(verbosity=2).run(suite)
+
+disconnect_from_vpe()
+
+
diff --git a/src/vnet/map/gen-rules.py b/src/vnet/map/gen-rules.py
new file mode 100755
index 00000000000..533a8e237f7
--- /dev/null
+++ b/src/vnet/map/gen-rules.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import ipaddress
+import argparse
+import sys
+
+# map add domain ip4-pfx <pfx> ip6-pfx ::/0 ip6-src <ip6-src> ea-bits-len 0 psid-offset 6 psid-len 6
+# map add rule index <0> psid <psid> ip6-dst <ip6-dst>
+
+parser = argparse.ArgumentParser(description='MAP VPP configuration generator')
+parser.add_argument('-t', action="store", dest="mapmode")
+args = parser.parse_args()
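+
+# Example (editor's illustration): ./gen-rules.py -t shared11br
+# where -t names one of the generator functions defined in this file.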
+
+#
+# 1:1 Shared IPv4 address, shared BR
+#
+def shared11br():
+ ip4_pfx = ipaddress.ip_network('20.0.0.0/16')
+ ip6_dst = ipaddress.ip_network('bbbb::/32')
+ psid_len = 6
+ for i in range(ip4_pfx.num_addresses):
+ print("map add domain ip4-pfx " + str(ip4_pfx[i]) + "/32 ip6-pfx ::/0 ip6-shared-src cccc:bbbb::1",
+ "ea-bits-len 0 psid-offset 6 psid-len", psid_len)
+ for psid in range(0x1 << psid_len):
+ print("map add rule index", i, "psid", psid, "ip6-dst", ip6_dst[(i * (0x1<<psid_len)) + psid])
+
+
+#
+# 1:1 Shared IPv4 address
+#
+def shared11():
+ ip4_pfx = ipaddress.ip_network('20.0.0.0/16')
+ ip6_src = ipaddress.ip_network('cccc:bbbb::/64')
+ ip6_dst = ipaddress.ip_network('bbbb::/32')
+ psid_len = 6
+ for i in range(ip4_pfx.num_addresses):
+ print("map add domain ip4-pfx " + str(ip4_pfx[i]) + "/32 ip6-pfx ::/0 ip6-src", ip6_src[i],
+ "ea-bits-len 0 psid-offset 6 psid-len", psid_len)
+ for psid in range(0x1 << psid_len):
+ print("map add rule index", i, "psid", psid, "ip6-dst", ip6_dst[(i * (0x1<<psid_len)) + psid])
+
+#
+# 1:1 Shared IPv4 address small
+#
+def smallshared11():
+ ip4_pfx = ipaddress.ip_network('20.0.0.0/24')
+ ip6_src = ipaddress.ip_network('cccc:bbbb::/64')
+ ip6_dst = ipaddress.ip_network('bbbb::/32')
+ psid_len = 6
+ for i in range(ip4_pfx.num_addresses):
+ print("map add domain ip4-pfx " + str(ip4_pfx[i]) + "/32 ip6-pfx ::/0 ip6-src", ip6_src[i],
+ "ea-bits-len 0 psid-offset 6 psid-len", psid_len)
+ for psid in range(0x1 << psid_len):
+ print("map add rule index", i, "psid", psid, "ip6-dst", ip6_dst[(i * (0x1<<psid_len)) + psid])
+
+#
+# 1:1 Full IPv4 address
+#
+def full11():
+ ip4_pfx = ipaddress.ip_network('20.0.0.0/16')
+ ip6_src = ipaddress.ip_network('cccc:bbbb::/64')
+ ip6_dst = ipaddress.ip_network('bbbb::/32')
+ psid_len = 0
+ for i in range(ip4_pfx.num_addresses):
+ print("map add domain ip4-pfx " + str(ip4_pfx[i]) + "/32 ip6-pfx " + str(ip6_dst[i]) + "/128 ip6-src", ip6_src[i],
+ "ea-bits-len 0 psid-offset 0 psid-len 0")
+def full11br():
+ ip4_pfx = ipaddress.ip_network('20.0.0.0/16')
+ ip6_dst = ipaddress.ip_network('bbbb::/32')
+ psid_len = 0
+ for i in range(ip4_pfx.num_addresses):
+ print("map add domain ip4-pfx " + str(ip4_pfx[i]) + "/32 ip6-pfx " + str(ip6_dst[i]) + "/128 ip6-shared-src cccc:bbbb::1",
+ "ea-bits-len 0 psid-offset 0 psid-len 0")
+
+#
+# Algorithmic mapping Shared IPv4 address
+#
+def algo():
+ print("map add domain ip4-pfx 20.0.0.0/24 ip6-pfx bbbb::/32 ip6-src cccc:bbbb::1 ea-bits-len 16 psid-offset 6 psid-len 8")
+ print("map add domain ip4-pfx 20.0.1.0/24 ip6-pfx bbbb:1::/32 ip6-src cccc:bbbb::2 ea-bits-len 8 psid-offset 0 psid-len 0")
+
+#
+# IP4 forwarding
+#
+def ip4():
+ ip4_pfx = ipaddress.ip_network('20.0.0.0/16')
+ for i in range(ip4_pfx.num_addresses):
+ print("ip route add " + str(ip4_pfx[i]) + "/32 via 172.16.0.2")
+
+
+globals()[args.mapmode]()
+
+
diff --git a/src/vnet/map/ip4_map.c b/src/vnet/map/ip4_map.c
new file mode 100644
index 00000000000..9fd10f62eb1
--- /dev/null
+++ b/src/vnet/map/ip4_map.c
@@ -0,0 +1,813 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Defines used for testing various optimisation schemes
+ */
+#define MAP_ENCAP_DUAL 0
+
+#include "map.h"
+#include "../ip/ip_frag.h"
+
+vlib_node_registration_t ip4_map_reass_node;
+
+enum ip4_map_next_e
+{
+ IP4_MAP_NEXT_IP6_LOOKUP,
+#ifdef MAP_SKIP_IP6_LOOKUP
+ IP4_MAP_NEXT_IP6_REWRITE,
+#endif
+ IP4_MAP_NEXT_IP4_FRAGMENT,
+ IP4_MAP_NEXT_IP6_FRAGMENT,
+ IP4_MAP_NEXT_REASS,
+ IP4_MAP_NEXT_ICMP_ERROR,
+ IP4_MAP_NEXT_DROP,
+ IP4_MAP_N_NEXT,
+};
+
+enum ip4_map_reass_next_t
+{
+ IP4_MAP_REASS_NEXT_IP6_LOOKUP,
+ IP4_MAP_REASS_NEXT_IP4_FRAGMENT,
+ IP4_MAP_REASS_NEXT_DROP,
+ IP4_MAP_REASS_N_NEXT,
+};
+
+typedef struct
+{
+ u32 map_domain_index;
+ u16 port;
+ u8 cached;
+} map_ip4_map_reass_trace_t;
+
+u8 *
+format_ip4_map_reass_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ map_ip4_map_reass_trace_t *t = va_arg (*args, map_ip4_map_reass_trace_t *);
+ return format (s, "MAP domain index: %d L4 port: %u Status: %s",
+ t->map_domain_index, t->port,
+ t->cached ? "cached" : "forwarded");
+}
+
+/*
+ * ip4_map_get_port
+ */
+u16
+ip4_map_get_port (ip4_header_t * ip, map_dir_e dir)
+{
+ /* Find port information */
+ if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) ||
+ (ip->protocol == IP_PROTOCOL_UDP)))
+ {
+ udp_header_t *udp = (void *) (ip + 1);
+ return (dir == MAP_SENDER ? udp->src_port : udp->dst_port);
+ }
+ else if (ip->protocol == IP_PROTOCOL_ICMP)
+ {
+ /*
+ * 1) ICMP Echo request or Echo reply
+ * 2) ICMP Error with inner packet being UDP or TCP
+ * 3) ICMP Error with inner packet being ICMP Echo request or Echo reply
+ */
+ icmp46_header_t *icmp = (void *) (ip + 1);
+ if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply)
+ {
+ return *((u16 *) (icmp + 1));
+ }
+ else if (clib_net_to_host_u16 (ip->length) >= 56)
+ { // IP + ICMP + IP + L4 header
+ ip4_header_t *icmp_ip = (ip4_header_t *) (icmp + 2);
+ if (PREDICT_TRUE ((icmp_ip->protocol == IP_PROTOCOL_TCP) ||
+ (icmp_ip->protocol == IP_PROTOCOL_UDP)))
+ {
+ udp_header_t *udp = (void *) (icmp_ip + 1);
+ return (dir == MAP_SENDER ? udp->dst_port : udp->src_port);
+ }
+ else if (icmp_ip->protocol == IP_PROTOCOL_ICMP)
+ {
+ icmp46_header_t *inner_icmp = (void *) (icmp_ip + 1);
+ if (inner_icmp->type == ICMP4_echo_request
+ || inner_icmp->type == ICMP4_echo_reply)
+ return (*((u16 *) (inner_icmp + 1)));
+ }
+ }
+ }
+ return (0);
+}
+
+static_always_inline u16
+ip4_map_port_and_security_check (map_domain_t * d, ip4_header_t * ip,
+ u32 * next, u8 * error)
+{
+ u16 port = 0;
+
+ if (d->psid_length > 0)
+ {
+ if (ip4_get_fragment_offset (ip) == 0)
+ {
+ if (PREDICT_FALSE
+ ((ip->ip_version_and_header_length != 0x45)
+ || clib_host_to_net_u16 (ip->length) < 28))
+ {
+ return 0;
+ }
+ port = ip4_map_get_port (ip, MAP_RECEIVER);
+ if (port)
+ {
+ /* Verify that port is not among the well-known ports */
+ if ((d->psid_offset > 0)
+ && (clib_net_to_host_u16 (port) <
+ (0x1 << (16 - d->psid_offset))))
+ {
+ *error = MAP_ERROR_ENCAP_SEC_CHECK;
+ }
+ else
+ {
+ if (ip4_get_fragment_more (ip))
+ *next = IP4_MAP_NEXT_REASS;
+ return (port);
+ }
+ }
+ else
+ {
+ *error = MAP_ERROR_BAD_PROTOCOL;
+ }
+ }
+ else
+ {
+ *next = IP4_MAP_NEXT_REASS;
+ }
+ }
+ return (0);
+}
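+
+/*
+ * Editor's worked example: with a typical psid_offset of 6 (the default in
+ * the example generators), the check above rejects ports below
+ * (1 << (16 - 6)) = 1024, keeping well-known ports out of any PSID.
+ */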
+
+/*
+ * ip4_map_vtcfl
+ */
+static_always_inline u32
+ip4_map_vtcfl (ip4_header_t * ip4, vlib_buffer_t * p)
+{
+ map_main_t *mm = &map_main;
+ u8 tc = mm->tc_copy ? ip4->tos : mm->tc;
+ u32 vtcfl = 0x6 << 28;
+ vtcfl |= tc << 20;
+ vtcfl |= vnet_buffer (p)->ip.flow_hash & 0x000fffff;
+
+ return (clib_host_to_net_u32 (vtcfl));
+}
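+
+/*
+ * Editor's note: in host order the word built above is
+ *   (6 << 28) | (tc << 20) | (flow_hash & 0xfffff),
+ * i.e. IPv6 version 6, an 8-bit traffic class, and a 20-bit flow label
+ * taken from the IPv4 flow hash.
+ */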
+
+static_always_inline bool
+ip4_map_ip6_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
+{
+#ifdef MAP_SKIP_IP6_LOOKUP
+ map_main_t *mm = &map_main;
+ u32 adj_index0 = mm->adj6_index;
+ if (adj_index0 > 0)
+ {
+ ip_lookup_main_t *lm6 = &ip6_main.lookup_main;
+ ip_adjacency_t *adj = ip_get_adjacency (lm6, mm->adj6_index);
+ if (adj->n_adj > 1)
+ {
+ u32 hash_c0 = ip4_compute_flow_hash (ip, IP_FLOW_HASH_DEFAULT);
+ adj_index0 += (hash_c0 & (adj->n_adj - 1));
+ }
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] = adj_index0;
+ return (true);
+ }
+#endif
+ return (false);
+}
+
+/*
+ * ip4_map_ttl
+ */
+static inline void
+ip4_map_decrement_ttl (ip4_header_t * ip, u8 * error)
+{
+ i32 ttl = ip->ttl;
+
+ /* The input node should have rejected packets with ttl 0. */
+ ASSERT (ip->ttl > 0);
+
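+ /* Editor's note: incremental checksum update in the style of RFC 1624:
+ * TTL is the high byte of its 16-bit word, so decrementing it adds
+ * 0x0100 (network order) to the one's-complement checksum; the next
+ * line folds the end-around carry back in. */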
+ u32 checksum = ip->checksum + clib_host_to_net_u16 (0x0100);
+ checksum += checksum >= 0xffff;
+ ip->checksum = checksum;
+ ttl -= 1;
+ ip->ttl = ttl;
+ *error = ttl <= 0 ? IP4_ERROR_TIME_EXPIRED : *error;
+
+ /* Verify checksum. */
+ ASSERT (ip->checksum == ip4_header_checksum (ip));
+}
+
+static u32
+ip4_map_fragment (vlib_buffer_t * b, u16 mtu, bool df, u8 * error)
+{
+ map_main_t *mm = &map_main;
+
+ if (mm->frag_inner)
+ {
+ ip_frag_set_vnet_buffer (b, sizeof (ip6_header_t), mtu,
+ IP4_FRAG_NEXT_IP6_LOOKUP,
+ IP_FRAG_FLAG_IP6_HEADER);
+ return (IP4_MAP_NEXT_IP4_FRAGMENT);
+ }
+ else
+ {
+ if (df && !mm->frag_ignore_df)
+ {
+ icmp4_error_set_vnet_buffer (b, ICMP4_destination_unreachable,
+ ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
+ mtu);
+ vlib_buffer_advance (b, sizeof (ip6_header_t));
+ *error = MAP_ERROR_DF_SET;
+ return (IP4_MAP_NEXT_ICMP_ERROR);
+ }
+ ip_frag_set_vnet_buffer (b, 0, mtu, IP6_FRAG_NEXT_IP6_LOOKUP,
+ IP_FRAG_FLAG_IP6_HEADER);
+ return (IP4_MAP_NEXT_IP6_FRAGMENT);
+ }
+}
+
+/*
+ * ip4_map
+ */
+static uword
+ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip4_map_node.index);
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ map_main_t *mm = &map_main;
+ vlib_combined_counter_main_t *cm = mm->domain_counters;
+ u32 cpu_index = os_get_cpu_number ();
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Dual loop */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 pi0, pi1;
+ vlib_buffer_t *p0, *p1;
+ map_domain_t *d0, *d1;
+ u8 error0 = MAP_ERROR_NONE, error1 = MAP_ERROR_NONE;
+ ip4_header_t *ip40, *ip41;
+ u16 port0 = 0, port1 = 0;
+ ip6_header_t *ip6h0, *ip6h1;
+ u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
+ u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP, next1 =
+ IP4_MAP_NEXT_IP6_LOOKUP;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, STORE);
+ vlib_prefetch_buffer_header (p3, STORE);
+ /* Prefetch the IPv4 header + 8 bytes of L4 (28 bytes), plus the
+ 40 bytes in front where the IPv6 header will be prepended */
+ CLIB_PREFETCH (p2->data - 40, 68, STORE);
+ CLIB_PREFETCH (p3->data - 40, 68, STORE);
+ }
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+ ip40 = vlib_buffer_get_current (p0);
+ ip41 = vlib_buffer_get_current (p1);
+ d0 =
+ ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+ &map_domain_index0);
+ d1 =
+ ip4_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
+ &map_domain_index1);
+ ASSERT (d0);
+ ASSERT (d1);
+
+ /*
+ * Shared IPv4 address
+ */
+ port0 = ip4_map_port_and_security_check (d0, ip40, &next0, &error0);
+ port1 = ip4_map_port_and_security_check (d1, ip41, &next1, &error1);
+
+ /* Decrement IPv4 TTL */
+ ip4_map_decrement_ttl (ip40, &error0);
+ ip4_map_decrement_ttl (ip41, &error1);
+ bool df0 =
+ ip40->flags_and_fragment_offset &
+ clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
+ bool df1 =
+ ip41->flags_and_fragment_offset &
+ clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
+
+ /* MAP calc */
+ u32 da40 = clib_net_to_host_u32 (ip40->dst_address.as_u32);
+ u32 da41 = clib_net_to_host_u32 (ip41->dst_address.as_u32);
+ u16 dp40 = clib_net_to_host_u16 (port0);
+ u16 dp41 = clib_net_to_host_u16 (port1);
+ u64 dal60 = map_get_pfx (d0, da40, dp40);
+ u64 dal61 = map_get_pfx (d1, da41, dp41);
+ u64 dar60 = map_get_sfx (d0, da40, dp40);
+ u64 dar61 = map_get_sfx (d1, da41, dp41);
+ if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE
+ && next0 != IP4_MAP_NEXT_REASS)
+ error0 = MAP_ERROR_NO_BINDING;
+ if (dal61 == 0 && dar61 == 0 && error1 == MAP_ERROR_NONE
+ && next1 != IP4_MAP_NEXT_REASS)
+ error1 = MAP_ERROR_NO_BINDING;
+
+ /* construct ipv6 header */
+ vlib_buffer_advance (p0, -sizeof (ip6_header_t));
+ vlib_buffer_advance (p1, -sizeof (ip6_header_t));
+ ip6h0 = vlib_buffer_get_current (p0);
+ ip6h1 = vlib_buffer_get_current (p1);
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ vnet_buffer (p1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+ ip6h0->ip_version_traffic_class_and_flow_label =
+ ip4_map_vtcfl (ip40, p0);
+ ip6h1->ip_version_traffic_class_and_flow_label =
+ ip4_map_vtcfl (ip41, p1);
+ ip6h0->payload_length = ip40->length;
+ ip6h1->payload_length = ip41->length;
+ ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
+ ip6h1->protocol = IP_PROTOCOL_IP_IN_IP;
+ ip6h0->hop_limit = 0x40;
+ ip6h1->hop_limit = 0x40;
+ ip6h0->src_address = d0->ip6_src;
+ ip6h1->src_address = d1->ip6_src;
+ ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64 (dal60);
+ ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64 (dar60);
+ ip6h1->dst_address.as_u64[0] = clib_host_to_net_u64 (dal61);
+ ip6h1->dst_address.as_u64[1] = clib_host_to_net_u64 (dar61);
+
+ /*
+ * Determine next node. Can be one of:
+ * ip6-lookup, ip6-rewrite, ip4-frag, ip6-frag, ip4-map-reass, ip4-icmp-error, error-drop
+ */
+ if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
+ {
+ if (PREDICT_FALSE
+ (d0->mtu
+ && (clib_net_to_host_u16 (ip6h0->payload_length) +
+ sizeof (*ip6h0) > d0->mtu)))
+ {
+ next0 = ip4_map_fragment (p0, d0->mtu, df0, &error0);
+ }
+ else
+ {
+ next0 =
+ ip4_map_ip6_lookup_bypass (p0,
+ ip40) ?
+ IP4_MAP_NEXT_IP6_REWRITE : next0;
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
+ cpu_index,
+ map_domain_index0, 1,
+ clib_net_to_host_u16
+ (ip6h0->payload_length) +
+ 40);
+ }
+ }
+ else
+ {
+ next0 = IP4_MAP_NEXT_DROP;
+ }
+
+ /*
+ * Determine next node. Can be one of:
+ * ip6-lookup, ip6-rewrite, ip4-frag, ip6-frag, ip4-map-reass, ip4-icmp-error, error-drop
+ */
+ if (PREDICT_TRUE (error1 == MAP_ERROR_NONE))
+ {
+ if (PREDICT_FALSE
+ (d1->mtu
+ && (clib_net_to_host_u16 (ip6h1->payload_length) +
+ sizeof (*ip6h1) > d1->mtu)))
+ {
+ next1 = ip4_map_fragment (p1, d1->mtu, df1, &error1);
+ }
+ else
+ {
+ next1 =
+ ip4_map_ip6_lookup_bypass (p1,
+ ip41) ?
+ IP4_MAP_NEXT_IP6_REWRITE : next1;
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
+ cpu_index,
+ map_domain_index1, 1,
+ clib_net_to_host_u16
+ (ip6h1->payload_length) +
+ 40);
+ }
+ }
+ else
+ {
+ next1 = IP4_MAP_NEXT_DROP;
+ }
+
+ if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
+ tr->map_domain_index = map_domain_index0;
+ tr->port = port0;
+ }
+ if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr));
+ tr->map_domain_index = map_domain_index1;
+ tr->port = port1;
+ }
+
+ p0->error = error_node->errors[error0];
+ p1->error = error_node->errors[error1];
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+ n_left_to_next, pi0, pi1, next0,
+ next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0;
+ vlib_buffer_t *p0;
+ map_domain_t *d0;
+ u8 error0 = MAP_ERROR_NONE;
+ ip4_header_t *ip40;
+ u16 port0 = 0;
+ ip6_header_t *ip6h0;
+ u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP;
+ u32 map_domain_index0 = ~0;
+
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip40 = vlib_buffer_get_current (p0);
+ d0 =
+ ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+ &map_domain_index0);
+ ASSERT (d0);
+
+ /*
+ * Shared IPv4 address
+ */
+ port0 = ip4_map_port_and_security_check (d0, ip40, &next0, &error0);
+
+ /* Decrement IPv4 TTL */
+ ip4_map_decrement_ttl (ip40, &error0);
+ bool df0 =
+ ip40->flags_and_fragment_offset &
+ clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
+
+ /* MAP calc */
+ u32 da40 = clib_net_to_host_u32 (ip40->dst_address.as_u32);
+ u16 dp40 = clib_net_to_host_u16 (port0);
+ u64 dal60 = map_get_pfx (d0, da40, dp40);
+ u64 dar60 = map_get_sfx (d0, da40, dp40);
+ if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE
+ && next0 != IP4_MAP_NEXT_REASS)
+ error0 = MAP_ERROR_NO_BINDING;
+
+ /* construct ipv6 header */
+ vlib_buffer_advance (p0, -(sizeof (ip6_header_t)));
+ ip6h0 = vlib_buffer_get_current (p0);
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+ ip6h0->ip_version_traffic_class_and_flow_label =
+ ip4_map_vtcfl (ip40, p0);
+ ip6h0->payload_length = ip40->length;
+ ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
+ ip6h0->hop_limit = 0x40;
+ ip6h0->src_address = d0->ip6_src;
+ ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64 (dal60);
+ ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64 (dar60);
+
+ /*
+ * Determine next node. Can be one of:
+ * ip6-lookup, ip6-rewrite, ip4-frag, ip6-frag, ip4-map-reass, ip4-icmp-error, error-drop
+ */
+ if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
+ {
+ if (PREDICT_FALSE
+ (d0->mtu
+ && (clib_net_to_host_u16 (ip6h0->payload_length) +
+ sizeof (*ip6h0) > d0->mtu)))
+ {
+ next0 = ip4_map_fragment (p0, d0->mtu, df0, &error0);
+ }
+ else
+ {
+ next0 =
+ ip4_map_ip6_lookup_bypass (p0,
+ ip40) ?
+ IP4_MAP_NEXT_IP6_REWRITE : next0;
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
+ cpu_index,
+ map_domain_index0, 1,
+ clib_net_to_host_u16
+ (ip6h0->payload_length) +
+ 40);
+ }
+ }
+ else
+ {
+ next0 = IP4_MAP_NEXT_DROP;
+ }
+
+ if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
+ tr->map_domain_index = map_domain_index0;
+ tr->port = port0;
+ }
+
+ p0->error = error_node->errors[error0];
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, pi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/*
+ * ip4_map_reass
+ */
+static uword
+ip4_map_reass (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip4_map_reass_node.index);
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ map_main_t *mm = &map_main;
+ vlib_combined_counter_main_t *cm = mm->domain_counters;
+ u32 cpu_index = os_get_cpu_number ();
+ u32 *fragments_to_drop = NULL;
+ u32 *fragments_to_loopback = NULL;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0;
+ vlib_buffer_t *p0;
+ map_domain_t *d0;
+ u8 error0 = MAP_ERROR_NONE;
+ ip4_header_t *ip40;
+ i32 port0 = 0;
+ ip6_header_t *ip60;
+ u32 next0 = IP4_MAP_REASS_NEXT_IP6_LOOKUP;
+ u32 map_domain_index0;
+ u8 cached = 0;
+
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip60 = vlib_buffer_get_current (p0);
+ ip40 = (ip4_header_t *) (ip60 + 1);
+ d0 =
+ ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+ &map_domain_index0);
+
+ map_ip4_reass_lock ();
+ map_ip4_reass_t *r = map_ip4_reass_get (ip40->src_address.as_u32,
+ ip40->dst_address.as_u32,
+ ip40->fragment_id,
+ ip40->protocol,
+ &fragments_to_drop);
+ if (PREDICT_FALSE (!r))
+ {
+ // Could not create a caching entry
+ error0 = MAP_ERROR_FRAGMENT_MEMORY;
+ }
+ else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40)))
+ {
+ if (r->port >= 0)
+ {
+ // We know the port already
+ port0 = r->port;
+ }
+ else if (map_ip4_reass_add_fragment (r, pi0))
+ {
+ // Not enough space for caching
+ error0 = MAP_ERROR_FRAGMENT_MEMORY;
+ map_ip4_reass_free (r, &fragments_to_drop);
+ }
+ else
+ {
+ cached = 1;
+ }
+ }
+ else
+ if ((port0 =
+ ip4_get_port (ip40, MAP_RECEIVER, p0->current_length)) < 0)
+ {
+ // Could not find port. We'll free the reassembly.
+ error0 = MAP_ERROR_BAD_PROTOCOL;
+ port0 = 0;
+ map_ip4_reass_free (r, &fragments_to_drop);
+ }
+ else
+ {
+ r->port = port0;
+ map_ip4_reass_get_fragments (r, &fragments_to_loopback);
+ }
+
+#ifdef MAP_IP4_REASS_COUNT_BYTES
+ if (!cached && r)
+ {
+ r->forwarded += clib_host_to_net_u16 (ip40->length) - 20;
+ if (!ip4_get_fragment_more (ip40))
+ r->expected_total =
+ ip4_get_fragment_offset (ip40) * 8 +
+ clib_host_to_net_u16 (ip40->length) - 20;
+ if (r->forwarded >= r->expected_total)
+ map_ip4_reass_free (r, &fragments_to_drop);
+ }
+#endif
+
+ map_ip4_reass_unlock ();
+
+ // NOTE: Most operations have already been performed by ip4_map
+ // All we need is the right destination address
+ ip60->dst_address.as_u64[0] =
+ map_get_pfx_net (d0, ip40->dst_address.as_u32, port0);
+ ip60->dst_address.as_u64[1] =
+ map_get_sfx_net (d0, ip40->dst_address.as_u32, port0);
+
+ if (PREDICT_FALSE
+ (d0->mtu
+ && (clib_net_to_host_u16 (ip60->payload_length) +
+ sizeof (*ip60) > d0->mtu)))
+ {
+ vnet_buffer (p0)->ip_frag.header_offset = sizeof (*ip60);
+ vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP6_LOOKUP;
+ vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
+ vnet_buffer (p0)->ip_frag.flags = IP_FRAG_FLAG_IP6_HEADER;
+ next0 = IP4_MAP_REASS_NEXT_IP4_FRAGMENT;
+ }
+
+ if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ map_ip4_map_reass_trace_t *tr =
+ vlib_add_trace (vm, node, p0, sizeof (*tr));
+ tr->map_domain_index = map_domain_index0;
+ tr->port = port0;
+ tr->cached = cached;
+ }
+
+ if (cached)
+ {
+ //Dequeue the packet
+ n_left_to_next++;
+ to_next--;
+ }
+ else
+ {
+ if (error0 == MAP_ERROR_NONE)
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
+ cpu_index, map_domain_index0,
+ 1,
+ clib_net_to_host_u16
+ (ip60->payload_length) + 40);
+ next0 =
+ (error0 == MAP_ERROR_NONE) ? next0 : IP4_MAP_REASS_NEXT_DROP;
+ p0->error = error_node->errors[error0];
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, pi0, next0);
+ }
+
+ //Loopback when we reach the end of the input vector:
+ //re-inject fragments released from the reassembly cache into this frame
+ if (n_left_from == 0 && vec_len (fragments_to_loopback))
+ {
+ from = vlib_frame_vector_args (frame);
+ u32 len = vec_len (fragments_to_loopback);
+ if (len <= VLIB_FRAME_SIZE)
+ {
+ clib_memcpy (from, fragments_to_loopback,
+ sizeof (u32) * len);
+ n_left_from = len;
+ vec_reset_length (fragments_to_loopback);
+ }
+ else
+ {
+ clib_memcpy (from,
+ fragments_to_loopback + (len -
+ VLIB_FRAME_SIZE),
+ sizeof (u32) * VLIB_FRAME_SIZE);
+ n_left_from = VLIB_FRAME_SIZE;
+ _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
+ }
+ }
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ map_send_all_to_node (vm, fragments_to_drop, node,
+ &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
+ IP4_MAP_REASS_NEXT_DROP);
+
+ vec_free (fragments_to_drop);
+ vec_free (fragments_to_loopback);
+ return frame->n_vectors;
+}
+
+static char *map_error_strings[] = {
+#define _(sym,string) string,
+ foreach_map_error
+#undef _
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(ip4_map_node) = {
+ .function = ip4_map,
+ .name = "ip4-map",
+ .vector_size = sizeof(u32),
+ .format_trace = format_map_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = MAP_N_ERROR,
+ .error_strings = map_error_strings,
+
+ .n_next_nodes = IP4_MAP_N_NEXT,
+ .next_nodes = {
+ [IP4_MAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
+#ifdef MAP_SKIP_IP6_LOOKUP
+ [IP4_MAP_NEXT_IP6_REWRITE] = "ip6-rewrite",
+#endif
+ [IP4_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
+ [IP4_MAP_NEXT_IP6_FRAGMENT] = "ip6-frag",
+ [IP4_MAP_NEXT_REASS] = "ip4-map-reass",
+ [IP4_MAP_NEXT_ICMP_ERROR] = "ip4-icmp-error",
+ [IP4_MAP_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(ip4_map_reass_node) = {
+ .function = ip4_map_reass,
+ .name = "ip4-map-reass",
+ .vector_size = sizeof(u32),
+ .format_trace = format_ip4_map_reass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = MAP_N_ERROR,
+ .error_strings = map_error_strings,
+
+ .n_next_nodes = IP4_MAP_REASS_N_NEXT,
+ .next_nodes = {
+ [IP4_MAP_REASS_NEXT_IP6_LOOKUP] = "ip6-lookup",
+ [IP4_MAP_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
+ [IP4_MAP_REASS_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/map/ip4_map_t.c b/src/vnet/map/ip4_map_t.c
new file mode 100644
index 00000000000..15974d8a46e
--- /dev/null
+++ b/src/vnet/map/ip4_map_t.c
@@ -0,0 +1,1363 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "map.h"
+
+#include "../ip/ip_frag.h"
+
+#define IP4_MAP_T_DUAL_LOOP 1
+
+typedef enum
+{
+ IP4_MAPT_NEXT_MAPT_TCP_UDP,
+ IP4_MAPT_NEXT_MAPT_ICMP,
+ IP4_MAPT_NEXT_MAPT_FRAGMENTED,
+ IP4_MAPT_NEXT_DROP,
+ IP4_MAPT_N_NEXT
+} ip4_mapt_next_t;
+
+typedef enum
+{
+ IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
+ IP4_MAPT_ICMP_NEXT_IP6_FRAG,
+ IP4_MAPT_ICMP_NEXT_DROP,
+ IP4_MAPT_ICMP_N_NEXT
+} ip4_mapt_icmp_next_t;
+
+typedef enum
+{
+ IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
+ IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,
+ IP4_MAPT_TCP_UDP_NEXT_DROP,
+ IP4_MAPT_TCP_UDP_N_NEXT
+} ip4_mapt_tcp_udp_next_t;
+
+typedef enum
+{
+ IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
+ IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,
+ IP4_MAPT_FRAGMENTED_NEXT_DROP,
+ IP4_MAPT_FRAGMENTED_N_NEXT
+} ip4_mapt_fragmented_next_t;
+
+//This is used to pass information within the buffer data,
+//as the buffer structure is too small to contain big structures like this.
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ ip6_address_t daddr;
+ ip6_address_t saddr;
+ //IPv6 header + Fragmentation header will be here
+ //sizeof(ip6) (40) + sizeof(ip_frag) (8) - sizeof(ip4) (20) = 28 bytes
+ u8 unused[28];
+}) ip4_mapt_pseudo_header_t;
+/* *INDENT-ON* */
+
+#define frag_id_4to6(id) (id)
+
+//TODO: Find the right place in memory for this.
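+//Editor's note: ICMPv4 Parameter Problem pointer -> ICMPv6 pointer
+//translation, in the style of RFC 6145/7915: the index is the offending
+//IPv4 header offset, the value the equivalent IPv6 header offset, and
+//~0 (0xff) marks offsets with no IPv6 equivalent (the packet is dropped).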
+/* *INDENT-OFF* */
+static u8 icmp_to_icmp6_updater_pointer_table[] =
+ { 0, 1, 4, 4, ~0,
+ ~0, ~0, ~0, 7, 6,
+ ~0, ~0, 8, 8, 8,
+ 8, 24, 24, 24, 24
+ };
+/* *INDENT-ON* */
+
+
+static_always_inline int
+ip4_map_fragment_cache (ip4_header_t * ip4, u16 port)
+{
+ u32 *ignore = NULL;
+ map_ip4_reass_lock ();
+ map_ip4_reass_t *r =
+ map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
+ ip4->fragment_id,
+ (ip4->protocol ==
+ IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
+ &ignore);
+ if (r)
+ r->port = port;
+
+ map_ip4_reass_unlock ();
+ return !r;
+}
+
+static_always_inline i32
+ip4_map_fragment_get_port (ip4_header_t * ip4)
+{
+ u32 *ignore = NULL;
+ map_ip4_reass_lock ();
+ map_ip4_reass_t *r =
+ map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
+ ip4->fragment_id,
+ (ip4->protocol ==
+ IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
+ &ignore);
+ i32 ret = r ? r->port : -1;
+ map_ip4_reass_unlock ();
+ return ret;
+}
+
+
+/* Statelessly translates an ICMP packet into ICMPv6.
+ *
+ * Warning: The checksum will need to be recomputed.
+ *
+ */
+static_always_inline int
+ip4_icmp_to_icmp6_in_place (icmp46_header_t * icmp, u32 icmp_len,
+ i32 * receiver_port, ip4_header_t ** inner_ip4)
+{
+ *inner_ip4 = NULL;
+ switch (icmp->type)
+ {
+ case ICMP4_echo_reply:
+ *receiver_port = ((u16 *) icmp)[2];
+ icmp->type = ICMP6_echo_reply;
+ break;
+ case ICMP4_echo_request:
+ *receiver_port = ((u16 *) icmp)[2];
+ icmp->type = ICMP6_echo_request;
+ break;
+ case ICMP4_destination_unreachable:
+ *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8);
+ *receiver_port = ip4_get_port (*inner_ip4, MAP_SENDER, icmp_len - 8);
+
+ switch (icmp->code)
+ {
+ case ICMP4_destination_unreachable_destination_unreachable_net: //0
+ case ICMP4_destination_unreachable_destination_unreachable_host: //1
+ icmp->type = ICMP6_destination_unreachable;
+ icmp->code = ICMP6_destination_unreachable_no_route_to_destination;
+ break;
+ case ICMP4_destination_unreachable_protocol_unreachable: //2
+ icmp->type = ICMP6_parameter_problem;
+ icmp->code = ICMP6_parameter_problem_unrecognized_next_header;
+ break;
+ case ICMP4_destination_unreachable_port_unreachable: //3
+ icmp->type = ICMP6_destination_unreachable;
+ icmp->code = ICMP6_destination_unreachable_port_unreachable;
+ break;
+ case ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set: //4
+ icmp->type =
+ ICMP6_packet_too_big;
+ icmp->code = 0;
+ {
+ u32 advertised_mtu = clib_net_to_host_u32 (*((u32 *) (icmp + 1)));
+ if (advertised_mtu)
+ advertised_mtu += 20;
+ else
+ advertised_mtu = 1000; //FIXME ! (RFC 1191 - plateau value)
+
+ //FIXME: = minimum(advertised MTU+20, MTU_of_IPv6_nexthop, (MTU_of_IPv4_nexthop)+20)
+ *((u32 *) (icmp + 1)) = clib_host_to_net_u32 (advertised_mtu);
+ }
+ break;
+
+ case ICMP4_destination_unreachable_source_route_failed: //5
+ case ICMP4_destination_unreachable_destination_network_unknown: //6
+ case ICMP4_destination_unreachable_destination_host_unknown: //7
+ case ICMP4_destination_unreachable_source_host_isolated: //8
+ case ICMP4_destination_unreachable_network_unreachable_for_type_of_service: //11
+ case ICMP4_destination_unreachable_host_unreachable_for_type_of_service: //12
+ icmp->type =
+ ICMP6_destination_unreachable;
+ icmp->code = ICMP6_destination_unreachable_no_route_to_destination;
+ break;
+ case ICMP4_destination_unreachable_network_administratively_prohibited: //9
+ case ICMP4_destination_unreachable_host_administratively_prohibited: //10
+ case ICMP4_destination_unreachable_communication_administratively_prohibited: //13
+ case ICMP4_destination_unreachable_precedence_cutoff_in_effect: //15
+ icmp->type = ICMP6_destination_unreachable;
+ icmp->code =
+ ICMP6_destination_unreachable_destination_administratively_prohibited;
+ break;
+ case ICMP4_destination_unreachable_host_precedence_violation: //14
+ default:
+ return -1;
+ }
+ break;
+
+ case ICMP4_time_exceeded: //11
+ *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8);
+ *receiver_port = ip4_get_port (*inner_ip4, MAP_SENDER, icmp_len - 8);
+ icmp->type = ICMP6_time_exceeded;
+ //icmp->code = icmp->code //unchanged
+ break;
+
+ case ICMP4_parameter_problem:
+ *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8);
+ *receiver_port = ip4_get_port (*inner_ip4, MAP_SENDER, icmp_len - 8);
+
+ switch (icmp->code)
+ {
+ case ICMP4_parameter_problem_pointer_indicates_error:
+ case ICMP4_parameter_problem_bad_length:
+ icmp->type = ICMP6_parameter_problem;
+ icmp->code = ICMP6_parameter_problem_erroneous_header_field;
+ {
+ u8 ptr =
+ icmp_to_icmp6_updater_pointer_table[*((u8 *) (icmp + 1))];
+ if (ptr == 0xff)
+ return -1;
+
+ *((u32 *) (icmp + 1)) = clib_host_to_net_u32 (ptr);
+ }
+ break;
+ default:
+ //All other codes cause dropping the packet
+ return -1;
+ }
+ break;
+
+ default:
+ //All other types cause dropping the packet
+ return -1;
+ break;
+ }
+ return 0;
+}
+
+static_always_inline void
+_ip4_map_t_icmp (map_domain_t * d, vlib_buffer_t * p, u8 * error)
+{
+ ip4_header_t *ip4, *inner_ip4;
+ ip6_header_t *ip6, *inner_ip6;
+ u32 ip_len;
+ icmp46_header_t *icmp;
+ i32 recv_port;
+ ip_csum_t csum;
+ u16 *inner_L4_checksum = 0;
+ ip6_frag_hdr_t *inner_frag;
+ u32 inner_frag_id;
+ u32 inner_frag_offset;
+ u8 inner_frag_more;
+
+ ip4 = vlib_buffer_get_current (p);
+ ip_len = clib_net_to_host_u16 (ip4->length);
+ ASSERT (ip_len <= p->current_length);
+
+ icmp = (icmp46_header_t *) (ip4 + 1);
+ if (ip4_icmp_to_icmp6_in_place (icmp, ip_len - sizeof (*ip4),
+ &recv_port, &inner_ip4))
+ {
+ *error = MAP_ERROR_ICMP;
+ return;
+ }
+
+ if (recv_port < 0)
+ {
+ // In case of 1:1 mapping, we don't care about the port
+ if (d->ea_bits_len == 0 && d->rules)
+ {
+ recv_port = 0;
+ }
+ else
+ {
+ *error = MAP_ERROR_ICMP;
+ return;
+ }
+ }
+
+ if (inner_ip4)
+ {
+ //We have 2 headers to translate.
+ //We need to make some room in the middle of the packet
+
+ if (PREDICT_FALSE (ip4_is_fragment (inner_ip4)))
+ {
+ //Here it starts getting really tricky
+ //We will add a fragmentation header in the inner packet
+
+ if (!ip4_is_first_fragment (inner_ip4))
+ {
+ //For now we do not handle unless it is the first fragment
+ //Ideally we should handle the case as we are in slow path already
+ *error = MAP_ERROR_FRAGMENTED;
+ return;
+ }
+
+ vlib_buffer_advance (p,
+ -2 * (sizeof (*ip6) - sizeof (*ip4)) -
+ sizeof (*inner_frag));
+ ip6 = vlib_buffer_get_current (p);
+ clib_memcpy (u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)), ip4,
+ 20 + 8);
+ ip4 =
+ (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4));
+ icmp = (icmp46_header_t *) (ip4 + 1);
+
+ inner_ip6 =
+ (ip6_header_t *) u8_ptr_add (inner_ip4,
+ sizeof (*ip4) - sizeof (*ip6) -
+ sizeof (*inner_frag));
+ inner_frag =
+ (ip6_frag_hdr_t *) u8_ptr_add (inner_ip6, sizeof (*inner_ip6));
+ ip6->payload_length =
+ u16_net_add (ip4->length,
+ sizeof (*ip6) - 2 * sizeof (*ip4) +
+ sizeof (*inner_frag));
+ inner_frag_id = frag_id_4to6 (inner_ip4->fragment_id);
+ inner_frag_offset = ip4_get_fragment_offset (inner_ip4);
+ inner_frag_more =
+ ! !(inner_ip4->flags_and_fragment_offset &
+ clib_net_to_host_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS));
+ }
+ else
+ {
+ vlib_buffer_advance (p, -2 * (sizeof (*ip6) - sizeof (*ip4)));
+ ip6 = vlib_buffer_get_current (p);
+ clib_memcpy (u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)), ip4,
+ 20 + 8);
+ ip4 =
+ (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4));
+ icmp = (icmp46_header_t *) u8_ptr_add (ip4, sizeof (*ip4));
+ inner_ip6 =
+ (ip6_header_t *) u8_ptr_add (inner_ip4,
+ sizeof (*ip4) - sizeof (*ip6));
+ ip6->payload_length =
+ u16_net_add (ip4->length, sizeof (*ip6) - 2 * sizeof (*ip4));
+ inner_frag = NULL;
+ }
+
+ if (PREDICT_TRUE (inner_ip4->protocol == IP_PROTOCOL_TCP))
+ {
+ inner_L4_checksum = &((tcp_header_t *) (inner_ip4 + 1))->checksum;
+ *inner_L4_checksum =
+ ip_csum_fold (ip_csum_sub_even
+ (*inner_L4_checksum,
+ *((u64 *) (&inner_ip4->src_address))));
+ }
+ else if (PREDICT_TRUE (inner_ip4->protocol == IP_PROTOCOL_UDP))
+ {
+ inner_L4_checksum = &((udp_header_t *) (inner_ip4 + 1))->checksum;
+ if (!*inner_L4_checksum)
+ {
+ //The inner packet was translated earlier, so it originally came from IPv6,
+ //and an IPv6 packet cannot have a zero UDP checksum - treat this as an error
+ *error = MAP_ERROR_ICMP;
+ return;
+ }
+ *inner_L4_checksum =
+ ip_csum_fold (ip_csum_sub_even
+ (*inner_L4_checksum,
+ *((u64 *) (&inner_ip4->src_address))));
+ }
+ else if (inner_ip4->protocol == IP_PROTOCOL_ICMP)
+ {
+ //We have an ICMP inside an ICMP
+ //It needs to be translated, but not for error ICMP messages
+ icmp46_header_t *inner_icmp = (icmp46_header_t *) (inner_ip4 + 1);
+ csum = inner_icmp->checksum;
+ //Only types ICMP4_echo_request and ICMP4_echo_reply are handled by ip4_icmp_to_icmp6_in_place
+ csum = ip_csum_sub_even (csum, *((u16 *) inner_icmp));
+ inner_icmp->type = (inner_icmp->type == ICMP4_echo_request) ?
+ ICMP6_echo_request : ICMP6_echo_reply;
+ csum = ip_csum_add_even (csum, *((u16 *) inner_icmp));
+ csum =
+ ip_csum_add_even (csum, clib_host_to_net_u16 (IP_PROTOCOL_ICMP6));
+ csum =
+ ip_csum_add_even (csum, inner_ip4->length - sizeof (*inner_ip4));
+ inner_icmp->checksum = ip_csum_fold (csum);
+ inner_L4_checksum = &inner_icmp->checksum;
+ inner_ip4->protocol = IP_PROTOCOL_ICMP6;
+ }
+ else
+ {
+ /* To shut up Coverity */
+ os_panic ();
+ }
+
+ //FIXME: Security check with the port found in the inner packet
+
+ csum = *inner_L4_checksum; //Initial checksum of the inner L4 header
+ //FIXME: Shouldn't we remove ip addresses from there ?
+
+ inner_ip6->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 ((6 << 28) + (inner_ip4->tos << 20));
+ inner_ip6->payload_length =
+ u16_net_add (inner_ip4->length, -sizeof (*inner_ip4));
+ inner_ip6->hop_limit = inner_ip4->ttl;
+ inner_ip6->protocol = inner_ip4->protocol;
+
+ //Note that the source address is within the domain
+ //while the destination address is the one outside the domain
+ ip4_map_t_embedded_address (d, &inner_ip6->dst_address,
+ &inner_ip4->dst_address);
+ inner_ip6->src_address.as_u64[0] =
+ map_get_pfx_net (d, inner_ip4->src_address.as_u32, recv_port);
+ inner_ip6->src_address.as_u64[1] =
+ map_get_sfx_net (d, inner_ip4->src_address.as_u32, recv_port);
+
+ if (PREDICT_FALSE (inner_frag != NULL))
+ {
+ inner_frag->next_hdr = inner_ip6->protocol;
+ inner_frag->identification = inner_frag_id;
+ inner_frag->rsv = 0;
+ inner_frag->fragment_offset_and_more =
+ ip6_frag_hdr_offset_and_more (inner_frag_offset, inner_frag_more);
+ inner_ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
+ inner_ip6->payload_length =
+ clib_host_to_net_u16 (clib_net_to_host_u16
+ (inner_ip6->payload_length) +
+ sizeof (*inner_frag));
+ }
+
+ csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[0]);
+ csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[1]);
+ csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[0]);
+ csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[1]);
+ *inner_L4_checksum = ip_csum_fold (csum);
+
+ }
+ else
+ {
+ vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6));
+ ip6 = vlib_buffer_get_current (p);
+ ip6->payload_length =
+ clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
+ sizeof (*ip4));
+ }
+
+ //Translate outer IPv6
+ ip6->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
+
+ ip6->hop_limit = ip4->ttl;
+ ip6->protocol = IP_PROTOCOL_ICMP6;
+
+ ip4_map_t_embedded_address (d, &ip6->src_address, &ip4->src_address);
+ ip6->dst_address.as_u64[0] =
+ map_get_pfx_net (d, ip4->dst_address.as_u32, recv_port);
+ ip6->dst_address.as_u64[1] =
+ map_get_sfx_net (d, ip4->dst_address.as_u32, recv_port);
+
+ //Truncate when the packet exceeds the minimal IPv6 MTU
+ if (p->current_length > 1280)
+ {
+ ip6->payload_length = clib_host_to_net_u16 (1280 - sizeof (*ip6));
+ p->current_length = 1280; //Looks too simple to be correct...
+ }
+
+ //TODO: We could do an easy diff-checksum for echo requests/replies
+ //Recompute ICMP checksum
+ icmp->checksum = 0;
+ csum = ip_csum_with_carry (0, ip6->payload_length);
+ csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (ip6->protocol));
+ csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[0]);
+ csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[1]);
+ csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[0]);
+ csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[1]);
+ csum =
+ ip_incremental_checksum (csum, icmp,
+ clib_net_to_host_u16 (ip6->payload_length));
+ icmp->checksum = ~ip_csum_fold (csum);
+}
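+
+/*
+ * A note on the incremental checksum updates above: the Internet checksum
+ * is the one's complement of a one's-complement sum, so translation does
+ * not require recomputing the L4 checksum from scratch (RFC 1624).  It is
+ * enough to subtract the pseudo-header words that disappear and add the
+ * ones that appear.  A minimal sketch of the pattern, illustrative only:
+ *
+ *   ip_csum_t c = *l4_checksum;
+ *   c = ip_csum_sub_even (c, ip4->src_address.as_u32);    // drop old word
+ *   c = ip_csum_add_even (c, ip6->src_address.as_u64[0]); // add new words
+ *   c = ip_csum_add_even (c, ip6->src_address.as_u64[1]);
+ *   *l4_checksum = ip_csum_fold (c);  // fold carries back into 16 bits
+ */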
+
+static uword
+ip4_map_t_icmp (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ vlib_combined_counter_main_t *cm = map_main.domain_counters;
+ u32 cpu_index = os_get_cpu_number ();
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0;
+ vlib_buffer_t *p0;
+ ip4_mapt_icmp_next_t next0;
+ u8 error0;
+ map_domain_t *d0;
+ u16 len0;
+
+ next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ error0 = MAP_ERROR_NONE;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t)); //The pseudo-header is not used
+ len0 =
+ clib_net_to_host_u16 (((ip4_header_t *)
+ vlib_buffer_get_current (p0))->length);
+ d0 =
+ pool_elt_at_index (map_main.domains,
+ vnet_buffer (p0)->map_t.map_domain_index);
+ _ip4_map_t_icmp (d0, p0, &error0);
+
+ if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+ {
+ vnet_buffer (p0)->ip_frag.header_offset = 0;
+ vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+ vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
+ next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
+ }
+ if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
+ {
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
+ cpu_index,
+ vnet_buffer (p0)->map_t.
+ map_domain_index, 1, len0);
+ }
+ else
+ {
+ next0 = IP4_MAPT_ICMP_NEXT_DROP;
+ }
+ p0->error = error_node->errors[error0];
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, pi0,
+ next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return frame->n_vectors;
+}
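+
+/*
+ * All three per-protocol translation nodes share the same tail pattern:
+ * if the translated packet is larger than the per-domain MTU recorded in
+ * vnet_buffer(p)->map_t.mtu, the ip_frag metadata is filled in and the
+ * buffer is diverted to the IPv6 fragmentation node; otherwise it goes
+ * straight to ip6-lookup.
+ */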
+
+static uword
+ip4_map_t_fragmented (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0;
+ vlib_buffer_t *p0;
+ ip4_header_t *ip40;
+ ip6_header_t *ip60;
+ ip6_frag_hdr_t *frag0;
+ ip4_mapt_pseudo_header_t *pheader0;
+ ip4_mapt_fragmented_next_t next0;
+
+ next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+
+ //Accessing pseudo header
+ pheader0 = vlib_buffer_get_current (p0);
+ vlib_buffer_advance (p0, sizeof (*pheader0));
+
+ //Accessing ip4 header
+ ip40 = vlib_buffer_get_current (p0);
+ frag0 =
+ (ip6_frag_hdr_t *) u8_ptr_add (ip40,
+ sizeof (*ip40) - sizeof (*frag0));
+ ip60 =
+ (ip6_header_t *) u8_ptr_add (ip40,
+ sizeof (*ip40) - sizeof (*frag0) -
+ sizeof (*ip60));
+ vlib_buffer_advance (p0,
+ sizeof (*ip40) - sizeof (*ip60) -
+ sizeof (*frag0));
+
+ //We know that the protocol was one of ICMP, TCP or UDP
+ //because the first fragment was found and cached
+ frag0->next_hdr =
+ (ip40->protocol ==
+ IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip40->protocol;
+ frag0->identification = frag_id_4to6 (ip40->fragment_id);
+ frag0->rsv = 0;
+ frag0->fragment_offset_and_more =
+ ip6_frag_hdr_offset_and_more (ip4_get_fragment_offset (ip40),
+ clib_net_to_host_u16
+ (ip40->flags_and_fragment_offset) &
+ IP4_HEADER_FLAG_MORE_FRAGMENTS);
+
+ ip60->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 ((6 << 28) + (ip40->tos << 20));
+ ip60->payload_length =
+ clib_host_to_net_u16 (clib_net_to_host_u16 (ip40->length) -
+ sizeof (*ip40) + sizeof (*frag0));
+ ip60->hop_limit = ip40->ttl;
+ ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
+ ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
+ ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
+ ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
+ ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];
+
+ if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+ {
+ vnet_buffer (p0)->ip_frag.header_offset = 0;
+ vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+ vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
+ next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, pi0,
+ next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return frame->n_vectors;
+}
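+
+/*
+ * The pointer arithmetic above rewrites the headers in place.  The IPv6
+ * header (40B) plus fragment header (8B) exceed the IPv4 header (20B) by
+ * 28 bytes, so both are written starting 28 bytes before the IPv4 header
+ * and the buffer is advanced by the negative difference:
+ *
+ *   sizeof (ip4) - sizeof (ip6) - sizeof (frag) = 20 - 40 - 8 = -28
+ *
+ * The headroom is available because the 32-byte MAP-T pseudo-header was
+ * consumed from the front of the buffer just before.
+ */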
+
+static uword
+ip4_map_t_tcp_udp (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+#ifdef IP4_MAP_T_DUAL_LOOP
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 pi0, pi1;
+ vlib_buffer_t *p0, *p1;
+ ip4_header_t *ip40, *ip41;
+ ip6_header_t *ip60, *ip61;
+ ip_csum_t csum0, csum1;
+ u16 *checksum0, *checksum1;
+ ip6_frag_hdr_t *frag0, *frag1;
+ u32 frag_id0, frag_id1;
+ ip4_mapt_pseudo_header_t *pheader0, *pheader1;
+ ip4_mapt_tcp_udp_next_t next0, next1;
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
+ next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+
+ //Accessing pseudo header
+ pheader0 = vlib_buffer_get_current (p0);
+ pheader1 = vlib_buffer_get_current (p1);
+ vlib_buffer_advance (p0, sizeof (*pheader0));
+ vlib_buffer_advance (p1, sizeof (*pheader1));
+
+ //Accessing ip4 header
+ ip40 = vlib_buffer_get_current (p0);
+ ip41 = vlib_buffer_get_current (p1);
+ checksum0 =
+ (u16 *) u8_ptr_add (ip40,
+ vnet_buffer (p0)->map_t.checksum_offset);
+ checksum1 =
+ (u16 *) u8_ptr_add (ip41,
+ vnet_buffer (p1)->map_t.checksum_offset);
+
+ //UDP checksum is optional over IPv4 but mandatory for IPv6
+ //We do not check udp->length sanity but use our safe computed value instead
+ if (PREDICT_FALSE
+ (!*checksum0 && ip40->protocol == IP_PROTOCOL_UDP))
+ {
+ u16 udp_len =
+ clib_host_to_net_u16 (ip40->length) - sizeof (*ip40);
+ udp_header_t *udp =
+ (udp_header_t *) u8_ptr_add (ip40, sizeof (*ip40));
+ ip_csum_t csum;
+ csum = ip_incremental_checksum (0, udp, udp_len);
+ csum =
+ ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
+ csum =
+ ip_csum_with_carry (csum,
+ clib_host_to_net_u16 (IP_PROTOCOL_UDP));
+ csum =
+ ip_csum_with_carry (csum, *((u64 *) (&ip40->src_address)));
+ *checksum0 = ~ip_csum_fold (csum);
+ }
+ if (PREDICT_FALSE
+ (!*checksum1 && ip41->protocol == IP_PROTOCOL_UDP))
+ {
+ u16 udp_len =
+ clib_host_to_net_u16 (ip41->length) - sizeof (*ip40);
+ udp_header_t *udp =
+ (udp_header_t *) u8_ptr_add (ip41, sizeof (*ip40));
+ ip_csum_t csum;
+ csum = ip_incremental_checksum (0, udp, udp_len);
+ csum =
+ ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
+ csum =
+ ip_csum_with_carry (csum,
+ clib_host_to_net_u16 (IP_PROTOCOL_UDP));
+ csum =
+ ip_csum_with_carry (csum, *((u64 *) (&ip41->src_address)));
+ *checksum1 = ~ip_csum_fold (csum);
+ }
+
+ csum0 = ip_csum_sub_even (*checksum0, ip40->src_address.as_u32);
+ csum1 = ip_csum_sub_even (*checksum1, ip41->src_address.as_u32);
+ csum0 = ip_csum_sub_even (csum0, ip40->dst_address.as_u32);
+ csum1 = ip_csum_sub_even (csum1, ip41->dst_address.as_u32);
+
+ // Deal with fragmented packets
+ if (PREDICT_FALSE (ip40->flags_and_fragment_offset &
+ clib_host_to_net_u16
+ (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
+ {
+ ip60 =
+ (ip6_header_t *) u8_ptr_add (ip40,
+ sizeof (*ip40) - sizeof (*ip60) -
+ sizeof (*frag0));
+ frag0 =
+ (ip6_frag_hdr_t *) u8_ptr_add (ip40,
+ sizeof (*ip40) -
+ sizeof (*frag0));
+ frag_id0 = frag_id_4to6 (ip40->fragment_id);
+ vlib_buffer_advance (p0,
+ sizeof (*ip40) - sizeof (*ip60) -
+ sizeof (*frag0));
+ }
+ else
+ {
+ ip60 =
+ (ip6_header_t *) (((u8 *) ip40) + sizeof (*ip40) -
+ sizeof (*ip60));
+ vlib_buffer_advance (p0, sizeof (*ip40) - sizeof (*ip60));
+ frag0 = NULL;
+ }
+
+ if (PREDICT_FALSE (ip41->flags_and_fragment_offset &
+ clib_host_to_net_u16
+ (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
+ {
+ ip61 =
+ (ip6_header_t *) u8_ptr_add (ip41,
+ sizeof (*ip40) - sizeof (*ip60) -
+ sizeof (*frag0));
+ frag1 =
+ (ip6_frag_hdr_t *) u8_ptr_add (ip41,
+ sizeof (*ip40) -
+ sizeof (*frag0));
+ frag_id1 = frag_id_4to6 (ip41->fragment_id);
+ vlib_buffer_advance (p1,
+ sizeof (*ip40) - sizeof (*ip60) -
+ sizeof (*frag0));
+ }
+ else
+ {
+ ip61 =
+ (ip6_header_t *) (((u8 *) ip41) + sizeof (*ip40) -
+ sizeof (*ip60));
+ vlib_buffer_advance (p1, sizeof (*ip40) - sizeof (*ip60));
+ frag1 = NULL;
+ }
+
+ ip60->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 ((6 << 28) + (ip40->tos << 20));
+ ip61->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 ((6 << 28) + (ip41->tos << 20));
+ ip60->payload_length = u16_net_add (ip40->length, -sizeof (*ip40));
+ ip61->payload_length = u16_net_add (ip41->length, -sizeof (*ip40));
+ ip60->hop_limit = ip40->ttl;
+ ip61->hop_limit = ip41->ttl;
+ ip60->protocol = ip40->protocol;
+ ip61->protocol = ip41->protocol;
+
+ if (PREDICT_FALSE (frag0 != NULL))
+ {
+ frag0->next_hdr = ip60->protocol;
+ frag0->identification = frag_id0;
+ frag0->rsv = 0;
+ frag0->fragment_offset_and_more =
+ ip6_frag_hdr_offset_and_more (0, 1);
+ ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
+ ip60->payload_length =
+ u16_net_add (ip60->payload_length, sizeof (*frag0));
+ }
+
+ if (PREDICT_FALSE (frag1 != NULL))
+ {
+ frag1->next_hdr = ip61->protocol;
+ frag1->identification = frag_id1;
+ frag1->rsv = 0;
+ frag1->fragment_offset_and_more =
+ ip6_frag_hdr_offset_and_more (0, 1);
+ ip61->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
+ ip61->payload_length =
+ u16_net_add (ip61->payload_length, sizeof (*frag0));
+ }
+
+ //Finally, copy the addresses
+ ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
+ ip61->dst_address.as_u64[0] = pheader1->daddr.as_u64[0];
+ ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
+ ip61->dst_address.as_u64[1] = pheader1->daddr.as_u64[1];
+ ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
+ ip61->src_address.as_u64[0] = pheader1->saddr.as_u64[0];
+ ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];
+ ip61->src_address.as_u64[1] = pheader1->saddr.as_u64[1];
+
+ csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[0]);
+ csum1 = ip_csum_add_even (csum1, ip61->src_address.as_u64[0]);
+ csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[1]);
+ csum1 = ip_csum_add_even (csum1, ip61->src_address.as_u64[1]);
+ csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[0]);
+ csum1 = ip_csum_add_even (csum1, ip61->dst_address.as_u64[0]);
+ csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[1]);
+ csum1 = ip_csum_add_even (csum1, ip61->dst_address.as_u64[1]);
+ *checksum0 = ip_csum_fold (csum0);
+ *checksum1 = ip_csum_fold (csum1);
+
+ if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+ {
+ vnet_buffer (p0)->ip_frag.header_offset = 0;
+ vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+ vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
+ next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
+ }
+
+ if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
+ {
+ vnet_buffer (p1)->ip_frag.header_offset = 0;
+ vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
+ vnet_buffer (p1)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
+ next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next, pi0, pi1,
+ next0, next1);
+ }
+#endif
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0;
+ vlib_buffer_t *p0;
+ ip4_header_t *ip40;
+ ip6_header_t *ip60;
+ ip_csum_t csum0;
+ u16 *checksum0;
+ ip6_frag_hdr_t *frag0;
+ u32 frag_id0;
+ ip4_mapt_pseudo_header_t *pheader0;
+ ip4_mapt_tcp_udp_next_t next0;
+
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
+ p0 = vlib_get_buffer (vm, pi0);
+
+ //Accessing pseudo header
+ pheader0 = vlib_buffer_get_current (p0);
+ vlib_buffer_advance (p0, sizeof (*pheader0));
+
+ //Accessing ip4 header
+ ip40 = vlib_buffer_get_current (p0);
+ checksum0 =
+ (u16 *) u8_ptr_add (ip40,
+ vnet_buffer (p0)->map_t.checksum_offset);
+
+ //UDP checksum is optional over IPv4 but mandatory for IPv6
+ //We do not check udp->length sanity but use our safe computed value instead
+ if (PREDICT_FALSE
+ (!*checksum0 && ip40->protocol == IP_PROTOCOL_UDP))
+ {
+ u16 udp_len =
+ clib_host_to_net_u16 (ip40->length) - sizeof (*ip40);
+ udp_header_t *udp =
+ (udp_header_t *) u8_ptr_add (ip40, sizeof (*ip40));
+ ip_csum_t csum;
+ csum = ip_incremental_checksum (0, udp, udp_len);
+ csum =
+ ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
+ csum =
+ ip_csum_with_carry (csum,
+ clib_host_to_net_u16 (IP_PROTOCOL_UDP));
+ csum =
+ ip_csum_with_carry (csum, *((u64 *) (&ip40->src_address)));
+ *checksum0 = ~ip_csum_fold (csum);
+ }
+
+ csum0 = ip_csum_sub_even (*checksum0, ip40->src_address.as_u32);
+ csum0 = ip_csum_sub_even (csum0, ip40->dst_address.as_u32);
+
+ // Deal with fragmented packets
+ if (PREDICT_FALSE (ip40->flags_and_fragment_offset &
+ clib_host_to_net_u16
+ (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
+ {
+ ip60 =
+ (ip6_header_t *) u8_ptr_add (ip40,
+ sizeof (*ip40) - sizeof (*ip60) -
+ sizeof (*frag0));
+ frag0 =
+ (ip6_frag_hdr_t *) u8_ptr_add (ip40,
+ sizeof (*ip40) -
+ sizeof (*frag0));
+ frag_id0 = frag_id_4to6 (ip40->fragment_id);
+ vlib_buffer_advance (p0,
+ sizeof (*ip40) - sizeof (*ip60) -
+ sizeof (*frag0));
+ }
+ else
+ {
+ ip60 =
+ (ip6_header_t *) (((u8 *) ip40) + sizeof (*ip40) -
+ sizeof (*ip60));
+ vlib_buffer_advance (p0, sizeof (*ip40) - sizeof (*ip60));
+ frag0 = NULL;
+ }
+
+ ip60->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 ((6 << 28) + (ip40->tos << 20));
+ ip60->payload_length = u16_net_add (ip40->length, -sizeof (*ip40));
+ ip60->hop_limit = ip40->ttl;
+ ip60->protocol = ip40->protocol;
+
+ if (PREDICT_FALSE (frag0 != NULL))
+ {
+ frag0->next_hdr = ip60->protocol;
+ frag0->identification = frag_id0;
+ frag0->rsv = 0;
+ frag0->fragment_offset_and_more =
+ ip6_frag_hdr_offset_and_more (0, 1);
+ ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
+ ip60->payload_length =
+ u16_net_add (ip60->payload_length, sizeof (*frag0));
+ }
+
+ //Finally, copy the addresses
+ ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
+ ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
+ ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
+ ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];
+
+ csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[0]);
+ csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[1]);
+ csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[0]);
+ csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[1]);
+ *checksum0 = ip_csum_fold (csum0);
+
+ if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+ {
+ //Send to fragmentation node if necessary
+ vnet_buffer (p0)->ip_frag.header_offset = 0;
+ vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+ vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
+ next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, pi0,
+ next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
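+
+/*
+ * Two details of the TCP/UDP path worth noting.  First, a zero (i.e.
+ * disabled) IPv4 UDP checksum is not allowed on the IPv6 side, so it is
+ * first materialised from scratch over the IPv4 pseudo-header before the
+ * usual incremental v4->v6 adjustment runs.  Second, the single
+ * "*((u64 *) (&ip40->src_address))" term sums both addresses at once,
+ * since src_address and dst_address are adjacent 32-bit fields in the
+ * IPv4 header.
+ */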
+
+static_always_inline void
+ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
+ ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
+ u8 * error0, ip4_mapt_next_t * next0)
+{
+ if (PREDICT_FALSE (ip4_get_fragment_offset (ip40)))
+ {
+ *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
+ if (d0->ea_bits_len == 0 && d0->rules)
+ {
+ *dst_port0 = 0;
+ }
+ else
+ {
+ *dst_port0 = ip4_map_fragment_get_port (ip40);
+ *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
+ }
+ }
+ else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
+ {
+ vnet_buffer (p0)->map_t.checksum_offset = 36;
+ *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
+ *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
+ *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
+ }
+ else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
+ {
+ vnet_buffer (p0)->map_t.checksum_offset = 26;
+ *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
+ *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
+ *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
+ }
+ else if (ip40->protocol == IP_PROTOCOL_ICMP)
+ {
+ *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
+ if (d0->ea_bits_len == 0 && d0->rules)
+ *dst_port0 = 0;
+ else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->type
+ == ICMP4_echo_reply
+ || ((icmp46_header_t *)
+ u8_ptr_add (ip40,
+ sizeof (*ip40)))->type == ICMP4_echo_request)
+ *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 6));
+ }
+ else
+ {
+ *error0 = MAP_ERROR_BAD_PROTOCOL;
+ }
+}
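+
+/*
+ * The literal checksum offsets above are relative to the start of the
+ * IPv4 header, whose length is pinned to 20 bytes by the 0x45 check in
+ * ip4_map_t(): 20 + 16 (TCP checksum field) = 36 and 20 + 6 (UDP
+ * checksum field) = 26.  The destination port lives at offset 2 of both
+ * TCP and UDP headers, hence "sizeof (*ip40) + 2".
+ */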
+
+static uword
+ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip4_map_t_node.index);
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ vlib_combined_counter_main_t *cm = map_main.domain_counters;
+ u32 cpu_index = os_get_cpu_number ();
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+#ifdef IP4_MAP_T_DUAL_LOOP
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 pi0, pi1;
+ vlib_buffer_t *p0, *p1;
+ ip4_header_t *ip40, *ip41;
+ map_domain_t *d0, *d1;
+ ip4_mapt_next_t next0 = 0, next1 = 0;
+ u16 ip4_len0, ip4_len1;
+ u8 error0, error1;
+ i32 dst_port0, dst_port1;
+ ip4_mapt_pseudo_header_t *pheader0, *pheader1;
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ error0 = MAP_ERROR_NONE;
+ error1 = MAP_ERROR_NONE;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+ ip40 = vlib_buffer_get_current (p0);
+ ip41 = vlib_buffer_get_current (p1);
+ ip4_len0 = clib_host_to_net_u16 (ip40->length);
+ ip4_len1 = clib_host_to_net_u16 (ip41->length);
+
+ if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
+ ip40->ip_version_and_header_length != 0x45))
+ {
+ error0 = MAP_ERROR_UNKNOWN;
+ next0 = IP4_MAPT_NEXT_DROP;
+ }
+
+ if (PREDICT_FALSE (p1->current_length < ip4_len1 ||
+ ip41->ip_version_and_header_length != 0x45))
+ {
+ error1 = MAP_ERROR_UNKNOWN;
+ next1 = IP4_MAPT_NEXT_DROP;
+ }
+
+ d0 = ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+ &vnet_buffer (p0)->map_t.map_domain_index);
+ d1 = ip4_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
+ &vnet_buffer (p1)->map_t.map_domain_index);
+
+ vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
+ vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
+
+ dst_port0 = -1;
+ dst_port1 = -1;
+
+ ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
+ &next0);
+ ip4_map_t_classify (p1, d1, ip41, ip4_len1, &dst_port1, &error1,
+ &next1);
+
+ //Add MAP-T pseudo header in front of the packet
+ vlib_buffer_advance (p0, -sizeof (*pheader0));
+ vlib_buffer_advance (p1, -sizeof (*pheader1));
+ pheader0 = vlib_buffer_get_current (p0);
+ pheader1 = vlib_buffer_get_current (p1);
+
+ //Save addresses within the packet
+ ip4_map_t_embedded_address (d0, &pheader0->saddr,
+ &ip40->src_address);
+ ip4_map_t_embedded_address (d1, &pheader1->saddr,
+ &ip41->src_address);
+ pheader0->daddr.as_u64[0] =
+ map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
+ pheader0->daddr.as_u64[1] =
+ map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
+ pheader1->daddr.as_u64[0] =
+ map_get_pfx_net (d1, ip41->dst_address.as_u32, (u16) dst_port1);
+ pheader1->daddr.as_u64[1] =
+ map_get_sfx_net (d1, ip41->dst_address.as_u32, (u16) dst_port1);
+
+ if (PREDICT_FALSE
+ (ip4_is_first_fragment (ip40) && (dst_port0 != -1)
+ && (d0->ea_bits_len != 0 || !d0->rules)
+ && ip4_map_fragment_cache (ip40, dst_port0)))
+ {
+ error0 = MAP_ERROR_FRAGMENT_MEMORY;
+ }
+
+ if (PREDICT_FALSE
+ (ip4_is_first_fragment (ip41) && (dst_port1 != -1)
+ && (d1->ea_bits_len != 0 || !d1->rules)
+ && ip4_map_fragment_cache (ip41, dst_port1)))
+ {
+ error1 = MAP_ERROR_FRAGMENT_MEMORY;
+ }
+
+ if (PREDICT_TRUE
+ (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
+ {
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
+ cpu_index,
+ vnet_buffer (p0)->map_t.
+ map_domain_index, 1,
+ clib_net_to_host_u16 (ip40->
+ length));
+ }
+
+ if (PREDICT_TRUE
+ (error1 == MAP_ERROR_NONE && next1 != IP4_MAPT_NEXT_MAPT_ICMP))
+ {
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
+ cpu_index,
+ vnet_buffer (p1)->map_t.
+ map_domain_index, 1,
+ clib_net_to_host_u16 (ip41->
+ length));
+ }
+
+ next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
+ next1 = (error1 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next1;
+ p0->error = error_node->errors[error0];
+ p1->error = error_node->errors[error1];
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+ n_left_to_next, pi0, pi1, next0,
+ next1);
+ }
+#endif
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0;
+ vlib_buffer_t *p0;
+ ip4_header_t *ip40;
+ map_domain_t *d0;
+ ip4_mapt_next_t next0;
+ u16 ip4_len0;
+ u8 error0;
+ i32 dst_port0;
+ ip4_mapt_pseudo_header_t *pheader0;
+
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ error0 = MAP_ERROR_NONE;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip40 = vlib_buffer_get_current (p0);
+ ip4_len0 = clib_host_to_net_u16 (ip40->length);
+ if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
+ ip40->ip_version_and_header_length != 0x45))
+ {
+ error0 = MAP_ERROR_UNKNOWN;
+ next0 = IP4_MAPT_NEXT_DROP;
+ }
+
+ d0 = ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+ &vnet_buffer (p0)->map_t.map_domain_index);
+
+ vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
+
+ dst_port0 = -1;
+ ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
+ &next0);
+
+ //Add MAP-T pseudo header in front of the packet
+ vlib_buffer_advance (p0, -sizeof (*pheader0));
+ pheader0 = vlib_buffer_get_current (p0);
+
+ //Save addresses within the packet
+ ip4_map_t_embedded_address (d0, &pheader0->saddr,
+ &ip40->src_address);
+ pheader0->daddr.as_u64[0] =
+ map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
+ pheader0->daddr.as_u64[1] =
+ map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
+
+ //It is important to cache at this stage because the result might be necessary
+ //for packets within the same vector.
+ //Actually, this approach even provides some limited out-of-order fragment support
+ if (PREDICT_FALSE
+ (ip4_is_first_fragment (ip40) && (dst_port0 != -1)
+ && (d0->ea_bits_len != 0 || !d0->rules)
+ && ip4_map_fragment_cache (ip40, dst_port0)))
+ {
+ error0 = MAP_ERROR_FRAGMENT_MEMORY;
+ }
+
+ if (PREDICT_TRUE
+ (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
+ {
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
+ cpu_index,
+ vnet_buffer (p0)->map_t.
+ map_domain_index, 1,
+ clib_net_to_host_u16 (ip40->
+ length));
+ }
+
+ next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
+ p0->error = error_node->errors[error0];
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, pi0,
+ next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return frame->n_vectors;
+}
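+
+/*
+ * ip4_map_t() is the demux stage: it derives the MAP IPv6 source and
+ * destination once, stores them in a pseudo-header prepended in buffer
+ * headroom, and lets the per-protocol nodes consume it.  A sketch of
+ * what the next node receives (illustrative only; the real type is
+ * ip4_mapt_pseudo_header_t):
+ *
+ *   current_data -> | ip6 saddr (16B) | ip6 daddr (16B) | IPv4 packet... |
+ */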
+
+static char *map_t_error_strings[] = {
+#define _(sym,string) string,
+ foreach_map_error
+#undef _
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
+ .function = ip4_map_t_fragmented,
+ .name = "ip4-map-t-fragmented",
+ .vector_size = sizeof(u32),
+ .format_trace = format_map_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = MAP_N_ERROR,
+ .error_strings = map_t_error_strings,
+
+ .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
+ .next_nodes = {
+ [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
+ [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
+ [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
+ .function = ip4_map_t_icmp,
+ .name = "ip4-map-t-icmp",
+ .vector_size = sizeof(u32),
+ .format_trace = format_map_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = MAP_N_ERROR,
+ .error_strings = map_t_error_strings,
+
+ .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
+ .next_nodes = {
+ [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
+ [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
+ [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
+ .function = ip4_map_t_tcp_udp,
+ .name = "ip4-map-t-tcp-udp",
+ .vector_size = sizeof(u32),
+ .format_trace = format_map_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = MAP_N_ERROR,
+ .error_strings = map_t_error_strings,
+
+ .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
+ .next_nodes = {
+ [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
+ [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
+ [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(ip4_map_t_node) = {
+ .function = ip4_map_t,
+ .name = "ip4-map-t",
+ .vector_size = sizeof(u32),
+ .format_trace = format_map_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = MAP_N_ERROR,
+ .error_strings = map_t_error_strings,
+
+ .n_next_nodes = IP4_MAPT_N_NEXT,
+ .next_nodes = {
+ [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
+ [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
+ [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
+ [IP4_MAPT_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/map/ip6_map.c b/src/vnet/map/ip6_map.c
new file mode 100644
index 00000000000..d2945059df7
--- /dev/null
+++ b/src/vnet/map/ip6_map.c
@@ -0,0 +1,1269 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "map.h"
+
+#include "../ip/ip_frag.h"
+
+enum ip6_map_next_e
+{
+ IP6_MAP_NEXT_IP4_LOOKUP,
+#ifdef MAP_SKIP_IP6_LOOKUP
+ IP6_MAP_NEXT_IP4_REWRITE,
+#endif
+ IP6_MAP_NEXT_IP6_REASS,
+ IP6_MAP_NEXT_IP4_REASS,
+ IP6_MAP_NEXT_IP4_FRAGMENT,
+ IP6_MAP_NEXT_IP6_ICMP_RELAY,
+ IP6_MAP_NEXT_IP6_LOCAL,
+ IP6_MAP_NEXT_DROP,
+ IP6_MAP_NEXT_ICMP,
+ IP6_MAP_N_NEXT,
+};
+
+enum ip6_map_ip6_reass_next_e
+{
+ IP6_MAP_IP6_REASS_NEXT_IP6_MAP,
+ IP6_MAP_IP6_REASS_NEXT_DROP,
+ IP6_MAP_IP6_REASS_N_NEXT,
+};
+
+enum ip6_map_ip4_reass_next_e
+{
+ IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP,
+ IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT,
+ IP6_MAP_IP4_REASS_NEXT_DROP,
+ IP6_MAP_IP4_REASS_N_NEXT,
+};
+
+enum ip6_icmp_relay_next_e
+{
+ IP6_ICMP_RELAY_NEXT_IP4_LOOKUP,
+ IP6_ICMP_RELAY_NEXT_DROP,
+ IP6_ICMP_RELAY_N_NEXT,
+};
+
+vlib_node_registration_t ip6_map_ip4_reass_node;
+vlib_node_registration_t ip6_map_ip6_reass_node;
+static vlib_node_registration_t ip6_map_icmp_relay_node;
+
+typedef struct
+{
+ u32 map_domain_index;
+ u16 port;
+ u8 cached;
+} map_ip6_map_ip4_reass_trace_t;
+
+u8 *
+format_ip6_map_ip4_reass_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ map_ip6_map_ip4_reass_trace_t *t =
+ va_arg (*args, map_ip6_map_ip4_reass_trace_t *);
+ return format (s, "MAP domain index: %d L4 port: %u Status: %s",
+ t->map_domain_index, t->port,
+ t->cached ? "cached" : "forwarded");
+}
+
+typedef struct
+{
+ u16 offset;
+ u16 frag_len;
+ u8 out;
+} map_ip6_map_ip6_reass_trace_t;
+
+u8 *
+format_ip6_map_ip6_reass_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ map_ip6_map_ip6_reass_trace_t *t =
+ va_arg (*args, map_ip6_map_ip6_reass_trace_t *);
+ return format (s, "Offset: %d Fragment length: %d Status: %s", t->offset,
+ t->frag_len, t->out ? "out" : "in");
+}
+
+/*
+ * ip6_map_sec_check
+ */
+static_always_inline bool
+ip6_map_sec_check (map_domain_t * d, u16 port, ip4_header_t * ip4,
+ ip6_header_t * ip6)
+{
+ u16 sp4 = clib_net_to_host_u16 (port);
+ u32 sa4 = clib_net_to_host_u32 (ip4->src_address.as_u32);
+ u64 sal6 = map_get_pfx (d, sa4, sp4);
+ u64 sar6 = map_get_sfx (d, sa4, sp4);
+
+ if (PREDICT_FALSE
+ (sal6 != clib_net_to_host_u64 (ip6->src_address.as_u64[0])
+ || sar6 != clib_net_to_host_u64 (ip6->src_address.as_u64[1])))
+ return (false);
+ return (true);
+}
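+
+/*
+ * Anti-spoofing: recompute the MAP IPv6 source (rule prefix + EA-bit
+ * suffix) from the *inner* IPv4 source address and port, and require it
+ * to match the *outer* IPv6 source address.  A CE cannot source traffic
+ * from a port set it does not own without failing this comparison.
+ */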
+
+static_always_inline void
+ip6_map_security_check (map_domain_t * d, ip4_header_t * ip4,
+ ip6_header_t * ip6, u32 * next, u8 * error)
+{
+ map_main_t *mm = &map_main;
+ if (d->ea_bits_len || d->rules)
+ {
+ if (d->psid_length > 0)
+ {
+ if (!ip4_is_fragment (ip4))
+ {
+ u16 port = ip4_map_get_port (ip4, MAP_SENDER);
+ if (port)
+ {
+ if (mm->sec_check)
+ *error =
+ ip6_map_sec_check (d, port, ip4,
+ ip6) ? MAP_ERROR_NONE :
+ MAP_ERROR_DECAP_SEC_CHECK;
+ }
+ else
+ {
+ *error = MAP_ERROR_BAD_PROTOCOL;
+ }
+ }
+ else
+ {
+ *next = mm->sec_check_frag ? IP6_MAP_NEXT_IP4_REASS : *next;
+ }
+ }
+ }
+}
+
+static_always_inline bool
+ip6_map_ip4_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
+{
+#ifdef MAP_SKIP_IP6_LOOKUP
+ map_main_t *mm = &map_main;
+ u32 adj_index0 = mm->adj4_index;
+ if (adj_index0 > 0)
+ {
+ ip_lookup_main_t *lm4 = &ip4_main.lookup_main;
+ ip_adjacency_t *adj = ip_get_adjacency (lm4, mm->adj4_index);
+ if (adj->n_adj > 1)
+ {
+ u32 hash_c0 = ip4_compute_flow_hash (ip, IP_FLOW_HASH_DEFAULT);
+ adj_index0 += (hash_c0 & (adj->n_adj - 1));
+ }
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] = adj_index0;
+ return (true);
+ }
+#endif
+ return (false);
+}
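+
+/*
+ * MAP_SKIP_IP6_LOOKUP fast path: when a pre-resolved IPv4 adjacency
+ * (mm->adj4_index) is configured, decapsulated packets can skip
+ * ip4-lookup and go directly to ip4-rewrite.  Multipath adjacencies are
+ * spread with the flow hash; the "hash & (n_adj - 1)" masking assumes
+ * n_adj is a power of two.
+ */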
+
+/*
+ * ip6_map
+ */
+static uword
+ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip6_map_node.index);
+ map_main_t *mm = &map_main;
+ vlib_combined_counter_main_t *cm = mm->domain_counters;
+ u32 cpu_index = os_get_cpu_number ();
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Dual loop */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 pi0, pi1;
+ vlib_buffer_t *p0, *p1;
+ u8 error0 = MAP_ERROR_NONE;
+ u8 error1 = MAP_ERROR_NONE;
+ map_domain_t *d0 = 0, *d1 = 0;
+ ip4_header_t *ip40, *ip41;
+ ip6_header_t *ip60, *ip61;
+ u16 port0 = 0, port1 = 0;
+ u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
+ u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
+ u32 next1 = IP6_MAP_NEXT_IP4_LOOKUP;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ /* IPv6 + IPv4 header + 8 bytes of ULP */
+ CLIB_PREFETCH (p2->data, 68, LOAD);
+ CLIB_PREFETCH (p3->data, 68, LOAD);
+ }
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+ ip60 = vlib_buffer_get_current (p0);
+ ip61 = vlib_buffer_get_current (p1);
+ vlib_buffer_advance (p0, sizeof (ip6_header_t));
+ vlib_buffer_advance (p1, sizeof (ip6_header_t));
+ ip40 = vlib_buffer_get_current (p0);
+ ip41 = vlib_buffer_get_current (p1);
+
+ /*
+ * Encapsulated IPv4 packet
+ * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
+ * - Lookup/Rewrite or Fragment node in case of packet > MTU
+ * Fragmented IPv6 packet
+ * ICMP IPv6 packet
+ * - Error -> Pass to ICMPv6/ICMPv4 relay
+ * - Info -> Pass to IPv6 local
+ * Anything else -> drop
+ */
+ if (PREDICT_TRUE
+ (ip60->protocol == IP_PROTOCOL_IP_IN_IP
+ && clib_net_to_host_u16 (ip60->payload_length) > 20))
+ {
+ d0 =
+ ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+ (ip4_address_t *) & ip40->src_address.
+ as_u32, &map_domain_index0, &error0);
+ }
+ else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
+ clib_net_to_host_u16 (ip60->payload_length) >
+ sizeof (icmp46_header_t))
+ {
+ icmp46_header_t *icmp = (void *) (ip60 + 1);
+ next0 = (icmp->type == ICMP6_echo_request
+ || icmp->type ==
+ ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
+ IP6_MAP_NEXT_IP6_ICMP_RELAY;
+ }
+ else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
+ {
+ next0 = IP6_MAP_NEXT_IP6_REASS;
+ }
+ else
+ {
+ error0 = MAP_ERROR_BAD_PROTOCOL;
+ }
+ if (PREDICT_TRUE
+ (ip61->protocol == IP_PROTOCOL_IP_IN_IP
+ && clib_net_to_host_u16 (ip61->payload_length) > 20))
+ {
+ d1 =
+ ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
+ (ip4_address_t *) & ip41->src_address.
+ as_u32, &map_domain_index1, &error1);
+ }
+ else if (ip61->protocol == IP_PROTOCOL_ICMP6 &&
+ clib_net_to_host_u16 (ip61->payload_length) >
+ sizeof (icmp46_header_t))
+ {
+ icmp46_header_t *icmp = (void *) (ip61 + 1);
+ next1 = (icmp->type == ICMP6_echo_request
+ || icmp->type ==
+ ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
+ IP6_MAP_NEXT_IP6_ICMP_RELAY;
+ }
+ else if (ip61->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
+ {
+ next1 = IP6_MAP_NEXT_IP6_REASS;
+ }
+ else
+ {
+ error1 = MAP_ERROR_BAD_PROTOCOL;
+ }
+
+ if (d0)
+ {
+ /* MAP inbound security check */
+ ip6_map_security_check (d0, ip40, ip60, &next0, &error0);
+
+ if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
+ next0 == IP6_MAP_NEXT_IP4_LOOKUP))
+ {
+ if (PREDICT_FALSE
+ (d0->mtu
+ && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
+ {
+ vnet_buffer (p0)->ip_frag.header_offset = 0;
+ vnet_buffer (p0)->ip_frag.flags = 0;
+ vnet_buffer (p0)->ip_frag.next_index =
+ IP4_FRAG_NEXT_IP4_LOOKUP;
+ vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
+ next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
+ }
+ else
+ {
+ next0 =
+ ip6_map_ip4_lookup_bypass (p0,
+ ip40) ?
+ IP6_MAP_NEXT_IP4_REWRITE : next0;
+ }
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+ cpu_index,
+ map_domain_index0, 1,
+ clib_net_to_host_u16
+ (ip40->length));
+ }
+ }
+ if (d1)
+ {
+ /* MAP inbound security check */
+ ip6_map_security_check (d1, ip41, ip61, &next1, &error1);
+
+ if (PREDICT_TRUE (error1 == MAP_ERROR_NONE &&
+ next1 == IP6_MAP_NEXT_IP4_LOOKUP))
+ {
+ if (PREDICT_FALSE
+ (d1->mtu
+ && (clib_host_to_net_u16 (ip41->length) > d1->mtu)))
+ {
+ vnet_buffer (p1)->ip_frag.header_offset = 0;
+ vnet_buffer (p1)->ip_frag.flags = 0;
+ vnet_buffer (p1)->ip_frag.next_index =
+ IP4_FRAG_NEXT_IP4_LOOKUP;
+ vnet_buffer (p1)->ip_frag.mtu = d1->mtu;
+ next1 = IP6_MAP_NEXT_IP4_FRAGMENT;
+ }
+ else
+ {
+ next1 =
+ ip6_map_ip4_lookup_bypass (p1,
+ ip41) ?
+ IP6_MAP_NEXT_IP4_REWRITE : next1;
+ }
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+ cpu_index,
+ map_domain_index1, 1,
+ clib_net_to_host_u16
+ (ip41->length));
+ }
+ }
+
+ if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
+ tr->map_domain_index = map_domain_index0;
+ tr->port = port0;
+ }
+
+ if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr));
+ tr->map_domain_index = map_domain_index1;
+ tr->port = port1;
+ }
+
+ if (error0 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
+ {
+ /* Set ICMP parameters */
+ vlib_buffer_advance (p0, -sizeof (ip6_header_t));
+ icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
+ ICMP6_destination_unreachable_source_address_failed_policy,
+ 0);
+ next0 = IP6_MAP_NEXT_ICMP;
+ }
+ else
+ {
+ next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
+ }
+
+ if (error1 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
+ {
+ /* Set ICMP parameters */
+ vlib_buffer_advance (p1, -sizeof (ip6_header_t));
+ icmp6_error_set_vnet_buffer (p1, ICMP6_destination_unreachable,
+ ICMP6_destination_unreachable_source_address_failed_policy,
+ 0);
+ next1 = IP6_MAP_NEXT_ICMP;
+ }
+ else
+ {
+ next1 = (error1 == MAP_ERROR_NONE) ? next1 : IP6_MAP_NEXT_DROP;
+ }
+
+ /* Reset packet */
+ if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
+ vlib_buffer_advance (p0, -sizeof (ip6_header_t));
+ if (next1 == IP6_MAP_NEXT_IP6_LOCAL)
+ vlib_buffer_advance (p1, -sizeof (ip6_header_t));
+
+ p0->error = error_node->errors[error0];
+ p1->error = error_node->errors[error1];
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+ n_left_to_next, pi0, pi1, next0,
+ next1);
+ }
+
+ /* Single loop */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0;
+ vlib_buffer_t *p0;
+ u8 error0 = MAP_ERROR_NONE;
+ map_domain_t *d0 = 0;
+ ip4_header_t *ip40;
+ ip6_header_t *ip60;
+ i32 port0 = 0;
+ u32 map_domain_index0 = ~0;
+ u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
+
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip60 = vlib_buffer_get_current (p0);
+ vlib_buffer_advance (p0, sizeof (ip6_header_t));
+ ip40 = vlib_buffer_get_current (p0);
+
+ /*
+ * Encapsulated IPv4 packet
+ * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
+ * - Lookup/Rewrite or Fragment node in case of packet > MTU
+ * Fragmented IPv6 packet
+ * ICMP IPv6 packet
+ * - Error -> Pass to ICMPv6/ICMPv4 relay
+ * - Info -> Pass to IPv6 local
+ * Anything else -> drop
+ */
+ if (PREDICT_TRUE
+ (ip60->protocol == IP_PROTOCOL_IP_IN_IP
+ && clib_net_to_host_u16 (ip60->payload_length) > 20))
+ {
+ d0 =
+ ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+ (ip4_address_t *) & ip40->src_address.
+ as_u32, &map_domain_index0, &error0);
+ }
+ else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
+ clib_net_to_host_u16 (ip60->payload_length) >
+ sizeof (icmp46_header_t))
+ {
+ icmp46_header_t *icmp = (void *) (ip60 + 1);
+ next0 = (icmp->type == ICMP6_echo_request
+ || icmp->type ==
+ ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
+ IP6_MAP_NEXT_IP6_ICMP_RELAY;
+ }
+ else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION &&
+ (((ip6_frag_hdr_t *) (ip60 + 1))->next_hdr ==
+ IP_PROTOCOL_IP_IN_IP))
+ {
+ next0 = IP6_MAP_NEXT_IP6_REASS;
+ }
+ else
+ {
+ error0 = MAP_ERROR_BAD_PROTOCOL;
+ }
+
+ if (d0)
+ {
+ /* MAP inbound security check */
+ ip6_map_security_check (d0, ip40, ip60, &next0, &error0);
+
+ if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
+ next0 == IP6_MAP_NEXT_IP4_LOOKUP))
+ {
+ if (PREDICT_FALSE
+ (d0->mtu
+ && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
+ {
+ vnet_buffer (p0)->ip_frag.header_offset = 0;
+ vnet_buffer (p0)->ip_frag.flags = 0;
+ vnet_buffer (p0)->ip_frag.next_index =
+ IP4_FRAG_NEXT_IP4_LOOKUP;
+ vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
+ next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
+ }
+ else
+ {
+ next0 =
+ ip6_map_ip4_lookup_bypass (p0,
+ ip40) ?
+ IP6_MAP_NEXT_IP4_REWRITE : next0;
+ }
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+ cpu_index,
+ map_domain_index0, 1,
+ clib_net_to_host_u16
+ (ip40->length));
+ }
+ }
+
+ if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
+ tr->map_domain_index = map_domain_index0;
+ tr->port = (u16) port0;
+ }
+
+ if (mm->icmp6_enabled &&
+ (error0 == MAP_ERROR_DECAP_SEC_CHECK
+ || error0 == MAP_ERROR_NO_DOMAIN))
+ {
+ /* Set ICMP parameters */
+ vlib_buffer_advance (p0, -sizeof (ip6_header_t));
+ icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
+ ICMP6_destination_unreachable_source_address_failed_policy,
+ 0);
+ next0 = IP6_MAP_NEXT_ICMP;
+ }
+ else
+ {
+ next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
+ }
+
+ /* Reset packet */
+ if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
+ vlib_buffer_advance (p0, -sizeof (ip6_header_t));
+
+ p0->error = error_node->errors[error0];
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, pi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+
+static_always_inline void
+ip6_map_ip6_reass_prepare (vlib_main_t * vm, vlib_node_runtime_t * node,
+ map_ip6_reass_t * r, u32 ** fragments_ready,
+ u32 ** fragments_to_drop)
+{
+ ip4_header_t *ip40;
+ ip6_header_t *ip60;
+ ip6_frag_hdr_t *frag0;
+ vlib_buffer_t *p0;
+
+ if (!r->ip4_header.ip_version_and_header_length)
+ return;
+
+ //The IP header is here, we need to check for packets
+ //that can be forwarded
+ int i;
+ for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+ {
+ if (r->fragments[i].pi == ~0 ||
+ ((!r->fragments[i].next_data_len)
+ && (r->fragments[i].next_data_offset != (0xffff))))
+ continue;
+
+ p0 = vlib_get_buffer (vm, r->fragments[i].pi);
+ ip60 = vlib_buffer_get_current (p0);
+ frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
+ ip40 = (ip4_header_t *) (frag0 + 1);
+
+ if (ip6_frag_hdr_offset (frag0))
+ {
+ //Not first fragment, add the IPv4 header
+ clib_memcpy (ip40, &r->ip4_header, 20);
+ }
+
+#ifdef MAP_IP6_REASS_COUNT_BYTES
+ r->forwarded +=
+ clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
+#endif
+
+ if (ip6_frag_hdr_more (frag0))
+ {
+ //Not last fragment, we copy end of next
+ clib_memcpy (u8_ptr_add (ip60, p0->current_length),
+ r->fragments[i].next_data, 20);
+ p0->current_length += 20;
+ ip60->payload_length = u16_net_add (ip60->payload_length, 20);
+ }
+
+ if (!ip4_is_fragment (ip40))
+ {
+ ip40->fragment_id = frag_id_6to4 (frag0->identification);
+ ip40->flags_and_fragment_offset =
+ clib_host_to_net_u16 (ip6_frag_hdr_offset (frag0));
+ }
+ else
+ {
+ ip40->flags_and_fragment_offset =
+ clib_host_to_net_u16 (ip4_get_fragment_offset (ip40) +
+ ip6_frag_hdr_offset (frag0));
+ }
+
+ if (ip6_frag_hdr_more (frag0))
+ ip40->flags_and_fragment_offset |=
+ clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
+
+ ip40->length =
+ clib_host_to_net_u16 (p0->current_length - sizeof (*ip60) -
+ sizeof (*frag0));
+ ip40->checksum = ip4_header_checksum (ip40);
+
+ if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ map_ip6_map_ip6_reass_trace_t *tr =
+ vlib_add_trace (vm, node, p0, sizeof (*tr));
+ tr->offset = ip4_get_fragment_offset (ip40);
+ tr->frag_len = clib_net_to_host_u16 (ip40->length) - sizeof (*ip40);
+ tr->out = 1;
+ }
+
+ vec_add1 (*fragments_ready, r->fragments[i].pi);
+ r->fragments[i].pi = ~0;
+ r->fragments[i].next_data_len = 0;
+ r->fragments[i].next_data_offset = 0;
+ map_main.ip6_reass_buffered_counter--;
+
+ //TODO: The best solution would be for ip6_map to handle extension headers
+ // and ignore atomic fragments. In the meantime, just copy the header.
+
+ u8 protocol = frag0->next_hdr;
+ memmove (u8_ptr_add (ip40, -sizeof (*ip60)), ip60, sizeof (*ip60));
+ ((ip6_header_t *) u8_ptr_add (ip40, -sizeof (*ip60)))->protocol =
+ protocol;
+ vlib_buffer_advance (p0, sizeof (*frag0));
+ }
+}
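+
+/*
+ * The 20-byte shuffle above: every translated IPv4 fragment needs its
+ * own IPv4 header, but only the first IPv6 fragment carried one.  The
+ * cached header (r->ip4_header) is therefore written over the first 20
+ * payload bytes of each non-first fragment, and to keep those bytes,
+ * every non-last fragment had the first 20 bytes of its successor
+ * (r->fragments[i].next_data) appended to its own tail beforehand.
+ */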
+
+void
+map_ip6_drop_pi (u32 pi)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_node_runtime_t *n =
+ vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index);
+ vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP6_REASS_NEXT_DROP, pi);
+}
+
+void
+map_ip4_drop_pi (u32 pi)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_node_runtime_t *n =
+ vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index);
+ vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP4_REASS_NEXT_DROP, pi);
+}
+
+/*
+ * ip6_reass
+ * TODO: We should count the number of successfully
+ * transmitted fragment bytes and compare that to the last fragment
+ * offset such that we can free the reassembly structure when all fragments
+ * have been forwarded.
+ */
+static uword
+ip6_map_ip6_reass (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index);
+ u32 *fragments_to_drop = NULL;
+ u32 *fragments_ready = NULL;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Single loop */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0;
+ vlib_buffer_t *p0;
+ u8 error0 = MAP_ERROR_NONE;
+ ip6_header_t *ip60;
+ ip6_frag_hdr_t *frag0;
+ u16 offset;
+ u16 next_offset;
+ u16 frag_len;
+
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip60 = vlib_buffer_get_current (p0);
+ frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
+ offset =
+ clib_host_to_net_u16 (frag0->fragment_offset_and_more) & (~7);
+ frag_len =
+ clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
+ next_offset =
+ ip6_frag_hdr_more (frag0) ? (offset + frag_len) : (0xffff);
+
+ //FIXME: Support other extension headers, maybe
+
+ if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ map_ip6_map_ip6_reass_trace_t *tr =
+ vlib_add_trace (vm, node, p0, sizeof (*tr));
+ tr->offset = offset;
+ tr->frag_len = frag_len;
+ tr->out = 0;
+ }
+
+ map_ip6_reass_lock ();
+ map_ip6_reass_t *r =
+ map_ip6_reass_get (&ip60->src_address, &ip60->dst_address,
+ frag0->identification, frag0->next_hdr,
+ &fragments_to_drop);
+ //FIXME: Use better error codes
+ if (PREDICT_FALSE (!r))
+ {
+ // Could not create a caching entry
+ error0 = MAP_ERROR_FRAGMENT_MEMORY;
+ }
+ else if (PREDICT_FALSE ((frag_len <= 20 &&
+ (ip6_frag_hdr_more (frag0) || (!offset)))))
+ {
+ //Very small fragments are only allowed as the last one and
+ //can never be the first one
+ error0 = MAP_ERROR_FRAGMENT_MALFORMED;
+ }
+ else
+ if (map_ip6_reass_add_fragment
+ (r, pi0, offset, next_offset, (u8 *) (frag0 + 1), frag_len))
+ {
+ map_ip6_reass_free (r, &fragments_to_drop);
+ error0 = MAP_ERROR_FRAGMENT_MEMORY;
+ }
+ else
+ {
+#ifdef MAP_IP6_REASS_COUNT_BYTES
+ if (!ip6_frag_hdr_more (frag0))
+ r->expected_total = offset + frag_len;
+#endif
+ ip6_map_ip6_reass_prepare (vm, node, r, &fragments_ready,
+ &fragments_to_drop);
+#ifdef MAP_IP6_REASS_COUNT_BYTES
+ if (r->forwarded >= r->expected_total)
+ map_ip6_reass_free (r, &fragments_to_drop);
+#endif
+ }
+ map_ip6_reass_unlock ();
+
+ if (error0 == MAP_ERROR_NONE)
+ {
+ if (frag_len > 20)
+ {
+ //Dequeue the packet
+ n_left_to_next++;
+ to_next--;
+ }
+ else
+ {
+ //All data from this packet was copied; no need to keep it, and this is not an error
+ p0->error = error_node->errors[MAP_ERROR_NONE];
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0,
+ IP6_MAP_IP6_REASS_NEXT_DROP);
+ }
+ }
+ else
+ {
+ p0->error = error_node->errors[error0];
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, pi0,
+ IP6_MAP_IP6_REASS_NEXT_DROP);
+ }
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ map_send_all_to_node (vm, fragments_ready, node,
+ &error_node->errors[MAP_ERROR_NONE],
+ IP6_MAP_IP6_REASS_NEXT_IP6_MAP);
+ map_send_all_to_node (vm, fragments_to_drop, node,
+ &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
+ IP6_MAP_IP6_REASS_NEXT_DROP);
+
+ vec_free (fragments_to_drop);
+ vec_free (fragments_ready);
+ return frame->n_vectors;
+}
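+
+/*
+ * "Dequeuing" after the speculative enqueue is just a cursor rollback
+ * (to_next--, n_left_to_next++): the buffer stays parked in the
+ * reassembly structure and is emitted later through fragments_ready and
+ * map_send_all_to_node() once it can be forwarded.
+ */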
+
+/*
+ * ip6_map_ip4_reass (IPv4 virtual reassembly)
+ */
+static uword
+ip6_map_ip4_reass (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index);
+ map_main_t *mm = &map_main;
+ vlib_combined_counter_main_t *cm = mm->domain_counters;
+ u32 cpu_index = os_get_cpu_number ();
+ u32 *fragments_to_drop = NULL;
+ u32 *fragments_to_loopback = NULL;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Single loop */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0;
+ vlib_buffer_t *p0;
+ u8 error0 = MAP_ERROR_NONE;
+ map_domain_t *d0;
+ ip4_header_t *ip40;
+ ip6_header_t *ip60;
+ i32 port0 = 0;
+ u32 map_domain_index0 = ~0;
+ u32 next0 = IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP;
+ u8 cached = 0;
+
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip40 = vlib_buffer_get_current (p0);
+ ip60 = ((ip6_header_t *) ip40) - 1;
+
+ d0 =
+ ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+ (ip4_address_t *) & ip40->src_address.as_u32,
+ &map_domain_index0, &error0);
+
+ map_ip4_reass_lock ();
+ //This node only deals with fragmented ip4
+ map_ip4_reass_t *r = map_ip4_reass_get (ip40->src_address.as_u32,
+ ip40->dst_address.as_u32,
+ ip40->fragment_id,
+ ip40->protocol,
+ &fragments_to_drop);
+ if (PREDICT_FALSE (!r))
+ {
+ // Could not create a caching entry
+ error0 = MAP_ERROR_FRAGMENT_MEMORY;
+ }
+ else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40)))
+ {
+ // This is a fragment
+ if (r->port >= 0)
+ {
+ // We know the port already
+ port0 = r->port;
+ }
+ else if (map_ip4_reass_add_fragment (r, pi0))
+ {
+ // Not enough space for caching
+ error0 = MAP_ERROR_FRAGMENT_MEMORY;
+ map_ip4_reass_free (r, &fragments_to_drop);
+ }
+ else
+ {
+ cached = 1;
+ }
+ }
+ else
+ if ((port0 =
+ ip4_get_port (ip40, MAP_SENDER, p0->current_length)) < 0)
+ {
+ // Could not find port from first fragment. Stop reassembling.
+ error0 = MAP_ERROR_BAD_PROTOCOL;
+ port0 = 0;
+ map_ip4_reass_free (r, &fragments_to_drop);
+ }
+ else
+ {
+ // Found port. Remember it and loopback saved fragments
+ r->port = port0;
+ map_ip4_reass_get_fragments (r, &fragments_to_loopback);
+ }
+
+#ifdef MAP_IP4_REASS_COUNT_BYTES
+ if (!cached && r)
+ {
+ r->forwarded += clib_host_to_net_u16 (ip40->length) - 20;
+ if (!ip4_get_fragment_more (ip40))
+ r->expected_total =
+ ip4_get_fragment_offset (ip40) * 8 +
+ clib_host_to_net_u16 (ip40->length) - 20;
+ if (r->forwarded >= r->expected_total)
+ map_ip4_reass_free (r, &fragments_to_drop);
+ }
+#endif
+
+ map_ip4_reass_unlock ();
+
+ if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
+ error0 =
+ ip6_map_sec_check (d0, port0, ip40,
+ ip60) ? MAP_ERROR_NONE :
+ MAP_ERROR_DECAP_SEC_CHECK;
+
+ if (PREDICT_FALSE
+ (d0->mtu && (clib_host_to_net_u16 (ip40->length) > d0->mtu)
+ && error0 == MAP_ERROR_NONE && !cached))
+ {
+ vnet_buffer (p0)->ip_frag.header_offset = 0;
+ vnet_buffer (p0)->ip_frag.flags = 0;
+ vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+ vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
+ next0 = IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT;
+ }
+
+ if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ map_ip6_map_ip4_reass_trace_t *tr =
+ vlib_add_trace (vm, node, p0, sizeof (*tr));
+ tr->map_domain_index = map_domain_index0;
+ tr->port = port0;
+ tr->cached = cached;
+ }
+
+ if (cached)
+ {
+ //Dequeue the packet
+ n_left_to_next++;
+ to_next--;
+ }
+ else
+ {
+ if (error0 == MAP_ERROR_NONE)
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+ cpu_index, map_domain_index0,
+ 1,
+ clib_net_to_host_u16
+ (ip40->length));
+ next0 =
+ (error0 ==
+ MAP_ERROR_NONE) ? next0 : IP6_MAP_IP4_REASS_NEXT_DROP;
+ p0->error = error_node->errors[error0];
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, pi0, next0);
+ }
+
+ //Loopback when we reach the end of the input vector
+ if (n_left_from == 0 && vec_len (fragments_to_loopback))
+ {
+ from = vlib_frame_vector_args (frame);
+ u32 len = vec_len (fragments_to_loopback);
+ if (len <= VLIB_FRAME_SIZE)
+ {
+ clib_memcpy (from, fragments_to_loopback,
+ sizeof (u32) * len);
+ n_left_from = len;
+ vec_reset_length (fragments_to_loopback);
+ }
+ else
+ {
+ clib_memcpy (from,
+ fragments_to_loopback + (len -
+ VLIB_FRAME_SIZE),
+ sizeof (u32) * VLIB_FRAME_SIZE);
+ n_left_from = VLIB_FRAME_SIZE;
+ _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
+ }
+ }
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ map_send_all_to_node (vm, fragments_to_drop, node,
+ &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
+ IP6_MAP_IP4_REASS_NEXT_DROP);
+
+ vec_free (fragments_to_drop);
+ vec_free (fragments_to_loopback);
+ return frame->n_vectors;
+}
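+
+/*
+ * Virtual reassembly never coalesces buffers; it only parks fragments
+ * until the first fragment reveals the L4 port needed for the security
+ * check.  Once the port is known, the parked buffer indices are copied
+ * back into the input vector ("loopback") and traverse this node again,
+ * this time taking the "r->port >= 0" fast path.
+ */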
+
+/*
+ * ip6_icmp_relay
+ */
+static uword
+ip6_map_icmp_relay (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip6_map_icmp_relay_node.index);
+ map_main_t *mm = &map_main;
+ u32 cpu_index = os_get_cpu_number ();
+ u16 *fragment_ids, *fid;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ /* Get random fragment IDs for replies. */
+ fid = fragment_ids =
+ clib_random_buffer_get_data (&vm->random_buffer,
+ n_left_from * sizeof (fragment_ids[0]));
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Single loop */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0;
+ vlib_buffer_t *p0;
+ u8 error0 = MAP_ERROR_NONE;
+ ip6_header_t *ip60;
+ u32 next0 = IP6_ICMP_RELAY_NEXT_IP4_LOOKUP;
+ u32 mtu;
+
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip60 = vlib_buffer_get_current (p0);
+ u16 tlen = clib_net_to_host_u16 (ip60->payload_length);
+
+ /*
+ * In:
+ * IPv6 header (40)
+ * ICMPv6 header (8)
+ * IPv6 header (40)
+ * Original IPv4 header / packet
+ * Out:
+ * New IPv4 header
+ * New ICMP header
+ * Original IPv4 header / packet
+ */
+
+ /* Need at least ICMP(8) + IPv6(40) + IPv4(20) + L4 header(8) */
+ if (tlen < 76)
+ {
+ error0 = MAP_ERROR_ICMP_RELAY;
+ goto error;
+ }
+
+ icmp46_header_t *icmp60 = (icmp46_header_t *) (ip60 + 1);
+ ip6_header_t *inner_ip60 = (ip6_header_t *) (icmp60 + 2);
+
+ if (inner_ip60->protocol != IP_PROTOCOL_IP_IN_IP)
+ {
+ error0 = MAP_ERROR_ICMP_RELAY;
+ goto error;
+ }
+
+ ip4_header_t *inner_ip40 = (ip4_header_t *) (inner_ip60 + 1);
+ vlib_buffer_advance (p0, 60); /* sizeof ( IPv6 + ICMP + IPv6 - IPv4 - ICMP ) */
+ ip4_header_t *new_ip40 = vlib_buffer_get_current (p0);
+ icmp46_header_t *new_icmp40 = (icmp46_header_t *) (new_ip40 + 1);
+
+ /*
+ * Relay according to RFC2473, section 8.3
+ */
+ switch (icmp60->type)
+ {
+ case ICMP6_destination_unreachable:
+ case ICMP6_time_exceeded:
+ case ICMP6_parameter_problem:
+ /* Type 3 - destination unreachable, Code 1 - host unreachable */
+ new_icmp40->type = ICMP4_destination_unreachable;
+ new_icmp40->code =
+ ICMP4_destination_unreachable_destination_unreachable_host;
+ break;
+
+ case ICMP6_packet_too_big:
+ /* Type 3 - destination unreachable, Code 4 - packet too big */
+ /* Potential TODO: Adjust domain tunnel MTU based on the value received here */
+ mtu = clib_net_to_host_u32 (*((u32 *) (icmp60 + 1)));
+
+	      /* Relay only when the inner IPv4 packet has the DF flag set */
+ if (!
+ (inner_ip40->flags_and_fragment_offset &
+ clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT)))
+ {
+ error0 = MAP_ERROR_ICMP_RELAY;
+ goto error;
+ }
+
+ new_icmp40->type = ICMP4_destination_unreachable;
+ new_icmp40->code =
+ ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set;
+ *((u32 *) (new_icmp40 + 1)) =
+ clib_host_to_net_u32 (mtu < 1280 ? 1280 : mtu);
+ break;
+
+ default:
+ error0 = MAP_ERROR_ICMP_RELAY;
+ break;
+ }
+
+ /*
+ * Ensure the total ICMP packet is no longer than 576 bytes (RFC1812)
+ */
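+	  /* New total length: 20 (IPv4) + 8 (ICMP) + inner IPv4 packet
+	     (tlen - 48) = tlen - 20, clamped to 576 */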
+ new_ip40->ip_version_and_header_length = 0x45;
+ new_ip40->tos = 0;
+ u16 nlen = (tlen - 20) > 576 ? 576 : tlen - 20;
+ new_ip40->length = clib_host_to_net_u16 (nlen);
+ new_ip40->fragment_id = fid[0];
+ fid++;
+ new_ip40->ttl = 64;
+ new_ip40->protocol = IP_PROTOCOL_ICMP;
+ new_ip40->src_address = mm->icmp4_src_address;
+ new_ip40->dst_address = inner_ip40->src_address;
+ new_ip40->checksum = ip4_header_checksum (new_ip40);
+
+ new_icmp40->checksum = 0;
+ ip_csum_t sum = ip_incremental_checksum (0, new_icmp40, nlen - 20);
+ new_icmp40->checksum = ~ip_csum_fold (sum);
+
+ vlib_increment_simple_counter (&mm->icmp_relayed, cpu_index, 0, 1);
+
+ error:
+ if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
+ tr->map_domain_index = 0;
+ tr->port = 0;
+ }
+
+ next0 =
+ (error0 == MAP_ERROR_NONE) ? next0 : IP6_ICMP_RELAY_NEXT_DROP;
+ p0->error = error_node->errors[error0];
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, pi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+
+}
+
+static char *map_error_strings[] = {
+#define _(sym,string) string,
+ foreach_map_error
+#undef _
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(ip6_map_node) = {
+ .function = ip6_map,
+ .name = "ip6-map",
+ .vector_size = sizeof(u32),
+ .format_trace = format_map_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = MAP_N_ERROR,
+ .error_strings = map_error_strings,
+
+ .n_next_nodes = IP6_MAP_N_NEXT,
+ .next_nodes = {
+ [IP6_MAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
+#ifdef MAP_SKIP_IP6_LOOKUP
+ [IP6_MAP_NEXT_IP4_REWRITE] = "ip4-rewrite",
+#endif
+ [IP6_MAP_NEXT_IP6_REASS] = "ip6-map-ip6-reass",
+ [IP6_MAP_NEXT_IP4_REASS] = "ip6-map-ip4-reass",
+ [IP6_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
+ [IP6_MAP_NEXT_IP6_ICMP_RELAY] = "ip6-map-icmp-relay",
+ [IP6_MAP_NEXT_IP6_LOCAL] = "ip6-local",
+ [IP6_MAP_NEXT_DROP] = "error-drop",
+ [IP6_MAP_NEXT_ICMP] = "ip6-icmp-error",
+ },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(ip6_map_ip6_reass_node) = {
+ .function = ip6_map_ip6_reass,
+ .name = "ip6-map-ip6-reass",
+ .vector_size = sizeof(u32),
+ .format_trace = format_ip6_map_ip6_reass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = MAP_N_ERROR,
+ .error_strings = map_error_strings,
+ .n_next_nodes = IP6_MAP_IP6_REASS_N_NEXT,
+ .next_nodes = {
+ [IP6_MAP_IP6_REASS_NEXT_IP6_MAP] = "ip6-map",
+ [IP6_MAP_IP6_REASS_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(ip6_map_ip4_reass_node) = {
+ .function = ip6_map_ip4_reass,
+ .name = "ip6-map-ip4-reass",
+ .vector_size = sizeof(u32),
+ .format_trace = format_ip6_map_ip4_reass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = MAP_N_ERROR,
+ .error_strings = map_error_strings,
+ .n_next_nodes = IP6_MAP_IP4_REASS_N_NEXT,
+ .next_nodes = {
+ [IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP] = "ip4-lookup",
+ [IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
+ [IP6_MAP_IP4_REASS_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(ip6_map_icmp_relay_node, static) = {
+ .function = ip6_map_icmp_relay,
+ .name = "ip6-map-icmp-relay",
+ .vector_size = sizeof(u32),
+ .format_trace = format_map_trace, //FIXME
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = MAP_N_ERROR,
+ .error_strings = map_error_strings,
+ .n_next_nodes = IP6_ICMP_RELAY_N_NEXT,
+ .next_nodes = {
+ [IP6_ICMP_RELAY_NEXT_IP4_LOOKUP] = "ip4-lookup",
+ [IP6_ICMP_RELAY_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/map/ip6_map_t.c b/src/vnet/map/ip6_map_t.c
new file mode 100644
index 00000000000..eb3996c2467
--- /dev/null
+++ b/src/vnet/map/ip6_map_t.c
@@ -0,0 +1,1517 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "map.h"
+
+#include "../ip/ip_frag.h"
+
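+/* Enables the two-packets-per-iteration loops below (better pipelining) */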
+#define IP6_MAP_T_DUAL_LOOP
+
+typedef enum
+{
+ IP6_MAPT_NEXT_MAPT_TCP_UDP,
+ IP6_MAPT_NEXT_MAPT_ICMP,
+ IP6_MAPT_NEXT_MAPT_FRAGMENTED,
+ IP6_MAPT_NEXT_DROP,
+ IP6_MAPT_N_NEXT
+} ip6_mapt_next_t;
+
+typedef enum
+{
+ IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,
+ IP6_MAPT_ICMP_NEXT_IP4_FRAG,
+ IP6_MAPT_ICMP_NEXT_DROP,
+ IP6_MAPT_ICMP_N_NEXT
+} ip6_mapt_icmp_next_t;
+
+typedef enum
+{
+ IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,
+ IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,
+ IP6_MAPT_TCP_UDP_NEXT_DROP,
+ IP6_MAPT_TCP_UDP_N_NEXT
+} ip6_mapt_tcp_udp_next_t;
+
+typedef enum
+{
+ IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,
+ IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,
+ IP6_MAPT_FRAGMENTED_NEXT_DROP,
+ IP6_MAPT_FRAGMENTED_N_NEXT
+} ip6_mapt_fragmented_next_t;
+
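+/*
+ * Cache the sender port carried by a first fragment so that subsequent
+ * fragments of the same datagram (which have no L4 header) can reuse
+ * it. Returns 0 on success, non-zero when no reassembly entry could be
+ * obtained.
+ */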
+static_always_inline int
+ip6_map_fragment_cache (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
+ map_domain_t * d, u16 port)
+{
+ u32 *ignore = NULL;
+ map_ip4_reass_lock ();
+ map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address),
+ ip6_map_t_embedded_address (d,
+ &ip6->
+ dst_address),
+ frag_id_6to4 (frag->identification),
+ (ip6->protocol ==
+ IP_PROTOCOL_ICMP6) ?
+ IP_PROTOCOL_ICMP : ip6->protocol,
+ &ignore);
+ if (r)
+ r->port = port;
+
+ map_ip4_reass_unlock ();
+ return !r;
+}
+
+/* Returns the associated port or -1 */
+static_always_inline i32
+ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
+ map_domain_t * d)
+{
+ u32 *ignore = NULL;
+ map_ip4_reass_lock ();
+ map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address),
+ ip6_map_t_embedded_address (d,
+ &ip6->
+ dst_address),
+ frag_id_6to4 (frag->identification),
+ (ip6->protocol ==
+ IP_PROTOCOL_ICMP6) ?
+ IP_PROTOCOL_ICMP : ip6->protocol,
+ &ignore);
+ i32 ret = r ? r->port : -1;
+ map_ip4_reass_unlock ();
+ return ret;
+}
+
+static_always_inline u8
+ip6_translate_tos (const ip6_header_t * ip6)
+{
+#ifdef IP6_MAP_T_OVERRIDE_TOS
+ return IP6_MAP_T_OVERRIDE_TOS;
+#else
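+  /* Traffic Class is bits 20-27 of the version/TC/flow-label word */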
+ return (clib_net_to_host_u32 (ip6->ip_version_traffic_class_and_flow_label)
+ & 0x0ff00000) >> 20;
+#endif
+}
+
+//TODO: Find right place in memory for that
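+/* Maps the ICMPv6 Parameter Problem pointer (an offset into the IPv6
+   header) to the equivalent offset in the translated IPv4 header; ~0
+   marks fields with no IPv4 counterpart (cf. RFC 6145) */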
+/* *INDENT-OFF* */
+static u8 icmp6_to_icmp_updater_pointer_table[] =
+ { 0, 1, ~0, ~0,
+ 2, 2, 9, 8,
+ 12, 12, 12, 12,
+ 12, 12, 12, 12,
+ 12, 12, 12, 12,
+ 12, 12, 12, 12,
+ 24, 24, 24, 24,
+ 24, 24, 24, 24,
+ 24, 24, 24, 24,
+ 24, 24, 24, 24
+ };
+/* *INDENT-ON* */
+
+static_always_inline int
+ip6_icmp_to_icmp6_in_place (icmp46_header_t * icmp, u32 icmp_len,
+ i32 * sender_port, ip6_header_t ** inner_ip6)
+{
+ *inner_ip6 = NULL;
+ switch (icmp->type)
+ {
+ case ICMP6_echo_request:
+ *sender_port = ((u16 *) icmp)[2];
+ icmp->type = ICMP4_echo_request;
+ break;
+ case ICMP6_echo_reply:
+ *sender_port = ((u16 *) icmp)[2];
+ icmp->type = ICMP4_echo_reply;
+ break;
+ case ICMP6_destination_unreachable:
+ *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
+ *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len);
+
+ switch (icmp->code)
+ {
+ case ICMP6_destination_unreachable_no_route_to_destination: //0
+ case ICMP6_destination_unreachable_beyond_scope_of_source_address: //2
+ case ICMP6_destination_unreachable_address_unreachable: //3
+ icmp->type = ICMP4_destination_unreachable;
+ icmp->code =
+ ICMP4_destination_unreachable_destination_unreachable_host;
+ break;
+ case ICMP6_destination_unreachable_destination_administratively_prohibited: //1
+ icmp->type =
+ ICMP4_destination_unreachable;
+ icmp->code =
+ ICMP4_destination_unreachable_communication_administratively_prohibited;
+ break;
+ case ICMP6_destination_unreachable_port_unreachable:
+ icmp->type = ICMP4_destination_unreachable;
+ icmp->code = ICMP4_destination_unreachable_port_unreachable;
+ break;
+ default:
+ return -1;
+ }
+ break;
+ case ICMP6_packet_too_big:
+ *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
+ *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len);
+
+ icmp->type = ICMP4_destination_unreachable;
+ icmp->code = 4;
+ {
+ u32 advertised_mtu = clib_net_to_host_u32 (*((u32 *) (icmp + 1)));
+ advertised_mtu -= 20;
+ //FIXME: = minimum(advertised MTU-20, MTU_of_IPv4_nexthop, (MTU_of_IPv6_nexthop)-20)
+ ((u16 *) (icmp))[3] = clib_host_to_net_u16 (advertised_mtu);
+ }
+ break;
+
+ case ICMP6_time_exceeded:
+ *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
+ *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len);
+
+ icmp->type = ICMP4_time_exceeded;
+ break;
+
+ case ICMP6_parameter_problem:
+ *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
+ *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len);
+
+ switch (icmp->code)
+ {
+ case ICMP6_parameter_problem_erroneous_header_field:
+ icmp->type = ICMP4_parameter_problem;
+ icmp->code = ICMP4_parameter_problem_pointer_indicates_error;
+ u32 pointer = clib_net_to_host_u32 (*((u32 *) (icmp + 1)));
+ if (pointer >= 40)
+ return -1;
+
+ ((u8 *) (icmp + 1))[0] =
+ icmp6_to_icmp_updater_pointer_table[pointer];
+ break;
+ case ICMP6_parameter_problem_unrecognized_next_header:
+ icmp->type = ICMP4_destination_unreachable;
+ icmp->code = ICMP4_destination_unreachable_port_unreachable;
+ break;
+ case ICMP6_parameter_problem_unrecognized_option:
+ default:
+ return -1;
+ }
+ break;
+ default:
+ return -1;
+ break;
+ }
+ return 0;
+}
+
+static_always_inline void
+_ip6_map_t_icmp (map_domain_t * d, vlib_buffer_t * p, u8 * error)
+{
+ ip6_header_t *ip6, *inner_ip6;
+ ip4_header_t *ip4, *inner_ip4;
+ u32 ip6_pay_len;
+ icmp46_header_t *icmp;
+ i32 sender_port;
+ ip_csum_t csum;
+ u32 ip4_sadr, inner_ip4_dadr;
+
+ ip6 = vlib_buffer_get_current (p);
+ ip6_pay_len = clib_net_to_host_u16 (ip6->payload_length);
+ icmp = (icmp46_header_t *) (ip6 + 1);
+ ASSERT (ip6_pay_len + sizeof (*ip6) <= p->current_length);
+
+ if (ip6->protocol != IP_PROTOCOL_ICMP6)
+ {
+      //No extension headers allowed here
+ //TODO: SR header
+ *error = MAP_ERROR_MALFORMED;
+ return;
+ }
+
+ //There are no fragmented ICMP messages, so no extension header for now
+
+ if (ip6_icmp_to_icmp6_in_place
+ (icmp, ip6_pay_len, &sender_port, &inner_ip6))
+ {
+ //TODO: In case of 1:1 mapping it is not necessary to have the sender port
+ *error = MAP_ERROR_ICMP;
+ return;
+ }
+
+ if (sender_port < 0)
+ {
+ // In case of 1:1 mapping, we don't care about the port
+ if (d->ea_bits_len == 0 && d->rules)
+ {
+ sender_port = 0;
+ }
+ else
+ {
+ *error = MAP_ERROR_ICMP;
+ return;
+ }
+ }
+
+ //Security check
+ //Note that this prevents an intermediate IPv6 router from answering the request
+ ip4_sadr = map_get_ip4 (&ip6->src_address);
+ if (ip6->src_address.as_u64[0] != map_get_pfx_net (d, ip4_sadr, sender_port)
+ || ip6->src_address.as_u64[1] != map_get_sfx_net (d, ip4_sadr,
+ sender_port))
+ {
+ *error = MAP_ERROR_SEC_CHECK;
+ return;
+ }
+
+ if (inner_ip6)
+ {
+ u16 *inner_L4_checksum, inner_l4_offset, inner_frag_offset,
+ inner_frag_id;
+ u8 *inner_l4, inner_protocol;
+
+ //We have two headers to translate
+ // FROM
+ // [ IPv6 ]<- ext ->[IC][ IPv6 ]<- ext ->[L4 header ...
+ // Handled cases:
+ // [ IPv6 ][IC][ IPv6 ][L4 header ...
+ // [ IPv6 ][IC][ IPv6 ][Fr][L4 header ...
+ // TO
+ // [ IPv4][IC][ IPv4][L4 header ...
+
+ //TODO: This was already done deep in ip6_icmp_to_icmp6_in_place
+ //We shouldn't have to do it again
+ if (ip6_parse (inner_ip6, ip6_pay_len - 8,
+ &inner_protocol, &inner_l4_offset, &inner_frag_offset))
+ {
+ *error = MAP_ERROR_MALFORMED;
+ return;
+ }
+
+ inner_l4 = u8_ptr_add (inner_ip6, inner_l4_offset);
+ inner_ip4 =
+ (ip4_header_t *) u8_ptr_add (inner_l4, -sizeof (*inner_ip4));
+ if (inner_frag_offset)
+ {
+ ip6_frag_hdr_t *inner_frag =
+ (ip6_frag_hdr_t *) u8_ptr_add (inner_ip6, inner_frag_offset);
+ inner_frag_id = frag_id_6to4 (inner_frag->identification);
+ }
+ else
+ {
+ inner_frag_id = 0;
+ }
+
+ //Do the translation of the inner packet
+ if (inner_protocol == IP_PROTOCOL_TCP)
+ {
+ inner_L4_checksum = (u16 *) u8_ptr_add (inner_l4, 16);
+ }
+ else if (inner_protocol == IP_PROTOCOL_UDP)
+ {
+ inner_L4_checksum = (u16 *) u8_ptr_add (inner_l4, 6);
+ }
+ else if (inner_protocol == IP_PROTOCOL_ICMP6)
+ {
+ icmp46_header_t *inner_icmp = (icmp46_header_t *) inner_l4;
+ csum = inner_icmp->checksum;
+ csum = ip_csum_sub_even (csum, *((u16 *) inner_icmp));
+	  //Can only be an echo request or reply here, since ip6_icmp_to_icmp6_in_place succeeded
+ inner_icmp->type = (inner_icmp->type == ICMP6_echo_request) ?
+ ICMP4_echo_request : ICMP4_echo_reply;
+ csum = ip_csum_add_even (csum, *((u16 *) inner_icmp));
+ inner_icmp->checksum = ip_csum_fold (csum);
+ inner_protocol = IP_PROTOCOL_ICMP; //Will be copied to ip6 later
+ inner_L4_checksum = &inner_icmp->checksum;
+ }
+ else
+ {
+ *error = MAP_ERROR_BAD_PROTOCOL;
+ return;
+ }
+
+ csum = *inner_L4_checksum;
+ csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[0]);
+ csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[1]);
+ csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[0]);
+ csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[1]);
+
+      //Sanity check: the outer destination must match the inner source
+      if (ip6->dst_address.as_u64[0] != inner_ip6->src_address.as_u64[0] ||
+	  ip6->dst_address.as_u64[1] != inner_ip6->src_address.as_u64[1])
+ {
+ *error = MAP_ERROR_SEC_CHECK;
+ return;
+ }
+
+ //Security check of inner packet
+ inner_ip4_dadr = map_get_ip4 (&inner_ip6->dst_address);
+ if (inner_ip6->dst_address.as_u64[0] !=
+ map_get_pfx_net (d, inner_ip4_dadr, sender_port)
+ || inner_ip6->dst_address.as_u64[1] != map_get_sfx_net (d,
+ inner_ip4_dadr,
+ sender_port))
+ {
+ *error = MAP_ERROR_SEC_CHECK;
+ return;
+ }
+
+ inner_ip4->dst_address.as_u32 = inner_ip4_dadr;
+ inner_ip4->src_address.as_u32 =
+ ip6_map_t_embedded_address (d, &inner_ip6->src_address);
+ inner_ip4->ip_version_and_header_length =
+ IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+ inner_ip4->tos = ip6_translate_tos (inner_ip6);
+ inner_ip4->length =
+ u16_net_add (inner_ip6->payload_length,
+ sizeof (*ip4) + sizeof (*ip6) - inner_l4_offset);
+ inner_ip4->fragment_id = inner_frag_id;
+ inner_ip4->flags_and_fragment_offset =
+ clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
+ inner_ip4->ttl = inner_ip6->hop_limit;
+ inner_ip4->protocol = inner_protocol;
+ inner_ip4->checksum = ip4_header_checksum (inner_ip4);
+
+ if (inner_ip4->protocol == IP_PROTOCOL_ICMP)
+ {
+	  //Remove what remains of the IPv6 pseudo-header from the csum
+ csum =
+ ip_csum_sub_even (csum, clib_host_to_net_u16 (IP_PROTOCOL_ICMP6));
+ csum =
+ ip_csum_sub_even (csum, inner_ip4->length - sizeof (*inner_ip4));
+ }
+ else
+ {
+ //Update to new pseudo-header
+ csum = ip_csum_add_even (csum, inner_ip4->src_address.as_u32);
+ csum = ip_csum_add_even (csum, inner_ip4->dst_address.as_u32);
+ }
+ *inner_L4_checksum = ip_csum_fold (csum);
+
+ //Move up icmp header
+ ip4 = (ip4_header_t *) u8_ptr_add (inner_l4, -2 * sizeof (*ip4) - 8);
+ clib_memcpy (u8_ptr_add (inner_l4, -sizeof (*ip4) - 8), icmp, 8);
+ icmp = (icmp46_header_t *) u8_ptr_add (inner_l4, -sizeof (*ip4) - 8);
+ }
+ else
+ {
+ //Only one header to translate
+ ip4 = (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4));
+ }
+ vlib_buffer_advance (p, (u32) (((u8 *) ip4) - ((u8 *) ip6)));
+
+ ip4->dst_address.as_u32 = ip6_map_t_embedded_address (d, &ip6->dst_address);
+ ip4->src_address.as_u32 = ip4_sadr;
+ ip4->ip_version_and_header_length =
+ IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+ ip4->tos = ip6_translate_tos (ip6);
+ ip4->fragment_id = 0;
+ ip4->flags_and_fragment_offset = 0;
+ ip4->ttl = ip6->hop_limit;
+ ip4->protocol = IP_PROTOCOL_ICMP;
+ //TODO fix the length depending on offset length
+ ip4->length = u16_net_add (ip6->payload_length,
+ (inner_ip6 ==
+ NULL) ? sizeof (*ip4) : (2 * sizeof (*ip4) -
+ sizeof (*ip6)));
+ ip4->checksum = ip4_header_checksum (ip4);
+
+ //TODO: We could do an easy diff-checksum for echo requests/replies
+ //Recompute ICMP checksum
+ icmp->checksum = 0;
+ csum =
+ ip_incremental_checksum (0, icmp,
+ clib_net_to_host_u16 (ip4->length) -
+ sizeof (*ip4));
+ icmp->checksum = ~ip_csum_fold (csum);
+}
+
+static uword
+ip6_map_t_icmp (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ vlib_combined_counter_main_t *cm = map_main.domain_counters;
+ u32 cpu_index = os_get_cpu_number ();
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0;
+ vlib_buffer_t *p0;
+ u8 error0;
+ ip6_mapt_icmp_next_t next0;
+ map_domain_t *d0;
+ u16 len0;
+
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ error0 = MAP_ERROR_NONE;
+ next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ len0 =
+ clib_net_to_host_u16 (((ip6_header_t *)
+ vlib_buffer_get_current
+ (p0))->payload_length);
+ d0 =
+ pool_elt_at_index (map_main.domains,
+ vnet_buffer (p0)->map_t.map_domain_index);
+ _ip6_map_t_icmp (d0, p0, &error0);
+
+ if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+ {
+ //Send to fragmentation node if necessary
+ vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+ vnet_buffer (p0)->ip_frag.header_offset = 0;
+ vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+ next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
+ }
+
+ if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
+ {
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+ cpu_index,
+ vnet_buffer (p0)->
+ map_t.map_domain_index, 1,
+ len0);
+ }
+ else
+ {
+ next0 = IP6_MAPT_ICMP_NEXT_DROP;
+ }
+
+ p0->error = error_node->errors[error0];
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, pi0,
+ next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return frame->n_vectors;
+}
+
+static uword
+ip6_map_t_fragmented (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+#ifdef IP6_MAP_T_DUAL_LOOP
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 pi0, pi1;
+ vlib_buffer_t *p0, *p1;
+ ip6_header_t *ip60, *ip61;
+ ip6_frag_hdr_t *frag0, *frag1;
+ ip4_header_t *ip40, *ip41;
+ u16 frag_id0, frag_offset0, frag_id1, frag_offset1;
+ u8 frag_more0, frag_more1;
+ u32 next0, next1;
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+	  next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
+	  next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+ ip60 = vlib_buffer_get_current (p0);
+ ip61 = vlib_buffer_get_current (p1);
+ frag0 =
+ (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+ vnet_buffer (p0)->map_t.
+ v6.frag_offset);
+ frag1 =
+ (ip6_frag_hdr_t *) u8_ptr_add (ip61,
+ vnet_buffer (p1)->map_t.
+ v6.frag_offset);
+ ip40 =
+ (ip4_header_t *) u8_ptr_add (ip60,
+ vnet_buffer (p0)->map_t.
+ v6.l4_offset - sizeof (*ip40));
+ ip41 =
+ (ip4_header_t *) u8_ptr_add (ip61,
+ vnet_buffer (p1)->map_t.
+ v6.l4_offset - sizeof (*ip40));
+ vlib_buffer_advance (p0,
+ vnet_buffer (p0)->map_t.v6.l4_offset -
+ sizeof (*ip40));
+ vlib_buffer_advance (p1,
+ vnet_buffer (p1)->map_t.v6.l4_offset -
+ sizeof (*ip40));
+
+ frag_id0 = frag_id_6to4 (frag0->identification);
+ frag_id1 = frag_id_6to4 (frag1->identification);
+ frag_more0 = ip6_frag_hdr_more (frag0);
+ frag_more1 = ip6_frag_hdr_more (frag1);
+ frag_offset0 = ip6_frag_hdr_offset (frag0);
+ frag_offset1 = ip6_frag_hdr_offset (frag1);
+
+ ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr;
+ ip41->dst_address.as_u32 = vnet_buffer (p1)->map_t.v6.daddr;
+ ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr;
+ ip41->src_address.as_u32 = vnet_buffer (p1)->map_t.v6.saddr;
+ ip40->ip_version_and_header_length =
+ IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+ ip41->ip_version_and_header_length =
+ IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+ ip40->tos = ip6_translate_tos (ip60);
+ ip41->tos = ip6_translate_tos (ip61);
+ ip40->length = u16_net_add (ip60->payload_length,
+ sizeof (*ip40) -
+ vnet_buffer (p0)->map_t.v6.l4_offset +
+ sizeof (*ip60));
+ ip41->length =
+ u16_net_add (ip61->payload_length,
+ sizeof (*ip40) -
+ vnet_buffer (p1)->map_t.v6.l4_offset +
+ sizeof (*ip60));
+ ip40->fragment_id = frag_id0;
+ ip41->fragment_id = frag_id1;
+ ip40->flags_and_fragment_offset =
+ clib_host_to_net_u16 (frag_offset0 |
+ (frag_more0 ? IP4_HEADER_FLAG_MORE_FRAGMENTS
+ : 0));
+ ip41->flags_and_fragment_offset =
+ clib_host_to_net_u16 (frag_offset1 |
+ (frag_more1 ? IP4_HEADER_FLAG_MORE_FRAGMENTS
+ : 0));
+ ip40->ttl = ip60->hop_limit;
+ ip41->ttl = ip61->hop_limit;
+ ip40->protocol =
+ (vnet_buffer (p0)->map_t.v6.l4_protocol ==
+ IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : vnet_buffer (p0)->
+ map_t.v6.l4_protocol;
+ ip41->protocol =
+ (vnet_buffer (p1)->map_t.v6.l4_protocol ==
+ IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : vnet_buffer (p1)->
+ map_t.v6.l4_protocol;
+ ip40->checksum = ip4_header_checksum (ip40);
+ ip41->checksum = ip4_header_checksum (ip41);
+
+ if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+ {
+ vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+ vnet_buffer (p0)->ip_frag.header_offset = 0;
+ vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+ next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
+ }
+
+ if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
+ {
+ vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
+ vnet_buffer (p1)->ip_frag.header_offset = 0;
+ vnet_buffer (p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+ next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next, pi0, pi1,
+ next0, next1);
+ }
+#endif
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0;
+ vlib_buffer_t *p0;
+ ip6_header_t *ip60;
+ ip6_frag_hdr_t *frag0;
+ ip4_header_t *ip40;
+ u16 frag_id0;
+ u8 frag_more0;
+ u16 frag_offset0;
+ u32 next0;
+
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+	  next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
+ p0 = vlib_get_buffer (vm, pi0);
+ ip60 = vlib_buffer_get_current (p0);
+ frag0 =
+ (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+ vnet_buffer (p0)->map_t.
+ v6.frag_offset);
+ ip40 =
+ (ip4_header_t *) u8_ptr_add (ip60,
+ vnet_buffer (p0)->map_t.
+ v6.l4_offset - sizeof (*ip40));
+ vlib_buffer_advance (p0,
+ vnet_buffer (p0)->map_t.v6.l4_offset -
+ sizeof (*ip40));
+
+ frag_id0 = frag_id_6to4 (frag0->identification);
+ frag_more0 = ip6_frag_hdr_more (frag0);
+ frag_offset0 = ip6_frag_hdr_offset (frag0);
+
+ ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr;
+ ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr;
+ ip40->ip_version_and_header_length =
+ IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+ ip40->tos = ip6_translate_tos (ip60);
+ ip40->length = u16_net_add (ip60->payload_length,
+ sizeof (*ip40) -
+ vnet_buffer (p0)->map_t.v6.l4_offset +
+ sizeof (*ip60));
+ ip40->fragment_id = frag_id0;
+ ip40->flags_and_fragment_offset =
+ clib_host_to_net_u16 (frag_offset0 |
+ (frag_more0 ? IP4_HEADER_FLAG_MORE_FRAGMENTS
+ : 0));
+ ip40->ttl = ip60->hop_limit;
+ ip40->protocol =
+ (vnet_buffer (p0)->map_t.v6.l4_protocol ==
+ IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : vnet_buffer (p0)->
+ map_t.v6.l4_protocol;
+ ip40->checksum = ip4_header_checksum (ip40);
+
+ if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+ {
+ //Send to fragmentation node if necessary
+ vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+ vnet_buffer (p0)->ip_frag.header_offset = 0;
+ vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+ next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, pi0,
+ next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return frame->n_vectors;
+}
+
+static uword
+ip6_map_t_tcp_udp (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+#ifdef IP6_MAP_T_DUAL_LOOP
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 pi0, pi1;
+ vlib_buffer_t *p0, *p1;
+ ip6_header_t *ip60, *ip61;
+ ip_csum_t csum0, csum1;
+ ip4_header_t *ip40, *ip41;
+ u16 fragment_id0, flags0, *checksum0,
+ fragment_id1, flags1, *checksum1;
+ ip6_mapt_tcp_udp_next_t next0, next1;
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
+ next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+ ip60 = vlib_buffer_get_current (p0);
+ ip61 = vlib_buffer_get_current (p1);
+ ip40 =
+ (ip4_header_t *) u8_ptr_add (ip60,
+ vnet_buffer (p0)->map_t.
+ v6.l4_offset - sizeof (*ip40));
+ ip41 =
+ (ip4_header_t *) u8_ptr_add (ip61,
+ vnet_buffer (p1)->map_t.
+ v6.l4_offset - sizeof (*ip40));
+ vlib_buffer_advance (p0,
+ vnet_buffer (p0)->map_t.v6.l4_offset -
+ sizeof (*ip40));
+ vlib_buffer_advance (p1,
+ vnet_buffer (p1)->map_t.v6.l4_offset -
+ sizeof (*ip40));
+ checksum0 =
+ (u16 *) u8_ptr_add (ip60,
+ vnet_buffer (p0)->map_t.checksum_offset);
+ checksum1 =
+ (u16 *) u8_ptr_add (ip61,
+ vnet_buffer (p1)->map_t.checksum_offset);
+
+ csum0 = ip_csum_sub_even (*checksum0, ip60->src_address.as_u64[0]);
+ csum1 = ip_csum_sub_even (*checksum1, ip61->src_address.as_u64[0]);
+ csum0 = ip_csum_sub_even (csum0, ip60->src_address.as_u64[1]);
+ csum1 = ip_csum_sub_even (csum1, ip61->src_address.as_u64[1]);
+ csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[0]);
+	  csum1 = ip_csum_sub_even (csum1, ip61->dst_address.as_u64[0]);
+ csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[1]);
+ csum1 = ip_csum_sub_even (csum1, ip61->dst_address.as_u64[1]);
+ csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.daddr);
+ csum1 = ip_csum_add_even (csum1, vnet_buffer (p1)->map_t.v6.daddr);
+ csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.saddr);
+ csum1 = ip_csum_add_even (csum1, vnet_buffer (p1)->map_t.v6.saddr);
+ *checksum0 = ip_csum_fold (csum0);
+ *checksum1 = ip_csum_fold (csum1);
+
+ if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset))
+ {
+ ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+ vnet_buffer
+ (p0)->
+ map_t.
+ v6.frag_offset);
+ fragment_id0 = frag_id_6to4 (hdr->identification);
+ flags0 = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
+ }
+ else
+ {
+ fragment_id0 = 0;
+ flags0 = 0;
+ }
+
+ if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset))
+ {
+ ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip61,
+ vnet_buffer
+ (p1)->
+ map_t.
+ v6.frag_offset);
+ fragment_id1 = frag_id_6to4 (hdr->identification);
+ flags1 = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
+ }
+ else
+ {
+ fragment_id1 = 0;
+ flags1 = 0;
+ }
+
+ ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr;
+ ip41->dst_address.as_u32 = vnet_buffer (p1)->map_t.v6.daddr;
+ ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr;
+ ip41->src_address.as_u32 = vnet_buffer (p1)->map_t.v6.saddr;
+ ip40->ip_version_and_header_length =
+ IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+ ip41->ip_version_and_header_length =
+ IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+ ip40->tos = ip6_translate_tos (ip60);
+ ip41->tos = ip6_translate_tos (ip61);
+ ip40->length = u16_net_add (ip60->payload_length,
+ sizeof (*ip40) + sizeof (*ip60) -
+ vnet_buffer (p0)->map_t.v6.l4_offset);
+ ip41->length =
+ u16_net_add (ip61->payload_length,
+ sizeof (*ip40) + sizeof (*ip60) -
+ vnet_buffer (p1)->map_t.v6.l4_offset);
+ ip40->fragment_id = fragment_id0;
+ ip41->fragment_id = fragment_id1;
+ ip40->flags_and_fragment_offset = flags0;
+ ip41->flags_and_fragment_offset = flags1;
+ ip40->ttl = ip60->hop_limit;
+ ip41->ttl = ip61->hop_limit;
+ ip40->protocol = vnet_buffer (p0)->map_t.v6.l4_protocol;
+ ip41->protocol = vnet_buffer (p1)->map_t.v6.l4_protocol;
+ ip40->checksum = ip4_header_checksum (ip40);
+ ip41->checksum = ip4_header_checksum (ip41);
+
+ if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+ {
+ vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+ vnet_buffer (p0)->ip_frag.header_offset = 0;
+ vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+ next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
+ }
+
+ if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
+ {
+ vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
+ vnet_buffer (p1)->ip_frag.header_offset = 0;
+ vnet_buffer (p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+ next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+ n_left_to_next, pi0, pi1, next0,
+ next1);
+ }
+#endif
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0;
+ vlib_buffer_t *p0;
+ ip6_header_t *ip60;
+ u16 *checksum0;
+ ip_csum_t csum0;
+ ip4_header_t *ip40;
+ u16 fragment_id0;
+ u16 flags0;
+ ip6_mapt_tcp_udp_next_t next0;
+
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip60 = vlib_buffer_get_current (p0);
+ ip40 =
+ (ip4_header_t *) u8_ptr_add (ip60,
+ vnet_buffer (p0)->map_t.
+ v6.l4_offset - sizeof (*ip40));
+ vlib_buffer_advance (p0,
+ vnet_buffer (p0)->map_t.v6.l4_offset -
+ sizeof (*ip40));
+ checksum0 =
+ (u16 *) u8_ptr_add (ip60,
+ vnet_buffer (p0)->map_t.checksum_offset);
+
+ //TODO: This can probably be optimized
+ csum0 = ip_csum_sub_even (*checksum0, ip60->src_address.as_u64[0]);
+ csum0 = ip_csum_sub_even (csum0, ip60->src_address.as_u64[1]);
+ csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[0]);
+ csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[1]);
+ csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.daddr);
+ csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.saddr);
+ *checksum0 = ip_csum_fold (csum0);
+
+ if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset))
+ {
+	      //Only first fragments (offset 0) reach this node; they carry the L4 header
+ ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+ vnet_buffer
+ (p0)->
+ map_t.
+ v6.frag_offset);
+ fragment_id0 = frag_id_6to4 (hdr->identification);
+ flags0 = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
+ }
+ else
+ {
+ fragment_id0 = 0;
+ flags0 = 0;
+ }
+
+ ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr;
+ ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr;
+ ip40->ip_version_and_header_length =
+ IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+ ip40->tos = ip6_translate_tos (ip60);
+ ip40->length = u16_net_add (ip60->payload_length,
+ sizeof (*ip40) + sizeof (*ip60) -
+ vnet_buffer (p0)->map_t.v6.l4_offset);
+ ip40->fragment_id = fragment_id0;
+ ip40->flags_and_fragment_offset = flags0;
+ ip40->ttl = ip60->hop_limit;
+ ip40->protocol = vnet_buffer (p0)->map_t.v6.l4_protocol;
+ ip40->checksum = ip4_header_checksum (ip40);
+
+ if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+ {
+ //Send to fragmentation node if necessary
+ vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+ vnet_buffer (p0)->ip_frag.header_offset = 0;
+ vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+ next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, pi0,
+ next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return frame->n_vectors;
+}
+
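+/*
+ * Classify an IPv6 packet for MAP-T: choose the next node (tcp-udp,
+ * icmp or fragmented), extract the source port used by the security
+ * check (-1 when unknown) and flag malformed packets.
+ */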
+static_always_inline void
+ip6_map_t_classify (vlib_buffer_t * p0, ip6_header_t * ip60,
+ map_domain_t * d0, i32 * src_port0,
+ u8 * error0, ip6_mapt_next_t * next0,
+ u32 l4_len0, ip6_frag_hdr_t * frag0)
+{
+ if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
+ ip6_frag_hdr_offset (frag0)))
+ {
+ *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
+ if (d0->ea_bits_len == 0 && d0->rules)
+ {
+ *src_port0 = 0;
+ }
+ else
+ {
+ *src_port0 = ip6_map_fragment_get (ip60, frag0, d0);
+ *error0 = (*src_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED;
+ }
+ }
+ else
+ if (PREDICT_TRUE
+ (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
+ {
+ *error0 =
+ l4_len0 < sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : *error0;
+ vnet_buffer (p0)->map_t.checksum_offset =
+ vnet_buffer (p0)->map_t.v6.l4_offset + 16;
+ *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
+ *src_port0 =
+ (i32) *
+ ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
+ }
+ else
+ if (PREDICT_TRUE
+ (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
+ {
+ *error0 =
+ l4_len0 < sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : *error0;
+ vnet_buffer (p0)->map_t.checksum_offset =
+ vnet_buffer (p0)->map_t.v6.l4_offset + 6;
+ *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
+ *src_port0 =
+ (i32) *
+ ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
+ }
+ else if (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)
+ {
+ *error0 =
+ l4_len0 < sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0;
+ *next0 = IP6_MAPT_NEXT_MAPT_ICMP;
+ if (d0->ea_bits_len == 0 && d0->rules)
+ {
+ *src_port0 = 0;
+ }
+ else
+ if (((icmp46_header_t *)
+ u8_ptr_add (ip60,
+ vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
+ ICMP6_echo_reply
+ || ((icmp46_header_t *)
+ u8_ptr_add (ip60,
+ vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
+ ICMP6_echo_request)
+ {
+ *src_port0 =
+ (i32) *
+ ((u16 *)
+ u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset + 6));
+ }
+ }
+ else
+ {
+ //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
+ *error0 = MAP_ERROR_BAD_PROTOCOL;
+ }
+}
+
+static uword
+ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip6_map_t_node.index);
+ vlib_combined_counter_main_t *cm = map_main.domain_counters;
+ u32 cpu_index = os_get_cpu_number ();
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+#ifdef IP6_MAP_T_DUAL_LOOP
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 pi0, pi1;
+ vlib_buffer_t *p0, *p1;
+ ip6_header_t *ip60, *ip61;
+ u8 error0, error1;
+ ip6_mapt_next_t next0, next1;
+ u32 l4_len0, l4_len1;
+ i32 src_port0, src_port1;
+ map_domain_t *d0, *d1;
+ ip6_frag_hdr_t *frag0, *frag1;
+ u32 saddr0, saddr1;
+ next0 = next1 = 0; //Because compiler whines
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ error0 = MAP_ERROR_NONE;
+ error1 = MAP_ERROR_NONE;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+ ip60 = vlib_buffer_get_current (p0);
+ ip61 = vlib_buffer_get_current (p1);
+
+ saddr0 = map_get_ip4 (&ip60->src_address);
+ saddr1 = map_get_ip4 (&ip61->src_address);
+ d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+ (ip4_address_t *) & saddr0,
+ &vnet_buffer (p0)->map_t.map_domain_index,
+ &error0);
+ d1 =
+ ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
+ (ip4_address_t *) & saddr1,
+ &vnet_buffer (p1)->map_t.map_domain_index,
+ &error1);
+
+ vnet_buffer (p0)->map_t.v6.saddr = saddr0;
+ vnet_buffer (p1)->map_t.v6.saddr = saddr1;
+ vnet_buffer (p0)->map_t.v6.daddr =
+ ip6_map_t_embedded_address (d0, &ip60->dst_address);
+ vnet_buffer (p1)->map_t.v6.daddr =
+ ip6_map_t_embedded_address (d1, &ip61->dst_address);
+ vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
+ vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
+
+ if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
+ &(vnet_buffer (p0)->map_t.
+ v6.l4_protocol),
+ &(vnet_buffer (p0)->map_t.
+ v6.l4_offset),
+ &(vnet_buffer (p0)->map_t.
+ v6.frag_offset))))
+ {
+ error0 = MAP_ERROR_MALFORMED;
+ next0 = IP6_MAPT_NEXT_DROP;
+ }
+
+ if (PREDICT_FALSE (ip6_parse (ip61, p1->current_length,
+ &(vnet_buffer (p1)->map_t.
+ v6.l4_protocol),
+ &(vnet_buffer (p1)->map_t.
+ v6.l4_offset),
+ &(vnet_buffer (p1)->map_t.
+ v6.frag_offset))))
+ {
+ error1 = MAP_ERROR_MALFORMED;
+ next1 = IP6_MAPT_NEXT_DROP;
+ }
+
+ src_port0 = src_port1 = -1;
+ l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
+ sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
+ l4_len1 = (u32) clib_net_to_host_u16 (ip61->payload_length) +
+ sizeof (*ip60) - vnet_buffer (p1)->map_t.v6.l4_offset;
+ frag0 =
+ (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+ vnet_buffer (p0)->map_t.
+ v6.frag_offset);
+ frag1 =
+ (ip6_frag_hdr_t *) u8_ptr_add (ip61,
+ vnet_buffer (p1)->map_t.
+ v6.frag_offset);
+
+ ip6_map_t_classify (p0, ip60, d0, &src_port0, &error0, &next0,
+ l4_len0, frag0);
+ ip6_map_t_classify (p1, ip61, d1, &src_port1, &error1, &next1,
+ l4_len1, frag1);
+
+ if (PREDICT_FALSE
+ ((src_port0 != -1)
+ && (ip60->src_address.as_u64[0] !=
+ map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
+ src_port0)
+ || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
+ vnet_buffer
+ (p0)->map_t.v6.saddr,
+ src_port0))))
+ {
+ error0 = MAP_ERROR_SEC_CHECK;
+ }
+
+ if (PREDICT_FALSE
+ ((src_port1 != -1)
+ && (ip61->src_address.as_u64[0] !=
+ map_get_pfx_net (d1, vnet_buffer (p1)->map_t.v6.saddr,
+ src_port1)
+ || ip61->src_address.as_u64[1] != map_get_sfx_net (d1,
+ vnet_buffer
+ (p1)->map_t.v6.saddr,
+ src_port1))))
+ {
+ error1 = MAP_ERROR_SEC_CHECK;
+ }
+
+ if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
+ !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
+ u8_ptr_add (ip60,
+ vnet_buffer
+ (p0)->map_t.
+ v6.frag_offset)))
+ && (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
+ && (error0 == MAP_ERROR_NONE))
+ {
+ ip6_map_fragment_cache (ip60,
+ (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+ vnet_buffer
+ (p0)->map_t.
+ v6.frag_offset),
+ d0, src_port0);
+ }
+
+ if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset &&
+ !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
+ u8_ptr_add (ip61,
+ vnet_buffer
+ (p1)->map_t.
+ v6.frag_offset)))
+ && (src_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules)
+ && (error1 == MAP_ERROR_NONE))
+ {
+ ip6_map_fragment_cache (ip61,
+ (ip6_frag_hdr_t *) u8_ptr_add (ip61,
+ vnet_buffer
+ (p1)->map_t.
+ v6.frag_offset),
+ d1, src_port1);
+ }
+
+ if (PREDICT_TRUE
+ (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
+ {
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+ cpu_index,
+ vnet_buffer (p0)->
+ map_t.map_domain_index, 1,
+ clib_net_to_host_u16
+ (ip60->payload_length));
+ }
+
+ if (PREDICT_TRUE
+ (error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP))
+ {
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+ cpu_index,
+ vnet_buffer (p1)->
+ map_t.map_domain_index, 1,
+ clib_net_to_host_u16
+ (ip61->payload_length));
+ }
+
+ next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
+ next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1;
+ p0->error = error_node->errors[error0];
+ p1->error = error_node->errors[error1];
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+ n_left_to_next, pi0, pi1, next0,
+ next1);
+ }
+#endif
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0;
+ vlib_buffer_t *p0;
+ ip6_header_t *ip60;
+ u8 error0;
+ u32 l4_len0;
+ i32 src_port0;
+ map_domain_t *d0;
+ ip6_frag_hdr_t *frag0;
+ ip6_mapt_next_t next0 = 0;
+ u32 saddr;
+
+ pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ error0 = MAP_ERROR_NONE;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip60 = vlib_buffer_get_current (p0);
+ //Save saddr in a different variable to not overwrite ip.adj_index
+ saddr = map_get_ip4 (&ip60->src_address);
+ d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+ (ip4_address_t *) & saddr,
+ &vnet_buffer (p0)->map_t.map_domain_index,
+ &error0);
+
+ //FIXME: What if d0 is null
+ vnet_buffer (p0)->map_t.v6.saddr = saddr;
+ vnet_buffer (p0)->map_t.v6.daddr =
+ ip6_map_t_embedded_address (d0, &ip60->dst_address);
+ vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
+
+ if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
+ &(vnet_buffer (p0)->map_t.
+ v6.l4_protocol),
+ &(vnet_buffer (p0)->map_t.
+ v6.l4_offset),
+ &(vnet_buffer (p0)->map_t.
+ v6.frag_offset))))
+ {
+ error0 = MAP_ERROR_MALFORMED;
+ next0 = IP6_MAPT_NEXT_DROP;
+ }
+
+ src_port0 = -1;
+ l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
+ sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
+ frag0 =
+ (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+ vnet_buffer (p0)->map_t.
+ v6.frag_offset);
+
+
+ if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
+ ip6_frag_hdr_offset (frag0)))
+ {
+ src_port0 = ip6_map_fragment_get (ip60, frag0, d0);
+ error0 = (src_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY;
+ next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
+ }
+ else
+ if (PREDICT_TRUE
+ (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
+ {
+ error0 =
+ l4_len0 <
+ sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
+ vnet_buffer (p0)->map_t.checksum_offset =
+ vnet_buffer (p0)->map_t.v6.l4_offset + 16;
+ next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
+ src_port0 =
+ (i32) *
+ ((u16 *)
+ u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
+ }
+ else
+ if (PREDICT_TRUE
+ (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
+ {
+ error0 =
+ l4_len0 <
+ sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : error0;
+ vnet_buffer (p0)->map_t.checksum_offset =
+ vnet_buffer (p0)->map_t.v6.l4_offset + 6;
+ next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
+ src_port0 =
+ (i32) *
+ ((u16 *)
+ u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
+ }
+ else if (vnet_buffer (p0)->map_t.v6.l4_protocol ==
+ IP_PROTOCOL_ICMP6)
+ {
+ error0 =
+ l4_len0 <
+ sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
+ next0 = IP6_MAPT_NEXT_MAPT_ICMP;
+ if (((icmp46_header_t *)
+ u8_ptr_add (ip60,
+ vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
+ ICMP6_echo_reply
+ || ((icmp46_header_t *)
+ u8_ptr_add (ip60,
+ vnet_buffer (p0)->map_t.v6.
+ l4_offset))->code == ICMP6_echo_request)
+ src_port0 =
+ (i32) *
+ ((u16 *)
+ u8_ptr_add (ip60,
+ vnet_buffer (p0)->map_t.v6.l4_offset + 6));
+ }
+ else
+ {
+ //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
+ error0 = MAP_ERROR_BAD_PROTOCOL;
+ }
+
+ //Security check
+ if (PREDICT_FALSE
+ ((src_port0 != -1)
+ && (ip60->src_address.as_u64[0] !=
+ map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
+ src_port0)
+ || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
+ vnet_buffer
+ (p0)->map_t.v6.saddr,
+ src_port0))))
+ {
+	      //Security check is done whenever the source port is known (src_port0 != -1)
+ error0 = MAP_ERROR_SEC_CHECK;
+ }
+
+ //Fragmented first packet needs to be cached for following packets
+ if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
+ !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
+ u8_ptr_add (ip60,
+ vnet_buffer
+ (p0)->map_t.
+ v6.frag_offset)))
+ && (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
+ && (error0 == MAP_ERROR_NONE))
+ {
+ ip6_map_fragment_cache (ip60,
+ (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+ vnet_buffer
+ (p0)->map_t.
+ v6.frag_offset),
+ d0, src_port0);
+ }
+
+ if (PREDICT_TRUE
+ (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
+ {
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+ cpu_index,
+ vnet_buffer (p0)->
+ map_t.map_domain_index, 1,
+ clib_net_to_host_u16
+ (ip60->payload_length));
+ }
+
+ next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
+ p0->error = error_node->errors[error0];
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, pi0,
+ next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return frame->n_vectors;
+}
+
+static char *map_t_error_strings[] = {
+#define _(sym,string) string,
+ foreach_map_error
+#undef _
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = {
+ .function = ip6_map_t_fragmented,
+ .name = "ip6-map-t-fragmented",
+ .vector_size = sizeof (u32),
+ .format_trace = format_map_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = MAP_N_ERROR,
+ .error_strings = map_t_error_strings,
+
+ .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT,
+ .next_nodes = {
+ [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup",
+ [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
+ [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = {
+ .function = ip6_map_t_icmp,
+ .name = "ip6-map-t-icmp",
+ .vector_size = sizeof (u32),
+ .format_trace = format_map_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = MAP_N_ERROR,
+ .error_strings = map_t_error_strings,
+
+ .n_next_nodes = IP6_MAPT_ICMP_N_NEXT,
+ .next_nodes = {
+ [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup",
+ [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
+ [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = {
+ .function = ip6_map_t_tcp_udp,
+ .name = "ip6-map-t-tcp-udp",
+ .vector_size = sizeof (u32),
+ .format_trace = format_map_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = MAP_N_ERROR,
+ .error_strings = map_t_error_strings,
+
+ .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT,
+ .next_nodes = {
+ [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup",
+ [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
+ [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(ip6_map_t_node) = {
+ .function = ip6_map_t,
+ .name = "ip6-map-t",
+ .vector_size = sizeof(u32),
+ .format_trace = format_map_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = MAP_N_ERROR,
+ .error_strings = map_t_error_strings,
+
+ .n_next_nodes = IP6_MAPT_N_NEXT,
+ .next_nodes = {
+ [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp",
+ [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp",
+ [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented",
+ [IP6_MAPT_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/map/map.api b/src/vnet/map/map.api
new file mode 100644
index 00000000000..4e4be85effa
--- /dev/null
+++ b/src/vnet/map/map.api
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/** \brief Add MAP domains
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param ip6_prefix - Rule IPv6 prefix
+    @param ip4_prefix - Rule IPv4 prefix
+    @param ip6_src - MAP domain IPv6 BR address / Tunnel source
+    @param ip6_prefix_len - Rule IPv6 prefix length
+    @param ip4_prefix_len - Rule IPv4 prefix length
+    @param ip6_src_prefix_len - IPv6 BR address / Tunnel source prefix length
+    @param ea_bits_len - Embedded Address bits length
+    @param psid_offset - Port Set Identifier (PSID) offset
+    @param psid_length - PSID length
+    @param is_translation - 0: MAP-E, 1: MAP-T
+    @param mtu - MTU
+*/
+define map_add_domain
+{
+ u32 client_index;
+ u32 context;
+ u8 ip6_prefix[16];
+ u8 ip4_prefix[4];
+ u8 ip6_src[16];
+ u8 ip6_prefix_len;
+ u8 ip4_prefix_len;
+ u8 ip6_src_prefix_len;
+ u8 ea_bits_len;
+ u8 psid_offset;
+ u8 psid_length;
+ u8 is_translation;
+ u16 mtu;
+};
+
+/** \brief Reply for MAP domain add
+ @param context - returned sender context, to match reply w/ request
+ @param index - MAP domain index
+ @param retval - return code
+*/
+define map_add_domain_reply
+{
+ u32 context;
+ u32 index;
+ i32 retval;
+};
+
+/** \brief Delete MAP domain
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param index - MAP Domain index
+*/
+define map_del_domain
+{
+ u32 client_index;
+ u32 context;
+ u32 index;
+};
+
+/** \brief Reply for MAP domain del
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define map_del_domain_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Add or Delete MAP rule from a domain (Only used for shared IPv4 per subscriber)
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param index - MAP Domain index
+ @param is_add - If 1 add rule, if 0 delete rule
+ @param ip6_dst - MAP CE IPv6 address
+ @param psid - Rule PSID
+*/
+define map_add_del_rule
+{
+ u32 client_index;
+ u32 context;
+ u32 index;
+ u32 is_add;
+ u8 ip6_dst[16];
+ u16 psid;
+};
+
+/** \brief Reply for MAP rule add/del
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define map_add_del_rule_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Get list of map domains
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+*/
+define map_domain_dump
+{
+ u32 client_index;
+ u32 context;
+};
+
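+/** \brief Details of a single MAP domain, sent in response to map_domain_dump
+    @param context - returned sender context, to match reply w/ request
+    @param domain_index - MAP domain index
+*/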
+define map_domain_details
+{
+ u32 context;
+ u32 domain_index;
+ u8 ip6_prefix[16];
+ u8 ip4_prefix[4];
+ u8 ip6_src[16];
+ u8 ip6_prefix_len;
+ u8 ip4_prefix_len;
+ u8 ip6_src_len;
+ u8 ea_bits_len;
+ u8 psid_offset;
+ u8 psid_length;
+ u8 flags;
+ u16 mtu;
+ u8 is_translation;
+};
+
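+/** \brief Get rules of a MAP domain (shared IPv4 per subscriber)
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param domain_index - MAP domain index
+*/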
+define map_rule_dump
+{
+ u32 client_index;
+ u32 context;
+ u32 domain_index;
+};
+
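+/** \brief Details of a single MAP rule, sent in response to map_rule_dump
+    @param context - returned sender context, to match reply w/ request
+    @param ip6_dst - MAP CE IPv6 address
+    @param psid - Rule PSID
+*/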
+define map_rule_details
+{
+ u32 context;
+ u8 ip6_dst[16];
+ u16 psid;
+};
+
+/** \brief Request for a single block of summary stats
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define map_summary_stats
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply for map_summary_stats request
+    @param context - sender context, to match reply w/ request
+    @param retval - return code for request
+    @param total_bindings - total number of MAP bindings
+    @param total_pkts - total translated packets, indexed by rx/tx
+    @param total_bytes - total translated bytes, indexed by rx/tx
+    @param total_ip4_fragments - total IPv4 fragments handled
+    @param total_security_check - total security check results, indexed by rx/tx
+*/
+define map_summary_stats_reply
+{
+ u32 context;
+ i32 retval;
+ u64 total_bindings;
+ u64 total_pkts[2];
+ u64 total_bytes[2];
+ u64 total_ip4_fragments;
+ u64 total_security_check[2];
+};
diff --git a/src/vnet/map/map.c b/src/vnet/map/map.c
new file mode 100644
index 00000000000..aeec6a946c9
--- /dev/null
+++ b/src/vnet/map/map.c
@@ -0,0 +1,2166 @@
+/*
+ * map.c : MAP support
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/adj/adj.h>
+#include <vnet/map/map_dpo.h>
+
+#include "map.h"
+
+#ifdef __SSE4_2__
+static inline u32
+crc_u32 (u32 data, u32 value)
+{
+ __asm__ volatile ("crc32l %[data], %[value];":[value] "+r" (value):[data]
+ "rm" (data));
+ return value;
+}
+#else
+#include <vppinfra/xxhash.h>
+
+static inline u32
+crc_u32 (u32 data, u32 value)
+{
+ u64 tmp = ((u64) data << 32) | (u64) value;
+ return (u32) clib_xxhash (tmp);
+}
+#endif
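+
+/*
+ * crc_u32() is used only as a hash mixing step (not as a wire-format
+ * CRC), so the xxhash-based fallback above is an acceptable substitute
+ * on targets without SSE4.2.
+ */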
+
+/*
+ * This code supports the following MAP modes:
+ *
+ * Algorithmic Shared IPv4 address (ea_bits_len > 0):
+ * ea_bits_len + ip4_prefix > 32
+ * psid_length > 0, ip6_prefix < 64, ip4_prefix <= 32
+ * Algorithmic Full IPv4 address (ea_bits_len > 0):
+ * ea_bits_len + ip4_prefix = 32
+ * psid_length = 0, ip6_prefix < 64, ip4_prefix <= 32
+ * Algorithmic IPv4 prefix (ea_bits_len > 0):
+ * ea_bits_len + ip4_prefix < 32
+ * psid_length = 0, ip6_prefix < 64, ip4_prefix <= 32
+ *
+ * Independent Shared IPv4 address (ea_bits_len = 0):
+ * ip4_prefix = 32
+ * psid_length > 0
+ * Rule IPv6 address = 128, Rule PSID Set
+ * Independent Full IPv4 address (ea_bits_len = 0):
+ * ip4_prefix = 32
+ * psid_length = 0, ip6_prefix = 128
+ * Independent IPv4 prefix (ea_bits_len = 0):
+ * ip4_prefix < 32
+ * psid_length = 0, ip6_prefix = 128
+ *
+ */
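+
+/*
+ * Worked example (illustrative values): ip4_prefix_len = 24,
+ * ea_bits_len = 16, psid_length = 8. Since 24 + 16 > 32 this is a
+ * shared-IPv4 domain: the EA bits embed the 8 IPv4 suffix bits plus
+ * the 8-bit PSID, so each IPv4 address is shared by up to 256 CEs,
+ * each owning its own port set.
+ */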
+
+/*
+ * This code supports MAP-T:
+ *
+ * With DMR prefix length equal to 96.
+ *
+ */
+
+
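+/*
+ * Extract the TCP/UDP port (or ICMP identifier) used for MAP PSID
+ * matching. For ICMP error messages the port is taken from the
+ * embedded packet. Returns -1 when no port can be determined.
+ */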
+i32
+ip4_get_port (ip4_header_t * ip, map_dir_e dir, u16 buffer_len)
+{
+ //TODO: use buffer length
+ if (ip->ip_version_and_header_length != 0x45 ||
+ ip4_get_fragment_offset (ip))
+ return -1;
+
+ if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) ||
+ (ip->protocol == IP_PROTOCOL_UDP)))
+ {
+ udp_header_t *udp = (void *) (ip + 1);
+ return (dir == MAP_SENDER) ? udp->src_port : udp->dst_port;
+ }
+ else if (ip->protocol == IP_PROTOCOL_ICMP)
+ {
+ icmp46_header_t *icmp = (void *) (ip + 1);
+ if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply)
+ {
+ return *((u16 *) (icmp + 1));
+ }
+ else if (clib_net_to_host_u16 (ip->length) >= 64)
+ {
+ ip = (ip4_header_t *) (icmp + 2);
+ if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) ||
+ (ip->protocol == IP_PROTOCOL_UDP)))
+ {
+ udp_header_t *udp = (void *) (ip + 1);
+ return (dir == MAP_SENDER) ? udp->dst_port : udp->src_port;
+ }
+ else if (ip->protocol == IP_PROTOCOL_ICMP)
+ {
+ icmp46_header_t *icmp = (void *) (ip + 1);
+ if (icmp->type == ICMP4_echo_request ||
+ icmp->type == ICMP4_echo_reply)
+ {
+ return *((u16 *) (icmp + 1));
+ }
+ }
+ }
+ }
+ return -1;
+}
+
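+/*
+ * IPv6 counterpart of ip4_get_port(): walks the extension headers and
+ * returns the TCP/UDP port or ICMPv6 echo identifier, or -1 (e.g. for
+ * non-first fragments).
+ */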
+i32
+ip6_get_port (ip6_header_t * ip6, map_dir_e dir, u16 buffer_len)
+{
+ u8 l4_protocol;
+ u16 l4_offset;
+ u16 frag_offset;
+ u8 *l4;
+
+ if (ip6_parse (ip6, buffer_len, &l4_protocol, &l4_offset, &frag_offset))
+ return -1;
+
+ //TODO: Use buffer length
+
+ if (frag_offset &&
+ ip6_frag_hdr_offset (((ip6_frag_hdr_t *)
+ u8_ptr_add (ip6, frag_offset))))
+ return -1; //Can't deal with non-first fragment for now
+
+ l4 = u8_ptr_add (ip6, l4_offset);
+ if (l4_protocol == IP_PROTOCOL_TCP || l4_protocol == IP_PROTOCOL_UDP)
+ {
+ return (dir ==
+ MAP_SENDER) ? ((udp_header_t *) (l4))->src_port : ((udp_header_t
+ *)
+ (l4))->dst_port;
+ }
+ else if (l4_protocol == IP_PROTOCOL_ICMP6)
+ {
+ icmp46_header_t *icmp = (icmp46_header_t *) (l4);
+ if (icmp->type == ICMP6_echo_request)
+ {
+ return (dir == MAP_SENDER) ? ((u16 *) (icmp))[2] : -1;
+ }
+ else if (icmp->type == ICMP6_echo_reply)
+ {
+ return (dir == MAP_SENDER) ? -1 : ((u16 *) (icmp))[2];
+ }
+ }
+ return -1;
+}
+
+
+int
+map_create_domain (ip4_address_t * ip4_prefix,
+ u8 ip4_prefix_len,
+ ip6_address_t * ip6_prefix,
+ u8 ip6_prefix_len,
+ ip6_address_t * ip6_src,
+ u8 ip6_src_len,
+ u8 ea_bits_len,
+ u8 psid_offset,
+ u8 psid_length, u32 * map_domain_index, u16 mtu, u8 flags)
+{
+ u8 suffix_len, suffix_shift;
+ map_main_t *mm = &map_main;
+ dpo_id_t dpo_v4 = DPO_INVALID;
+ dpo_id_t dpo_v6 = DPO_INVALID;
+ fib_node_index_t fei;
+ map_domain_t *d;
+
+ /* Sanity check on the src prefix length */
+ if (flags & MAP_DOMAIN_TRANSLATION)
+ {
+ if (ip6_src_len != 96)
+ {
+ clib_warning ("MAP-T only supports ip6_src_len = 96 for now.");
+ return -1;
+ }
+ }
+ else
+ {
+ if (ip6_src_len != 128)
+ {
+ clib_warning
+ ("MAP-E requires a BR address, not a prefix (ip6_src_len should "
+ "be 128).");
+ return -1;
+ }
+ }
+
+ /* How many, and which bits to grab from the IPv4 DA */
+ if (ip4_prefix_len + ea_bits_len < 32)
+ {
+ flags |= MAP_DOMAIN_PREFIX;
+ suffix_shift = 32 - ip4_prefix_len - ea_bits_len;
+ suffix_len = ea_bits_len;
+ }
+ else
+ {
+ suffix_shift = 0;
+ suffix_len = 32 - ip4_prefix_len;
+ }
+
+ /* EA bits must be within the first 64 bits */
+ if (ea_bits_len > 0 && ((ip6_prefix_len + ea_bits_len) > 64 ||
+ ip6_prefix_len + suffix_len + psid_length > 64))
+ {
+ clib_warning
+ ("Embedded Address bits must be within the first 64 bits of "
+ "the IPv6 prefix");
+ return -1;
+ }
+
+ /* Get domain index */
+ pool_get_aligned (mm->domains, d, CLIB_CACHE_LINE_BYTES);
+ memset (d, 0, sizeof (*d));
+ *map_domain_index = d - mm->domains;
+
+ /* Init domain struct */
+ d->ip4_prefix.as_u32 = ip4_prefix->as_u32;
+ d->ip4_prefix_len = ip4_prefix_len;
+ d->ip6_prefix = *ip6_prefix;
+ d->ip6_prefix_len = ip6_prefix_len;
+ d->ip6_src = *ip6_src;
+ d->ip6_src_len = ip6_src_len;
+ d->ea_bits_len = ea_bits_len;
+ d->psid_offset = psid_offset;
+ d->psid_length = psid_length;
+ d->mtu = mtu;
+ d->flags = flags;
+ d->suffix_shift = suffix_shift;
+ d->suffix_mask = (1 << suffix_len) - 1;
+
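+  /*
+   * RFC 7597 port layout: the top psid_offset bits of the 16-bit port
+   * select the port range (the "A" field), the next psid_length bits
+   * are the PSID, so PSID = (port >> psid_shift) & psid_mask.
+   */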
+ d->psid_shift = 16 - psid_length - psid_offset;
+ d->psid_mask = (1 << d->psid_length) - 1;
+ d->ea_shift = 64 - ip6_prefix_len - suffix_len - d->psid_length;
+
+ /* MAP data-plane object */
+ if (d->flags & MAP_DOMAIN_TRANSLATION)
+ map_t_dpo_create (DPO_PROTO_IP4, *map_domain_index, &dpo_v4);
+ else
+ map_dpo_create (DPO_PROTO_IP4, *map_domain_index, &dpo_v4);
+
+ /* Create ip4 route */
+ fib_prefix_t pfx = {
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_len = d->ip4_prefix_len,
+ .fp_addr = {
+ .ip4 = d->ip4_prefix,
+	       },
+ };
+ fib_table_entry_special_dpo_add (0, &pfx,
+ FIB_SOURCE_MAP,
+ FIB_ENTRY_FLAG_EXCLUSIVE, &dpo_v4);
+ dpo_reset (&dpo_v4);
+
+ /*
+   * Multiple MAP domains may share the same source IPv6 TEP.
+   * In this case the route will already exist and be MAP sourced.
+   * Find the adj (if any) already contributed and modify it.
+ */
+ fib_prefix_t pfx6 = {
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_len = d->ip6_src_len,
+ .fp_addr = {
+ .ip6 = d->ip6_src,
+	       },
+ };
+ fei = fib_table_lookup_exact_match (0, &pfx6);
+
+ if (FIB_NODE_INDEX_INVALID != fei)
+ {
+ dpo_id_t dpo = DPO_INVALID;
+
+ if (fib_entry_get_dpo_for_source (fei, FIB_SOURCE_MAP, &dpo))
+ {
+ /*
+ * modify the existing MAP to indicate it's shared
+ * skip to route add.
+ */
+ const dpo_id_t *md_dpo;
+ map_dpo_t *md;
+
+ ASSERT (DPO_LOAD_BALANCE == dpo.dpoi_type);
+
+ md_dpo = load_balance_get_bucket (dpo.dpoi_index, 0);
+ md = map_dpo_get (md_dpo->dpoi_index);
+
+ md->md_domain = ~0;
+ dpo_copy (&dpo_v6, md_dpo);
+ dpo_reset (&dpo);
+
+ goto route_add;
+ }
+ }
+
+ if (d->flags & MAP_DOMAIN_TRANSLATION)
+ map_t_dpo_create (DPO_PROTO_IP6, *map_domain_index, &dpo_v6);
+ else
+ map_dpo_create (DPO_PROTO_IP6, *map_domain_index, &dpo_v6);
+
+route_add:
+ /*
+ * Create ip6 route. This is a reference counted add. If the prefix
+ * already exists and is MAP sourced, it is now MAP source n+1 times
+ * and will need to be removed n+1 times.
+ */
+ fib_table_entry_special_dpo_add (0, &pfx6,
+ FIB_SOURCE_MAP,
+ FIB_ENTRY_FLAG_EXCLUSIVE, &dpo_v6);
+ dpo_reset (&dpo_v6);
+
+ /* Validate packet/byte counters */
+ map_domain_counter_lock (mm);
+ int i;
+ for (i = 0; i < vec_len (mm->simple_domain_counters); i++)
+ {
+ vlib_validate_simple_counter (&mm->simple_domain_counters[i],
+ *map_domain_index);
+ vlib_zero_simple_counter (&mm->simple_domain_counters[i],
+ *map_domain_index);
+ }
+ for (i = 0; i < vec_len (mm->domain_counters); i++)
+ {
+ vlib_validate_combined_counter (&mm->domain_counters[i],
+ *map_domain_index);
+ vlib_zero_combined_counter (&mm->domain_counters[i], *map_domain_index);
+ }
+ map_domain_counter_unlock (mm);
+
+ return 0;
+}
+
+/*
+ * map_delete_domain
+ */
+int
+map_delete_domain (u32 map_domain_index)
+{
+ map_main_t *mm = &map_main;
+ map_domain_t *d;
+
+ if (pool_is_free_index (mm->domains, map_domain_index))
+ {
+ clib_warning ("MAP domain delete: domain does not exist: %d",
+ map_domain_index);
+ return -1;
+ }
+
+ d = pool_elt_at_index (mm->domains, map_domain_index);
+
+ fib_prefix_t pfx = {
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_len = d->ip4_prefix_len,
+ .fp_addr = {
+ .ip4 = d->ip4_prefix,
+	       },
+ };
+ fib_table_entry_special_remove (0, &pfx, FIB_SOURCE_MAP);
+
+ fib_prefix_t pfx6 = {
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_len = d->ip6_src_len,
+ .fp_addr = {
+ .ip6 = d->ip6_src,
+	       },
+ };
+ fib_table_entry_special_remove (0, &pfx6, FIB_SOURCE_MAP);
+
+ /* Deleting rules */
+ if (d->rules)
+ clib_mem_free (d->rules);
+
+ pool_put (mm->domains, d);
+
+ return 0;
+}
+
+int
+map_add_del_psid (u32 map_domain_index, u16 psid, ip6_address_t * tep,
+ u8 is_add)
+{
+ map_domain_t *d;
+ map_main_t *mm = &map_main;
+
+ if (pool_is_free_index (mm->domains, map_domain_index))
+ {
+ clib_warning ("MAP rule: domain does not exist: %d", map_domain_index);
+ return -1;
+ }
+ d = pool_elt_at_index (mm->domains, map_domain_index);
+
+ /* Rules are only used in 1:1 independent case */
+ if (d->ea_bits_len > 0)
+ return (-1);
+
+ if (!d->rules)
+ {
+ u32 l = (0x1 << d->psid_length) * sizeof (ip6_address_t);
+ d->rules = clib_mem_alloc_aligned (l, CLIB_CACHE_LINE_BYTES);
+ if (!d->rules)
+ return -1;
+ memset (d->rules, 0, l);
+ }
+
+ if (psid >= (0x1 << d->psid_length))
+ {
+ clib_warning ("MAP rule: PSID outside bounds: %d [%d]", psid,
+ 0x1 << d->psid_length);
+ return -1;
+ }
+
+ if (is_add)
+ {
+ d->rules[psid] = *tep;
+ }
+ else
+ {
+ memset (&d->rules[psid], 0, sizeof (ip6_address_t));
+ }
+ return 0;
+}
+
+#ifdef MAP_SKIP_IP6_LOOKUP
+static void
+map_pre_resolve (ip4_address_t * ip4, ip6_address_t * ip6)
+{
+ map_main_t *mm = &map_main;
+ ip6_main_t *im6 = &ip6_main;
+
+ if (ip6->as_u64[0] != 0 || ip6->as_u64[1] != 0)
+ {
+ // FIXME NOT an ADJ
+ mm->adj6_index = ip6_fib_table_fwding_lookup (im6, 0, ip6);
+ clib_warning ("FIB lookup results in: %u", mm->adj6_index);
+ }
+ if (ip4->as_u32 != 0)
+ {
+ // FIXME NOT an ADJ
+ mm->adj4_index = ip4_fib_table_lookup_lb (0, ip4);
+ clib_warning ("FIB lookup results in: %u", mm->adj4_index);
+ }
+}
+#endif
+
+static clib_error_t *
+map_security_check_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ map_main_t *mm = &map_main;
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "off"))
+ mm->sec_check = false;
+ else if (unformat (line_input, "on"))
+ mm->sec_check = true;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+ return 0;
+}
+
+static clib_error_t *
+map_security_check_frag_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ map_main_t *mm = &map_main;
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "off"))
+ mm->sec_check_frag = false;
+ else if (unformat (line_input, "on"))
+ mm->sec_check_frag = true;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+ return 0;
+}
+
+static clib_error_t *
+map_add_domain_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ ip4_address_t ip4_prefix;
+ ip6_address_t ip6_prefix;
+ ip6_address_t ip6_src;
+ u32 ip6_prefix_len = 0, ip4_prefix_len = 0, map_domain_index, ip6_src_len;
+ u32 num_m_args = 0;
+ /* Optional arguments */
+ u32 ea_bits_len = 0, psid_offset = 0, psid_length = 0;
+ u32 mtu = 0;
+ u8 flags = 0;
+ ip6_src_len = 128;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (line_input, "ip4-pfx %U/%d", unformat_ip4_address, &ip4_prefix,
+ &ip4_prefix_len))
+ num_m_args++;
+ else
+ if (unformat
+ (line_input, "ip6-pfx %U/%d", unformat_ip6_address, &ip6_prefix,
+ &ip6_prefix_len))
+ num_m_args++;
+ else
+ if (unformat
+ (line_input, "ip6-src %U/%d", unformat_ip6_address, &ip6_src,
+ &ip6_src_len))
+ num_m_args++;
+ else
+ if (unformat
+ (line_input, "ip6-src %U", unformat_ip6_address, &ip6_src))
+ num_m_args++;
+ else if (unformat (line_input, "ea-bits-len %d", &ea_bits_len))
+ num_m_args++;
+ else if (unformat (line_input, "psid-offset %d", &psid_offset))
+ num_m_args++;
+ else if (unformat (line_input, "psid-len %d", &psid_length))
+ num_m_args++;
+ else if (unformat (line_input, "mtu %d", &mtu))
+ num_m_args++;
+ else if (unformat (line_input, "map-t"))
+ flags |= MAP_DOMAIN_TRANSLATION;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ if (num_m_args < 3)
+ return clib_error_return (0, "mandatory argument(s) missing");
+
+ map_create_domain (&ip4_prefix, ip4_prefix_len,
+ &ip6_prefix, ip6_prefix_len, &ip6_src, ip6_src_len,
+ ea_bits_len, psid_offset, psid_length, &map_domain_index,
+ mtu, flags);
+
+ return 0;
+}
+
+static clib_error_t *
+map_del_domain_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 num_m_args = 0;
+ u32 map_domain_index;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "index %d", &map_domain_index))
+ num_m_args++;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ if (num_m_args != 1)
+ return clib_error_return (0, "mandatory argument(s) missing");
+
+ map_delete_domain (map_domain_index);
+
+ return 0;
+}
+
+static clib_error_t *
+map_add_rule_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ ip6_address_t tep;
+ u32 num_m_args = 0;
+ u32 psid = 0, map_domain_index;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "index %d", &map_domain_index))
+ num_m_args++;
+ else if (unformat (line_input, "psid %d", &psid))
+ num_m_args++;
+ else
+ if (unformat (line_input, "ip6-dst %U", unformat_ip6_address, &tep))
+ num_m_args++;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ if (num_m_args != 3)
+ return clib_error_return (0, "mandatory argument(s) missing");
+
+ if (map_add_del_psid (map_domain_index, psid, &tep, 1) != 0)
+ {
+ return clib_error_return (0, "Failing to add Mapping Rule");
+ }
+ return 0;
+}
+
+#if MAP_SKIP_IP6_LOOKUP
+static clib_error_t *
+map_pre_resolve_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ ip4_address_t ip4nh;
+ ip6_address_t ip6nh;
+ map_main_t *mm = &map_main;
+
+ memset (&ip4nh, 0, sizeof (ip4nh));
+ memset (&ip6nh, 0, sizeof (ip6nh));
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "ip4-nh %U", unformat_ip4_address, &ip4nh))
+ mm->preresolve_ip4 = ip4nh;
+ else
+ if (unformat (line_input, "ip6-nh %U", unformat_ip6_address, &ip6nh))
+ mm->preresolve_ip6 = ip6nh;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ map_pre_resolve (&ip4nh, &ip6nh);
+
+ return 0;
+}
+#endif
+
+static clib_error_t *
+map_icmp_relay_source_address_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ ip4_address_t icmp_src_address;
+ map_main_t *mm = &map_main;
+
+ mm->icmp4_src_address.as_u32 = 0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (line_input, "%U", unformat_ip4_address, &icmp_src_address))
+ mm->icmp4_src_address = icmp_src_address;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ return 0;
+}
+
+static clib_error_t *
+map_icmp_unreachables_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ map_main_t *mm = &map_main;
+ int num_m_args = 0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ num_m_args++;
+ if (unformat (line_input, "on"))
+ mm->icmp6_enabled = true;
+ else if (unformat (line_input, "off"))
+ mm->icmp6_enabled = false;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+
+ if (num_m_args != 1)
+ return clib_error_return (0, "mandatory argument(s) missing");
+
+ return 0;
+}
+
+static clib_error_t *
+map_fragment_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ map_main_t *mm = &map_main;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "inner"))
+ mm->frag_inner = true;
+ else if (unformat (line_input, "outer"))
+ mm->frag_inner = false;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ return 0;
+}
+
+static clib_error_t *
+map_fragment_df_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ map_main_t *mm = &map_main;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "on"))
+ mm->frag_ignore_df = true;
+ else if (unformat (line_input, "off"))
+ mm->frag_ignore_df = false;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ return 0;
+}
+
+static clib_error_t *
+map_traffic_class_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ map_main_t *mm = &map_main;
+ u32 tc = 0;
+
+ mm->tc_copy = false;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "copy"))
+ mm->tc_copy = true;
+ else if (unformat (line_input, "%x", &tc))
+ mm->tc = tc & 0xff;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ return 0;
+}
+
+static u8 *
+format_map_domain (u8 * s, va_list * args)
+{
+ map_domain_t *d = va_arg (*args, map_domain_t *);
+ bool counters = va_arg (*args, int);
+ map_main_t *mm = &map_main;
+ ip6_address_t ip6_prefix;
+
+ if (d->rules)
+ memset (&ip6_prefix, 0, sizeof (ip6_prefix));
+ else
+ ip6_prefix = d->ip6_prefix;
+
+ s = format (s,
+	      "[%d] ip4-pfx %U/%d ip6-pfx %U/%d ip6-src %U/%d ea-bits-len %d psid-offset %d psid-len %d mtu %d %s",
+ d - mm->domains,
+ format_ip4_address, &d->ip4_prefix, d->ip4_prefix_len,
+ format_ip6_address, &ip6_prefix, d->ip6_prefix_len,
+ format_ip6_address, &d->ip6_src, d->ip6_src_len,
+ d->ea_bits_len, d->psid_offset, d->psid_length, d->mtu,
+ (d->flags & MAP_DOMAIN_TRANSLATION) ? "map-t" : "");
+
+ if (counters)
+ {
+ map_domain_counter_lock (mm);
+ vlib_counter_t v;
+ vlib_get_combined_counter (&mm->domain_counters[MAP_DOMAIN_COUNTER_TX],
+ d - mm->domains, &v);
+ s = format (s, " TX: %lld/%lld", v.packets, v.bytes);
+ vlib_get_combined_counter (&mm->domain_counters[MAP_DOMAIN_COUNTER_RX],
+ d - mm->domains, &v);
+ s = format (s, " RX: %lld/%lld", v.packets, v.bytes);
+ map_domain_counter_unlock (mm);
+ }
+ s = format (s, "\n");
+
+ if (d->rules)
+ {
+ int i;
+ ip6_address_t dst;
+ for (i = 0; i < (0x1 << d->psid_length); i++)
+ {
+ dst = d->rules[i];
+ if (dst.as_u64[0] == 0 && dst.as_u64[1] == 0)
+ continue;
+ s = format (s,
+ " rule psid: %d ip6-dst %U\n", i, format_ip6_address,
+ &dst);
+ }
+ }
+ return s;
+}
+
+static u8 *
+format_map_ip4_reass (u8 * s, va_list * args)
+{
+ map_main_t *mm = &map_main;
+ map_ip4_reass_t *r = va_arg (*args, map_ip4_reass_t *);
+ map_ip4_reass_key_t *k = &r->key;
+ f64 now = vlib_time_now (mm->vlib_main);
+ f64 lifetime = (((f64) mm->ip4_reass_conf_lifetime_ms) / 1000);
+ f64 dt = (r->ts + lifetime > now) ? (r->ts + lifetime - now) : -1;
+ s = format (s,
+ "ip4-reass src=%U dst=%U protocol=%d identifier=%d port=%d lifetime=%.3lf\n",
+ format_ip4_address, &k->src.as_u8, format_ip4_address,
+ &k->dst.as_u8, k->protocol,
+ clib_net_to_host_u16 (k->fragment_id),
+ (r->port >= 0) ? clib_net_to_host_u16 (r->port) : -1, dt);
+ return s;
+}
+
+static u8 *
+format_map_ip6_reass (u8 * s, va_list * args)
+{
+ map_main_t *mm = &map_main;
+ map_ip6_reass_t *r = va_arg (*args, map_ip6_reass_t *);
+ map_ip6_reass_key_t *k = &r->key;
+ f64 now = vlib_time_now (mm->vlib_main);
+ f64 lifetime = (((f64) mm->ip6_reass_conf_lifetime_ms) / 1000);
+ f64 dt = (r->ts + lifetime > now) ? (r->ts + lifetime - now) : -1;
+ s = format (s,
+ "ip6-reass src=%U dst=%U protocol=%d identifier=%d lifetime=%.3lf\n",
+ format_ip6_address, &k->src.as_u8, format_ip6_address,
+ &k->dst.as_u8, k->protocol,
+ clib_net_to_host_u32 (k->fragment_id), dt);
+ return s;
+}
+
+static clib_error_t *
+show_map_domain_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ map_main_t *mm = &map_main;
+ map_domain_t *d;
+ bool counters = false;
+ u32 map_domain_index = ~0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "counters"))
+ counters = true;
+ else if (unformat (line_input, "index %d", &map_domain_index))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ if (pool_elts (mm->domains) == 0)
+ vlib_cli_output (vm, "No MAP domains are configured...");
+
+ if (map_domain_index == ~0)
+ {
+ /* *INDENT-OFF* */
+ pool_foreach(d, mm->domains, ({vlib_cli_output(vm, "%U", format_map_domain, d, counters);}));
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ if (pool_is_free_index (mm->domains, map_domain_index))
+ {
+	  return clib_error_return (0, "MAP domain does not exist: %d",
+ map_domain_index);
+ }
+
+ d = pool_elt_at_index (mm->domains, map_domain_index);
+ vlib_cli_output (vm, "%U", format_map_domain, d, counters);
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+show_map_fragments_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ map_main_t *mm = &map_main;
+ map_ip4_reass_t *f4;
+ map_ip6_reass_t *f6;
+
+ /* *INDENT-OFF* */
+ pool_foreach(f4, mm->ip4_reass_pool, ({vlib_cli_output (vm, "%U", format_map_ip4_reass, f4);}));
+ /* *INDENT-ON* */
+ /* *INDENT-OFF* */
+ pool_foreach(f6, mm->ip6_reass_pool, ({vlib_cli_output (vm, "%U", format_map_ip6_reass, f6);}));
+ /* *INDENT-ON* */
+ return (0);
+}
+
+u64
+map_error_counter_get (u32 node_index, map_error_t map_error)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, node_index);
+ vlib_error_main_t *em = &vm->error_main;
+ vlib_error_t e = error_node->errors[map_error];
+ vlib_node_t *n = vlib_get_node (vm, node_index);
+ u32 ci;
+
+ ci = vlib_error_get_code (e);
+ ASSERT (ci < n->n_errors);
+ ci += n->error_heap_index;
+
+ return (em->counters[ci]);
+}
+
+static clib_error_t *
+show_map_stats_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ map_main_t *mm = &map_main;
+ map_domain_t *d;
+ int domains = 0, rules = 0, domaincount = 0, rulecount = 0;
+ if (pool_elts (mm->domains) == 0)
+ vlib_cli_output (vm, "No MAP domains are configured...");
+
+ /* *INDENT-OFF* */
+ pool_foreach(d, mm->domains, ({
+ if (d->rules) {
+ rulecount+= 0x1 << d->psid_length;
+    rules += sizeof(ip6_address_t) * (0x1 << d->psid_length);
+ }
+ domains += sizeof(*d);
+ domaincount++;
+ }));
+ /* *INDENT-ON* */
+
+ vlib_cli_output (vm, "MAP domains structure: %d\n", sizeof (map_domain_t));
+ vlib_cli_output (vm, "MAP domains: %d (%d bytes)\n", domaincount, domains);
+ vlib_cli_output (vm, "MAP rules: %d (%d bytes)\n", rulecount, rules);
+ vlib_cli_output (vm, "Total: %d bytes)\n", rules + domains);
+
+#if MAP_SKIP_IP6_LOOKUP
+ vlib_cli_output (vm,
+ "MAP pre-resolve: IP6 next-hop: %U (%u), IP4 next-hop: %U (%u)\n",
+ format_ip6_address, &mm->preresolve_ip6, mm->adj6_index,
+ format_ip4_address, &mm->preresolve_ip4, mm->adj4_index);
+#endif
+
+ if (mm->tc_copy)
+ vlib_cli_output (vm, "MAP traffic-class: copy");
+ else
+ vlib_cli_output (vm, "MAP traffic-class: %x", mm->tc);
+
+ vlib_cli_output (vm,
+ "MAP IPv6 inbound security check: %s, fragmented packet security check: %s",
+ mm->sec_check ? "enabled" : "disabled",
+ mm->sec_check_frag ? "enabled" : "disabled");
+
+ vlib_cli_output (vm, "ICMP-relay IPv4 source address: %U\n",
+ format_ip4_address, &mm->icmp4_src_address);
+ vlib_cli_output (vm, "ICMP6 unreachables sent for unmatched packets: %s\n",
+ mm->icmp6_enabled ? "enabled" : "disabled");
+ vlib_cli_output (vm, "Inner fragmentation: %s\n",
+ mm->frag_inner ? "enabled" : "disabled");
+ vlib_cli_output (vm, "Fragment packets regardless of DF flag: %s\n",
+ mm->frag_ignore_df ? "enabled" : "disabled");
+
+ /*
+ * Counters
+ */
+ vlib_combined_counter_main_t *cm = mm->domain_counters;
+ u64 total_pkts[MAP_N_DOMAIN_COUNTER];
+ u64 total_bytes[MAP_N_DOMAIN_COUNTER];
+ int which, i;
+ vlib_counter_t v;
+
+ memset (total_pkts, 0, sizeof (total_pkts));
+ memset (total_bytes, 0, sizeof (total_bytes));
+
+ map_domain_counter_lock (mm);
+ vec_foreach (cm, mm->domain_counters)
+ {
+ which = cm - mm->domain_counters;
+
+ for (i = 0; i < vec_len (cm->maxi); i++)
+ {
+ vlib_get_combined_counter (cm, i, &v);
+ total_pkts[which] += v.packets;
+ total_bytes[which] += v.bytes;
+ }
+ }
+ map_domain_counter_unlock (mm);
+
+ vlib_cli_output (vm, "Encapsulated packets: %lld bytes: %lld\n",
+ total_pkts[MAP_DOMAIN_COUNTER_TX],
+ total_bytes[MAP_DOMAIN_COUNTER_TX]);
+ vlib_cli_output (vm, "Decapsulated packets: %lld bytes: %lld\n",
+ total_pkts[MAP_DOMAIN_COUNTER_RX],
+ total_bytes[MAP_DOMAIN_COUNTER_RX]);
+
+ vlib_cli_output (vm, "ICMP relayed packets: %d\n",
+ vlib_get_simple_counter (&mm->icmp_relayed, 0));
+
+ return 0;
+}
+
+static clib_error_t *
+map_params_reass_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 lifetime = ~0;
+ f64 ht_ratio = (MAP_IP4_REASS_CONF_HT_RATIO_MAX + 1);
+ u32 pool_size = ~0;
+ u64 buffers = ~(0ull);
+ u8 ip4 = 0, ip6 = 0;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "lifetime %u", &lifetime))
+ ;
+ else if (unformat (line_input, "ht-ratio %lf", &ht_ratio))
+ ;
+ else if (unformat (line_input, "pool-size %u", &pool_size))
+ ;
+ else if (unformat (line_input, "buffers %llu", &buffers))
+ ;
+ else if (unformat (line_input, "ip4"))
+ ip4 = 1;
+ else if (unformat (line_input, "ip6"))
+ ip6 = 1;
+ else
+ {
+ unformat_free (line_input);
+ return clib_error_return (0, "invalid input");
+ }
+ }
+ unformat_free (line_input);
+
+ if (!ip4 && !ip6)
+ return clib_error_return (0, "must specify ip4 and/or ip6");
+
+ if (ip4)
+ {
+ if (pool_size != ~0 && pool_size > MAP_IP4_REASS_CONF_POOL_SIZE_MAX)
+ return clib_error_return (0, "invalid ip4-reass pool-size ( > %d)",
+ MAP_IP4_REASS_CONF_POOL_SIZE_MAX);
+ if (ht_ratio != (MAP_IP4_REASS_CONF_HT_RATIO_MAX + 1)
+ && ht_ratio > MAP_IP4_REASS_CONF_HT_RATIO_MAX)
+ return clib_error_return (0, "invalid ip4-reass ht-ratio ( > %d)",
+ MAP_IP4_REASS_CONF_HT_RATIO_MAX);
+ if (lifetime != ~0 && lifetime > MAP_IP4_REASS_CONF_LIFETIME_MAX)
+ return clib_error_return (0, "invalid ip4-reass lifetime ( > %d)",
+ MAP_IP4_REASS_CONF_LIFETIME_MAX);
+ if (buffers != ~(0ull) && buffers > MAP_IP4_REASS_CONF_BUFFERS_MAX)
+ return clib_error_return (0, "invalid ip4-reass buffers ( > %ld)",
+ MAP_IP4_REASS_CONF_BUFFERS_MAX);
+ }
+
+ if (ip6)
+ {
+ if (pool_size != ~0 && pool_size > MAP_IP6_REASS_CONF_POOL_SIZE_MAX)
+ return clib_error_return (0, "invalid ip6-reass pool-size ( > %d)",
+ MAP_IP6_REASS_CONF_POOL_SIZE_MAX);
+ if (ht_ratio != (MAP_IP4_REASS_CONF_HT_RATIO_MAX + 1)
+ && ht_ratio > MAP_IP6_REASS_CONF_HT_RATIO_MAX)
+	return clib_error_return (0, "invalid ip6-reass ht-ratio ( > %d)",
+ MAP_IP6_REASS_CONF_HT_RATIO_MAX);
+ if (lifetime != ~0 && lifetime > MAP_IP6_REASS_CONF_LIFETIME_MAX)
+ return clib_error_return (0, "invalid ip6-reass lifetime ( > %d)",
+ MAP_IP6_REASS_CONF_LIFETIME_MAX);
+ if (buffers != ~(0ull) && buffers > MAP_IP6_REASS_CONF_BUFFERS_MAX)
+ return clib_error_return (0, "invalid ip6-reass buffers ( > %ld)",
+ MAP_IP6_REASS_CONF_BUFFERS_MAX);
+ }
+
+ if (ip4)
+ {
+ u32 reass = 0, packets = 0;
+ if (pool_size != ~0)
+ {
+ if (map_ip4_reass_conf_pool_size (pool_size, &reass, &packets))
+ {
+ vlib_cli_output (vm, "Could not set ip4-reass pool-size");
+ }
+ else
+ {
+ vlib_cli_output (vm,
+			       "Setting ip4-reass pool-size (destroyed-reassembly=%u, dropped-fragments=%u)",
+ reass, packets);
+ }
+ }
+ if (ht_ratio != (MAP_IP4_REASS_CONF_HT_RATIO_MAX + 1))
+ {
+ if (map_ip4_reass_conf_ht_ratio (ht_ratio, &reass, &packets))
+ {
+ vlib_cli_output (vm, "Could not set ip4-reass ht-log2len");
+ }
+ else
+ {
+ vlib_cli_output (vm,
+			       "Setting ip4-reass ht-log2len (destroyed-reassembly=%u, dropped-fragments=%u)",
+ reass, packets);
+ }
+ }
+ if (lifetime != ~0)
+ {
+ if (map_ip4_reass_conf_lifetime (lifetime))
+ vlib_cli_output (vm, "Could not set ip4-reass lifetime");
+ else
+ vlib_cli_output (vm, "Setting ip4-reass lifetime");
+ }
+ if (buffers != ~(0ull))
+ {
+ if (map_ip4_reass_conf_buffers (buffers))
+ vlib_cli_output (vm, "Could not set ip4-reass buffers");
+ else
+ vlib_cli_output (vm, "Setting ip4-reass buffers");
+ }
+
+ if (map_main.ip4_reass_conf_buffers >
+ map_main.ip4_reass_conf_pool_size *
+ MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY)
+ {
+ vlib_cli_output (vm,
+ "Note: 'ip4-reass buffers' > pool-size * max-fragments-per-reassembly.");
+ }
+ }
+
+ if (ip6)
+ {
+ u32 reass = 0, packets = 0;
+ if (pool_size != ~0)
+ {
+ if (map_ip6_reass_conf_pool_size (pool_size, &reass, &packets))
+ {
+ vlib_cli_output (vm, "Could not set ip6-reass pool-size");
+ }
+ else
+ {
+ vlib_cli_output (vm,
+			       "Setting ip6-reass pool-size (destroyed-reassembly=%u, dropped-fragments=%u)",
+ reass, packets);
+ }
+ }
+ if (ht_ratio != (MAP_IP4_REASS_CONF_HT_RATIO_MAX + 1))
+ {
+ if (map_ip6_reass_conf_ht_ratio (ht_ratio, &reass, &packets))
+ {
+ vlib_cli_output (vm, "Could not set ip6-reass ht-log2len");
+ }
+ else
+ {
+ vlib_cli_output (vm,
+			       "Setting ip6-reass ht-log2len (destroyed-reassembly=%u, dropped-fragments=%u)",
+ reass, packets);
+ }
+ }
+ if (lifetime != ~0)
+ {
+ if (map_ip6_reass_conf_lifetime (lifetime))
+ vlib_cli_output (vm, "Could not set ip6-reass lifetime");
+ else
+ vlib_cli_output (vm, "Setting ip6-reass lifetime");
+ }
+ if (buffers != ~(0ull))
+ {
+ if (map_ip6_reass_conf_buffers (buffers))
+ vlib_cli_output (vm, "Could not set ip6-reass buffers");
+ else
+ vlib_cli_output (vm, "Setting ip6-reass buffers");
+ }
+
+ if (map_main.ip6_reass_conf_buffers >
+ map_main.ip6_reass_conf_pool_size *
+ MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY)
+ {
+ vlib_cli_output (vm,
+ "Note: 'ip6-reass buffers' > pool-size * max-fragments-per-reassembly.");
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+ * packet trace format function
+ */
+u8 *
+format_map_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ map_trace_t *t = va_arg (*args, map_trace_t *);
+ u32 map_domain_index = t->map_domain_index;
+ u16 port = t->port;
+
+ s =
+ format (s, "MAP domain index: %d L4 port: %u", map_domain_index,
+ clib_net_to_host_u16 (port));
+
+ return s;
+}
+
+static_always_inline map_ip4_reass_t *
+map_ip4_reass_lookup (map_ip4_reass_key_t * k, u32 bucket, f64 now)
+{
+ map_main_t *mm = &map_main;
+ u32 ri = mm->ip4_reass_hash_table[bucket];
+ while (ri != MAP_REASS_INDEX_NONE)
+ {
+ map_ip4_reass_t *r = pool_elt_at_index (mm->ip4_reass_pool, ri);
+ if (r->key.as_u64[0] == k->as_u64[0] &&
+ r->key.as_u64[1] == k->as_u64[1] &&
+ now < r->ts + (((f64) mm->ip4_reass_conf_lifetime_ms) / 1000))
+ {
+ return r;
+ }
+ ri = r->bucket_next;
+ }
+ return NULL;
+}
+
+#define map_ip4_reass_pool_index(r) (r - map_main.ip4_reass_pool)
+
+void
+map_ip4_reass_free (map_ip4_reass_t * r, u32 ** pi_to_drop)
+{
+ map_main_t *mm = &map_main;
+ map_ip4_reass_get_fragments (r, pi_to_drop);
+
+ // Unlink in hash bucket
+ map_ip4_reass_t *r2 = NULL;
+ u32 r2i = mm->ip4_reass_hash_table[r->bucket];
+ while (r2i != map_ip4_reass_pool_index (r))
+ {
+ ASSERT (r2i != MAP_REASS_INDEX_NONE);
+ r2 = pool_elt_at_index (mm->ip4_reass_pool, r2i);
+ r2i = r2->bucket_next;
+ }
+ if (r2)
+ {
+ r2->bucket_next = r->bucket_next;
+ }
+ else
+ {
+ mm->ip4_reass_hash_table[r->bucket] = r->bucket_next;
+ }
+
+ // Unlink in list
+ if (r->fifo_next == map_ip4_reass_pool_index (r))
+ {
+ mm->ip4_reass_fifo_last = MAP_REASS_INDEX_NONE;
+ }
+ else
+ {
+ if (mm->ip4_reass_fifo_last == map_ip4_reass_pool_index (r))
+ mm->ip4_reass_fifo_last = r->fifo_prev;
+ pool_elt_at_index (mm->ip4_reass_pool, r->fifo_prev)->fifo_next =
+ r->fifo_next;
+ pool_elt_at_index (mm->ip4_reass_pool, r->fifo_next)->fifo_prev =
+ r->fifo_prev;
+ }
+
+ pool_put (mm->ip4_reass_pool, r);
+ mm->ip4_reass_allocated--;
+}
+
+map_ip4_reass_t *
+map_ip4_reass_get (u32 src, u32 dst, u16 fragment_id,
+ u8 protocol, u32 ** pi_to_drop)
+{
+ map_ip4_reass_t *r;
+ map_main_t *mm = &map_main;
+ map_ip4_reass_key_t k = {.src.data_u32 = src,
+ .dst.data_u32 = dst,
+ .fragment_id = fragment_id,
+ .protocol = protocol
+ };
+
+ u32 h = 0;
+ h = crc_u32 (k.as_u32[0], h);
+ h = crc_u32 (k.as_u32[1], h);
+ h = crc_u32 (k.as_u32[2], h);
+ h = crc_u32 (k.as_u32[3], h);
+ h = h >> (32 - mm->ip4_reass_ht_log2len);
+
+ f64 now = vlib_time_now (mm->vlib_main);
+
+ //Cache garbage collection
+ while (mm->ip4_reass_fifo_last != MAP_REASS_INDEX_NONE)
+ {
+ map_ip4_reass_t *last =
+ pool_elt_at_index (mm->ip4_reass_pool, mm->ip4_reass_fifo_last);
+ if (last->ts + (((f64) mm->ip4_reass_conf_lifetime_ms) / 1000) < now)
+ map_ip4_reass_free (last, pi_to_drop);
+ else
+ break;
+ }
+
+ if ((r = map_ip4_reass_lookup (&k, h, now)))
+ return r;
+
+ if (mm->ip4_reass_allocated >= mm->ip4_reass_conf_pool_size)
+ return NULL;
+
+ pool_get (mm->ip4_reass_pool, r);
+ mm->ip4_reass_allocated++;
+ int i;
+ for (i = 0; i < MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+ r->fragments[i] = ~0;
+
+ u32 ri = map_ip4_reass_pool_index (r);
+
+ //Link in new bucket
+ r->bucket = h;
+ r->bucket_next = mm->ip4_reass_hash_table[h];
+ mm->ip4_reass_hash_table[h] = ri;
+
+ //Link in fifo
+ if (mm->ip4_reass_fifo_last != MAP_REASS_INDEX_NONE)
+ {
+ r->fifo_next =
+ pool_elt_at_index (mm->ip4_reass_pool,
+ mm->ip4_reass_fifo_last)->fifo_next;
+ r->fifo_prev = mm->ip4_reass_fifo_last;
+ pool_elt_at_index (mm->ip4_reass_pool, r->fifo_prev)->fifo_next = ri;
+ pool_elt_at_index (mm->ip4_reass_pool, r->fifo_next)->fifo_prev = ri;
+ }
+ else
+ {
+ r->fifo_next = r->fifo_prev = ri;
+ mm->ip4_reass_fifo_last = ri;
+ }
+
+ //Set other fields
+ r->ts = now;
+ r->key = k;
+ r->port = -1;
+#ifdef MAP_IP4_REASS_COUNT_BYTES
+ r->expected_total = 0xffff;
+ r->forwarded = 0;
+#endif
+
+ return r;
+}
+
+int
+map_ip4_reass_add_fragment (map_ip4_reass_t * r, u32 pi)
+{
+ if (map_main.ip4_reass_buffered_counter >= map_main.ip4_reass_conf_buffers)
+ return -1;
+
+ int i;
+ for (i = 0; i < MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+ if (r->fragments[i] == ~0)
+ {
+ r->fragments[i] = pi;
+ map_main.ip4_reass_buffered_counter++;
+ return 0;
+ }
+ return -1;
+}
+
+static_always_inline map_ip6_reass_t *
+map_ip6_reass_lookup (map_ip6_reass_key_t * k, u32 bucket, f64 now)
+{
+ map_main_t *mm = &map_main;
+ u32 ri = mm->ip6_reass_hash_table[bucket];
+ while (ri != MAP_REASS_INDEX_NONE)
+ {
+ map_ip6_reass_t *r = pool_elt_at_index (mm->ip6_reass_pool, ri);
+ if (now < r->ts + (((f64) mm->ip6_reass_conf_lifetime_ms) / 1000) &&
+ r->key.as_u64[0] == k->as_u64[0] &&
+ r->key.as_u64[1] == k->as_u64[1] &&
+ r->key.as_u64[2] == k->as_u64[2] &&
+ r->key.as_u64[3] == k->as_u64[3] &&
+ r->key.as_u64[4] == k->as_u64[4])
+ return r;
+ ri = r->bucket_next;
+ }
+ return NULL;
+}
+
+#define map_ip6_reass_pool_index(r) (r - map_main.ip6_reass_pool)
+
+void
+map_ip6_reass_free (map_ip6_reass_t * r, u32 ** pi_to_drop)
+{
+ map_main_t *mm = &map_main;
+ int i;
+ for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+ if (r->fragments[i].pi != ~0)
+ {
+ vec_add1 (*pi_to_drop, r->fragments[i].pi);
+ r->fragments[i].pi = ~0;
+ map_main.ip6_reass_buffered_counter--;
+ }
+
+ // Unlink in hash bucket
+ map_ip6_reass_t *r2 = NULL;
+ u32 r2i = mm->ip6_reass_hash_table[r->bucket];
+ while (r2i != map_ip6_reass_pool_index (r))
+ {
+ ASSERT (r2i != MAP_REASS_INDEX_NONE);
+ r2 = pool_elt_at_index (mm->ip6_reass_pool, r2i);
+ r2i = r2->bucket_next;
+ }
+ if (r2)
+ {
+ r2->bucket_next = r->bucket_next;
+ }
+ else
+ {
+ mm->ip6_reass_hash_table[r->bucket] = r->bucket_next;
+ }
+
+ // Unlink in list
+ if (r->fifo_next == map_ip6_reass_pool_index (r))
+ {
+ //Single element in the list, list is now empty
+ mm->ip6_reass_fifo_last = MAP_REASS_INDEX_NONE;
+ }
+ else
+ {
+ if (mm->ip6_reass_fifo_last == map_ip6_reass_pool_index (r)) //First element
+ mm->ip6_reass_fifo_last = r->fifo_prev;
+ pool_elt_at_index (mm->ip6_reass_pool, r->fifo_prev)->fifo_next =
+ r->fifo_next;
+ pool_elt_at_index (mm->ip6_reass_pool, r->fifo_next)->fifo_prev =
+ r->fifo_prev;
+ }
+
+ // Free from pool if necessary
+ pool_put (mm->ip6_reass_pool, r);
+ mm->ip6_reass_allocated--;
+}
+
+map_ip6_reass_t *
+map_ip6_reass_get (ip6_address_t * src, ip6_address_t * dst, u32 fragment_id,
+ u8 protocol, u32 ** pi_to_drop)
+{
+ map_ip6_reass_t *r;
+ map_main_t *mm = &map_main;
+ map_ip6_reass_key_t k = {
+ .src = *src,
+ .dst = *dst,
+ .fragment_id = fragment_id,
+ .protocol = protocol
+ };
+
+ u32 h = 0;
+ int i;
+ for (i = 0; i < 10; i++)
+ h = crc_u32 (k.as_u32[i], h);
+ h = h >> (32 - mm->ip6_reass_ht_log2len);
+
+ f64 now = vlib_time_now (mm->vlib_main);
+
+ //Cache garbage collection
+ while (mm->ip6_reass_fifo_last != MAP_REASS_INDEX_NONE)
+ {
+ map_ip6_reass_t *last =
+ pool_elt_at_index (mm->ip6_reass_pool, mm->ip6_reass_fifo_last);
+ if (last->ts + (((f64) mm->ip6_reass_conf_lifetime_ms) / 1000) < now)
+ map_ip6_reass_free (last, pi_to_drop);
+ else
+ break;
+ }
+
+ if ((r = map_ip6_reass_lookup (&k, h, now)))
+ return r;
+
+ if (mm->ip6_reass_allocated >= mm->ip6_reass_conf_pool_size)
+ return NULL;
+
+ pool_get (mm->ip6_reass_pool, r);
+ mm->ip6_reass_allocated++;
+ for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+ {
+ r->fragments[i].pi = ~0;
+ r->fragments[i].next_data_len = 0;
+ r->fragments[i].next_data_offset = 0;
+ }
+
+ u32 ri = map_ip6_reass_pool_index (r);
+
+ //Link in new bucket
+ r->bucket = h;
+ r->bucket_next = mm->ip6_reass_hash_table[h];
+ mm->ip6_reass_hash_table[h] = ri;
+
+ //Link in fifo
+ if (mm->ip6_reass_fifo_last != MAP_REASS_INDEX_NONE)
+ {
+ r->fifo_next =
+ pool_elt_at_index (mm->ip6_reass_pool,
+ mm->ip6_reass_fifo_last)->fifo_next;
+ r->fifo_prev = mm->ip6_reass_fifo_last;
+ pool_elt_at_index (mm->ip6_reass_pool, r->fifo_prev)->fifo_next = ri;
+ pool_elt_at_index (mm->ip6_reass_pool, r->fifo_next)->fifo_prev = ri;
+ }
+ else
+ {
+ r->fifo_next = r->fifo_prev = ri;
+ mm->ip6_reass_fifo_last = ri;
+ }
+
+ //Set other fields
+ r->ts = now;
+ r->key = k;
+ r->ip4_header.ip_version_and_header_length = 0;
+#ifdef MAP_IP6_REASS_COUNT_BYTES
+ r->expected_total = 0xffff;
+ r->forwarded = 0;
+#endif
+ return r;
+}
+
+int
+map_ip6_reass_add_fragment (map_ip6_reass_t * r, u32 pi,
+ u16 data_offset, u16 next_data_offset,
+ u8 * data_start, u16 data_len)
+{
+ map_ip6_fragment_t *f = NULL, *prev_f = NULL;
+ u16 copied_len = (data_len > 20) ? 20 : data_len;
+
+ if (map_main.ip6_reass_buffered_counter >= map_main.ip6_reass_conf_buffers)
+ return -1;
+
+  //Look up the fragment slots for the current buffer
+  //and for the one immediately preceding it
+ int i;
+ for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+ {
+ if (data_offset && r->fragments[i].next_data_offset == data_offset)
+ {
+ prev_f = &r->fragments[i]; // This is buffer for previous packet
+ }
+ else if (r->fragments[i].next_data_offset == next_data_offset)
+ {
+ f = &r->fragments[i]; // This is a buffer for the current packet
+ }
+ else if (r->fragments[i].next_data_offset == 0)
+ { //Available
+ if (f == NULL)
+ f = &r->fragments[i];
+ else if (prev_f == NULL)
+ prev_f = &r->fragments[i];
+ }
+ }
+
+ if (!f || f->pi != ~0)
+ return -1;
+
+ if (data_offset)
+ {
+ if (!prev_f)
+ return -1;
+
+ clib_memcpy (prev_f->next_data, data_start, copied_len);
+ prev_f->next_data_len = copied_len;
+ prev_f->next_data_offset = data_offset;
+ }
+ else
+ {
+ if (((ip4_header_t *) data_start)->ip_version_and_header_length != 0x45)
+ return -1;
+
+ if (r->ip4_header.ip_version_and_header_length == 0)
+ clib_memcpy (&r->ip4_header, data_start, sizeof (ip4_header_t));
+ }
+
+ if (data_len > 20)
+ {
+ f->next_data_offset = next_data_offset;
+ f->pi = pi;
+ map_main.ip6_reass_buffered_counter++;
+ }
+ return 0;
+}
+
+void
+map_ip4_reass_reinit (u32 * trashed_reass, u32 * dropped_packets)
+{
+ map_main_t *mm = &map_main;
+ int i;
+
+ if (dropped_packets)
+ *dropped_packets = mm->ip4_reass_buffered_counter;
+ if (trashed_reass)
+ *trashed_reass = mm->ip4_reass_allocated;
+ if (mm->ip4_reass_fifo_last != MAP_REASS_INDEX_NONE)
+ {
+ u16 ri = mm->ip4_reass_fifo_last;
+ do
+ {
+ map_ip4_reass_t *r = pool_elt_at_index (mm->ip4_reass_pool, ri);
+ for (i = 0; i < MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+ if (r->fragments[i] != ~0)
+ map_ip4_drop_pi (r->fragments[i]);
+
+ ri = r->fifo_next;
+ pool_put (mm->ip4_reass_pool, r);
+ }
+ while (ri != mm->ip4_reass_fifo_last);
+ }
+
+ vec_free (mm->ip4_reass_hash_table);
+ vec_resize (mm->ip4_reass_hash_table, 1 << mm->ip4_reass_ht_log2len);
+ for (i = 0; i < (1 << mm->ip4_reass_ht_log2len); i++)
+ mm->ip4_reass_hash_table[i] = MAP_REASS_INDEX_NONE;
+ pool_free (mm->ip4_reass_pool);
+ pool_alloc (mm->ip4_reass_pool, mm->ip4_reass_conf_pool_size);
+
+ mm->ip4_reass_allocated = 0;
+ mm->ip4_reass_fifo_last = MAP_REASS_INDEX_NONE;
+ mm->ip4_reass_buffered_counter = 0;
+}
+
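+/*
+ * Return the smallest log2 hash table length such that
+ * (1 << log2len) >= pool_size * ht_ratio, searching 1..30;
+ * falls back to 4 if the desired size exceeds 2^30.
+ */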
+u8
+map_get_ht_log2len (f32 ht_ratio, u16 pool_size)
+{
+ u32 desired_size = (u32) (pool_size * ht_ratio);
+ u8 i;
+ for (i = 1; i < 31; i++)
+ if ((1 << i) >= desired_size)
+ return i;
+ return 4;
+}
+
+int
+map_ip4_reass_conf_ht_ratio (f32 ht_ratio, u32 * trashed_reass,
+ u32 * dropped_packets)
+{
+ map_main_t *mm = &map_main;
+ if (ht_ratio > MAP_IP4_REASS_CONF_HT_RATIO_MAX)
+ return -1;
+
+ map_ip4_reass_lock ();
+ mm->ip4_reass_conf_ht_ratio = ht_ratio;
+ mm->ip4_reass_ht_log2len =
+ map_get_ht_log2len (ht_ratio, mm->ip4_reass_conf_pool_size);
+ map_ip4_reass_reinit (trashed_reass, dropped_packets);
+ map_ip4_reass_unlock ();
+ return 0;
+}
+
+int
+map_ip4_reass_conf_pool_size (u16 pool_size, u32 * trashed_reass,
+ u32 * dropped_packets)
+{
+ map_main_t *mm = &map_main;
+ if (pool_size > MAP_IP4_REASS_CONF_POOL_SIZE_MAX)
+ return -1;
+
+ map_ip4_reass_lock ();
+ mm->ip4_reass_conf_pool_size = pool_size;
+ map_ip4_reass_reinit (trashed_reass, dropped_packets);
+ map_ip4_reass_unlock ();
+ return 0;
+}
+
+int
+map_ip4_reass_conf_lifetime (u16 lifetime_ms)
+{
+ map_main.ip4_reass_conf_lifetime_ms = lifetime_ms;
+ return 0;
+}
+
+int
+map_ip4_reass_conf_buffers (u32 buffers)
+{
+ map_main.ip4_reass_conf_buffers = buffers;
+ return 0;
+}
+
+void
+map_ip6_reass_reinit (u32 * trashed_reass, u32 * dropped_packets)
+{
+ map_main_t *mm = &map_main;
+ if (dropped_packets)
+ *dropped_packets = mm->ip6_reass_buffered_counter;
+ if (trashed_reass)
+ *trashed_reass = mm->ip6_reass_allocated;
+ int i;
+ if (mm->ip6_reass_fifo_last != MAP_REASS_INDEX_NONE)
+ {
+ u16 ri = mm->ip6_reass_fifo_last;
+ do
+ {
+ map_ip6_reass_t *r = pool_elt_at_index (mm->ip6_reass_pool, ri);
+ for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+ if (r->fragments[i].pi != ~0)
+ map_ip6_drop_pi (r->fragments[i].pi);
+
+ ri = r->fifo_next;
+ pool_put (mm->ip6_reass_pool, r);
+ }
+ while (ri != mm->ip6_reass_fifo_last);
+ mm->ip6_reass_fifo_last = MAP_REASS_INDEX_NONE;
+ }
+
+ vec_free (mm->ip6_reass_hash_table);
+ vec_resize (mm->ip6_reass_hash_table, 1 << mm->ip6_reass_ht_log2len);
+ for (i = 0; i < (1 << mm->ip6_reass_ht_log2len); i++)
+ mm->ip6_reass_hash_table[i] = MAP_REASS_INDEX_NONE;
+ pool_free (mm->ip6_reass_pool);
+  pool_alloc (mm->ip6_reass_pool, mm->ip6_reass_conf_pool_size);
+
+ mm->ip6_reass_allocated = 0;
+ mm->ip6_reass_buffered_counter = 0;
+}
+
+int
+map_ip6_reass_conf_ht_ratio (f32 ht_ratio, u32 * trashed_reass,
+ u32 * dropped_packets)
+{
+ map_main_t *mm = &map_main;
+ if (ht_ratio > MAP_IP6_REASS_CONF_HT_RATIO_MAX)
+ return -1;
+
+ map_ip6_reass_lock ();
+ mm->ip6_reass_conf_ht_ratio = ht_ratio;
+ mm->ip6_reass_ht_log2len =
+ map_get_ht_log2len (ht_ratio, mm->ip6_reass_conf_pool_size);
+ map_ip6_reass_reinit (trashed_reass, dropped_packets);
+ map_ip6_reass_unlock ();
+ return 0;
+}
+
+int
+map_ip6_reass_conf_pool_size (u16 pool_size, u32 * trashed_reass,
+ u32 * dropped_packets)
+{
+ map_main_t *mm = &map_main;
+ if (pool_size > MAP_IP6_REASS_CONF_POOL_SIZE_MAX)
+ return -1;
+
+ map_ip6_reass_lock ();
+ mm->ip6_reass_conf_pool_size = pool_size;
+ map_ip6_reass_reinit (trashed_reass, dropped_packets);
+ map_ip6_reass_unlock ();
+ return 0;
+}
+
+int
+map_ip6_reass_conf_lifetime (u16 lifetime_ms)
+{
+ map_main.ip6_reass_conf_lifetime_ms = lifetime_ms;
+ return 0;
+}
+
+int
+map_ip6_reass_conf_buffers (u32 buffers)
+{
+ map_main.ip6_reass_conf_buffers = buffers;
+ return 0;
+}
+
+/* *INDENT-OFF* */
+
+/*?
+ * Configure MAP reassembly behaviour
+ *
+ * @cliexpar
+ * @cliexstart{map params reassembly}
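+ * Example (illustrative):
+ * map params reassembly ip4 lifetime 1000 buffers 4096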
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND(map_ip4_reass_lifetime_command, static) = {
+ .path = "map params reassembly",
+ .short_help = "map params reassembly [ip4 | ip6] [lifetime <lifetime-ms>] "
+ "[pool-size <pool-size>] [buffers <buffers>] "
+ "[ht-ratio <ht-ratio>]",
+ .function = map_params_reass_command_fn,
+};
+
+/*?
+ * Set or copy the IP TOS/Traffic Class field
+ *
+ * @cliexpar
+ * @cliexstart{map params traffic-class}
+ *
+ * This command is used to set the traffic-class field in translated
+ * or encapsulated packets. If copy is specified (the default) then the
+ * traffic-class/TOS field is copied from the original packet to the
+ * translated/encapsulating header.
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND(map_traffic_class_command, static) = {
+ .path = "map params traffic-class",
+ .short_help = "map params traffic-class {0x0-0xff | copy}",
+ .function = map_traffic_class_command_fn,
+};
+
+/*?
+ * Bypass IP4/IP6 lookup
+ *
+ * @cliexpar
+ * @cliexstart{map params pre-resolve}
+ *
+ * Bypass a second FIB lookup of the translated or encapsulated
+ * packet, and forward the packet directly to the specified
+ * next-hop. This optimization trades forwarding flexibility for
+ * performance.
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND(map_pre_resolve_command, static) = {
+ .path = "map params pre-resolve",
+ .short_help = " map params pre-resolve {ip4-nh <address>} "
+ "| {ip6-nh <address>}",
+ .function = map_pre_resolve_command_fn,
+};
+
+/*?
+ * Enable or disable the MAP-E inbound security check
+ *
+ * @cliexpar
+ * @cliexstart{map params security-check}
+ *
+ * By default, a decapsulated packet's IPv4 source address will be
+ * verified against the outer header's IPv6 source address. Disabling
+ * this feature will allow IPv4 source address spoofing.
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND(map_security_check_command, static) = {
+ .path = "map params security-check",
+ .short_help = "map params security-check on|off",
+ .function = map_security_check_command_fn,
+};
+
+/*?
+ * Specify the IPv4 source address used for relayed ICMP error messages
+ *
+ * @cliexpar
+ * @cliexstart{map params icmp source-address}
+ *
+ * This command specifies the IPv4 source address (which must be local
+ * to the system) used when relaying received IPv6 ICMP error
+ * messages.
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND(map_icmp_relay_source_address_command, static) = {
+ .path = "map params icmp source-address",
+ .short_help = "map params icmp source-address <ip4-address>",
+ .function = map_icmp_relay_source_address_command_fn,
+};
+
+/*?
+ * Send IPv6 ICMP unreachables
+ *
+ * @cliexpar
+ * @cliexstart{map params icmp6 unreachables}
+ *
+ * Send IPv6 ICMP unreachable messages back if security check fails or
+ * no MAP domain exists.
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND(map_icmp_unreachables_command, static) = {
+ .path = "map params icmp6 unreachables",
+ .short_help = "map params icmp6 unreachables {on|off}",
+ .function = map_icmp_unreachables_command_fn,
+};
+
+/*?
+ * Configure MAP fragmentation behaviour
+ *
+ * @cliexpar
+ * @cliexstart{map params fragment}
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND(map_fragment_command, static) = {
+ .path = "map params fragment",
+ .short_help = "map params fragment inner|outer",
+ .function = map_fragment_command_fn,
+};
+
+/*?
+ * Ignore the IPv4 Don't Fragment (DF) bit
+ *
+ * @cliexpar
+ * @cliexstart{map params fragment ignore-df}
+ *
+ * Allows fragmentation of the IPv4 packet even if the DF bit is
+ * set. The choice between inner and outer fragmentation of tunnel
+ * packets is complicated. The benefit of inner fragmentation is that
+ * the ultimate endpoint must reassemble, instead of the tunnel
+ * endpoint.
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND(map_fragment_df_command, static) = {
+ .path = "map params fragment ignore-df",
+ .short_help = "map params fragment ignore-df on|off",
+ .function = map_fragment_df_command_fn,
+};
+
+/*?
+ * Specify whether the inbound security check should be done on fragments
+ *
+ * @cliexpar
+ * @cliexstart{map params security-check fragments}
+ *
+ * Typically the inbound on-decapsulation security check is only done
+ * on the first fragment, the packet that contains the L4
+ * information. A security check on every fragment is possible, but
+ * it has a cost: state must be created on the first fragment.
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND(map_security_check_frag_command, static) = {
+ .path = "map params security-check fragments",
+ .short_help = "map params security-check fragments on|off",
+ .function = map_security_check_frag_command_fn,
+};
+
+/*?
+ * Add MAP domain
+ *
+ * @cliexpar
+ * @cliexstart{map add domain}
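+ * Example (illustrative):
+ * map add domain ip4-pfx 192.0.2.0/24 ip6-pfx 2001:db8::/40
+ * ip6-src 2001:db8:ffff::1 ea-bits-len 16 psid-offset 6 psid-len 8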
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND(map_add_domain_command, static) = {
+ .path = "map add domain",
+ .short_help = "map add domain ip4-pfx <ip4-pfx> ip6-pfx <ip6-pfx> "
+ "ip6-src <ip6-pfx> ea-bits-len <n> psid-offset <n> psid-len <n> "
+ "[map-t] [mtu <mtu>]",
+ .function = map_add_domain_command_fn,
+};
+
+/*?
+ * Add MAP rule to a domain
+ *
+ * @cliexpar
+ * @cliexstart{map add rule}
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND(map_add_rule_command, static) = {
+ .path = "map add rule",
+ .short_help = "map add rule index <domain> psid <psid> ip6-dst <ip6-addr>",
+ .function = map_add_rule_command_fn,
+};
+
+/*?
+ * Delete MAP domain
+ *
+ * @cliexpar
+ * @cliexstart{map del domain}
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND(map_del_command, static) = {
+ .path = "map del domain",
+ .short_help = "map del domain index <domain>",
+ .function = map_del_domain_command_fn,
+};
+
+/*?
+ * Show MAP domains
+ *
+ * @cliexpar
+ * @cliexstart{show map domain}
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND(show_map_domain_command, static) = {
+ .path = "show map domain",
+ .short_help = "show map domain index <n> [counters]",
+ .function = show_map_domain_command_fn,
+};
+
+/*?
+ * Show MAP statistics
+ *
+ * @cliexpar
+ * @cliexstart{show map stats}
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND(show_map_stats_command, static) = {
+ .path = "show map stats",
+ .short_help = "show map stats",
+ .function = show_map_stats_command_fn,
+};
+
+/*?
+ * Show MAP fragmentation information
+ *
+ * @cliexpar
+ * @cliexstart{show map fragments}
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND(show_map_fragments_command, static) = {
+ .path = "show map fragments",
+ .short_help = "show map fragments",
+ .function = show_map_fragments_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * map_init
+ */
+clib_error_t *
+map_init (vlib_main_t * vm)
+{
+ map_main_t *mm = &map_main;
+ mm->vnet_main = vnet_get_main ();
+ mm->vlib_main = vm;
+
+#ifdef MAP_SKIP_IP6_LOOKUP
+ memset (&mm->preresolve_ip4, 0, sizeof (mm->preresolve_ip4));
+ memset (&mm->preresolve_ip6, 0, sizeof (mm->preresolve_ip6));
+ mm->adj4_index = 0;
+ mm->adj6_index = 0;
+#endif
+
+ /* traffic class */
+ mm->tc = 0;
+ mm->tc_copy = true;
+
+ /* Inbound security check */
+ mm->sec_check = true;
+ mm->sec_check_frag = false;
+
+ /* ICMP6 Type 1, Code 5 for security check failure */
+ mm->icmp6_enabled = false;
+
+ /* Inner or outer fragmentation */
+ mm->frag_inner = false;
+ mm->frag_ignore_df = false;
+
+ vec_validate (mm->domain_counters, MAP_N_DOMAIN_COUNTER - 1);
+ mm->domain_counters[MAP_DOMAIN_COUNTER_RX].name = "rx";
+ mm->domain_counters[MAP_DOMAIN_COUNTER_TX].name = "tx";
+
+ vlib_validate_simple_counter (&mm->icmp_relayed, 0);
+ vlib_zero_simple_counter (&mm->icmp_relayed, 0);
+
+ /* IP4 virtual reassembly */
+ mm->ip4_reass_hash_table = 0;
+ mm->ip4_reass_pool = 0;
+ mm->ip4_reass_lock =
+ clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
+ mm->ip4_reass_conf_ht_ratio = MAP_IP4_REASS_HT_RATIO_DEFAULT;
+ mm->ip4_reass_conf_lifetime_ms = MAP_IP4_REASS_LIFETIME_DEFAULT;
+ mm->ip4_reass_conf_pool_size = MAP_IP4_REASS_POOL_SIZE_DEFAULT;
+ mm->ip4_reass_conf_buffers = MAP_IP4_REASS_BUFFERS_DEFAULT;
+ mm->ip4_reass_ht_log2len =
+ map_get_ht_log2len (mm->ip4_reass_conf_ht_ratio,
+ mm->ip4_reass_conf_pool_size);
+ mm->ip4_reass_fifo_last = MAP_REASS_INDEX_NONE;
+ map_ip4_reass_reinit (NULL, NULL);
+
+ /* IP6 virtual reassembly */
+ mm->ip6_reass_hash_table = 0;
+ mm->ip6_reass_pool = 0;
+ mm->ip6_reass_lock =
+ clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
+ mm->ip6_reass_conf_ht_ratio = MAP_IP6_REASS_HT_RATIO_DEFAULT;
+ mm->ip6_reass_conf_lifetime_ms = MAP_IP6_REASS_LIFETIME_DEFAULT;
+ mm->ip6_reass_conf_pool_size = MAP_IP6_REASS_POOL_SIZE_DEFAULT;
+ mm->ip6_reass_conf_buffers = MAP_IP6_REASS_BUFFERS_DEFAULT;
+ mm->ip6_reass_ht_log2len =
+ map_get_ht_log2len (mm->ip6_reass_conf_ht_ratio,
+ mm->ip6_reass_conf_pool_size);
+ mm->ip6_reass_fifo_last = MAP_REASS_INDEX_NONE;
+ map_ip6_reass_reinit (NULL, NULL);
+
+ map_dpo_module_init ();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (map_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/map/map.h b/src/vnet/map/map.h
new file mode 100644
index 00000000000..f446b739a93
--- /dev/null
+++ b/src/vnet/map/map.h
@@ -0,0 +1,591 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <stdbool.h>
+#include <vppinfra/error.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vlib/vlib.h>
+#include <vnet/fib/fib_types.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/adj/adj.h>
+#include <vnet/map/map_dpo.h>
+#include <vnet/dpo/load_balance.h>
+
+#define MAP_SKIP_IP6_LOOKUP 1
+
+typedef enum
+{
+ MAP_SENDER,
+ MAP_RECEIVER
+} map_dir_e;
+
+int map_create_domain (ip4_address_t * ip4_prefix, u8 ip4_prefix_len,
+ ip6_address_t * ip6_prefix, u8 ip6_prefix_len,
+ ip6_address_t * ip6_src, u8 ip6_src_len,
+ u8 ea_bits_len, u8 psid_offset, u8 psid_length,
+ u32 * map_domain_index, u16 mtu, u8 flags);
+int map_delete_domain (u32 map_domain_index);
+int map_add_del_psid (u32 map_domain_index, u16 psid, ip6_address_t * tep,
+ u8 is_add);
+u8 *format_map_trace (u8 * s, va_list * args);
+i32 ip4_get_port (ip4_header_t * ip, map_dir_e dir, u16 buffer_len);
+i32 ip6_get_port (ip6_header_t * ip6, map_dir_e dir, u16 buffer_len);
+u16 ip4_map_get_port (ip4_header_t * ip, map_dir_e dir);
+
+typedef enum __attribute__ ((__packed__))
+{
+  MAP_DOMAIN_PREFIX = 1 << 0,
+  MAP_DOMAIN_TRANSLATION = 1 << 1,	// The domain uses MAP-T
+} map_domain_flags_e;
+
+/**
+ * IP4 reassembly logic:
+ * One virtually reassembled flow requires a map_ip4_reass_t structure in order
+ * to keep the first-fragment port number and, optionally, cache out of sequence
+ * packets.
+ * There are up to MAP_IP4_REASS_MAX_REASSEMBLY such structures.
+ * When in use, those structures are stored in a hash table of MAP_IP4_REASS_BUCKETS buckets.
+ * When a new structure needs to be used, it is allocated from available ones.
+ * If there is no structure available, the oldest in use is selected and reused if
+ * and only if it was first allocated more than the configured lifetime ago.
+ * In case no structure can be allocated, the fragment is dropped.
+ */
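+
+/*
+ * Minimal usage sketch (illustrative only; 'ip' is the fragment's parsed
+ * IPv4 header and 'pi' its vlib buffer index):
+ *
+ *   u32 *to_drop = NULL;
+ *   map_ip4_reass_lock ();
+ *   map_ip4_reass_t *r = map_ip4_reass_get (ip->src_address.as_u32,
+ *                                           ip->dst_address.as_u32,
+ *                                           ip->fragment_id,
+ *                                           ip->protocol, &to_drop);
+ *   if (r && r->port < 0 && map_ip4_reass_add_fragment (r, pi))
+ *     ;                        // out of buffer slots: drop this fragment
+ *   map_ip4_reass_unlock ();   // then drop everything listed in to_drop
+ */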
+
+#define MAP_IP4_REASS_LIFETIME_DEFAULT (100) /* ms */
+#define MAP_IP4_REASS_HT_RATIO_DEFAULT (1.0)
+#define MAP_IP4_REASS_POOL_SIZE_DEFAULT 1024 // Number of reassembly structures
+#define MAP_IP4_REASS_BUFFERS_DEFAULT 2048
+
+#define MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY 5 // Number of fragments per reassembly
+
+#define MAP_IP6_REASS_LIFETIME_DEFAULT (100) /* ms */
+#define MAP_IP6_REASS_HT_RATIO_DEFAULT (1.0)
+#define MAP_IP6_REASS_POOL_SIZE_DEFAULT 1024 // Number of reassembly structures
+#define MAP_IP6_REASS_BUFFERS_DEFAULT 2048
+
+#define MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY 5
+
+#define MAP_IP6_REASS_COUNT_BYTES
+#define MAP_IP4_REASS_COUNT_BYTES
+
+//#define IP6_MAP_T_OVERRIDE_TOS 0
+
+/*
+ * This structure _MUST_ be no larger than a single cache line (64 bytes).
+ * If more space is needed make a union of ip6_prefix and *rules, those are mutually exclusive.
+ */
+typedef struct
+{
+ ip6_address_t ip6_src;
+ ip6_address_t ip6_prefix;
+ ip6_address_t *rules;
+ u32 suffix_mask;
+ ip4_address_t ip4_prefix;
+ u16 psid_mask;
+ u16 mtu;
+ map_domain_flags_e flags;
+ u8 ip6_prefix_len;
+ u8 ip6_src_len;
+ u8 ea_bits_len;
+ u8 psid_offset;
+ u8 psid_length;
+
+ /* helpers */
+ u8 psid_shift;
+ u8 suffix_shift;
+ u8 ea_shift;
+
+ /* not used by forwarding */
+ u8 ip4_prefix_len;
+} map_domain_t;
+
+STATIC_ASSERT ((sizeof (map_domain_t) <= CLIB_CACHE_LINE_BYTES),
+ "MAP domain fits in one cacheline");
+
+#define MAP_REASS_INDEX_NONE ((u16)0xffff)
+
+/*
+ * Hash key, padded out to 16 bytes for fast compare
+ */
+/* *INDENT-OFF* */
+typedef union {
+ CLIB_PACKED (struct {
+ ip4_address_t src;
+ ip4_address_t dst;
+ u16 fragment_id;
+ u8 protocol;
+ });
+ u64 as_u64[2];
+ u32 as_u32[4];
+} map_ip4_reass_key_t;
+/* *INDENT-ON* */
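+
+/*
+ * The padding allows a key compare to be done as two u64 loads instead of a
+ * byte-wise memcmp; a sketch (map_ip4_reass_key_equal is a hypothetical
+ * helper, not part of this header):
+ *
+ *   static_always_inline int
+ *   map_ip4_reass_key_equal (map_ip4_reass_key_t * a, map_ip4_reass_key_t * b)
+ *   {
+ *     return a->as_u64[0] == b->as_u64[0] && a->as_u64[1] == b->as_u64[1];
+ *   }
+ */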
+
+typedef struct
+{
+ map_ip4_reass_key_t key;
+ f64 ts;
+#ifdef MAP_IP4_REASS_COUNT_BYTES
+ u16 expected_total;
+ u16 forwarded;
+#endif
+ i32 port;
+ u16 bucket;
+ u16 bucket_next;
+ u16 fifo_prev;
+ u16 fifo_next;
+ u32 fragments[MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY];
+} map_ip4_reass_t;
+
+/*
+ * MAP domain counters
+ */
+typedef enum
+{
+ /* Simple counters */
+ MAP_DOMAIN_IPV4_FRAGMENT = 0,
+ /* Combined counters */
+ MAP_DOMAIN_COUNTER_RX = 0,
+ MAP_DOMAIN_COUNTER_TX,
+ MAP_N_DOMAIN_COUNTER
+} map_domain_counter_t;
+
+/*
+ * IPv6 reassembly hash key, padded out to 40 bytes for fast compare
+ */
+/* *INDENT-OFF* */
+typedef union {
+ CLIB_PACKED (struct {
+ ip6_address_t src;
+ ip6_address_t dst;
+ u32 fragment_id;
+ u8 protocol;
+ });
+ u64 as_u64[5];
+ u32 as_u32[10];
+} map_ip6_reass_key_t;
+/* *INDENT-ON* */
+
+typedef struct {
+ u32 pi; //Cached packet or ~0
+ u16 next_data_offset; //The data offset of the additional 20 bytes or ~0
+ u8 next_data_len; //Number of bytes ready to be copied (20 if not last fragment)
+ u8 next_data[20]; //The 20 additional bytes
+} map_ip6_fragment_t;
+
+typedef struct {
+ map_ip6_reass_key_t key;
+ f64 ts;
+#ifdef MAP_IP6_REASS_COUNT_BYTES
+ u16 expected_total;
+ u16 forwarded;
+#endif
+ u16 bucket; //What hash bucket this element is linked in
+ u16 bucket_next;
+ u16 fifo_prev;
+ u16 fifo_next;
+ ip4_header_t ip4_header;
+ map_ip6_fragment_t fragments[MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY];
+} map_ip6_reass_t;
+
+typedef struct {
+ /* pool of MAP domains */
+ map_domain_t *domains;
+
+ /* MAP Domain packet/byte counters indexed by map domain index */
+ vlib_simple_counter_main_t *simple_domain_counters;
+ vlib_combined_counter_main_t *domain_counters;
+ volatile u32 *counter_lock;
+
+#ifdef MAP_SKIP_IP6_LOOKUP
+  /* pre-resolve */
+ u32 adj6_index, adj4_index;
+ ip4_address_t preresolve_ip4;
+ ip6_address_t preresolve_ip6;
+#endif
+
+ /* Traffic class: zero, copy (~0) or fixed value */
+ u8 tc;
+ bool tc_copy;
+
+ bool sec_check; /* Inbound security check */
+ bool sec_check_frag; /* Inbound security check for (subsequent) fragments */
+ bool icmp6_enabled; /* Send destination unreachable for security check failure */
+
+ /* ICMPv6 -> ICMPv4 relay parameters */
+ ip4_address_t icmp4_src_address;
+ vlib_simple_counter_main_t icmp_relayed;
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+
+ /*
+ * IPv4 encap and decap reassembly
+ */
+ /* Configuration */
+ f32 ip4_reass_conf_ht_ratio; //Size of ht is 2^ceil(log2(ratio*pool_size))
+ u16 ip4_reass_conf_pool_size; //Max number of allocated reass structures
+ u16 ip4_reass_conf_lifetime_ms; //Time a reassembly struct is considered valid in ms
+ u32 ip4_reass_conf_buffers; //Maximum number of buffers used by ip4 reassembly
+
+ /* Runtime */
+ map_ip4_reass_t *ip4_reass_pool;
+ u8 ip4_reass_ht_log2len; //Hash table size is 2^log2len
+ u16 ip4_reass_allocated;
+ u16 *ip4_reass_hash_table;
+ u16 ip4_reass_fifo_last;
+ volatile u32 *ip4_reass_lock;
+
+ /* Counters */
+ u32 ip4_reass_buffered_counter;
+
+ bool frag_inner; /* Inner or outer fragmentation */
+ bool frag_ignore_df; /* Fragment (outer) packet even if DF is set */
+
+ /*
+ * IPv6 decap reassembly
+ */
+ /* Configuration */
+ f32 ip6_reass_conf_ht_ratio; //Size of ht is 2^ceil(log2(ratio*pool_size))
+ u16 ip6_reass_conf_pool_size; //Max number of allocated reass structures
+ u16 ip6_reass_conf_lifetime_ms; //Time a reassembly struct is considered valid in ms
+ u32 ip6_reass_conf_buffers; //Maximum number of buffers used by ip6 reassembly
+
+ /* Runtime */
+ map_ip6_reass_t *ip6_reass_pool;
+ u8 ip6_reass_ht_log2len; //Hash table size is 2^log2len
+ u16 ip6_reass_allocated;
+ u16 *ip6_reass_hash_table;
+ u16 ip6_reass_fifo_last;
+ volatile u32 *ip6_reass_lock;
+
+ /* Counters */
+ u32 ip6_reass_buffered_counter;
+
+} map_main_t;
+
+/*
+ * MAP Error counters/messages
+ */
+#define foreach_map_error \
+ /* Must be first. */ \
+ _(NONE, "valid MAP packets") \
+ _(BAD_PROTOCOL, "bad protocol") \
+ _(SEC_CHECK, "security check failed") \
+ _(ENCAP_SEC_CHECK, "encap security check failed") \
+ _(DECAP_SEC_CHECK, "decap security check failed") \
+ _(ICMP, "unable to translate ICMP") \
+ _(ICMP_RELAY, "unable to relay ICMP") \
+ _(UNKNOWN, "unknown") \
+ _(NO_BINDING, "no binding") \
+ _(NO_DOMAIN, "no domain") \
+ _(FRAGMENTED, "packet is a fragment") \
+ _(FRAGMENT_MEMORY, "could not cache fragment") \
+ _(FRAGMENT_MALFORMED, "fragment has unexpected format")\
+ _(FRAGMENT_DROPPED, "dropped cached fragment") \
+ _(MALFORMED, "malformed packet") \
+ _(DF_SET, "can't fragment, DF set")
+
+typedef enum {
+#define _(sym,str) MAP_ERROR_##sym,
+ foreach_map_error
+#undef _
+ MAP_N_ERROR,
+ } map_error_t;
+
+u64 map_error_counter_get(u32 node_index, map_error_t map_error);
+
+typedef struct {
+ u32 map_domain_index;
+ u16 port;
+} map_trace_t;
+
+map_main_t map_main;
+
+extern vlib_node_registration_t ip4_map_node;
+extern vlib_node_registration_t ip6_map_node;
+
+extern vlib_node_registration_t ip4_map_t_node;
+extern vlib_node_registration_t ip4_map_t_fragmented_node;
+extern vlib_node_registration_t ip4_map_t_tcp_udp_node;
+extern vlib_node_registration_t ip4_map_t_icmp_node;
+
+extern vlib_node_registration_t ip6_map_t_node;
+extern vlib_node_registration_t ip6_map_t_fragmented_node;
+extern vlib_node_registration_t ip6_map_t_tcp_udp_node;
+extern vlib_node_registration_t ip6_map_t_icmp_node;
+
+/*
+ * map_get_pfx
+ */
+static_always_inline u64
+map_get_pfx (map_domain_t *d, u32 addr, u16 port)
+{
+ u16 psid = (port >> d->psid_shift) & d->psid_mask;
+
+ if (d->ea_bits_len == 0 && d->rules)
+ return clib_net_to_host_u64(d->rules[psid].as_u64[0]);
+
+ u32 suffix = (addr >> d->suffix_shift) & d->suffix_mask;
+ u64 ea = d->ea_bits_len == 0 ? 0 : (((u64) suffix << d->psid_length)) | psid;
+
+ return clib_net_to_host_u64(d->ip6_prefix.as_u64[0]) | ea << d->ea_shift;
+}
+
+static_always_inline u64
+map_get_pfx_net (map_domain_t *d, u32 addr, u16 port)
+{
+ return clib_host_to_net_u64(map_get_pfx(d, clib_net_to_host_u32(addr),
+ clib_net_to_host_u16(port)));
+}
+
+/*
+ * map_get_sfx
+ */
+static_always_inline u64
+map_get_sfx (map_domain_t *d, u32 addr, u16 port)
+{
+ u16 psid = (port >> d->psid_shift) & d->psid_mask;
+
+ /* Shared 1:1 mode. */
+ if (d->ea_bits_len == 0 && d->rules)
+ return clib_net_to_host_u64(d->rules[psid].as_u64[1]);
+ if (d->ip6_prefix_len == 128)
+ return clib_net_to_host_u64(d->ip6_prefix.as_u64[1]);
+
+ /* IPv4 prefix */
+ if (d->flags & MAP_DOMAIN_PREFIX)
+ return (u64) (addr & (0xFFFFFFFF << d->suffix_shift)) << 16;
+
+ /* Shared or full IPv4 address */
+ return ((u64) addr << 16) | psid;
+}
+
+static_always_inline u64
+map_get_sfx_net (map_domain_t *d, u32 addr, u16 port)
+{
+ return clib_host_to_net_u64(map_get_sfx(d, clib_net_to_host_u32(addr),
+ clib_net_to_host_u16(port)));
+}
+
+static_always_inline u32
+map_get_ip4 (ip6_address_t *addr)
+{
+ return clib_host_to_net_u32(clib_net_to_host_u64(addr->as_u64[1]) >> 16);
+}
+
+/*
+ * Get the MAP domain from an IPv4 lookup adjacency.
+ */
+static_always_inline map_domain_t *
+ip4_map_get_domain (u32 mdi,
+ u32 *map_domain_index)
+{
+ map_main_t *mm = &map_main;
+ map_dpo_t *md;
+
+ md = map_dpo_get(mdi);
+
+ ASSERT(md);
+ *map_domain_index = md->md_domain;
+ return pool_elt_at_index(mm->domains, *map_domain_index);
+}
+
+/*
+ * Get the MAP domain from an IPv6 lookup adjacency.
+ * If the IPv6 address or prefix is not shared, no lookup is required.
+ * The IPv4 address is used otherwise.
+ */
+static_always_inline map_domain_t *
+ip6_map_get_domain (u32 mdi, ip4_address_t *addr,
+ u32 *map_domain_index, u8 *error)
+{
+ map_main_t *mm = &map_main;
+ map_dpo_t *md;
+
+ /*
+ * Disable direct MAP domain lookup on decap, until the security check is updated to verify IPv4 SA.
+ * (That's done implicitly when MAP domain is looked up in the IPv4 FIB)
+ */
+#ifdef MAP_NONSHARED_DOMAIN_ENABLED
+ md = map_dpo_get(mdi);
+
+ ASSERT(md);
+ *map_domain_index = md->md_domain;
+ if (*map_domain_index != ~0)
+ return pool_elt_at_index(mm->domains, *map_domain_index);
+#endif
+
+ u32 lbi = ip4_fib_forwarding_lookup(0, addr);
+ const dpo_id_t *dpo = load_balance_get_bucket(lbi, 0);
+ if (PREDICT_TRUE(dpo->dpoi_type == map_dpo_type ||
+ dpo->dpoi_type == map_t_dpo_type))
+ {
+ md = map_dpo_get(dpo->dpoi_index);
+ *map_domain_index = md->md_domain;
+ return pool_elt_at_index(mm->domains, *map_domain_index);
+ }
+ *error = MAP_ERROR_NO_DOMAIN;
+ return NULL;
+}
+
+map_ip4_reass_t *
+map_ip4_reass_get(u32 src, u32 dst, u16 fragment_id,
+ u8 protocol, u32 **pi_to_drop);
+void
+map_ip4_reass_free(map_ip4_reass_t *r, u32 **pi_to_drop);
+
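+/* Test-and-set spinlock serializing access to the IPv4 reassembly state; the IPv6 variants below follow the same pattern */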
+#define map_ip4_reass_lock() while (__sync_lock_test_and_set(map_main.ip4_reass_lock, 1)) {}
+#define map_ip4_reass_unlock() do {CLIB_MEMORY_BARRIER(); *map_main.ip4_reass_lock = 0;} while(0)
+
+static_always_inline void
+map_ip4_reass_get_fragments(map_ip4_reass_t *r, u32 **pi)
+{
+ int i;
+ for (i=0; i<MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+ if(r->fragments[i] != ~0) {
+ vec_add1(*pi, r->fragments[i]);
+ r->fragments[i] = ~0;
+ map_main.ip4_reass_buffered_counter--;
+ }
+}
+
+int map_ip4_reass_add_fragment(map_ip4_reass_t *r, u32 pi);
+
+map_ip6_reass_t *
+map_ip6_reass_get(ip6_address_t *src, ip6_address_t *dst, u32 fragment_id,
+ u8 protocol, u32 **pi_to_drop);
+void
+map_ip6_reass_free(map_ip6_reass_t *r, u32 **pi_to_drop);
+
+#define map_ip6_reass_lock() while (__sync_lock_test_and_set(map_main.ip6_reass_lock, 1)) {}
+#define map_ip6_reass_unlock() do {CLIB_MEMORY_BARRIER(); *map_main.ip6_reass_lock = 0;} while(0)
+
+int
+map_ip6_reass_add_fragment(map_ip6_reass_t *r, u32 pi,
+ u16 data_offset, u16 next_data_offset,
+ u8 *data_start, u16 data_len);
+
+void map_ip4_drop_pi(u32 pi);
+
+int map_ip4_reass_conf_ht_ratio(f32 ht_ratio, u32 *trashed_reass, u32 *dropped_packets);
+#define MAP_IP4_REASS_CONF_HT_RATIO_MAX 100
+int map_ip4_reass_conf_pool_size(u16 pool_size, u32 *trashed_reass, u32 *dropped_packets);
+#define MAP_IP4_REASS_CONF_POOL_SIZE_MAX (0xfeff)
+int map_ip4_reass_conf_lifetime(u16 lifetime_ms);
+#define MAP_IP4_REASS_CONF_LIFETIME_MAX 0xffff
+int map_ip4_reass_conf_buffers(u32 buffers);
+#define MAP_IP4_REASS_CONF_BUFFERS_MAX (0xffffffff)
+
+void map_ip6_drop_pi(u32 pi);
+
+
+int map_ip6_reass_conf_ht_ratio(f32 ht_ratio, u32 *trashed_reass, u32 *dropped_packets);
+#define MAP_IP6_REASS_CONF_HT_RATIO_MAX 100
+int map_ip6_reass_conf_pool_size(u16 pool_size, u32 *trashed_reass, u32 *dropped_packets);
+#define MAP_IP6_REASS_CONF_POOL_SIZE_MAX (0xfeff)
+int map_ip6_reass_conf_lifetime(u16 lifetime_ms);
+#define MAP_IP6_REASS_CONF_LIFETIME_MAX 0xffff
+int map_ip6_reass_conf_buffers(u32 buffers);
+#define MAP_IP6_REASS_CONF_BUFFERS_MAX (0xffffffff)
+
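+/*
+ * Parse an IPv6 header, skipping over a leading fragment header if present.
+ * Returns non-zero if the packet is too short to hold the first 4 bytes of
+ * the L4 header.
+ */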
+static_always_inline
+int ip6_parse(const ip6_header_t *ip6, u32 buff_len,
+ u8 *l4_protocol, u16 *l4_offset, u16 *frag_hdr_offset)
+{
+ if (ip6->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION) {
+ *l4_protocol = ((ip6_frag_hdr_t *)(ip6 + 1))->next_hdr;
+ *frag_hdr_offset = sizeof(*ip6);
+ *l4_offset = sizeof(*ip6) + sizeof(ip6_frag_hdr_t);
+ } else {
+ *l4_protocol = ip6->protocol;
+ *frag_hdr_offset = 0;
+ *l4_offset = sizeof(*ip6);
+ }
+
+ return (buff_len < (*l4_offset + 4)) ||
+ (clib_net_to_host_u16(ip6->payload_length) < (*l4_offset + 4 - sizeof(*ip6)));
+}
+
+
+#define u8_ptr_add(ptr, index) (((u8 *)ptr) + index)
+#define u16_net_add(u, val) clib_host_to_net_u16(clib_net_to_host_u16(u) + (val))
+
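+/* Fold the 32-bit IPv6 fragment ID into the 16-bit IPv4 identification field by XORing its halves */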
+#define frag_id_6to4(id) ((id) ^ ((id) >> 16))
+
+static_always_inline void
+ip4_map_t_embedded_address (map_domain_t *d,
+ ip6_address_t *ip6, const ip4_address_t *ip4)
+{
+ ASSERT(d->ip6_src_len == 96); //No support for other lengths for now
+ ip6->as_u64[0] = d->ip6_src.as_u64[0];
+ ip6->as_u32[2] = d->ip6_src.as_u32[2];
+ ip6->as_u32[3] = ip4->as_u32;
+}
+
+static_always_inline u32
+ip6_map_t_embedded_address (map_domain_t *d, ip6_address_t *addr)
+{
+ ASSERT(d->ip6_src_len == 96); //No support for other lengths for now
+ return addr->as_u32[3];
+}
+
+static inline void
+map_domain_counter_lock (map_main_t *mm)
+{
+ if (mm->counter_lock)
+ while (__sync_lock_test_and_set(mm->counter_lock, 1))
+ /* zzzz */ ;
+}
+static inline void
+map_domain_counter_unlock (map_main_t *mm)
+{
+ if (mm->counter_lock)
+ *mm->counter_lock = 0;
+}
+
+
+static_always_inline void
+map_send_all_to_node(vlib_main_t *vm, u32 *pi_vector,
+ vlib_node_runtime_t *node, vlib_error_t *error,
+ u32 next)
+{
+ u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
+ //Deal with fragments that are ready
+ from = pi_vector;
+ n_left_from = vec_len(pi_vector);
+ next_index = node->cached_next_index;
+ while (n_left_from > 0) {
+ vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+ while (n_left_from > 0 && n_left_to_next > 0) {
+ u32 pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ vlib_buffer_t *p0 = vlib_get_buffer(vm, pi0);
+ p0->error = *error;
+ vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next);
+ }
+ vlib_put_next_frame(vm, node, next_index, n_left_to_next);
+ }
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/map/map_api.c b/src/vnet/map/map_api.c
new file mode 100644
index 00000000000..7febeb3d7a3
--- /dev/null
+++ b/src/vnet/map/map_api.c
@@ -0,0 +1,295 @@
+/*
+ *------------------------------------------------------------------
+ * map_api.c - vnet map api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include "map.h"
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(MAP_ADD_DOMAIN, map_add_domain) \
+_(MAP_DEL_DOMAIN, map_del_domain) \
+_(MAP_ADD_DEL_RULE, map_add_del_rule) \
+_(MAP_DOMAIN_DUMP, map_domain_dump) \
+_(MAP_RULE_DUMP, map_rule_dump) \
+_(MAP_SUMMARY_STATS, map_summary_stats)
+
+static void
+vl_api_map_add_domain_t_handler (vl_api_map_add_domain_t * mp)
+{
+ vl_api_map_add_domain_reply_t *rmp;
+ int rv = 0;
+ u32 index;
+ u8 flags = mp->is_translation ? MAP_DOMAIN_TRANSLATION : 0;
+ rv =
+ map_create_domain ((ip4_address_t *) & mp->ip4_prefix, mp->ip4_prefix_len,
+ (ip6_address_t *) & mp->ip6_prefix, mp->ip6_prefix_len,
+ (ip6_address_t *) & mp->ip6_src,
+ mp->ip6_src_prefix_len, mp->ea_bits_len,
+ mp->psid_offset, mp->psid_length, &index,
+ ntohs (mp->mtu), flags);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_MAP_ADD_DOMAIN_REPLY,
+ ({
+ rmp->index = ntohl(index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_map_del_domain_t_handler (vl_api_map_del_domain_t * mp)
+{
+ vl_api_map_del_domain_reply_t *rmp;
+ int rv = 0;
+
+ rv = map_delete_domain (ntohl (mp->index));
+
+ REPLY_MACRO (VL_API_MAP_DEL_DOMAIN_REPLY);
+}
+
+static void
+vl_api_map_add_del_rule_t_handler (vl_api_map_add_del_rule_t * mp)
+{
+  vl_api_map_add_del_rule_reply_t *rmp;
+ int rv = 0;
+
+ rv =
+ map_add_del_psid (ntohl (mp->index), ntohs (mp->psid),
+ (ip6_address_t *) mp->ip6_dst, mp->is_add);
+
+ REPLY_MACRO (VL_API_MAP_ADD_DEL_RULE_REPLY);
+}
+
+static void
+vl_api_map_domain_dump_t_handler (vl_api_map_domain_dump_t * mp)
+{
+ vl_api_map_domain_details_t *rmp;
+ map_main_t *mm = &map_main;
+ map_domain_t *d;
+ unix_shared_memory_queue_t *q;
+
+ if (pool_elts (mm->domains) == 0)
+ return;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ /* *INDENT-OFF* */
+ pool_foreach(d, mm->domains,
+ ({
+    /* Make sure every field is initialized (or add a memset()) */
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_MAP_DOMAIN_DETAILS);
+ rmp->domain_index = htonl(d - mm->domains);
+ rmp->ea_bits_len = d->ea_bits_len;
+ rmp->psid_offset = d->psid_offset;
+ rmp->psid_length = d->psid_length;
+ clib_memcpy(rmp->ip4_prefix, &d->ip4_prefix, sizeof(rmp->ip4_prefix));
+ rmp->ip4_prefix_len = d->ip4_prefix_len;
+ clib_memcpy(rmp->ip6_prefix, &d->ip6_prefix, sizeof(rmp->ip6_prefix));
+ rmp->ip6_prefix_len = d->ip6_prefix_len;
+ clib_memcpy(rmp->ip6_src, &d->ip6_src, sizeof(rmp->ip6_src));
+ rmp->ip6_src_len = d->ip6_src_len;
+ rmp->mtu = htons(d->mtu);
+ rmp->is_translation = (d->flags & MAP_DOMAIN_TRANSLATION);
+ rmp->context = mp->context;
+
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_map_rule_dump_t_handler (vl_api_map_rule_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+ u16 i;
+ ip6_address_t dst;
+ vl_api_map_rule_details_t *rmp;
+ map_main_t *mm = &map_main;
+ u32 domain_index = ntohl (mp->domain_index);
+ map_domain_t *d;
+
+ if (pool_elts (mm->domains) == 0)
+ return;
+
+ d = pool_elt_at_index (mm->domains, domain_index);
+ if (!d || !d->rules)
+ {
+ return;
+ }
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ for (i = 0; i < (0x1 << d->psid_length); i++)
+ {
+ dst = d->rules[i];
+ if (dst.as_u64[0] == 0 && dst.as_u64[1] == 0)
+ {
+ continue;
+ }
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_MAP_RULE_DETAILS);
+ rmp->psid = htons (i);
+ clib_memcpy (rmp->ip6_dst, &dst, sizeof (rmp->ip6_dst));
+ rmp->context = mp->context;
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ }
+}
+
+static void
+vl_api_map_summary_stats_t_handler (vl_api_map_summary_stats_t * mp)
+{
+ vl_api_map_summary_stats_reply_t *rmp;
+ vlib_combined_counter_main_t *cm;
+ vlib_counter_t v;
+ int i, which;
+ u64 total_pkts[VLIB_N_RX_TX];
+ u64 total_bytes[VLIB_N_RX_TX];
+ map_main_t *mm = &map_main;
+ unix_shared_memory_queue_t *q =
+ vl_api_client_index_to_input_queue (mp->client_index);
+
+ if (!q)
+ return;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_MAP_SUMMARY_STATS_REPLY);
+ rmp->context = mp->context;
+ rmp->retval = 0;
+
+ memset (total_pkts, 0, sizeof (total_pkts));
+ memset (total_bytes, 0, sizeof (total_bytes));
+
+ map_domain_counter_lock (mm);
+ vec_foreach (cm, mm->domain_counters)
+ {
+ which = cm - mm->domain_counters;
+
+ for (i = 0; i < vec_len (cm->maxi); i++)
+ {
+ vlib_get_combined_counter (cm, i, &v);
+ total_pkts[which] += v.packets;
+ total_bytes[which] += v.bytes;
+ }
+ }
+
+ map_domain_counter_unlock (mm);
+
+ /* Note: in network byte order! */
+ rmp->total_pkts[MAP_DOMAIN_COUNTER_RX] =
+ clib_host_to_net_u64 (total_pkts[MAP_DOMAIN_COUNTER_RX]);
+ rmp->total_bytes[MAP_DOMAIN_COUNTER_RX] =
+ clib_host_to_net_u64 (total_bytes[MAP_DOMAIN_COUNTER_RX]);
+ rmp->total_pkts[MAP_DOMAIN_COUNTER_TX] =
+ clib_host_to_net_u64 (total_pkts[MAP_DOMAIN_COUNTER_TX]);
+ rmp->total_bytes[MAP_DOMAIN_COUNTER_TX] =
+ clib_host_to_net_u64 (total_bytes[MAP_DOMAIN_COUNTER_TX]);
+ rmp->total_bindings = clib_host_to_net_u64 (pool_elts (mm->domains));
+ rmp->total_ip4_fragments = 0; // Not yet implemented. Should be a simple counter.
+ rmp->total_security_check[MAP_DOMAIN_COUNTER_TX] =
+ clib_host_to_net_u64 (map_error_counter_get
+ (ip4_map_node.index, MAP_ERROR_ENCAP_SEC_CHECK));
+ rmp->total_security_check[MAP_DOMAIN_COUNTER_RX] =
+ clib_host_to_net_u64 (map_error_counter_get
+ (ip4_map_node.index, MAP_ERROR_DECAP_SEC_CHECK));
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+/*
+ * map_api_hookup
+ * Add the MAP API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_map;
+#undef _
+}
+
+static clib_error_t *
+map_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (map_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/map/map_doc.md b/src/vnet/map/map_doc.md
new file mode 100644
index 00000000000..17f3c51174b
--- /dev/null
+++ b/src/vnet/map/map_doc.md
@@ -0,0 +1,69 @@
+# VPP MAP and Lw4o6 implementation {#map_doc}
+
+This is a memo intended to contain documentation of the VPP MAP and Lw4o6 implementations.
+Everything that is not directly obvious should come here.
+
+
+
+## MAP-E Virtual Reassembly
+
+The MAP-E implementation supports handling of IPv4 fragments as well as IPv4-in-IPv6 inner and outer fragments. This is called virtual reassembly because the fragments are not actually reassembled. Instead, meta-data about the first fragment is kept and reused for subsequent fragments.
+
+Fragment caching and handling is not always necessary. It is performed when:
+* An IPv4 fragment is received and the destination IPv4 address is shared.
+* An IPv6 packet is received with an inner IPv4 fragment, the IPv4 source address is shared, and 'security-check fragments' is on.
+* An IPv6 fragment is received.
+
+There are 3 dedicated nodes:
+* ip4-map-reass
+* ip6-map-ip4-reass
+* ip6-map-ip6-reass
+
+ip4-map sends all fragments to ip4-map-reass.
+ip6-map sends all inner-fragments to ip6-map-ip4-reass.
+ip6-map sends all outer-fragments to ip6-map-ip6-reass.
+
+IPv4 (resp. IPv6) virtual reassembly makes use of a hash table in order to store IPv4 (resp. IPv6) reassembly structures. The hash-key is based on the IPv4-src:IPv4-dst:Frag-ID:Protocol tuple (resp. IPv6-src:IPv6-dst:Frag-ID tuple, as the protocol is IPv4-in-IPv6). Therefore, each packet reassembly makes use of exactly one reassembly structure. When such a structure is allocated, it is timestamped with the current time. Finally, those structures are capable of storing a limited number of buffer indexes.
+
+An IPv4 (resp. IPv6) reassembly structure can cache up to MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY (resp. MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY) buffers. Buffers are cached until the first fragment is received.
+
+#### Virtual Reassembly configuration
+
+IPv4 and IPv6 virtual reassembly support the following configuration:
+
+    map params reassembly [ip4 | ip6] [lifetime <lifetime-ms>] [pool-size <pool-size>] [buffers <buffers>] [ht-ratio <ht-ratio>]
+
+lifetime:
+ The time in milliseconds a reassembly structure is considered valid. The longer it is, the more reliable reassembly is, but the more likely the pool of reassembly structures is to be exhausted. The IPv4 standard suggests a lifetime of 15 seconds; IPv6 specifies 60 seconds. Those values are not realistic for high-throughput cases.
+
+buffers:
+ The upper limit of buffers that are allowed to be cached. It can be used to protect against fragmentation attacks which would aim to exhaust the global buffers pool.
+
+pool-size:
+ The number of reassembly structures that can be allocated. As each structure can store a small fixed number of fragments, it also sets an upper-bound of 'pool-size * MAP_IPX_REASS_MAX_FRAGMENTS_PER_REASSEMBLY' buffers that can be cached in total.
+
+ht-ratio:
+ The number of buckets in the hash-table is pool-size * ht-ratio, rounded up to the next power of two.
+
+
+Any time pool-size or ht-ratio is modified, the hash-table is destroyed and re-created, which means all current state is lost.
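+
+For example (illustrative values, not tuned recommendations):
+
+    map params reassembly ip4 lifetime 100 pool-size 2048 buffers 4096 ht-ratio 2.0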
+
+
+##### Additional considerations
+
+Reassembly at high rate is expensive in terms of buffers. There is a trade-off between the lifetime and the number of allocated buffers. Reducing the lifetime helps, but at the cost of losing state for fragments that are widely spaced in time.
+
+Let:
+R be the packet rate at which fragments are received.
+F be the number of fragments per packet.
+
+Assuming the first fragment is always received last, we should have:
+
+    buffers > lifetime * R / F * (F - 1)
+    pool-size > lifetime * R / F
+
+This is a worst case. Receiving the first fragment earlier helps reduce the number of required buffers. Also, an optimization is implemented (MAP_IP6_REASS_COUNT_BYTES and MAP_IP4_REASS_COUNT_BYTES) which counts the transmitted bytes and, from the last fragment, deduces the total number of bytes to be transmitted, and therefore helps reduce the required 'pool-size'.
+
+But the formula shows that it is challenging to forward a significant amount of fragmented traffic at high rates. For instance, with a lifetime of 1 second, a 5 Mpps packet rate would require buffering up to 2.5 million fragments.
+
+If you want to do that, be prepared to configure a correspondingly large number of buffers and reassembly structures.
+
+
diff --git a/src/vnet/map/map_dpo.c b/src/vnet/map/map_dpo.c
new file mode 100644
index 00000000000..df2b5fa4197
--- /dev/null
+++ b/src/vnet/map/map_dpo.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip.h>
+#include <vnet/map/map_dpo.h>
+
+/**
+ * pool of all MAP DPOs
+ */
+map_dpo_t *map_dpo_pool;
+
+/**
+ * The registered MAP DPO types
+ */
+dpo_type_t map_dpo_type;
+dpo_type_t map_t_dpo_type;
+
+static map_dpo_t *
+map_dpo_alloc (void)
+{
+ map_dpo_t *md;
+
+ pool_get_aligned(map_dpo_pool, md, CLIB_CACHE_LINE_BYTES);
+ memset(md, 0, sizeof(*md));
+
+ return (md);
+}
+
+static index_t
+map_dpo_get_index (map_dpo_t *md)
+{
+ return (md - map_dpo_pool);
+}
+
+void
+map_dpo_create (dpo_proto_t dproto,
+ u32 domain_index,
+ dpo_id_t *dpo)
+{
+ map_dpo_t *md;
+
+ md = map_dpo_alloc();
+ md->md_domain = domain_index;
+ md->md_proto = dproto;
+
+ dpo_set(dpo,
+ map_dpo_type,
+ dproto,
+ map_dpo_get_index(md));
+}
+
+void
+map_t_dpo_create (dpo_proto_t dproto,
+ u32 domain_index,
+ dpo_id_t *dpo)
+{
+ map_dpo_t *md;
+
+ md = map_dpo_alloc();
+ md->md_domain = domain_index;
+ md->md_proto = dproto;
+
+ dpo_set(dpo,
+ map_t_dpo_type,
+ dproto,
+ map_dpo_get_index(md));
+}
+
+
+u8*
+format_map_dpo (u8 *s, va_list *args)
+{
+ index_t index = va_arg (*args, index_t);
+ CLIB_UNUSED(u32 indent) = va_arg (*args, u32);
+ map_dpo_t *md;
+
+ md = map_dpo_get(index);
+
+ return (format(s, "map:[%d]:%U domain:%d",
+ index,
+ format_dpo_proto, md->md_proto,
+ md->md_domain));
+}
+
+u8*
+format_map_t_dpo (u8 *s, va_list *args)
+{
+ index_t index = va_arg (*args, index_t);
+ CLIB_UNUSED(u32 indent) = va_arg (*args, u32);
+ map_dpo_t *md;
+
+ md = map_dpo_get(index);
+
+ return (format(s, "map-t:[%d]:%U domain:%d",
+ index,
+ format_dpo_proto, md->md_proto,
+ md->md_domain));
+}
+
+
+static void
+map_dpo_lock (dpo_id_t *dpo)
+{
+ map_dpo_t *md;
+
+ md = map_dpo_get(dpo->dpoi_index);
+
+ md->md_locks++;
+}
+
+static void
+map_dpo_unlock (dpo_id_t *dpo)
+{
+ map_dpo_t *md;
+
+ md = map_dpo_get(dpo->dpoi_index);
+
+ md->md_locks--;
+
+ if (0 == md->md_locks)
+ {
+ pool_put(map_dpo_pool, md);
+ }
+}
+
+const static dpo_vft_t md_vft = {
+ .dv_lock = map_dpo_lock,
+ .dv_unlock = map_dpo_unlock,
+ .dv_format = format_map_dpo,
+};
+
+const static char* const map_ip4_nodes[] =
+{
+ "ip4-map",
+ NULL,
+};
+const static char* const map_ip6_nodes[] =
+{
+ "ip6-map",
+ NULL,
+};
+
+const static char* const * const map_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = map_ip4_nodes,
+ [DPO_PROTO_IP6] = map_ip6_nodes,
+ [DPO_PROTO_MPLS] = NULL,
+};
+
+const static dpo_vft_t md_t_vft = {
+ .dv_lock = map_dpo_lock,
+ .dv_unlock = map_dpo_unlock,
+ .dv_format = format_map_t_dpo,
+};
+
+const static char* const map_t_ip4_nodes[] =
+{
+ "ip4-map-t",
+ NULL,
+};
+const static char* const map_t_ip6_nodes[] =
+{
+ "ip6-map-t",
+ NULL,
+};
+
+const static char* const * const map_t_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = map_t_ip4_nodes,
+ [DPO_PROTO_IP6] = map_t_ip6_nodes,
+ [DPO_PROTO_MPLS] = NULL,
+};
+
+void
+map_dpo_module_init (void)
+{
+ map_dpo_type = dpo_register_new_type(&md_vft, map_nodes);
+ map_t_dpo_type = dpo_register_new_type(&md_t_vft, map_t_nodes);
+}
diff --git a/src/vnet/map/map_dpo.h b/src/vnet/map/map_dpo.h
new file mode 100644
index 00000000000..be510dbaea6
--- /dev/null
+++ b/src/vnet/map/map_dpo.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MAP_DPO_H__
+#define __MAP_DPO_H__
+
+#include <vnet/vnet.h>
+#include <vnet/dpo/dpo.h>
+
+/**
+ * A representation of a MAP DPO
+ */
+typedef struct map_dpo_t
+{
+ /**
+   * The data-plane protocol
+ */
+ dpo_proto_t md_proto;
+
+ /**
+ * the MAP domain index
+ */
+ u32 md_domain;
+
+ /**
+   * Number of locks/users of the DPO
+ */
+ u16 md_locks;
+} map_dpo_t;
+
+extern void map_dpo_create (dpo_proto_t dproto,
+ u32 domain_index,
+ dpo_id_t *dpo);
+extern void map_t_dpo_create (dpo_proto_t dproto,
+ u32 domain_index,
+ dpo_id_t *dpo);
+
+extern u8* format_map_dpo(u8 *s, va_list *args);
+
+/*
+ * Encapsulation violation for fast data-path access
+ */
+extern map_dpo_t *map_dpo_pool;
+extern dpo_type_t map_dpo_type;
+extern dpo_type_t map_t_dpo_type;
+
+static inline map_dpo_t *
+map_dpo_get (index_t index)
+{
+ return (pool_elt_at_index(map_dpo_pool, index));
+}
+
+extern void map_dpo_module_init(void);
+
+#endif
diff --git a/src/vnet/map/test.c b/src/vnet/map/test.c
new file mode 100644
index 00000000000..f3c893a7a31
--- /dev/null
+++ b/src/vnet/map/test.c
@@ -0,0 +1,205 @@
+/*
+ * test.c : MAP unit tests
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include "map.h"
+
+static map_domain_t *
+get_domain(ip4_address_t * ip4_prefix, u8 ip4_prefix_len,
+ ip6_address_t * ip6_prefix, u8 ip6_prefix_len,
+ ip6_address_t * ip6_src, u8 ip6_src_len,
+ u8 ea_bits_len, u8 psid_offset,
+ u8 psid_length, u16 mtu, u8 flags)
+{
+ map_domain_t * d = malloc(sizeof(*d));
+ u8 suffix_len;
+
+ /* EA bits must be within the first 64 bits */
+ if (ea_bits_len > 0 && (ip6_prefix_len + ea_bits_len) > 64)
+ return NULL;
+
+ /* Init domain struct */
+ d->ip4_prefix.as_u32 = ip4_prefix->as_u32;
+ d->ip4_prefix_len = ip4_prefix_len;
+ d->ip6_prefix = *ip6_prefix;
+ d->ip6_prefix_len = ip6_prefix_len;
+ d->ip6_src = *ip6_src;
+ d->ip6_src_len = ip6_src_len;
+ d->ea_bits_len = ea_bits_len;
+ d->psid_offset = psid_offset;
+ d->psid_length = psid_length;
+ d->mtu = mtu;
+ d->flags = flags;
+
+ /* How many, and which bits to grab from the IPv4 DA */
+ if (ip4_prefix_len + ea_bits_len < 32)
+ {
+ d->flags |= MAP_DOMAIN_PREFIX;
+ d->suffix_shift = 32 - ip4_prefix_len - ea_bits_len;
+ suffix_len = ea_bits_len;
+ }
+ else
+ {
+ d->suffix_shift = 0;
+ suffix_len = 32 - ip4_prefix_len;
+ }
+ d->suffix_mask = (1 << suffix_len) - 1;
+
+ d->psid_shift = 16 - psid_length - psid_offset;
+ d->psid_mask = (1 << d->psid_length) - 1;
+
+ if (ip6_prefix_len + suffix_len + d->psid_length > 64)
+ return NULL;
+
+ d->ea_shift = 64 - ip6_prefix_len - suffix_len - d->psid_length;
+
+ return d;
+}
+
+
+/*
+ * VPP-340:
+ * map_add_domain ip4-pfx 20.0.0.0/8 ip6-pfx 2001:db8::/40 ip6-src 2001:db8:ffff::/96 ea-bits-len 24 psid-offset 0 psid-len 0 map-t
+ * IPv4 src = 100.0.0.1
+ * IPv4 dst = 20.169.201.219
+ * UDP dest port = 1232
+ * IPv6 src = 2001:db8:ffff::6400:1
+ * IPv6 dst = a9c9:dfb8::14a9:c9db:0
+ * a9c9:dfb8::14a9:c9db:0 != 2001:db8:a9:c9db:0:14a9:c9db:0
+ */
+static void
+test_map_t_destaddr (void)
+{
+ ip4_address_t ip4_prefix;
+ ip6_address_t ip6_prefix;
+ ip6_address_t ip6_src;
+
+ ip4_prefix.as_u32 = clib_host_to_net_u32(0x14000000);
+ ip6_prefix.as_u64[0] = clib_host_to_net_u64(0x20010db800000000);
+ ip6_prefix.as_u64[1] = 0;
+ ip6_src.as_u64[0] = clib_host_to_net_u64(0x20010db8ffff0000);
+ map_domain_t * d = get_domain (&ip4_prefix, 8, &ip6_prefix, 40, &ip6_src, 96, 24, 0, 0, 0, MAP_DOMAIN_TRANSLATION);
+
+ ip6_address_t dst6;
+
+ dst6.as_u64[0] = map_get_pfx(d, 0x14a9c9db, 1232);
+ dst6.as_u64[1] = map_get_sfx(d, 0x14a9c9db, 1232);
+ assert(dst6.as_u64[0] == 0x20010db800a9c9db);
+ assert(dst6.as_u64[1] == 0x000014a9c9db0000);
+}
+
+/*
+ * VPP-228
+ * ip4-pfx 20.0.0.0/8
+ * ip6-pfx 2001:db8::/<n>
+ * ip6-src 2001:db8:ffff::1
+ * ea-bits-len 16 psid-offset 6 psid-len 8
+ * 20.169.201.219 port 1232
+ */
+static void
+test_map_eabits (void)
+{
+ ip4_address_t ip4_prefix;
+ ip6_address_t ip6_prefix;
+ ip6_address_t ip6_src;
+ ip6_address_t dst6;
+
+ ip4_prefix.as_u32 = clib_host_to_net_u32(0x14000000);
+ ip6_prefix.as_u64[0] = clib_host_to_net_u64(0x20010db800000000);
+ ip6_prefix.as_u64[1] = 0;
+ ip6_src.as_u64[0] = clib_host_to_net_u64(0x20010db8ffff0000);
+ ip6_src.as_u64[1] = clib_host_to_net_u64(0x0000000000000001);
+ map_domain_t * d = get_domain (&ip4_prefix, 16, &ip6_prefix, 48, &ip6_src,
+ 128, 16, 6, 8, 0, 0);
+ assert(!d);
+
+ //20.0.0.0/8 2001:db8::/32 4 2001:db8:a000::14a0:0:0
+ d = get_domain (&ip4_prefix, 8, &ip6_prefix, 32, &ip6_src,
+ 128, 4, 0, 0, 0, 0);
+ dst6.as_u64[0] = map_get_pfx(d, 0x14a9c9db, 1232);
+ dst6.as_u64[1] = map_get_sfx(d, 0x14a9c9db, 1232);
+ assert(dst6.as_u64[0] == 0x20010db8a0000000);
+ assert(dst6.as_u64[1] == 0x000014a000000000);
+
+ //20.0.0.0/8 2001:db8::/32 8 2001:db8:a900::14a9:0:0
+ d = get_domain (&ip4_prefix, 8, &ip6_prefix, 32, &ip6_src,
+ 128, 8, 0, 0, 0, 0);
+ dst6.as_u64[0] = map_get_pfx(d, 0x14a9c9db, 1232);
+ dst6.as_u64[1] = map_get_sfx(d, 0x14a9c9db, 1232);
+ assert(dst6.as_u64[0] == 0x20010db8a9000000);
+ assert(dst6.as_u64[1] == 0x000014a900000000);
+
+ //20.0.0.0/8 2001:db8::/32 10 2001:db8:a9c0::14a9:c000:0
+ d = get_domain (&ip4_prefix, 8, &ip6_prefix, 32, &ip6_src,
+ 128, 10, 0, 0, 0, 0);
+ dst6.as_u64[0] = map_get_pfx(d, 0x14a9c9db, 1232);
+ dst6.as_u64[1] = map_get_sfx(d, 0x14a9c9db, 1232);
+ assert(dst6.as_u64[0] == 0x20010db8a9c00000);
+ assert(dst6.as_u64[1] == 0x000014a9c0000000);
+
+ //20.0.0.0/8 2001:db8::/32 16 2001:db8:a9c9::14a9:c900:0
+ d = get_domain (&ip4_prefix, 8, &ip6_prefix, 32, &ip6_src,
+ 128, 16, 0, 0, 0, 0);
+ dst6.as_u64[0] = map_get_pfx(d, 0x14a9c9db, 1232);
+ dst6.as_u64[1] = map_get_sfx(d, 0x14a9c9db, 1232);
+ assert(dst6.as_u64[0] == 0x20010db8a9c90000);
+ assert(dst6.as_u64[1] == 0x000014a9c9000000);
+
+ //20.0.0.0/8 2001:db8::/32 20 2001:db8:a9c9:d000:0:14a9:c9d0:0
+ d = get_domain (&ip4_prefix, 8, &ip6_prefix, 32, &ip6_src,
+ 128, 20, 0, 0, 0, 0);
+ dst6.as_u64[0] = map_get_pfx(d, 0x14a9c9db, 1232);
+ dst6.as_u64[1] = map_get_sfx(d, 0x14a9c9db, 1232);
+ assert(dst6.as_u64[0] == 0x20010db8a9c9d000);
+ assert(dst6.as_u64[1] == 0x000014a9c9d00000);
+
+ //20.0.0.0/8 2001:db8::/32 23 2001:db8:a9c9:da00:0:14a9:c9da:0
+ d = get_domain (&ip4_prefix, 8, &ip6_prefix, 32, &ip6_src,
+ 128, 23, 0, 0, 0, 0);
+ dst6.as_u64[0] = map_get_pfx(d, 0x14a9c9db, 1232);
+ dst6.as_u64[1] = map_get_sfx(d, 0x14a9c9db, 1232);
+ assert(dst6.as_u64[0] == 0x20010db8a9c9da00);
+ assert(dst6.as_u64[1] == 0x000014a9c9da0000);
+
+ //20.169.201.0/24 2001:db8::/32 7 2001:db8:da00::14a9:c9da:0
+ d = get_domain (&ip4_prefix, 8, &ip6_prefix, 32, &ip6_src,
+ 128, 7, 0, 0, 0, 0);
+ dst6.as_u64[0] = map_get_pfx(d, 0x14a9c9db, 1232);
+ dst6.as_u64[1] = map_get_sfx(d, 0x14a9c9db, 1232);
+ assert(dst6.as_u64[0] == 0x20010db8a8000000);
+ assert(dst6.as_u64[1] == 0x000014a800000000);
+}
+
+#define foreach_test_case \
+ _(map_t_destaddr) \
+ _(map_eabits)
+
+static void
+run_tests (void)
+{
+#define _(_test_name) \
+ test_ ## _test_name ();
+
+ foreach_test_case
+#undef _
+}
+
+int main()
+{
+ run_tests ();
+ return 0;
+}
diff --git a/src/vnet/mcast/mcast.c b/src/vnet/mcast/mcast.c
new file mode 100644
index 00000000000..55be89ae907
--- /dev/null
+++ b/src/vnet/mcast/mcast.c
@@ -0,0 +1,565 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/mcast/mcast.h>
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/icmp46_packet.h>
+#include <vnet/ip/ip4.h>
+
+typedef struct {
+ u32 sw_if_index;
+ u32 next_index;
+ u32 group_index;
+} mcast_prep_trace_t;
+
+/* packet trace format function */
+static u8 * format_mcast_prep_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ mcast_prep_trace_t * t = va_arg (*args, mcast_prep_trace_t *);
+
+ s = format (s, "MCAST_PREP: group %d, next index %d, tx_sw_if_index %d",
+ t->group_index, t->next_index, t->sw_if_index);
+ return s;
+}
+
+mcast_main_t mcast_main;
+vlib_node_registration_t mcast_prep_node;
+vlib_node_registration_t mcast_recycle_node;
+
+#define foreach_mcast_prep_error \
+_(MCASTS, "Multicast Packets")
+
+typedef enum {
+#define _(sym,str) MCAST_PREP_ERROR_##sym,
+ foreach_mcast_prep_error
+#undef _
+ MCAST_PREP_N_ERROR,
+} mcast_prep_error_t;
+
+static char * mcast_prep_error_strings[] = {
+#define _(sym,string) string,
+ foreach_mcast_prep_error
+#undef _
+};
+
+typedef enum {
+ MCAST_PREP_NEXT_DROP,
+ MCAST_PREP_N_NEXT,
+} mcast_prep_next_t;
+
+static uword
+mcast_prep_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, * from, * to_next;
+ mcast_prep_next_t next_index;
+ mcast_main_t * mcm = &mcast_main;
+ vlib_node_t *n = vlib_get_node (vm, mcast_prep_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t * em = &vm->error_main;
+ ip4_main_t * im = &ip4_main;
+ ip_lookup_main_t * lm = &im->lookup_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (0 && n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+ next0 = 0;
+ sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
+ next1 = 0;
+
+ /* $$$$ your message in this space. Process 2 x pkts */
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ mcast_prep_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ mcast_prep_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ }
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0, adj_index0;
+ mcast_group_t * g0;
+ ip_adjacency_t * adj0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+ adj0 = ip_get_adjacency (lm, adj_index0);
+ vnet_buffer(b0)->mcast.mcast_group_index = adj0->mcast_group_index;
+ g0 = pool_elt_at_index (mcm->groups, adj0->mcast_group_index);
+
+ /*
+ * Handle the degenerate single-copy case
+ * If we don't change the freelist, the packet will never
+ * make it to the recycle node...
+ */
+ if (PREDICT_TRUE(vec_len (g0->members) > 1))
+ {
+ /* Save the original free list index */
+ vnet_buffer(b0)->mcast.original_free_list_index =
+ b0->free_list_index;
+
+ /* Swap in the multicast recycle list */
+ b0->free_list_index = mcm->mcast_recycle_list_index;
+
+ /*
+ * Make sure that intermediate "frees" don't screw up
+ */
+ b0->recycle_count = vec_len (g0->members);
+ b0->flags |= VLIB_BUFFER_RECYCLE;
+
+ /* Set up for the recycle node */
+ vnet_buffer(b0)->mcast.mcast_current_index = 1;
+ }
+
+ /* Transmit the pkt on the first interface */
+ next0 = g0->members[0].prep_and_recycle_node_next_index;
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] =
+ g0->members[0].tx_sw_if_index;
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED))) {
+ mcast_prep_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->next_index = next0;
+ t->sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_TX];
+ t->group_index = vnet_buffer(b0)->mcast.mcast_group_index;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ em->counters[node_counter_base_index + MCAST_PREP_ERROR_MCASTS] +=
+ frame->n_vectors;
+
+ return frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (mcast_prep_node) = {
+ .function = mcast_prep_node_fn,
+ .name = "mcast_prep",
+ .vector_size = sizeof (u32),
+ .format_trace = format_mcast_prep_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(mcast_prep_error_strings),
+ .error_strings = mcast_prep_error_strings,
+
+ .n_next_nodes = MCAST_PREP_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [MCAST_PREP_NEXT_DROP] = "error-drop",
+ },
+};
+
+typedef struct {
+ u32 sw_if_index;
+ u32 next_index;
+ u32 current_member;
+ u32 group_index;
+} mcast_recycle_trace_t;
+
+static u8 * format_mcast_recycle_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ mcast_recycle_trace_t * t = va_arg (*args, mcast_recycle_trace_t *);
+
+ s = format (s,
+"MCAST_R: group %d, current member %d next (node) index %d, tx_sw_if_index %d",
+ t->group_index, t->current_member, t->next_index, t->sw_if_index);
+ return s;
+}
+
+#define foreach_mcast_recycle_error \
+_(RECYCLES, "Multicast Recycles")
+
+typedef enum {
+#define _(sym,str) MCAST_RECYCLE_ERROR_##sym,
+ foreach_mcast_recycle_error
+#undef _
+ MCAST_RECYCLE_N_ERROR,
+} mcast_recycle_error_t;
+
+static char * mcast_recycle_error_strings[] = {
+#define _(sym,string) string,
+ foreach_mcast_recycle_error
+#undef _
+};
+
+typedef enum {
+ MCAST_RECYCLE_NEXT_DROP,
+ MCAST_RECYCLE_N_NEXT,
+} mcast_recycle_next_t;
+
+static uword
+mcast_recycle_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, * from, * to_next;
+ mcast_recycle_next_t next_index;
+ mcast_main_t * mcm = &mcast_main;
+ vlib_node_t *n = vlib_get_node (vm, mcast_recycle_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t * em = &vm->error_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (0 && n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+ next0 = 0;
+ sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
+ next1 = 0;
+
+ /* $$$$ your message in this space. Process 2 x pkts */
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ mcast_recycle_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ mcast_recycle_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ }
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0;
+ u32 current_member0;
+ mcast_group_t * g0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ g0 = pool_elt_at_index (mcm->groups,
+ vnet_buffer(b0)->mcast.mcast_group_index);
+
+ /* No more replicas? */
+ if (b0->recycle_count == 1)
+ {
+ /* Restore the original free list index */
+ b0->free_list_index =
+ vnet_buffer(b0)->mcast.original_free_list_index;
+ b0->flags &= ~(VLIB_BUFFER_RECYCLE);
+ }
+ current_member0 = vnet_buffer(b0)->mcast.mcast_current_index;
+
+ next0 =
+ g0->members[current_member0].prep_and_recycle_node_next_index;
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] =
+ g0->members[current_member0].tx_sw_if_index;
+
+ vnet_buffer(b0)->mcast.mcast_current_index =
+ current_member0 + 1;
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED))) {
+ mcast_recycle_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->next_index = next0;
+ t->sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_TX];
+ t->group_index = vnet_buffer(b0)->mcast.mcast_group_index;
+ t->current_member = current_member0;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ em->counters[node_counter_base_index + MCAST_RECYCLE_ERROR_RECYCLES] +=
+ frame->n_vectors;
+
+ return frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (mcast_recycle_node) = {
+ .function = mcast_recycle_node_fn,
+ .name = "mcast-recycle",
+ .vector_size = sizeof (u32),
+ .format_trace = format_mcast_recycle_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(mcast_recycle_error_strings),
+ .error_strings = mcast_recycle_error_strings,
+
+ .n_next_nodes = MCAST_RECYCLE_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [MCAST_RECYCLE_NEXT_DROP] = "error-drop",
+ },
+};
+
+/*
+ * fish packets back from the recycle queue/freelist and hand them to the
+ * mcast-recycle node, skipping chained buffers so only chain heads are enqueued
+ */
+static void mcast_recycle_callback (vlib_main_t *vm,
+ vlib_buffer_free_list_t * fl)
+{
+ vlib_frame_t * f = 0;
+ u32 n_left_from;
+ u32 n_left_to_next = 0;
+ u32 n_this_frame = 0;
+ u32 * from;
+ u32 * to_next;
+ u32 bi0, pi0;
+ vlib_buffer_t *b0;
+ vlib_buffer_t *bnext0;
+ int i;
+
+ /* aligned, unaligned buffers */
+ for (i = 0; i < 2; i++)
+ {
+ if (i == 0)
+ {
+ from = fl->aligned_buffers;
+ n_left_from = vec_len (from);
+ }
+ else
+ {
+ from = fl->unaligned_buffers;
+ n_left_from = vec_len (from);
+ }
+
+ while (n_left_from > 0)
+ {
+ if (PREDICT_FALSE(n_left_to_next == 0))
+ {
+ if (f)
+ {
+ f->n_vectors = n_this_frame;
+ vlib_put_frame_to_node (vm, mcast_recycle_node.index, f);
+ }
+
+ f = vlib_get_frame_to_node (vm, mcast_recycle_node.index);
+ to_next = vlib_frame_vector_args (f);
+ n_left_to_next = VLIB_FRAME_SIZE;
+ n_this_frame = 0;
+ }
+
+ bi0 = from[0];
+ if (PREDICT_TRUE(n_left_from > 1))
+ {
+ pi0 = from[1];
+ vlib_prefetch_buffer_with_index(vm,pi0,LOAD);
+ }
+
+ bnext0 = b0 = vlib_get_buffer (vm, bi0);
+
+ while (bnext0->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ from += 1;
+ n_left_from -= 1;
+ bnext0 = vlib_get_buffer (vm, bnext0->next_buffer);
+ }
+ to_next[0] = bi0;
+
+ if (CLIB_DEBUG > 0)
+ vlib_buffer_set_known_state (vm, bi0, VLIB_BUFFER_KNOWN_ALLOCATED);
+
+ from++;
+ to_next++;
+ n_this_frame++;
+ n_left_to_next--;
+ n_left_from--;
+ }
+ }
+
+ vec_reset_length (fl->aligned_buffers);
+ vec_reset_length (fl->unaligned_buffers);
+
+ if (f)
+ {
+ ASSERT(n_this_frame);
+ f->n_vectors = n_this_frame;
+ vlib_put_frame_to_node (vm, mcast_recycle_node.index, f);
+ }
+}
+
+clib_error_t *mcast_init (vlib_main_t *vm)
+{
+ mcast_main_t * mcm = &mcast_main;
+ vlib_buffer_main_t * bm = vm->buffer_main;
+ vlib_buffer_free_list_t * fl;
+
+ mcm->vlib_main = vm;
+ mcm->vnet_main = vnet_get_main();
+ mcm->mcast_recycle_list_index =
+ vlib_buffer_create_free_list (vm, 1024 /* fictional */, "mcast-recycle");
+
+ fl = pool_elt_at_index (bm->buffer_free_list_pool,
+ mcm->mcast_recycle_list_index);
+
+ fl->buffers_added_to_freelist_function = mcast_recycle_callback;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (mcast_init);
+
+
diff --git a/src/vnet/mcast/mcast.h b/src/vnet/mcast/mcast.h
new file mode 100644
index 00000000000..96e514427c6
--- /dev/null
+++ b/src/vnet/mcast/mcast.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_vnet_mcast_h__
+#define __included_vnet_mcast_h__
+
+#include <vnet/vnet.h>
+#include <vlib/buffer.h>
+#include <vlib/buffer_funcs.h>
+
+typedef struct {
+ /* Arrange for both prep and recycle nodes to have identical
+ next indices for a given output interface */
+ u32 prep_and_recycle_node_next_index;
+
+ /* Show command, etc. */
+ u32 tx_sw_if_index;
+} mcast_group_member_t;
+
+typedef struct {
+ /* vector of group members */
+ mcast_group_member_t * members;
+} mcast_group_t;
+
+typedef struct {
+ /* pool of multicast (interface) groups */
+ mcast_group_t * groups;
+
+ /* multicast "free" list, aka recycle list */
+ u32 mcast_recycle_list_index;
+
+ /* convenience */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} mcast_main_t;
+
+mcast_main_t mcast_main;
+
+#endif /* __included_vnet_mcast_h__ */
diff --git a/src/vnet/mcast/mcast_test.c b/src/vnet/mcast/mcast_test.c
new file mode 100644
index 00000000000..be80c9fc982
--- /dev/null
+++ b/src/vnet/mcast/mcast_test.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/mcast/mcast.h>
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/lookup.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/icmp46_packet.h>
+#include <vnet/ip/ip4.h>
+#include <vnet/mcast/mcast.h>
+
+typedef struct {
+ /* convenience */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+ mcast_main_t * mcast_main;
+} mcast_test_main_t;
+
+mcast_test_main_t mcast_test_main;
+vlib_node_registration_t mcast_prep_node;
+vlib_node_registration_t mcast_recycle_node;
+
+static clib_error_t *
+mcast_test_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ /* u8 *rewrite_data; */
+ /* mcast_test_main_t * mtm = &mcast_test_main; */
+ /* mcast_main_t * mcm = mtm->mcast_main; */
+ /* ip_adjacency_t adj; */
+ /* u32 adj_index; */
+ /* mcast_group_t * g; */
+ /* mcast_group_member_t * member; */
+ /* unformat_input_t _line_input, * line_input = &_line_input; */
+ /* ip4_address_t dst_addr, zero; */
+ /* ip4_main_t * im = &ip4_main; */
+ /* ip_lookup_main_t * lm = &im->lookup_main; */
+
+ /* /\* Get a line of input. *\/ */
+ /* if (! unformat_user (input, unformat_line_input, line_input)) */
+ /* return 0; */
+
+ /* pool_get (mcm->groups, g); */
+ /* memset (g, 0, sizeof (*g)); */
+
+ /* while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) */
+ /* { */
+ /* vnet_hw_interface_t *hw; */
+ /* u32 next, sw_if_index; */
+
+ /* if (unformat (line_input, "%U", unformat_vnet_sw_interface, */
+ /* mtm->vnet_main, &sw_if_index)) */
+ /* { */
+ /* vec_add2 (g->members, member, 1); */
+ /* member->tx_sw_if_index = sw_if_index; */
+
+ /* hw = vnet_get_sup_hw_interface (mtm->vnet_main, */
+ /* sw_if_index); */
+
+ /* next = vlib_node_add_next (mtm->vlib_main, */
+ /* mcast_prep_node.index, */
+ /* hw->output_node_index); */
+
+ /* /\* Required to be the same next index... *\/ */
+ /* vlib_node_add_next_with_slot (mtm->vlib_main, */
+ /* mcast_recycle_node.index, */
+ /* hw->output_node_index, next); */
+ /* member->prep_and_recycle_node_next_index = next; */
+ /* } */
+ /* else */
+ /* { */
+ /* return unformat_parse_error (line_input); */
+ /* } */
+ /* } */
+
+ /* if (vec_len (g->members) == 0) */
+ /* { */
+ /* pool_put (mcm->groups, g); */
+ /* vlib_cli_output (vm, "no group members specified"); */
+ /* return 0; */
+ /* } */
+
+
+ /* adj.lookup_next_index = IP_LOOKUP_NEXT_REWRITE; */
+ /* adj.mcast_group_index = g - mcm->groups; */
+ /* rewrite_data = format (0, "abcdefg"); */
+
+ /* vnet_rewrite_for_tunnel */
+ /* (mtm->vnet_main, */
+ /* (u32)~0, /\* tx_sw_if_index, we dont know yet *\/ */
+ /* ip4_rewrite_node.index, */
+ /* mcast_prep_node.index, */
+ /* &adj.rewrite_header, */
+ /* rewrite_data, vec_len(rewrite_data)); */
+
+ /* ip_add_adjacency (lm, &adj, 1 /\* one adj *\/, */
+ /* &adj_index); */
+
+ /* dst_addr.as_u32 = clib_host_to_net_u32 (0x0a000002); */
+ /* zero.as_u32 = 0; */
+
+ /* ip4_add_del_route_next_hop (im, */
+ /* IP4_ROUTE_FLAG_ADD, */
+ /* &dst_addr, */
+ /* 24 /\* mask width *\/, */
+ /* &zero /\* no next hop *\/, */
+
+ /* 0, // next hop sw if index */
+ /* 1, // weight */
+ /* adj_index, */
+ /* 0 /\* explicit fib 0 *\/); */
+
+ return 0;
+}
+
+static VLIB_CLI_COMMAND (mcast_test_command) = {
+ .path = "test mc",
+ .short_help = "test mc",
+ .function = mcast_test_command_fn,
+};
+
+clib_error_t *mcast_test_init (vlib_main_t *vm)
+{
+ mcast_test_main_t * mtm = &mcast_test_main;
+
+ mtm->vlib_main = vm;
+ mtm->vnet_main = vnet_get_main();
+ mtm->mcast_main = &mcast_main;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (mcast_test_init);
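Even with its body commented out, the file keeps the canonical VPP CLI shape: a handler with the (vm, input, cmd) signature and a VLIB_CLI_COMMAND descriptor binding it to a path. A self-contained skeleton of that pattern, with hypothetical names:

    static clib_error_t *
    my_command_fn (vlib_main_t * vm,
                   unformat_input_t * input,
                   vlib_cli_command_t * cmd)
    {
      vlib_cli_output (vm, "hello from my command");
      return 0;   /* a non-NULL clib_error_t reports a CLI error */
    }

    VLIB_CLI_COMMAND (my_command, static) = {
      .path = "test hello",
      .short_help = "test hello",
      .function = my_command_fn,
    };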
diff --git a/src/vnet/misc.c b/src/vnet/misc.c
new file mode 100644
index 00000000000..4c8c4cad5a7
--- /dev/null
+++ b/src/vnet/misc.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * misc.c: vnet misc
+ *
+ * Copyright (c) 2012 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+
+vnet_main_t vnet_main;
+
+vnet_main_t *
+vnet_get_main (void)
+{
+ return &vnet_main;
+}
+
+static uword
+vnet_local_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * f)
+{
+ ASSERT (0);
+ return f->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (vnet_local_interface_device_class) = {
+ .name = "local",
+ .tx_function = vnet_local_interface_tx,
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VNET_HW_INTERFACE_CLASS (vnet_local_interface_hw_class,static) = {
+ .name = "local",
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+vnet_main_init (vlib_main_t * vm)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error;
+ u32 hw_if_index;
+ vnet_hw_interface_t *hw;
+
+ if ((error = vlib_call_init_function (vm, vnet_interface_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, fib_module_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, ip_main_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, ip4_lookup_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, ip6_lookup_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, mpls_init)))
+ return error;
+
+ vnm->vlib_main = vm;
+
+ hw_if_index = vnet_register_interface
+ (vnm, vnet_local_interface_device_class.index, /* instance */ 0,
+ vnet_local_interface_hw_class.index, /* instance */ 0);
+ hw = vnet_get_hw_interface (vnm, hw_if_index);
+
+ vnm->local_interface_hw_if_index = hw_if_index;
+ vnm->local_interface_sw_if_index = hw->sw_if_index;
+
+ /* the local interface is used as an input interface when decapping from
+ * an IPsec tunnel, so it needs to be IP enabled */
+ ip4_sw_interface_enable_disable (hw->sw_if_index, 1);
+ ip6_sw_interface_enable_disable (hw->sw_if_index, 1);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (vnet_main_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
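vnet_main_init illustrates the vlib init-ordering idiom: vlib_call_init_function runs a named init function at most once and propagates its error, so a module states its prerequisites explicitly instead of depending on registration order. In sketch form, with a hypothetical module:

    static clib_error_t *
    my_module_init (vlib_main_t * vm)
    {
      clib_error_t *error;

      /* run our prerequisite exactly once, whoever calls first */
      if ((error = vlib_call_init_function (vm, ip_main_init)))
        return error;

      /* ... module-specific setup ... */
      return 0;
    }

    VLIB_INIT_FUNCTION (my_module_init);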
diff --git a/src/vnet/mpls/error.def b/src/vnet/mpls/error.def
new file mode 100644
index 00000000000..de8b9665dfb
--- /dev/null
+++ b/src/vnet/mpls/error.def
@@ -0,0 +1,31 @@
+/*
+ * mpls_error.def: mpls errors
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+mpls_error (NONE, "no error")
+mpls_error (UNKNOWN_PROTOCOL, "unknown protocol")
+mpls_error (UNSUPPORTED_VERSION, "unsupported version")
+mpls_error (PKTS_DECAP, "MPLS-GRE input packets decapsulated")
+mpls_error (PKTS_ENCAP, "MPLS-GRE output packets encapsulated")
+mpls_error (NO_LABEL, "MPLS-GRE no label for fib/dst")
+mpls_error (TTL_EXPIRED, "MPLS-GRE ttl expired")
+mpls_error (S_NOT_SET, "MPLS-GRE s-bit not set")
+mpls_error (BAD_LABEL, "invalid FIB id in label")
+mpls_error (NOT_IP4, "non-ip4 packets dropped")
+mpls_error (DISALLOWED_FIB, "disallowed FIB id")
+mpls_error (NOT_ENABLED, "MPLS not enabled")
+mpls_error (DROP, "MPLS DROP DPO")
+mpls_error (PUNT, "MPLS PUNT DPO")
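error.def is an X-macro list: each consumer defines mpls_error() to pick out the field it needs and then includes the file, so the error enum and the counter strings can never drift apart. The two expansions used elsewhere in this patch:

    /* mpls.h: expand to symbolic error codes */
    typedef enum {
    #define mpls_error(n,s) MPLS_ERROR_##n,
    #include <vnet/mpls/error.def>
    #undef mpls_error
      MPLS_N_ERROR,
    } mpls_error_t;

    /* mpls_lookup.c / mpls_output.c: expand the same list to strings */
    static char * mpls_error_strings[] = {
    #define mpls_error(n,s) s,
    #include <vnet/mpls/error.def>
    #undef mpls_error
    };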
diff --git a/src/vnet/mpls/interface.c b/src/vnet/mpls/interface.c
new file mode 100644
index 00000000000..692a2d1eb62
--- /dev/null
+++ b/src/vnet/mpls/interface.c
@@ -0,0 +1,121 @@
+/*
+ * interface.c: mpls interfaces
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/mpls/mpls.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/adj/adj_midchain.h>
+#include <vnet/dpo/classify_dpo.h>
+
+
+u8
+mpls_sw_interface_is_enabled (u32 sw_if_index)
+{
+ mpls_main_t * mm = &mpls_main;
+
+ if (vec_len(mm->mpls_enabled_by_sw_if_index) <= sw_if_index)
+ return (0);
+
+ return (mm->mpls_enabled_by_sw_if_index[sw_if_index]);
+}
+
+void
+mpls_sw_interface_enable_disable (mpls_main_t * mm,
+ u32 sw_if_index,
+ u8 is_enable)
+{
+ fib_node_index_t lfib_index;
+
+ vec_validate_init_empty (mm->mpls_enabled_by_sw_if_index, sw_if_index, 0);
+
+ /*
+ * enable/disable only on the 1<->0 transition
+ */
+ if (is_enable)
+ {
+ if (1 != ++mm->mpls_enabled_by_sw_if_index[sw_if_index])
+ return;
+
+ lfib_index = fib_table_find_or_create_and_lock(FIB_PROTOCOL_MPLS,
+ MPLS_FIB_DEFAULT_TABLE_ID);
+ vec_validate(mm->fib_index_by_sw_if_index, 0);
+ mm->fib_index_by_sw_if_index[sw_if_index] = lfib_index;
+ }
+ else
+ {
+ ASSERT(mm->mpls_enabled_by_sw_if_index[sw_if_index] > 0);
+ if (0 != --mm->mpls_enabled_by_sw_if_index[sw_if_index])
+ return;
+
+ fib_table_unlock(mm->fib_index_by_sw_if_index[sw_if_index],
+ FIB_PROTOCOL_MPLS);
+ }
+
+ vnet_feature_enable_disable ("mpls-input", "mpls-lookup", sw_if_index,
+ is_enable, 0, 0);
+
+}
+
+static clib_error_t *
+mpls_interface_enable_disable (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ clib_error_t * error = 0;
+ u32 sw_if_index, enable;
+
+ sw_if_index = ~0;
+
+ if (! unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ if (unformat (input, "enable"))
+ enable = 1;
+ else if (unformat (input, "disable"))
+ enable = 0;
+ else
+ {
+ error = clib_error_return (0, "expected 'enable' or 'disable', got `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+
+ mpls_sw_interface_enable_disable(&mpls_main, sw_if_index, enable);
+
+ done:
+ return error;
+}
+
+/*?
+ * This command enables an interface to accept MPLS packets
+ *
+ * @cliexpar
+ * @cliexstart{set interface mpls}
+ * set interface mpls GigabitEthernet0/8/0 enable
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND (set_interface_mpls_command, static) = {
+ .path = "set interface mpls",
+ .function = mpls_interface_enable_disable,
+ .short_help = "Enable/Disable an interface for MPLS forwarding",
+};
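Note that enable/disable is reference counted: every call moves the per-interface count, but the LFIB lock and the mpls-input feature toggle only on the 0-to-1 and 1-to-0 transitions, so enables must be balanced by disables. For example:

    mpls_main_t *mm = &mpls_main;

    mpls_sw_interface_enable_disable (mm, sw_if_index, 1); /* 0->1: locks LFIB, enables feature */
    mpls_sw_interface_enable_disable (mm, sw_if_index, 1); /* 1->2: no-op */
    mpls_sw_interface_enable_disable (mm, sw_if_index, 0); /* 2->1: still enabled */
    mpls_sw_interface_enable_disable (mm, sw_if_index, 0); /* 1->0: unlocks LFIB, disables feature */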
diff --git a/src/vnet/mpls/mpls.c b/src/vnet/mpls/mpls.c
new file mode 100644
index 00000000000..0e610e175cc
--- /dev/null
+++ b/src/vnet/mpls/mpls.c
@@ -0,0 +1,511 @@
+/*
+ * mpls.c: mpls
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/mpls/mpls.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/mpls_fib.h>
+
+const static char* mpls_eos_bit_names[] = MPLS_EOS_BITS;
+
+mpls_main_t mpls_main;
+
+u8 * format_mpls_unicast_label (u8 * s, va_list * args)
+{
+ mpls_label_t label = va_arg (*args, mpls_label_t);
+
+ switch (label) {
+ case MPLS_IETF_IPV4_EXPLICIT_NULL_LABEL:
+ s = format (s, "%s", MPLS_IETF_IPV4_EXPLICIT_NULL_STRING);
+ break;
+ case MPLS_IETF_ROUTER_ALERT_LABEL:
+ s = format (s, "%s", MPLS_IETF_ROUTER_ALERT_STRING);
+ break;
+ case MPLS_IETF_IPV6_EXPLICIT_NULL_LABEL:
+ s = format (s, "%s", MPLS_IETF_IPV6_EXPLICIT_NULL_STRING);
+ break;
+ case MPLS_IETF_IMPLICIT_NULL_LABEL:
+ s = format (s, "%s", MPLS_IETF_IMPLICIT_NULL_STRING);
+ break;
+ case MPLS_IETF_ELI_LABEL:
+ s = format (s, "%s", MPLS_IETF_ELI_STRING);
+ break;
+ case MPLS_IETF_GAL_LABEL:
+ s = format (s, "%s", MPLS_IETF_GAL_STRING);
+ break;
+ default:
+ s = format (s, "%d", label);
+ break;
+ }
+ return s;
+}
+
+uword unformat_mpls_unicast_label (unformat_input_t * input, va_list * args)
+{
+ mpls_label_t *label = va_arg (*args, mpls_label_t*);
+
+ if (unformat (input, MPLS_IETF_IPV4_EXPLICIT_NULL_STRING))
+ *label = MPLS_IETF_IPV4_EXPLICIT_NULL_LABEL;
+ else if (unformat (input, MPLS_IETF_IPV6_EXPLICIT_NULL_STRING))
+ *label = MPLS_IETF_IPV6_EXPLICIT_NULL_LABEL;
+ else if (unformat (input, MPLS_IETF_ROUTER_ALERT_STRING))
+ *label = MPLS_IETF_ROUTER_ALERT_LABEL;
+ else if (unformat (input, MPLS_IETF_IMPLICIT_NULL_STRING))
+ *label = MPLS_IETF_IMPLICIT_NULL_LABEL;
+ else if (unformat (input, "%d", label))
+ ;
+ else
+ return (0);
+
+ return (1);
+}
+
+u8 * format_mpls_eos_bit (u8 * s, va_list * args)
+{
+ mpls_eos_bit_t eb = va_arg (*args, mpls_eos_bit_t);
+
+ ASSERT(eb <= MPLS_EOS);
+
+ s = format(s, "%s", mpls_eos_bit_names[eb]);
+
+ return (s);
+}
+
+u8 * format_mpls_header (u8 * s, va_list * args)
+{
+ mpls_unicast_header_t hdr = va_arg (*args, mpls_unicast_header_t);
+
+ return (format(s, "[%U:%d:%d:%U]",
+ format_mpls_unicast_label,
+ vnet_mpls_uc_get_label(hdr.label_exp_s_ttl),
+ vnet_mpls_uc_get_ttl(hdr.label_exp_s_ttl),
+ vnet_mpls_uc_get_exp(hdr.label_exp_s_ttl),
+ format_mpls_eos_bit,
+ vnet_mpls_uc_get_s(hdr.label_exp_s_ttl)));
+}
+
+uword
+unformat_mpls_header (unformat_input_t * input, va_list * args)
+{
+ u8 ** result = va_arg (*args, u8 **);
+ mpls_unicast_header_t _h, * h = &_h;
+ u32 label, label_exp_s_ttl;
+
+ if (! unformat (input, "MPLS %d", &label))
+ return 0;
+
+ label_exp_s_ttl = (label<<12) | (1<<8) /* s-bit */ | 0xFF;
+ h->label_exp_s_ttl = clib_host_to_net_u32 (label_exp_s_ttl);
+
+ /* Add mpls header to result. */
+ {
+ void * p;
+ u32 h_n_bytes = sizeof (h[0]);
+
+ vec_add2 (*result, p, h_n_bytes);
+ clib_memcpy (p, h, h_n_bytes);
+ }
+
+ return 1;
+}
+
+uword
+unformat_mpls_label_net_byte_order (unformat_input_t * input,
+ va_list * args)
+{
+ u32 * result = va_arg (*args, u32 *);
+ u32 label;
+
+ if (!unformat (input, "MPLS: label %d", &label))
+ return 0;
+
+ label = (label<<12) | (1<<8) /* s-bit set */ | 0xFF /* ttl */;
+
+ *result = clib_host_to_net_u32 (label);
+ return 1;
+}
+
+u8 * format_mpls_unicast_header_host_byte_order (u8 * s, va_list * args)
+{
+ mpls_unicast_header_t *h = va_arg(*args, mpls_unicast_header_t *);
+ u32 label = h->label_exp_s_ttl;
+
+ s = format (s, "label %d exp %d, s %d, ttl %d",
+ vnet_mpls_uc_get_label (label),
+ vnet_mpls_uc_get_exp (label),
+ vnet_mpls_uc_get_s (label),
+ vnet_mpls_uc_get_ttl (label));
+ return s;
+}
+
+u8 * format_mpls_unicast_header_net_byte_order (u8 * s, va_list * args)
+{
+ mpls_unicast_header_t *h = va_arg(*args, mpls_unicast_header_t *);
+ mpls_unicast_header_t h_host;
+
+ h_host.label_exp_s_ttl = clib_net_to_host_u32 (h->label_exp_s_ttl);
+
+ return format (s, "%U", format_mpls_unicast_header_host_byte_order,
+ &h_host);
+}
+
+int
+mpls_dest_cmp(void * a1, void * a2)
+{
+ show_mpls_fib_t * r1 = a1;
+ show_mpls_fib_t * r2 = a2;
+
+ return clib_net_to_host_u32(r1->dest) - clib_net_to_host_u32(r2->dest);
+}
+
+int
+mpls_fib_index_cmp(void * a1, void * a2)
+{
+ show_mpls_fib_t * r1 = a1;
+ show_mpls_fib_t * r2 = a2;
+
+ return r1->fib_index - r2->fib_index;
+}
+
+int
+mpls_label_cmp(void * a1, void * a2)
+{
+ show_mpls_fib_t * r1 = a1;
+ show_mpls_fib_t * r2 = a2;
+
+ return r1->label - r2->label;
+}
+
+static clib_error_t *
+vnet_mpls_local_label (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, * line_input = &_line_input;
+ fib_route_path_t *rpaths = NULL, rpath;
+ u32 table_id, is_del, is_ip;
+ mpls_label_t local_label;
+ mpls_label_t out_label;
+ clib_error_t * error;
+ mpls_eos_bit_t eos;
+ vnet_main_t * vnm;
+ fib_prefix_t pfx;
+
+ vnm = vnet_get_main();
+ error = NULL;
+ is_ip = 0;
+ table_id = 0;
+ eos = MPLS_EOS;
+ is_del = 0;
+ local_label = MPLS_LABEL_INVALID;
+ memset(&pfx, 0, sizeof(pfx));
+
+ /* Get a line of input. */
+ if (! unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ memset(&rpath, 0, sizeof(rpath));
+
+ if (unformat (line_input, "table %d", &table_id))
+ ;
+ else if (unformat (line_input, "del"))
+ is_del = 1;
+ else if (unformat (line_input, "add"))
+ is_del = 0;
+ else if (unformat (line_input, "eos"))
+ pfx.fp_eos = MPLS_EOS;
+ else if (unformat (line_input, "non-eos"))
+ pfx.fp_eos = MPLS_NON_EOS;
+ else if (unformat (line_input, "%U/%d",
+ unformat_ip4_address,
+ &pfx.fp_addr.ip4,
+ &pfx.fp_len))
+ {
+ pfx.fp_proto = FIB_PROTOCOL_IP4;
+ is_ip = 1;
+ }
+ else if (unformat (line_input, "%U/%d",
+ unformat_ip6_address,
+ &pfx.fp_addr.ip6,
+ &pfx.fp_len))
+ {
+ pfx.fp_proto = FIB_PROTOCOL_IP6;
+ is_ip = 1;
+ }
+ else if (unformat (line_input, "via %U %U weight %u",
+ unformat_ip4_address,
+ &rpath.frp_addr.ip4,
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index,
+ &rpath.frp_weight))
+ {
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ vec_add1(rpaths, rpath);
+ }
+
+ else if (unformat (line_input, "via %U %U weight %u",
+ unformat_ip6_address,
+ &rpath.frp_addr.ip6,
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index,
+ &rpath.frp_weight))
+ {
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ vec_add1(rpaths, rpath);
+ }
+
+ else if (unformat (line_input, "via %U %U",
+ unformat_ip4_address,
+ &rpath.frp_addr.ip4,
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ vec_add1(rpaths, rpath);
+ }
+
+ else if (unformat (line_input, "via %U %U",
+ unformat_ip6_address,
+ &rpath.frp_addr.ip6,
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ vec_add1(rpaths, rpath);
+ }
+ else if (unformat (line_input, "via %U next-hop-table %d",
+ unformat_ip4_address,
+ &rpath.frp_addr.ip4,
+ &rpath.frp_fib_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ vec_add1(rpaths, rpath);
+ }
+ else if (unformat (line_input, "via %U next-hop-table %d",
+ unformat_ip6_address,
+ &rpath.frp_addr.ip6,
+ &rpath.frp_fib_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ vec_add1(rpaths, rpath);
+ }
+ else if (unformat (line_input, "via %U",
+ unformat_ip4_address,
+ &rpath.frp_addr.ip4))
+ {
+ /*
+ * the recursive next-hops are by default in the same table
+ * as the prefix
+ */
+ rpath.frp_fib_index = table_id;
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ vec_add1(rpaths, rpath);
+ }
+ else if (unformat (line_input, "via %U",
+ unformat_ip6_address,
+ &rpath.frp_addr.ip6))
+ {
+ rpath.frp_fib_index = table_id;
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ vec_add1(rpaths, rpath);
+ }
+ else if (unformat (line_input, "%d", &local_label))
+ ;
+ else if (unformat (line_input,
+ "ip4-lookup-in-table %d",
+ &rpath.frp_fib_index))
+ {
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ rpath.frp_sw_if_index = FIB_NODE_INDEX_INVALID;
+ pfx.fp_payload_proto = DPO_PROTO_IP4;
+ vec_add1(rpaths, rpath);
+ }
+ else if (unformat (line_input,
+ "ip6-lookup-in-table %d",
+ &rpath.frp_fib_index))
+ {
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ rpath.frp_sw_if_index = FIB_NODE_INDEX_INVALID;
+ vec_add1(rpaths, rpath);
+ pfx.fp_payload_proto = DPO_PROTO_IP6;
+ }
+ else if (unformat (line_input,
+ "mpls-lookup-in-table %d",
+ &rpath.frp_fib_index))
+ {
+ rpath.frp_proto = FIB_PROTOCOL_MPLS;
+ rpath.frp_sw_if_index = FIB_NODE_INDEX_INVALID;
+ pfx.fp_payload_proto = DPO_PROTO_MPLS;
+ vec_add1(rpaths, rpath);
+ }
+ else if (unformat (line_input, "out-label %U",
+ unformat_mpls_unicast_label,
+ &out_label))
+ {
+ if (vec_len(rpaths) == 0)
+ {
+ error = clib_error_return(0 , "Paths then labels");
+ goto done;
+ }
+ vec_add1(rpaths[vec_len(rpaths)-1].frp_label_stack, out_label);
+ }
+ else
+ {
+ error = clib_error_return (0, "unknown input: %U",
+ format_unformat_error, line_input);
+ goto done;
+ }
+
+ }
+
+ if (MPLS_LABEL_INVALID == local_label)
+ {
+ error = clib_error_return (0, "local-label required: %U",
+ format_unformat_error, input);
+ goto done;
+ }
+
+
+ if (is_ip)
+ {
+ u32 fib_index = fib_table_find(pfx.fp_proto, table_id);
+
+ if (FIB_NODE_INDEX_INVALID == fib_index)
+ {
+ error = clib_error_return (0, "%U table-id %d does not exist",
+ format_fib_protocol, pfx.fp_proto, table_id);
+ goto done;
+ }
+
+ if (is_del)
+ {
+ fib_table_entry_local_label_remove(fib_index, &pfx, local_label);
+ }
+ else
+ {
+ fib_table_entry_local_label_add(fib_index, &pfx, local_label);
+ }
+ }
+ else
+ {
+ fib_node_index_t lfe, fib_index;
+ u32 fi;
+
+ if (NULL == rpaths)
+ {
+ error = clib_error_return(0 , "no paths");
+ goto done;
+ }
+
+ pfx.fp_proto = FIB_PROTOCOL_MPLS;
+ pfx.fp_len = 21;
+ pfx.fp_label = local_label;
+ pfx.fp_payload_proto = fib_proto_to_dpo(rpaths[0].frp_proto);
+
+ /*
+ * the CLI parsing stored table IDs, swap to FIB indices
+ */
+ if (FIB_NODE_INDEX_INVALID == rpaths[0].frp_sw_if_index)
+ {
+ fi = fib_table_id_find_fib_index(dpo_proto_to_fib(pfx.fp_payload_proto),
+ rpaths[0].frp_fib_index);
+
+ if (~0 == fi)
+ {
+ error = clib_error_return(0 , "%U Via table %d does not exist",
+ format_dpo_proto, pfx.fp_payload_proto,
+ rpaths[0].frp_fib_index);
+ goto done;
+ }
+ rpaths[0].frp_fib_index = fi;
+ }
+
+ fib_index = mpls_fib_index_from_table_id(table_id);
+
+ if (FIB_NODE_INDEX_INVALID == fib_index)
+ {
+ error = clib_error_return (0, "MPLS table-id %d does not exist",
+ table_id);
+ goto done;
+ }
+
+ lfe = fib_table_entry_path_add2(fib_index,
+ &pfx,
+ FIB_SOURCE_CLI,
+ FIB_ENTRY_FLAG_NONE,
+ rpaths);
+
+ if (FIB_NODE_INDEX_INVALID == lfe)
+ {
+ error = clib_error_return (0, "Failed to create %U-%U in MPLS table-id %d",
+ format_mpls_unicast_label, local_label,
+ format_mpls_eos_bit, eos,
+ table_id);
+ goto done;
+ }
+ }
+
+done:
+ return error;
+}
+
+VLIB_CLI_COMMAND (mpls_local_label_command, static) = {
+ .path = "mpls local-label",
+ .function = vnet_mpls_local_label,
+ .short_help = "Create/Delete MPLS local labels",
+};
+
+int
+mpls_fib_reset_labels (u32 fib_id)
+{
+ // FIXME
+ return 0;
+}
+
+static clib_error_t *
+mpls_init (vlib_main_t * vm)
+{
+ mpls_main_t * mm = &mpls_main;
+ clib_error_t * error;
+
+ mm->vlib_main = vm;
+ mm->vnet_main = vnet_get_main();
+
+ if ((error = vlib_call_init_function (vm, ip_main_init)))
+ return error;
+
+ return vlib_call_init_function (vm, mpls_input_init);
+}
+
+VLIB_INIT_FUNCTION (mpls_init);
+
+mpls_main_t * mpls_get_main (vlib_main_t * vm)
+{
+ vlib_call_init_function (vm, mpls_init);
+ return &mpls_main;
+}
+
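All of the formatters and parsers above operate on the same 32-bit label word, packed most-significant-first as label(20 bits) | EXP(3) | S(1) | TTL(8), which is why the parsers build it as (label<<12) | (1<<8) | 0xFF. A worked example for label 100 with the s-bit set and TTL 255, assuming the accessor bit positions implied by those builders:

    u32 word = (100 << 12) | (1 << 8) | 0xFF;      /* 0x000641FF, host order */

    /* vnet_mpls_uc_get_label (word) -> (word >> 12) & 0xFFFFF = 100 */
    /* vnet_mpls_uc_get_exp (word)   -> (word >> 9)  & 0x7     = 0   */
    /* vnet_mpls_uc_get_s (word)     -> (word >> 8)  & 0x1     = 1   */
    /* vnet_mpls_uc_get_ttl (word)   ->  word        & 0xFF    = 255 */

    /* on the wire the word is big-endian, hence the
     * clib_host_to_net_u32()/clib_net_to_host_u32() calls above */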
diff --git a/src/vnet/mpls/mpls.h b/src/vnet/mpls/mpls.h
new file mode 100644
index 00000000000..b6fdbce7d70
--- /dev/null
+++ b/src/vnet/mpls/mpls.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vnet_mpls_h
+#define included_vnet_mpls_h
+
+#include <vnet/vnet.h>
+#include <vnet/mpls/packet.h>
+#include <vnet/mpls/mpls_types.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/fib/fib_node.h>
+#include <vnet/adj/adj.h>
+
+typedef enum {
+#define mpls_error(n,s) MPLS_ERROR_##n,
+#include <vnet/mpls/error.def>
+#undef mpls_error
+ MPLS_N_ERROR,
+} mpls_error_t;
+
+#define MPLS_FIB_DEFAULT_TABLE_ID 0
+
+/**
+ * Type exposure is to allow the DP fast/inlined access
+ */
+#define MPLS_FIB_KEY_SIZE 21
+#define MPLS_FIB_DB_SIZE (1 << (MPLS_FIB_KEY_SIZE-1))
+
+typedef struct mpls_fib_t_
+{
+ /**
+ * A hash table of entries. 21 bit key
+ * Hash table for reduced memory footprint
+ */
+ uword * mf_entries;
+
+ /**
+ * The load-balance indices keyed by 21 bit label+eos bit.
+ * A flat array for maximum lookup performance.
+ */
+ index_t mf_lbs[MPLS_FIB_DB_SIZE];
+} mpls_fib_t;
+
+/**
+ * @brief Definition of a callback for receiving MPLS interface state change
+ * notifications
+ */
+typedef void (*mpls_interface_state_change_callback_t)(u32 sw_if_index,
+ u32 is_enable);
+
+typedef struct {
+ /* MPLS FIB index for each software interface */
+ u32 *fib_index_by_sw_if_index;
+
+ /** A pool of all the MPLS FIBs */
+ struct fib_table_t_ *fibs;
+
+ /** A hash table to lookup the mpls_fib by table ID */
+ uword *fib_index_by_table_id;
+
+ /* Feature arc indices */
+ u8 input_feature_arc_index;
+ u8 output_feature_arc_index;
+
+ /* IP4 enabled count by software interface */
+ u8 * mpls_enabled_by_sw_if_index;
+
+ /* convenience */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} mpls_main_t;
+
+extern mpls_main_t mpls_main;
+
+extern clib_error_t * mpls_feature_init(vlib_main_t * vm);
+
+format_function_t format_mpls_protocol;
+format_function_t format_mpls_encap_index;
+
+format_function_t format_mpls_eos_bit;
+format_function_t format_mpls_unicast_header_net_byte_order;
+format_function_t format_mpls_unicast_label;
+format_function_t format_mpls_header;
+
+extern vlib_node_registration_t mpls_input_node;
+extern vlib_node_registration_t mpls_policy_encap_node;
+extern vlib_node_registration_t mpls_output_node;
+extern vlib_node_registration_t mpls_midchain_node;
+
+/* Parse mpls protocol as 0xXXXX or protocol name.
+ In either host or network byte order. */
+unformat_function_t unformat_mpls_protocol_host_byte_order;
+unformat_function_t unformat_mpls_protocol_net_byte_order;
+unformat_function_t unformat_mpls_label_net_byte_order;
+unformat_function_t unformat_mpls_unicast_label;
+
+/* Parse mpls header. */
+unformat_function_t unformat_mpls_header;
+unformat_function_t unformat_pg_mpls_header;
+
+void mpls_sw_interface_enable_disable (mpls_main_t * mm,
+ u32 sw_if_index,
+ u8 is_enable);
+
+u8 mpls_sw_interface_is_enabled (u32 sw_if_index);
+
+int mpls_fib_reset_labels (u32 fib_id);
+
+#define foreach_mpls_input_next \
+_(DROP, "error-drop") \
+_(LOOKUP, "mpls-lookup")
+
+typedef enum {
+#define _(s,n) MPLS_INPUT_NEXT_##s,
+ foreach_mpls_input_next
+#undef _
+ MPLS_INPUT_N_NEXT,
+} mpls_input_next_t;
+
+#define foreach_mpls_lookup_next \
+_(DROP, "error-drop") \
+_(IP4_INPUT, "ip4-input") \
+_(L2_OUTPUT, "l2-output")
+
+// FIXME remove.
+typedef enum {
+#define _(s,n) MPLS_LOOKUP_NEXT_##s,
+ foreach_mpls_lookup_next
+#undef _
+ MPLS_LOOKUP_N_NEXT,
+} mpls_lookup_next_t;
+
+#define foreach_mpls_output_next \
+_(DROP, "error-drop")
+
+typedef enum {
+#define _(s,n) MPLS_OUTPUT_NEXT_##s,
+ foreach_mpls_output_next
+#undef _
+ MPLS_OUTPUT_N_NEXT,
+} mpls_output_next_t;
+
+typedef struct {
+ u32 fib_index;
+ u32 entry_index;
+ u32 dest;
+ u32 s_bit;
+ u32 label;
+} show_mpls_fib_t;
+
+int
+mpls_dest_cmp(void * a1, void * a2);
+
+int
+mpls_fib_index_cmp(void * a1, void * a2);
+
+int
+mpls_label_cmp(void * a1, void * a2);
+
+#endif /* included_vnet_mpls_h */
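The mpls_fib_t comments spell out a deliberate memory/speed trade: mf_entries is a sparse hash for the control plane, while mf_lbs is a flat array indexed directly in the data path. The arithmetic, assuming index_t is a 32-bit index as elsewhere in VPP:

    /* 1 << (MPLS_FIB_KEY_SIZE - 1) = 1 << 20 = 1048576 slots
     * 1048576 slots * 4 bytes (sizeof (index_t)) = 4194304 bytes,
     * i.e. ~4 MiB of flat lookup table per MPLS FIB */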
diff --git a/src/vnet/mpls/mpls_features.c b/src/vnet/mpls/mpls_features.c
new file mode 100644
index 00000000000..a7593c55b09
--- /dev/null
+++ b/src/vnet/mpls/mpls_features.c
@@ -0,0 +1,156 @@
+/*
+ * mpls_features.c: MPLS input and output features
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/mpls/mpls.h>
+
+always_inline uword
+mpls_terminate (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ int error_code)
+{
+ u32 * buffers = vlib_frame_vector_args (frame);
+ uword n_packets = frame->n_vectors;
+
+ vlib_error_drop_buffers (vm, node,
+ buffers,
+ /* stride */ 1,
+ n_packets,
+ /* next */ 0,
+ mpls_input_node.index,
+ error_code);
+
+ return n_packets;
+}
+
+static uword
+mpls_punt (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_terminate(vm, node, frame, MPLS_ERROR_PUNT));
+}
+
+VLIB_REGISTER_NODE (mpls_punt_node) = {
+ .function = mpls_punt,
+ .name = "mpls-punt",
+ .vector_size = sizeof (u32),
+
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-punt",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (mpls_punt_node, mpls_punt)
+
+static uword
+mpls_drop (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_terminate(vm, node, frame, MPLS_ERROR_DROP));
+}
+
+VLIB_REGISTER_NODE (mpls_drop_node) = {
+ .function = mpls_drop,
+ .name = "mpls-drop",
+ .vector_size = sizeof (u32),
+
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (mpls_drop_node, mpls_drop)
+
+static uword
+mpls_not_enabled (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_terminate(vm, node, frame, MPLS_ERROR_NOT_ENABLED));
+}
+
+VLIB_REGISTER_NODE (mpls_not_enabled_node) = {
+ .function = mpls_not_enabled,
+ .name = "mpls-not-enabled",
+ .vector_size = sizeof (u32),
+
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (mpls_not_enabled_node, mpls_not_enabled)
+
+VNET_FEATURE_ARC_INIT (mpls_input, static) =
+{
+ .arc_name = "mpls-input",
+ .start_nodes = VNET_FEATURES ("mpls-input"),
+ .arc_index_ptr = &mpls_main.input_feature_arc_index,
+};
+
+VNET_FEATURE_INIT (mpls_lookup, static) = {
+ .arc_name = "mpls-input",
+ .node_name = "mpls-lookup",
+ .runs_before = VNET_FEATURES ("mpls-not-enabled"),
+};
+
+VNET_FEATURE_INIT (mpls_not_enabled, static) = {
+ .arc_name = "mpls-input",
+ .node_name = "mpls-not-enabled",
+ .runs_before = VNET_FEATURES (0), /* not before any other features */
+};
+
+VNET_FEATURE_ARC_INIT (mpls_output, static) =
+{
+ .arc_name = "mpls-output",
+ .start_nodes = VNET_FEATURES ("mpls-output", "mpls-midchain"),
+ .arc_index_ptr = &mpls_main.output_feature_arc_index,
+};
+
+/* Built-in ip4 tx feature path definition */
+VNET_FEATURE_INIT (mpls_interface_output, static) = {
+ .arc_name = "mpls-output",
+ .node_name = "interface-output",
+ .runs_before = 0, /* not before any other features */
+};
+
+static clib_error_t *
+mpls_sw_interface_add_del (vnet_main_t * vnm,
+ u32 sw_if_index,
+ u32 is_add)
+{
+ mpls_main_t * mm = &mpls_main;
+
+ vec_validate_init_empty (mm->mpls_enabled_by_sw_if_index, sw_if_index, 0);
+ vec_validate_init_empty (mm->fib_index_by_sw_if_index, sw_if_index, 0);
+
+ vnet_feature_enable_disable ("mpls-input", "mpls-not-enabled", sw_if_index,
+ is_add, 0, 0);
+ vnet_feature_enable_disable ("mpls-output", "interface-output", sw_if_index,
+ is_add, 0, 0);
+
+ return /* no error */ 0;
+}
+
+VNET_SW_INTERFACE_ADD_DEL_FUNCTION (mpls_sw_interface_add_del);
+
+
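The registrations above are the standard vnet feature-arc pattern: the arc names its start node(s), each feature declares only its relative position via runs_before, and mpls-not-enabled, with runs_before = VNET_FEATURES(0), sits last as the arc's default terminal. A hypothetical feature slotted between mpls-lookup and mpls-not-enabled would register as:

    /* hypothetical example only; "mpls-filter" is not part of this patch */
    VNET_FEATURE_INIT (mpls_filter, static) = {
      .arc_name = "mpls-input",
      .node_name = "mpls-filter",
      .runs_before = VNET_FEATURES ("mpls-not-enabled"),
    };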
diff --git a/src/vnet/mpls/mpls_lookup.c b/src/vnet/mpls/mpls_lookup.c
new file mode 100644
index 00000000000..2d34cbde341
--- /dev/null
+++ b/src/vnet/mpls/mpls_lookup.c
@@ -0,0 +1,531 @@
+/*
+ * mpls_lookup.c: MPLS lookup
+ *
+ * Copyright (c) 2012-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/mpls/mpls.h>
+#include <vnet/fib/mpls_fib.h>
+#include <vnet/dpo/load_balance.h>
+
+vlib_node_registration_t mpls_lookup_node;
+
+typedef struct {
+ u32 next_index;
+ u32 lb_index;
+ u32 lfib_index;
+ u32 label_net_byte_order;
+ u32 hash;
+} mpls_lookup_trace_t;
+
+static u8 *
+format_mpls_lookup_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ mpls_lookup_trace_t * t = va_arg (*args, mpls_lookup_trace_t *);
+
+ s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %d "
+ "label %d eos %d",
+ t->next_index, t->lfib_index, t->lb_index, t->hash,
+ vnet_mpls_uc_get_label(
+ clib_net_to_host_u32(t->label_net_byte_order)),
+ vnet_mpls_uc_get_s(clib_net_to_host_u32(t->label_net_byte_order)));
+ return s;
+}
+
+/*
+ * Compute flow hash.
+ * We'll use it to select which adjacency to use for this flow, and for
+ * other per-flow decisions.
+ */
+always_inline u32
+mpls_compute_flow_hash (const mpls_unicast_header_t * hdr,
+ flow_hash_config_t flow_hash_config)
+{
+ // FIXME
+ return (vnet_mpls_uc_get_label(hdr->label_exp_s_ttl));
+}
+
+static inline uword
+mpls_lookup (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
+ u32 n_left_from, next_index, * from, * to_next;
+ mpls_main_t * mm = &mpls_main;
+ u32 cpu_index = os_get_cpu_number();
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 lbi0, next0, lfib_index0, bi0, hash_c0;
+ const mpls_unicast_header_t * h0;
+ const load_balance_t *lb0;
+ const dpo_id_t *dpo0;
+ vlib_buffer_t * b0;
+ u32 lbi1, next1, lfib_index1, bi1, hash_c1;
+ const mpls_unicast_header_t * h1;
+ const load_balance_t *lb1;
+ const dpo_id_t *dpo1;
+ vlib_buffer_t * b1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, STORE);
+ vlib_prefetch_buffer_header (p3, STORE);
+
+ CLIB_PREFETCH (p2->data, sizeof (h0[0]), STORE);
+ CLIB_PREFETCH (p3->data, sizeof (h0[0]), STORE);
+ }
+
+ bi0 = to_next[0] = from[0];
+ bi1 = to_next[1] = from[1];
+
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ h0 = vlib_buffer_get_current (b0);
+ h1 = vlib_buffer_get_current (b1);
+
+ lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
+ vnet_buffer(b0)->sw_if_index[VLIB_RX]);
+ lfib_index1 = vec_elt(mm->fib_index_by_sw_if_index,
+ vnet_buffer(b1)->sw_if_index[VLIB_RX]);
+
+ lbi0 = mpls_fib_table_forwarding_lookup (lfib_index0, h0);
+ lbi1 = mpls_fib_table_forwarding_lookup (lfib_index1, h1);
+ lb0 = load_balance_get(lbi0);
+ lb1 = load_balance_get(lbi1);
+
+ hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
+ hash_c1 = vnet_buffer(b1)->ip.flow_hash = 0;
+
+ if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
+ {
+ hash_c0 = vnet_buffer (b0)->ip.flow_hash =
+ mpls_compute_flow_hash(h0, lb0->lb_hash_config);
+ }
+ if (PREDICT_FALSE(lb1->lb_n_buckets > 1))
+ {
+ hash_c1 = vnet_buffer (b1)->ip.flow_hash =
+ mpls_compute_flow_hash(h1, lb1->lb_hash_config);
+ }
+
+ ASSERT (lb0->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb0->lb_n_buckets));
+ ASSERT (lb1->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb1->lb_n_buckets));
+
+ dpo0 = load_balance_get_bucket_i(lb0,
+ (hash_c0 &
+ (lb0->lb_n_buckets_minus_1)));
+ dpo1 = load_balance_get_bucket_i(lb1,
+ (hash_c1 &
+ (lb1->lb_n_buckets_minus_1)));
+
+ next0 = dpo0->dpoi_next_node;
+ next1 = dpo1->dpoi_next_node;
+
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+ vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi0, 1,
+ vlib_buffer_length_in_chain (vm, b0));
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi1, 1,
+ vlib_buffer_length_in_chain (vm, b1));
+
+ /*
+ * before we pop the label, copy the values we need to maintain.
+ * The label header is in network byte order.
+ * The last byte is the TTL.
+ * Bits 2 to 4 inclusive are the EXP bits.
+ */
+ vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
+ vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
+ vnet_buffer (b0)->mpls.first = 1;
+ vnet_buffer (b1)->mpls.ttl = ((char*)h1)[3];
+ vnet_buffer (b1)->mpls.exp = (((char*)h1)[2] & 0xe) >> 1;
+ vnet_buffer (b1)->mpls.first = 1;
+
+ /*
+ * pop the label that was just used in the lookup
+ */
+ vlib_buffer_advance(b0, sizeof(*h0));
+ vlib_buffer_advance(b1, sizeof(*h1));
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->lb_index = lbi0;
+ tr->lfib_index = lfib_index0;
+ tr->hash = hash_c0;
+ tr->label_net_byte_order = h0->label_exp_s_ttl;
+ }
+
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
+ b1, sizeof (*tr));
+ tr->next_index = next1;
+ tr->lb_index = lbi1;
+ tr->lfib_index = lfib_index1;
+ tr->hash = hash_c1;
+ tr->label_net_byte_order = h1->label_exp_s_ttl;
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 lbi0, next0, lfib_index0, bi0, hash_c0;
+ const mpls_unicast_header_t * h0;
+ const load_balance_t *lb0;
+ const dpo_id_t *dpo0;
+ vlib_buffer_t * b0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = vlib_buffer_get_current (b0);
+
+ lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
+ vnet_buffer(b0)->sw_if_index[VLIB_RX]);
+
+ lbi0 = mpls_fib_table_forwarding_lookup(lfib_index0, h0);
+ lb0 = load_balance_get(lbi0);
+
+ hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
+ if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
+ {
+ hash_c0 = vnet_buffer (b0)->ip.flow_hash =
+ mpls_compute_flow_hash(h0, lb0->lb_hash_config);
+ }
+
+ ASSERT (lb0->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb0->lb_n_buckets));
+
+ dpo0 = load_balance_get_bucket_i(lb0,
+ (hash_c0 &
+ (lb0->lb_n_buckets_minus_1)));
+
+ next0 = dpo0->dpoi_next_node;
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi0, 1,
+ vlib_buffer_length_in_chain (vm, b0));
+
+ /*
+ * before we pop the label, copy the values we need to maintain.
+ * The label header is in network byte order.
+ * The last byte is the TTL.
+ * Bits 2 to 4 inclusive are the EXP bits.
+ */
+ vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
+ vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
+ vnet_buffer (b0)->mpls.first = 1;
+
+ /*
+ * pop the label that was just used in the lookup
+ */
+ vlib_buffer_advance(b0, sizeof(*h0));
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->lb_index = lbi0;
+ tr->lfib_index = lfib_index0;
+ tr->hash = hash_c0;
+ tr->label_net_byte_order = h0->label_exp_s_ttl;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, mpls_lookup_node.index,
+ MPLS_ERROR_PKTS_DECAP, from_frame->n_vectors);
+ return from_frame->n_vectors;
+}
+
+static char * mpls_error_strings[] = {
+#define mpls_error(n,s) s,
+#include "error.def"
+#undef mpls_error
+};
+
+VLIB_REGISTER_NODE (mpls_lookup_node) = {
+ .function = mpls_lookup,
+ .name = "mpls-lookup",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .n_errors = MPLS_N_ERROR,
+ .error_strings = mpls_error_strings,
+
+ .sibling_of = "ip4-lookup",
+
+ .format_buffer = format_mpls_header,
+ .format_trace = format_mpls_lookup_trace,
+ .unformat_buffer = unformat_mpls_header,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (mpls_lookup_node, mpls_lookup)
+
+typedef struct {
+ u32 next_index;
+ u32 lb_index;
+ u32 hash;
+} mpls_load_balance_trace_t;
+
+static u8 *
+format_mpls_load_balance_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ mpls_load_balance_trace_t * t = va_arg (*args, mpls_load_balance_trace_t *);
+
+ s = format (s, "MPLS: next [%d], LB index %d hash %d",
+ t->next_index, t->lb_index, t->hash);
+ return s;
+}
+
+always_inline uword
+mpls_load_balance (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ vlib_combined_counter_main_t * cm = &load_balance_main.lbm_via_counters;
+ u32 n_left_from, n_left_to_next, * from, * to_next;
+ u32 cpu_index = os_get_cpu_number();
+ u32 next;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next,
+ to_next, n_left_to_next);
+
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ mpls_lookup_next_t next0, next1;
+ const load_balance_t *lb0, *lb1;
+ vlib_buffer_t * p0, *p1;
+ u32 pi0, lbi0, hc0, pi1, lbi1, hc1;
+ const mpls_unicast_header_t *mpls0, *mpls1;
+ const dpo_id_t *dpo0, *dpo1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, STORE);
+ vlib_prefetch_buffer_header (p3, STORE);
+
+ CLIB_PREFETCH (p2->data, sizeof (mpls0[0]), STORE);
+ CLIB_PREFETCH (p3->data, sizeof (mpls0[0]), STORE);
+ }
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+
+ mpls0 = vlib_buffer_get_current (p0);
+ mpls1 = vlib_buffer_get_current (p1);
+ lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+ lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];
+
+ lb0 = load_balance_get(lbi0);
+ lb1 = load_balance_get(lbi1);
+
+ /*
+ * this node is for via-FIBs, so we can re-use the flow hash value from
+ * the previous level if present.
+ * We don't want to use the same hash value at each level in the recursion
+ * graph as that would lead to polarisation
+ */
+ hc0 = hc1 = 0;
+
+ if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+ {
+ if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
+ {
+ hc0 = vnet_buffer(p0)->ip.flow_hash = vnet_buffer(p0)->ip.flow_hash >> 1;
+ }
+ else
+ {
+ hc0 = vnet_buffer(p0)->ip.flow_hash = mpls_compute_flow_hash(mpls0, hc0);
+ }
+ }
+ if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
+ {
+ if (PREDICT_TRUE (vnet_buffer(p1)->ip.flow_hash))
+ {
+ hc1 = vnet_buffer(p1)->ip.flow_hash = vnet_buffer(p1)->ip.flow_hash >> 1;
+ }
+ else
+ {
+ hc1 = vnet_buffer(p1)->ip.flow_hash = mpls_compute_flow_hash(mpls1, hc1);
+ }
+ }
+
+ dpo0 = load_balance_get_bucket_i(lb0, hc0 & (lb0->lb_n_buckets_minus_1));
+ dpo1 = load_balance_get_bucket_i(lb1, hc1 & (lb1->lb_n_buckets_minus_1));
+
+ next0 = dpo0->dpoi_next_node;
+ next1 = dpo1->dpoi_next_node;
+
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+ vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi0, 1,
+ vlib_buffer_length_in_chain (vm, p0));
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi1, 1,
+ vlib_buffer_length_in_chain (vm, p1));
+
+ if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_load_balance_trace_t *tr = vlib_add_trace (vm, node,
+ p0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->lb_index = lbi0;
+ tr->hash = hc0;
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next,
+ to_next, n_left_to_next,
+ pi0, pi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ mpls_lookup_next_t next0;
+ const load_balance_t *lb0;
+ vlib_buffer_t * p0;
+ u32 pi0, lbi0, hc0;
+ const mpls_unicast_header_t *mpls0;
+ const dpo_id_t *dpo0;
+
+ pi0 = from[0];
+ to_next[0] = pi0;
+ from += 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ n_left_from -= 1;
+
+ p0 = vlib_get_buffer (vm, pi0);
+
+ mpls0 = vlib_buffer_get_current (p0);
+ lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+
+ lb0 = load_balance_get(lbi0);
+
+ hc0 = 0;
+ if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+ {
+ if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
+ {
+ hc0 = vnet_buffer(p0)->ip.flow_hash = vnet_buffer(p0)->ip.flow_hash >> 1;
+ }
+ else
+ {
+ hc0 = vnet_buffer(p0)->ip.flow_hash = mpls_compute_flow_hash(mpls0, hc0);
+ }
+ }
+
+ dpo0 = load_balance_get_bucket_i(lb0, hc0 & (lb0->lb_n_buckets_minus_1));
+
+ next0 = dpo0->dpoi_next_node;
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, cpu_index, lbi0, 1,
+ vlib_buffer_length_in_chain (vm, p0));
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next,
+ to_next, n_left_to_next,
+ pi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (mpls_load_balance_node) = {
+ .function = mpls_load_balance,
+ .name = "mpls-load-balance",
+ .vector_size = sizeof (u32),
+ .sibling_of = "mpls-lookup",
+
+ .format_trace = format_mpls_load_balance_trace,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (mpls_load_balance_node, mpls_load_balance)
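Both nodes pick a load-balance bucket with hash & (n_buckets - 1), which is why they ASSERT(is_pow2(lb_n_buckets)): for a power-of-two bucket count the mask is exactly hash % n_buckets, without a division on the forwarding path. For instance, with 4 buckets:

    /* lb_n_buckets = 4, lb_n_buckets_minus_1 = 3 (binary 011)
     * hash = 0x2A (binary 101010): 0x2A & 3 = 2, same as 0x2A % 4 */
    dpo0 = load_balance_get_bucket_i (lb0,
                                      hash_c0 & lb0->lb_n_buckets_minus_1);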
diff --git a/src/vnet/mpls/mpls_output.c b/src/vnet/mpls/mpls_output.c
new file mode 100644
index 00000000000..8292a0cb3d2
--- /dev/null
+++ b/src/vnet/mpls/mpls_output.c
@@ -0,0 +1,479 @@
+/*
+ * mpls_output.c: MPLS Adj rewrite
+ *
+ * Copyright (c) 2012-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ip/ip.h>
+#include <vnet/mpls/mpls.h>
+
+typedef struct {
+ /* Adjacency taken. */
+ u32 adj_index;
+ u32 flow_hash;
+
+ /* Packet data, possibly *after* rewrite. */
+ u8 packet_data[64 - 1*sizeof(u32)];
+} mpls_output_trace_t;
+
+static u8 *
+format_mpls_output_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ mpls_output_trace_t * t = va_arg (*args, mpls_output_trace_t *);
+ vnet_main_t * vnm = vnet_get_main();
+ uword indent = format_get_indent (s);
+
+ s = format (s, "adj-idx %d : %U flow hash: 0x%08x",
+ t->adj_index,
+ format_ip_adjacency, t->adj_index, FORMAT_IP_ADJACENCY_NONE,
+ t->flow_hash);
+ s = format (s, "\n%U%U",
+ format_white_space, indent,
+ format_ip_adjacency_packet_data,
+ vnm, t->adj_index,
+ t->packet_data, sizeof (t->packet_data));
+ return s;
+}
+
+static inline uword
+mpls_output_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame,
+ int is_midchain)
+{
+ u32 n_left_from, next_index, * from, * to_next, cpu_index;
+ vlib_node_runtime_t * error_node;
+ u32 n_left_to_next;
+
+ cpu_index = os_get_cpu_number();
+ error_node = vlib_node_get_runtime (vm, mpls_output_node.index);
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ ip_adjacency_t * adj0;
+ mpls_unicast_header_t *hdr0;
+ vlib_buffer_t * p0;
+ u32 pi0, rw_len0, adj_index0, next0, error0;
+
+ ip_adjacency_t * adj1;
+ mpls_unicast_header_t *hdr1;
+ vlib_buffer_t * p1;
+ u32 pi1, rw_len1, adj_index1, next1, error1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, STORE);
+ vlib_prefetch_buffer_header (p3, STORE);
+
+ CLIB_PREFETCH (p2->data, sizeof (hdr0[0]), STORE);
+ CLIB_PREFETCH (p3->data, sizeof (hdr1[0]), STORE);
+ }
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+
+ adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+ adj_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];
+
+ /* We should never rewrite a pkt using the MISS adjacency */
+ ASSERT(adj_index0);
+ ASSERT(adj_index1);
+
+ adj0 = adj_get(adj_index0);
+ adj1 = adj_get(adj_index1);
+ hdr0 = vlib_buffer_get_current (p0);
+ hdr1 = vlib_buffer_get_current (p1);
+
+ /* Guess we are only writing on simple Ethernet header. */
+ vnet_rewrite_two_headers (adj0[0], adj1[0], hdr0, hdr1,
+ sizeof (ethernet_header_t));
+
+ /* Update packet buffer attributes/set output interface. */
+ rw_len0 = adj0[0].rewrite_header.data_bytes;
+ rw_len1 = adj1[0].rewrite_header.data_bytes;
+
+ if (PREDICT_FALSE (rw_len0 > sizeof(ethernet_header_t)))
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ cpu_index, adj_index0,
+ /* packet increment */ 0,
+ /* byte increment */ rw_len0-sizeof(ethernet_header_t));
+ if (PREDICT_FALSE (rw_len1 > sizeof(ethernet_header_t)))
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ cpu_index, adj_index1,
+ /* packet increment */ 0,
+ /* byte increment */ rw_len1-sizeof(ethernet_header_t));
+
+ /* Check MTU of outgoing interface. */
+ if (PREDICT_TRUE(vlib_buffer_length_in_chain (vm, p0) <=
+ adj0[0].rewrite_header.max_l3_packet_bytes))
+ {
+ p0->current_data -= rw_len0;
+ p0->current_length += rw_len0;
+
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] =
+ adj0[0].rewrite_header.sw_if_index;
+ next0 = adj0[0].rewrite_header.next_index;
+ error0 = IP4_ERROR_NONE;
+
+ if (is_midchain)
+ {
+ adj0->sub_type.midchain.fixup_func(vm, adj0, p0);
+ }
+ }
+ else
+ {
+ error0 = IP4_ERROR_MTU_EXCEEDED;
+ next0 = MPLS_OUTPUT_NEXT_DROP;
+ }
+ if (PREDICT_TRUE(vlib_buffer_length_in_chain (vm, p1) <=
+ adj1[0].rewrite_header.max_l3_packet_bytes))
+ {
+ p1->current_data -= rw_len1;
+ p1->current_length += rw_len1;
+
+ vnet_buffer (p1)->sw_if_index[VLIB_TX] =
+ adj1[0].rewrite_header.sw_if_index;
+ next1 = adj1[0].rewrite_header.next_index;
+ error1 = IP4_ERROR_NONE;
+
+ if (is_midchain)
+ {
+ adj1->sub_type.midchain.fixup_func(vm, adj1, p1);
+ }
+ }
+ else
+ {
+ error1 = IP4_ERROR_MTU_EXCEEDED;
+ next1 = MPLS_OUTPUT_NEXT_DROP;
+ }
+
+ p0->error = error_node->errors[error0];
+ p1->error = error_node->errors[error1];
+
+ if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_output_trace_t *tr = vlib_add_trace (vm, node,
+ p0, sizeof (*tr));
+ tr->adj_index = vnet_buffer(p0)->ip.adj_index[VLIB_TX];
+ tr->flow_hash = vnet_buffer(p0)->ip.flow_hash;
+ }
+ if (PREDICT_FALSE(p1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_output_trace_t *tr = vlib_add_trace (vm, node,
+ p1, sizeof (*tr));
+ tr->adj_index = vnet_buffer(p1)->ip.adj_index[VLIB_TX];
+ tr->flow_hash = vnet_buffer(p1)->ip.flow_hash;
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, pi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ ip_adjacency_t * adj0;
+ mpls_unicast_header_t *hdr0;
+ vlib_buffer_t * p0;
+ u32 pi0, rw_len0, adj_index0, next0, error0;
+
+ pi0 = to_next[0] = from[0];
+
+ p0 = vlib_get_buffer (vm, pi0);
+
+ adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+
+ /* We should never rewrite a pkt using the MISS adjacency */
+ ASSERT(adj_index0);
+
+ adj0 = adj_get(adj_index0);
+ hdr0 = vlib_buffer_get_current (p0);
+
+ /* Guess we are only writing on simple Ethernet header. */
+ vnet_rewrite_one_header (adj0[0], hdr0,
+ sizeof (ethernet_header_t));
+
+ /* Update packet buffer attributes/set output interface. */
+ rw_len0 = adj0[0].rewrite_header.data_bytes;
+
+ if (PREDICT_FALSE (rw_len0 > sizeof(ethernet_header_t)))
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ cpu_index, adj_index0,
+ /* packet increment */ 0,
+ /* byte increment */ rw_len0-sizeof(ethernet_header_t));
+
+ /* Check MTU of outgoing interface. */
+ if (PREDICT_TRUE(vlib_buffer_length_in_chain (vm, p0) <=
+ adj0[0].rewrite_header.max_l3_packet_bytes))
+ {
+ p0->current_data -= rw_len0;
+ p0->current_length += rw_len0;
+
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] =
+ adj0[0].rewrite_header.sw_if_index;
+ next0 = adj0[0].rewrite_header.next_index;
+ error0 = IP4_ERROR_NONE;
+
+ if (is_midchain)
+ {
+ adj0->sub_type.midchain.fixup_func(vm, adj0, p0);
+ }
+ }
+ else
+ {
+ error0 = IP4_ERROR_MTU_EXCEEDED;
+ next0 = MPLS_OUTPUT_NEXT_DROP;
+ }
+ p0->error = error_node->errors[error0];
+
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_output_trace_t *tr = vlib_add_trace (vm, node,
+ p0, sizeof (*tr));
+ tr->adj_index = vnet_buffer(p0)->ip.adj_index[VLIB_TX];
+ tr->flow_hash = vnet_buffer(p0)->ip.flow_hash;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, mpls_output_node.index,
+ MPLS_ERROR_PKTS_ENCAP,
+ from_frame->n_vectors);
+
+ return from_frame->n_vectors;
+}
+
+static char * mpls_error_strings[] = {
+#define mpls_error(n,s) s,
+#include "error.def"
+#undef mpls_error
+};
+
+static inline uword
+mpls_output (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return (mpls_output_inline(vm, node, from_frame, /* is_midchain */ 0));
+}
+
+VLIB_REGISTER_NODE (mpls_output_node) = {
+ .function = mpls_output,
+ .name = "mpls-output",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .n_errors = MPLS_N_ERROR,
+ .error_strings = mpls_error_strings,
+
+ .n_next_nodes = MPLS_OUTPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [MPLS_OUTPUT_NEXT_##s] = n,
+ foreach_mpls_output_next
+#undef _
+ },
+
+ .format_trace = format_mpls_output_trace,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (mpls_output_node, mpls_output)
+
+static inline uword
+mpls_midchain (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return (mpls_output_inline(vm, node, from_frame, /* is_midchain */ 1));
+}
+
+VLIB_REGISTER_NODE (mpls_midchain_node) = {
+ .function = mpls_midchain,
+ .name = "mpls-midchain",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_mpls_output_trace,
+
+ .sibling_of = "mpls-output",
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (mpls_midchain_node, mpls_midchain)
+
+/**
+ * @brief Next index values from the MPLS incomplete adj node
+ */
+#define foreach_mpls_adj_incomplete_next \
+_(DROP, "error-drop") \
+_(IP4, "ip4-arp") \
+_(IP6, "ip6-discover-neighbor")
+
+typedef enum {
+#define _(s,n) MPLS_ADJ_INCOMPLETE_NEXT_##s,
+ foreach_mpls_adj_incomplete_next
+#undef _
+ MPLS_ADJ_INCOMPLETE_N_NEXT,
+} mpls_adj_incomplete_next_t;
+
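+/*
+ * For readers new to the X-macro idiom used for next-node tables in
+ * this file: the foreach list above expands into both the enum just
+ * defined and the string table in the node registration below.
+ * Expanded by hand, the enum reads:
+ *
+ *    typedef enum {
+ *      MPLS_ADJ_INCOMPLETE_NEXT_DROP,   // -> "error-drop"
+ *      MPLS_ADJ_INCOMPLETE_NEXT_IP4,    // -> "ip4-arp"
+ *      MPLS_ADJ_INCOMPLETE_NEXT_IP6,    // -> "ip6-discover-neighbor"
+ *      MPLS_ADJ_INCOMPLETE_N_NEXT,
+ *    } mpls_adj_incomplete_next_t;
+ */
+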
+/**
+ * @brief A struct to hold tracing information for the MPLS incomplete
+ * adjacency node.
+ */
+typedef struct mpls_adj_incomplete_trace_t_
+{
+ u32 next;
+} mpls_adj_incomplete_trace_t;
+
+
+/**
+ * @brief Graph node for incomplete MPLS adjacency.
+ * This node will push traffic to either the v4-arp or v6-nd node
+ * based on the next-hop proto of the adj.
+ * We pay a cost for this 'routing' node, but an incomplete adj is the
+ * exception case.
+ */
+static inline uword
+mpls_adj_incomplete (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0, next0, adj_index0;
+ ip_adjacency_t * adj0;
+ vlib_buffer_t * p0;
+
+ pi0 = to_next[0] = from[0];
+ p0 = vlib_get_buffer (vm, pi0);
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+ ASSERT(adj_index0);
+
+ adj0 = adj_get(adj_index0);
+
+ if (PREDICT_TRUE(FIB_PROTOCOL_IP4 == adj0->ia_nh_proto))
+ {
+ next0 = MPLS_ADJ_INCOMPLETE_NEXT_IP4;
+ }
+ else
+ {
+ next0 = MPLS_ADJ_INCOMPLETE_NEXT_IP6;
+ }
+
+ if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_adj_incomplete_trace_t *tr =
+ vlib_add_trace (vm, node, p0, sizeof (*tr));
+ tr->next = next0;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return from_frame->n_vectors;
+}
+
+static u8 *
+format_mpls_adj_incomplete_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ mpls_adj_incomplete_trace_t * t;
+ uword indent;
+
+ t = va_arg (*args, mpls_adj_incomplete_trace_t *);
+ indent = format_get_indent (s);
+
+ s = format (s, "%Unext:%d",
+ format_white_space, indent,
+ t->next);
+ return (s);
+}
+
+VLIB_REGISTER_NODE (mpls_adj_incomplete_node) = {
+ .function = mpls_adj_incomplete,
+ .name = "mpls-adj-incomplete",
+ .format_trace = format_mpls_adj_incomplete_trace,
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .n_errors = MPLS_N_ERROR,
+ .error_strings = mpls_error_strings,
+
+ .n_next_nodes = MPLS_ADJ_INCOMPLETE_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [MPLS_ADJ_INCOMPLETE_NEXT_##s] = n,
+ foreach_mpls_adj_incomplete_next
+#undef _
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (mpls_adj_incomplete_node,
+ mpls_adj_incomplete)
diff --git a/src/vnet/mpls/mpls_tunnel.c b/src/vnet/mpls/mpls_tunnel.c
new file mode 100644
index 00000000000..8d1e30a36fb
--- /dev/null
+++ b/src/vnet/mpls/mpls_tunnel.c
@@ -0,0 +1,787 @@
+/*
+ * mpls_tunnel.c: MPLS tunnel interfaces (i.e. for RSVP-TE)
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/mpls/mpls_tunnel.h>
+#include <vnet/ip/ip.h>
+#include <vnet/fib/fib_path_list.h>
+#include <vnet/adj/adj_midchain.h>
+
+/**
+ * @brief pool of tunnel instances
+ */
+static mpls_tunnel_t *mpls_tunnel_pool;
+
+/**
+ * @brief Vector of free tunnel HW interface indices - i.e. recycled indices
+ */
+static u32 * mpls_tunnel_free_hw_if_indices;
+
+/**
+ * @brief DB of SW index to tunnel index
+ */
+static u32 *mpls_tunnel_db;
+
+/**
+ * @brief Get a tunnel object from a SW interface index
+ */
+static mpls_tunnel_t*
+mpls_tunnel_get_from_sw_if_index (u32 sw_if_index)
+{
+ if ((vec_len(mpls_tunnel_db) <= sw_if_index) ||
+ (~0 == mpls_tunnel_db[sw_if_index]))
+ return (NULL);
+
+ return (pool_elt_at_index(mpls_tunnel_pool,
+ mpls_tunnel_db[sw_if_index]));
+}
+
+/**
+ * @brief Determine the tunnel's forwarding chain type; an imp-null-only
+ * label stack needs an IP chain, anything else an MPLS non-EOS chain.
+ */
+static fib_forward_chain_type_t
+mpls_tunnel_get_fwd_chain_type (const mpls_tunnel_t *mt)
+{
+ if ((1 == vec_len(mt->mt_label_stack)) &&
+ (mt->mt_label_stack[0] == MPLS_IETF_IMPLICIT_NULL_LABEL))
+ {
+ /*
+ * the only label in the label stack is implicit-null, so
+ * we need to build an IP chain.
+ */
+ if (FIB_PROTOCOL_IP4 == fib_path_list_get_proto(mt->mt_path_list))
+ {
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+ }
+ else
+ {
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6);
+ }
+ }
+ else
+ {
+ return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
+ }
+}
+
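+/*
+ * Worked examples of the selection above (assuming an IPv4 path-list):
+ *
+ *    label stack           -> forwarding chain type
+ *    {implicit-null}       -> FIB_FORW_CHAIN_TYPE_UNICAST_IP4
+ *    {33}                  -> FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS
+ *    {implicit-null, 33}   -> FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS
+ *                             (stack length != 1)
+ */
+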
+/**
+ * @brief Build a rewrite string for the MPLS tunnel.
+ *
+ * We have choices here:
+ * 1 - have an Adjacency with a zero length string and stack it on
+ * MPLS label objects
+ * 2 - put the label header rewrites in the adjacency string.
+ *
+ * We choose 2 since it results in fewer graph nodes in the egress path
+ */
+static u8*
+mpls_tunnel_build_rewrite (vnet_main_t * vnm,
+ u32 sw_if_index,
+ vnet_link_t link_type,
+ const void *dst_address)
+{
+ mpls_unicast_header_t *muh;
+ mpls_tunnel_t *mt;
+ u8 *rewrite;
+ u32 mti, ii;
+
+ rewrite = NULL;
+ mti = mpls_tunnel_db[sw_if_index];
+ mt = pool_elt_at_index(mpls_tunnel_pool, mti);
+
+ /*
+ * The vector must be allocated as u8 so the length is correct
+ */
+ ASSERT(0 < vec_len(mt->mt_label_stack));
+ vec_validate(rewrite, (sizeof(*muh) * vec_len(mt->mt_label_stack)) - 1);
+ ASSERT(rewrite);
+ muh = (mpls_unicast_header_t *)rewrite;
+
+ /*
+ * The last (inner-most) label in the stack may be EOS; all the rest are Non-EOS
+ */
+ for (ii = 0; ii < vec_len(mt->mt_label_stack)-1; ii++)
+ {
+ vnet_mpls_uc_set_label(&muh[ii].label_exp_s_ttl, mt->mt_label_stack[ii]);
+ vnet_mpls_uc_set_ttl(&muh[ii].label_exp_s_ttl, 255);
+ vnet_mpls_uc_set_exp(&muh[ii].label_exp_s_ttl, 0);
+ vnet_mpls_uc_set_s(&muh[ii].label_exp_s_ttl, MPLS_NON_EOS);
+ muh[ii].label_exp_s_ttl = clib_host_to_net_u32(muh[ii].label_exp_s_ttl);
+ }
+
+ vnet_mpls_uc_set_label(&muh[ii].label_exp_s_ttl, mt->mt_label_stack[ii]);
+ vnet_mpls_uc_set_ttl(&muh[ii].label_exp_s_ttl, 255);
+ vnet_mpls_uc_set_exp(&muh[ii].label_exp_s_ttl, 0);
+
+ if ((VNET_LINK_MPLS == link_type) &&
+ (mt->mt_label_stack[ii] != MPLS_IETF_IMPLICIT_NULL_LABEL))
+ {
+ vnet_mpls_uc_set_s(&muh[ii].label_exp_s_ttl, MPLS_NON_EOS);
+ }
+ else
+ {
+ vnet_mpls_uc_set_s(&muh[ii].label_exp_s_ttl, MPLS_EOS);
+ }
+
+ muh[ii].label_exp_s_ttl = clib_host_to_net_u32(muh[ii].label_exp_s_ttl);
+
+ return (rewrite);
+}
+
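+/*
+ * As a concrete illustration of the EOS handling above: for a stack of
+ * two (illustrative) labels {33, 34} imposed on IP traffic, the rewrite
+ * holds label 33 with S=0 (more labels follow) and label 34 with S=1,
+ * both swapped to network byte order. Building one label word with the
+ * accessors from mpls/packet.h:
+ *
+ *    mpls_label_t word = 0;
+ *    vnet_mpls_uc_set_label (&word, 34);
+ *    vnet_mpls_uc_set_ttl (&word, 255);
+ *    vnet_mpls_uc_set_exp (&word, 0);
+ *    vnet_mpls_uc_set_s (&word, MPLS_EOS);     // inner-most label
+ *    word = clib_host_to_net_u32 (word);       // wire byte order
+ */
+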
+/**
+ * mpls_tunnel_stack
+ *
+ * 'stack' (resolve the recursion for) the tunnel's midchain adjacency
+ */
+static void
+mpls_tunnel_stack (adj_index_t ai)
+{
+ ip_adjacency_t *adj;
+ mpls_tunnel_t *mt;
+ u32 sw_if_index;
+
+ adj = adj_get(ai);
+ sw_if_index = adj->rewrite_header.sw_if_index;
+
+ mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
+
+ if (NULL == mt)
+ return;
+
+ /*
+ * find the adjacency that is contributed by the FIB path-list
+ * that this tunnel resolves via, and use it as the next adj
+ * in the midchain
+ */
+ if (vnet_hw_interface_get_flags(vnet_get_main(),
+ mt->mt_hw_if_index) &
+ VNET_HW_INTERFACE_FLAG_LINK_UP)
+ {
+ dpo_id_t dpo = DPO_INVALID;
+
+ fib_path_list_contribute_forwarding(mt->mt_path_list,
+ mpls_tunnel_get_fwd_chain_type(mt),
+ &dpo);
+
+ if (DPO_LOAD_BALANCE == dpo.dpoi_type)
+ {
+ /*
+ * we don't support multiple paths, so no need to load-balance.
+ * pull the first and only choice and stack directly on that.
+ */
+ load_balance_t *lb;
+
+ lb = load_balance_get (dpo.dpoi_index);
+
+ ASSERT(1 == lb->lb_n_buckets);
+
+ dpo_copy(&dpo, load_balance_get_bucket_i (lb, 0));
+ }
+
+ adj_nbr_midchain_stack(ai, &dpo);
+ dpo_reset(&dpo);
+ }
+ else
+ {
+ adj_nbr_midchain_unstack(ai);
+ }
+}
+
+/**
+ * @brief Call back when restacking all adjacencies on a MPLS interface
+ */
+static adj_walk_rc_t
+mpls_adj_walk_cb (adj_index_t ai,
+ void *ctx)
+{
+ mpls_tunnel_stack(ai);
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+static void
+mpls_tunnel_restack (mpls_tunnel_t *mt)
+{
+ fib_protocol_t proto;
+
+ /*
+ * walk all the adjacencies on the MPLS interface and restack them
+ */
+ FOR_EACH_FIB_PROTOCOL(proto)
+ {
+ adj_nbr_walk(mt->mt_sw_if_index,
+ proto,
+ mpls_adj_walk_cb,
+ NULL);
+ }
+}
+
+static clib_error_t *
+mpls_tunnel_admin_up_down (vnet_main_t * vnm,
+ u32 hw_if_index,
+ u32 flags)
+{
+ vnet_hw_interface_t * hi;
+ mpls_tunnel_t *mt;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ mt = mpls_tunnel_get_from_sw_if_index(hi->sw_if_index);
+
+ if (NULL == mt)
+ return (NULL);
+
+ if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ vnet_hw_interface_set_flags (vnm, hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+ else
+ vnet_hw_interface_set_flags (vnm, hw_if_index, 0 /* down */);
+
+ mpls_tunnel_restack(mt);
+
+ return (NULL);
+}
+
+/**
+ * @brief Fixup the adj rewrite post encap. This is a no-op since the
+ * rewrite is a stack of labels.
+ */
+static void
+mpls_tunnel_fixup (vlib_main_t *vm,
+ ip_adjacency_t *adj,
+ vlib_buffer_t *b0)
+{
+}
+
+static void
+mpls_tunnel_update_adj (vnet_main_t * vnm,
+ u32 sw_if_index,
+ adj_index_t ai)
+{
+ adj_nbr_midchain_update_rewrite(
+ ai, mpls_tunnel_fixup,
+ ADJ_MIDCHAIN_FLAG_NONE,
+ mpls_tunnel_build_rewrite(vnm, sw_if_index,
+ adj_get_link_type(ai),
+ NULL));
+
+ mpls_tunnel_stack(ai);
+}
+
+static u8 *
+format_mpls_tunnel_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, "mpls-tunnel%d", dev_instance);
+}
+
+static u8 *
+format_mpls_tunnel_device (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ CLIB_UNUSED (int verbose) = va_arg (*args, int);
+
+ return (format (s, "MPLS-tunnel: id %d\n", dev_instance));
+}
+
+/**
+ * @brief Packet trace structure
+ */
+typedef struct mpls_tunnel_trace_t_
+{
+ /**
+ * Tunnel-id / index in tunnel vector
+ */
+ u32 tunnel_id;
+} mpls_tunnel_trace_t;
+
+static u8 *
+format_mpls_tunnel_tx_trace (u8 * s,
+ va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ mpls_tunnel_trace_t * t = va_arg (*args, mpls_tunnel_trace_t *);
+
+ s = format (s, "MPLS: tunnel %d", t->tunnel_id);
+ return s;
+}
+
+/**
+ * @brief TX function. Only called for L2 traffic; L3 traffic uses the adj-midchains.
+ */
+static uword
+mpls_tunnel_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 next_index;
+ u32 * from, * to_next, n_left_from, n_left_to_next;
+ vnet_interface_output_runtime_t * rd = (void *) node->runtime_data;
+ const mpls_tunnel_t *mt;
+
+ mt = pool_elt_at_index(mpls_tunnel_pool, rd->dev_instance);
+
+ /* Vector of buffer / pkt indices we're supposed to process */
+ from = vlib_frame_vector_args (frame);
+
+ /* Number of buffers / pkts */
+ n_left_from = frame->n_vectors;
+
+ /* Speculatively send the first buffer to the last disposition we used */
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ /* set up to enqueue to our disposition with index = next_index */
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /*
+ * FIXME DUAL LOOP
+ */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t * b0;
+ u32 bi0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer(vm, bi0);
+
+ vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mt->mt_l2_adj;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->tunnel_id = rd->dev_instance;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, mt->mt_l2_tx_arc);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+VNET_DEVICE_CLASS (mpls_tunnel_class) = {
+ .name = "MPLS tunnel device",
+ .format_device_name = format_mpls_tunnel_name,
+ .format_device = format_mpls_tunnel_device,
+ .format_tx_trace = format_mpls_tunnel_tx_trace,
+ .tx_function = mpls_tunnel_tx,
+ .admin_up_down_function = mpls_tunnel_admin_up_down,
+};
+
+VNET_HW_INTERFACE_CLASS (mpls_tunnel_hw_interface_class) = {
+ .name = "MPLS-Tunnel",
+// .format_header = format_mpls_eth_header_with_length,
+// .unformat_header = unformat_mpls_eth_header,
+ .update_adjacency = mpls_tunnel_update_adj,
+ .build_rewrite = mpls_tunnel_build_rewrite,
+ .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
+};
+
+const mpls_tunnel_t *
+mpls_tunnel_get (u32 mti)
+{
+ return (pool_elt_at_index(mpls_tunnel_pool, mti));
+}
+
+/**
+ * @brief Walk all the MPLS tunnels
+ */
+void
+mpls_tunnel_walk (mpls_tunnel_walk_cb_t cb,
+ void *ctx)
+{
+ u32 mti;
+
+ pool_foreach_index(mti, mpls_tunnel_pool,
+ ({
+ cb(mti, ctx);
+ }));
+}
+
+void
+vnet_mpls_tunnel_del (u32 sw_if_index)
+{
+ mpls_tunnel_t *mt;
+
+ mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
+
+ if (NULL == mt)
+ return;
+
+ fib_path_list_child_remove(mt->mt_path_list,
+ mt->mt_sibling_index);
+ if (ADJ_INDEX_INVALID != mt->mt_l2_adj)
+ adj_unlock(mt->mt_l2_adj);
+
+ vec_free(mt->mt_label_stack);
+
+ vec_add1 (mpls_tunnel_free_hw_if_indices, mt->mt_hw_if_index);
+ pool_put(mpls_tunnel_pool, mt);
+ mpls_tunnel_db[sw_if_index] = ~0;
+}
+
+void
+vnet_mpls_tunnel_add (fib_route_path_t *rpaths,
+ mpls_label_t *label_stack,
+ u8 l2_only,
+ u32 *sw_if_index)
+{
+ vnet_hw_interface_t * hi;
+ mpls_tunnel_t *mt;
+ vnet_main_t * vnm;
+ u32 mti;
+
+ vnm = vnet_get_main();
+ pool_get(mpls_tunnel_pool, mt);
+ memset (mt, 0, sizeof (*mt));
+ mti = mt - mpls_tunnel_pool;
+ fib_node_init(&mt->mt_node, FIB_NODE_TYPE_MPLS_TUNNEL);
+ mt->mt_l2_adj = ADJ_INDEX_INVALID;
+
+ /*
+ * Create a new, or re-use an old, tunnel HW interface
+ */
+ if (vec_len (mpls_tunnel_free_hw_if_indices) > 0)
+ {
+ mt->mt_hw_if_index =
+ mpls_tunnel_free_hw_if_indices[vec_len(mpls_tunnel_free_hw_if_indices)-1];
+ _vec_len (mpls_tunnel_free_hw_if_indices) -= 1;
+ hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index);
+ hi->hw_instance = mti;
+ hi->dev_instance = mti;
+ }
+ else
+ {
+ mt->mt_hw_if_index = vnet_register_interface(
+ vnm,
+ mpls_tunnel_class.index,
+ mti,
+ mpls_tunnel_hw_interface_class.index,
+ mti);
+ hi = vnet_get_hw_interface(vnm, mt->mt_hw_if_index);
+ }
+
+ /*
+ * Add the new tunnel to the tunnel DB - key:SW if index
+ */
+ mt->mt_sw_if_index = hi->sw_if_index;
+ vec_validate_init_empty(mpls_tunnel_db, mt->mt_sw_if_index, ~0);
+ mpls_tunnel_db[mt->mt_sw_if_index] = mti;
+
+ /*
+ * construct a path-list from the path provided
+ */
+ mt->mt_path_list = fib_path_list_create(FIB_PATH_LIST_FLAG_SHARED, rpaths);
+ mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
+ FIB_NODE_TYPE_MPLS_TUNNEL,
+ mti);
+
+ mt->mt_label_stack = vec_dup(label_stack);
+
+ if (l2_only)
+ {
+ mt->mt_l2_adj =
+ adj_nbr_add_or_lock(fib_path_list_get_proto(mt->mt_path_list),
+ VNET_LINK_ETHERNET,
+ &zero_addr,
+ mt->mt_sw_if_index);
+
+ mt->mt_l2_tx_arc = vlib_node_add_named_next(vlib_get_main(),
+ hi->tx_node_index,
+ "adj-l2-midchain");
+ }
+
+ *sw_if_index = mt->mt_sw_if_index;
+}
+
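+/*
+ * A hedged usage sketch of vnet_mpls_tunnel_add() (interface index and
+ * labels illustrative):
+ *
+ *    fib_route_path_t *rpaths = NULL, rpath = {
+ *      .frp_proto = FIB_PROTOCOL_IP4,
+ *      .frp_sw_if_index = 5,              // illustrative interface
+ *      .frp_weight = 1,
+ *    };
+ *    mpls_label_t *labels = NULL;
+ *    u32 tun_sw_if_index;
+ *
+ *    rpath.frp_addr.ip4.as_u32 = clib_host_to_net_u32 (0x0a000001);
+ *    vec_add1 (rpaths, rpath);
+ *    vec_add1 (labels, 33);
+ *    vec_add1 (labels, 34);
+ *    vnet_mpls_tunnel_add (rpaths, labels, 0, &tun_sw_if_index);  // l2_only = 0
+ *    vec_free (rpaths);
+ *    vec_free (labels);
+ */
+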
+static clib_error_t *
+vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, * line_input = &_line_input;
+ vnet_main_t * vnm = vnet_get_main();
+ u8 is_del = 0;
+ u8 l2_only = 0;
+ fib_route_path_t rpath, *rpaths = NULL;
+ mpls_label_t out_label = MPLS_LABEL_INVALID, *labels = NULL;
+ u32 sw_if_index;
+
+ memset(&rpath, 0, sizeof(rpath));
+
+ /* Get a line of input. */
+ if (! unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del %U",
+ unformat_vnet_sw_interface, vnm,
+ &sw_if_index))
+ is_del = 1;
+ else if (unformat (line_input, "add"))
+ is_del = 0;
+ else if (unformat (line_input, "out-label %U",
+ unformat_mpls_unicast_label, &out_label))
+ {
+ vec_add1(labels, out_label);
+ }
+ else if (unformat (line_input, "via %U %U",
+ unformat_ip4_address,
+ &rpath.frp_addr.ip4,
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ }
+
+ else if (unformat (line_input, "via %U %U",
+ unformat_ip6_address,
+ &rpath.frp_addr.ip6,
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ }
+ else if (unformat (line_input, "via %U",
+ unformat_ip6_address,
+ &rpath.frp_addr.ip6))
+ {
+ rpath.frp_fib_index = 0;
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ }
+ else if (unformat (line_input, "via %U",
+ unformat_ip4_address,
+ &rpath.frp_addr.ip4))
+ {
+ rpath.frp_fib_index = 0;
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ }
+ else if (unformat (line_input, "l2-only"))
+ l2_only = 1;
+ else
+ return clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, line_input);
+ }
+
+ if (is_del)
+ {
+ vnet_mpls_tunnel_del(sw_if_index);
+ }
+ else
+ {
+ if (0 == vec_len(labels))
+ return clib_error_return (0, "No Output Labels '%U'",
+ format_unformat_error, line_input);
+
+ vec_add1(rpaths, rpath);
+ vnet_mpls_tunnel_add(rpaths, labels, l2_only, &sw_if_index);
+ }
+
+ vec_free(labels);
+ vec_free(rpaths);
+
+ return (NULL);
+}
+
+/*?
+ * This command creates a uni-directional MPLS tunnel.
+ *
+ * @cliexpar
+ * @cliexstart{mpls tunnel}
+ * mpls tunnel add via 10.0.0.1 GigEthernet0/8/0 out-label 33 out-label 34
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND (create_mpls_tunnel_command, static) = {
+ .path = "mpls tunnel",
+ .short_help =
+ "mpls tunnel via [addr] [interface] [out-labels]",
+ .function = vnet_create_mpls_tunnel_command_fn,
+};
+
+static u8 *
+format_mpls_tunnel (u8 * s, va_list * args)
+{
+ mpls_tunnel_t *mt = va_arg (*args, mpls_tunnel_t *);
+ int ii;
+
+ s = format(s, "mpls_tunnel%d: sw_if_index:%d hw_if_index:%d",
+ mt - mpls_tunnel_pool,
+ mt->mt_sw_if_index,
+ mt->mt_hw_if_index);
+ s = format(s, "\n label-stack:\n ");
+ for (ii = 0; ii < vec_len(mt->mt_label_stack); ii++)
+ {
+ s = format(s, "%d, ", mt->mt_label_stack[ii]);
+ }
+ s = format(s, "\n via:\n");
+ s = fib_path_list_format(mt->mt_path_list, s);
+ s = format(s, "\n");
+
+ return (s);
+}
+
+static clib_error_t *
+show_mpls_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ mpls_tunnel_t * mt;
+ u32 mti = ~0;
+
+ if (pool_elts (mpls_tunnel_pool) == 0)
+ vlib_cli_output (vm, "No MPLS tunnels configured...");
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%d", &mti))
+ ;
+ else
+ break;
+ }
+
+ if (~0 == mti)
+ {
+ pool_foreach (mt, mpls_tunnel_pool,
+ ({
+ vlib_cli_output (vm, "[@%d] %U",
+ mt - mpls_tunnel_pool,
+ format_mpls_tunnel, mt);
+ }));
+ }
+ else
+ {
+ if (pool_is_free_index(mpls_tunnel_pool, mti))
+ return clib_error_return (0, "Not atunnel index %d", mti);
+
+ mt = pool_elt_at_index(mpls_tunnel_pool, mti);
+
+ vlib_cli_output (vm, "[@%d] %U",
+ mt - mpls_tunnel_pool,
+ format_mpls_tunnel, mt);
+ }
+
+ return 0;
+}
+
+/*?
+ * This command shows MPLS tunnels.
+ *
+ * @cliexpar
+ * @cliexstart{sh mpls tunnel 2}
+ * [@2] mpls_tunnel2: sw_if_index:5 hw_if_index:5
+ * label-stack:
+ * 3,
+ * via:
+ * index:26 locks:1 proto:ipv4 uPRF-list:26 len:1 itfs:[2, ]
+ * index:26 pl-index:26 ipv4 weight=1 attached-nexthop: oper-flags:resolved,
+ * 10.0.0.2 loop0
+ * [@0]: ipv4 via 10.0.0.2 loop0: IP4: de:ad:00:00:00:00 -> 00:00:11:aa:bb:cc
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND (show_mpls_tunnel_command, static) = {
+ .path = "show mpls tunnel",
+ .function = show_mpls_tunnel_command_fn,
+};
+
+static mpls_tunnel_t *
+mpls_tunnel_from_fib_node (fib_node_t *node)
+{
+#if (CLIB_DEBUG > 0)
+ ASSERT(FIB_NODE_TYPE_MPLS_TUNNEL == node->fn_type);
+#endif
+ return ((mpls_tunnel_t*) (((char*)node) -
+ STRUCT_OFFSET_OF(mpls_tunnel_t, mt_node)));
+}
+
+/**
+ * Function definition to backwalk a FIB node
+ */
+static fib_node_back_walk_rc_t
+mpls_tunnel_back_walk (fib_node_t *node,
+ fib_node_back_walk_ctx_t *ctx)
+{
+ mpls_tunnel_restack(mpls_tunnel_from_fib_node(node));
+
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/**
+ * Function definition to get a FIB node from its index
+ */
+static fib_node_t*
+mpls_tunnel_fib_node_get (fib_node_index_t index)
+{
+ mpls_tunnel_t * mt;
+
+ mt = pool_elt_at_index(mpls_tunnel_pool, index);
+
+ return (&mt->mt_node);
+}
+
+/**
+ * Function definition to inform the FIB node that its last lock has gone.
+ */
+static void
+mpls_tunnel_last_lock_gone (fib_node_t *node)
+{
+ /*
+ * The MPLS tunnel is a root of the graph. As such
+ * it never has children and thus is never locked.
+ */
+ ASSERT(0);
+}
+
+/*
+ * Virtual function table registered by MPLS tunnels
+ * for participation in the FIB object graph.
+ */
+const static fib_node_vft_t mpls_vft = {
+ .fnv_get = mpls_tunnel_fib_node_get,
+ .fnv_last_lock = mpls_tunnel_last_lock_gone,
+ .fnv_back_walk = mpls_tunnel_back_walk,
+};
+
+static clib_error_t *
+mpls_tunnel_init (vlib_main_t *vm)
+{
+ fib_node_register_type(FIB_NODE_TYPE_MPLS_TUNNEL, &mpls_vft);
+
+ return 0;
+}
+VLIB_INIT_FUNCTION(mpls_tunnel_init);
diff --git a/src/vnet/mpls/mpls_tunnel.h b/src/vnet/mpls/mpls_tunnel.h
new file mode 100644
index 00000000000..ee56c0fc8e3
--- /dev/null
+++ b/src/vnet/mpls/mpls_tunnel.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MPLS_TUNNEL_H__
+#define __MPLS_TUNNEL_H__
+
+#include <vnet/mpls/mpls.h>
+
+/**
+ * @brief A uni-directional MPLS tunnel
+ */
+typedef struct mpls_tunnel_t_
+{
+ /**
+ * @brief The tunnel hooks into the FIB control plane graph.
+ */
+ fib_node_t mt_node;
+
+ /**
+ * @brief If the tunnel is an L2 tunnel, this is the link type ETHERNET
+ * adjacency
+ */
+ adj_index_t mt_l2_adj;
+
+ /**
+ * @brief On an L2 tunnel this is the VLIB arc from the L2 TX node to the l2-midchain
+ */
+ u32 mt_l2_tx_arc;
+
+ /**
+ * @brief The path-list over which the tunnel's destination is reachable
+ */
+ fib_node_index_t mt_path_list;
+
+ /**
+ * @brief sibling index on the path-list so notifications are received.
+ */
+ u32 mt_sibling_index;
+
+ /**
+ * @brief The Label stack to apply to egress packets
+ */
+ mpls_label_t *mt_label_stack;
+
+ /**
+ * @brief Flag to indicate the tunnel is only for L2 traffic, that is
+ * this tunnel belongs in a bridge domain.
+ */
+ u8 mt_l2_only;
+
+ /**
+ * @brief The HW interface index of the tunnel interfaces
+ */
+ u32 mt_hw_if_index;
+
+ /**
+ * @brief The SW interface index of the tunnel interfaces
+ */
+ u32 mt_sw_if_index;
+
+} mpls_tunnel_t;
+
+/**
+ * @brief Create a new MPLS tunnel
+ */
+extern void vnet_mpls_tunnel_add (fib_route_path_t *rpath,
+ mpls_label_t *label_stack,
+ u8 l2_only,
+ u32 *sw_if_index);
+
+extern void vnet_mpls_tunnel_del (u32 sw_if_index);
+
+extern const mpls_tunnel_t *mpls_tunnel_get(u32 index);
+
+/**
+ * @brief Callback function invoked while walking MPLS tunnels
+ */
+typedef void (*mpls_tunnel_walk_cb_t)(u32 index, void *ctx);
+
+/**
+ * @brief Walk all the MPLS tunnels
+ */
+extern void mpls_tunnel_walk(mpls_tunnel_walk_cb_t cb,
+ void *ctx);
+
+#endif
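+
+/*
+ * A minimal sketch of walking all tunnels with the callback type above
+ * (callback name illustrative):
+ *
+ *    static void
+ *    count_tunnel_cb (u32 index, void *ctx)
+ *    {
+ *      u32 *count = ctx;
+ *      *count += 1;
+ *    }
+ *
+ *    u32 n_tunnels = 0;
+ *    mpls_tunnel_walk (count_tunnel_cb, &n_tunnels);
+ */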
diff --git a/src/vnet/mpls/mpls_types.h b/src/vnet/mpls/mpls_types.h
new file mode 100644
index 00000000000..d7c629df832
--- /dev/null
+++ b/src/vnet/mpls/mpls_types.h
@@ -0,0 +1,39 @@
+#ifndef __MPLS_TYPES_H__
+#define __MPLS_TYPES_H__
+
+#define MPLS_IETF_MIN_LABEL 0x00000
+#define MPLS_IETF_MAX_LABEL 0xfffff
+
+#define MPLS_IETF_MIN_RESERVED_LABEL 0x00000
+#define MPLS_IETF_MAX_RESERVED_LABEL 0x0000f
+
+#define MPLS_IETF_MIN_UNRES_LABEL 0x00010
+#define MPLS_IETF_MAX_UNRES_LABEL 0xfffff
+
+#define MPLS_IETF_IPV4_EXPLICIT_NULL_LABEL 0x00000
+#define MPLS_IETF_ROUTER_ALERT_LABEL 0x00001
+#define MPLS_IETF_IPV6_EXPLICIT_NULL_LABEL 0x00002
+#define MPLS_IETF_IMPLICIT_NULL_LABEL 0x00003
+#define MPLS_IETF_ELI_LABEL 0x00007
+#define MPLS_IETF_GAL_LABEL 0x0000D
+
+#define MPLS_IETF_IPV4_EXPLICIT_NULL_STRING "ip4-explicit-null"
+#define MPLS_IETF_IPV4_EXPLICIT_NULL_BRIEF_STRING "e-nul"
+#define MPLS_IETF_IMPLICIT_NULL_STRING "implicit-null"
+#define MPLS_IETF_IMPLICIT_NULL_BRIEF_STRING "i-nul"
+#define MPLS_IETF_ROUTER_ALERT_STRING "router-alert"
+#define MPLS_IETF_ROUTER_ALERT_BRIEF_STRING "r-alt"
+#define MPLS_IETF_IPV6_EXPLICIT_NULL_STRING "ipv6-explicit-null"
+#define MPLS_IETF_IPV6_EXPLICIT_NULL_BRIEF_STRING "v6enl"
+#define MPLS_IETF_ELI_STRING "entropy-label-indicator"
+#define MPLS_IETF_ELI_BRIEF_STRING "eli"
+#define MPLS_IETF_GAL_STRING "gal"
+#define MPLS_IETF_GAL_BRIEF_STRING "gal"
+
+#define MPLS_LABEL_INVALID (MPLS_IETF_MAX_LABEL+1)
+
+#define MPLS_LABEL_IS_REAL(_lbl) \
+ (((_lbl) > MPLS_IETF_MIN_UNRES_LABEL) && \
+ ((_lbl) <= MPLS_IETF_MAX_UNRES_LABEL))
+
+#endif
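+
+/*
+ * Worked examples of the macros above (values follow directly from the
+ * definitions):
+ *
+ *    MPLS_LABEL_INVALID                                  == 0x100000
+ *    MPLS_LABEL_IS_REAL (MPLS_IETF_IMPLICIT_NULL_LABEL)  -> 0 (reserved)
+ *    MPLS_LABEL_IS_REAL (33)                             -> 1
+ */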
diff --git a/src/vnet/mpls/node.c b/src/vnet/mpls/node.c
new file mode 100644
index 00000000000..1810091252e
--- /dev/null
+++ b/src/vnet/mpls/node.c
@@ -0,0 +1,303 @@
+/*
+ * node.c: MPLS input
+ *
+ * Copyright (c) 2012-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/mpls/mpls.h>
+#include <vnet/feature/feature.h>
+
+typedef struct {
+ u32 next_index;
+ u32 label_host_byte_order;
+} mpls_input_trace_t;
+
+static u8 *
+format_mpls_input_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ mpls_input_trace_t * t = va_arg (*args, mpls_input_trace_t *);
+ char * next_name;
+
+ next_name = "BUG!";
+
+#define _(a,b) if (t->next_index == MPLS_INPUT_NEXT_##a) next_name = b;
+ foreach_mpls_input_next;
+#undef _
+
+ s = format (s, "MPLS: next %s[%d] label %d ttl %d",
+ next_name, t->next_index,
+ vnet_mpls_uc_get_label(t->label_host_byte_order),
+ vnet_mpls_uc_get_ttl(t->label_host_byte_order));
+
+ return s;
+}
+
+vlib_node_registration_t mpls_input_node;
+
+typedef struct {
+ u32 last_label;
+ u32 last_inner_fib_index;
+ u32 last_outer_fib_index;
+ mpls_main_t * mpls_main;
+} mpls_input_runtime_t;
+
+static inline uword
+mpls_input_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+ mpls_input_runtime_t * rt;
+ mpls_main_t * mm;
+ u32 cpu_index = os_get_cpu_number();
+ vlib_simple_counter_main_t * cm;
+ vnet_main_t * vnm = vnet_get_main();
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+ rt = vlib_node_get_runtime_data (vm, mpls_input_node.index);
+ mm = rt->mpls_main;
+ /*
+ * Force an initial lookup every time, in case the control-plane
+ * changed the label->FIB mapping.
+ */
+ rt->last_label = ~0;
+
+ next_index = node->cached_next_index;
+
+ cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
+ VNET_INTERFACE_COUNTER_MPLS);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 label0, bi0, next0, sw_if_index0;
+ u32 label1, bi1, next1, sw_if_index1;
+ mpls_unicast_header_t *h0, *h1;
+ vlib_buffer_t *b0, *b1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, STORE);
+ vlib_prefetch_buffer_header (p3, STORE);
+
+ CLIB_PREFETCH (p2->data, sizeof (h0[0]), STORE);
+ CLIB_PREFETCH (p3->data, sizeof (h1[0]), STORE);
+ }
+
+
+ bi0 = to_next[0] = from[0];
+ bi1 = to_next[1] = from[1];
+
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ h0 = vlib_buffer_get_current (b0);
+ h1 = vlib_buffer_get_current (b1);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+ label0 = clib_net_to_host_u32 (h0->label_exp_s_ttl);
+ label1 = clib_net_to_host_u32 (h1->label_exp_s_ttl);
+
+ /* TTL expired? */
+ if (PREDICT_FALSE(vnet_mpls_uc_get_ttl (label0) == 0))
+ {
+ next0 = MPLS_INPUT_NEXT_DROP;
+ b0->error = node->errors[MPLS_ERROR_TTL_EXPIRED];
+ }
+ else
+ {
+ next0 = MPLS_INPUT_NEXT_LOOKUP;
+ vnet_feature_arc_start(mm->input_feature_arc_index, sw_if_index0, &next0, b0);
+ vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1);
+ }
+
+ if (PREDICT_FALSE(vnet_mpls_uc_get_ttl (label1) == 0))
+ {
+ next1 = MPLS_INPUT_NEXT_DROP;
+ b1->error = node->errors[MPLS_ERROR_TTL_EXPIRED];
+ }
+ else
+ {
+ next1 = MPLS_INPUT_NEXT_LOOKUP;
+ vnet_feature_arc_start(mm->input_feature_arc_index, sw_if_index1, &next1, b1);
+ vlib_increment_simple_counter (cm, cpu_index, sw_if_index1, 1);
+ }
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_input_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->label_host_byte_order = label0;
+ }
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_input_trace_t *tr = vlib_add_trace (vm, node,
+ b1, sizeof (*tr));
+ tr->next_index = next1;
+ tr->label_host_byte_order = label1;
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ mpls_unicast_header_t * h0;
+ u32 label0;
+ u32 next0 = 0;
+ u32 sw_if_index0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = vlib_buffer_get_current (b0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ label0 = clib_net_to_host_u32 (h0->label_exp_s_ttl);
+ /* TTL expired? */
+ if (PREDICT_FALSE(vnet_mpls_uc_get_ttl (label0) == 0))
+ {
+ next0 = MPLS_INPUT_NEXT_DROP;
+ b0->error = node->errors[MPLS_ERROR_TTL_EXPIRED];
+ }
+ else
+ {
+ next0 = MPLS_INPUT_NEXT_LOOKUP;
+ vnet_feature_arc_start(mm->input_feature_arc_index, sw_if_index0, &next0, b0);
+ vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1);
+ }
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_input_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->label_host_byte_order = label0;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, mpls_input_node.index,
+ MPLS_ERROR_PKTS_DECAP, from_frame->n_vectors);
+ return from_frame->n_vectors;
+}
+
+static uword
+mpls_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return mpls_input_inline (vm, node, from_frame);
+}
+
+static char * mpls_error_strings[] = {
+#define mpls_error(n,s) s,
+#include "error.def"
+#undef mpls_error
+};
+
+VLIB_REGISTER_NODE (mpls_input_node) = {
+ .function = mpls_input,
+ .name = "mpls-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .runtime_data_bytes = sizeof(mpls_input_runtime_t),
+
+ .n_errors = MPLS_N_ERROR,
+ .error_strings = mpls_error_strings,
+
+ .n_next_nodes = MPLS_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [MPLS_INPUT_NEXT_##s] = n,
+ foreach_mpls_input_next
+#undef _
+ },
+
+ .format_buffer = format_mpls_unicast_header_net_byte_order,
+ .format_trace = format_mpls_input_trace,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (mpls_input_node, mpls_input)
+
+static void
+mpls_setup_nodes (vlib_main_t * vm)
+{
+ mpls_input_runtime_t * rt;
+ pg_node_t * pn;
+
+ pn = pg_get_node (mpls_input_node.index);
+ pn->unformat_edit = unformat_pg_mpls_header;
+
+ rt = vlib_node_get_runtime_data (vm, mpls_input_node.index);
+ rt->last_label = (u32) ~0;
+ rt->last_inner_fib_index = 0;
+ rt->last_outer_fib_index = 0;
+ rt->mpls_main = &mpls_main;
+
+ ethernet_register_input_type (vm, ETHERNET_TYPE_MPLS_UNICAST,
+ mpls_input_node.index);
+}
+
+static clib_error_t * mpls_input_init (vlib_main_t * vm)
+{
+ clib_error_t * error;
+
+ error = vlib_call_init_function (vm, mpls_init);
+ if (error)
+ clib_error_report (error);
+
+ mpls_setup_nodes (vm);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (mpls_input_init);
diff --git a/src/vnet/mpls/packet.h b/src/vnet/mpls/packet.h
new file mode 100644
index 00000000000..bc67445be89
--- /dev/null
+++ b/src/vnet/mpls/packet.h
@@ -0,0 +1,125 @@
+#ifndef included_vnet_mpls_packet_h
+#define included_vnet_mpls_packet_h
+
+/*
+ * MPLS packet format
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * A label value only, i.e. 20bits.
+ */
+typedef u32 mpls_label_t;
+
+typedef struct {
+ /* Label: top 20 bits [in network byte order] */
+ /* Experimental: 3 bits ... */
+ /* S (bottom of label stack): 1 bit */
+ /* TTL: 8 bits */
+ mpls_label_t label_exp_s_ttl;
+} mpls_unicast_header_t;
+
+typedef enum mpls_eos_bit_t_
+{
+ MPLS_NON_EOS = 0,
+ MPLS_EOS = 1,
+} mpls_eos_bit_t;
+
+#define MPLS_EOS_BITS { \
+ [MPLS_NON_EOS] = "neos", \
+ [MPLS_EOS] = "eos", \
+}
+
+#define FOR_EACH_MPLS_EOS_BIT(_eos) \
+ for (_eos = MPLS_NON_EOS; _eos <= MPLS_EOS; _eos++)
+
+#define MPLS_ENTRY_LABEL_OFFSET 0
+#define MPLS_ENTRY_LABEL_SHIFT 12
+#define MPLS_ENTRY_LABEL_MASK 0x000fffff
+#define MPLS_ENTRY_LABEL_BITS \
+ (MPLS_ENTRY_LABEL_MASK << MPLS_ENTRY_LABEL_SHIFT)
+
+#define MPLS_ENTRY_EXP_OFFSET 2 /* byte offset to EXP bits */
+#define MPLS_ENTRY_EXP_SHIFT 9
+#define MPLS_ENTRY_EXP_MASK 0x07
+#define MPLS_ENTRY_EXP(mpls) \
+ (((mpls)>>MPLS_ENTRY_EXP_SHIFT) & MPLS_ENTRY_EXP_MASK)
+#define MPLS_ENTRY_EXP_BITS \
+ (MPLS_ENTRY_EXP_MASK << MPLS_ENTRY_EXP_SHIFT)
+
+#define MPLS_ENTRY_EOS_OFFSET 2 /* byte offset to EOS bit */
+#define MPLS_ENTRY_EOS_SHIFT 8
+#define MPLS_ENTRY_EOS_MASK 0x01 /* EOS bit in its byte */
+#define MPLS_ENTRY_EOS(mpls) \
+ (((mpls) >> MPLS_ENTRY_EOS_SHIFT) & MPLS_ENTRY_EOS_MASK)
+#define MPLS_ENTRY_EOS_BIT (MPLS_ENTRY_EOS_MASK << MPLS_ENTRY_EOS_SHIFT)
+
+#define MPLS_ENTRY_TTL_OFFSET 3 /* byte offset to ttl field */
+#define MPLS_ENTRY_TTL_SHIFT 0
+#define MPLS_ENTRY_TTL_MASK 0xff
+#define MPLS_ENTRY_TTL(mpls) \
+ (((mpls) >> MPLS_ENTRY_TTL_SHIFT) & MPLS_ENTRY_TTL_MASK)
+#define MPLS_ENTRY_TTL_BITS \
+ (MPLS_ENTRY_TTL_MASK << MPLS_ENTRY_TTL_SHIFT)
+
+static inline u32 vnet_mpls_uc_get_label (mpls_label_t label_exp_s_ttl)
+{
+ return (label_exp_s_ttl>>MPLS_ENTRY_LABEL_SHIFT);
+}
+
+static inline u32 vnet_mpls_uc_get_exp (mpls_label_t label_exp_s_ttl)
+{
+ return (MPLS_ENTRY_EXP(label_exp_s_ttl));
+}
+
+static inline u32 vnet_mpls_uc_get_s (mpls_label_t label_exp_s_ttl)
+{
+ return (MPLS_ENTRY_EOS(label_exp_s_ttl));
+}
+
+static inline u32 vnet_mpls_uc_get_ttl (mpls_label_t label_exp_s_ttl)
+{
+ return (MPLS_ENTRY_TTL(label_exp_s_ttl));
+}
+
+static inline void vnet_mpls_uc_set_label (mpls_label_t *label_exp_s_ttl,
+ u32 value)
+{
+ *label_exp_s_ttl = (((*label_exp_s_ttl) & ~(MPLS_ENTRY_LABEL_BITS)) |
+ ((value & MPLS_ENTRY_LABEL_MASK) << MPLS_ENTRY_LABEL_SHIFT));
+}
+
+static inline void vnet_mpls_uc_set_exp (mpls_label_t *label_exp_s_ttl,
+ u32 exp)
+{
+ *label_exp_s_ttl = (((*label_exp_s_ttl) & ~(MPLS_ENTRY_EXP_BITS)) |
+ ((exp & MPLS_ENTRY_EXP_MASK) << MPLS_ENTRY_EXP_SHIFT));
+}
+
+static inline void vnet_mpls_uc_set_s (mpls_label_t *label_exp_s_ttl,
+ u32 eos)
+{
+ *label_exp_s_ttl = (((*label_exp_s_ttl) & ~(MPLS_ENTRY_EOS_BIT)) |
+ ((eos & MPLS_ENTRY_EOS_MASK) << MPLS_ENTRY_EOS_SHIFT));
+}
+
+static inline void vnet_mpls_uc_set_ttl (mpls_label_t *label_exp_s_ttl,
+ u32 ttl)
+{
+ *label_exp_s_ttl = (((*label_exp_s_ttl) & ~(MPLS_ENTRY_TTL_BITS)) |
+ ((ttl & MPLS_ENTRY_TTL_MASK)));
+}
+
+#endif /* included_vnet_mpls_packet_h */
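+
+/*
+ * A small round-trip sketch with the accessors above (the word stays in
+ * host byte order until an explicit swap for the wire):
+ *
+ *    mpls_label_t w = 0;
+ *    vnet_mpls_uc_set_label (&w, 0xfffff);   // max 20-bit label
+ *    vnet_mpls_uc_set_exp (&w, 5);
+ *    vnet_mpls_uc_set_s (&w, MPLS_EOS);
+ *    vnet_mpls_uc_set_ttl (&w, 64);
+ *
+ *    ASSERT (vnet_mpls_uc_get_label (w) == 0xfffff);
+ *    ASSERT (vnet_mpls_uc_get_exp (w) == 5);
+ *    ASSERT (vnet_mpls_uc_get_s (w) == MPLS_EOS);
+ *    ASSERT (vnet_mpls_uc_get_ttl (w) == 64);
+ */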
diff --git a/src/vnet/mpls/pg.c b/src/vnet/mpls/pg.c
new file mode 100644
index 00000000000..6ff86e32f67
--- /dev/null
+++ b/src/vnet/mpls/pg.c
@@ -0,0 +1,71 @@
+/*
+ * pg.c: packet generator mpls interface
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/gre/gre.h>
+#include <vnet/mpls/mpls.h>
+
+typedef struct {
+ pg_edit_t label;
+} pg_mpls_header_t;
+
+static inline void
+pg_mpls_header_init (pg_mpls_header_t * e)
+{
+ pg_edit_init (&e->label, mpls_unicast_header_t, label_exp_s_ttl);
+}
+
+uword
+unformat_pg_mpls_header (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t * s = va_arg (*args, pg_stream_t *);
+ pg_mpls_header_t * h;
+ vlib_main_t * vm = vlib_get_main();
+ u32 group_index, error;
+
+ h = pg_create_edit_group (s, sizeof (h[0]), sizeof (mpls_unicast_header_t),
+ &group_index);
+ pg_mpls_header_init (h);
+
+ error = 1;
+ if (! unformat (input, "%U",
+ unformat_pg_edit,
+ unformat_mpls_label_net_byte_order, &h->label))
+ goto done;
+
+ {
+ pg_node_t * pg_node = 0;
+ vlib_node_t * ip_lookup_node;
+
+ ip_lookup_node = vlib_get_node_by_name (vm, (u8 *)"ip4-input");
+ ASSERT (ip_lookup_node);
+
+ pg_node = pg_get_node (ip_lookup_node->index);
+
+ if (pg_node && pg_node->unformat_edit
+ && unformat_user (input, pg_node->unformat_edit, s))
+ ;
+ }
+
+ error = 0;
+ done:
+ if (error)
+ pg_free_edit_group (s);
+ return error == 0;
+}
+
diff --git a/src/vnet/osi/node.c b/src/vnet/osi/node.c
new file mode 100644
index 00000000000..12075aa87d1
--- /dev/null
+++ b/src/vnet/osi/node.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * osi_node.c: osi packet processing
+ *
+ * Copyright (c) 2010 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/osi/osi.h>
+#include <vnet/ppp/ppp.h>
+#include <vnet/hdlc/hdlc.h>
+#include <vnet/llc/llc.h>
+
+#define foreach_osi_input_next \
+ _ (PUNT, "error-punt") \
+ _ (DROP, "error-drop")
+
+typedef enum
+{
+#define _(s,n) OSI_INPUT_NEXT_##s,
+ foreach_osi_input_next
+#undef _
+ OSI_INPUT_N_NEXT,
+} osi_input_next_t;
+
+typedef struct
+{
+ u8 packet_data[32];
+} osi_input_trace_t;
+
+static u8 *
+format_osi_input_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ osi_input_trace_t *t = va_arg (*va, osi_input_trace_t *);
+
+ s = format (s, "%U", format_osi_header, t->packet_data);
+
+ return s;
+}
+
+static uword
+osi_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ osi_main_t *lm = &osi_main;
+ u32 n_left_from, next_index, *from, *to_next;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node,
+ from,
+ n_left_from,
+ sizeof (from[0]),
+ sizeof (osi_input_trace_t));
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ osi_header_t *h0, *h1;
+ u8 next0, next1, enqueue_code;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *b2, *b3;
+
+ b2 = vlib_get_buffer (vm, from[2]);
+ b3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (b2, LOAD);
+ vlib_prefetch_buffer_header (b3, LOAD);
+
+ CLIB_PREFETCH (b2->data, sizeof (h0[0]), LOAD);
+ CLIB_PREFETCH (b3->data, sizeof (h1[0]), LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ h0 = (void *) (b0->data + b0->current_data);
+ h1 = (void *) (b1->data + b1->current_data);
+
+ next0 = lm->input_next_by_protocol[h0->protocol];
+ next1 = lm->input_next_by_protocol[h1->protocol];
+
+ b0->error =
+ node->errors[next0 ==
+ OSI_INPUT_NEXT_DROP ? OSI_ERROR_UNKNOWN_PROTOCOL :
+ OSI_ERROR_NONE];
+ b1->error =
+ node->errors[next1 ==
+ OSI_INPUT_NEXT_DROP ? OSI_ERROR_UNKNOWN_PROTOCOL :
+ OSI_ERROR_NONE];
+
+ enqueue_code = (next0 != next_index) + 2 * (next1 != next_index);
+
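+ /* enqueue_code is a two-bit dispatch: bit 0 is set when b0's next
+ * differs from the speculated next_index, bit 1 likewise for b1;
+ * zero means both buffers already sit in the right frame and no
+ * correction is needed. */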
+ if (PREDICT_FALSE (enqueue_code != 0))
+ {
+ switch (enqueue_code)
+ {
+ case 1:
+ /* A B A */
+ to_next[-2] = bi1;
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node, next0, bi0);
+ break;
+
+ case 2:
+ /* A A B */
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node, next1, bi1);
+ break;
+
+ case 3:
+ /* A B B or A B C */
+ to_next -= 2;
+ n_left_to_next += 2;
+ vlib_set_next_frame_buffer (vm, node, next0, bi0);
+ vlib_set_next_frame_buffer (vm, node, next1, bi1);
+ if (next0 == next1)
+ {
+ vlib_put_next_frame (vm, node, next_index,
+ n_left_to_next);
+ next_index = next1;
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+ }
+ }
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ osi_header_t *h0;
+ u8 next0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ h0 = (void *) (b0->data + b0->current_data);
+
+ next0 = lm->input_next_by_protocol[h0->protocol];
+
+ b0->error =
+ node->errors[next0 ==
+ OSI_INPUT_NEXT_DROP ? OSI_ERROR_UNKNOWN_PROTOCOL :
+ OSI_ERROR_NONE];
+
+ /* Sent packet to wrong next? */
+ if (PREDICT_FALSE (next0 != next_index))
+ {
+ /* Return old frame; remove incorrectly enqueued packet. */
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1);
+
+ /* Send to correct next. */
+ next_index = next0;
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+
+ to_next[0] = bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return from_frame->n_vectors;
+}
+
+static char *osi_error_strings[] = {
+#define _(f,s) s,
+ foreach_osi_error
+#undef _
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (osi_input_node) = {
+ .function = osi_input,
+ .name = "osi-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_errors = OSI_N_ERROR,
+ .error_strings = osi_error_strings,
+
+ .n_next_nodes = OSI_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [OSI_INPUT_NEXT_##s] = n,
+ foreach_osi_input_next
+#undef _
+ },
+
+ .format_buffer = format_osi_header_with_length,
+ .format_trace = format_osi_input_trace,
+ .unformat_buffer = unformat_osi_header,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+osi_input_init (vlib_main_t * vm)
+{
+ clib_error_t *error = 0;
+ osi_main_t *lm = &osi_main;
+
+ if ((error = vlib_call_init_function (vm, osi_init)))
+ return error;
+
+ osi_setup_node (vm, osi_input_node.index);
+
+ {
+ int i;
+ for (i = 0; i < ARRAY_LEN (lm->input_next_by_protocol); i++)
+ lm->input_next_by_protocol[i] = OSI_INPUT_NEXT_DROP;
+ }
+
+ ppp_register_input_protocol (vm, PPP_PROTOCOL_osi, osi_input_node.index);
+ hdlc_register_input_protocol (vm, HDLC_PROTOCOL_osi, osi_input_node.index);
+ llc_register_input_protocol (vm, LLC_PROTOCOL_osi_layer1,
+ osi_input_node.index);
+ llc_register_input_protocol (vm, LLC_PROTOCOL_osi_layer2,
+ osi_input_node.index);
+ llc_register_input_protocol (vm, LLC_PROTOCOL_osi_layer3,
+ osi_input_node.index);
+ llc_register_input_protocol (vm, LLC_PROTOCOL_osi_layer4,
+ osi_input_node.index);
+ llc_register_input_protocol (vm, LLC_PROTOCOL_osi_layer5,
+ osi_input_node.index);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (osi_input_init);
+
+void
+osi_register_input_protocol (osi_protocol_t protocol, u32 node_index)
+{
+ osi_main_t *lm = &osi_main;
+ vlib_main_t *vm = lm->vlib_main;
+ osi_protocol_info_t *pi;
+
+ {
+ clib_error_t *error = vlib_call_init_function (vm, osi_input_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ pi = osi_get_protocol_info (lm, protocol);
+ pi->node_index = node_index;
+ pi->next_index = vlib_node_add_next (vm, osi_input_node.index, node_index);
+
+ lm->input_next_by_protocol[protocol] = pi->next_index;
+}
+
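+/*
+ * A hedged usage sketch of the registration function above (the target
+ * node name is illustrative):
+ *
+ *    vlib_node_t *n = vlib_get_node_by_name (vm, (u8 *) "my-clnp-input");
+ *    osi_register_input_protocol (OSI_PROTOCOL_clnp, n->index);
+ *
+ * Subsequent CLNP packets reaching osi-input are then enqueued to that
+ * node via the next index recorded in input_next_by_protocol.
+ */
+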
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/osi/osi.c b/src/vnet/osi/osi.c
new file mode 100644
index 00000000000..34c867f1bf5
--- /dev/null
+++ b/src/vnet/osi/osi.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * osi.c: osi support
+ *
+ * Copyright (c) 2010 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/osi/osi.h>
+
+/* Global main structure. */
+osi_main_t osi_main;
+
+u8 *
+format_osi_protocol (u8 * s, va_list * args)
+{
+ osi_protocol_t p = va_arg (*args, u32);
+ osi_main_t *pm = &osi_main;
+ osi_protocol_info_t *pi = osi_get_protocol_info (pm, p);
+
+ if (pi)
+ s = format (s, "%s", pi->name);
+ else
+ s = format (s, "0x%02x", p);
+
+ return s;
+}
+
+u8 *
+format_osi_header_with_length (u8 * s, va_list * args)
+{
+ osi_main_t *pm = &osi_main;
+ osi_header_t *h = va_arg (*args, osi_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+ osi_protocol_t p = h->protocol;
+ uword indent, header_bytes;
+
+ header_bytes = sizeof (h[0]);
+ if (max_header_bytes != 0 && header_bytes > max_header_bytes)
+ return format (s, "osi header truncated");
+
+ indent = format_get_indent (s);
+
+ s = format (s, "OSI %U", format_osi_protocol, p);
+
+ if (max_header_bytes != 0 && header_bytes < max_header_bytes)
+ {
+ osi_protocol_info_t *pi = osi_get_protocol_info (pm, p);
+ vlib_node_t *node = vlib_get_node (pm->vlib_main, pi->node_index);
+ if (node->format_buffer)
+ s = format (s, "\n%U%U",
+ format_white_space, indent,
+ node->format_buffer, (void *) (h + 1),
+ max_header_bytes - header_bytes);
+ }
+
+ return s;
+}
+
+u8 *
+format_osi_header (u8 * s, va_list * args)
+{
+ osi_header_t *h = va_arg (*args, osi_header_t *);
+ return format (s, "%U", format_osi_header_with_length, h, 0);
+}
+
+/* Returns osi protocol as an int in host byte order. */
+uword
+unformat_osi_protocol (unformat_input_t * input, va_list * args)
+{
+ u8 *result = va_arg (*args, u8 *);
+ osi_main_t *pm = &osi_main;
+ int p, i;
+
+ /* Numeric type. */
+ if (unformat (input, "0x%x", &p) || unformat (input, "%d", &p))
+ {
+ if (p >= (1 << 8))
+ return 0;
+ *result = p;
+ return 1;
+ }
+
+ /* Named type. */
+ if (unformat_user (input, unformat_vlib_number_by_name,
+ pm->protocol_info_by_name, &i))
+ {
+ osi_protocol_info_t *pi = vec_elt_at_index (pm->protocol_infos, i);
+ *result = pi->protocol;
+ return 1;
+ }
+
+ return 0;
+}
+
+uword
+unformat_osi_header (unformat_input_t * input, va_list * args)
+{
+ u8 **result = va_arg (*args, u8 **);
+ osi_header_t _h, *h = &_h;
+ u8 p;
+
+ if (!unformat (input, "%U", unformat_osi_protocol, &p))
+ return 0;
+
+ h->protocol = p;
+
+ /* Add header to result. */
+ {
+ void *p;
+ u32 n_bytes = sizeof (h[0]);
+
+ vec_add2 (*result, p, n_bytes);
+ clib_memcpy (p, h, n_bytes);
+ }
+
+ return 1;
+}
+
+static void
+add_protocol (osi_main_t * pm, osi_protocol_t protocol, char *protocol_name)
+{
+ osi_protocol_info_t *pi;
+ u32 i;
+
+ vec_add2 (pm->protocol_infos, pi, 1);
+ i = pi - pm->protocol_infos;
+
+ pi->name = protocol_name;
+ pi->protocol = protocol;
+ pi->next_index = pi->node_index = ~0;
+
+ hash_set (pm->protocol_info_by_protocol, protocol, i);
+ hash_set_mem (pm->protocol_info_by_name, pi->name, i);
+}
+
+static clib_error_t *
+osi_init (vlib_main_t * vm)
+{
+ clib_error_t *error = 0;
+ osi_main_t *pm = &osi_main;
+
+ /* init order dependency: llc_init -> osi_init */
+ if ((error = vlib_call_init_function (vm, llc_init)))
+ return error;
+
+ memset (pm, 0, sizeof (pm[0]));
+ pm->vlib_main = vm;
+
+ pm->protocol_info_by_name = hash_create_string (0, sizeof (uword));
+ pm->protocol_info_by_protocol = hash_create (0, sizeof (uword));
+
+#define _(f,n) add_protocol (pm, OSI_PROTOCOL_##f, #f);
+ foreach_osi_protocol;
+#undef _
+
+ return vlib_call_init_function (vm, osi_input_init);
+}
+
+VLIB_INIT_FUNCTION (osi_init);
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/osi/osi.h b/src/vnet/osi/osi.h
new file mode 100644
index 00000000000..ee21f0c3d68
--- /dev/null
+++ b/src/vnet/osi/osi.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * osi.h: OSI definitions
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_osi_h
+#define included_osi_h
+
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+
+#define foreach_osi_protocol \
+ _ (null, 0x0) \
+ _ (x_29, 0x01) \
+ _ (x_633, 0x03) \
+ _ (q_931, 0x08) \
+ _ (q_933, 0x08) \
+ _ (q_2931, 0x09) \
+ _ (q_2119, 0x0c) \
+ _ (snap, 0x80) \
+ _ (clnp, 0x81) \
+ _ (esis, 0x82) \
+ _ (isis, 0x83) \
+ _ (idrp, 0x85) \
+ _ (x25_esis, 0x8a) \
+ _ (iso10030, 0x8c) \
+ _ (iso11577, 0x8d) \
+ _ (ip6, 0x8e) \
+ _ (compressed, 0xb0) \
+ _ (sndcf, 0xc1) \
+ _ (ip4, 0xcc) \
+ _ (ppp, 0xcf)
+
+typedef enum
+{
+#define _(f,n) OSI_PROTOCOL_##f = n,
+ foreach_osi_protocol
+#undef _
+} osi_protocol_t;
+
+typedef struct
+{
+ u8 protocol;
+
+ u8 payload[0];
+} osi_header_t;
+
+typedef struct
+{
+ /* Name (a c string). */
+ char *name;
+
+ /* OSI protocol (SAP type). */
+ osi_protocol_t protocol;
+
+ /* Node which handles this type. */
+ u32 node_index;
+
+ /* Next index for this type. */
+ u32 next_index;
+} osi_protocol_info_t;
+
+#define foreach_osi_error \
+ _ (NONE, "no error") \
+ _ (UNKNOWN_PROTOCOL, "unknown osi protocol")
+
+typedef enum
+{
+#define _(f,s) OSI_ERROR_##f,
+ foreach_osi_error
+#undef _
+ OSI_N_ERROR,
+} osi_error_t;
+
+typedef struct
+{
+ vlib_main_t *vlib_main;
+
+ osi_protocol_info_t *protocol_infos;
+
+ /* Hash tables mapping name/protocol to protocol info index. */
+ uword *protocol_info_by_name, *protocol_info_by_protocol;
+
+ /* osi-input next index indexed by protocol. */
+ u8 input_next_by_protocol[256];
+} osi_main_t;
+
+always_inline osi_protocol_info_t *
+osi_get_protocol_info (osi_main_t * m, osi_protocol_t protocol)
+{
+ uword *p = hash_get (m->protocol_info_by_protocol, protocol);
+ return p ? vec_elt_at_index (m->protocol_infos, p[0]) : 0;
+}
+
+extern osi_main_t osi_main;
+
+/* Register given node index to take input for given osi type. */
+void osi_register_input_protocol (osi_protocol_t protocol, u32 node_index);
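+/* Typical use, from a dissector's init function (the node name here is
+   illustrative only):
+     osi_register_input_protocol (OSI_PROTOCOL_isis, isis_input_node.index);
+*/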
+
+void osi_set_adjacency (vnet_rewrite_header_t * rw,
+ uword max_data_bytes, osi_protocol_t protocol);
+
+format_function_t format_osi_protocol;
+format_function_t format_osi_header;
+format_function_t format_osi_header_with_length;
+
+/* Parse osi protocol as 0xXXXX or protocol name. */
+unformat_function_t unformat_osi_protocol;
+
+/* Parse osi header. */
+unformat_function_t unformat_osi_header;
+unformat_function_t unformat_pg_osi_header;
+
+always_inline void
+osi_setup_node (vlib_main_t * vm, u32 node_index)
+{
+ vlib_node_t *n = vlib_get_node (vm, node_index);
+ pg_node_t *pn = pg_get_node (node_index);
+
+ n->format_buffer = format_osi_header_with_length;
+ n->unformat_buffer = unformat_osi_header;
+ pn->unformat_edit = unformat_pg_osi_header;
+}
+
+#endif /* included_osi_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/osi/pg.c b/src/vnet/osi/pg.c
new file mode 100644
index 00000000000..c87a869b28d
--- /dev/null
+++ b/src/vnet/osi/pg.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * osi_pg.c: packet generator osi interface
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/osi/osi.h>
+
+typedef struct
+{
+ pg_edit_t protocol;
+} pg_osi_header_t;
+
+static inline void
+pg_osi_header_init (pg_osi_header_t * e)
+{
+ pg_edit_init (&e->protocol, osi_header_t, protocol);
+}
+
+uword
+unformat_pg_osi_header (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t *s = va_arg (*args, pg_stream_t *);
+ pg_osi_header_t *h;
+ u32 group_index, error;
+
+ h = pg_create_edit_group (s, sizeof (h[0]), sizeof (osi_header_t),
+ &group_index);
+ pg_osi_header_init (h);
+
+ error = 1;
+ if (!unformat (input, "%U",
+ unformat_pg_edit, unformat_osi_protocol, &h->protocol))
+ goto done;
+
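+  /* If the protocol edit is a single fixed value, chain to that protocol
+     node's own unformat_edit so a higher-layer header may follow;
+     otherwise fall back to parsing a raw payload. */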
+ {
+ osi_main_t *pm = &osi_main;
+ osi_protocol_info_t *pi = 0;
+ pg_node_t *pg_node = 0;
+
+ if (h->protocol.type == PG_EDIT_FIXED)
+ {
+ u8 t = *h->protocol.values[PG_EDIT_LO];
+ pi = osi_get_protocol_info (pm, t);
+ if (pi && pi->node_index != ~0)
+ pg_node = pg_get_node (pi->node_index);
+ }
+
+ if (pg_node && pg_node->unformat_edit
+ && unformat_user (input, pg_node->unformat_edit, s))
+ ;
+
+ else if (!unformat_user (input, unformat_pg_payload, s))
+ goto done;
+ }
+
+ error = 0;
+done:
+ if (error)
+ pg_free_edit_group (s);
+ return error == 0;
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/pg/cli.c b/src/vnet/pg/cli.c
new file mode 100644
index 00000000000..f5896b4326e
--- /dev/null
+++ b/src/vnet/pg/cli.c
@@ -0,0 +1,636 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * pg_cli.c: packet generator cli
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/stat.h>
+
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+
+#ifdef CLIB_UNIX
+#include <vnet/unix/pcap.h>
+#endif
+
+/* Root of all packet generator cli commands. */
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vlib_cli_pg_command, static) = {
+ .path = "packet-generator",
+ .short_help = "Packet generator commands",
+};
+/* *INDENT-ON* */
+
+void
+pg_enable_disable (u32 stream_index, int is_enable)
+{
+ pg_main_t *pg = &pg_main;
+ pg_stream_t *s;
+
+ if (stream_index == ~0)
+ {
+ /* No stream specified: enable/disable all streams. */
+ /* *INDENT-OFF* */
+ pool_foreach (s, pg->streams, ({
+ pg_stream_enable_disable (pg, s, is_enable);
+ }));
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ /* enable/disable specified stream. */
+ s = pool_elt_at_index (pg->streams, stream_index);
+ pg_stream_enable_disable (pg, s, is_enable);
+ }
+}
+
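+/* Arm or disarm pcap capture on a pg interface.  Any previous capture
+   state is reset; enabling refuses to overwrite an existing file. */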
+clib_error_t *
+pg_capture (pg_capture_args_t * a)
+{
+ pg_main_t *pg = &pg_main;
+ pg_interface_t *pi;
+
+ if (a->is_enabled == 1)
+ {
+ struct stat sb;
+      if (stat ((char *) a->pcap_file_name, &sb) != -1)
+	return clib_error_return (0, "pcap file '%s' already exists",
+				  a->pcap_file_name);
+ }
+
+ pi = pool_elt_at_index (pg->interfaces, a->dev_instance);
+ vec_free (pi->pcap_file_name);
+ memset (&pi->pcap_main, 0, sizeof (pi->pcap_main));
+
+ if (a->is_enabled == 0)
+ return 0;
+
+ pi->pcap_file_name = a->pcap_file_name;
+ pi->pcap_main.file_name = (char *) pi->pcap_file_name;
+ pi->pcap_main.n_packets_to_capture = a->count;
+ pi->pcap_main.packet_type = PCAP_PACKET_TYPE_ethernet;
+
+ return 0;
+}
+
+static clib_error_t *
+enable_disable_stream (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ pg_main_t *pg = &pg_main;
+ int is_enable = cmd->function_arg != 0;
+ u32 stream_index = ~0;
+
+ if (unformat (input, "%U", unformat_eof))
+ ;
+ else if (unformat (input, "%U", unformat_hash_vec_string,
+ pg->stream_index_by_name, &stream_index))
+ ;
+ else
+ return clib_error_create ("unknown input `%U'",
+ format_unformat_error, input);
+
+ pg_enable_disable (stream_index, is_enable);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (enable_streams_cli, static) = {
+ .path = "packet-generator enable-stream",
+ .short_help = "Enable packet generator streams",
+ .function = enable_disable_stream,
+ .function_arg = 1, /* is_enable */
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (disable_streams_cli, static) = {
+ .path = "packet-generator disable-stream",
+ .short_help = "Disable packet generator streams",
+ .function = enable_disable_stream,
+ .function_arg = 0, /* is_enable */
+};
+/* *INDENT-ON* */
+
+static u8 *
+format_pg_stream (u8 * s, va_list * va)
+{
+ pg_stream_t *t = va_arg (*va, pg_stream_t *);
+ u8 *v;
+
+ if (!t)
+ return format (s, "%=16s%=12s%=16s%s",
+ "Name", "Enabled", "Count", "Parameters");
+
+ s = format (s, "%-16v%=12s%16Ld",
+ t->name,
+ pg_stream_is_enabled (t) ? "Yes" : "No",
+ t->n_packets_generated);
+
+ v = 0;
+
+ v = format (v, "limit %Ld, ", t->n_packets_limit);
+
+ v = format (v, "rate %.2e pps, ", t->rate_packets_per_second);
+
+ v = format (v, "size %d%c%d, ",
+ t->min_packet_bytes,
+ t->packet_size_edit_type == PG_EDIT_RANDOM ? '+' : '-',
+ t->max_packet_bytes);
+
+ v = format (v, "buffer-size %d, ", t->buffer_bytes);
+
+ v = format (v, "worker %d, ", t->worker_index);
+
+ if (v)
+ {
+ s = format (s, " %v", v);
+ vec_free (v);
+ }
+
+ return s;
+}
+
+static clib_error_t *
+show_streams (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ pg_main_t *pg = &pg_main;
+ pg_stream_t *s;
+
+ if (pool_elts (pg->streams) == 0)
+ {
+ vlib_cli_output (vm, "no streams currently defined");
+ goto done;
+ }
+
+ vlib_cli_output (vm, "%U", format_pg_stream, 0);
+ /* *INDENT-OFF* */
+ pool_foreach (s, pg->streams, ({
+ vlib_cli_output (vm, "%U", format_pg_stream, s);
+ }));
+ /* *INDENT-ON* */
+
+done:
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_streams_cli, static) = {
+ .path = "show packet-generator",
+ .short_help = "Show packet generator streams",
+ .function = show_streams,
+};
+/* *INDENT-ON* */
+
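+/* Read replay packet templates for a stream from a pcap file.  Buffer
+   recycling is disabled for pcap replay (see flag below). */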
+static clib_error_t *
+pg_pcap_read (pg_stream_t * s, char *file_name)
+{
+#ifndef CLIB_UNIX
+ return clib_error_return (0, "no pcap support");
+#else
+ pcap_main_t pm;
+ clib_error_t *error;
+ memset (&pm, 0, sizeof (pm));
+ pm.file_name = file_name;
+ error = pcap_read (&pm);
+ s->replay_packet_templates = pm.packets_read;
+ s->min_packet_bytes = pm.min_packet_bytes;
+ s->max_packet_bytes = pm.max_packet_bytes;
+ s->buffer_bytes = pm.max_packet_bytes;
+ /* For PCAP buffers we never re-use buffers. */
+ s->flags |= PG_STREAM_FLAGS_DISABLE_BUFFER_RECYCLE;
+
+ if (s->n_packets_limit == 0)
+ s->n_packets_limit = vec_len (pm.packets_read);
+
+ return error;
+#endif /* CLIB_UNIX */
+}
+
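+/* Parse one stream parameter.  Size syntax: "size A-B" sweeps packet
+   sizes from A to B (increment), while "size A+B" draws a random size in
+   [A, B] for each packet. */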
+static uword
+unformat_pg_stream_parameter (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t *s = va_arg (*args, pg_stream_t *);
+ f64 x;
+
+ if (unformat (input, "limit %f", &x))
+ s->n_packets_limit = x;
+
+ else if (unformat (input, "rate %f", &x))
+ s->rate_packets_per_second = x;
+
+ else if (unformat (input, "size %d-%d", &s->min_packet_bytes,
+ &s->max_packet_bytes))
+ s->packet_size_edit_type = PG_EDIT_INCREMENT;
+
+ else if (unformat (input, "size %d+%d", &s->min_packet_bytes,
+ &s->max_packet_bytes))
+ s->packet_size_edit_type = PG_EDIT_RANDOM;
+
+ else if (unformat (input, "buffer-size %d", &s->buffer_bytes))
+ ;
+
+ else
+ return 0;
+
+ return 1;
+}
+
+static clib_error_t *
+validate_stream (pg_stream_t * s)
+{
+ if (s->max_packet_bytes < s->min_packet_bytes)
+ return clib_error_create ("max-size < min-size");
+
+ if (s->buffer_bytes >= 4096 || s->buffer_bytes == 0)
+ return
+ clib_error_create ("buffer-size must be positive and < 4096, given %d",
+ s->buffer_bytes);
+
+ if (s->rate_packets_per_second < 0)
+ return clib_error_create ("negative rate");
+
+ return 0;
+}
+
+static clib_error_t *
+new_stream (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ clib_error_t *error = 0;
+ u8 *tmp = 0;
+ u32 hw_if_index;
+ unformat_input_t sub_input = { 0 };
+ int sub_input_given = 0;
+ vnet_main_t *vnm = vnet_get_main ();
+ pg_main_t *pg = &pg_main;
+ pg_stream_t s = { 0 };
+ char *pcap_file_name;
+
+ s.sw_if_index[VLIB_RX] = s.sw_if_index[VLIB_TX] = ~0;
+ s.node_index = ~0;
+ s.max_packet_bytes = s.min_packet_bytes = 64;
+ s.buffer_bytes = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES;
+ s.if_id = 0;
+ pcap_file_name = 0;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "name %v", &tmp))
+ {
+ if (s.name)
+ vec_free (s.name);
+ s.name = tmp;
+ }
+
+ else if (unformat (input, "node %U",
+ unformat_vnet_hw_interface, vnm, &hw_if_index))
+ {
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ s.node_index = hi->output_node_index;
+ s.sw_if_index[VLIB_TX] = hi->sw_if_index;
+ }
+
+ else if (unformat (input, "source pg%u", &s.if_id))
+ ;
+
+ else if (unformat (input, "node %U",
+ unformat_vlib_node, vm, &s.node_index))
+ ;
+
+ else if (unformat (input, "worker %u", &s.worker_index))
+ ;
+
+ else if (unformat (input, "interface %U",
+ unformat_vnet_sw_interface, vnm,
+ &s.sw_if_index[VLIB_RX]))
+ ;
+
+ else if (unformat (input, "pcap %s", &pcap_file_name))
+ ;
+
+ else if (!sub_input_given
+ && unformat (input, "data %U", unformat_input, &sub_input))
+ sub_input_given++;
+
+ else if (unformat_user (input, unformat_pg_stream_parameter, &s))
+ ;
+
+ else if (unformat (input, "no-recycle"))
+ s.flags |= PG_STREAM_FLAGS_DISABLE_BUFFER_RECYCLE;
+
+ else
+ {
+ error = clib_error_create ("unknown input `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+
+  error = validate_stream (&s);
+  if (error)
+    goto done;
+
+ if (!sub_input_given && !pcap_file_name)
+ {
+ error = clib_error_create ("no packet data given");
+ goto done;
+ }
+
+ if (s.node_index == ~0)
+ {
+ if (pcap_file_name != 0)
+ {
+ vlib_node_t *n =
+ vlib_get_node_by_name (vm, (u8 *) "ethernet-input");
+ s.node_index = n->index;
+ }
+ else
+ {
+ error = clib_error_create ("output interface or node not given");
+ goto done;
+ }
+ }
+
+ {
+ pg_node_t *n;
+
+ if (s.node_index < vec_len (pg->nodes))
+ n = pg->nodes + s.node_index;
+ else
+ n = 0;
+
+ if (s.worker_index >= vlib_num_workers ())
+ s.worker_index = 0;
+
+ if (pcap_file_name != 0)
+ {
+ error = pg_pcap_read (&s, pcap_file_name);
+ if (error)
+ goto done;
+ vec_free (pcap_file_name);
+ }
+
+ else if (n && n->unformat_edit
+ && unformat_user (&sub_input, n->unformat_edit, &s))
+ ;
+
+ else if (!unformat_user (&sub_input, unformat_pg_payload, &s))
+ {
+ error = clib_error_create
+ ("failed to parse packet data from `%U'",
+ format_unformat_error, &sub_input);
+ goto done;
+ }
+ }
+
+ pg_stream_add (pg, &s);
+ return 0;
+
+done:
+ pg_stream_free (&s);
+ unformat_free (&sub_input);
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (new_stream_cli, static) = {
+ .path = "packet-generator new",
+ .function = new_stream,
+ .short_help = "Create packet generator stream",
+ .long_help =
+ "Create packet generator stream\n"
+ "\n"
+ "Arguments:\n"
+ "\n"
+ "name STRING sets stream name\n"
+ "interface STRING interface for stream output \n"
+ "node NODE-NAME node for stream output\n"
+ "data STRING specifies packet data\n"
+ "pcap FILENAME read packet data from pcap file\n",
+};
+/* *INDENT-ON* */
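+
+/* Example stream definition (illustrative; compare example.script — the
+   exact header syntax accepted under "data" depends on which nodes are
+   present in the image):
+
+     packet-generator new {
+       name x
+       limit 1000
+       rate 1e6
+       node ethernet-input
+       size 64-64
+       data { hex 0x0123456789abcdef }
+     }
+*/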
+
+static clib_error_t *
+del_stream (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ pg_main_t *pg = &pg_main;
+ u32 i;
+
+  if (!unformat (input, "%U",
+		 unformat_hash_vec_string, pg->stream_index_by_name, &i))
+ return clib_error_create ("expected stream name `%U'",
+ format_unformat_error, input);
+
+ pg_stream_del (pg, i);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (del_stream_cli, static) = {
+ .path = "packet-generator delete",
+ .function = del_stream,
+ .short_help = "Delete stream with given name",
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+change_stream_parameters (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ pg_main_t *pg = &pg_main;
+ pg_stream_t *s, s_new;
+ u32 stream_index = ~0;
+ clib_error_t *error;
+
+ if (unformat (input, "%U", unformat_hash_vec_string,
+ pg->stream_index_by_name, &stream_index))
+ ;
+ else
+ return clib_error_create ("expecting stream name; got `%U'",
+ format_unformat_error, input);
+
+ s = pool_elt_at_index (pg->streams, stream_index);
+ s_new = s[0];
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat_user (input, unformat_pg_stream_parameter, &s_new))
+ ;
+
+ else
+ return clib_error_create ("unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ error = validate_stream (&s_new);
+ if (!error)
+ s[0] = s_new;
+
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (change_stream_parameters_cli, static) = {
+ .path = "packet-generator configure",
+ .short_help = "Change packet generator stream parameters",
+ .function = change_stream_parameters,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+pg_capture_cmd_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ clib_error_t *error = 0;
+ vnet_main_t *vnm = vnet_get_main ();
+ unformat_input_t _line_input, *line_input = &_line_input;
+ vnet_hw_interface_t *hi = 0;
+ u8 *pcap_file_name = 0;
+ u32 hw_if_index;
+ u32 is_disable = 0;
+ u32 count = ~0;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "%U",
+ unformat_vnet_hw_interface, vnm, &hw_if_index))
+ {
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ }
+
+ else if (unformat (line_input, "pcap %s", &pcap_file_name))
+ ;
+ else if (unformat (line_input, "count %u", &count))
+ ;
+ else if (unformat (line_input, "disable"))
+ is_disable = 1;
+
+ else
+ {
+ error = clib_error_create ("unknown input `%U'",
+ format_unformat_error, input);
+ return error;
+ }
+ }
+
+ if (!hi)
+ return clib_error_return (0, "Please specify interface name");
+
+ if (hi->dev_class_index != pg_dev_class.index)
+ return clib_error_return (0, "Please specify packet-generator interface");
+
+ if (!pcap_file_name && is_disable == 0)
+ return clib_error_return (0, "Please specify pcap file name");
+
+ unformat_free (line_input);
+
+ pg_capture_args_t _a, *a = &_a;
+
+ a->hw_if_index = hw_if_index;
+ a->dev_instance = hi->dev_instance;
+ a->is_enabled = !is_disable;
+ a->pcap_file_name = pcap_file_name;
+ a->count = count;
+
+ error = pg_capture (a);
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (pg_capture_cmd, static) = {
+ .path = "packet-generator capture",
+ .short_help = "packet-generator capture <interface name> pcap <filename> [count <n>]",
+ .function = pg_capture_cmd_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+create_pg_if_cmd_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ pg_main_t *pg = &pg_main;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 if_id;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "interface pg%u", &if_id))
+ ;
+
+ else
+ return clib_error_create ("unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ unformat_free (line_input);
+
+ pg_interface_add_or_get (pg, if_id);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (create_pg_if_cmd, static) = {
+ .path = "create packet-generator",
+ .short_help = "create packet-generator interface <interface name>",
+ .function = create_pg_if_cmd_fn,
+};
+/* *INDENT-ON* */
+
+/* Dummy init function so that we can be linked in. */
+static clib_error_t *
+pg_cli_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (pg_cli_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/pg/edit.c b/src/vnet/pg/edit.c
new file mode 100644
index 00000000000..cb4d070fb19
--- /dev/null
+++ b/src/vnet/pg/edit.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * pg_edit.c: packet generator edits
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+
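+/* Serialize a host-order value into an edit's network-byte-order value
+   vector, honoring a non-byte-aligned least-significant-bit offset. */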
+static void
+pg_edit_set_value_helper (pg_edit_t * e, u64 value, u8 * result)
+{
+ int i, j, n_bits_left;
+ u8 *v, tmp[8];
+
+ v = tmp;
+
+ n_bits_left = e->n_bits;
+ i = 0;
+ j = e->lsb_bit_offset % BITS (v[0]);
+
+ if (n_bits_left > 0 && j != 0)
+ {
+ v[i] = (value & 0xff) << j;
+ value >>= BITS (v[0]) - j;
+ n_bits_left -= BITS (v[0]) - j;
+ i += 1;
+ }
+
+ while (n_bits_left > 0)
+ {
+ v[i] = value & 0xff;
+ value >>= 8;
+ n_bits_left -= 8;
+ i += 1;
+ }
+
+ /* Convert to network byte order. */
+ for (j = 0; j < i; j++)
+ result[j] = v[i - 1 - j];
+}
+
+void
+pg_edit_set_value (pg_edit_t * e, int hi_or_lo, u64 value)
+{
+ pg_edit_alloc_value (e, hi_or_lo);
+ pg_edit_set_value_helper (e, value, e->values[hi_or_lo]);
+}
+
+/* Parse an int either %d or 0x%x into network byte order. */
+uword
+unformat_pg_number (unformat_input_t * input, va_list * args)
+{
+ u8 *result = va_arg (*args, u8 *);
+ pg_edit_t *e = va_arg (*args, pg_edit_t *);
+ u64 value;
+
+ ASSERT (BITS (value) >= e->n_bits);
+
+ if (!unformat (input, "0x%X", sizeof (value), &value)
+ && !unformat (input, "%D", sizeof (value), &value))
+ return 0;
+
+ /* Number given does not fit into bit field. */
+ if (e->n_bits < 64 && value >= (u64) 1 << (u64) e->n_bits)
+ return 0;
+
+ pg_edit_set_value_helper (e, value, result);
+ return 1;
+}
+
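+/* Parse an edit value: "lo" (fixed), "lo-hi" (increment over the range)
+   or "lo+hi" (uniform random within the range). */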
+uword
+unformat_pg_edit (unformat_input_t * input, va_list * args)
+{
+ unformat_function_t *f = va_arg (*args, unformat_function_t *);
+ pg_edit_t *e = va_arg (*args, pg_edit_t *);
+
+ pg_edit_alloc_value (e, PG_EDIT_LO);
+ if (!unformat_user (input, f, e->values[PG_EDIT_LO], e))
+ return 0;
+
+ pg_edit_alloc_value (e, PG_EDIT_HI);
+ if (unformat (input, "-%U", f, e->values[PG_EDIT_HI], e))
+ e->type = PG_EDIT_INCREMENT;
+ else if (unformat (input, "+%U", f, e->values[PG_EDIT_HI], e))
+ e->type = PG_EDIT_RANDOM;
+ else
+ e->type = PG_EDIT_FIXED;
+
+ return 1;
+}
+
+uword
+unformat_pg_payload (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t *s = va_arg (*args, pg_stream_t *);
+ vlib_main_t *vm = vlib_get_main ();
+ pg_edit_t *e;
+ u32 i, node_index, len, max_len;
+ u8 *v;
+
+ v = 0;
+
+ if (unformat (input, "incrementing %d", &len))
+ {
+ vec_resize (v, len);
+ for (i = 0; i < len; i++)
+ v[i] = i;
+ }
+ else if (unformat (input, "hex 0x%U", unformat_hex_string, &v))
+ ;
+
+ else if (unformat (input, "%U", unformat_vlib_node, vm, &node_index))
+ {
+ pg_node_t *pn = pg_get_node (node_index);
+ if (!pn->unformat_edit)
+ return 0;
+ return unformat (input, "%U", pn->unformat_edit, s);
+ }
+
+ else
+ return 0;
+
+ /* Length not including this payload. */
+ max_len = pg_edit_group_n_bytes (s, 0);
+ if (max_len + vec_len (v) >= s->max_packet_bytes)
+ {
+ if (s->max_packet_bytes >= max_len)
+ _vec_len (v) = s->max_packet_bytes - max_len;
+ else
+ _vec_len (v) = 0;
+ }
+
+ e = pg_create_edit_group (s, sizeof (e[0]), vec_len (v), 0);
+
+ e->type = PG_EDIT_FIXED;
+ e->n_bits = vec_len (v) * BITS (v[0]);
+
+  /* Least significant bit is at end of bit stream, since everything is always big endian. */
+ e->lsb_bit_offset = e->n_bits - BITS (v[0]);
+
+ e->values[PG_EDIT_LO] = v;
+
+ return 1;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/pg/edit.h b/src/vnet/pg/edit.h
new file mode 100644
index 00000000000..3bfdad575f5
--- /dev/null
+++ b/src/vnet/pg/edit.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * pg_edit.h: packet generator edits
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_packet_generator_pg_edit_h
+#define included_packet_generator_pg_edit_h
+
+#include <vppinfra/format.h>
+#include <vppinfra/vec.h>
+
+typedef enum
+{
+ /* Invalid type used to poison edits. */
+ PG_EDIT_INVALID_TYPE,
+
+ /* Value is fixed: does not change for all packets in sequence. */
+ PG_EDIT_FIXED,
+
+ /* Value v increments between low and high values v_low <= v <= v_high. */
+ PG_EDIT_INCREMENT,
+
+ /* Random value between low and high values v_low <= v <= v_high. */
+ PG_EDIT_RANDOM,
+
+ /* Unspecified value; will be specified by some edit function. */
+ PG_EDIT_UNSPECIFIED,
+} pg_edit_type_t;
+
+typedef struct
+{
+ pg_edit_type_t type;
+
+ /* Bit offset within packet where value is to be written.
+ Bits are written in network byte order: high bits first.
+ This is the bit offset of the least significant bit: i.e. the
+ highest numbered byte * 8 plus bit offset within that byte.
+ Negative offsets encode special edits. */
+ i32 lsb_bit_offset;
+
+ /* Special offset indicating this edit is for packet length. */
+#define PG_EDIT_PACKET_LENGTH (-1)
+
+ /* Number of bits in edit. */
+ u32 n_bits;
+
+ /* Low and high values for this edit. Network byte order. */
+ u8 *values[2];
+#define PG_EDIT_LO 0
+#define PG_EDIT_HI 1
+
+ /* Last value used for increment edit type. */
+ u64 last_increment_value;
+} pg_edit_t;
+
+always_inline void
+pg_edit_free (pg_edit_t * e)
+{
+ int i;
+ for (i = 0; i < ARRAY_LEN (e->values); i++)
+ vec_free (e->values[i]);
+}
+
+#define pg_edit_init_bitfield(e,type,field,field_offset,field_n_bits) \
+do { \
+ u32 _bo; \
+ \
+ ASSERT ((field_offset) < STRUCT_BITS_OF (type, field)); \
+ \
+ /* Start byte offset. */ \
+ _bo = STRUCT_OFFSET_OF (type, field); \
+ \
+ /* Adjust for big endian byte order. */ \
+ _bo += ((STRUCT_BITS_OF (type, field) \
+ - (field_offset) - 1) / BITS (u8)); \
+ \
+ (e)->lsb_bit_offset = _bo * BITS (u8) + ((field_offset) % BITS (u8)); \
+ (e)->n_bits = (field_n_bits); \
+} while (0)
+
+/* Initialize edit for byte aligned fields. */
+#define pg_edit_init(e,type,field) \
+ pg_edit_init_bitfield(e,type,field,0,STRUCT_BITS_OF(type,field))
+
+static inline uword
+pg_edit_n_alloc_bytes (pg_edit_t * e)
+{
+ int i0, i1, n_bytes, n_bits_left;
+
+ i0 = e->lsb_bit_offset;
+ i1 = i0 % BITS (u8);
+
+ n_bytes = 0;
+ n_bits_left = e->n_bits;
+
+ if (n_bits_left > 0 && i1 != 0)
+ {
+ n_bytes++;
+ n_bits_left -= i1;
+ if (n_bits_left < 0)
+ n_bits_left = 0;
+ }
+
+ n_bytes += (n_bits_left / BITS (u8));
+ n_bytes += (n_bits_left % BITS (u8)) != 0;
+
+ return n_bytes;
+}
+
+static inline void
+pg_edit_alloc_value (pg_edit_t * e, int i)
+{
+ vec_validate (e->values[i], e->lsb_bit_offset / BITS (u8));
+}
+
+extern void pg_edit_set_value (pg_edit_t * e, int hi_or_lo, u64 value);
+
+static inline void
+pg_edit_set_fixed (pg_edit_t * e, u64 value)
+{
+ e->type = PG_EDIT_FIXED;
+ pg_edit_set_value (e, PG_EDIT_LO, value);
+}
+
+static inline void
+pg_edit_copy_type_and_values (pg_edit_t * dst, pg_edit_t * src)
+{
+ int i;
+ dst->type = src->type;
+ src->type = PG_EDIT_INVALID_TYPE;
+ for (i = 0; i < ARRAY_LEN (dst->values); i++)
+ {
+ dst->values[i] = src->values[i];
+ src->values[i] = 0;
+ }
+}
+
+static inline u64
+pg_edit_get_value (pg_edit_t * e, int hi_or_lo)
+{
+ u64 r = 0;
+ int i, n;
+ u8 *v = e->values[hi_or_lo];
+
+ n = round_pow2 (e->n_bits, BITS (u8)) / BITS (u8);
+
+ ASSERT (n <= vec_len (v));
+ ASSERT (n <= sizeof (r));
+
+ for (i = 0; i < n; i++)
+ r = (r << BITS (v[i])) + v[i];
+
+ return r;
+}
+
+static inline uword
+pg_edit_is_fixed_with_value (pg_edit_t * e, u64 value)
+{
+ return (e->type == PG_EDIT_FIXED
+ && value == pg_edit_get_value (e, PG_EDIT_LO));
+}
+
+uword unformat_pg_edit (unformat_input_t * input, va_list * args);
+uword unformat_pg_payload (unformat_input_t * input, va_list * args);
+uword unformat_pg_number (unformat_input_t * input, va_list * args);
+uword unformat_pg_interface (unformat_input_t * input, va_list * args);
+
+#endif /* included_packet_generator_pg_edit_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/pg/example.script b/src/vnet/pg/example.script
new file mode 100644
index 00000000000..0e29b9ecae6
--- /dev/null
+++ b/src/vnet/pg/example.script
@@ -0,0 +1,6 @@
+packet-generator new {
+ name x
+ limit 1
+ node ethernet-input
+ data { IP: 1.2.3 -> 4.5.6 incrementing 100 }
+}
diff --git a/src/vnet/pg/init.c b/src/vnet/pg/init.c
new file mode 100644
index 00000000000..631be25ea3c
--- /dev/null
+++ b/src/vnet/pg/init.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * pg_init.c: VLIB packet generator
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+
+/* Global main structure. */
+pg_main_t pg_main;
+
+static clib_error_t *
+pg_init (vlib_main_t * vm)
+{
+ clib_error_t *error;
+ pg_main_t *pg = &pg_main;
+
+ pg->if_index_by_if_id = hash_create (0, sizeof (uword));
+
+ if ((error = vlib_call_init_function (vm, vnet_main_init)))
+ goto done;
+
+ if ((error = vlib_call_init_function (vm, pg_cli_init)))
+ goto done;
+
+done:
+ return error;
+}
+
+VLIB_INIT_FUNCTION (pg_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/pg/input.c b/src/vnet/pg/input.c
new file mode 100644
index 00000000000..e15faeb8564
--- /dev/null
+++ b/src/vnet/pg/input.c
@@ -0,0 +1,1667 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * pg_input.c: packet generator input
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/vnet.h>
+#include <vnet/feature/feature.h>
+#include <vnet/devices/devices.h>
+
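+/* Debug helper: check the masked fixed portion of a generated buffer
+   against the stream's expected packet template. */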
+static int
+validate_buffer_data2 (vlib_buffer_t * b, pg_stream_t * s,
+ u32 data_offset, u32 n_bytes)
+{
+ u8 *bd, *pd, *pm;
+ u32 i;
+
+ bd = b->data;
+ pd = s->fixed_packet_data + data_offset;
+ pm = s->fixed_packet_data_mask + data_offset;
+
+ if (pd + n_bytes >= vec_end (s->fixed_packet_data))
+ n_bytes = (pd < vec_end (s->fixed_packet_data)
+ ? vec_end (s->fixed_packet_data) - pd : 0);
+
+ for (i = 0; i < n_bytes; i++)
+ if ((bd[i] & pm[i]) != pd[i])
+ break;
+
+ if (i >= n_bytes)
+ return 1;
+
+ clib_warning ("buffer %U", format_vlib_buffer, b);
+ clib_warning ("differ at index %d", i);
+ clib_warning ("is %U", format_hex_bytes, bd, n_bytes);
+ clib_warning ("mask %U", format_hex_bytes, pm, n_bytes);
+ clib_warning ("expect %U", format_hex_bytes, pd, n_bytes);
+ return 0;
+}
+
+static int
+validate_buffer_data (vlib_buffer_t * b, pg_stream_t * s)
+{
+ return validate_buffer_data2 (b, s, 0, s->buffer_bytes);
+}
+
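+/* Store an n_bits wide value at a possibly unaligned address, converting
+   to network byte order on request; set_2 below is the two-buffer
+   unrolled variant. */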
+always_inline void
+set_1 (void *a0,
+ u64 v0, u64 v_min, u64 v_max, u32 n_bits, u32 is_net_byte_order)
+{
+ ASSERT (v0 >= v_min && v0 <= v_max);
+ if (n_bits == BITS (u8))
+ {
+ ((u8 *) a0)[0] = v0;
+ }
+ else if (n_bits == BITS (u16))
+ {
+ if (is_net_byte_order)
+ v0 = clib_host_to_net_u16 (v0);
+ clib_mem_unaligned (a0, u16) = v0;
+ }
+ else if (n_bits == BITS (u32))
+ {
+ if (is_net_byte_order)
+ v0 = clib_host_to_net_u32 (v0);
+ clib_mem_unaligned (a0, u32) = v0;
+ }
+ else if (n_bits == BITS (u64))
+ {
+ if (is_net_byte_order)
+ v0 = clib_host_to_net_u64 (v0);
+ clib_mem_unaligned (a0, u64) = v0;
+ }
+}
+
+always_inline void
+set_2 (void *a0, void *a1,
+ u64 v0, u64 v1,
+ u64 v_min, u64 v_max,
+ u32 n_bits, u32 is_net_byte_order, u32 is_increment)
+{
+ ASSERT (v0 >= v_min && v0 <= v_max);
+ ASSERT (v1 >= v_min && v1 <= (v_max + is_increment));
+ if (n_bits == BITS (u8))
+ {
+ ((u8 *) a0)[0] = v0;
+ ((u8 *) a1)[0] = v1;
+ }
+ else if (n_bits == BITS (u16))
+ {
+ if (is_net_byte_order)
+ {
+ v0 = clib_host_to_net_u16 (v0);
+ v1 = clib_host_to_net_u16 (v1);
+ }
+ clib_mem_unaligned (a0, u16) = v0;
+ clib_mem_unaligned (a1, u16) = v1;
+ }
+ else if (n_bits == BITS (u32))
+ {
+ if (is_net_byte_order)
+ {
+ v0 = clib_host_to_net_u32 (v0);
+ v1 = clib_host_to_net_u32 (v1);
+ }
+ clib_mem_unaligned (a0, u32) = v0;
+ clib_mem_unaligned (a1, u32) = v1;
+ }
+ else if (n_bits == BITS (u64))
+ {
+ if (is_net_byte_order)
+ {
+ v0 = clib_host_to_net_u64 (v0);
+ v1 = clib_host_to_net_u64 (v1);
+ }
+ clib_mem_unaligned (a0, u64) = v0;
+ clib_mem_unaligned (a1, u64) = v1;
+ }
+}
+
+static_always_inline void
+do_set_fixed (pg_main_t * pg,
+ pg_stream_t * s,
+ u32 * buffers,
+ u32 n_buffers,
+ u32 n_bits,
+ u32 byte_offset, u32 is_net_byte_order, u64 v_min, u64 v_max)
+{
+ vlib_main_t *vm = vlib_get_main ();
+
+ while (n_buffers >= 4)
+ {
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ void *a0, *a1;
+
+ b0 = vlib_get_buffer (vm, buffers[0]);
+ b1 = vlib_get_buffer (vm, buffers[1]);
+ b2 = vlib_get_buffer (vm, buffers[2]);
+ b3 = vlib_get_buffer (vm, buffers[3]);
+ buffers += 2;
+ n_buffers -= 2;
+
+ a0 = (void *) b0 + byte_offset;
+ a1 = (void *) b1 + byte_offset;
+ CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
+ CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
+
+ set_2 (a0, a1, v_min, v_min, v_min, v_max, n_bits, is_net_byte_order,
+ /* is_increment */ 0);
+
+ ASSERT (validate_buffer_data (b0, s));
+ ASSERT (validate_buffer_data (b1, s));
+ }
+
+ while (n_buffers > 0)
+ {
+ vlib_buffer_t *b0;
+ void *a0;
+
+ b0 = vlib_get_buffer (vm, buffers[0]);
+ buffers += 1;
+ n_buffers -= 1;
+
+ a0 = (void *) b0 + byte_offset;
+
+ set_1 (a0, v_min, v_min, v_max, n_bits, is_net_byte_order);
+
+ ASSERT (validate_buffer_data (b0, s));
+ }
+}
+
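+/* Write incrementing values into successive buffers, wrapping from v_max
+   back to v_min.  Returns the next value to use; optionally accumulates
+   the sum of the values written (used for byte counters). */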
+static_always_inline u64
+do_set_increment (pg_main_t * pg,
+ pg_stream_t * s,
+ u32 * buffers,
+ u32 n_buffers,
+ u32 n_bits,
+ u32 byte_offset,
+ u32 is_net_byte_order,
+ u32 want_sum, u64 * sum_result, u64 v_min, u64 v_max, u64 v)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ u64 sum = 0;
+
+ ASSERT (v >= v_min && v <= v_max);
+
+ while (n_buffers >= 4)
+ {
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ void *a0, *a1;
+ u64 v_old;
+
+ b0 = vlib_get_buffer (vm, buffers[0]);
+ b1 = vlib_get_buffer (vm, buffers[1]);
+ b2 = vlib_get_buffer (vm, buffers[2]);
+ b3 = vlib_get_buffer (vm, buffers[3]);
+ buffers += 2;
+ n_buffers -= 2;
+
+ a0 = (void *) b0 + byte_offset;
+ a1 = (void *) b1 + byte_offset;
+ CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
+ CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
+
+ v_old = v;
+ v = v_old + 2;
+ v = v > v_max ? v_min : v;
+ set_2 (a0, a1,
+ v_old + 0, v_old + 1, v_min, v_max, n_bits, is_net_byte_order,
+ /* is_increment */ 1);
+
+ if (want_sum)
+ sum += 2 * v_old + 1;
+
+ if (PREDICT_FALSE (v_old + 1 > v_max))
+ {
+ if (want_sum)
+ sum -= 2 * v_old + 1;
+
+ v = v_old;
+ set_1 (a0, v + 0, v_min, v_max, n_bits, is_net_byte_order);
+ if (want_sum)
+ sum += v;
+ v += 1;
+
+ v = v > v_max ? v_min : v;
+ set_1 (a1, v + 0, v_min, v_max, n_bits, is_net_byte_order);
+ if (want_sum)
+ sum += v;
+ v += 1;
+ }
+
+ ASSERT (validate_buffer_data (b0, s));
+ ASSERT (validate_buffer_data (b1, s));
+ }
+
+ while (n_buffers > 0)
+ {
+ vlib_buffer_t *b0;
+ void *a0;
+ u64 v_old;
+
+ b0 = vlib_get_buffer (vm, buffers[0]);
+ buffers += 1;
+ n_buffers -= 1;
+
+ a0 = (void *) b0 + byte_offset;
+
+ v_old = v;
+ if (want_sum)
+ sum += v_old;
+ v += 1;
+ v = v > v_max ? v_min : v;
+
+ ASSERT (v_old >= v_min && v_old <= v_max);
+ set_1 (a0, v_old, v_min, v_max, n_bits, is_net_byte_order);
+
+ ASSERT (validate_buffer_data (b0, s));
+ }
+
+ if (want_sum)
+ *sum_result = sum;
+
+ return v;
+}
+
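+/* Write uniformly distributed values in [v_min, v_max]: add a masked
+   power-of-2 random number, then fold back into range by subtracting the
+   range size at most twice. */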
+static_always_inline void
+do_set_random (pg_main_t * pg,
+ pg_stream_t * s,
+ u32 * buffers,
+ u32 n_buffers,
+ u32 n_bits,
+ u32 byte_offset,
+ u32 is_net_byte_order,
+ u32 want_sum, u64 * sum_result, u64 v_min, u64 v_max)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ u64 v_diff = v_max - v_min + 1;
+ u64 r_mask = max_pow2 (v_diff) - 1;
+ u64 v0, v1;
+ u64 sum = 0;
+ void *random_data;
+
+ random_data = clib_random_buffer_get_data
+ (&vm->random_buffer, n_buffers * n_bits / BITS (u8));
+
+ v0 = v1 = v_min;
+
+ while (n_buffers >= 4)
+ {
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ void *a0, *a1;
+ u64 r0 = 0, r1 = 0; /* warnings be gone */
+
+ b0 = vlib_get_buffer (vm, buffers[0]);
+ b1 = vlib_get_buffer (vm, buffers[1]);
+ b2 = vlib_get_buffer (vm, buffers[2]);
+ b3 = vlib_get_buffer (vm, buffers[3]);
+ buffers += 2;
+ n_buffers -= 2;
+
+ a0 = (void *) b0 + byte_offset;
+ a1 = (void *) b1 + byte_offset;
+ CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
+ CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
+
+ switch (n_bits)
+ {
+#define _(n) \
+ case BITS (u##n): \
+ { \
+ u##n * r = random_data; \
+ r0 = r[0]; \
+ r1 = r[1]; \
+ random_data = r + 2; \
+ } \
+ break;
+
+ _(8);
+ _(16);
+ _(32);
+ _(64);
+
+#undef _
+ }
+
+ /* Add power of 2 sized random number which may be out of range. */
+ v0 += r0 & r_mask;
+ v1 += r1 & r_mask;
+
+ /* Twice should be enough to reduce to v_min .. v_max range. */
+ v0 = v0 > v_max ? v0 - v_diff : v0;
+ v1 = v1 > v_max ? v1 - v_diff : v1;
+ v0 = v0 > v_max ? v0 - v_diff : v0;
+ v1 = v1 > v_max ? v1 - v_diff : v1;
+
+ if (want_sum)
+ sum += v0 + v1;
+
+ set_2 (a0, a1, v0, v1, v_min, v_max, n_bits, is_net_byte_order,
+ /* is_increment */ 0);
+
+ ASSERT (validate_buffer_data (b0, s));
+ ASSERT (validate_buffer_data (b1, s));
+ }
+
+ while (n_buffers > 0)
+ {
+ vlib_buffer_t *b0;
+ void *a0;
+ u64 r0 = 0; /* warnings be gone */
+
+ b0 = vlib_get_buffer (vm, buffers[0]);
+ buffers += 1;
+ n_buffers -= 1;
+
+ a0 = (void *) b0 + byte_offset;
+
+ switch (n_bits)
+ {
+#define _(n) \
+ case BITS (u##n): \
+ { \
+ u##n * r = random_data; \
+ r0 = r[0]; \
+ random_data = r + 1; \
+ } \
+ break;
+
+ _(8);
+ _(16);
+ _(32);
+ _(64);
+
+#undef _
+ }
+
+ /* Add power of 2 sized random number which may be out of range. */
+ v0 += r0 & r_mask;
+
+ /* Twice should be enough to reduce to v_min .. v_max range. */
+ v0 = v0 > v_max ? v0 - v_diff : v0;
+ v0 = v0 > v_max ? v0 - v_diff : v0;
+
+ if (want_sum)
+ sum += v0;
+
+ set_1 (a0, v0, v_min, v_max, n_bits, is_net_byte_order);
+
+ ASSERT (validate_buffer_data (b0, s));
+ }
+
+ if (want_sum)
+ *sum_result = sum;
+}
+
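+/* Read-modify-write helper for the setbits_* functions below: clear the
+   masked field (in network byte order) and OR in the shifted value. */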
+#define _(i,t) \
+ clib_mem_unaligned (a##i, t) = \
+ clib_host_to_net_##t ((clib_net_to_host_mem_##t (a##i) &~ mask) \
+ | (v##i << shift))
+
+always_inline void
+setbits_1 (void *a0,
+ u64 v0,
+ u64 v_min, u64 v_max,
+ u32 max_bits, u32 n_bits, u64 mask, u32 shift)
+{
+ ASSERT (v0 >= v_min && v0 <= v_max);
+ if (max_bits == BITS (u8))
+ ((u8 *) a0)[0] = (((u8 *) a0)[0] & ~mask) | (v0 << shift);
+
+ else if (max_bits == BITS (u16))
+ {
+ _(0, u16);
+ }
+ else if (max_bits == BITS (u32))
+ {
+ _(0, u32);
+ }
+ else if (max_bits == BITS (u64))
+ {
+ _(0, u64);
+ }
+}
+
+always_inline void
+setbits_2 (void *a0, void *a1,
+ u64 v0, u64 v1,
+ u64 v_min, u64 v_max,
+ u32 max_bits, u32 n_bits, u64 mask, u32 shift, u32 is_increment)
+{
+ ASSERT (v0 >= v_min && v0 <= v_max);
+ ASSERT (v1 >= v_min && v1 <= v_max + is_increment);
+ if (max_bits == BITS (u8))
+ {
+ ((u8 *) a0)[0] = (((u8 *) a0)[0] & ~mask) | (v0 << shift);
+ ((u8 *) a1)[0] = (((u8 *) a1)[0] & ~mask) | (v1 << shift);
+ }
+
+ else if (max_bits == BITS (u16))
+ {
+ _(0, u16);
+ _(1, u16);
+ }
+ else if (max_bits == BITS (u32))
+ {
+ _(0, u32);
+ _(1, u32);
+ }
+ else if (max_bits == BITS (u64))
+ {
+ _(0, u64);
+ _(1, u64);
+ }
+}
+
+#undef _
+
+static_always_inline void
+do_setbits_fixed (pg_main_t * pg,
+ pg_stream_t * s,
+ u32 * buffers,
+ u32 n_buffers,
+ u32 max_bits,
+ u32 n_bits,
+ u32 byte_offset, u64 v_min, u64 v_max, u64 mask, u32 shift)
+{
+ vlib_main_t *vm = vlib_get_main ();
+
+ while (n_buffers >= 4)
+ {
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ void *a0, *a1;
+
+ b0 = vlib_get_buffer (vm, buffers[0]);
+ b1 = vlib_get_buffer (vm, buffers[1]);
+ b2 = vlib_get_buffer (vm, buffers[2]);
+ b3 = vlib_get_buffer (vm, buffers[3]);
+ buffers += 2;
+ n_buffers -= 2;
+
+ a0 = (void *) b0 + byte_offset;
+ a1 = (void *) b1 + byte_offset;
+ CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
+ CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
+
+ setbits_2 (a0, a1,
+ v_min, v_min, v_min, v_max, max_bits, n_bits, mask, shift,
+ /* is_increment */ 0);
+
+ ASSERT (validate_buffer_data (b0, s));
+ ASSERT (validate_buffer_data (b1, s));
+ }
+
+ while (n_buffers > 0)
+ {
+ vlib_buffer_t *b0;
+ void *a0;
+
+ b0 = vlib_get_buffer (vm, buffers[0]);
+ buffers += 1;
+ n_buffers -= 1;
+
+ a0 = (void *) b0 + byte_offset;
+
+ setbits_1 (a0, v_min, v_min, v_max, max_bits, n_bits, mask, shift);
+ ASSERT (validate_buffer_data (b0, s));
+ }
+}
+
+static_always_inline u64
+do_setbits_increment (pg_main_t * pg,
+ pg_stream_t * s,
+ u32 * buffers,
+ u32 n_buffers,
+ u32 max_bits,
+ u32 n_bits,
+ u32 byte_offset,
+ u64 v_min, u64 v_max, u64 v, u64 mask, u32 shift)
+{
+ vlib_main_t *vm = vlib_get_main ();
+
+ ASSERT (v >= v_min && v <= v_max);
+
+ while (n_buffers >= 4)
+ {
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ void *a0, *a1;
+ u64 v_old;
+
+ b0 = vlib_get_buffer (vm, buffers[0]);
+ b1 = vlib_get_buffer (vm, buffers[1]);
+ b2 = vlib_get_buffer (vm, buffers[2]);
+ b3 = vlib_get_buffer (vm, buffers[3]);
+ buffers += 2;
+ n_buffers -= 2;
+
+ a0 = (void *) b0 + byte_offset;
+ a1 = (void *) b1 + byte_offset;
+ CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
+ CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
+
+ v_old = v;
+ v = v_old + 2;
+ v = v > v_max ? v_min : v;
+ setbits_2 (a0, a1,
+ v_old + 0, v_old + 1,
+ v_min, v_max, max_bits, n_bits, mask, shift,
+ /* is_increment */ 1);
+
+ if (PREDICT_FALSE (v_old + 1 > v_max))
+ {
+ v = v_old;
+ setbits_1 (a0, v + 0, v_min, v_max, max_bits, n_bits, mask, shift);
+ v += 1;
+
+ v = v > v_max ? v_min : v;
+ setbits_1 (a1, v + 0, v_min, v_max, max_bits, n_bits, mask, shift);
+ v += 1;
+ }
+ ASSERT (validate_buffer_data (b0, s));
+ ASSERT (validate_buffer_data (b1, s));
+ }
+
+ while (n_buffers > 0)
+ {
+ vlib_buffer_t *b0;
+ void *a0;
+ u64 v_old;
+
+ b0 = vlib_get_buffer (vm, buffers[0]);
+ buffers += 1;
+ n_buffers -= 1;
+
+ a0 = (void *) b0 + byte_offset;
+
+ v_old = v;
+ v = v_old + 1;
+ v = v > v_max ? v_min : v;
+
+ ASSERT (v_old >= v_min && v_old <= v_max);
+ setbits_1 (a0, v_old, v_min, v_max, max_bits, n_bits, mask, shift);
+
+ ASSERT (validate_buffer_data (b0, s));
+ }
+
+ return v;
+}
+
+static_always_inline void
+do_setbits_random (pg_main_t * pg,
+ pg_stream_t * s,
+ u32 * buffers,
+ u32 n_buffers,
+ u32 max_bits,
+ u32 n_bits,
+ u32 byte_offset, u64 v_min, u64 v_max, u64 mask, u32 shift)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ u64 v_diff = v_max - v_min + 1;
+ u64 r_mask = max_pow2 (v_diff) - 1;
+ u64 v0, v1;
+ void *random_data;
+
+ random_data = clib_random_buffer_get_data
+ (&vm->random_buffer, n_buffers * max_bits / BITS (u8));
+ v0 = v1 = v_min;
+
+ while (n_buffers >= 4)
+ {
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ void *a0, *a1;
+ u64 r0 = 0, r1 = 0; /* warnings be gone */
+
+ b0 = vlib_get_buffer (vm, buffers[0]);
+ b1 = vlib_get_buffer (vm, buffers[1]);
+ b2 = vlib_get_buffer (vm, buffers[2]);
+ b3 = vlib_get_buffer (vm, buffers[3]);
+ buffers += 2;
+ n_buffers -= 2;
+
+ a0 = (void *) b0 + byte_offset;
+ a1 = (void *) b1 + byte_offset;
+ CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
+ CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
+
+ switch (max_bits)
+ {
+#define _(n) \
+ case BITS (u##n): \
+ { \
+ u##n * r = random_data; \
+ r0 = r[0]; \
+ r1 = r[1]; \
+ random_data = r + 2; \
+ } \
+ break;
+
+ _(8);
+ _(16);
+ _(32);
+ _(64);
+
+#undef _
+ }
+
+ /* Add power of 2 sized random number which may be out of range. */
+ v0 += r0 & r_mask;
+ v1 += r1 & r_mask;
+
+ /* Twice should be enough to reduce to v_min .. v_max range. */
+ v0 = v0 > v_max ? v0 - v_diff : v0;
+ v1 = v1 > v_max ? v1 - v_diff : v1;
+ v0 = v0 > v_max ? v0 - v_diff : v0;
+ v1 = v1 > v_max ? v1 - v_diff : v1;
+
+ setbits_2 (a0, a1, v0, v1, v_min, v_max, max_bits, n_bits, mask, shift,
+ /* is_increment */ 0);
+
+ ASSERT (validate_buffer_data (b0, s));
+ ASSERT (validate_buffer_data (b1, s));
+ }
+
+ while (n_buffers > 0)
+ {
+ vlib_buffer_t *b0;
+ void *a0;
+ u64 r0 = 0; /* warnings be gone */
+
+ b0 = vlib_get_buffer (vm, buffers[0]);
+ buffers += 1;
+ n_buffers -= 1;
+
+ a0 = (void *) b0 + byte_offset;
+
+ switch (max_bits)
+ {
+#define _(n) \
+ case BITS (u##n): \
+ { \
+ u##n * r = random_data; \
+ r0 = r[0]; \
+ random_data = r + 1; \
+ } \
+ break;
+
+ _(8);
+ _(16);
+ _(32);
+ _(64);
+
+#undef _
+ }
+
+ /* Add power of 2 sized random number which may be out of range. */
+ v0 += r0 & r_mask;
+
+ /* Twice should be enough to reduce to v_min .. v_max range. */
+ v0 = v0 > v_max ? v0 - v_diff : v0;
+ v0 = v0 > v_max ? v0 - v_diff : v0;
+
+ setbits_1 (a0, v0, v_min, v_max, max_bits, n_bits, mask, shift);
+
+ ASSERT (validate_buffer_data (b0, s));
+ }
+}
+
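+/* Apply one edit across a buffer vector: byte-aligned edits of 8/16/32/64
+   bits take the whole-word fast path; anything else goes through the
+   masked setbits_* path. */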
+static u64
+do_it (pg_main_t * pg,
+ pg_stream_t * s,
+ u32 * buffers,
+ u32 n_buffers,
+ u32 lo_bit, u32 hi_bit,
+ u64 v_min, u64 v_max, u64 v, pg_edit_type_t edit_type)
+{
+ u32 max_bits, l0, l1, h1, start_bit;
+
+ if (v_min == v_max)
+ edit_type = PG_EDIT_FIXED;
+
+ l0 = lo_bit / BITS (u8);
+ l1 = lo_bit % BITS (u8);
+ h1 = hi_bit % BITS (u8);
+
+ start_bit = l0 * BITS (u8);
+
+ max_bits = hi_bit - start_bit;
+ ASSERT (max_bits <= 64);
+
+#define _(n) \
+ case (n): \
+ if (edit_type == PG_EDIT_INCREMENT) \
+ v = do_set_increment (pg, s, buffers, n_buffers, \
+ BITS (u##n), \
+ l0, \
+ /* is_net_byte_order */ 1, \
+ /* want sum */ 0, 0, \
+ v_min, v_max, \
+ v); \
+ else if (edit_type == PG_EDIT_RANDOM) \
+ do_set_random (pg, s, buffers, n_buffers, \
+ BITS (u##n), \
+ l0, \
+ /* is_net_byte_order */ 1, \
+ /* want sum */ 0, 0, \
+ v_min, v_max); \
+ else /* edit_type == PG_EDIT_FIXED */ \
+ do_set_fixed (pg, s, buffers, n_buffers, \
+ BITS (u##n), \
+ l0, \
+ /* is_net_byte_order */ 1, \
+ v_min, v_max); \
+ goto done;
+
+ if (l1 == 0 && h1 == 0)
+ {
+ switch (max_bits)
+ {
+ _(8);
+ _(16);
+ _(32);
+ _(64);
+ }
+ }
+
+#undef _
+
+ {
+ u64 mask;
+ u32 shift = l1;
+ u32 n_bits = max_bits;
+
+ max_bits = clib_max (max_pow2 (n_bits), 8);
+
+ mask = ((u64) 1 << (u64) n_bits) - 1;
+ mask &= ~(((u64) 1 << (u64) shift) - 1);
+
+ mask <<= max_bits - n_bits;
+ shift += max_bits - n_bits;
+
+ switch (max_bits)
+ {
+#define _(n) \
+ case (n): \
+ if (edit_type == PG_EDIT_INCREMENT) \
+ v = do_setbits_increment (pg, s, buffers, n_buffers, \
+ BITS (u##n), n_bits, \
+ l0, v_min, v_max, v, \
+ mask, shift); \
+ else if (edit_type == PG_EDIT_RANDOM) \
+ do_setbits_random (pg, s, buffers, n_buffers, \
+ BITS (u##n), n_bits, \
+ l0, v_min, v_max, \
+ mask, shift); \
+ else /* edit_type == PG_EDIT_FIXED */ \
+ do_setbits_fixed (pg, s, buffers, n_buffers, \
+ BITS (u##n), n_bits, \
+ l0, v_min, v_max, \
+ mask, shift); \
+ goto done;
+
+ _(8);
+ _(16);
+ _(32);
+ _(64);
+
+#undef _
+ }
+ }
+
+done:
+ return v;
+}
+
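+/* Set each generated packet's current_length (fixed, incrementing or
+   random) and credit the interface RX counters with the packet and byte
+   totals. */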
+static void
+pg_generate_set_lengths (pg_main_t * pg,
+ pg_stream_t * s, u32 * buffers, u32 n_buffers)
+{
+ u64 v_min, v_max, length_sum;
+ pg_edit_type_t edit_type;
+
+ v_min = s->min_packet_bytes;
+ v_max = s->max_packet_bytes;
+ edit_type = s->packet_size_edit_type;
+
+ if (edit_type == PG_EDIT_INCREMENT)
+ s->last_increment_packet_size
+ = do_set_increment (pg, s, buffers, n_buffers,
+ 8 * STRUCT_SIZE_OF (vlib_buffer_t, current_length),
+ STRUCT_OFFSET_OF (vlib_buffer_t, current_length),
+ /* is_net_byte_order */ 0,
+ /* want sum */ 1, &length_sum,
+ v_min, v_max, s->last_increment_packet_size);
+
+ else if (edit_type == PG_EDIT_RANDOM)
+ do_set_random (pg, s, buffers, n_buffers,
+ 8 * STRUCT_SIZE_OF (vlib_buffer_t, current_length),
+ STRUCT_OFFSET_OF (vlib_buffer_t, current_length),
+ /* is_net_byte_order */ 0,
+ /* want sum */ 1, &length_sum,
+ v_min, v_max);
+
+ else /* edit_type == PG_EDIT_FIXED */
+ {
+ do_set_fixed (pg, s, buffers, n_buffers,
+ 8 * STRUCT_SIZE_OF (vlib_buffer_t, current_length),
+ STRUCT_OFFSET_OF (vlib_buffer_t, current_length),
+ /* is_net_byte_order */ 0,
+ v_min, v_max);
+ length_sum = v_min * n_buffers;
+ }
+
+ {
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_sw_interface_t *si =
+ vnet_get_sw_interface (vnm, s->sw_if_index[VLIB_RX]);
+
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ os_get_cpu_number (),
+ si->sw_if_index, n_buffers, length_sum);
+ }
+
+}
+
+static void
+pg_generate_fix_multi_buffer_lengths (pg_main_t * pg,
+ pg_stream_t * s,
+ u32 * buffers, u32 n_buffers)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ pg_buffer_index_t *pbi;
+ uword n_bytes_left;
+ static u32 *unused_buffers = 0;
+
+ while (n_buffers > 0)
+ {
+ vlib_buffer_t *b;
+ u32 bi;
+
+ bi = buffers[0];
+ b = vlib_get_buffer (vm, bi);
+
+ /* Current length here is length of whole packet. */
+ n_bytes_left = b->current_length;
+
+ pbi = s->buffer_indices;
+ while (1)
+ {
+ uword n = clib_min (n_bytes_left, s->buffer_bytes);
+
+ b->current_length = n;
+ n_bytes_left -= n;
+ if (n_bytes_left > 0)
+ b->flags |= VLIB_BUFFER_NEXT_PRESENT;
+ else
+ b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
+
+	  /* Collect unused buffers; they are freed below. */
+ if (n == 0)
+ vec_add1 (unused_buffers, bi);
+
+ pbi++;
+ if (pbi >= vec_end (s->buffer_indices))
+ break;
+
+ bi = b->next_buffer;
+ b = vlib_get_buffer (vm, bi);
+ }
+ ASSERT (n_bytes_left == 0);
+
+ buffers += 1;
+ n_buffers -= 1;
+ }
+
+ if (vec_len (unused_buffers) > 0)
+ {
+ vlib_buffer_free_no_next (vm, unused_buffers, vec_len (unused_buffers));
+ _vec_len (unused_buffers) = 0;
+ }
+}
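+
+/*
+ * Worked example of the length fixup above (illustrative numbers,
+ * not from this patch): with s->buffer_bytes = 2048 and a generated
+ * packet length of 5000, a three-buffer chain gets current_length
+ * 2048, 2048 and 904; the first two buffers have
+ * VLIB_BUFFER_NEXT_PRESENT set and the last has it cleared. Any
+ * buffer whose share works out to 0 bytes is queued on
+ * unused_buffers and freed at the end of the function.
+ */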
+
+static void
+pg_generate_edit (pg_main_t * pg,
+ pg_stream_t * s, u32 * buffers, u32 n_buffers)
+{
+ pg_edit_t *e;
+
+ vec_foreach (e, s->non_fixed_edits)
+ {
+ switch (e->type)
+ {
+ case PG_EDIT_RANDOM:
+ case PG_EDIT_INCREMENT:
+ {
+ u32 lo_bit, hi_bit;
+ u64 v_min, v_max;
+
+ v_min = pg_edit_get_value (e, PG_EDIT_LO);
+ v_max = pg_edit_get_value (e, PG_EDIT_HI);
+
+ hi_bit = (BITS (u8) * STRUCT_OFFSET_OF (vlib_buffer_t, data)
+ + BITS (u8) + e->lsb_bit_offset);
+ lo_bit = hi_bit - e->n_bits;
+
+ e->last_increment_value
+ = do_it (pg, s, buffers, n_buffers, lo_bit, hi_bit, v_min, v_max,
+ e->last_increment_value, e->type);
+ }
+ break;
+
+ case PG_EDIT_UNSPECIFIED:
+ break;
+
+ default:
+ /* Should not be any fixed edits left. */
+ ASSERT (0);
+ break;
+ }
+ }
+
+  /* Call any edit functions to e.g. complete IP lengths, checksums, ... */
+ {
+ int i;
+ for (i = vec_len (s->edit_groups) - 1; i >= 0; i--)
+ {
+ pg_edit_group_t *g = s->edit_groups + i;
+ if (g->edit_function)
+ g->edit_function (pg, s, g, buffers, n_buffers);
+ }
+ }
+}
+
+static void
+pg_set_next_buffer_pointers (pg_main_t * pg,
+ pg_stream_t * s,
+ u32 * buffers, u32 * next_buffers, u32 n_buffers)
+{
+ vlib_main_t *vm = vlib_get_main ();
+
+ while (n_buffers >= 4)
+ {
+ u32 ni0, ni1;
+ vlib_buffer_t *b0, *b1;
+
+ b0 = vlib_get_buffer (vm, buffers[0]);
+ b1 = vlib_get_buffer (vm, buffers[1]);
+ ni0 = next_buffers[0];
+ ni1 = next_buffers[1];
+
+ vlib_prefetch_buffer_with_index (vm, buffers[2], WRITE);
+ vlib_prefetch_buffer_with_index (vm, buffers[3], WRITE);
+
+ b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
+ b1->flags |= VLIB_BUFFER_NEXT_PRESENT;
+ b0->next_buffer = ni0;
+ b1->next_buffer = ni1;
+
+ buffers += 2;
+ next_buffers += 2;
+ n_buffers -= 2;
+ }
+
+ while (n_buffers > 0)
+ {
+ u32 ni0;
+ vlib_buffer_t *b0;
+
+ b0 = vlib_get_buffer (vm, buffers[0]);
+ ni0 = next_buffers[0];
+ buffers += 1;
+ next_buffers += 1;
+ n_buffers -= 1;
+
+ b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
+ b0->next_buffer = ni0;
+ }
+}
+
+static_always_inline void
+init_replay_buffers_inline (vlib_main_t * vm,
+ pg_stream_t * s,
+ u32 * buffers,
+ u32 n_buffers, u32 data_offset, u32 n_data)
+{
+ u32 n_left, *b, i, l;
+
+ n_left = n_buffers;
+ b = buffers;
+ i = s->current_replay_packet_index;
+ l = vec_len (s->replay_packet_templates);
+
+ while (n_left >= 1)
+ {
+ u32 bi0, n0;
+ vlib_buffer_t *b0;
+ u8 *d0;
+
+ bi0 = b[0];
+ b += 1;
+ n_left -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = s->sw_if_index[VLIB_RX];
+ /* was s->sw_if_index[VLIB_TX]; */
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+ d0 = vec_elt (s->replay_packet_templates, i);
+
+ n0 = n_data;
+ if (data_offset + n_data >= vec_len (d0))
+ n0 = vec_len (d0) > data_offset ? vec_len (d0) - data_offset : 0;
+
+ b0->current_length = n0;
+
+ clib_memcpy (b0->data, d0 + data_offset, n0);
+ i = i + 1 == l ? 0 : i + 1;
+ }
+}
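+
+/*
+ * Worked example of the template clamp above (illustrative numbers,
+ * not from this patch): replaying a 300-byte template with
+ * data_offset = 256 and n_data = 128 gives n0 = 300 - 256 = 44, so
+ * only the 44 remaining template bytes are copied into this buffer
+ * of the chain and current_length is set accordingly.
+ */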
+
+static_always_inline void
+init_buffers_inline (vlib_main_t * vm,
+ pg_stream_t * s,
+ u32 * buffers,
+ u32 n_buffers, u32 data_offset, u32 n_data, u32 set_data)
+{
+ u32 n_left, *b;
+ u8 *data, *mask;
+
+ if (vec_len (s->replay_packet_templates) > 0)
+ return init_replay_buffers_inline (vm, s, buffers, n_buffers, data_offset,
+ n_data);
+
+ data = s->fixed_packet_data + data_offset;
+ mask = s->fixed_packet_data_mask + data_offset;
+ if (data + n_data >= vec_end (s->fixed_packet_data))
+ n_data = (data < vec_end (s->fixed_packet_data)
+ ? vec_end (s->fixed_packet_data) - data : 0);
+ if (n_data > 0)
+ {
+ ASSERT (data + n_data <= vec_end (s->fixed_packet_data));
+ ASSERT (mask + n_data <= vec_end (s->fixed_packet_data_mask));
+ }
+
+ n_left = n_buffers;
+ b = buffers;
+
+ while (n_left >= 4)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+
+ /* Prefetch next iteration. */
+ vlib_prefetch_buffer_with_index (vm, b[2], STORE);
+ vlib_prefetch_buffer_with_index (vm, b[3], STORE);
+
+ bi0 = b[0];
+ bi1 = b[1];
+ b += 2;
+ n_left -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] =
+ vnet_buffer (b1)->sw_if_index[VLIB_RX] = s->sw_if_index[VLIB_RX];
+
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] =
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+ if (set_data)
+ {
+ clib_memcpy (b0->data, data, n_data);
+ clib_memcpy (b1->data, data, n_data);
+ }
+ else
+ {
+ ASSERT (validate_buffer_data2 (b0, s, data_offset, n_data));
+ ASSERT (validate_buffer_data2 (b1, s, data_offset, n_data));
+ }
+ }
+
+ while (n_left >= 1)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+
+ bi0 = b[0];
+ b += 1;
+ n_left -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = s->sw_if_index[VLIB_RX];
+      /* was s->sw_if_index[VLIB_TX]; */
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+ if (set_data)
+ clib_memcpy (b0->data, data, n_data);
+ else
+ ASSERT (validate_buffer_data2 (b0, s, data_offset, n_data));
+ }
+}
+
+static void
+pg_buffer_init (vlib_main_t * vm,
+ vlib_buffer_free_list_t * fl, u32 * buffers, u32 n_buffers)
+{
+ pg_main_t *pg = &pg_main;
+ pg_stream_t *s;
+ uword bi, si;
+
+ si = fl->buffer_init_function_opaque & pow2_mask (24);
+ bi = fl->buffer_init_function_opaque >> 24;
+
+ s = pool_elt_at_index (pg->streams, si);
+
+ init_buffers_inline (vm, s, buffers, n_buffers,
+ /* data_offset */ bi * s->buffer_bytes,
+ /* n_data */ s->buffer_bytes,
+ /* set_data */ 1);
+}
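+
+/*
+ * The buffer-init opaque above packs two indices: the low 24 bits
+ * hold the stream pool index and the high bits hold the buffer's
+ * position within the packet chain. For example (illustrative
+ * numbers), stream index 5 at chain position 2 is encoded as
+ * 5 | (2 << 24) and decoded exactly as in pg_buffer_init.
+ */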
+
+static u32
+pg_stream_fill_helper (pg_main_t * pg,
+ pg_stream_t * s,
+ pg_buffer_index_t * bi,
+ u32 * buffers, u32 * next_buffers, u32 n_alloc)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_buffer_free_list_t *f;
+ uword is_start_of_packet = bi == s->buffer_indices;
+ u32 n_allocated;
+
+ f = vlib_buffer_get_free_list (vm, bi->free_list_index);
+
+ /*
+ * Historically, the pg maintained its own free lists and
+   * device drivers' tx paths would return pkts. With DPDK,
+ * that doesn't happen.
+ */
+ if (DPDK == 0 && !(s->flags & PG_STREAM_FLAGS_DISABLE_BUFFER_RECYCLE))
+ f->buffer_init_function = pg_buffer_init;
+ f->buffer_init_function_opaque =
+ (s - pg->streams) | ((bi - s->buffer_indices) << 24);
+
+ if (is_start_of_packet)
+ vnet_buffer (&f->buffer_init_template)->sw_if_index[VLIB_RX]
+ = vnet_main.local_interface_sw_if_index;
+
+ n_allocated = vlib_buffer_alloc_from_free_list (vm,
+ buffers,
+ n_alloc,
+ bi->free_list_index);
+ if (n_allocated == 0)
+ return 0;
+
+ /*
+ * We can't assume we got all the buffers we asked for...
+ * This never worked until recently.
+ */
+ n_alloc = n_allocated;
+
+ /* Reinitialize buffers */
+ if (DPDK == 0 || CLIB_DEBUG > 0
+ || (s->flags & PG_STREAM_FLAGS_DISABLE_BUFFER_RECYCLE))
+ init_buffers_inline
+ (vm, s,
+ buffers,
+ n_alloc, (bi - s->buffer_indices) * s->buffer_bytes /* data offset */ ,
+ s->buffer_bytes,
+ /* set_data */
+ DPDK == 1 || (s->flags & PG_STREAM_FLAGS_DISABLE_BUFFER_RECYCLE) != 0);
+
+ if (next_buffers)
+ pg_set_next_buffer_pointers (pg, s, buffers, next_buffers, n_alloc);
+
+ if (is_start_of_packet)
+ {
+ if (vec_len (s->replay_packet_templates) > 0)
+ {
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_sw_interface_t *si =
+ vnet_get_sw_interface (vnm, s->sw_if_index[VLIB_RX]);
+ u32 l = 0;
+ u32 i;
+ for (i = 0; i < n_alloc; i++)
+ l += vlib_buffer_index_length_in_chain (vm, buffers[i]);
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ os_get_cpu_number (),
+ si->sw_if_index, n_alloc, l);
+ s->current_replay_packet_index += n_alloc;
+ s->current_replay_packet_index %=
+ vec_len (s->replay_packet_templates);
+ }
+ else
+ {
+ pg_generate_set_lengths (pg, s, buffers, n_alloc);
+ if (vec_len (s->buffer_indices) > 1)
+ pg_generate_fix_multi_buffer_lengths (pg, s, buffers, n_alloc);
+
+ pg_generate_edit (pg, s, buffers, n_alloc);
+ }
+ }
+
+ return n_alloc;
+}
+
+static u32
+pg_stream_fill (pg_main_t * pg, pg_stream_t * s, u32 n_buffers)
+{
+ pg_buffer_index_t *bi;
+ word i, n_in_fifo, n_alloc, n_free, n_added;
+ u32 *tail, *start, *end, *last_tail, *last_start;
+
+ bi = s->buffer_indices;
+
+ n_in_fifo = clib_fifo_elts (bi->buffer_fifo);
+ if (n_in_fifo >= n_buffers)
+ return n_in_fifo;
+
+ n_alloc = n_buffers - n_in_fifo;
+
+  /* Round up to at least a full frame; the packet limit is applied below. */
+ n_alloc = clib_max (VLIB_FRAME_SIZE, n_alloc);
+
+ if (s->n_packets_limit > 0
+ && s->n_packets_generated + n_in_fifo + n_alloc >= s->n_packets_limit)
+ {
+ n_alloc = s->n_packets_limit - s->n_packets_generated - n_in_fifo;
+ if (n_alloc < 0)
+ n_alloc = 0;
+ }
+
+ /* All buffer fifos should have the same size. */
+ if (CLIB_DEBUG > 0)
+ {
+ uword l = ~0, e;
+ vec_foreach (bi, s->buffer_indices)
+ {
+ e = clib_fifo_elts (bi->buffer_fifo);
+ if (bi == s->buffer_indices)
+ l = e;
+ ASSERT (l == e);
+ }
+ }
+
+ last_tail = last_start = 0;
+ n_added = n_alloc;
+
+ for (i = vec_len (s->buffer_indices) - 1; i >= 0; i--)
+ {
+ bi = vec_elt_at_index (s->buffer_indices, i);
+
+ n_free = clib_fifo_free_elts (bi->buffer_fifo);
+ if (n_free < n_alloc)
+ clib_fifo_resize (bi->buffer_fifo, n_alloc - n_free);
+
+ tail = clib_fifo_advance_tail (bi->buffer_fifo, n_alloc);
+ start = bi->buffer_fifo;
+ end = clib_fifo_end (bi->buffer_fifo);
+
+ if (tail + n_alloc <= end)
+ {
+ n_added =
+ pg_stream_fill_helper (pg, s, bi, tail, last_tail, n_alloc);
+ }
+ else
+ {
+ u32 n = clib_min (end - tail, n_alloc);
+ n_added = pg_stream_fill_helper (pg, s, bi, tail, last_tail, n);
+
+ if (n_added == n && n_alloc > n_added)
+ {
+ n_added += pg_stream_fill_helper
+ (pg, s, bi, start, last_start, n_alloc - n_added);
+ }
+ }
+
+ if (PREDICT_FALSE (n_added < n_alloc))
+ tail = clib_fifo_advance_tail (bi->buffer_fifo, n_added - n_alloc);
+
+ last_tail = tail;
+ last_start = start;
+
+ /* Verify that pkts in the fifo are properly allocated */
+ }
+
+ return n_in_fifo + n_added;
+}
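+
+/*
+ * Worked example of the wrap-around fill above (illustrative
+ * numbers, not from this patch): with a 512-slot fifo whose tail
+ * advances from slot 400 and n_alloc = 256, the first
+ * pg_stream_fill_helper call covers the 112 slots up to the fifo
+ * end and the second covers the remaining 144 slots from the start.
+ * If fewer buffers are allocated than requested, the tail is moved
+ * back by the shortfall.
+ */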
+
+typedef struct
+{
+ u32 stream_index;
+
+ u32 packet_length;
+
+  /* Use pre_data for packet data. */
+ vlib_buffer_t buffer;
+} pg_input_trace_t;
+
+static u8 *
+format_pg_input_trace (u8 * s, va_list * va)
+{
+ vlib_main_t *vm = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ pg_input_trace_t *t = va_arg (*va, pg_input_trace_t *);
+ pg_main_t *pg = &pg_main;
+ pg_stream_t *stream;
+ vlib_node_t *n;
+ uword indent = format_get_indent (s);
+
+ stream = 0;
+ if (!pool_is_free_index (pg->streams, t->stream_index))
+ stream = pool_elt_at_index (pg->streams, t->stream_index);
+
+ if (stream)
+ s = format (s, "stream %v", pg->streams[t->stream_index].name);
+ else
+ s = format (s, "stream %d", t->stream_index);
+
+ s = format (s, ", %d bytes", t->packet_length);
+
+ s = format (s, "\n%U%U",
+ format_white_space, indent, format_vlib_buffer, &t->buffer);
+
+ s = format (s, "\n%U", format_white_space, indent);
+
+ n = 0;
+ if (stream)
+ n = vlib_get_node (vm, stream->node_index);
+
+ if (n && n->format_buffer)
+ s = format (s, "%U", n->format_buffer,
+ t->buffer.pre_data, sizeof (t->buffer.pre_data));
+ else
+ s = format (s, "%U",
+ format_hex_bytes, t->buffer.pre_data,
+ ARRAY_LEN (t->buffer.pre_data));
+ return s;
+}
+
+static void
+pg_input_trace (pg_main_t * pg,
+ vlib_node_runtime_t * node,
+ pg_stream_t * s, u32 * buffers, u32 n_buffers)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ u32 *b, n_left, stream_index, next_index;
+
+ n_left = n_buffers;
+ b = buffers;
+ stream_index = s - pg->streams;
+ next_index = s->next_index;
+
+ while (n_left >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ pg_input_trace_t *t0, *t1;
+
+ bi0 = b[0];
+ bi1 = b[1];
+ b += 2;
+ n_left -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 1);
+ vlib_trace_buffer (vm, node, next_index, b1, /* follow_chain */ 1);
+
+ t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
+
+ t0->stream_index = stream_index;
+ t1->stream_index = stream_index;
+
+ t0->packet_length = vlib_buffer_length_in_chain (vm, b0);
+ t1->packet_length = vlib_buffer_length_in_chain (vm, b1);
+
+ clib_memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
+ clib_memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b1->pre_data));
+
+ clib_memcpy (t0->buffer.pre_data, b0->data,
+ sizeof (t0->buffer.pre_data));
+ clib_memcpy (t1->buffer.pre_data, b1->data,
+ sizeof (t1->buffer.pre_data));
+ }
+
+ while (n_left >= 1)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ pg_input_trace_t *t0;
+
+ bi0 = b[0];
+ b += 1;
+ n_left -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 1);
+ t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+
+ t0->stream_index = stream_index;
+ t0->packet_length = vlib_buffer_length_in_chain (vm, b0);
+ clib_memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
+ clib_memcpy (t0->buffer.pre_data, b0->data,
+ sizeof (t0->buffer.pre_data));
+ }
+}
+
+static uword
+pg_generate_packets (vlib_node_runtime_t * node,
+ pg_main_t * pg,
+ pg_stream_t * s, uword n_packets_to_generate)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ u32 *to_next, n_this_frame, n_left, n_trace, n_packets_in_fifo;
+ uword n_packets_generated;
+ pg_buffer_index_t *bi, *bi0;
+ u32 next_index = s->next_index;
+ vnet_feature_main_t *fm = &feature_main;
+ vnet_feature_config_main_t *cm;
+ u8 feature_arc_index = fm->device_input_feature_arc_index;
+ cm = &fm->feature_config_mains[feature_arc_index];
+ u32 current_config_index = ~(u32) 0;
+ int i;
+
+ bi0 = s->buffer_indices;
+
+ n_packets_in_fifo = pg_stream_fill (pg, s, n_packets_to_generate);
+ n_packets_to_generate = clib_min (n_packets_in_fifo, n_packets_to_generate);
+ n_packets_generated = 0;
+
+ if (PREDICT_FALSE
+ (vnet_have_features (feature_arc_index, s->sw_if_index[VLIB_RX])))
+ {
+ current_config_index =
+ vec_elt (cm->config_index_by_sw_if_index, s->sw_if_index[VLIB_RX]);
+ vnet_get_config_data (&cm->config_main, &current_config_index,
+ &next_index, 0);
+ }
+
+ while (n_packets_to_generate > 0)
+ {
+ u32 *head, *start, *end;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left);
+
+ n_this_frame = n_packets_to_generate;
+ if (n_this_frame > n_left)
+ n_this_frame = n_left;
+
+ start = bi0->buffer_fifo;
+ end = clib_fifo_end (bi0->buffer_fifo);
+ head = clib_fifo_head (bi0->buffer_fifo);
+
+ if (head + n_this_frame <= end)
+ vlib_copy_buffers (to_next, head, n_this_frame);
+ else
+ {
+ u32 n = end - head;
+ vlib_copy_buffers (to_next + 0, head, n);
+ vlib_copy_buffers (to_next + n, start, n_this_frame - n);
+ }
+
+ vec_foreach (bi, s->buffer_indices)
+ clib_fifo_advance_head (bi->buffer_fifo, n_this_frame);
+
+ if (current_config_index != ~(u32) 0)
+ for (i = 0; i < n_this_frame; i++)
+ {
+ vlib_buffer_t *b;
+ b = vlib_get_buffer (vm, to_next[i]);
+ vnet_buffer (b)->device_input_feat.saved_next_index =
+ s->next_index;
+ vnet_buffer (b)->device_input_feat.buffer_advance = 0;
+ b->current_config_index = current_config_index;
+ b->feature_arc_index = feature_arc_index;
+ }
+
+ n_trace = vlib_get_trace_count (vm, node);
+ if (n_trace > 0)
+ {
+ u32 n = clib_min (n_trace, n_this_frame);
+ pg_input_trace (pg, node, s, to_next, n);
+ vlib_set_trace_count (vm, node, n_trace - n);
+ }
+ n_packets_to_generate -= n_this_frame;
+ n_packets_generated += n_this_frame;
+ n_left -= n_this_frame;
+ vlib_put_next_frame (vm, node, next_index, n_left);
+ }
+
+ return n_packets_generated;
+}
+
+static uword
+pg_input_stream (vlib_node_runtime_t * node, pg_main_t * pg, pg_stream_t * s)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ uword n_packets;
+ f64 time_now, dt;
+
+ if (s->n_packets_limit > 0 && s->n_packets_generated >= s->n_packets_limit)
+ {
+ pg_stream_enable_disable (pg, s, /* want_enabled */ 0);
+ return 0;
+ }
+
+ /* Apply rate limit. */
+ time_now = vlib_time_now (vm);
+ if (s->time_last_generate == 0)
+ s->time_last_generate = time_now;
+
+ dt = time_now - s->time_last_generate;
+ s->time_last_generate = time_now;
+
+ n_packets = VLIB_FRAME_SIZE;
+ if (s->rate_packets_per_second > 0)
+ {
+ s->packet_accumulator += dt * s->rate_packets_per_second;
+ n_packets = s->packet_accumulator;
+
+ /* Never allow accumulator to grow if we get behind. */
+ s->packet_accumulator -= n_packets;
+ }
+
+ /* Apply fixed limit. */
+ if (s->n_packets_limit > 0
+ && s->n_packets_generated + n_packets > s->n_packets_limit)
+ n_packets = s->n_packets_limit - s->n_packets_generated;
+
+ /* Generate up to one frame's worth of packets. */
+ if (n_packets > VLIB_FRAME_SIZE)
+ n_packets = VLIB_FRAME_SIZE;
+
+ if (n_packets > 0)
+ n_packets = pg_generate_packets (node, pg, s, n_packets);
+
+ s->n_packets_generated += n_packets;
+
+ return n_packets;
+}
+
+uword
+pg_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ uword i;
+ pg_main_t *pg = &pg_main;
+ uword n_packets = 0;
+ u32 worker_index = 0;
+
+ if (vlib_num_workers ())
+ worker_index = vlib_get_current_worker_index ();
+
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (i, pg->enabled_streams[worker_index], ({
+ pg_stream_t *s = vec_elt_at_index (pg->streams, i);
+ n_packets += pg_input_stream (node, pg, s);
+ }));
+ /* *INDENT-ON* */
+
+ return n_packets;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (pg_input_node) = {
+ .function = pg_input,
+ .name = "pg-input",
+ .sibling_of = "device-input",
+ .type = VLIB_NODE_TYPE_INPUT,
+
+ .format_trace = format_pg_input_trace,
+
+ /* Input node will be left disabled until a stream is active. */
+ .state = VLIB_NODE_STATE_DISABLED,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/pg/output.c b/src/vnet/pg/output.c
new file mode 100644
index 00000000000..3d1f2660e20
--- /dev/null
+++ b/src/vnet/pg/output.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * pg_output.c: packet generator output
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+
+uword
+pg_output (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ pg_main_t *pg = &pg_main;
+ u32 *buffers = vlib_frame_args (frame);
+ uword n_buffers = frame->n_vectors;
+ uword n_left = n_buffers;
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ pg_interface_t *pif = pool_elt_at_index (pg->interfaces, rd->dev_instance);
+
+ if (PREDICT_FALSE (pif->lockp != 0))
+ while (__sync_lock_test_and_set (pif->lockp, 1))
+ ;
+
+ if (pif->pcap_file_name != 0)
+ {
+ while (n_left > 0)
+ {
+ n_left--;
+ u32 bi0 = buffers[0];
+ buffers++;
+
+ pcap_add_buffer (&pif->pcap_main, vm, bi0,
+ ETHERNET_MAX_PACKET_BYTES);
+ }
+ pcap_write (&pif->pcap_main);
+ }
+
+ vlib_buffer_free (vm, vlib_frame_args (frame), n_buffers);
+ if (PREDICT_FALSE (pif->lockp != 0))
+ *pif->lockp = 0;
+ return n_buffers;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/pg/pg.h b/src/vnet/pg/pg.h
new file mode 100644
index 00000000000..a4027834035
--- /dev/null
+++ b/src/vnet/pg/pg.h
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * pg.h: VLIB packet generator
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_vlib_pg_h
+#define included_vlib_pg_h
+
+#include <vlib/vlib.h> /* for VLIB_N_RX_TX */
+#include <vnet/pg/edit.h>
+#include <vppinfra/fifo.h> /* for buffer_fifo */
+#include <vnet/unix/pcap.h>
+#include <vnet/interface.h>
+
+extern vnet_device_class_t pg_dev_class;
+
+struct pg_main_t;
+struct pg_stream_t;
+
+typedef struct pg_edit_group_t
+{
+ /* Edits in this group. */
+ pg_edit_t *edits;
+
+ /* Vector of non-fixed edits for this group. */
+ pg_edit_t *non_fixed_edits;
+
+ /* Fixed edits for this group. */
+ u8 *fixed_packet_data;
+ u8 *fixed_packet_data_mask;
+
+ /* Byte offset where packet data begins. */
+ u32 start_byte_offset;
+
+ /* Number of packet bytes for this edit group. */
+ u32 n_packet_bytes;
+
+ /* Function to perform miscellaneous edits (e.g. set IP checksum, ...). */
+ void (*edit_function) (struct pg_main_t * pg,
+ struct pg_stream_t * s,
+ struct pg_edit_group_t * g,
+ u32 * buffers, u32 n_buffers);
+
+ /* Opaque data for edit function's use. */
+ uword edit_function_opaque;
+} pg_edit_group_t;
+
+/* Packets are made of multiple buffers chained together.
+   This struct keeps track of data for each buffer index in the chain. */
+typedef struct
+{
+ /* Vector of buffer edits for this stream and buffer index. */
+ pg_edit_t *edits;
+
+ /* Buffers pre-initialized with fixed buffer data for this stream. */
+ u32 *buffer_fifo;
+
+ /* Buffer free list for this buffer index in stream. */
+ u32 free_list_index;
+} pg_buffer_index_t;
+
+typedef struct pg_stream_t
+{
+ /* Stream name. */
+ u8 *name;
+
+ u32 flags;
+
+ /* Stream is currently enabled. */
+#define PG_STREAM_FLAGS_IS_ENABLED (1 << 0)
+#define PG_STREAM_FLAGS_DISABLE_BUFFER_RECYCLE (1 << 1)
+
+ /* Edit groups are created by each protocol level (e.g. ethernet,
+ ip4, tcp, ...). */
+ pg_edit_group_t *edit_groups;
+
+ pg_edit_type_t packet_size_edit_type;
+
+ /* Min/max packet size. */
+ u32 min_packet_bytes, max_packet_bytes;
+
+ /* Vector of non-fixed edits for this stream.
+ All fixed edits are performed and placed into fixed_packet_data. */
+ pg_edit_t *non_fixed_edits;
+
+ /* Packet data with all fixed edits performed.
+     All packets in the stream are initialized according to this data.
+ Mask specifies which bits of packet data are covered by fixed edits. */
+ u8 *fixed_packet_data, *fixed_packet_data_mask;
+
+ /* Size to use for buffers. 0 means use buffers big enough
+ for max_packet_bytes. */
+ u32 buffer_bytes;
+
+ /* Last packet length if packet size edit type is increment. */
+ u32 last_increment_packet_size;
+
+ /* Index into main interface pool for this stream. */
+ u32 pg_if_index;
+
+  /* Interface used to mark packets for this stream. May differ from
+     the hw/sw index in the pg main interface pool. They will differ
+     if this stream is being used to generate buffers as if they were
+     received on a non-pg interface; for example, when testing vlan
+     code you may want to generate buffers that appear to come from an
+     ethernet interface. */
+ u32 sw_if_index[VLIB_N_RX_TX];
+
+ /* Node where stream's buffers get put. */
+ u32 node_index;
+
+ /* Worker thread index */
+ u32 worker_index;
+
+ /* Output next index to reach output node from stream input node. */
+ u32 next_index;
+
+ u32 if_id;
+
+ /* Number of packets currently generated. */
+ u64 n_packets_generated;
+
+ /* Stream is disabled when packet limit is reached.
+ Zero means no packet limit. */
+ u64 n_packets_limit;
+
+ /* Rate for this stream in packets/second.
+ Zero means unlimited rate. */
+ f64 rate_packets_per_second;
+
+ f64 time_last_generate;
+
+ f64 packet_accumulator;
+
+ pg_buffer_index_t *buffer_indices;
+
+ u8 **replay_packet_templates;
+ u32 current_replay_packet_index;
+} pg_stream_t;
+
+always_inline void
+pg_buffer_index_free (pg_buffer_index_t * bi)
+{
+ vec_free (bi->edits);
+ clib_fifo_free (bi->buffer_fifo);
+}
+
+always_inline void
+pg_edit_group_free (pg_edit_group_t * g)
+{
+ pg_edit_t *e;
+ vec_foreach (e, g->edits) pg_edit_free (e);
+ vec_free (g->edits);
+ vec_free (g->fixed_packet_data);
+ vec_free (g->fixed_packet_data_mask);
+}
+
+always_inline void
+pg_stream_free (pg_stream_t * s)
+{
+ pg_edit_group_t *g;
+ pg_edit_t *e;
+ vec_foreach (e, s->non_fixed_edits) pg_edit_free (e);
+ vec_free (s->non_fixed_edits);
+ vec_foreach (g, s->edit_groups) pg_edit_group_free (g);
+ vec_free (s->edit_groups);
+ vec_free (s->fixed_packet_data);
+ vec_free (s->fixed_packet_data_mask);
+ vec_free (s->name);
+
+ {
+ pg_buffer_index_t *bi;
+ vec_foreach (bi, s->buffer_indices) pg_buffer_index_free (bi);
+ vec_free (s->buffer_indices);
+ }
+}
+
+always_inline int
+pg_stream_is_enabled (pg_stream_t * s)
+{
+ return (s->flags & PG_STREAM_FLAGS_IS_ENABLED) != 0;
+}
+
+always_inline pg_edit_group_t *
+pg_stream_get_group (pg_stream_t * s, u32 group_index)
+{
+ return vec_elt_at_index (s->edit_groups, group_index);
+}
+
+always_inline void *
+pg_create_edit_group (pg_stream_t * s,
+ int n_edit_bytes, int n_packet_bytes, u32 * group_index)
+{
+ pg_edit_group_t *g;
+ int n_edits;
+
+ vec_add2 (s->edit_groups, g, 1);
+ if (group_index)
+ *group_index = g - s->edit_groups;
+
+ ASSERT (n_edit_bytes % sizeof (pg_edit_t) == 0);
+ n_edits = n_edit_bytes / sizeof (pg_edit_t);
+ vec_resize (g->edits, n_edits);
+
+ g->n_packet_bytes = n_packet_bytes;
+
+ return g->edits;
+}
+
+always_inline void *
+pg_add_edits (pg_stream_t * s, int n_edit_bytes, int n_packet_bytes,
+ u32 group_index)
+{
+ pg_edit_group_t *g = pg_stream_get_group (s, group_index);
+ pg_edit_t *e;
+ int n_edits;
+ ASSERT (n_edit_bytes % sizeof (pg_edit_t) == 0);
+ n_edits = n_edit_bytes / sizeof (pg_edit_t);
+ vec_add2 (g->edits, e, n_edits);
+ g->n_packet_bytes += n_packet_bytes;
+ return e;
+}
+
+always_inline void *
+pg_get_edit_group (pg_stream_t * s, u32 group_index)
+{
+ pg_edit_group_t *g = pg_stream_get_group (s, group_index);
+ return g->edits;
+}
+
+/* Number of bytes for all groups >= given group. */
+always_inline uword
+pg_edit_group_n_bytes (pg_stream_t * s, u32 group_index)
+{
+ pg_edit_group_t *g;
+ uword n_bytes = 0;
+
+ for (g = s->edit_groups + group_index; g < vec_end (s->edit_groups); g++)
+ n_bytes += g->n_packet_bytes;
+ return n_bytes;
+}
+
+always_inline void
+pg_free_edit_group (pg_stream_t * s)
+{
+ uword i = vec_len (s->edit_groups) - 1;
+ pg_edit_group_t *g = pg_stream_get_group (s, i);
+
+ pg_edit_group_free (g);
+ memset (g, 0, sizeof (g[0]));
+ _vec_len (s->edit_groups) = i;
+}
+
+typedef struct
+{
+ /* TX lock */
+ volatile u32 *lockp;
+
+ /* VLIB interface indices. */
+ u32 hw_if_index, sw_if_index;
+
+ /* Identifies stream for this interface. */
+ u32 id;
+
+ pcap_main_t pcap_main;
+ u8 *pcap_file_name;
+} pg_interface_t;
+
+/* Per VLIB node data. */
+typedef struct
+{
+ /* Parser function indexed by node index. */
+ unformat_function_t *unformat_edit;
+} pg_node_t;
+
+typedef struct pg_main_t
+{
+ /* Pool of streams. */
+ pg_stream_t *streams;
+
+ /* Bitmap indicating which streams are currently enabled. */
+ uword **enabled_streams;
+
+ /* Hash mapping name -> stream index. */
+ uword *stream_index_by_name;
+
+ /* Pool of interfaces. */
+ pg_interface_t *interfaces;
+ uword *if_index_by_if_id;
+
+ /* Per VLIB node information. */
+ pg_node_t *nodes;
+} pg_main_t;
+
+/* Global main structure. */
+extern pg_main_t pg_main;
+
+/* Global node. */
+extern vlib_node_registration_t pg_input_node;
+
+/* Buffer generator input, output node functions. */
+vlib_node_function_t pg_input, pg_output;
+
+/* Stream add/delete. */
+void pg_stream_del (pg_main_t * pg, uword index);
+void pg_stream_add (pg_main_t * pg, pg_stream_t * s_init);
+
+/* Enable/disable stream. */
+void pg_stream_enable_disable (pg_main_t * pg, pg_stream_t * s,
+ int is_enable);
+
+/* Find/create free packet-generator interface index. */
+u32 pg_interface_add_or_get (pg_main_t * pg, uword stream_index);
+
+always_inline pg_node_t *
+pg_get_node (uword node_index)
+{
+ pg_main_t *pg = &pg_main;
+ vec_validate (pg->nodes, node_index);
+ return pg->nodes + node_index;
+}
+
+void pg_edit_group_get_fixed_packet_data (pg_stream_t * s,
+ u32 group_index,
+ void *fixed_packet_data,
+ void *fixed_packet_data_mask);
+
+void pg_enable_disable (u32 stream_index, int is_enable);
+
+typedef struct
+{
+ u32 hw_if_index;
+ u32 dev_instance;
+ u8 is_enabled;
+ u8 *pcap_file_name;
+ u32 count;
+} pg_capture_args_t;
+
+clib_error_t *pg_capture (pg_capture_args_t * a);
+
+#endif /* included_vlib_pg_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/pg/stream.c b/src/vnet/pg/stream.c
new file mode 100644
index 00000000000..1ed7189ffc9
--- /dev/null
+++ b/src/vnet/pg/stream.c
@@ -0,0 +1,497 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * pg_stream.c: packet generator streams
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/mpls/mpls.h>
+#include <vnet/devices/devices.h>
+
+/* Mark stream active or inactive. */
+void
+pg_stream_enable_disable (pg_main_t * pg, pg_stream_t * s, int want_enabled)
+{
+ vlib_main_t *vm;
+ vnet_main_t *vnm = vnet_get_main ();
+ pg_interface_t *pi = pool_elt_at_index (pg->interfaces, s->pg_if_index);
+
+ want_enabled = want_enabled != 0;
+
+ if (pg_stream_is_enabled (s) == want_enabled)
+ /* No change necessary. */
+ return;
+
+ if (want_enabled)
+ s->n_packets_generated = 0;
+
+ /* Toggle enabled flag. */
+ s->flags ^= PG_STREAM_FLAGS_IS_ENABLED;
+
+ ASSERT (!pool_is_free (pg->streams, s));
+
+ vec_validate (pg->enabled_streams, s->worker_index);
+ pg->enabled_streams[s->worker_index] =
+ clib_bitmap_set (pg->enabled_streams[s->worker_index], s - pg->streams,
+ want_enabled);
+
+ if (want_enabled)
+ {
+ vnet_hw_interface_set_flags (vnm, pi->hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+
+ vnet_sw_interface_set_flags (vnm, pi->sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ }
+
+ if (vlib_num_workers ())
+ vm = vlib_get_worker_vlib_main (s->worker_index);
+ else
+ vm = vlib_get_main ();
+
+ vlib_node_set_state (vm, pg_input_node.index,
+ (clib_bitmap_is_zero
+ (pg->enabled_streams[s->worker_index]) ?
+ VLIB_NODE_STATE_DISABLED : VLIB_NODE_STATE_POLLING));
+
+ s->packet_accumulator = 0;
+ s->time_last_generate = 0;
+}
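+
+/*
+ * For example (illustrative numbers): enabling stream 3 on worker 0
+ * sets bit 3 of pg->enabled_streams[0] and switches that worker's
+ * pg-input node to VLIB_NODE_STATE_POLLING; disabling the last
+ * enabled stream empties the bitmap and the node returns to
+ * VLIB_NODE_STATE_DISABLED.
+ */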
+
+static u8 *
+format_pg_interface_name (u8 * s, va_list * args)
+{
+ pg_main_t *pg = &pg_main;
+ u32 if_index = va_arg (*args, u32);
+ pg_interface_t *pi;
+
+ pi = pool_elt_at_index (pg->interfaces, if_index);
+ s = format (s, "pg%d", pi->id);
+
+ return s;
+}
+
+static clib_error_t *
+pg_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+{
+ u32 hw_flags = 0;
+
+ if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ hw_flags = VNET_HW_INTERFACE_FLAG_LINK_UP;
+
+ vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (pg_dev_class) = {
+ .name = "pg",
+ .tx_function = pg_output,
+ .format_device_name = format_pg_interface_name,
+ .admin_up_down_function = pg_interface_admin_up_down,
+};
+/* *INDENT-ON* */
+
+static u8 *
+pg_build_rewrite (vnet_main_t * vnm,
+ u32 sw_if_index,
+ vnet_link_t link_type, const void *dst_address)
+{
+ u8 *rewrite = NULL;
+ u16 *h;
+
+ vec_validate (rewrite, sizeof (*h) - 1);
+ h = (u16 *) rewrite;
+ h[0] = clib_host_to_net_u16 (vnet_link_to_l3_proto (link_type));
+
+ return (rewrite);
+}
+
+/* *INDENT-OFF* */
+VNET_HW_INTERFACE_CLASS (pg_interface_class,static) = {
+ .name = "Packet generator",
+ .build_rewrite = pg_build_rewrite,
+};
+/* *INDENT-ON* */
+
+static u32
+pg_eth_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi, u32 flags)
+{
+ /* nothing for now */
+ return 0;
+}
+
+u32
+pg_interface_add_or_get (pg_main_t * pg, uword if_id)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vlib_get_main ();
+ pg_interface_t *pi;
+ vnet_hw_interface_t *hi;
+ uword *p;
+ u32 i;
+
+ p = hash_get (pg->if_index_by_if_id, if_id);
+
+ if (p)
+ {
+ return p[0];
+ }
+ else
+ {
+ u8 hw_addr[6];
+ f64 now = vlib_time_now (vm);
+ u32 rnd;
+
+ pool_get (pg->interfaces, pi);
+ i = pi - pg->interfaces;
+
+ rnd = (u32) (now * 1e6);
+ rnd = random_u32 (&rnd);
+ clib_memcpy (hw_addr + 2, &rnd, sizeof (rnd));
+ hw_addr[0] = 2;
+ hw_addr[1] = 0xfe;
+
+ pi->id = if_id;
+ ethernet_register_interface (vnm, pg_dev_class.index, i, hw_addr,
+ &pi->hw_if_index, pg_eth_flag_change);
+ hi = vnet_get_hw_interface (vnm, pi->hw_if_index);
+ pi->sw_if_index = hi->sw_if_index;
+
+ hash_set (pg->if_index_by_if_id, if_id, i);
+
+ if (vlib_num_workers ())
+ {
+ pi->lockp = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+ CLIB_CACHE_LINE_BYTES);
+ *pi->lockp = 0;
+ }
+
+ ip4_sw_interface_enable_disable (pi->hw_if_index, 1);
+ ip6_sw_interface_enable_disable (pi->hw_if_index, 1);
+ mpls_sw_interface_enable_disable (&mpls_main, pi->hw_if_index, 1);
+ }
+
+ return i;
+}
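+
+/*
+ * The MAC generated above is 02:fe:xx:xx:xx:xx: hw_addr[0] = 2 sets
+ * the locally-administered bit, 0xfe is a fixed marker byte, and the
+ * last four bytes come from a time-seeded random number, so each pg
+ * interface gets a distinct but non-globally-unique address.
+ */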
+
+static void
+do_edit (pg_stream_t * stream,
+ pg_edit_group_t * g, pg_edit_t * e, uword want_commit)
+{
+ u32 i, i0, i1, mask, n_bits_left;
+ u8 *v, *s, *m;
+
+ i0 = e->lsb_bit_offset / BITS (u8);
+
+ /* Make space for edit in value and mask. */
+ vec_validate (g->fixed_packet_data, i0);
+ vec_validate (g->fixed_packet_data_mask, i0);
+
+ if (e->type != PG_EDIT_FIXED)
+ {
+ switch (e->type)
+ {
+ case PG_EDIT_RANDOM:
+ case PG_EDIT_INCREMENT:
+ e->last_increment_value = pg_edit_get_value (e, PG_EDIT_LO);
+ break;
+
+ default:
+ break;
+ }
+
+ if (want_commit)
+ {
+ ASSERT (e->type != PG_EDIT_INVALID_TYPE);
+ vec_add1 (g->non_fixed_edits, e[0]);
+ }
+ return;
+ }
+
+ s = g->fixed_packet_data;
+ m = g->fixed_packet_data_mask;
+
+ n_bits_left = e->n_bits;
+ i0 = e->lsb_bit_offset / BITS (u8);
+ i1 = e->lsb_bit_offset % BITS (u8);
+
+ v = e->values[PG_EDIT_LO];
+ i = pg_edit_n_alloc_bytes (e) - 1;
+
+  /* Odd low order bits? */
+ if (i1 != 0 && n_bits_left > 0)
+ {
+ u32 n = clib_min (n_bits_left, BITS (u8) - i1);
+
+ mask = pow2_mask (n) << i1;
+
+ ASSERT (i0 < vec_len (s));
+ ASSERT (i < vec_len (v));
+ ASSERT ((v[i] & ~mask) == 0);
+
+ s[i0] |= v[i] & mask;
+ m[i0] |= mask;
+
+ i0--;
+ i--;
+ n_bits_left -= n;
+ }
+
+ /* Even bytes. */
+ while (n_bits_left >= 8)
+ {
+ ASSERT (i0 < vec_len (s));
+ ASSERT (i < vec_len (v));
+
+ s[i0] = v[i];
+ m[i0] = ~0;
+
+ i0--;
+ i--;
+ n_bits_left -= 8;
+ }
+
+ /* Odd high order bits. */
+ if (n_bits_left > 0)
+ {
+ mask = pow2_mask (n_bits_left);
+
+ ASSERT (i0 < vec_len (s));
+ ASSERT (i < vec_len (v));
+ ASSERT ((v[i] & ~mask) == 0);
+
+ s[i0] |= v[i] & mask;
+ m[i0] |= mask;
+ }
+
+ if (want_commit)
+ pg_edit_free (e);
+}
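+
+/*
+ * Worked example of the three merge phases above (illustrative
+ * numbers, not from this patch): a 12-bit fixed edit with
+ * lsb_bit_offset = 20 gives i0 = 2 and i1 = 4. The low-order phase
+ * merges 4 value bits into byte 2 under mask 0xf0, the whole-byte
+ * phase copies one full value byte into byte 1, and no high-order
+ * bits remain, so the mask vector ends up with 0xf0 at byte 2 and
+ * 0xff at byte 1.
+ */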
+
+void
+pg_edit_group_get_fixed_packet_data (pg_stream_t * s,
+ u32 group_index,
+ void *packet_data,
+ void *packet_data_mask)
+{
+ pg_edit_group_t *g = pg_stream_get_group (s, group_index);
+ pg_edit_t *e;
+
+ vec_foreach (e, g->edits) do_edit (s, g, e, /* want_commit */ 0);
+
+ clib_memcpy (packet_data, g->fixed_packet_data,
+ vec_len (g->fixed_packet_data));
+ clib_memcpy (packet_data_mask, g->fixed_packet_data_mask,
+ vec_len (g->fixed_packet_data_mask));
+}
+
+static void
+perform_fixed_edits (pg_stream_t * s)
+{
+ pg_edit_group_t *g;
+ pg_edit_t *e;
+ word i;
+
+ for (i = vec_len (s->edit_groups) - 1; i >= 0; i--)
+ {
+ g = vec_elt_at_index (s->edit_groups, i);
+ vec_foreach (e, g->edits) do_edit (s, g, e, /* want_commit */ 1);
+
+ /* All edits have either been performed or added to
+ g->non_fixed_edits. So, we can delete the vector. */
+ vec_free (g->edits);
+ }
+
+ vec_free (s->fixed_packet_data_mask);
+ vec_free (s->fixed_packet_data);
+ vec_foreach (g, s->edit_groups)
+ {
+ int i;
+ g->start_byte_offset = vec_len (s->fixed_packet_data);
+
+ /* Relocate and copy non-fixed edits from group to stream. */
+ vec_foreach (e, g->non_fixed_edits)
+ e->lsb_bit_offset += g->start_byte_offset * BITS (u8);
+
+ for (i = 0; i < vec_len (g->non_fixed_edits); i++)
+ ASSERT (g->non_fixed_edits[i].type != PG_EDIT_INVALID_TYPE);
+
+ vec_add (s->non_fixed_edits,
+ g->non_fixed_edits, vec_len (g->non_fixed_edits));
+ vec_free (g->non_fixed_edits);
+
+ vec_add (s->fixed_packet_data,
+ g->fixed_packet_data, vec_len (g->fixed_packet_data));
+ vec_add (s->fixed_packet_data_mask,
+ g->fixed_packet_data_mask, vec_len (g->fixed_packet_data_mask));
+ }
+}
+
+void
+pg_stream_add (pg_main_t * pg, pg_stream_t * s_init)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ pg_stream_t *s;
+ uword *p;
+
+ if (!pg->stream_index_by_name)
+ pg->stream_index_by_name
+ = hash_create_vec (0, sizeof (s->name[0]), sizeof (uword));
+
+ /* Delete any old stream with the same name. */
+ if (s_init->name
+ && (p = hash_get_mem (pg->stream_index_by_name, s_init->name)))
+ {
+ pg_stream_del (pg, p[0]);
+ }
+
+ pool_get (pg->streams, s);
+ s[0] = s_init[0];
+
+ /* Give it a name. */
+ if (!s->name)
+ s->name = format (0, "stream%d", s - pg->streams);
+ else
+ s->name = vec_dup (s->name);
+
+ hash_set_mem (pg->stream_index_by_name, s->name, s - pg->streams);
+
+ /* Get fixed part of buffer data. */
+ if (s->edit_groups)
+ perform_fixed_edits (s);
+
+ /* Determine packet size. */
+ switch (s->packet_size_edit_type)
+ {
+ case PG_EDIT_INCREMENT:
+ case PG_EDIT_RANDOM:
+ if (s->min_packet_bytes == s->max_packet_bytes)
+ s->packet_size_edit_type = PG_EDIT_FIXED;
+ break;
+
+ default:
+ /* Get packet size from fixed edits. */
+ s->packet_size_edit_type = PG_EDIT_FIXED;
+ if (!s->replay_packet_templates)
+ s->min_packet_bytes = s->max_packet_bytes =
+ vec_len (s->fixed_packet_data);
+ break;
+ }
+
+ s->last_increment_packet_size = s->min_packet_bytes;
+
+ {
+ pg_buffer_index_t *bi;
+ int n;
+
+#if DPDK > 0
+ s->buffer_bytes = VLIB_BUFFER_DATA_SIZE;
+#endif
+
+ if (!s->buffer_bytes)
+ s->buffer_bytes = s->max_packet_bytes;
+
+ s->buffer_bytes = vlib_buffer_round_size (s->buffer_bytes);
+
+ n = s->max_packet_bytes / s->buffer_bytes;
+ n += (s->max_packet_bytes % s->buffer_bytes) != 0;
+
+ vec_resize (s->buffer_indices, n);
+
+ vec_foreach (bi, s->buffer_indices)
+ {
+ bi->free_list_index =
+ vlib_buffer_create_free_list (vm, s->buffer_bytes,
+ "pg stream %d buffer #%d",
+ s - pg->streams,
+ 1 + (bi - s->buffer_indices));
+ }
+ }
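+
+  /*
+   * For example (illustrative numbers): with max_packet_bytes = 9000
+   * and buffer_bytes = 2048, n = 9000 / 2048 = 4 plus one for the
+   * remainder, so 5 buffer indices (each with its own free list)
+   * are created for the stream.
+   */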
+
+ /* Find an interface to use. */
+ s->pg_if_index = pg_interface_add_or_get (pg, s->if_id);
+
+ {
+ pg_interface_t *pi = pool_elt_at_index (pg->interfaces, s->pg_if_index);
+ vlib_rx_or_tx_t rx_or_tx;
+
+ vlib_foreach_rx_tx (rx_or_tx)
+ {
+ if (s->sw_if_index[rx_or_tx] == ~0)
+ s->sw_if_index[rx_or_tx] = pi->sw_if_index;
+ }
+ }
+
+ /* Connect the graph. */
+ s->next_index = vlib_node_add_next (vm, device_input_node.index,
+ s->node_index);
+}
+
+void
+pg_stream_del (pg_main_t * pg, uword index)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ pg_stream_t *s;
+ pg_buffer_index_t *bi;
+
+ s = pool_elt_at_index (pg->streams, index);
+
+ pg_stream_enable_disable (pg, s, /* want_enabled */ 0);
+ hash_unset_mem (pg->stream_index_by_name, s->name);
+
+ vec_foreach (bi, s->buffer_indices)
+ {
+ vlib_buffer_delete_free_list (vm, bi->free_list_index);
+ clib_fifo_free (bi->buffer_fifo);
+ }
+
+ pg_stream_free (s);
+ pool_put (pg->streams, s);
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/pipeline.h b/src/vnet/pipeline.h
new file mode 100644
index 00000000000..a4aa5cf5277
--- /dev/null
+++ b/src/vnet/pipeline.h
@@ -0,0 +1,456 @@
+/*
+ * vnet/pipeline.h: software pipeline
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Usage example.
+ *
+ * #define NSTAGES 3 or whatever
+ *
+ * <Define pipeline stages>
+ *
+ * #include <vnet/pipeline.h>
+ *
+ * static uword my_node_fn (vlib_main_t * vm,
+ * vlib_node_runtime_t * node,
+ * vlib_frame_t * frame)
+ * {
+ * return dispatch_pipeline (vm, node, frame);
+ * }
+ *
+ */
+
+#ifndef NSTAGES
+#error files which #include <vnet/pipeline.h> must define NSTAGES
+#endif
+
+#ifndef STAGE_INLINE
+#define STAGE_INLINE inline
+#endif
+
+/*
+ * A prefetch stride of 2 is quasi-equivalent to doubling the number
+ * of stages with every other pipeline stage empty.
+ */
+
+/*
+ * This is a typical first pipeline stage, which prefetches
+ * buffer metadata and the first line of pkt data.
+ * To use it:
+ * #define stage0 generic_stage0
+ */
+static STAGE_INLINE void
+generic_stage0 (vlib_main_t * vm,
+ vlib_node_runtime_t * node, u32 buffer_index)
+{
+ /* generic default stage 0 here */
+ vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
+ vlib_prefetch_buffer_header (b, STORE);
+ CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, STORE);
+}
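+
+/*
+ * A minimal sketch of the stage functions a node supplies for
+ * NSTAGES == 2 (hypothetical example, not part of this patch;
+ * MY_NEXT_DROP is an assumed next-index enum value):
+ *
+ *   #define NSTAGES 2
+ *   #define stage0 generic_stage0
+ *
+ *   static STAGE_INLINE uword
+ *   last_stage (vlib_main_t * vm, vlib_node_runtime_t * node,
+ *               u32 buffer_index)
+ *   {
+ *     vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
+ *     // examine or rewrite the packet here
+ *     return MY_NEXT_DROP;
+ *   }
+ *
+ *   #include <vnet/pipeline.h>
+ *
+ * dispatch_pipeline () then runs stage0 one packet ahead as a
+ * prefetch and enqueues each packet to the next index returned by
+ * last_stage.
+ */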
+
+#if NSTAGES == 2
+
+static STAGE_INLINE uword
+dispatch_pipeline (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left_from, n_left_to_next, *to_next, next_index, next0;
+ int pi, pi_limit;
+
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ pi_limit = clib_min (n_left_from, n_left_to_next);
+
+ for (pi = 0; pi < NSTAGES - 1; pi++)
+ {
+ if (pi == pi_limit)
+ break;
+ stage0 (vm, node, from[pi]);
+ }
+
+ for (; pi < pi_limit; pi++)
+ {
+ stage0 (vm, node, from[pi]);
+ to_next[0] = from[pi - 1];
+ to_next++;
+ n_left_to_next--;
+ next0 = last_stage (vm, node, from[pi - 1]);
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ from[pi - 1], next0);
+ n_left_from--;
+ if ((int) n_left_to_next < 0 && n_left_from > 0)
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+
+ for (; pi < (pi_limit + (NSTAGES - 1)); pi++)
+ {
+ if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
+ {
+ to_next[0] = from[pi - 1];
+ to_next++;
+ n_left_to_next--;
+ next0 = last_stage (vm, node, from[pi - 1]);
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ from[pi - 1], next0);
+ n_left_from--;
+ if ((int) n_left_to_next < 0 && n_left_from > 0)
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ from += pi_limit;
+ }
+ return frame->n_vectors;
+}
+#endif
+
+#if NSTAGES == 3
+static STAGE_INLINE uword
+dispatch_pipeline (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left_from, n_left_to_next, *to_next, next_index, next0;
+ int pi, pi_limit;
+
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ pi_limit = clib_min (n_left_from, n_left_to_next);
+
+ for (pi = 0; pi < NSTAGES - 1; pi++)
+ {
+ if (pi == pi_limit)
+ break;
+ stage0 (vm, node, from[pi]);
+ if (pi - 1 >= 0)
+ stage1 (vm, node, from[pi - 1]);
+ }
+
+ for (; pi < pi_limit; pi++)
+ {
+ stage0 (vm, node, from[pi]);
+ stage1 (vm, node, from[pi - 1]);
+ to_next[0] = from[pi - 2];
+ to_next++;
+ n_left_to_next--;
+ next0 = last_stage (vm, node, from[pi - 2]);
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ from[pi - 2], next0);
+ n_left_from--;
+ if ((int) n_left_to_next < 0 && n_left_from > 0)
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+
+
+ for (; pi < (pi_limit + (NSTAGES - 1)); pi++)
+ {
+ if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
+ stage1 (vm, node, from[pi - 1]);
+ if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
+ {
+ to_next[0] = from[pi - 2];
+ to_next++;
+ n_left_to_next--;
+ next0 = last_stage (vm, node, from[pi - 2]);
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ from[pi - 2], next0);
+ n_left_from--;
+ if ((int) n_left_to_next < 0 && n_left_from > 0)
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ from += pi_limit;
+ }
+ return frame->n_vectors;
+}
+#endif
+
+#if NSTAGES == 4
+static STAGE_INLINE uword
+dispatch_pipeline (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left_from, n_left_to_next, *to_next, next_index, next0;
+ int pi, pi_limit;
+
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ pi_limit = clib_min (n_left_from, n_left_to_next);
+
+ for (pi = 0; pi < NSTAGES - 1; pi++)
+ {
+ if (pi == pi_limit)
+ break;
+ stage0 (vm, node, from[pi]);
+ if (pi - 1 >= 0)
+ stage1 (vm, node, from[pi - 1]);
+ if (pi - 2 >= 0)
+ stage2 (vm, node, from[pi - 2]);
+ }
+
+ for (; pi < pi_limit; pi++)
+ {
+ stage0 (vm, node, from[pi]);
+ stage1 (vm, node, from[pi - 1]);
+ stage2 (vm, node, from[pi - 2]);
+ to_next[0] = from[pi - 3];
+ to_next++;
+ n_left_to_next--;
+ next0 = last_stage (vm, node, from[pi - 3]);
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ from[pi - 3], next0);
+ n_left_from--;
+ if ((int) n_left_to_next < 0 && n_left_from > 0)
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+
+
+ for (; pi < (pi_limit + (NSTAGES - 1)); pi++)
+ {
+ if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
+ stage1 (vm, node, from[pi - 1]);
+ if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
+ stage2 (vm, node, from[pi - 2]);
+ if (((pi - 3) >= 0) && ((pi - 3) < pi_limit))
+ {
+ to_next[0] = from[pi - 3];
+ to_next++;
+ n_left_to_next--;
+ next0 = last_stage (vm, node, from[pi - 3]);
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ from[pi - 3], next0);
+ n_left_from--;
+ if ((int) n_left_to_next < 0 && n_left_from > 0)
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ from += pi_limit;
+ }
+ return frame->n_vectors;
+}
+#endif
+
+
+#if NSTAGES == 5
+static STAGE_INLINE uword
+dispatch_pipeline (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left_from, n_left_to_next, *to_next, next_index, next0;
+ int pi, pi_limit;
+
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ pi_limit = clib_min (n_left_from, n_left_to_next);
+
+ for (pi = 0; pi < NSTAGES - 1; pi++)
+ {
+ if (pi == pi_limit)
+ break;
+ stage0 (vm, node, from[pi]);
+ if (pi - 1 >= 0)
+ stage1 (vm, node, from[pi - 1]);
+ if (pi - 2 >= 0)
+ stage2 (vm, node, from[pi - 2]);
+ if (pi - 3 >= 0)
+ stage3 (vm, node, from[pi - 3]);
+ }
+
+ for (; pi < pi_limit; pi++)
+ {
+ stage0 (vm, node, from[pi]);
+ stage1 (vm, node, from[pi - 1]);
+ stage2 (vm, node, from[pi - 2]);
+ stage3 (vm, node, from[pi - 3]);
+ to_next[0] = from[pi - 4];
+ to_next++;
+ n_left_to_next--;
+ next0 = last_stage (vm, node, from[pi - 4]);
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ from[pi - 4], next0);
+ n_left_from--;
+ if ((int) n_left_to_next < 0 && n_left_from > 0)
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+
+
+ for (; pi < (pi_limit + (NSTAGES - 1)); pi++)
+ {
+ if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
+ stage1 (vm, node, from[pi - 1]);
+ if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
+ stage2 (vm, node, from[pi - 2]);
+ if (((pi - 3) >= 0) && ((pi - 3) < pi_limit))
+ stage3 (vm, node, from[pi - 3]);
+ if (((pi - 4) >= 0) && ((pi - 4) < pi_limit))
+ {
+ to_next[0] = from[pi - 4];
+ to_next++;
+ n_left_to_next--;
+ next0 = last_stage (vm, node, from[pi - 4]);
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ from[pi - 4], next0);
+ n_left_from--;
+ if ((int) n_left_to_next < 0 && n_left_from > 0)
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ from += pi_limit;
+ }
+ return frame->n_vectors;
+}
+#endif
+
+#if NSTAGES == 6
+static STAGE_INLINE uword
+dispatch_pipeline (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left_from, n_left_to_next, *to_next, next_index, next0;
+ int pi, pi_limit;
+
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ pi_limit = clib_min (n_left_from, n_left_to_next);
+
+ for (pi = 0; pi < NSTAGES - 1; pi++)
+ {
+ if (pi == pi_limit)
+ break;
+ stage0 (vm, node, from[pi]);
+ if (pi - 1 >= 0)
+ stage1 (vm, node, from[pi - 1]);
+ if (pi - 2 >= 0)
+ stage2 (vm, node, from[pi - 2]);
+ if (pi - 3 >= 0)
+ stage3 (vm, node, from[pi - 3]);
+ if (pi - 4 >= 0)
+ stage4 (vm, node, from[pi - 4]);
+ }
+
+ for (; pi < pi_limit; pi++)
+ {
+ stage0 (vm, node, from[pi]);
+ stage1 (vm, node, from[pi - 1]);
+ stage2 (vm, node, from[pi - 2]);
+ stage3 (vm, node, from[pi - 3]);
+ stage4 (vm, node, from[pi - 4]);
+ to_next[0] = from[pi - 5];
+ to_next++;
+ n_left_to_next--;
+ next0 = last_stage (vm, node, from[pi - 5]);
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ from[pi - 5], next0);
+ n_left_from--;
+ if ((int) n_left_to_next < 0 && n_left_from > 0)
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+
+
+ for (; pi < (pi_limit + (NSTAGES - 1)); pi++)
+ {
+ if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
+ stage1 (vm, node, from[pi - 1]);
+ if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
+ stage2 (vm, node, from[pi - 2]);
+ if (((pi - 3) >= 0) && ((pi - 3) < pi_limit))
+ stage3 (vm, node, from[pi - 3]);
+ if (((pi - 4) >= 0) && ((pi - 4) < pi_limit))
+ stage4 (vm, node, from[pi - 4]);
+ if (((pi - 5) >= 0) && ((pi - 5) < pi_limit))
+ {
+ to_next[0] = from[pi - 5];
+ to_next++;
+ n_left_to_next--;
+ next0 = last_stage (vm, node, from[pi - 5]);
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ from[pi - 5], next0);
+ n_left_from--;
+ if ((int) n_left_to_next < 0 && n_left_from > 0)
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ from += pi_limit;
+ }
+ return frame->n_vectors;
+}
+#endif
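+
+/*
+ * This dispatch template is instantiated rather than called directly:
+ * a node defines NSTAGES and the per-stage functions, then includes
+ * this header, which expands into the matching dispatch_pipeline()
+ * above. Illustrative sketch of a hypothetical two-stage consumer
+ * (only the stage0/last_stage names and signatures are dictated by
+ * the template; everything else is made up):
+ *
+ *   static inline void
+ *   stage0 (vlib_main_t * vm, vlib_node_runtime_t * node, u32 bi)
+ *   {
+ *     vlib_prefetch_buffer_header (vlib_get_buffer (vm, bi), STORE);
+ *   }
+ *
+ *   static inline u32
+ *   last_stage (vlib_main_t * vm, vlib_node_runtime_t * node, u32 bi)
+ *   {
+ *     ...examine the buffer, then return the next-node index...
+ *     return 0;
+ *   }
+ *
+ *   #define NSTAGES 2
+ *   #include <vnet/pipeline.h>
+ */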
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/plugin/p1.c b/src/vnet/plugin/p1.c
new file mode 100644
index 00000000000..3102eccea39
--- /dev/null
+++ b/src/vnet/plugin/p1.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * This file, and in fact the entire directory, shouldn't even exist.
+ *
+ * Unfortunately, various things malfunction when we try to go there.
+ * Plugin DLLs end up with their own copies of critical
+ * data structures. None of these problems would be tough to fix,
+ * but there are quite a number of them.
+ */
+
+/*
+ * Make certain that plugin .dll's which reference the following functions
+ * can find them...
+ */
+
+#if DPDK > 0
+#define foreach_dpdk_plugin_reference \
+_(rte_calloc) \
+_(rte_free) \
+_(rte_malloc) \
+_(rte_zmalloc) \
+_(rte_malloc_virt2phy) \
+_(rte_eal_get_configuration)
+#else
+#define foreach_dpdk_plugin_reference
+#endif
+
+#define _(a) void a (void);
+foreach_dpdk_plugin_reference
+#undef _
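+
+/*
+ * The foreach/_() pairing above is an X-macro: each listed symbol is
+ * declared once here, and its address is captured in the array below,
+ * which keeps the symbols in the image so plugin .dll's can resolve
+ * them. With DPDK > 0, the lines above expand to:
+ *
+ *   void rte_calloc (void);
+ *   void rte_free (void);
+ *   ...and so on, one declaration per listed function.
+ */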
+
+void *vnet_library_plugin_references[] =
+ {
+#define _(a) &a,
+ foreach_dpdk_plugin_reference
+#undef _
+ };
+
+void vnet_library_plugin_reference(void) { }
diff --git a/src/vnet/plugin/plugin.h b/src/vnet/plugin/plugin.h
new file mode 100644
index 00000000000..a14a5932b50
--- /dev/null
+++ b/src/vnet/plugin/plugin.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vnet_plugin_h
+#define included_vnet_plugin_h
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vppinfra/error.h>
+
+/* Pointers to Genuine Vnet data structures handed to plugin .dll's */
+typedef struct {
+ vnet_main_t * vnet_main;
+ ethernet_main_t * ethernet_main;
+} vnet_plugin_handoff_t;
+
+void * vnet_get_handoff_structure (void);
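+
+/*
+ * Illustrative sketch of the intended consumer (the function name and
+ * call protocol here are hypothetical): a plugin receives this
+ * structure at load time and uses the embedded pointers instead of
+ * instantiating its own copies of the main data structures:
+ *
+ *   clib_error_t *
+ *   example_plugin_register (vnet_plugin_handoff_t * h)
+ *   {
+ *     vnet_main_t *vnm = h->vnet_main;
+ *     ethernet_main_t *em = h->ethernet_main;
+ *     ...register nodes / features using vnm and em...
+ *     return 0;
+ *   }
+ */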
+
+#endif /* included_vnet_plugin_h */
diff --git a/src/vnet/policer/node_funcs.c b/src/vnet/policer/node_funcs.c
new file mode 100644
index 00000000000..1f4997ff669
--- /dev/null
+++ b/src/vnet/policer/node_funcs.c
@@ -0,0 +1,938 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/policer/policer.h>
+#include <vnet/ip/ip.h>
+#include <vnet/classify/policer_classify.h>
+#include <vnet/classify/vnet_classify.h>
+
+#define IP4_NON_DSCP_BITS 0x03
+#define IP4_DSCP_SHIFT 2
+#define IP6_NON_DSCP_BITS 0xf03fffff
+#define IP6_DSCP_SHIFT 22
+
+/* Dispatch functions meant to be instantiated elsewhere */
+
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u32 policer_index;
+} vnet_policer_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_policer_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ vnet_policer_trace_t *t = va_arg (*args, vnet_policer_trace_t *);
+
+ s = format (s, "VNET_POLICER: sw_if_index %d policer_index %d next %d",
+ t->sw_if_index, t->policer_index, t->next_index);
+ return s;
+}
+
+#define foreach_vnet_policer_error \
+_(TRANSMIT, "Packets Transmitted") \
+_(DROP, "Packets Dropped")
+
+typedef enum
+{
+#define _(sym,str) VNET_POLICER_ERROR_##sym,
+ foreach_vnet_policer_error
+#undef _
+ VNET_POLICER_N_ERROR,
+} vnet_policer_error_t;
+
+static char *vnet_policer_error_strings[] = {
+#define _(sym,string) string,
+ foreach_vnet_policer_error
+#undef _
+};
+
+static_always_inline void
+vnet_policer_mark (vlib_buffer_t * b, u8 dscp)
+{
+ ethernet_header_t *eh;
+ ip4_header_t *ip4h;
+ ip6_header_t *ip6h;
+ u16 type;
+
+ eh = (ethernet_header_t *) b->data;
+ type = clib_net_to_host_u16 (eh->type);
+
+ if (PREDICT_TRUE (type == ETHERNET_TYPE_IP4))
+ {
+ ip4h = (ip4_header_t *) & (b->data[sizeof (ethernet_header_t)]);
+ ip4h->tos &= IP4_NON_DSCP_BITS;
+ ip4h->tos |= dscp << IP4_DSCP_SHIFT;
+ ip4h->checksum = ip4_header_checksum (ip4h);
+ }
+ else
+ {
+ if (PREDICT_TRUE (type == ETHERNET_TYPE_IP6))
+ {
+ ip6h = (ip6_header_t *) & (b->data[sizeof (ethernet_header_t)]);
+ ip6h->ip_version_traffic_class_and_flow_label &=
+ clib_host_to_net_u32 (IP6_NON_DSCP_BITS);
+ ip6h->ip_version_traffic_class_and_flow_label |=
+ clib_host_to_net_u32 (dscp << IP6_DSCP_SHIFT);
+ }
+ }
+}
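+
+/*
+ * Worked example of the marking arithmetic above: for dscp = 10 (AF11)
+ * on IPv4, the new ToS byte is (tos & 0x03) | (10 << 2), i.e. 0x28
+ * plus whatever ECN bits were already set. On IPv6 the same 6-bit
+ * value lands in bits 22-27 of the version/traffic-class/flow-label
+ * word, per IP6_DSCP_SHIFT.
+ */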
+
+static_always_inline u8
+vnet_policer_police (vlib_main_t * vm,
+ vlib_buffer_t * b,
+ u32 policer_index,
+ u64 time_in_policer_periods,
+ policer_result_e packet_color)
+{
+ u8 act;
+ u32 len;
+ u32 col;
+ policer_read_response_type_st *pol;
+ vnet_policer_main_t *pm = &vnet_policer_main;
+
+ len = vlib_buffer_length_in_chain (vm, b);
+ pol = &pm->policers[policer_index];
+ col = vnet_police_packet (pol, len, packet_color, time_in_policer_periods);
+ act = pol->action[col];
+ if (PREDICT_TRUE (act == SSE2_QOS_ACTION_MARK_AND_TRANSMIT))
+ vnet_policer_mark (b, pol->mark_dscp[col]);
+
+ return act;
+}
+
+static inline uword
+vnet_policer_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, vnet_policer_index_t which)
+{
+ u32 n_left_from, *from, *to_next;
+ vnet_policer_next_t next_index;
+ vnet_policer_main_t *pm = &vnet_policer_main;
+ u64 time_in_policer_periods;
+ u32 transmitted = 0;
+
+ time_in_policer_periods =
+ clib_cpu_time_now () >> POLICER_TICKS_PER_PERIOD_SHIFT;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1;
+ u32 pi0 = 0, pi1 = 0;
+ u8 act0, act1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *b2, *b3;
+
+ b2 = vlib_get_buffer (vm, from[2]);
+ b3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (b2, LOAD);
+ vlib_prefetch_buffer_header (b3, LOAD);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ next0 = VNET_POLICER_NEXT_TRANSMIT;
+
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ next1 = VNET_POLICER_NEXT_TRANSMIT;
+
+
+ if (which == VNET_POLICER_INDEX_BY_SW_IF_INDEX)
+ {
+ pi0 = pm->policer_index_by_sw_if_index[sw_if_index0];
+ pi1 = pm->policer_index_by_sw_if_index[sw_if_index1];
+ }
+
+ if (which == VNET_POLICER_INDEX_BY_OPAQUE)
+ {
+ pi0 = vnet_buffer (b0)->policer.index;
+ pi1 = vnet_buffer (b1)->policer.index;
+ }
+
+ if (which == VNET_POLICER_INDEX_BY_EITHER)
+ {
+ pi0 = vnet_buffer (b0)->policer.index;
+ pi0 = (pi0 != ~0) ? pi0 :
+ pm->policer_index_by_sw_if_index[sw_if_index0];
+ pi1 = vnet_buffer (b1)->policer.index;
+ pi1 = (pi1 != ~0) ? pi1 :
+ pm->policer_index_by_sw_if_index[sw_if_index1];
+ }
+
+ act0 = vnet_policer_police (vm, b0, pi0, time_in_policer_periods,
+ POLICE_CONFORM /* no chaining */ );
+
+ act1 = vnet_policer_police (vm, b1, pi1, time_in_policer_periods,
+ POLICE_CONFORM /* no chaining */ );
+
+ if (PREDICT_FALSE (act0 == SSE2_QOS_ACTION_DROP)) /* drop action */
+ {
+ next0 = VNET_POLICER_NEXT_DROP;
+ b0->error = node->errors[VNET_POLICER_ERROR_DROP];
+ }
+ else /* transmit or mark-and-transmit action */
+ {
+ transmitted++;
+ }
+
+ if (PREDICT_FALSE (act1 == SSE2_QOS_ACTION_DROP)) /* drop action */
+ {
+ next1 = VNET_POLICER_NEXT_DROP;
+ b1->error = node->errors[VNET_POLICER_ERROR_DROP];
+ }
+ else /* transmit or mark-and-transmit action */
+ {
+ transmitted++;
+ }
+
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ vnet_policer_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ t->policer_index = pi0;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ vnet_policer_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ t->policer_index = pi1;
+ }
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u32 sw_if_index0;
+ u32 pi0 = 0;
+ u8 act0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ next0 = VNET_POLICER_NEXT_TRANSMIT;
+
+ if (which == VNET_POLICER_INDEX_BY_SW_IF_INDEX)
+ pi0 = pm->policer_index_by_sw_if_index[sw_if_index0];
+
+ if (which == VNET_POLICER_INDEX_BY_OPAQUE)
+ pi0 = vnet_buffer (b0)->policer.index;
+
+ if (which == VNET_POLICER_INDEX_BY_EITHER)
+ {
+ pi0 = vnet_buffer (b0)->policer.index;
+ pi0 = (pi0 != ~0) ? pi0 :
+ pm->policer_index_by_sw_if_index[sw_if_index0];
+ }
+
+ act0 = vnet_policer_police (vm, b0, pi0, time_in_policer_periods,
+ POLICE_CONFORM /* no chaining */ );
+
+ if (PREDICT_FALSE (act0 == SSE2_QOS_ACTION_DROP)) /* drop action */
+ {
+ next0 = VNET_POLICER_NEXT_DROP;
+ b0->error = node->errors[VNET_POLICER_ERROR_DROP];
+ }
+ else /* transmit or mark-and-transmit action */
+ {
+ transmitted++;
+ }
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ vnet_policer_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ t->policer_index = pi0;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ VNET_POLICER_ERROR_TRANSMIT, transmitted);
+ return frame->n_vectors;
+}
+
+uword
+vnet_policer_by_sw_if_index (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return vnet_policer_inline (vm, node, frame,
+ VNET_POLICER_INDEX_BY_SW_IF_INDEX);
+}
+
+uword
+vnet_policer_by_opaque (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return vnet_policer_inline (vm, node, frame, VNET_POLICER_INDEX_BY_OPAQUE);
+}
+
+uword
+vnet_policer_by_either (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return vnet_policer_inline (vm, node, frame, VNET_POLICER_INDEX_BY_EITHER);
+}
+
+void
+vnet_policer_node_funcs_reference (void)
+{
+}
+
+
+#define TEST_CODE 1
+
+#ifdef TEST_CODE
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (policer_by_sw_if_index_node, static) = {
+ .function = vnet_policer_by_sw_if_index,
+ .name = "policer-by-sw-if-index",
+ .vector_size = sizeof (u32),
+ .format_trace = format_policer_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(vnet_policer_error_strings),
+ .error_strings = vnet_policer_error_strings,
+
+ .n_next_nodes = VNET_POLICER_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [VNET_POLICER_NEXT_TRANSMIT] = "ethernet-input",
+ [VNET_POLICER_NEXT_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (policer_by_sw_if_index_node,
+ vnet_policer_by_sw_if_index);
+/* *INDENT-ON* */
+
+
+int
+test_policer_add_del (u32 rx_sw_if_index, u8 * config_name, int is_add)
+{
+ vnet_policer_main_t *pm = &vnet_policer_main;
+ policer_read_response_type_st *template;
+ policer_read_response_type_st *policer;
+ vnet_hw_interface_t *rxhi;
+ uword *p;
+
+ rxhi = vnet_get_sup_hw_interface (pm->vnet_main, rx_sw_if_index);
+
+ /* Make sure caller didn't pass a vlan subif, etc. */
+ if (rxhi->sw_if_index != rx_sw_if_index)
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+
+ if (is_add)
+ {
+
+ p = hash_get_mem (pm->policer_config_by_name, config_name);
+
+ if (p == 0)
+ return -2;
+
+ template = pool_elt_at_index (pm->policer_templates, p[0]);
+
+ vnet_hw_interface_rx_redirect_to_node
+ (pm->vnet_main, rxhi->hw_if_index, policer_by_sw_if_index_node.index);
+
+ pool_get_aligned (pm->policers, policer, CLIB_CACHE_LINE_BYTES);
+
+ policer[0] = template[0];
+
+ vec_validate (pm->policer_index_by_sw_if_index, rx_sw_if_index);
+ pm->policer_index_by_sw_if_index[rx_sw_if_index]
+ = policer - pm->policers;
+ }
+ else
+ {
+ u32 pi;
+ vnet_hw_interface_rx_redirect_to_node (pm->vnet_main,
+ rxhi->hw_if_index,
+ ~0 /* disable */ );
+
+ pi = pm->policer_index_by_sw_if_index[rx_sw_if_index];
+ pm->policer_index_by_sw_if_index[rx_sw_if_index] = ~0;
+ pool_put_index (pm->policers, pi);
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+test_policer_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_policer_main_t *pm = &vnet_policer_main;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 rx_sw_if_index;
+ int rv;
+ u8 *config_name = 0;
+ int rx_set = 0;
+ int is_add = 1;
+ int is_show = 0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "intfc %U", unformat_vnet_sw_interface,
+ pm->vnet_main, &rx_sw_if_index))
+ rx_set = 1;
+ else if (unformat (line_input, "show"))
+ is_show = 1;
+ else if (unformat (line_input, "policer %s", &config_name))
+ ;
+ else if (unformat (line_input, "del"))
+ is_add = 0;
+ else
+ break;
+ }
+
+ if (rx_set == 0)
+ return clib_error_return (0, "interface not set");
+
+ if (is_show)
+ {
+ u32 pi = pm->policer_index_by_sw_if_index[rx_sw_if_index];
+ policer_read_response_type_st *policer;
+ policer = pool_elt_at_index (pm->policers, pi);
+
+ vlib_cli_output (vm, "%U", format_policer_instance, policer);
+ return 0;
+ }
+
+ if (is_add && config_name == 0)
+ {
+ return clib_error_return (0, "policer config name required");
+ }
+
+ rv = test_policer_add_del (rx_sw_if_index, config_name, is_add);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ default:
+ return clib_error_return
+ (0, "WARNING: test_policer_add_del returned %d", rv);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (test_patch_command, static) = {
+ .path = "test policer",
+ .short_help =
+ "test policer intfc <intfc> policer <policer-config-name> [del]",
+ .function = test_policer_command_fn,
+};
+/* *INDENT-ON* */
+
+#endif /* TEST_CODE */
+
+
+typedef struct
+{
+ u32 sw_if_index;
+ u32 next_index;
+ u32 table_index;
+ u32 offset;
+ u32 policer_index;
+} policer_classify_trace_t;
+
+static u8 *
+format_policer_classify_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ policer_classify_trace_t *t = va_arg (*args, policer_classify_trace_t *);
+
+ s = format (s, "POLICER_CLASSIFY: sw_if_index %d next %d table %d offset %d"
+ " policer_index %d",
+ t->sw_if_index, t->next_index, t->table_index, t->offset,
+ t->policer_index);
+ return s;
+}
+
+#define foreach_policer_classify_error \
+_(MISS, "Policer classify misses") \
+_(HIT, "Policer classify hits") \
+_(CHAIN_HIT, "Policer classify hits after chain walk") \
+_(DROP, "Policer classify action drop")
+
+typedef enum
+{
+#define _(sym,str) POLICER_CLASSIFY_ERROR_##sym,
+ foreach_policer_classify_error
+#undef _
+ POLICER_CLASSIFY_N_ERROR,
+} policer_classify_error_t;
+
+static char *policer_classify_error_strings[] = {
+#define _(sym,string) string,
+ foreach_policer_classify_error
+#undef _
+};
+
+static inline uword
+policer_classify_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ policer_classify_table_id_t tid)
+{
+ u32 n_left_from, *from, *to_next;
+ policer_classify_next_index_t next_index;
+ policer_classify_main_t *pcm = &policer_classify_main;
+ vnet_classify_main_t *vcm = pcm->vnet_classify_main;
+ f64 now = vlib_time_now (vm);
+ u32 hits = 0;
+ u32 misses = 0;
+ u32 chain_hits = 0;
+ u32 drop = 0;
+ u32 n_next_nodes;
+ u64 time_in_policer_periods;
+
+ time_in_policer_periods =
+ clib_cpu_time_now () >> POLICER_TICKS_PER_PERIOD_SHIFT;
+
+ n_next_nodes = node->n_next_nodes;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ /* First pass: compute hashes */
+ while (n_left_from > 2)
+ {
+ vlib_buffer_t *b0, *b1;
+ u32 bi0, bi1;
+ u8 *h0, *h1;
+ u32 sw_if_index0, sw_if_index1;
+ u32 table_index0, table_index1;
+ vnet_classify_table_t *t0, *t1;
+
+ /* Prefetch next iteration */
+ {
+ vlib_buffer_t *p1, *p2;
+
+ p1 = vlib_get_buffer (vm, from[1]);
+ p2 = vlib_get_buffer (vm, from[2]);
+
+ vlib_prefetch_buffer_header (p1, STORE);
+ CLIB_PREFETCH (p1->data, CLIB_CACHE_LINE_BYTES, STORE);
+ vlib_prefetch_buffer_header (p2, STORE);
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = b0->data;
+
+ bi1 = from[1];
+ b1 = vlib_get_buffer (vm, bi1);
+ h1 = b1->data;
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ table_index0 =
+ pcm->classify_table_index_by_sw_if_index[tid][sw_if_index0];
+
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ table_index1 =
+ pcm->classify_table_index_by_sw_if_index[tid][sw_if_index1];
+
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ t1 = pool_elt_at_index (vcm->tables, table_index1);
+
+ vnet_buffer (b0)->l2_classify.hash =
+ vnet_classify_hash_packet (t0, (u8 *) h0);
+
+ vnet_classify_prefetch_bucket (t0, vnet_buffer (b0)->l2_classify.hash);
+
+ vnet_buffer (b1)->l2_classify.hash =
+ vnet_classify_hash_packet (t1, (u8 *) h1);
+
+ vnet_classify_prefetch_bucket (t1, vnet_buffer (b1)->l2_classify.hash);
+
+ vnet_buffer (b0)->l2_classify.table_index = table_index0;
+
+ vnet_buffer (b1)->l2_classify.table_index = table_index1;
+
+ from += 2;
+ n_left_from -= 2;
+ }
+
+ while (n_left_from > 0)
+ {
+ vlib_buffer_t *b0;
+ u32 bi0;
+ u8 *h0;
+ u32 sw_if_index0;
+ u32 table_index0;
+ vnet_classify_table_t *t0;
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = b0->data;
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ table_index0 =
+ pcm->classify_table_index_by_sw_if_index[tid][sw_if_index0];
+
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+ vnet_buffer (b0)->l2_classify.hash =
+ vnet_classify_hash_packet (t0, (u8 *) h0);
+
+ vnet_buffer (b0)->l2_classify.table_index = table_index0;
+ vnet_classify_prefetch_bucket (t0, vnet_buffer (b0)->l2_classify.hash);
+
+ from++;
+ n_left_from--;
+ }
+
+ next_index = node->cached_next_index;
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Not enough load/store slots to dual loop... */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0 = POLICER_CLASSIFY_NEXT_INDEX_DROP;
+ u32 table_index0;
+ vnet_classify_table_t *t0;
+ vnet_classify_entry_t *e0;
+ u64 hash0;
+ u8 *h0;
+ u8 act0;
+
+ /* Stride 3 seems to work best */
+ if (PREDICT_TRUE (n_left_from > 3))
+ {
+ vlib_buffer_t *p1 = vlib_get_buffer (vm, from[3]);
+ vnet_classify_table_t *tp1;
+ u32 table_index1;
+ u64 phash1;
+
+ table_index1 = vnet_buffer (p1)->l2_classify.table_index;
+
+ if (PREDICT_TRUE (table_index1 != ~0))
+ {
+ tp1 = pool_elt_at_index (vcm->tables, table_index1);
+ phash1 = vnet_buffer (p1)->l2_classify.hash;
+ vnet_classify_prefetch_entry (tp1, phash1);
+ }
+ }
+
+ /* Speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = b0->data;
+ table_index0 = vnet_buffer (b0)->l2_classify.table_index;
+ e0 = 0;
+ t0 = 0;
+
+ if (tid == POLICER_CLASSIFY_TABLE_L2)
+ {
+ /* Feature bitmap update */
+ vnet_buffer (b0)->l2.feature_bitmap &=
+ ~L2INPUT_FEAT_POLICER_CLAS;
+ /* Determine the next node */
+ next0 =
+ feat_bitmap_get_next_node_index (pcm->feat_next_node_index,
+ vnet_buffer (b0)->
+ l2.feature_bitmap);
+ }
+ else
+ vnet_get_config_data (pcm->vnet_config_main[tid],
+ &b0->current_config_index, &next0,
+ /* # bytes of config data */ 0);
+
+ vnet_buffer (b0)->l2_classify.opaque_index = ~0;
+
+ if (PREDICT_TRUE (table_index0 != ~0))
+ {
+ hash0 = vnet_buffer (b0)->l2_classify.hash;
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+ e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
+
+ if (e0)
+ {
+ act0 = vnet_policer_police (vm,
+ b0,
+ e0->next_index,
+ time_in_policer_periods,
+ e0->opaque_index);
+ if (PREDICT_FALSE (act0 == SSE2_QOS_ACTION_DROP))
+ {
+ next0 = POLICER_CLASSIFY_NEXT_INDEX_DROP;
+ b0->error = node->errors[POLICER_CLASSIFY_ERROR_DROP];
+ drop++;
+ }
+ hits++;
+ }
+ else
+ {
+ while (1)
+ {
+ if (PREDICT_TRUE (t0->next_table_index != ~0))
+ {
+ t0 = pool_elt_at_index (vcm->tables,
+ t0->next_table_index);
+ }
+ else
+ {
+ next0 = (t0->miss_next_index < n_next_nodes) ?
+ t0->miss_next_index : next0;
+ misses++;
+ break;
+ }
+
+ hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
+ e0 =
+ vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
+ if (e0)
+ {
+ act0 = vnet_policer_police (vm,
+ b0,
+ e0->next_index,
+ time_in_policer_periods,
+ e0->opaque_index);
+ if (PREDICT_FALSE (act0 == SSE2_QOS_ACTION_DROP))
+ {
+ next0 = POLICER_CLASSIFY_NEXT_INDEX_DROP;
+ b0->error =
+ node->errors[POLICER_CLASSIFY_ERROR_DROP];
+ drop++;
+ }
+ hits++;
+ chain_hits++;
+ break;
+ }
+ }
+ }
+ }
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ policer_classify_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->next_index = next0;
+ t->table_index = t0 ? t0 - vcm->tables : ~0;
+ t->offset = (e0 && t0) ? vnet_classify_get_offset (t0, e0) : ~0;
+ t->policer_index = e0 ? e0->next_index : ~0;
+ }
+
+ /* Verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ POLICER_CLASSIFY_ERROR_MISS, misses);
+ vlib_node_increment_counter (vm, node->node_index,
+ POLICER_CLASSIFY_ERROR_HIT, hits);
+ vlib_node_increment_counter (vm, node->node_index,
+ POLICER_CLASSIFY_ERROR_CHAIN_HIT, chain_hits);
+ vlib_node_increment_counter (vm, node->node_index,
+ POLICER_CLASSIFY_ERROR_DROP, drop);
+
+ return frame->n_vectors;
+}
+
+static uword
+ip4_policer_classify (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return policer_classify_inline (vm, node, frame,
+ POLICER_CLASSIFY_TABLE_IP4);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_policer_classify_node) = {
+ .function = ip4_policer_classify,
+ .name = "ip4-policer-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_policer_classify_trace,
+ .n_errors = ARRAY_LEN(policer_classify_error_strings),
+ .error_strings = policer_classify_error_strings,
+ .n_next_nodes = POLICER_CLASSIFY_NEXT_INDEX_N_NEXT,
+ .next_nodes = {
+ [POLICER_CLASSIFY_NEXT_INDEX_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_policer_classify_node, ip4_policer_classify);
+/* *INDENT-ON* */
+
+static uword
+ip6_policer_classify (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return policer_classify_inline (vm, node, frame,
+ POLICER_CLASSIFY_TABLE_IP6);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_policer_classify_node) = {
+ .function = ip6_policer_classify,
+ .name = "ip6-policer-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_policer_classify_trace,
+ .n_errors = ARRAY_LEN(policer_classify_error_strings),
+ .error_strings = policer_classify_error_strings,
+ .n_next_nodes = POLICER_CLASSIFY_NEXT_INDEX_N_NEXT,
+ .next_nodes = {
+ [POLICER_CLASSIFY_NEXT_INDEX_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_policer_classify_node, ip6_policer_classify);
+/* *INDENT-ON* */
+
+static uword
+l2_policer_classify (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return policer_classify_inline (vm, node, frame, POLICER_CLASSIFY_TABLE_L2);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2_policer_classify_node) = {
+ .function = l2_policer_classify,
+ .name = "l2-policer-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_policer_classify_trace,
+ .n_errors = ARRAY_LEN (policer_classify_error_strings),
+ .error_strings = policer_classify_error_strings,
+ .n_next_nodes = POLICER_CLASSIFY_NEXT_INDEX_N_NEXT,
+ .next_nodes = {
+ [POLICER_CLASSIFY_NEXT_INDEX_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2_policer_classify_node, l2_policer_classify);
+/* *INDENT-ON* */
+
+
+static clib_error_t *
+policer_classify_init (vlib_main_t * vm)
+{
+ policer_classify_main_t *pcm = &policer_classify_main;
+
+ pcm->vlib_main = vm;
+ pcm->vnet_main = vnet_get_main ();
+ pcm->vnet_classify_main = &vnet_classify_main;
+
+ /* Initialize L2 feature next-node indexes */
+ feat_bitmap_init_next_nodes (vm,
+ l2_policer_classify_node.index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ pcm->feat_next_node_index);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (policer_classify_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/policer/police.h b/src/vnet/policer/police.h
new file mode 100644
index 00000000000..34bcf9ca5a8
--- /dev/null
+++ b/src/vnet/policer/police.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __POLICE_H__
+#define __POLICE_H__
+
+typedef enum
+{
+ POLICE_CONFORM = 0,
+ POLICE_EXCEED = 1,
+ POLICE_VIOLATE = 2,
+} policer_result_e;
+
+// This is the hardware representation of the policer.
+// To be multithread-safe, the policer is accessed through a spin-lock
+// on the lock field. (For a policer update operation, 24B needs to be
+// modified and this would be a challenge to do with atomic instructions.)
+// The structure is padded so that no other data is put into the same
+// 64B cache-line. This reduces cache-thrashing between threads.
+//
+// A note on scale:
+// The HW TSC tick is roughly one CPU clock cycle.
+// This is shifted to create a larger period, with a goal of roughly 50usec.
+// The period time will vary based on CPU clock speed.
+// CPU speeds of 1GHz to 8GHz are targeted.
+// The shift amount is a constant 17 bits, resulting in a period between
+// 16usec (8GHz CPU) and 131usec (1GHz CPU).
+// The token_per_period computation takes into account the clock speed.
+//
+// The 32-bit bucket/limit supports about 850ms of burst on a 40GE port,
+// or 340ms on a 100GE port. If a larger burst is configured, then the
+// programmed value is simply capped at 2^32-1. If we needed to support
+// more than that, the bucket and limit fields could be expanded.
+//
+// tokens_per_period should be > 1000 to support 0.1% granularity.
+// To support lower rates (which would not meet this requirement), the packet
+// length, bucket, and limit values can be scaled. The scale is a power of 2
+// so the multiplication can be implemented as a shift. The control plane
+// computes the shift amount to be the largest possible that still supports
+// the burst size. This makes the rate accuracy as high as possible.
+//
+// The 64-bit last_update_time supports a 4GHz CPU without rollover for 100 years.
+//
+// The lock field should be used for a spin-lock on the struct.
+
+#define POLICER_TICKS_PER_PERIOD_SHIFT 17
+#define POLICER_TICKS_PER_PERIOD (1 << POLICER_TICKS_PER_PERIOD_SHIFT)
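+
+// Worked example: one policer period is 2^17 TSC ticks. On a
+// (hypothetical) 2GHz CPU that is 131072 / 2e9 seconds, roughly
+// 65.5 usec -- inside the 16usec-131usec window described above.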
+
+typedef struct
+{
+
+ u32 lock; // for exclusive access to the struct
+
+ u32 single_rate; // 1 = single rate policer, 0 = two rate policer
+ u32 color_aware; // for hierarchical policing
+ u32 scale; // power-of-2 shift amount for lower rates
+ u8 action[3];
+ u8 mark_dscp[3];
+ u8 pad[2];
+
+ // Fields are marked as 2R if they are only used for a 2-rate policer,
+ // and MOD if they are modified as part of the update operation.
+ // 1 token = 1 byte.
+
+ u32 cir_tokens_per_period; // # of tokens for each period
+ u32 pir_tokens_per_period; // 2R
+
+ u32 current_limit;
+ u32 current_bucket; // MOD
+ u32 extended_limit;
+ u32 extended_bucket; // MOD
+
+ u64 last_update_time; // MOD
+ u64 pad64;
+
+} policer_read_response_type_st;
+
+static inline policer_result_e
+vnet_police_packet (policer_read_response_type_st * policer,
+ u32 packet_length,
+ policer_result_e packet_color, u64 time)
+{
+ u64 n_periods;
+ u64 current_tokens, extended_tokens;
+ policer_result_e result;
+
+ // Scale packet length to support a wide range of speeds
+ packet_length = packet_length << policer->scale;
+
+ // Compute the number of policer periods that have passed since the last
+ // operation.
+ n_periods = time - policer->last_update_time;
+ policer->last_update_time = time;
+
+ // Since there is no background last-update-time adjustment, n_periods
+ // could grow large if the policer is idle for a long time. This could
+ // cause a 64-bit overflow when computing tokens_per_period * num_periods.
+ // It will overflow if log2(n_periods) + log2(tokens_per_period) > 64.
+ //
+ // To mitigate this, the policer configuration algorithm ensures that
+ // tokens_per_period is less than 2^22, i.e. it is a 22-bit value, not
+ // a 32-bit value. Thus overflow can only occur if log2(n_periods) exceeds
+ // 64-22 = 42. 2^42 min-sized periods is 16us * 2^42, or about 2 years,
+ // so this can rarely occur. If overflow does happen, the only effect
+ // will be that fewer tokens than the max burst will be added to the
+ // bucket for this packet. This constraint on tokens_per_period lets the
+ // ucode omit code to dynamically check for or prevent the overflow.
+
+ if (policer->single_rate)
+ {
+
+ // Compute number of tokens for this time period
+ current_tokens =
+ policer->current_bucket + n_periods * policer->cir_tokens_per_period;
+ if (current_tokens > policer->current_limit)
+ {
+ current_tokens = policer->current_limit;
+ }
+
+ extended_tokens =
+ policer->extended_bucket + n_periods * policer->cir_tokens_per_period;
+ if (extended_tokens > policer->extended_limit)
+ {
+ extended_tokens = policer->extended_limit;
+ }
+
+ // Determine color
+
+ if ((!policer->color_aware || (packet_color == POLICE_CONFORM))
+ && (current_tokens >= packet_length))
+ {
+ policer->current_bucket = current_tokens - packet_length;
+ policer->extended_bucket = extended_tokens - packet_length;
+ result = POLICE_CONFORM;
+ }
+ else if ((!policer->color_aware || (packet_color != POLICE_VIOLATE))
+ && (extended_tokens >= packet_length))
+ {
+ policer->current_bucket = current_tokens;
+ policer->extended_bucket = extended_tokens - packet_length;
+ result = POLICE_EXCEED;
+ }
+ else
+ {
+ policer->current_bucket = current_tokens;
+ policer->extended_bucket = extended_tokens;
+ result = POLICE_VIOLATE;
+ }
+
+ }
+ else
+ {
+ // Two-rate policer
+
+ // Compute number of tokens for this time period
+ current_tokens =
+ policer->current_bucket + n_periods * policer->cir_tokens_per_period;
+ extended_tokens =
+ policer->extended_bucket + n_periods * policer->pir_tokens_per_period;
+ if (current_tokens > policer->current_limit)
+ {
+ current_tokens = policer->current_limit;
+ }
+ if (extended_tokens > policer->extended_limit)
+ {
+ extended_tokens = policer->extended_limit;
+ }
+
+ // Determine color
+
+ if ((policer->color_aware && (packet_color == POLICE_VIOLATE))
+ || (extended_tokens < packet_length))
+ {
+ policer->current_bucket = current_tokens;
+ policer->extended_bucket = extended_tokens;
+ result = POLICE_VIOLATE;
+ }
+ else if ((policer->color_aware && (packet_color == POLICE_EXCEED))
+ || (current_tokens < packet_length))
+ {
+ policer->current_bucket = current_tokens;
+ policer->extended_bucket = extended_tokens - packet_length;
+ result = POLICE_EXCEED;
+ }
+ else
+ {
+ policer->current_bucket = current_tokens - packet_length;
+ policer->extended_bucket = extended_tokens - packet_length;
+ result = POLICE_CONFORM;
+ }
+ }
+ return result;
+}
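+
+// Minimal usage sketch (illustrative; not referenced elsewhere in this
+// patch): police one packet's worth of bytes against a policer and
+// report whether it should be dropped. Callers sharing a policer
+// across threads are expected to take the spin-lock on pol->lock
+// first; this sketch omits locking.
+static inline int
+example_police_would_drop (policer_read_response_type_st * pol,
+ u32 bytes, u64 time_in_periods)
+{
+ policer_result_e col =
+ vnet_police_packet (pol, bytes, POLICE_CONFORM, time_in_periods);
+ return col == POLICE_VIOLATE;
+}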
+
+#endif // __POLICE_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/policer/policer.c b/src/vnet/policer/policer.c
new file mode 100644
index 00000000000..290a6af57e2
--- /dev/null
+++ b/src/vnet/policer/policer.c
@@ -0,0 +1,528 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <stdint.h>
+#include <vnet/policer/policer.h>
+#include <vnet/classify/vnet_classify.h>
+
+clib_error_t *
+policer_add_del (vlib_main_t * vm,
+ u8 * name,
+ sse2_qos_pol_cfg_params_st * cfg,
+ u32 * policer_index, u8 is_add)
+{
+ vnet_policer_main_t *pm = &vnet_policer_main;
+ policer_read_response_type_st test_policer;
+ policer_read_response_type_st *policer;
+ uword *p;
+ u32 pi;
+ int rv;
+
+ p = hash_get_mem (pm->policer_config_by_name, name);
+
+ if (is_add == 0)
+ {
+ if (p == 0)
+ {
+ vec_free (name);
+ return clib_error_return (0, "No such policer configuration");
+ }
+ hash_unset_mem (pm->policer_config_by_name, name);
+ hash_unset_mem (pm->policer_index_by_name, name);
+ vec_free (name);
+ return 0;
+ }
+
+ if (p != 0)
+ {
+ vec_free (name);
+ return clib_error_return (0, "Policer already exists");
+ }
+
+ /* Vet the configuration before adding it to the table */
+ rv = sse2_pol_logical_2_physical (cfg, &test_policer);
+
+ if (rv == 0)
+ {
+ policer_read_response_type_st *pp;
+ sse2_qos_pol_cfg_params_st *cp;
+
+ pool_get (pm->configs, cp);
+ pool_get (pm->policer_templates, pp);
+
+ ASSERT (cp - pm->configs == pp - pm->policer_templates);
+
+ clib_memcpy (cp, cfg, sizeof (*cp));
+ clib_memcpy (pp, &test_policer, sizeof (*pp));
+
+ hash_set_mem (pm->policer_config_by_name, name, cp - pm->configs);
+ pool_get_aligned (pm->policers, policer, CLIB_CACHE_LINE_BYTES);
+ policer[0] = pp[0];
+ pi = policer - pm->policers;
+ hash_set_mem (pm->policer_index_by_name, name, pi);
+ *policer_index = pi;
+ }
+ else
+ {
+ vec_free (name);
+ return clib_error_return (0, "Config failed sanity check");
+ }
+
+ return 0;
+}
+
+u8 *
+format_policer_instance (u8 * s, va_list * va)
+{
+ policer_read_response_type_st *i
+ = va_arg (*va, policer_read_response_type_st *);
+
+ s = format (s, "policer at %llx: %s rate, %s color-aware\n",
+ i, i->single_rate ? "single" : "dual",
+ i->color_aware ? "is" : "not");
+ s = format (s, "cir %u tok/period, pir %u tok/period, scale %u\n",
+ i->cir_tokens_per_period, i->pir_tokens_per_period, i->scale);
+ s = format (s, "cur lim %u, cur bkt %u, ext lim %u, ext bkt %u\n",
+ i->current_limit,
+ i->current_bucket, i->extended_limit, i->extended_bucket);
+ s = format (s, "last update %llu\n", i->last_update_time);
+ return s;
+}
+
+static u8 *
+format_policer_round_type (u8 * s, va_list * va)
+{
+ sse2_qos_pol_cfg_params_st *c = va_arg (*va, sse2_qos_pol_cfg_params_st *);
+
+ if (c->rnd_type == SSE2_QOS_ROUND_TO_CLOSEST)
+ s = format (s, "closest");
+ else if (c->rnd_type == SSE2_QOS_ROUND_TO_UP)
+ s = format (s, "up");
+ else if (c->rnd_type == SSE2_QOS_ROUND_TO_DOWN)
+ s = format (s, "down");
+ else
+ s = format (s, "ILLEGAL");
+ return s;
+}
+
+
+static u8 *
+format_policer_rate_type (u8 * s, va_list * va)
+{
+ sse2_qos_pol_cfg_params_st *c = va_arg (*va, sse2_qos_pol_cfg_params_st *);
+
+ if (c->rate_type == SSE2_QOS_RATE_KBPS)
+ s = format (s, "kbps");
+ else if (c->rate_type == SSE2_QOS_RATE_PPS)
+ s = format (s, "pps");
+ else
+ s = format (s, "ILLEGAL");
+ return s;
+}
+
+static u8 *
+format_policer_type (u8 * s, va_list * va)
+{
+ sse2_qos_pol_cfg_params_st *c = va_arg (*va, sse2_qos_pol_cfg_params_st *);
+
+ if (c->rfc == SSE2_QOS_POLICER_TYPE_1R2C)
+ s = format (s, "1r2c");
+
+ else if (c->rfc == SSE2_QOS_POLICER_TYPE_1R3C_RFC_2697)
+ s = format (s, "1r3c");
+
+ else if (c->rfc == SSE2_QOS_POLICER_TYPE_2R3C_RFC_2698)
+ s = format (s, "2r3c-2698");
+
+ else if (c->rfc == SSE2_QOS_POLICER_TYPE_2R3C_RFC_4115)
+ s = format (s, "2r3c-4115");
+
+ else if (c->rfc == SSE2_QOS_POLICER_TYPE_2R3C_RFC_MEF5CF1)
+ s = format (s, "2r3c-mef5cf1");
+ else
+ s = format (s, "ILLEGAL");
+ return s;
+}
+
+static u8 *
+format_dscp (u8 * s, va_list * va)
+{
+ u32 i = va_arg (*va, u32);
+ char *t = 0;
+
+ switch (i)
+ {
+#define _(v,f,str) case VNET_DSCP_##f: t = str; break;
+ foreach_vnet_dscp
+#undef _
+ default:
+ return format (s, "ILLEGAL");
+ }
+ s = format (s, "%s", t);
+ return s;
+}
+
+static u8 *
+format_policer_action_type (u8 * s, va_list * va)
+{
+ sse2_qos_pol_action_params_st *a
+ = va_arg (*va, sse2_qos_pol_action_params_st *);
+
+ if (a->action_type == SSE2_QOS_ACTION_DROP)
+ s = format (s, "drop");
+ else if (a->action_type == SSE2_QOS_ACTION_TRANSMIT)
+ s = format (s, "transmit");
+ else if (a->action_type == SSE2_QOS_ACTION_MARK_AND_TRANSMIT)
+ s = format (s, "mark-and-transmit %U", format_dscp, a->dscp);
+ else
+ s = format (s, "ILLEGAL");
+ return s;
+}
+
+u8 *
+format_policer_config (u8 * s, va_list * va)
+{
+ sse2_qos_pol_cfg_params_st *c = va_arg (*va, sse2_qos_pol_cfg_params_st *);
+
+ s = format (s, "type %U cir %u eir %u cb %u eb %u\n",
+ format_policer_type, c,
+ c->rb.kbps.cir_kbps,
+ c->rb.kbps.eir_kbps, c->rb.kbps.cb_bytes, c->rb.kbps.eb_bytes);
+ s = format (s, "rate type %U, round type %U\n",
+ format_policer_rate_type, c, format_policer_round_type, c);
+ s = format (s, "conform action %U, exceed action %U, violate action %U\n",
+ format_policer_action_type, &c->conform_action,
+ format_policer_action_type, &c->exceed_action,
+ format_policer_action_type, &c->violate_action);
+ return s;
+}
+
+static uword
+unformat_policer_type (unformat_input_t * input, va_list * va)
+{
+ sse2_qos_pol_cfg_params_st *c = va_arg (*va, sse2_qos_pol_cfg_params_st *);
+
+ if (!unformat (input, "type"))
+ return 0;
+
+ if (unformat (input, "1r2c"))
+ c->rfc = SSE2_QOS_POLICER_TYPE_1R2C;
+ else if (unformat (input, "1r3c"))
+ c->rfc = SSE2_QOS_POLICER_TYPE_1R3C_RFC_2697;
+ else if (unformat (input, "2r3c-2698"))
+ c->rfc = SSE2_QOS_POLICER_TYPE_2R3C_RFC_2698;
+ else if (unformat (input, "2r3c-4115"))
+ c->rfc = SSE2_QOS_POLICER_TYPE_2R3C_RFC_4115;
+ else if (unformat (input, "2r3c-mef5cf1"))
+ c->rfc = SSE2_QOS_POLICER_TYPE_2R3C_RFC_MEF5CF1;
+ else
+ return 0;
+ return 1;
+}
+
+static uword
+unformat_policer_round_type (unformat_input_t * input, va_list * va)
+{
+ sse2_qos_pol_cfg_params_st *c = va_arg (*va, sse2_qos_pol_cfg_params_st *);
+
+ if (!unformat (input, "round"))
+ return 0;
+
+ if (unformat (input, "closest"))
+ c->rnd_type = SSE2_QOS_ROUND_TO_CLOSEST;
+ else if (unformat (input, "up"))
+ c->rnd_type = SSE2_QOS_ROUND_TO_UP;
+ else if (unformat (input, "down"))
+ c->rnd_type = SSE2_QOS_ROUND_TO_DOWN;
+ else
+ return 0;
+ return 1;
+}
+
+static uword
+unformat_policer_rate_type (unformat_input_t * input, va_list * va)
+{
+ sse2_qos_pol_cfg_params_st *c = va_arg (*va, sse2_qos_pol_cfg_params_st *);
+
+ if (!unformat (input, "rate"))
+ return 0;
+
+ if (unformat (input, "kbps"))
+ c->rate_type = SSE2_QOS_RATE_KBPS;
+ else if (unformat (input, "pps"))
+ c->rate_type = SSE2_QOS_RATE_PPS;
+ else
+ return 0;
+ return 1;
+}
+
+static uword
+unformat_policer_cir (unformat_input_t * input, va_list * va)
+{
+ sse2_qos_pol_cfg_params_st *c = va_arg (*va, sse2_qos_pol_cfg_params_st *);
+
+ if (unformat (input, "cir %u", &c->rb.kbps.cir_kbps))
+ return 1;
+ return 0;
+}
+
+static uword
+unformat_policer_eir (unformat_input_t * input, va_list * va)
+{
+ sse2_qos_pol_cfg_params_st *c = va_arg (*va, sse2_qos_pol_cfg_params_st *);
+
+ if (unformat (input, "eir %u", &c->rb.kbps.eir_kbps))
+ return 1;
+ return 0;
+}
+
+static uword
+unformat_policer_cb (unformat_input_t * input, va_list * va)
+{
+ sse2_qos_pol_cfg_params_st *c = va_arg (*va, sse2_qos_pol_cfg_params_st *);
+
+ if (unformat (input, "cb %u", &c->rb.kbps.cb_bytes))
+ return 1;
+ return 0;
+}
+
+static uword
+unformat_policer_eb (unformat_input_t * input, va_list * va)
+{
+ sse2_qos_pol_cfg_params_st *c = va_arg (*va, sse2_qos_pol_cfg_params_st *);
+
+ if (unformat (input, "eb %u", &c->rb.kbps.eb_bytes))
+ return 1;
+ return 0;
+}
+
+static uword
+unformat_dscp (unformat_input_t * input, va_list * va)
+{
+ u8 *r = va_arg (*va, u8 *);
+
+ if (0);
+#define _(v,f,str) else if (unformat (input, str)) *r = VNET_DSCP_##f;
+ foreach_vnet_dscp
+#undef _
+ else
+ return 0;
+ return 1;
+}
+
+static uword
+unformat_policer_action_type (unformat_input_t * input, va_list * va)
+{
+ sse2_qos_pol_action_params_st *a
+ = va_arg (*va, sse2_qos_pol_action_params_st *);
+
+ if (unformat (input, "drop"))
+ a->action_type = SSE2_QOS_ACTION_DROP;
+ else if (unformat (input, "transmit"))
+ a->action_type = SSE2_QOS_ACTION_TRANSMIT;
+ else if (unformat (input, "mark-and-transmit %U", unformat_dscp, &a->dscp))
+ a->action_type = SSE2_QOS_ACTION_MARK_AND_TRANSMIT;
+ else
+ return 0;
+ return 1;
+}
+
+static uword
+unformat_policer_action (unformat_input_t * input, va_list * va)
+{
+ sse2_qos_pol_cfg_params_st *c = va_arg (*va, sse2_qos_pol_cfg_params_st *);
+
+ if (unformat (input, "conform-action %U", unformat_policer_action_type,
+ &c->conform_action))
+ return 1;
+ else if (unformat (input, "exceed-action %U", unformat_policer_action_type,
+ &c->exceed_action))
+ return 1;
+ else if (unformat (input, "violate-action %U", unformat_policer_action_type,
+ &c->violate_action))
+ return 1;
+ return 0;
+}
+
+static uword
+unformat_policer_classify_next_index (unformat_input_t * input, va_list * va)
+{
+ u32 *r = va_arg (*va, u32 *);
+ vnet_policer_main_t *pm = &vnet_policer_main;
+ uword *p;
+ u8 *match_name = 0;
+
+ if (unformat (input, "%s", &match_name))
+ ;
+ else
+ return 0;
+
+ p = hash_get_mem (pm->policer_index_by_name, match_name);
+
+ if (p == 0)
+ return 0;
+
+ *r = p[0];
+
+ return 1;
+}
+
+static uword
+unformat_policer_classify_precolor (unformat_input_t * input, va_list * va)
+{
+ u32 *r = va_arg (*va, u32 *);
+
+ if (unformat (input, "conform-color"))
+ *r = POLICE_CONFORM;
+ else if (unformat (input, "exceed-color"))
+ *r = POLICE_EXCEED;
+ else
+ return 0;
+
+ return 1;
+}
+
+#define foreach_config_param \
+_(eb) \
+_(cb) \
+_(eir) \
+_(cir) \
+_(rate_type) \
+_(round_type) \
+_(type) \
+_(action)
+
+static clib_error_t *
+configure_policer_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ sse2_qos_pol_cfg_params_st c;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 is_add = 1;
+ u8 *name = 0;
+ u32 pi;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ memset (&c, 0, sizeof (c));
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "name %s", &name))
+ ;
+ else if (unformat (line_input, "color-aware"))
+ c.color_aware = 1;
+
+#define _(a) else if (unformat (line_input, "%U", unformat_policer_##a, &c)) ;
+ foreach_config_param
+#undef _
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ return policer_add_del (vm, name, &c, &pi, is_add);
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (configure_policer_command, static) = {
+ .path = "configure policer",
+ .short_help = "configure policer name <name> <params> ",
+ .function = configure_policer_command_fn,
+};
+/* *INDENT-ON* */
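+
+/*
+ * Illustrative CLI usage, assembled from the unformat functions above
+ * (names and numbers are arbitrary):
+ *
+ *   configure policer name pol1 type 1r2c rate kbps cir 10000 cb 15000 conform-action transmit exceed-action drop
+ *   configure policer name pol1 del
+ */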
+
+static clib_error_t *
+show_policer_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_policer_main_t *pm = &vnet_policer_main;
+ hash_pair_t *p;
+ u32 pool_index;
+ u8 *match_name = 0;
+ u8 *name;
+ sse2_qos_pol_cfg_params_st *config;
+ policer_read_response_type_st *templ;
+
+ (void) unformat (input, "name %s", &match_name);
+
+ /* *INDENT-OFF* */
+ hash_foreach_pair (p, pm->policer_config_by_name,
+ ({
+ name = (u8 *) p->key;
+ if (match_name == 0 || !strcmp((char *) name, (char *) match_name))
+ {
+ pool_index = p->value[0];
+ config = pool_elt_at_index (pm->configs, pool_index);
+ templ = pool_elt_at_index (pm->policer_templates, pool_index);
+ vlib_cli_output (vm, "Name \"%s\" %U ",
+ name, format_policer_config, config);
+ vlib_cli_output (vm, "Template %U",
+ format_policer_instance, templ);
+ vlib_cli_output (vm, "-----------");
+ }
+ }));
+ /* *INDENT-ON* */
+ return 0;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_policer_command, static) = {
+ .path = "show policer",
+ .short_help = "show policer [name]",
+ .function = show_policer_command_fn,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+policer_init (vlib_main_t * vm)
+{
+ vnet_policer_main_t *pm = &vnet_policer_main;
+ void vnet_policer_node_funcs_reference (void);
+
+ vnet_policer_node_funcs_reference ();
+
+ pm->vlib_main = vm;
+ pm->vnet_main = vnet_get_main ();
+
+ pm->policer_config_by_name = hash_create_string (0, sizeof (uword));
+ pm->policer_index_by_name = hash_create_string (0, sizeof (uword));
+
+ vnet_classify_register_unformat_policer_next_index_fn
+ (unformat_policer_classify_next_index);
+ vnet_classify_register_unformat_opaque_index_fn
+ (unformat_policer_classify_precolor);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (policer_init);
+
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/policer/policer.h b/src/vnet/policer/policer.h
new file mode 100644
index 00000000000..8e2d7c79b7c
--- /dev/null
+++ b/src/vnet/policer/policer.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_policer_h__
+#define __included_policer_h__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include <vnet/policer/xlate.h>
+#include <vnet/policer/police.h>
+
+typedef struct
+{
+ /* policer pool, aligned */
+ policer_read_response_type_st *policers;
+
+ /* config + template h/w policer instance parallel pools */
+ sse2_qos_pol_cfg_params_st *configs;
+ policer_read_response_type_st *policer_templates;
+
+ /* Config by name hash */
+ uword *policer_config_by_name;
+
+ /* Policer by name hash */
+ uword *policer_index_by_name;
+
+ /* Policer by sw_if_index vector */
+ u32 *policer_index_by_sw_if_index;
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} vnet_policer_main_t;
+
+vnet_policer_main_t vnet_policer_main;
+
+typedef enum
+{
+ VNET_POLICER_INDEX_BY_SW_IF_INDEX,
+ VNET_POLICER_INDEX_BY_OPAQUE,
+ VNET_POLICER_INDEX_BY_EITHER,
+} vnet_policer_index_t;
+
+typedef enum
+{
+ VNET_POLICER_NEXT_TRANSMIT,
+ VNET_POLICER_NEXT_DROP,
+ VNET_POLICER_N_NEXT,
+} vnet_policer_next_t;
+
+#define foreach_vnet_dscp \
+ _(0 , CS0, "CS0") \
+ _(8 , CS1, "CS1") \
+ _(10, AF11, "AF11") \
+ _(12, AF12, "AF12") \
+ _(14, AF13, "AF13") \
+ _(16, CS2, "CS2") \
+ _(18, AF21, "AF21") \
+ _(20, AF22, "AF22") \
+ _(22, AF23, "AF23") \
+ _(24, CS3, "CS3") \
+ _(26, AF31, "AF31") \
+ _(28, AF32, "AF32") \
+ _(30, AF33, "AF33") \
+ _(32, CS4, "CS4") \
+ _(34, AF41, "AF41") \
+ _(36, AF42, "AF42") \
+ _(38, AF43, "AF43") \
+ _(40, CS5, "CS5") \
+ _(46, EF, "EF") \
+ _(48, CS6, "CS6") \
+ _(50, CS7, "CS7")
+
+typedef enum
+{
+#define _(v,f,str) VNET_DSCP_##f = v,
+ foreach_vnet_dscp
+#undef _
+} vnet_dscp_t;
+
+u8 *format_policer_instance (u8 * s, va_list * va);
+clib_error_t *policer_add_del (vlib_main_t * vm,
+ u8 * name,
+ sse2_qos_pol_cfg_params_st * cfg,
+ u32 * policer_index, u8 is_add);
+
+#endif /* __included_policer_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/policer/xlate.c b/src/vnet/policer/xlate.c
new file mode 100644
index 00000000000..74a6eb23d0a
--- /dev/null
+++ b/src/vnet/policer/xlate.c
@@ -0,0 +1,1505 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <string.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <assert.h>
+#include <math.h>
+#include <stdint.h>
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include <vnet/policer/xlate.h>
+#include <vnet/policer/police.h>
+
+#define INTERNAL_SS 1
+
+/* debugs */
+#define SSE2_QOS_DEBUG_ERROR(msg, args...) \
+ fformat(stderr, msg "\n", ##args);
+
+#define SSE2_QOS_DEBUG_INFO(msg, args...) \
+ fformat(stderr, msg "\n", ##args);
+
+
+#define SSE2_QOS_TR_ERR(TpParms...)
+// {
+// }
+
+#define SSE2_QOS_TR_INFO(TpParms...)
+
+#ifndef MIN
+#define MIN(x,y) (((x)<(y))?(x):(y))
+#endif
+
+#ifndef MAX
+#define MAX(x,y) (((x)>(y))?(x):(y))
+#endif
+
+#define IPE_POLICER_FULL_WRITE_REQUEST_M40AH_OFFSET 0
+#define IPE_POLICER_FULL_WRITE_REQUEST_M40AH_MASK 8
+#define IPE_POLICER_FULL_WRITE_REQUEST_M40AH_SHIFT 24
+
+#define IPE_POLICER_FULL_WRITE_REQUEST_TYPE_OFFSET 2
+#define IPE_POLICER_FULL_WRITE_REQUEST_TYPE_MASK 2
+#define IPE_POLICER_FULL_WRITE_REQUEST_TYPE_SHIFT 10
+
+#define IPE_POLICER_FULL_WRITE_REQUEST_CMD_OFFSET 3
+#define IPE_POLICER_FULL_WRITE_REQUEST_CMD_MASK 2
+#define IPE_POLICER_FULL_WRITE_REQUEST_CMD_SHIFT 0
+
+#define IPE_POLICER_FULL_WRITE_REQUEST_M40AL_OFFSET 4
+#define IPE_POLICER_FULL_WRITE_REQUEST_M40AL_MASK 32
+#define IPE_POLICER_FULL_WRITE_REQUEST_M40AL_SHIFT 0
+
+#define IPE_POLICER_FULL_WRITE_REQUEST_RFC_OFFSET 8
+#define IPE_POLICER_FULL_WRITE_REQUEST_RFC_MASK 2
+#define IPE_POLICER_FULL_WRITE_REQUEST_RFC_SHIFT 30
+
+#define IPE_POLICER_FULL_WRITE_REQUEST_AN_OFFSET 8
+#define IPE_POLICER_FULL_WRITE_REQUEST_AN_MASK 1
+#define IPE_POLICER_FULL_WRITE_REQUEST_AN_SHIFT 29
+
+#define IPE_POLICER_FULL_WRITE_REQUEST_REXP_OFFSET 8
+#define IPE_POLICER_FULL_WRITE_REQUEST_REXP_MASK 4
+#define IPE_POLICER_FULL_WRITE_REQUEST_REXP_SHIFT 22
+
+#define IPE_POLICER_FULL_WRITE_REQUEST_ARM_OFFSET 9
+#define IPE_POLICER_FULL_WRITE_REQUEST_ARM_MASK 11
+#define IPE_POLICER_FULL_WRITE_REQUEST_ARM_SHIFT 11
+
+#define IPE_POLICER_FULL_WRITE_REQUEST_PRM_OFFSET 10
+#define IPE_POLICER_FULL_WRITE_REQUEST_PRM_MASK 11
+#define IPE_POLICER_FULL_WRITE_REQUEST_PRM_SHIFT 0
+
+#define IPE_POLICER_FULL_WRITE_REQUEST_CBLE_OFFSET 12
+#define IPE_POLICER_FULL_WRITE_REQUEST_CBLE_MASK 5
+#define IPE_POLICER_FULL_WRITE_REQUEST_CBLE_SHIFT 27
+
+#define IPE_POLICER_FULL_WRITE_REQUEST_CBLM_OFFSET 12
+#define IPE_POLICER_FULL_WRITE_REQUEST_CBLM_MASK 7
+#define IPE_POLICER_FULL_WRITE_REQUEST_CBLM_SHIFT 20
+
+#define IPE_POLICER_FULL_WRITE_REQUEST_EBLE_OFFSET 13
+#define IPE_POLICER_FULL_WRITE_REQUEST_EBLE_MASK 5
+#define IPE_POLICER_FULL_WRITE_REQUEST_EBLE_SHIFT 15
+
+#define IPE_POLICER_FULL_WRITE_REQUEST_EBLM_OFFSET 14
+#define IPE_POLICER_FULL_WRITE_REQUEST_EBLM_MASK 7
+#define IPE_POLICER_FULL_WRITE_REQUEST_EBLM_SHIFT 8
+
+#define IPE_POLICER_FULL_WRITE_REQUEST_CB_OFFSET 16
+#define IPE_POLICER_FULL_WRITE_REQUEST_CB_MASK 31
+#define IPE_POLICER_FULL_WRITE_REQUEST_CB_SHIFT 0
+
+#define IPE_POLICER_FULL_WRITE_REQUEST_EB_OFFSET 20
+#define IPE_POLICER_FULL_WRITE_REQUEST_EB_MASK 31
+#define IPE_POLICER_FULL_WRITE_REQUEST_EB_SHIFT 0
+
+#define IPE_RFC_RFC2697 0x00000000
+#define IPE_RFC_RFC2698 0x00000001
+#define IPE_RFC_RFC4115 0x00000002
+#define IPE_RFC_MEF5CF1 0x00000003
+
+/* End of constants copied from sse_ipe_desc_fmt.h */
+
+/* Misc Policer specific definitions */
+#define SSE2_QOS_POLICER_FIXED_PKT_SIZE 256
+
+// TODO check what can be provided by hw macro based on ASIC
+#define SSE2_QOS_POL_TICKS_PER_SEC 1000LL /* 1 tick = 1 ms */
+
+/*
+ * Default burst, in ms (byte format)
+ */
+#define SSE2_QOS_POL_DEF_BURST_BYTE 100
+
+/*
+ * Minimum burst needs to be such that the largest packet size is accommodated
+ */
+// Do we need to get it from some lib?
+#define SSE2_QOS_POL_MIN_BURST_BYTE 9*1024
+
+
+/*
+ * Flag to indicate if AN is employed or not
+ * 1 - TRUE, 0 - FALSE
+ */
+#define SSE2_QOS_POL_ALLOW_NEGATIVE 1
+
+// Various Macros to take care of policer calculations
+
+#define SSE2_QOS_POL_COMM_BKT_MAX \
+ (1<<IPE_POLICER_FULL_WRITE_REQUEST_CB_MASK)
+#define SSE2_QOS_POL_EXTD_BKT_MAX \
+ (1<<IPE_POLICER_FULL_WRITE_REQUEST_EB_MASK)
+
+#define SSE2_QOS_POL_RATE_EXP_SIZE \
+ (IPE_POLICER_FULL_WRITE_REQUEST_REXP_MASK)
+#define SSE2_QOS_POL_RATE_EXP_MAX ((1<<SSE2_QOS_POL_RATE_EXP_SIZE) - 1)
+#define SSE2_QOS_POL_AVG_RATE_MANT_SIZE \
+ (IPE_POLICER_FULL_WRITE_REQUEST_ARM_MASK)
+#define SSE2_QOS_POL_AVG_RATE_MANT_MAX \
+ ((1<< SSE2_QOS_POL_AVG_RATE_MANT_SIZE) - 1)
+#define SSE2_QOS_POL_AVG_RATE_MAX \
+ (SSE2_QOS_POL_AVG_RATE_MANT_MAX << \
+ SSE2_QOS_POL_RATE_EXP_MAX)
+
+#define SSE2_QOS_POL_PEAK_RATE_MANT_SIZE \
+ (IPE_POLICER_FULL_WRITE_REQUEST_PRM_MASK)
+#define SSE2_QOS_POL_PEAK_RATE_MANT_MAX \
+ ((1<<SSE2_QOS_POL_PEAK_RATE_MANT_SIZE) - 1)
+#define SSE2_QOS_POL_PEAK_RATE_MAX \
+ (SSE2_QOS_POL_PEAK_RATE_MANT_MAX << \
+ SSE2_QOS_POL_RATE_EXP_MAX)
+
+#define SSE2_QOS_POL_COMM_BKT_LIMIT_MANT_SIZE \
+ (IPE_POLICER_FULL_WRITE_REQUEST_CBLM_MASK)
+#define SSE2_QOS_POL_COMM_BKT_LIMIT_MANT_MAX \
+ ((1<<SSE2_QOS_POL_COMM_BKT_LIMIT_MANT_SIZE) - 1)
+#define SSE2_QOS_POL_COMM_BKT_LIMIT_EXP_SIZE \
+ (IPE_POLICER_FULL_WRITE_REQUEST_CBLE_MASK)
+#define SSE2_QOS_POL_COMM_BKT_LIMIT_EXP_MAX \
+ ((1<<SSE2_QOS_POL_COMM_BKT_LIMIT_EXP_SIZE) - 1)
+#define SSE2_QOS_POL_COMM_BKT_LIMIT_MAX \
+ ((u64)SSE2_QOS_POL_COMM_BKT_LIMIT_MANT_MAX << \
+ (u64)SSE2_QOS_POL_COMM_BKT_LIMIT_EXP_MAX)
+
+#define SSE2_QOS_POL_EXTD_BKT_LIMIT_MANT_SIZE \
+ (IPE_POLICER_FULL_WRITE_REQUEST_EBLM_MASK)
+#define SSE2_QOS_POL_EXTD_BKT_LIMIT_MANT_MAX \
+ ((1<<SSE2_QOS_POL_EXTD_BKT_LIMIT_MANT_SIZE) - 1)
+#define SSE2_QOS_POL_EXTD_BKT_LIMIT_EXP_SIZE \
+ (IPE_POLICER_FULL_WRITE_REQUEST_EBLE_MASK)
+#define SSE2_QOS_POL_EXTD_BKT_LIMIT_EXP_MAX \
+ ((1<<SSE2_QOS_POL_EXTD_BKT_LIMIT_EXP_SIZE) - 1)
+#define SSE2_QOS_POL_EXT_BKT_LIMIT_MAX \
+ ((u64)SSE2_QOS_POL_EXTD_BKT_LIMIT_MANT_MAX << \
+ (u64)SSE2_QOS_POL_EXTD_BKT_LIMIT_EXP_MAX)
+
+/*
+ * Rates determine the units of the bucket
+ *    256.114688 Gbps < Rate                          8 byte units
+ *    128.057344 Gbps < Rate <= 256.114688 Gbps       4 byte units
+ *     64.028672 Gbps < Rate <= 128.057344 Gbps       2 byte units
+ *                      Rate <=  64.028672 Gbps       1 byte units
+ *
+ * The code uses bytes per tick as opposed to Gigabits per second.
+ */
+#define RATE256 (256114688000LL / 8LL / SSE2_QOS_POL_TICKS_PER_SEC)
+#define RATE128 (128057344000LL / 8LL / SSE2_QOS_POL_TICKS_PER_SEC)
+#define RATE64 ( 64028672000LL / 8LL / SSE2_QOS_POL_TICKS_PER_SEC)
+
+#define RATE_OVER256_UNIT 8LL
+#define RATE_128TO256_UNIT 4LL
+#define RATE_64TO128_UNIT 2LL
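+
+/*
+ * Worked example (added for clarity): with 1 tick = 1 ms, the thresholds
+ * above convert to bytes per tick as follows:
+ *   RATE64  =  64028672000 / 8 / 1000 =  8003584 bytes/tick
+ *   RATE128 = 128057344000 / 8 / 1000 = 16007168 bytes/tick
+ *   RATE256 = 256114688000 / 8 / 1000 = 32014336 bytes/tick
+ * A hardware rate of, say, 20000000 bytes/tick falls between RATE128 and
+ * RATE256, so its buckets are maintained in 4-byte units.
+ */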
+
+static int
+sse2_qos_pol_round (u64 numerator,
+ u64 denominator,
+ u64 * rounded_value, sse2_qos_round_type_en round_type)
+{
+ int rc = 0;
+
+ if (denominator == 0)
+ {
+ SSE2_QOS_DEBUG_ERROR ("Illegal denominator");
+ SSE2_QOS_TR_ERR (SSE2_QOSRM_TP_ERR_59);
+ return (EINVAL);
+ }
+
+ switch (round_type)
+ {
+ case SSE2_QOS_ROUND_TO_CLOSEST:
+ *rounded_value = ((numerator + (denominator >> 1)) / denominator);
+ break;
+
+ case SSE2_QOS_ROUND_TO_UP:
+ *rounded_value = (numerator / denominator);
+ if ((*rounded_value * denominator) < numerator)
+ {
+ *rounded_value += 1;
+ }
+ break;
+
+ case SSE2_QOS_ROUND_TO_DOWN:
+ *rounded_value = (numerator / denominator);
+ break;
+
+ case SSE2_QOS_ROUND_INVALID:
+ default:
+ SSE2_QOS_DEBUG_ERROR ("Illegal round type");
+ SSE2_QOS_TR_ERR (SSE2_QOS_TP_ERR_60, round_type);
+ rc = EINVAL;
+ break;
+ }
+ return (rc);
+}
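+
+/*
+ * Example (added for clarity): sse2_qos_pol_round (10, 4, &v, type) yields
+ * v = 3 for SSE2_QOS_ROUND_TO_CLOSEST ((10 + 2) / 4), v = 3 for
+ * SSE2_QOS_ROUND_TO_UP (10 / 4 rounds up to 3), and v = 2 for
+ * SSE2_QOS_ROUND_TO_DOWN.
+ */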
+
+
+static int
+sse2_pol_validate_cfg_params (sse2_qos_pol_cfg_params_st * cfg)
+{
+ u64 numer, denom, rnd_value;
+ u32 cir_hw, eir_hw;
+ int rc = 0;
+
+ if ((cfg->rfc == SSE2_QOS_POLICER_TYPE_2R3C_RFC_2698) &&
+ (cfg->rb.kbps.eir_kbps < cfg->rb.kbps.cir_kbps))
+ {
+ SSE2_QOS_DEBUG_ERROR ("CIR (%u kbps) is greater than PIR (%u kbps)",
+ cfg->rb.kbps.cir_kbps, cfg->rb.kbps.eir_kbps);
+ SSE2_QOS_TR_ERR (SSE2_QOS_TP_ERR_39, cfg->rb.kbps.cir_kbps,
+ cfg->rb.kbps.eir_kbps);
+ return (EINVAL);
+ }
+
+ /*
+ * convert rates to bytes-per-tick
+ */
+ numer = (u64) (cfg->rb.kbps.cir_kbps);
+ denom = (u64) (8 * SSE2_QOS_POL_TICKS_PER_SEC) / 1000;
+ rc = sse2_qos_pol_round (numer, denom, &rnd_value,
+ (sse2_qos_round_type_en) cfg->rnd_type);
+ if (rc != 0)
+ {
+ SSE2_QOS_DEBUG_ERROR ("Unable to convert CIR to bytes/tick format");
+ // Error traced
+ return (rc);
+ }
+ cir_hw = (u32) rnd_value;
+
+ numer = (u64) (cfg->rb.kbps.eir_kbps);
+ rc = sse2_qos_pol_round (numer, denom, &rnd_value,
+ (sse2_qos_round_type_en) cfg->rnd_type);
+ if (rc != 0)
+ {
+ SSE2_QOS_DEBUG_ERROR ("Unable to convert EIR to bytes/tick format");
+ // Error traced
+ return (rc);
+ }
+ eir_hw = (u32) rnd_value;
+
+ if (cir_hw > SSE2_QOS_POL_AVG_RATE_MAX)
+ {
+ SSE2_QOS_DEBUG_ERROR ("hw cir (%u bytes/tick) is greater than the "
+ "max supported value (%u)", cir_hw,
+ SSE2_QOS_POL_AVG_RATE_MAX);
+ SSE2_QOS_TR_ERR (SSE2_QOS_TP_ERR_84, cir_hw, SSE2_QOS_POL_AVG_RATE_MAX);
+ return (EINVAL);
+ }
+
+ if (eir_hw > SSE2_QOS_POL_PEAK_RATE_MAX)
+ {
+ SSE2_QOS_DEBUG_ERROR ("hw eir (%u bytes/tick) is greater than the "
+ "max supported value (%u). Capping it to the max. "
+ "supported value", eir_hw,
+ SSE2_QOS_POL_PEAK_RATE_MAX);
+ SSE2_QOS_TR_ERR (SSE2_QOS_TP_ERR_85, eir_hw,
+ SSE2_QOS_POL_PEAK_RATE_MAX);
+ return (EINVAL);
+ }
+ /*
+ * CIR = 0, with bc != 0 is not allowed
+ */
+ if ((cfg->rb.kbps.cir_kbps == 0) && cfg->rb.kbps.cb_bytes)
+ {
+ SSE2_QOS_DEBUG_ERROR ("CIR = 0 with bc != 0");
+ SSE2_QOS_TR_ERR (SSE2_QOS_TP_ERR_55);
+ return (EINVAL);
+ }
+
+ if ((cfg->rb.kbps.eir_kbps == 0) &&
+ (cfg->rfc > SSE2_QOS_POLICER_TYPE_1R3C_RFC_2697))
+ {
+ SSE2_QOS_DEBUG_ERROR ("EIR = 0 for a 2R3C policer (rfc: %u)", cfg->rfc);
+ SSE2_QOS_TR_ERR (SSE2_QOS_TP_ERR_23, cfg->rb.kbps.eir_kbps, cfg->rfc);
+ return (EINVAL);
+ }
+
+ if (cfg->rb.kbps.eir_kbps &&
+ (cfg->rfc < SSE2_QOS_POLICER_TYPE_2R3C_RFC_2698))
+ {
+ SSE2_QOS_DEBUG_ERROR ("EIR: %u kbps for a 1-rate policer (rfc: %u)",
+ cfg->rb.kbps.eir_kbps, cfg->rfc);
+ SSE2_QOS_TR_ERR (SSE2_QOS_TP_ERR_23, cfg->rb.kbps.eir_kbps, cfg->rfc);
+ return (EINVAL);
+ }
+
+ if ((cfg->rfc == SSE2_QOS_POLICER_TYPE_1R2C) && cfg->rb.kbps.eb_bytes)
+ {
+ SSE2_QOS_DEBUG_ERROR ("For a 1R1B policer, EB burst cannot be > 0");
+ SSE2_QOS_TR_ERR (SSE2_QOS_TP_ERR_56);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+static void
+sse2_qos_convert_value_to_exp_mant_fmt (u64 value,
+ u16 max_exp_value,
+ u16 max_mant_value,
+ sse2_qos_round_type_en type,
+ u8 * exp, u32 * mant)
+{
+ u64 rnd_value;
+ u64 temp_mant;
+ u8 temp_exp;
+
+ /*
+ * Select the lowest possible exp, and the largest possible mant
+ */
+ temp_exp = 0;
+ temp_mant = value;
+ while (temp_exp <= max_exp_value)
+ {
+ if (temp_mant <= max_mant_value)
+ {
+ break;
+ }
+
+ temp_exp++;
+ rnd_value = 0;
+ (void) sse2_qos_pol_round ((u64) value, (u64) (1 << temp_exp),
+ &rnd_value, type);
+ temp_mant = rnd_value;
+ }
+
+ if (temp_exp > max_exp_value)
+ {
+ /*
+ * CAP mant to its max value, and decrement exp
+ */
+ temp_exp--;
+ temp_mant = max_mant_value;
+ }
+
+ *exp = temp_exp;
+ *mant = (u32) temp_mant;
+
+ SSE2_QOS_DEBUG_INFO ("value: 0x%llx, mant: %u, exp: %u", value, *mant,
+ *exp);
+ return;
+}
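+
+/*
+ * Worked example (added for clarity): converting value = 100000 with
+ * max_mant_value = 2047 (an 11-bit mantissa) and round-to-closest, the
+ * loop raises exp until round (100000 / 2^6) = 1563 <= 2047, giving
+ * exp = 6 and mant = 1563, i.e. 1563 << 6 = 100032 as the represented
+ * value.
+ */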
+
+static int
+sse2_pol_convert_cfg_rates_to_hw (sse2_qos_pol_cfg_params_st * cfg,
+ sse2_qos_pol_hw_params_st * hw)
+{
+ int rc = 0;
+ u32 cir_hw, eir_hw, hi_mant, hi_rate, cir_rnded, eir_rnded, eir_kbps;
+ u64 numer, denom, rnd_value;
+ u8 exp;
+
+ /*
+ * convert rates to bytes-per-tick (tick is 1ms)
+   * For rate conversion, the denominator is the same for both rates
+ */
+ denom = (u64) ((SSE2_QOS_POL_TICKS_PER_SEC * 8) / 1000);
+ numer = (u64) (cfg->rb.kbps.cir_kbps);
+ rc = sse2_qos_pol_round (numer, denom, &rnd_value,
+ (sse2_qos_round_type_en) cfg->rnd_type);
+ if (rc != 0)
+ {
+ SSE2_QOS_DEBUG_ERROR
+ ("Rounding error, rate: %d kbps, rounding_type: %d",
+ cfg->rb.kbps.cir_kbps, cfg->rnd_type);
+ // Error is traced
+ return (rc);
+ }
+ cir_hw = (u32) rnd_value;
+
+ if (cfg->rb.kbps.cir_kbps && (cir_hw == 0))
+ {
+ /*
+ * After rounding, cir_hw = 0. Bump it up
+ */
+ cir_hw = 1;
+ }
+
+ if (cfg->rfc == SSE2_QOS_POLICER_TYPE_1R2C)
+ {
+ eir_kbps = 0;
+ }
+ else if (cfg->rfc == SSE2_QOS_POLICER_TYPE_1R3C_RFC_2697)
+ {
+ eir_kbps = cfg->rb.kbps.cir_kbps;
+ }
+ else if (cfg->rfc == SSE2_QOS_POLICER_TYPE_2R3C_RFC_4115)
+ {
+ eir_kbps = cfg->rb.kbps.eir_kbps - cfg->rb.kbps.cir_kbps;
+ }
+ else
+ {
+ eir_kbps = cfg->rb.kbps.eir_kbps;
+ }
+
+ numer = (u64) eir_kbps;
+ rc = sse2_qos_pol_round (numer, denom, &rnd_value,
+ (sse2_qos_round_type_en) cfg->rnd_type);
+ if (rc != 0)
+ {
+ SSE2_QOS_DEBUG_ERROR
+ ("Rounding error, rate: %d kbps, rounding_type: %d", eir_kbps,
+ cfg->rnd_type);
+ // Error is traced
+ return (rc);
+ }
+ eir_hw = (u32) rnd_value;
+
+ if (eir_kbps && (eir_hw == 0))
+ {
+ /*
+ * After rounding, eir_hw = 0. Bump it up
+ */
+ eir_hw = 1;
+ }
+
+ SSE2_QOS_DEBUG_INFO ("cir_hw: %u bytes/tick, eir_hw: %u bytes/tick", cir_hw,
+ eir_hw);
+
+ if (cir_hw > eir_hw)
+ {
+ hi_rate = cir_hw;
+ }
+ else
+ {
+ hi_rate = eir_hw;
+ }
+
+ if ((cir_hw == 0) && (eir_hw == 0))
+ {
+ /*
+ * Both the rates are 0. Use exp = 15, and set the RFC to 4115. Also
+ * set AN = 0
+ */
+ exp = (u8) SSE2_QOS_POL_RATE_EXP_MAX;
+ hi_mant = 0;
+ hw->rfc = IPE_RFC_RFC4115;
+ hw->allow_negative = 0;
+ }
+ else
+ {
+ sse2_qos_convert_value_to_exp_mant_fmt (hi_rate,
+ (u16) SSE2_QOS_POL_RATE_EXP_MAX,
+ (u16)
+ SSE2_QOS_POL_AVG_RATE_MANT_MAX,
+ (sse2_qos_round_type_en)
+ cfg->rnd_type, &exp, &hi_mant);
+ }
+
+ denom = (1ULL << exp);
+ if (hi_rate == eir_hw)
+ {
+ hw->peak_rate_man = (u16) hi_mant;
+ rc = sse2_qos_pol_round ((u64) cir_hw, denom, &rnd_value,
+ (sse2_qos_round_type_en) cfg->rnd_type);
+ hw->avg_rate_man = (u16) rnd_value;
+ }
+ else
+ {
+ hw->avg_rate_man = (u16) hi_mant;
+ rc = sse2_qos_pol_round ((u64) eir_hw, denom, &rnd_value,
+ (sse2_qos_round_type_en) cfg->rnd_type);
+ hw->peak_rate_man = (u16) rnd_value;
+ }
+ if (rc != 0)
+ {
+ SSE2_QOS_DEBUG_ERROR ("Rounding error");
+ // Error is traced
+ return (rc);
+ }
+ hw->rate_exp = exp;
+
+ if ((hw->avg_rate_man == 0) && (cfg->rb.kbps.cir_kbps))
+ {
+ /*
+ * cir was reduced to 0 during rounding. Bump it up
+ */
+ hw->avg_rate_man = 1;
+ SSE2_QOS_DEBUG_INFO ("CIR = 0 during rounding. Bump it up to %u "
+ "bytes/tick", (hw->avg_rate_man << hw->rate_exp));
+ }
+
+ if ((hw->peak_rate_man == 0) && eir_kbps)
+ {
+ /*
+ * eir was reduced to 0 during rounding. Bump it up
+ */
+ hw->peak_rate_man = 1;
+ SSE2_QOS_DEBUG_INFO ("EIR = 0 during rounding. Bump it up to %u "
+ "bytes/tick", (hw->peak_rate_man << hw->rate_exp));
+ }
+
+ cir_rnded = (hw->avg_rate_man << hw->rate_exp);
+ eir_rnded = (hw->peak_rate_man << hw->rate_exp);
+
+ SSE2_QOS_DEBUG_INFO ("Configured(rounded) values, cir: %u "
+ "kbps (mant: %u, exp: %u, rate: %u bytes/tick)",
+ cfg->rb.kbps.cir_kbps, hw->avg_rate_man,
+ hw->rate_exp, cir_rnded);
+
+ SSE2_QOS_DEBUG_INFO ("Configured(rounded) values, eir: %u "
+ "kbps (mant: %u, exp: %u, rate: %u bytes/tick)",
+ cfg->rb.kbps.eir_kbps, hw->peak_rate_man,
+ hw->rate_exp, eir_rnded);
+
+ return (rc);
+}
+
+/*****
+ * NAME
+ * sse2_pol_get_bkt_max
+ *
+ * PARAMETERS
+ * rate_hw - either the average rate or peak rate
+ * bkt_max - maximum field value of the current or extended bucket
+ *
+ * RETURNS
+ * u64 - maximum token bytes for the current or extended bucket
+ *
+ * DESCRIPTION
+ * The current and extended bucket fields are in units of 1, 2, 4 or 8
+ * bytes, determined by the average rate (current bucket) or the peak
+ * rate (extended bucket).
+ *
+ * To get the actual maximum number of bytes that can be stored in the
+ * field, the value must be multiplied by the units of either 1,2,4,8
+ * bytes based on the rate.
+ *****/
+u64
+sse2_pol_get_bkt_max (u64 rate_hw, u64 bkt_max)
+{
+ if (rate_hw <= RATE64)
+ {
+ return (bkt_max - 1);
+ }
+ else if (rate_hw <= RATE128)
+ {
+ return ((bkt_max * RATE_64TO128_UNIT) - RATE_64TO128_UNIT);
+ }
+ else if (rate_hw <= RATE256)
+ {
+ return ((bkt_max * RATE_128TO256_UNIT) - RATE_128TO256_UNIT);
+ }
+ /* rate must be over 256 */
+ return ((bkt_max * RATE_OVER256_UNIT) - RATE_OVER256_UNIT);
+}
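+
+/*
+ * Example (added for clarity): for a 2-byte-unit rate (RATE64 < rate_hw
+ * <= RATE128) and a field holding up to bkt_max = 1 << 31 units, the
+ * largest representable burst is (1 << 31) * 2 - 2 bytes.
+ */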
+
+/*****
+ * NAME
+ * sse2_pol_get_bkt_value
+ *
+ * PARAMETERS
+ * rate_hw - either the average rate or peak rate
+ * byte_value - bytes for this token bucket
+ *
+ * RETURNS
+ * u64 - unit value for the current or extended bucket field
+ *
+ * DESCRIPTION
+ * The current and extended bucket fields are in units of 1, 2, 4 or 8
+ * bytes, determined by the average rate (current bucket) or the peak
+ * rate (extended bucket).
+ *
+ * To get the units that can be stored in the field, the byte value must
+ * be divided by the units of either 1,2,4,8 bytes based on the rate.
+ *****/
+u64
+sse2_pol_get_bkt_value (u64 rate_hw, u64 byte_value)
+{
+ if (rate_hw <= RATE64)
+ {
+ return (byte_value);
+ }
+ else if (rate_hw <= RATE128)
+ {
+ return (byte_value / RATE_64TO128_UNIT);
+ }
+ else if (rate_hw <= RATE256)
+ {
+ return (byte_value / RATE_128TO256_UNIT);
+ }
+ /* rate must be over 256 */
+ return (byte_value / RATE_OVER256_UNIT);
+}
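+
+/*
+ * Example (added for clarity): the inverse of sse2_pol_get_bkt_max --
+ * for a rate of 20000000 bytes/tick (4-byte units), a burst of 1000
+ * bytes is stored as 1000 / 4 = 250 units in the bucket field.
+ */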
+
+static void
+sse2_pol_rnd_burst_byte_fmt (u64 cfg_burst,
+ u16 max_exp_value,
+ u16 max_mant_value,
+ u32 max_bkt_value,
+ u32 rate_hw,
+ u8 * exp, u32 * mant, u32 * bkt_value)
+{
+ u64 bkt_max = max_bkt_value;
+ u64 bkt_limit_max;
+ u64 rnd_burst;
+ u64 temp_bkt_value;
+
+ bkt_limit_max = ((u64) max_mant_value << (u64) max_exp_value);
+ bkt_max = sse2_pol_get_bkt_max (rate_hw, bkt_max);
+ bkt_max = MIN (bkt_max, bkt_limit_max);
+ if (!cfg_burst)
+ {
+ /*
+ * If configured burst = 0, compute the burst to be 100ms at a given
+ * rate. Note that for rate_hw = 0, exp = mant = 0.
+ */
+ cfg_burst = (u64) rate_hw *(u64) SSE2_QOS_POL_DEF_BURST_BYTE;
+ }
+
+ if (cfg_burst > bkt_max)
+ {
+ SSE2_QOS_DEBUG_ERROR ("burst 0x%llx bytes is greater than the max. "
+ "supported value 0x%llx bytes. Capping it to the "
+ "max", cfg_burst, bkt_max);
+ SSE2_QOS_TR_INFO (SSE2_QOS_TP_INFO_38,
+ (uint) cfg_burst, (uint) bkt_max);
+ cfg_burst = bkt_max;
+ }
+
+ if (cfg_burst < SSE2_QOS_POL_MIN_BURST_BYTE)
+ {
+ /*
+ * Bump up the burst value ONLY if the cfg_burst is non-zero AND
+ * less than the min. supported value
+ */
+ SSE2_QOS_DEBUG_INFO ("burst 0x%llx bytes is less than the min "
+ "supported value %u bytes. Rounding it up to "
+ "the min", cfg_burst, SSE2_QOS_POL_MIN_BURST_BYTE);
+ SSE2_QOS_TR_INFO (SSE2_QOS_TP_INFO_39, (uint) cfg_burst,
+ SSE2_QOS_POL_MIN_BURST_BYTE);
+ cfg_burst = SSE2_QOS_POL_MIN_BURST_BYTE;
+ }
+
+ sse2_qos_convert_value_to_exp_mant_fmt (cfg_burst,
+ max_exp_value,
+ max_mant_value,
+ SSE2_QOS_ROUND_TO_DOWN, exp, mant);
+
+ /* Bucket value is based on rate. */
+ rnd_burst = ((u64) (*mant) << (u64) (*exp));
+ temp_bkt_value = sse2_pol_get_bkt_value (rate_hw, rnd_burst);
+ *bkt_value = (u32) temp_bkt_value;
+}
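+
+/*
+ * Example (added for clarity): with cfg_burst = 0 and rate_hw = 10000
+ * bytes/tick, the default burst works out to 10000 * 100 = 1000000 bytes,
+ * i.e. 100 ms worth of traffic at that rate (1 tick = 1 ms).
+ */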
+
+static int
+sse2_pol_convert_cfg_burst_to_hw (sse2_qos_pol_cfg_params_st * cfg,
+ sse2_qos_pol_hw_params_st * hw)
+{
+ u8 temp_exp;
+ u32 temp_mant, rate_hw;
+ u64 eb_bytes;
+ u32 bkt_value;
+
+ /*
+ * compute Committed Burst
+ */
+ SSE2_QOS_DEBUG_INFO ("Compute commit burst ...");
+ rate_hw = (hw->avg_rate_man) << (hw->rate_exp);
+ sse2_pol_rnd_burst_byte_fmt (cfg->rb.kbps.cb_bytes,
+ (u16) SSE2_QOS_POL_COMM_BKT_LIMIT_EXP_MAX,
+ (u16) SSE2_QOS_POL_COMM_BKT_LIMIT_MANT_MAX,
+ (u32) SSE2_QOS_POL_COMM_BKT_MAX,
+ rate_hw, &temp_exp, &temp_mant, &bkt_value);
+ SSE2_QOS_DEBUG_INFO ("Committed burst, burst_limit: 0x%llx mant : %u, "
+ "exp: %u, rnded: 0x%llx cb:%u bytes",
+ cfg->rb.kbps.cb_bytes, temp_mant, temp_exp,
+ ((u64) temp_mant << (u64) temp_exp), bkt_value);
+
+ hw->comm_bkt_limit_exp = temp_exp;
+ hw->comm_bkt_limit_man = (u8) temp_mant;
+ hw->comm_bkt = bkt_value;
+
+ /*
+ * compute Exceed Burst
+ */
+ SSE2_QOS_DEBUG_INFO ("Compute exceed burst ...");
+
+ if (cfg->rfc == SSE2_QOS_POLICER_TYPE_1R2C)
+ {
+ /*
+ * For 1R2C, hw uses 2R3C (RFC-4115). As such, the Exceed Bucket
+ * params are set to 0. Recommendation is to use EB_exp = max_exp (=15)
+ * and EB_mant = 0
+ */
+ hw->extd_bkt_limit_exp = (u8) SSE2_QOS_POL_EXTD_BKT_LIMIT_EXP_MAX;
+ hw->extd_bkt_limit_man = 0;
+ SSE2_QOS_DEBUG_INFO ("Excess burst, burst: 0x%llx mant: %u, "
+ "exp: %u, rnded: 0x%llx bytes",
+ cfg->rb.kbps.eb_bytes, hw->extd_bkt_limit_man,
+ hw->extd_bkt_limit_exp,
+ ((u64) hw->extd_bkt_limit_man <<
+ (u64) hw->extd_bkt_limit_exp));
+ SSE2_QOS_TR_INFO (SSE2_QOS_TP_INFO_20, (uint) cfg->rb.kbps.eb_bytes,
+ hw->extd_bkt_limit_man, hw->extd_bkt_limit_exp);
+ return (0);
+ }
+
+ if (cfg->rfc == SSE2_QOS_POLICER_TYPE_1R3C_RFC_2697)
+ {
+ eb_bytes = cfg->rb.kbps.cb_bytes + cfg->rb.kbps.eb_bytes;
+ }
+ else if (cfg->rfc == SSE2_QOS_POLICER_TYPE_2R3C_RFC_4115)
+ {
+ eb_bytes = cfg->rb.kbps.eb_bytes - cfg->rb.kbps.cb_bytes;
+ }
+ else
+ {
+ eb_bytes = cfg->rb.kbps.eb_bytes;
+ }
+
+ rate_hw = (hw->peak_rate_man) << (hw->rate_exp);
+ sse2_pol_rnd_burst_byte_fmt (eb_bytes,
+ (u16) SSE2_QOS_POL_EXTD_BKT_LIMIT_EXP_MAX,
+ (u16) SSE2_QOS_POL_EXTD_BKT_LIMIT_MANT_MAX,
+ (u32) SSE2_QOS_POL_EXTD_BKT_MAX,
+ rate_hw, &temp_exp, &temp_mant, &bkt_value);
+
+ SSE2_QOS_DEBUG_INFO ("Excess burst, burst_limit: 0x%llx mant: %u, "
+ "exp: %u, rnded: 0x%llx eb:%u bytes",
+ cfg->rb.kbps.eb_bytes, temp_mant, temp_exp,
+ ((u64) temp_mant << (u64) temp_exp), bkt_value);
+
+ hw->extd_bkt_limit_exp = (u8) temp_exp;
+ hw->extd_bkt_limit_man = (u8) temp_mant;
+ hw->extd_bkt = bkt_value;
+
+ return (0);
+}
+
+
+/*
+ * Input: configured parameter values in 'cfg'.
+ * Output: h/w programmable parameter values in 'hw'.
+ * Return: success or failure code.
+ */
+static int
+sse2_pol_convert_cfg_to_hw_params (sse2_qos_pol_cfg_params_st * cfg,
+ sse2_qos_pol_hw_params_st * hw)
+{
+ int rc = 0;
+
+ /*
+ * clear the hw_params
+ */
+ memset (hw, 0, sizeof (sse2_qos_pol_hw_params_st));
+
+ hw->allow_negative = SSE2_QOS_POL_ALLOW_NEGATIVE;
+
+ if ((cfg->rfc == SSE2_QOS_POLICER_TYPE_1R2C) ||
+ (cfg->rfc == SSE2_QOS_POLICER_TYPE_2R3C_RFC_4115))
+ {
+ hw->rfc = IPE_RFC_RFC4115;
+ }
+ else if (cfg->rfc == SSE2_QOS_POLICER_TYPE_1R3C_RFC_2697)
+ {
+ hw->rfc = IPE_RFC_RFC2697;
+ }
+ else if (cfg->rfc == SSE2_QOS_POLICER_TYPE_2R3C_RFC_2698)
+ {
+ hw->rfc = IPE_RFC_RFC2698;
+ }
+ else if (cfg->rfc == SSE2_QOS_POLICER_TYPE_2R3C_RFC_MEF5CF1)
+ {
+ hw->rfc = IPE_RFC_MEF5CF1;
+ }
+ else
+ {
+ SSE2_QOS_DEBUG_ERROR ("Invalid RFC type %d\n", cfg->rfc);
+ SSE2_QOS_TR_ERR (SSE2_QOS_TP_ERR_61, cfg->rfc);
+ return (EINVAL);
+ }
+
+ rc = sse2_pol_convert_cfg_rates_to_hw (cfg, hw);
+ if (rc != 0)
+ {
+ SSE2_QOS_DEBUG_ERROR ("Unable to convert config rates to hw. Error: %d",
+ rc);
+ // Error is traced
+ return (rc);
+ }
+
+ rc = sse2_pol_convert_cfg_burst_to_hw (cfg, hw);
+ if (rc != 0)
+ {
+ SSE2_QOS_DEBUG_ERROR ("Unable to convert config burst to hw. Error: %d",
+ rc);
+ // Error is traced
+ return (rc);
+ }
+
+ return 0;
+}
+
+
+u32
+sse2_qos_convert_pps_to_kbps (u32 rate_pps)
+{
+ // sse2_qos_ship_inc_counter(SSE2_QOS_SHIP_COUNTER_TYPE_API_CNT,
+ // SSE2_QOS_SHIP_CNT_POL_CONV_PPS_TO_KBPS);
+
+ u64 numer, rnd_value = 0;
+
+ numer = (u64) ((u64) rate_pps *
+ (u64) SSE2_QOS_POLICER_FIXED_PKT_SIZE * 8LL);
+ (void) sse2_qos_pol_round (numer, 1000LL, &rnd_value,
+ SSE2_QOS_ROUND_TO_CLOSEST);
+
+ return ((u32) rnd_value);
+}
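+
+/*
+ * Example (added for clarity): 1000 pps with the fixed 256-byte packet
+ * size is 1000 * 256 * 8 = 2048000 bits/sec, i.e. 2048 kbps.
+ */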
+
+u32
+sse2_qos_convert_burst_ms_to_bytes (u32 burst_ms, u32 rate_kbps)
+{
+ u64 numer, rnd_value = 0;
+
+ //sse2_qos_ship_inc_counter(SSE2_QOS_SHIP_COUNTER_TYPE_API_CNT,
+ // SSE2_QOS_SHIP_CNT_POL_CONV_BURST_MS_TO_BYTES);
+
+ numer = (u64) ((u64) burst_ms * (u64) rate_kbps);
+
+ (void) sse2_qos_pol_round (numer, 8LL, &rnd_value,
+ SSE2_QOS_ROUND_TO_CLOSEST);
+
+ return ((u32) rnd_value);
+}
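+
+/*
+ * Example (added for clarity): a 100 ms burst at 2048 kbps is
+ * 100 * 2048 / 8 = 25600 bytes.
+ */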
+
+
+/*
+ * Input: configured parameters in 'cfg'.
+ * Output: h/w parameters are returned in 'hw',
+ * Return: Status, success or failure code.
+ */
+int
+sse2_pol_compute_hw_params (sse2_qos_pol_cfg_params_st * cfg,
+ sse2_qos_pol_hw_params_st * hw)
+{
+ int rc = 0;
+
+ if (!cfg || !hw)
+ {
+ SSE2_QOS_DEBUG_ERROR ("Illegal parameters");
+ return (-1);
+ }
+
+ /*
+ * Validate the police config params being presented to RM
+ */
+ rc = sse2_pol_validate_cfg_params (cfg);
+ if (rc != 0)
+ {
+ SSE2_QOS_DEBUG_ERROR ("Config parameter validation failed. Error: %d",
+ rc);
+ // Error is traced
+ return (-1);
+ }
+
+ /*
+ * first round configured values to h/w supported values. This func
+ * also determines whether 'tick' or 'byte' format
+ */
+ rc = sse2_pol_convert_cfg_to_hw_params (cfg, hw);
+ if (rc != 0)
+ {
+ SSE2_QOS_DEBUG_ERROR ("Unable to convert config params to hw params. "
+ "Error: %d", rc);
+ SSE2_QOS_TR_ERR (SSE2_QOS_TP_ERR_53, rc);
+ return (-1);
+ }
+
+ return 0;
+}
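+
+/*
+ * Illustrative sketch (not part of the original change): how a caller
+ * might fill in a 2R3C RFC 2698 config and convert it to h/w parameters.
+ * The guard macro and the function name below are hypothetical.
+ */
+#ifdef SSE2_QOS_XLATE_EXAMPLE
+static int
+example_compute_2r3c_policer (sse2_qos_pol_hw_params_st * hw)
+{
+  sse2_qos_pol_cfg_params_st cfg;
+
+  memset (&cfg, 0, sizeof (cfg));
+  cfg.rate_type = SSE2_QOS_RATE_KBPS;
+  cfg.rnd_type = SSE2_QOS_ROUND_TO_CLOSEST;
+  cfg.rfc = SSE2_QOS_POLICER_TYPE_2R3C_RFC_2698;
+  cfg.rb.kbps.cir_kbps = 100000;	/* 100 Mbps committed */
+  cfg.rb.kbps.eir_kbps = 200000;	/* 200 Mbps peak */
+  cfg.rb.kbps.cb_bytes = 64 * 1024;
+  cfg.rb.kbps.eb_bytes = 128 * 1024;
+
+  return sse2_pol_compute_hw_params (&cfg, hw);
+}
+#endif /* SSE2_QOS_XLATE_EXAMPLE */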
+
+
+#if defined (INTERNAL_SS) || defined (X86)
+
+// For initializing the x86 policer format
+
+/*
+ * Return the number of hardware TSC timer ticks per second for the dataplane.
+ * This is approximately, but not exactly, the clock speed.
+ */
+static u64
+get_tsc_hz (void)
+{
+ f64 cpu_freq;
+
+ cpu_freq = os_cpu_clock_frequency ();
+ return (u64) cpu_freq;
+}
+
+/*
+ * Convert rates into bytes_per_period and scale.
+ * Return 0 if ok or 1 if error.
+ */
+static int
+compute_policer_params (u64 hz, // CPU speed in clocks per second
+ u64 cir_rate, // in bytes per second
+ u64 pir_rate, // in bytes per second
+ u32 * current_limit, // in bytes, output may scale the input
+ u32 * extended_limit, // in bytes, output may scale the input
+ u32 * cir_bytes_per_period,
+ u32 * pir_bytes_per_period, u32 * scale)
+{
+ double period;
+ double internal_cir_bytes_per_period;
+ double internal_pir_bytes_per_period;
+ u32 max;
+ u32 scale_shift;
+ u32 scale_amount;
+ u32 __attribute__ ((unused)) orig_current_limit = *current_limit;
+
+ // Compute period. For 1Ghz-to-8Ghz CPUs, the period will be in
+ // the range of 16 to 116 usec.
+ period = ((double) hz) / ((double) POLICER_TICKS_PER_PERIOD);
+
+ // Determine bytes per period for each rate
+ internal_cir_bytes_per_period = (double) cir_rate / period;
+ internal_pir_bytes_per_period = (double) pir_rate / period;
+
+  // Scale if possible. Scaling helps rate accuracy, but is constrained
+  // by the scaled rates and limits fitting in 32-bits.
+  // In addition, we need to ensure the scaled rate is no larger than
+ // 2^22 tokens per period. This allows the dataplane to ignore overflow
+ // in the tokens-per-period multiplication since it could only
+ // happen if the policer were idle for more than a year.
+ // This is not really a constraint because 100Gbps at 1Ghz is only
+ // 1.6M tokens per period.
+#define MAX_RATE_SHIFT 10
+ max = MAX (*current_limit, *extended_limit);
+ max = MAX (max, (u32) internal_cir_bytes_per_period << MAX_RATE_SHIFT);
+ max = MAX (max, (u32) internal_pir_bytes_per_period << MAX_RATE_SHIFT);
+ scale_shift = __builtin_clz (max);
+
+ scale_amount = 1 << scale_shift;
+ *scale = scale_shift;
+
+ // Scale the limits
+ *current_limit = *current_limit << scale_shift;
+ *extended_limit = *extended_limit << scale_shift;
+
+ // Scale the rates
+ internal_cir_bytes_per_period =
+ internal_cir_bytes_per_period * ((double) scale_amount);
+ internal_pir_bytes_per_period =
+ internal_pir_bytes_per_period * ((double) scale_amount);
+
+ // Make sure the new rates are reasonable
+ // Only needed for very low rates with large bursts
+ if (internal_cir_bytes_per_period < 1.0)
+ {
+ internal_cir_bytes_per_period = 1.0;
+ }
+ if (internal_pir_bytes_per_period < 1.0)
+ {
+ internal_pir_bytes_per_period = 1.0;
+ }
+
+ *cir_bytes_per_period = (u32) internal_cir_bytes_per_period;
+ *pir_bytes_per_period = (u32) internal_pir_bytes_per_period;
+
+// #define PRINT_X86_POLICE_PARAMS
+#ifdef PRINT_X86_POLICE_PARAMS
+ {
+ u64 effective_BPS;
+
+    // This value is actually slightly conservative because it doesn't take
+    // into account the partial period at the end of a second. This really
+    // matters only for very low rates.
+ effective_BPS =
+ (((u64) (*cir_bytes_per_period * (u64) period)) >> *scale);
+
+ printf ("hz=%llu, cir_rate=%llu, limit=%u => "
+ "periods-per-sec=%d usec-per-period=%d => "
+ "scale=%d cir_BPP=%u, scaled_limit=%u => "
+ "effective BPS=%llu, accuracy=%f\n",
+ // input values
+ (unsigned long long) hz,
+ (unsigned long long) cir_rate, orig_current_limit,
+ // computed values
+ (u32) (period), // periods per second
+ (u32) (1000.0 * 1000.0 / period), // in usec
+ *scale, *cir_bytes_per_period, *current_limit,
+ // accuracy
+ (unsigned long long) effective_BPS,
+ (double) cir_rate / (double) effective_BPS);
+ }
+#endif
+
+ return 0; // ok
+}
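+
+/*
+ * Worked example (added for clarity; assumes POLICER_TICKS_PER_PERIOD is
+ * 2^17): at hz = 2 GHz there are 2e9 / 131072 ~= 15258 periods per second
+ * (~65.5 usec each), so a 10 Gbps CIR (1.25e9 bytes/sec) comes out to
+ * roughly 81920 bytes per period before scaling.
+ */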
+
+
+/*
+ * Input: configured parameters in 'cfg'.
+ * Output: h/w parameters are returned in 'hw',
+ * Return: Status, success or failure code.
+ */
+int
+x86_pol_compute_hw_params (sse2_qos_pol_cfg_params_st * cfg,
+ policer_read_response_type_st * hw)
+{
+ const int BYTES_PER_KBIT = (1000 / 8);
+ u64 hz;
+ u32 cap;
+
+ if (!cfg || !hw)
+ {
+ SSE2_QOS_DEBUG_ERROR ("Illegal parameters");
+ return (-1);
+ }
+
+ hz = get_tsc_hz ();
+ hw->last_update_time = 0;
+
+ // Cap the bursts to 32-bits. This allows up to almost one second of
+ // burst on a 40GE interface, which should be fine for x86.
+ cap =
+ (cfg->rb.kbps.cb_bytes > 0xFFFFFFFF) ? 0xFFFFFFFF : cfg->rb.kbps.cb_bytes;
+ hw->current_limit = cap;
+ cap =
+ (cfg->rb.kbps.eb_bytes > 0xFFFFFFFF) ? 0xFFFFFFFF : cfg->rb.kbps.eb_bytes;
+ hw->extended_limit = cap;
+
+ if ((cfg->rb.kbps.cir_kbps == 0) && (cfg->rb.kbps.cb_bytes == 0)
+ && (cfg->rb.kbps.eb_bytes == 0))
+ {
+      // This is an uninitialized, always-violate policer
+ hw->single_rate = 1;
+ hw->cir_tokens_per_period = 0;
+ return 0;
+ }
+
+ if ((cfg->rfc == SSE2_QOS_POLICER_TYPE_1R2C) ||
+ (cfg->rfc == SSE2_QOS_POLICER_TYPE_1R3C_RFC_2697))
+ {
+ // Single-rate policer
+
+ hw->single_rate = 1;
+
+ if ((cfg->rfc == SSE2_QOS_POLICER_TYPE_1R2C) && cfg->rb.kbps.eb_bytes)
+ {
+ SSE2_QOS_DEBUG_ERROR
+ ("Policer parameter validation failed -- 1R2C.");
+ return (-1);
+ }
+
+ if ((cfg->rb.kbps.cir_kbps == 0) ||
+ (cfg->rb.kbps.eir_kbps != 0) ||
+ ((cfg->rb.kbps.cb_bytes == 0) && (cfg->rb.kbps.eb_bytes == 0)))
+ {
+ SSE2_QOS_DEBUG_ERROR ("Policer parameter validation failed -- 1R.");
+ return (-1);
+ }
+
+ if (compute_policer_params (hz,
+ (u64) cfg->rb.kbps.cir_kbps *
+ BYTES_PER_KBIT, 0, &hw->current_limit,
+ &hw->extended_limit,
+ &hw->cir_tokens_per_period,
+ &hw->pir_tokens_per_period, &hw->scale))
+ {
+ SSE2_QOS_DEBUG_ERROR ("Policer parameter computation failed.");
+ return (-1);
+ }
+
+ }
+ else if ((cfg->rfc == SSE2_QOS_POLICER_TYPE_2R3C_RFC_2698) ||
+ (cfg->rfc == SSE2_QOS_POLICER_TYPE_2R3C_RFC_4115))
+ {
+ // Two-rate policer
+
+ if ((cfg->rb.kbps.cir_kbps == 0) || (cfg->rb.kbps.eir_kbps == 0)
+ || (cfg->rb.kbps.eir_kbps < cfg->rb.kbps.cir_kbps)
+ || (cfg->rb.kbps.cb_bytes == 0) || (cfg->rb.kbps.eb_bytes == 0))
+ {
+ SSE2_QOS_DEBUG_ERROR ("Config parameter validation failed.");
+ return (-1);
+ }
+
+ if (compute_policer_params (hz,
+ (u64) cfg->rb.kbps.cir_kbps *
+ BYTES_PER_KBIT,
+ (u64) cfg->rb.kbps.eir_kbps *
+ BYTES_PER_KBIT, &hw->current_limit,
+ &hw->extended_limit,
+ &hw->cir_tokens_per_period,
+ &hw->pir_tokens_per_period, &hw->scale))
+ {
+ SSE2_QOS_DEBUG_ERROR ("Policer parameter computation failed.");
+ return (-1);
+ }
+
+ }
+ else
+ {
+ SSE2_QOS_DEBUG_ERROR
+ ("Config parameter validation failed. RFC not supported");
+ return (-1);
+ }
+
+ hw->current_bucket = hw->current_limit;
+ hw->extended_bucket = hw->extended_limit;
+
+ return 0;
+}
+#endif
+
+
+/*
+ * Input: configured parameters in 'cfg'.
+ * Output: physical structure is returned in 'phys',
+ * Return: Status, success or failure code.
+ */
+int
+sse2_pol_logical_2_physical (sse2_qos_pol_cfg_params_st * cfg,
+ policer_read_response_type_st * phys)
+{
+ int rc;
+ sse2_qos_pol_hw_params_st pol_hw;
+ sse2_qos_pol_cfg_params_st kbps_cfg;
+
+ memset (phys, 0, sizeof (policer_read_response_type_st));
+ memset (&kbps_cfg, 0, sizeof (sse2_qos_pol_cfg_params_st));
+
+ if (!cfg)
+ {
+ SSE2_QOS_DEBUG_ERROR ("Illegal parameters");
+ return (-1);
+ }
+
+ switch (cfg->rate_type)
+ {
+ case SSE2_QOS_RATE_KBPS:
+ /* copy all the data into kbps_cfg */
+ kbps_cfg.rb.kbps.cir_kbps = cfg->rb.kbps.cir_kbps;
+ kbps_cfg.rb.kbps.eir_kbps = cfg->rb.kbps.eir_kbps;
+ kbps_cfg.rb.kbps.cb_bytes = cfg->rb.kbps.cb_bytes;
+ kbps_cfg.rb.kbps.eb_bytes = cfg->rb.kbps.eb_bytes;
+ break;
+ case SSE2_QOS_RATE_PPS:
+ kbps_cfg.rb.kbps.cir_kbps =
+ sse2_qos_convert_pps_to_kbps (cfg->rb.pps.cir_pps);
+ kbps_cfg.rb.kbps.eir_kbps =
+ sse2_qos_convert_pps_to_kbps (cfg->rb.pps.eir_pps);
+ kbps_cfg.rb.kbps.cb_bytes = sse2_qos_convert_burst_ms_to_bytes ((u32)
+ cfg->
+ rb.pps.cb_ms,
+ kbps_cfg.rb.
+ kbps.cir_kbps);
+ kbps_cfg.rb.kbps.eb_bytes =
+ sse2_qos_convert_burst_ms_to_bytes ((u32) cfg->rb.pps.eb_ms,
+ kbps_cfg.rb.kbps.eir_kbps);
+ break;
+ default:
+ SSE2_QOS_DEBUG_ERROR ("Illegal rate type");
+ return (-1);
+ }
+
+ /* rate type is now converted to kbps */
+ kbps_cfg.rate_type = SSE2_QOS_RATE_KBPS;
+ kbps_cfg.rnd_type = cfg->rnd_type;
+ kbps_cfg.rfc = cfg->rfc;
+
+ phys->action[POLICE_CONFORM] = cfg->conform_action.action_type;
+ phys->mark_dscp[POLICE_CONFORM] = cfg->conform_action.dscp;
+ phys->action[POLICE_EXCEED] = cfg->exceed_action.action_type;
+ phys->mark_dscp[POLICE_EXCEED] = cfg->exceed_action.dscp;
+ phys->action[POLICE_VIOLATE] = cfg->violate_action.action_type;
+ phys->mark_dscp[POLICE_VIOLATE] = cfg->violate_action.dscp;
+
+ phys->color_aware = cfg->color_aware;
+
+#if !defined (INTERNAL_SS) && !defined (X86)
+ // convert logical into hw params which involves qos calculations
+ rc = sse2_pol_compute_hw_params (&kbps_cfg, &pol_hw);
+ if (rc == -1)
+ {
+ SSE2_QOS_DEBUG_ERROR ("Unable to compute hw param. Error: %d", rc);
+ return (rc);
+ }
+
+ // convert hw params into the physical
+ phys->rfc = pol_hw.rfc;
+ phys->an = pol_hw.allow_negative;
+ phys->rexp = pol_hw.rate_exp;
+ phys->arm = pol_hw.avg_rate_man;
+ phys->prm = pol_hw.peak_rate_man;
+ phys->cble = pol_hw.comm_bkt_limit_exp;
+ phys->cblm = pol_hw.comm_bkt_limit_man;
+ phys->eble = pol_hw.extd_bkt_limit_exp;
+ phys->eblm = pol_hw.extd_bkt_limit_man;
+ phys->cb = pol_hw.comm_bkt;
+ phys->eb = pol_hw.extd_bkt;
+
+ /* for debugging purposes, the bucket token values can be overwritten */
+ if (cfg->overwrite_bucket)
+ {
+ phys->cb = cfg->current_bucket;
+ phys->eb = cfg->extended_bucket;
+ }
+#else
+ // convert logical into hw params which involves qos calculations
+ rc = x86_pol_compute_hw_params (&kbps_cfg, phys);
+ if (rc == -1)
+ {
+ SSE2_QOS_DEBUG_ERROR ("Unable to compute hw param. Error: %d", rc);
+ return (rc);
+ }
+
+ /* for debugging purposes, the bucket token values can be overwritten */
+ if (cfg->overwrite_bucket)
+ {
+ phys->current_bucket = cfg->current_bucket;
+ phys->extended_bucket = cfg->extended_bucket;
+ }
+
+ // Touch to avoid compiler warning for X86
+ pol_hw.allow_negative = pol_hw.allow_negative;
+
+#endif // if !defined (INTERNAL_SS) && !defined (X86)
+
+ return 0;
+}
+
+
+static void
+sse2_qos_convert_pol_bucket_to_hw_fmt (policer_read_response_type_st * bkt,
+ sse2_qos_pol_hw_params_st * hw_fmt)
+{
+ memset (hw_fmt, 0, sizeof (sse2_qos_pol_hw_params_st));
+#if !defined (INTERNAL_SS) && !defined (X86)
+ hw_fmt->rfc = (u8) bkt->rfc;
+ hw_fmt->allow_negative = (u8) bkt->an;
+ hw_fmt->rate_exp = (u8) bkt->rexp;
+ hw_fmt->avg_rate_man = (u16) bkt->arm;
+ hw_fmt->peak_rate_man = (u16) bkt->prm;
+ hw_fmt->comm_bkt_limit_man = (u8) bkt->cblm;
+ hw_fmt->comm_bkt_limit_exp = (u8) bkt->cble;
+ hw_fmt->extd_bkt_limit_man = (u8) bkt->eblm;
+ hw_fmt->extd_bkt_limit_exp = (u8) bkt->eble;
+ hw_fmt->extd_bkt = bkt->eb;
+ hw_fmt->comm_bkt = bkt->cb;
+#endif // if !defined (INTERNAL_SS) && !defined (X86)
+}
+
+/*
+ * Input: h/w programmable parameter values in 'hw'
+ * Output: configured parameter values in 'cfg'
+ * Return: Status, success or failure code.
+ */
+static int
+sse2_pol_convert_hw_to_cfg_params (sse2_qos_pol_hw_params_st * hw,
+ sse2_qos_pol_cfg_params_st * cfg)
+{
+ u64 temp_rate;
+
+ if ((hw == NULL) || (cfg == NULL))
+ {
+ return EINVAL;
+ }
+
+ if ((hw->rfc == IPE_RFC_RFC4115) &&
+ !(hw->peak_rate_man << hw->rate_exp) && !(hw->extd_bkt_limit_man))
+ {
+ /*
+ * For a 1R2C, we set EIR = 0, EB = 0
+ */
+ cfg->rfc = SSE2_QOS_POLICER_TYPE_1R2C;
+ }
+ else if (hw->rfc == IPE_RFC_RFC2697)
+ {
+ cfg->rfc = SSE2_QOS_POLICER_TYPE_1R3C_RFC_2697;
+ }
+ else if (hw->rfc == IPE_RFC_RFC2698)
+ {
+ cfg->rfc = SSE2_QOS_POLICER_TYPE_2R3C_RFC_2698;
+ }
+ else if (hw->rfc == IPE_RFC_RFC4115)
+ {
+ cfg->rfc = SSE2_QOS_POLICER_TYPE_2R3C_RFC_4115;
+ }
+ else if (hw->rfc == IPE_RFC_MEF5CF1)
+ {
+ cfg->rfc = SSE2_QOS_POLICER_TYPE_2R3C_RFC_MEF5CF1;
+ }
+ else
+ {
+ return EINVAL;
+ }
+
+ temp_rate = (((u64) hw->avg_rate_man << hw->rate_exp) * 8LL *
+ SSE2_QOS_POL_TICKS_PER_SEC) / 1000;
+ cfg->rb.kbps.cir_kbps = (u32) temp_rate;
+
+ temp_rate = (((u64) hw->peak_rate_man << hw->rate_exp) * 8LL *
+ SSE2_QOS_POL_TICKS_PER_SEC) / 1000;
+ cfg->rb.kbps.eir_kbps = (u32) temp_rate;
+
+ cfg->rb.kbps.cb_bytes = ((u64) hw->comm_bkt_limit_man <<
+ (u64) hw->comm_bkt_limit_exp);
+ cfg->rb.kbps.eb_bytes = ((u64) hw->extd_bkt_limit_man <<
+ (u64) hw->extd_bkt_limit_exp);
+
+ if (cfg->rfc == SSE2_QOS_POLICER_TYPE_1R3C_RFC_2697)
+ {
+ /*
+ * For 1R3C in the hardware, EB = sum(CB, EB). Also, EIR = CIR. Restore
+ * values such that the configured params don't reflect this adjustment
+ */
+ cfg->rb.kbps.eb_bytes = (cfg->rb.kbps.eb_bytes - cfg->rb.kbps.cb_bytes);
+ cfg->rb.kbps.eir_kbps = 0;
+ }
+ else if (cfg->rfc == SSE2_QOS_POLICER_TYPE_2R3C_RFC_4115)
+ {
+ /*
+       * For 4115, the hardware stores the excess rate and burst, but the
+       * config expects the peak rate and burst, so adjust back to EIR/EB
+ */
+ cfg->rb.kbps.eir_kbps += cfg->rb.kbps.cir_kbps;
+ cfg->rb.kbps.eb_bytes += cfg->rb.kbps.cb_bytes;
+ }
+ /* h/w conversion to cfg is in kbps */
+ cfg->rate_type = SSE2_QOS_RATE_KBPS;
+ cfg->overwrite_bucket = 0;
+ cfg->current_bucket = hw->comm_bkt;
+ cfg->extended_bucket = hw->extd_bkt;
+
+ SSE2_QOS_DEBUG_INFO ("configured params, cir: %u kbps, eir: %u kbps, cb "
+ "burst: 0x%llx bytes, eb burst: 0x%llx bytes",
+ cfg->rb.kbps.cir_kbps, cfg->rb.kbps.eir_kbps,
+ cfg->rb.kbps.cb_bytes, cfg->rb.kbps.eb_bytes);
+ SSE2_QOS_TR_INFO (SSE2_QOS_TP_INFO_22, cfg->rb.kbps.cir_kbps,
+ cfg->rb.kbps.eir_kbps,
+ (uint) cfg->rb.kbps.cb_bytes,
+ (uint) cfg->rb.kbps.eb_bytes);
+
+ return 0;
+}
+
+u32
+sse2_qos_convert_kbps_to_pps (u32 rate_kbps)
+{
+ u64 numer, denom, rnd_value = 0;
+
+ // sse_qosrm_ship_inc_counter(SSE2_QOS_SHIP_COUNTER_TYPE_API_CNT,
+ // SSE2_QOS_SHIP_CNT_POL_CONV_KBPS_TO_PPS);
+
+ numer = (u64) ((u64) rate_kbps * 1000LL);
+ denom = (u64) ((u64) SSE2_QOS_POLICER_FIXED_PKT_SIZE * 8LL);
+
+ (void) sse2_qos_pol_round (numer, denom, &rnd_value,
+ SSE2_QOS_ROUND_TO_CLOSEST);
+
+ return ((u32) rnd_value);
+}
+
+u32
+sse2_qos_convert_burst_bytes_to_ms (u64 burst_bytes, u32 rate_kbps)
+{
+ u64 numer, denom, rnd_value = 0;
+
+ //sse_qosrm_ship_inc_counter(SSE2_QOS_SHIP_COUNTER_TYPE_API_CNT,
+ // SSE2_QOS_SHIP_CNT_POL_CONV_BYTES_TO_BURST_MS);
+
+ numer = burst_bytes * 8LL;
+ denom = (u64) rate_kbps;
+
+ (void) sse2_qos_pol_round (numer, denom, &rnd_value,
+ SSE2_QOS_ROUND_TO_CLOSEST);
+
+ return ((u32) rnd_value);
+}
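+
+/*
+ * Example (added for clarity): these two functions invert the pps/ms
+ * conversions above -- 2048 kbps maps back to 2048000 / (256 * 8) = 1000
+ * pps, and 25600 bytes at 2048 kbps maps back to 25600 * 8 / 2048 = 100 ms.
+ */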
+
+/*
+ * Input: physical structure in 'phys', rate_type in cfg
+ * Output: configured parameters in 'cfg'.
+ * Return: Status, success or failure code.
+ */
+int
+sse2_pol_physical_2_logical (policer_read_response_type_st * phys,
+ sse2_qos_pol_cfg_params_st * cfg)
+{
+ int rc;
+ sse2_qos_pol_hw_params_st pol_hw;
+ sse2_qos_pol_cfg_params_st kbps_cfg;
+
+ memset (&pol_hw, 0, sizeof (sse2_qos_pol_hw_params_st));
+ memset (&kbps_cfg, 0, sizeof (sse2_qos_pol_cfg_params_st));
+
+ if (!phys)
+ {
+ SSE2_QOS_DEBUG_ERROR ("Illegal parameters");
+ return (-1);
+ }
+
+ sse2_qos_convert_pol_bucket_to_hw_fmt (phys, &pol_hw);
+
+ rc = sse2_pol_convert_hw_to_cfg_params (&pol_hw, &kbps_cfg);
+ if (rc != 0)
+ {
+ SSE2_QOS_DEBUG_ERROR ("Unable to convert hw params to config params. "
+ "Error: %d", rc);
+ return (-1);
+ }
+
+ /* check what rate type is required */
+ switch (cfg->rate_type)
+ {
+ case SSE2_QOS_RATE_KBPS:
+ /* copy all the data into kbps_cfg */
+ cfg->rb.kbps.cir_kbps = kbps_cfg.rb.kbps.cir_kbps;
+ cfg->rb.kbps.eir_kbps = kbps_cfg.rb.kbps.eir_kbps;
+ cfg->rb.kbps.cb_bytes = kbps_cfg.rb.kbps.cb_bytes;
+ cfg->rb.kbps.eb_bytes = kbps_cfg.rb.kbps.eb_bytes;
+ break;
+ case SSE2_QOS_RATE_PPS:
+ cfg->rb.pps.cir_pps =
+ sse2_qos_convert_kbps_to_pps (kbps_cfg.rb.kbps.cir_kbps);
+ cfg->rb.pps.eir_pps =
+ sse2_qos_convert_kbps_to_pps (kbps_cfg.rb.kbps.eir_kbps);
+ cfg->rb.pps.cb_ms =
+ sse2_qos_convert_burst_bytes_to_ms (kbps_cfg.rb.kbps.cb_bytes,
+ kbps_cfg.rb.kbps.cir_kbps);
+ cfg->rb.pps.eb_ms =
+ sse2_qos_convert_burst_bytes_to_ms (kbps_cfg.rb.kbps.eb_bytes,
+ kbps_cfg.rb.kbps.eir_kbps);
+ break;
+ default:
+ SSE2_QOS_DEBUG_ERROR ("Illegal rate type");
+ return (-1);
+ }
+
+ /* cfg->rate_type remains what it was */
+ cfg->rnd_type = kbps_cfg.rnd_type;
+ cfg->rfc = kbps_cfg.rfc;
+ cfg->overwrite_bucket = kbps_cfg.overwrite_bucket;
+ cfg->current_bucket = kbps_cfg.current_bucket;
+ cfg->extended_bucket = kbps_cfg.extended_bucket;
+
+ return 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/policer/xlate.h b/src/vnet/policer/xlate.h
new file mode 100644
index 00000000000..16742f80a47
--- /dev/null
+++ b/src/vnet/policer/xlate.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*---------------------------------------------------------------------------
+ * from gdp_logical_qos.h
+ *---------------------------------------------------------------------------
+ */
+
+#ifndef __included_xlate_h__
+#define __included_xlate_h__
+
+#include <vnet/policer/police.h>
+
+/*
+ * edt: * enum sse2_qos_policer_type_en
+ * Defines type of policer to be allocated
+ */
+typedef enum sse2_qos_policer_type_en_
+{
+ SSE2_QOS_POLICER_TYPE_1R2C = 0,
+ SSE2_QOS_POLICER_TYPE_1R3C_RFC_2697 = 1,
+ SSE2_QOS_POLICER_TYPE_2R3C_RFC_2698 = 2,
+ SSE2_QOS_POLICER_TYPE_2R3C_RFC_4115 = 3,
+ SSE2_QOS_POLICER_TYPE_2R3C_RFC_MEF5CF1 = 4,
+ SSE2_QOS_POLICER_TYPE_MAX
+} sse2_qos_policer_type_en;
+
+/*
+ * edt: * enum
+ * Enum used to define type of rounding used when calculating policer values
+ */
+typedef enum
+{
+ SSE2_QOS_ROUND_TO_CLOSEST = 0,
+ SSE2_QOS_ROUND_TO_UP,
+ SSE2_QOS_ROUND_TO_DOWN,
+ SSE2_QOS_ROUND_INVALID
+} sse2_qos_round_type_en;
+
+/*
+ * edt: * enum
+ * Enum used to define type of rate for configuration, either pps or kbps.
+ * If kbps, then burst is in bytes, if pps, then burst is in ms.
+ *
+ * Default of zero is kbps, which is in line with how it is programmed
+ * in actual hardware. Note, however, that this is the reverse of the
+ * logic of the units_in_bits field in sse2_static_policer_parameters_st,
+ * which is in line with sse_punt_drop.h.
+ */
+typedef enum
+{
+ SSE2_QOS_RATE_KBPS = 0,
+ SSE2_QOS_RATE_PPS,
+ SSE2_QOS_RATE_INVALID
+} sse2_qos_rate_type_en;
+
+/*
+ * edt: * enum
+ * Defines type of policer actions.
+ */
+typedef enum
+{
+ SSE2_QOS_ACTION_DROP = 0,
+ SSE2_QOS_ACTION_TRANSMIT,
+ SSE2_QOS_ACTION_MARK_AND_TRANSMIT
+} sse2_qos_action_type_en;
+
+/*
+ * edt * struct sse2_qos_pol_action_params_st
+ * This structure is used to hold user configured police action parameters.
+ *
+ * element: action_type
+ * Action type (see sse2_qos_action_type_en).
+ * element: dscp
+ * DSCP value to set when action is SSE2_QOS_ACTION_MARK_AND_TRANSMIT.
+ */
+typedef struct sse2_qos_pol_action_params_st_
+{
+ u8 action_type;
+ u8 dscp;
+} sse2_qos_pol_action_params_st;
+
+/*
+ * edt: * struct sse2_qos_pol_cfg_params_st
+ *
+ * Description:
+ * This structure is used to hold user configured policing parameters.
+ *
+ * element: cir_kbps
+ * CIR in kbps.
+ * element: eir_kbps
+ * EIR or PIR in kbps.
+ * element: cb_bytes
+ * Committed Burst in bytes.
+ * element: eb_bytes
+ * Excess or Peak Burst in bytes.
+ * element: cir_pps
+ * CIR in pps.
+ * element: eir_pps
+ * EIR or PIR in pps.
+ * element: cb_ms
+ * Committed Burst in milliseconds.
+ * element: eb_ms
+ * Excess or Peak Burst in milliseconds.
+ * element: rate_type
+ * Indicates whether the union is in kbps/bytes or pps/ms.
+ * element: rfc
+ * Policer algorithm - 1R2C, 1R3C (2697), 2R3C (2698) or 2R3C (4115). See
+ * sse2_qos_policer_type_en.
+ * element: rnd_type
+ * Rounding type (see sse2_qos_round_type_en). Needed when policer values
+ * need to be rounded. The caller can decide on the type of rounding used.
+ */
+typedef struct sse2_qos_pol_cfg_params_st_
+{
+ union
+ {
+ struct
+ {
+ u32 cir_kbps;
+ u32 eir_kbps;
+ u64 cb_bytes;
+ u64 eb_bytes;
+ } kbps;
+ struct
+ {
+ u32 cir_pps;
+ u32 eir_pps;
+ u64 cb_ms;
+ u64 eb_ms;
+ } pps;
+ } rb; /* rate burst config */
+ u8 rate_type; /* sse2_qos_rate_type_en */
+ u8 rnd_type; /* sse2_qos_round_type_en */
+ u8 rfc; /* sse2_qos_policer_type_en */
+ u8 color_aware;
+ u8 overwrite_bucket; /* for debugging purposes */
+ u32 current_bucket; /* for debugging purposes */
+ u32 extended_bucket; /* for debugging purposes */
+ sse2_qos_pol_action_params_st conform_action;
+ sse2_qos_pol_action_params_st exceed_action;
+ sse2_qos_pol_action_params_st violate_action;
+} sse2_qos_pol_cfg_params_st;
+
+
+typedef struct sse2_qos_pol_hw_params_st_
+{
+ u8 rfc;
+ u8 allow_negative;
+ u8 rate_exp;
+ u16 avg_rate_man;
+ u16 peak_rate_man;
+ u8 comm_bkt_limit_exp;
+ u8 comm_bkt_limit_man;
+ u8 extd_bkt_limit_exp;
+ u8 extd_bkt_limit_man;
+ u32 comm_bkt;
+ u32 extd_bkt;
+} sse2_qos_pol_hw_params_st;
+
+
+int
+sse2_pol_logical_2_physical (sse2_qos_pol_cfg_params_st * cfg,
+ policer_read_response_type_st * phys);
+
+
+#endif /* __included_xlate_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ppp/error.def b/src/vnet/ppp/error.def
new file mode 100644
index 00000000000..ba645408582
--- /dev/null
+++ b/src/vnet/ppp/error.def
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ppp_error.def: ppp errors
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+ppp_error (NONE, "no error")
+ppp_error (UNKNOWN_PROTOCOL, "unknown ppp protocol")
+ppp_error (UNKNOWN_ADDRESS_CONTROL, "address, control != 0xff03")
diff --git a/src/vnet/ppp/node.c b/src/vnet/ppp/node.c
new file mode 100644
index 00000000000..4f1f6a715e6
--- /dev/null
+++ b/src/vnet/ppp/node.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ppp_node.c: ppp packet processing
+ *
+ * Copyright (c) 2010 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ppp/ppp.h>
+#include <vppinfra/sparse_vec.h>
+
+#define foreach_ppp_input_next \
+ _ (PUNT, "error-punt") \
+ _ (DROP, "error-drop")
+
+typedef enum
+{
+#define _(s,n) PPP_INPUT_NEXT_##s,
+ foreach_ppp_input_next
+#undef _
+ PPP_INPUT_N_NEXT,
+} ppp_input_next_t;
+
+typedef struct
+{
+ u8 packet_data[32];
+} ppp_input_trace_t;
+
+static u8 *
+format_ppp_input_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ ppp_input_trace_t *t = va_arg (*va, ppp_input_trace_t *);
+
+ s = format (s, "%U", format_ppp_header, t->packet_data);
+
+ return s;
+}
+
+typedef struct
+{
+ /* Sparse vector mapping ppp protocol in network byte order
+ to next index. */
+ u16 *next_by_protocol;
+
+ u32 *sparse_index_by_next_index;
+} ppp_input_runtime_t;
+
+static uword
+ppp_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ ppp_input_runtime_t *rt = (void *) node->runtime_data;
+ u32 n_left_from, next_index, i_next, *from, *to_next;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node,
+ from,
+ n_left_from,
+ sizeof (from[0]),
+ sizeof (ppp_input_trace_t));
+
+ next_index = node->cached_next_index;
+ i_next = vec_elt (rt->sparse_index_by_next_index, next_index);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ ppp_header_t *h0, *h1;
+ u32 i0, i1, protocol0, protocol1, enqueue_code;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, sizeof (h0[0]), LOAD);
+ CLIB_PREFETCH (p3->data, sizeof (h1[0]), LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ h0 = (void *) (b0->data + b0->current_data);
+ h1 = (void *) (b1->data + b1->current_data);
+
+ b0->current_data += sizeof (h0[0]);
+ b1->current_data += sizeof (h1[0]);
+
+ b0->current_length -= sizeof (h0[0]);
+ b1->current_length -= sizeof (h1[0]);
+
+ /* Index sparse array with network byte order. */
+ protocol0 = h0->protocol;
+ protocol1 = h1->protocol;
+ sparse_vec_index2 (rt->next_by_protocol, protocol0, protocol1, &i0,
+ &i1);
+
+ b0->error =
+ node->errors[i0 ==
+ SPARSE_VEC_INVALID_INDEX ? PPP_ERROR_UNKNOWN_PROTOCOL
+ : PPP_ERROR_NONE];
+ b1->error =
+ node->errors[i1 ==
+ SPARSE_VEC_INVALID_INDEX ? PPP_ERROR_UNKNOWN_PROTOCOL
+ : PPP_ERROR_NONE];
+
+ enqueue_code = (i0 != i_next) + 2 * (i1 != i_next);
+
+ if (PREDICT_FALSE (enqueue_code != 0))
+ {
+ switch (enqueue_code)
+ {
+ case 1:
+ /* A B A */
+ to_next[-2] = bi1;
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node,
+ vec_elt (rt->next_by_protocol,
+ i0), bi0);
+ break;
+
+ case 2:
+ /* A A B */
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node,
+ vec_elt (rt->next_by_protocol,
+ i1), bi1);
+ break;
+
+ case 3:
+ /* A B B or A B C */
+ to_next -= 2;
+ n_left_to_next += 2;
+ vlib_set_next_frame_buffer (vm, node,
+ vec_elt (rt->next_by_protocol,
+ i0), bi0);
+ vlib_set_next_frame_buffer (vm, node,
+ vec_elt (rt->next_by_protocol,
+ i1), bi1);
+ if (i0 == i1)
+ {
+ vlib_put_next_frame (vm, node, next_index,
+ n_left_to_next);
+ i_next = i1;
+ next_index = vec_elt (rt->next_by_protocol, i_next);
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+ }
+ }
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ ppp_header_t *h0;
+ u32 i0, protocol0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ h0 = (void *) (b0->data + b0->current_data);
+
+ b0->current_data += sizeof (h0[0]);
+ b0->current_length -= sizeof (h0[0]);
+
+ protocol0 = h0->protocol;
+ i0 = sparse_vec_index (rt->next_by_protocol, protocol0);
+
+ b0->error =
+ node->errors[i0 ==
+ SPARSE_VEC_INVALID_INDEX ? PPP_ERROR_UNKNOWN_PROTOCOL
+ : PPP_ERROR_NONE];
+
+ /* Sent packet to wrong next? */
+ if (PREDICT_FALSE (i0 != i_next))
+ {
+ /* Return old frame; remove incorrectly enqueued packet. */
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1);
+
+ /* Send to correct next. */
+ i_next = i0;
+ next_index = vec_elt (rt->next_by_protocol, i_next);
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+ to_next[0] = bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return from_frame->n_vectors;
+}
+
+static char *ppp_error_strings[] = {
+#define ppp_error(n,s) s,
+#include "error.def"
+#undef ppp_error
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ppp_input_node) = {
+ .function = ppp_input,
+ .name = "ppp-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .runtime_data_bytes = sizeof (ppp_input_runtime_t),
+
+ .n_errors = PPP_N_ERROR,
+ .error_strings = ppp_error_strings,
+
+ .n_next_nodes = PPP_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [PPP_INPUT_NEXT_##s] = n,
+ foreach_ppp_input_next
+#undef _
+ },
+
+ .format_buffer = format_ppp_header_with_length,
+ .format_trace = format_ppp_input_trace,
+ .unformat_buffer = unformat_ppp_header,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+ppp_input_init (vlib_main_t * vm)
+{
+ ppp_input_runtime_t *rt;
+
+ {
+ clib_error_t *error = vlib_call_init_function (vm, ppp_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ ppp_setup_node (vm, ppp_input_node.index);
+
+ rt = vlib_node_get_runtime_data (vm, ppp_input_node.index);
+
+ rt->next_by_protocol = sparse_vec_new
+ ( /* elt bytes */ sizeof (rt->next_by_protocol[0]),
+ /* bits in index */ BITS (((ppp_header_t *) 0)->protocol));
+
+ vec_validate (rt->sparse_index_by_next_index, PPP_INPUT_NEXT_DROP);
+ vec_validate (rt->sparse_index_by_next_index, PPP_INPUT_NEXT_PUNT);
+ rt->sparse_index_by_next_index[PPP_INPUT_NEXT_DROP]
+ = SPARSE_VEC_INVALID_INDEX;
+ rt->sparse_index_by_next_index[PPP_INPUT_NEXT_PUNT]
+ = SPARSE_VEC_INVALID_INDEX;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ppp_input_init);
+
+void
+ppp_register_input_protocol (vlib_main_t * vm,
+ ppp_protocol_t protocol, u32 node_index)
+{
+ ppp_main_t *em = &ppp_main;
+ ppp_protocol_info_t *pi;
+ ppp_input_runtime_t *rt;
+ u16 *n;
+ u32 i;
+
+ {
+ clib_error_t *error = vlib_call_init_function (vm, ppp_input_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ pi = ppp_get_protocol_info (em, protocol);
+ pi->node_index = node_index;
+ pi->next_index = vlib_node_add_next (vm, ppp_input_node.index, node_index);
+
+ /* Setup ppp protocol -> next index sparse vector mapping. */
+ rt = vlib_node_get_runtime_data (vm, ppp_input_node.index);
+ n =
+ sparse_vec_validate (rt->next_by_protocol,
+ clib_host_to_net_u16 (protocol));
+ n[0] = pi->next_index;
+
+ /* Rebuild next index -> sparse index inverse mapping when sparse vector
+ is updated. */
+ vec_validate (rt->sparse_index_by_next_index, pi->next_index);
+ for (i = 1; i < vec_len (rt->next_by_protocol); i++)
+ rt->sparse_index_by_next_index[rt->next_by_protocol[i]] = i;
+}
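+
+/*
+ * Illustrative usage (added for clarity; the exact caller is hypothetical):
+ * a protocol's init function hands its graph node to the PPP input node,
+ * e.g.
+ *
+ *   ppp_register_input_protocol (vm, PPP_PROTOCOL_ip4, ip4_input_node.index);
+ *
+ * after which ppp-input dispatches IPv4 payloads directly to that node.
+ */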
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ppp/packet.h b/src/vnet/ppp/packet.h
new file mode 100644
index 00000000000..cab9743de92
--- /dev/null
+++ b/src/vnet/ppp/packet.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vnet_ppp_packet_h
+#define included_vnet_ppp_packet_h
+
+/*
+ * PPP packet format
+ *
+ * Copyright (c) 2009 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+See http://www.iana.org/assignments/ppp-numbers.
+
+The Point-to-Point Protocol (PPP) Data Link Layer [146,147,175]
+contains a 16-bit Protocol field to identify the encapsulated
+protocol. The Protocol field is consistent with the ISO 3309 (HDLC)
+extension mechanism for Address fields. All Protocols MUST be
+assigned such that the least significant bit of the most significant
+octet equals "0", and the least significant bit of the least
+significant octet equals "1".
+*/
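+
+/*
+ * Example (added for clarity): 0x0021 (ip4) satisfies the rule above --
+ * the most significant octet 0x00 has least significant bit 0, and the
+ * least significant octet 0x21 has least significant bit 1. The same
+ * holds for every value in the list below.
+ */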
+
+#define foreach_ppp_protocol \
+_ (0x0001, padding) \
+_ (0x0003, rohc_small_cid) \
+_ (0x0005, rohc_large_cid) \
+_ (0x0021, ip4) \
+_ (0x0023, osi) \
+_ (0x0025, xerox_ns_idp) \
+_ (0x0027, decnet) \
+_ (0x0029, appletalk) \
+_ (0x002b, ipx) \
+_ (0x002d, vj_compressed_tcp) \
+_ (0x002f, vj_uncompressed_tcp) \
+_ (0x0031, bpdu) \
+_ (0x0033, streams) \
+_ (0x0035, vines) \
+_ (0x0039, appletalk_eddp) \
+_ (0x003b, appletalk_smart_buffered) \
+_ (0x003d, multilink) \
+_ (0x003f, netbios_framing) \
+_ (0x0041, cisco) \
+_ (0x0043, timeplex) \
+_ (0x0045, fujitsu_lblb) \
+_ (0x0047, dca_remote_lan) \
+_ (0x0049, sdtp) \
+_ (0x004b, sna_over_802_2) \
+_ (0x004d, sna) \
+_ (0x004f, ip6_header_compression) \
+_ (0x0051, knx) \
+_ (0x0053, encryption) \
+_ (0x0055, link_encryption) \
+_ (0x0057, ip6) \
+_ (0x0059, ppp_mux) \
+_ (0x005b, vendor_specific_a) \
+_ (0x0061, rtp_iphc_full_header) \
+_ (0x0063, rtp_iphc_compressed_tcp) \
+_ (0x0065, rtp_iphc_compressed_non_tcp) \
+_ (0x0067, rtp_iphc_compressed_udp_8) \
+_ (0x0069, rtp_iphc_compressed_rtp_8) \
+_ (0x006f, stampede) \
+_ (0x0073, mp_plus) \
+_ (0x007d, control) \
+_ (0x00c1, ntcits_ipi) \
+_ (0x00cf, ppp_nlpid) \
+_ (0x00fb, multilink_compression) \
+_ (0x00fd, compressed_datagram) \
+_ (0x0201, 802_1d_hello) \
+_ (0x0203, ibm_source_routing) \
+_ (0x0205, dec_lanbridge) \
+_ (0x0207, cdp) \
+_ (0x0209, netcs) \
+_ (0x020b, stp) \
+_ (0x020d, edp) \
+_ (0x0211, oscp_a) \
+_ (0x0213, oscp_b) \
+_ (0x0231, luxcom) \
+_ (0x0233, sigma) \
+_ (0x0235, apple_client_server) \
+_ (0x0281, mpls_unicast) \
+_ (0x0283, mpls_multicast) \
+_ (0x0285, ieee_p1284_4) \
+_ (0x0287, tetra) \
+_ (0x0289, multichannel_flow_treatment) \
+_ (0x2063, rtp_iphc_compressed_tcp_no_delta) \
+_ (0x2065, rtp_iphc_context_state) \
+_ (0x2067, rtp_iphc_compressed_udp_16) \
+_ (0x2069, rtp_iphc_compressed_rtp_16) \
+_ (0x4001, cray) \
+_ (0x4003, cdpd) \
+_ (0x4005, expand) \
+_ (0x4007, odsicp) \
+_ (0x4009, docsis_dll) \
+_ (0x400B, cetacean) \
+_ (0x4021, lzs) \
+_ (0x4023, reftek) \
+_ (0x4025, fibre_channel) \
+_ (0x4027, emit) \
+_ (0x405b, vendor_specific_b) \
+_ (0xc021, lcp) \
+_ (0xc023, pap) \
+_ (0xc025, link_quality_report) \
+_ (0xc027, shiva_password) \
+_ (0xc029, cbcp) \
+_ (0xc02b, bacp) \
+_ (0xc02d, bap) \
+_ (0xc05b, vendor_specific_password) \
+_ (0xc081, container_control) \
+_ (0xc223, chap) \
+_ (0xc225, rsa) \
+_ (0xc227, extensible_authentication) \
+_ (0xc229, mitsubishi_security_info) \
+_ (0xc26f, stampede_authorization) \
+_ (0xc281, proprietary_authentication_a) \
+_ (0xc283, proprietary_authentication_b) \
+_ (0xc481, proprietary_node_id_authentication)
+
+typedef enum
+{
+#define _(n,f) PPP_PROTOCOL_##f = n,
+ foreach_ppp_protocol
+#undef _
+} ppp_protocol_t;
+
+/* PPP Link Control Protocol (LCP) and Internet Protocol Control Protocol (IPCP) Codes
+
+The Point-to-Point Protocol (PPP) Link Control Protocol (LCP),
+the Compression Control Protocol (CCP), Internet Protocol Control
+Protocol (IPCP), and other control protocols, contain an 8 bit
+Code field which identifies the type of packet. */
+
+#define foreach_ppp_lcp_code \
+_ (0, vendor_specific) \
+_ (1, configure_request) \
+_ (2, configure_ack) \
+_ (3, configure_nak) \
+_ (4, configure_reject) \
+_ (5, terminate_request) \
+_ (6, terminate_ack) \
+_ (7, code_reject) \
+_ (8, protocol_reject) \
+_ (9, echo_request) \
+_ (10, echo_reply) \
+_ (11, discard_request) \
+_ (12, identification) \
+_ (13, time_remaining) \
+_ (14, reset_request) \
+_ (15, reset_reply)
+
+typedef struct
+{
+ /* Set to 0xff 0x03 */
+ u8 address, control;
+
+ /* Layer 3 protocol for this packet. */
+ u16 protocol;
+} ppp_header_t;
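+
+/* On the wire, an IPv4 packet in PPP framing therefore starts
+   ff 03 00 21: address 0xff, control 0x03, then the protocol number
+   (PPP_PROTOCOL_ip4 == 0x0021) in network byte order. */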
+
+#endif /* included_vnet_ppp_packet_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ppp/pg.c b/src/vnet/ppp/pg.c
new file mode 100644
index 00000000000..0b46ccb9052
--- /dev/null
+++ b/src/vnet/ppp/pg.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ppp_pg.c: packet generator ppp interface
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ppp/ppp.h>
+
+typedef struct
+{
+ pg_edit_t address;
+ pg_edit_t control;
+ pg_edit_t protocol;
+} pg_ppp_header_t;
+
+static inline void
+pg_ppp_header_init (pg_ppp_header_t * e)
+{
+ pg_edit_init (&e->address, ppp_header_t, address);
+ pg_edit_init (&e->control, ppp_header_t, control);
+ pg_edit_init (&e->protocol, ppp_header_t, protocol);
+}
+
+uword
+unformat_pg_ppp_header (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t *s = va_arg (*args, pg_stream_t *);
+ pg_ppp_header_t *h;
+ u32 group_index, error;
+
+ h = pg_create_edit_group (s, sizeof (h[0]), sizeof (ppp_header_t),
+ &group_index);
+ pg_ppp_header_init (h);
+
+ pg_edit_set_fixed (&h->address, 0xff);
+ pg_edit_set_fixed (&h->control, 0x03);
+
+ error = 1;
+ if (!unformat (input, "%U",
+ unformat_pg_edit,
+ unformat_ppp_protocol_net_byte_order, &h->protocol))
+ goto done;
+
+ {
+ ppp_main_t *pm = &ppp_main;
+ ppp_protocol_info_t *pi = 0;
+ pg_node_t *pg_node = 0;
+
+ if (h->protocol.type == PG_EDIT_FIXED)
+ {
+ u16 t = *(u16 *) h->protocol.values[PG_EDIT_LO];
+ pi = ppp_get_protocol_info (pm, clib_net_to_host_u16 (t));
+ if (pi && pi->node_index != ~0)
+ pg_node = pg_get_node (pi->node_index);
+ }
+
+ if (pg_node && pg_node->unformat_edit
+ && unformat_user (input, pg_node->unformat_edit, s))
+ ;
+
+ else if (!unformat_user (input, unformat_pg_payload, s))
+ goto done;
+ }
+
+ error = 0;
+done:
+ if (error)
+ pg_free_edit_group (s);
+ return error == 0;
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ppp/ppp.c b/src/vnet/ppp/ppp.c
new file mode 100644
index 00000000000..a0eefbadc3b
--- /dev/null
+++ b/src/vnet/ppp/ppp.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ppp.c: ppp support
+ *
+ * Copyright (c) 2010 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/ppp/ppp.h>
+
+/* Global main structure. */
+ppp_main_t ppp_main;
+
+u8 *
+format_ppp_protocol (u8 * s, va_list * args)
+{
+ ppp_protocol_t p = va_arg (*args, u32);
+ ppp_main_t *pm = &ppp_main;
+ ppp_protocol_info_t *pi = ppp_get_protocol_info (pm, p);
+
+ if (pi)
+ s = format (s, "%s", pi->name);
+ else
+ s = format (s, "0x%04x", p);
+
+ return s;
+}
+
+u8 *
+format_ppp_header_with_length (u8 * s, va_list * args)
+{
+ ppp_main_t *pm = &ppp_main;
+ ppp_header_t *h = va_arg (*args, ppp_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+ ppp_protocol_t p = clib_net_to_host_u16 (h->protocol);
+ uword indent, header_bytes;
+
+ header_bytes = sizeof (h[0]);
+ if (max_header_bytes != 0 && header_bytes > max_header_bytes)
+ return format (s, "ppp header truncated");
+
+ indent = format_get_indent (s);
+
+ s = format (s, "PPP %U", format_ppp_protocol, p);
+
+ if (h->address != 0xff)
+ s = format (s, ", address 0x%02x", h->address);
+ if (h->control != 0x03)
+ s = format (s, ", control 0x%02x", h->control);
+
+  /* The early return above already handled header_bytes >
+     max_header_bytes, so the payload branch must test '<'. */
+  if (max_header_bytes != 0 && header_bytes < max_header_bytes)
+    {
+      ppp_protocol_info_t *pi = ppp_get_protocol_info (pm, p);
+
+      /* Unknown protocols have no registered node to format the payload. */
+      if (pi && pi->node_index != ~0)
+	{
+	  vlib_node_t *node = vlib_get_node (pm->vlib_main, pi->node_index);
+	  if (node->format_buffer)
+	    s = format (s, "\n%U%U",
+			format_white_space, indent,
+			node->format_buffer, (void *) (h + 1),
+			max_header_bytes - header_bytes);
+	}
+    }
+
+ return s;
+}
+
+u8 *
+format_ppp_header (u8 * s, va_list * args)
+{
+ ppp_header_t *h = va_arg (*args, ppp_header_t *);
+ return format (s, "%U", format_ppp_header_with_length, h, 0);
+}
+
+/* Returns ppp protocol as an int in host byte order. */
+uword
+unformat_ppp_protocol_host_byte_order (unformat_input_t * input,
+ va_list * args)
+{
+ u16 *result = va_arg (*args, u16 *);
+ ppp_main_t *pm = &ppp_main;
+ int p, i;
+
+ /* Numeric type. */
+ if (unformat (input, "0x%x", &p) || unformat (input, "%d", &p))
+ {
+ if (p >= (1 << 16))
+ return 0;
+ *result = p;
+ return 1;
+ }
+
+ /* Named type. */
+ if (unformat_user (input, unformat_vlib_number_by_name,
+ pm->protocol_info_by_name, &i))
+ {
+ ppp_protocol_info_t *pi = vec_elt_at_index (pm->protocol_infos, i);
+ *result = pi->protocol;
+ return 1;
+ }
+
+ return 0;
+}
+
+uword
+unformat_ppp_protocol_net_byte_order (unformat_input_t * input,
+ va_list * args)
+{
+ u16 *result = va_arg (*args, u16 *);
+ if (!unformat_user (input, unformat_ppp_protocol_host_byte_order, result))
+ return 0;
+ *result = clib_host_to_net_u16 ((u16) * result);
+ return 1;
+}
+
+uword
+unformat_ppp_header (unformat_input_t * input, va_list * args)
+{
+ u8 **result = va_arg (*args, u8 **);
+ ppp_header_t _h, *h = &_h;
+ u16 p;
+
+ if (!unformat (input, "%U", unformat_ppp_protocol_host_byte_order, &p))
+ return 0;
+
+ h->address = 0xff;
+ h->control = 0x03;
+ h->protocol = clib_host_to_net_u16 (p);
+
+ /* Add header to result. */
+ {
+    void *dst;
+    u32 n_bytes = sizeof (h[0]);
+
+    vec_add2 (*result, dst, n_bytes);
+    clib_memcpy (dst, h, n_bytes);
+ }
+
+ return 1;
+}
+
+static u8 *
+ppp_build_rewrite (vnet_main_t * vnm,
+ u32 sw_if_index,
+ vnet_link_t link_type, const void *dst_hw_address)
+{
+ ppp_header_t *h;
+ u8 *rewrite = NULL;
+ ppp_protocol_t protocol;
+
+ switch (link_type)
+ {
+#define _(a,b) case VNET_LINK_##a: protocol = PPP_PROTOCOL_##b; break
+ _(IP4, ip4);
+ _(IP6, ip6);
+ _(MPLS, mpls_unicast);
+#undef _
+ default:
+ return (NULL);
+ }
+
+ vec_validate (rewrite, sizeof (*h) - 1);
+ h = (ppp_header_t *) rewrite;
+ h->address = 0xff;
+ h->control = 0x03;
+ h->protocol = clib_host_to_net_u16 (protocol);
+
+ return (rewrite);
+}
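+
+/* For VNET_LINK_IP4 the rewrite produced above is the 4-byte string
+   ff 03 00 21; the generic rewrite code (see vnet/rewrite.h) prepends
+   it verbatim in front of each outgoing packet. */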
+
+/* *INDENT-OFF* */
+VNET_HW_INTERFACE_CLASS (ppp_hw_interface_class) = {
+ .name = "PPP",
+ .format_header = format_ppp_header_with_length,
+ .unformat_header = unformat_ppp_header,
+ .build_rewrite = ppp_build_rewrite,
+ .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
+};
+/* *INDENT-ON* */
+
+static void
+add_protocol (ppp_main_t * pm, ppp_protocol_t protocol, char *protocol_name)
+{
+ ppp_protocol_info_t *pi;
+ u32 i;
+
+ vec_add2 (pm->protocol_infos, pi, 1);
+ i = pi - pm->protocol_infos;
+
+ pi->name = protocol_name;
+ pi->protocol = protocol;
+ pi->next_index = pi->node_index = ~0;
+
+ hash_set (pm->protocol_info_by_protocol, protocol, i);
+ hash_set_mem (pm->protocol_info_by_name, pi->name, i);
+}
+
+static clib_error_t *
+ppp_init (vlib_main_t * vm)
+{
+ ppp_main_t *pm = &ppp_main;
+
+ memset (pm, 0, sizeof (pm[0]));
+ pm->vlib_main = vm;
+
+ pm->protocol_info_by_name = hash_create_string (0, sizeof (uword));
+ pm->protocol_info_by_protocol = hash_create (0, sizeof (uword));
+
+#define _(n,s) add_protocol (pm, PPP_PROTOCOL_##s, #s);
+ foreach_ppp_protocol;
+#undef _
+
+ return vlib_call_init_function (vm, ppp_input_init);
+}
+
+VLIB_INIT_FUNCTION (ppp_init);
+
+ppp_main_t *
+ppp_get_main (vlib_main_t * vm)
+{
+ vlib_call_init_function (vm, ppp_init);
+ return &ppp_main;
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ppp/ppp.h b/src/vnet/ppp/ppp.h
new file mode 100644
index 00000000000..fdc205b61e7
--- /dev/null
+++ b/src/vnet/ppp/ppp.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ppp.h: types/functions for ppp.
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_ppp_h
+#define included_ppp_h
+
+#include <vnet/vnet.h>
+#include <vnet/ppp/packet.h>
+#include <vnet/pg/pg.h>
+
+extern vnet_hw_interface_class_t ppp_hw_interface_class;
+
+typedef enum
+{
+#define ppp_error(n,s) PPP_ERROR_##n,
+#include <vnet/ppp/error.def>
+#undef ppp_error
+ PPP_N_ERROR,
+} ppp_error_t;
+
+typedef struct
+{
+ /* Name (a c string). */
+ char *name;
+
+ /* PPP protocol type in host byte order. */
+ ppp_protocol_t protocol;
+
+ /* Node which handles this type. */
+ u32 node_index;
+
+ /* Next index for this type. */
+ u32 next_index;
+} ppp_protocol_info_t;
+
+typedef struct
+{
+ vlib_main_t *vlib_main;
+
+ ppp_protocol_info_t *protocol_infos;
+
+ /* Hash tables mapping name/protocol to protocol info index. */
+ uword *protocol_info_by_name, *protocol_info_by_protocol;
+} ppp_main_t;
+
+always_inline ppp_protocol_info_t *
+ppp_get_protocol_info (ppp_main_t * em, ppp_protocol_t protocol)
+{
+ uword *p = hash_get (em->protocol_info_by_protocol, protocol);
+ return p ? vec_elt_at_index (em->protocol_infos, p[0]) : 0;
+}
+
+extern ppp_main_t ppp_main;
+
+/* Register given node index to take input for given ppp type. */
+void
+ppp_register_input_type (vlib_main_t * vm,
+ ppp_protocol_t protocol, u32 node_index);
+
+void ppp_set_adjacency (vnet_rewrite_header_t * rw,
+ uword max_data_bytes, ppp_protocol_t protocol);
+
+format_function_t format_ppp_protocol;
+format_function_t format_ppp_header;
+format_function_t format_ppp_header_with_length;
+
+/* Parse ppp protocol as 0xXXXX or protocol name.
+ In either host or network byte order. */
+unformat_function_t unformat_ppp_protocol_host_byte_order;
+unformat_function_t unformat_ppp_protocol_net_byte_order;
+
+/* Parse ppp header. */
+unformat_function_t unformat_ppp_header;
+unformat_function_t unformat_pg_ppp_header;
+
+always_inline void
+ppp_setup_node (vlib_main_t * vm, u32 node_index)
+{
+ vlib_node_t *n = vlib_get_node (vm, node_index);
+ pg_node_t *pn = pg_get_node (node_index);
+
+ n->format_buffer = format_ppp_header_with_length;
+ n->unformat_buffer = unformat_ppp_header;
+ pn->unformat_edit = unformat_pg_ppp_header;
+}
+
+void
+ppp_register_input_protocol (vlib_main_t * vm,
+ ppp_protocol_t protocol, u32 node_index);
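+
+/* Typical registration from a protocol's init function (sketch; the
+   ip4_input_node used here is an assumption, any node that accepts raw
+   L3 packets will do):
+
+     ppp_register_input_protocol (vm, PPP_PROTOCOL_ip4,
+                                  ip4_input_node.index);
+*/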
+
+#endif /* included_ppp_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/replication.c b/src/vnet/replication.c
new file mode 100644
index 00000000000..561c86cdfa4
--- /dev/null
+++ b/src/vnet/replication.c
@@ -0,0 +1,293 @@
+/*
+ * replication.c : packet replication
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/replication.h>
+
+
+replication_main_t replication_main;
+
+
+replication_context_t *
+replication_prep (vlib_main_t * vm,
+ vlib_buffer_t * b0, u32 recycle_node_index, u32 l2_packet)
+{
+ replication_main_t *rm = &replication_main;
+ replication_context_t *ctx;
+ uword cpu_number = vm->cpu_index;
+ ip4_header_t *ip;
+ u32 ctx_id;
+
+  /* Allocate a context; slot 0 is deliberately burned the first time
+     through so no live context ever has index 0 */
+ if (PREDICT_FALSE (rm->contexts[cpu_number] == 0))
+ pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES);
+
+ pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES);
+ ctx_id = ctx - rm->contexts[cpu_number];
+
+ /* Save state from vlib buffer */
+ ctx->saved_free_list_index = b0->free_list_index;
+ ctx->current_data = b0->current_data;
+
+ /* Set up vlib buffer hooks */
+ b0->recycle_count = ctx_id;
+ b0->free_list_index = rm->recycle_list_index;
+ b0->flags |= VLIB_BUFFER_RECYCLE;
+
+ /* Save feature state */
+ ctx->recycle_node_index = recycle_node_index;
+
+ /* Save vnet state */
+ clib_memcpy (ctx->vnet_buffer, vnet_buffer (b0),
+ sizeof (vnet_buffer_opaque_t));
+
+ /* Save packet contents */
+ ctx->l2_packet = l2_packet;
+ ip = (ip4_header_t *) vlib_buffer_get_current (b0);
+ if (l2_packet)
+ {
+ /* Save ethernet header */
+ ctx->l2_header[0] = ((u64 *) ip)[0];
+ ctx->l2_header[1] = ((u64 *) ip)[1];
+ ctx->l2_header[2] = ((u64 *) ip)[2];
+ /* set ip to the true ip header */
+ ip = (ip4_header_t *) (((u8 *) ip) + vnet_buffer (b0)->l2.l2_len);
+ }
+
+ /*
+ * Copy L3 fields.
+ * We need to save TOS for ip4 and ip6 packets.
+ * Fortunately the TOS field is
+ * in the first two bytes of both the ip4 and ip6 headers.
+ */
+ ctx->ip_tos = *((u16 *) (ip));
+
+ /*
+ * Save the ip4 checksum as well. We just blindly save the corresponding two
+ * bytes even for ip6 packets.
+ */
+ ctx->ip4_checksum = ip->checksum;
+
+ return ctx;
+}
+
+
+replication_context_t *
+replication_recycle (vlib_main_t * vm, vlib_buffer_t * b0, u32 is_last)
+{
+ replication_main_t *rm = &replication_main;
+ replication_context_t *ctx;
+ uword cpu_number = vm->cpu_index;
+ ip4_header_t *ip;
+
+ /* Get access to the replication context */
+ ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
+
+ /* Restore vnet buffer state */
+ clib_memcpy (vnet_buffer (b0), ctx->vnet_buffer,
+ sizeof (vnet_buffer_opaque_t));
+
+ /* Restore the packet start (current_data) and length */
+ vlib_buffer_advance (b0, ctx->current_data - b0->current_data);
+
+ /* Restore packet contents */
+ ip = (ip4_header_t *) vlib_buffer_get_current (b0);
+ if (ctx->l2_packet)
+ {
+ /* Restore ethernet header */
+ ((u64 *) ip)[0] = ctx->l2_header[0];
+ ((u64 *) ip)[1] = ctx->l2_header[1];
+ ((u64 *) ip)[2] = ctx->l2_header[2];
+ /* set ip to the true ip header */
+ ip = (ip4_header_t *) (((u8 *) ip) + vnet_buffer (b0)->l2.l2_len);
+ }
+
+  /* Restore L3 fields */
+ *((u16 *) (ip)) = ctx->ip_tos;
+ ip->checksum = ctx->ip4_checksum;
+
+ if (is_last)
+ {
+ /*
+ * This is the last replication in the list.
+ * Restore original buffer free functionality.
+ */
+ b0->free_list_index = ctx->saved_free_list_index;
+ b0->flags &= ~VLIB_BUFFER_RECYCLE;
+
+ /* Free context back to its pool */
+ pool_put (rm->contexts[cpu_number], ctx);
+ }
+
+ return ctx;
+}
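+
+/*
+ * Illustrative call pattern for a replication feature (sketch; node and
+ * variable names are assumed, not taken from this file):
+ *
+ *   ctx = replication_prep (vm, b0, my_recycle_node.index, 0);
+ *   ... transmit b0 ...
+ *
+ * When b0 is freed it lands on the recycle freelist, is framed back to
+ * my_recycle_node by the callback below, and that node calls
+ * replication_recycle (vm, b0, is_last) on each pass, with is_last = 1
+ * on the final replica to restore the original free list.
+ */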
+
+
+
+/*
+ * fish pkts back from the recycle queue/freelist
+ * un-flatten the context chains
+ */
+static void
+replication_recycle_callback (vlib_main_t * vm, vlib_buffer_free_list_t * fl)
+{
+ vlib_frame_t *f = 0;
+ u32 n_left_from;
+ u32 n_left_to_next = 0;
+ u32 n_this_frame = 0;
+ u32 *from;
+ u32 *to_next = 0;
+ u32 bi0, pi0;
+ vlib_buffer_t *b0;
+ int i;
+ replication_main_t *rm = &replication_main;
+ replication_context_t *ctx;
+ u32 feature_node_index = 0;
+ uword cpu_number = vm->cpu_index;
+
+ /*
+ * All buffers in the list are destined to the same recycle node.
+ * Pull the recycle node index from the first buffer.
+ * Note: this could be sped up if the node index were stuffed into
+ * the freelist itself.
+ */
+ if (vec_len (fl->aligned_buffers) > 0)
+ {
+ bi0 = fl->aligned_buffers[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
+ feature_node_index = ctx->recycle_node_index;
+ }
+ else if (vec_len (fl->unaligned_buffers) > 0)
+ {
+ bi0 = fl->unaligned_buffers[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
+ feature_node_index = ctx->recycle_node_index;
+ }
+
+ /* aligned, unaligned buffers */
+ for (i = 0; i < 2; i++)
+ {
+ if (i == 0)
+ {
+ from = fl->aligned_buffers;
+ n_left_from = vec_len (from);
+ }
+ else
+ {
+ from = fl->unaligned_buffers;
+ n_left_from = vec_len (from);
+ }
+
+ while (n_left_from > 0)
+ {
+ if (PREDICT_FALSE (n_left_to_next == 0))
+ {
+ if (f)
+ {
+ f->n_vectors = n_this_frame;
+ vlib_put_frame_to_node (vm, feature_node_index, f);
+ }
+
+ f = vlib_get_frame_to_node (vm, feature_node_index);
+ to_next = vlib_frame_vector_args (f);
+ n_left_to_next = VLIB_FRAME_SIZE;
+ n_this_frame = 0;
+ }
+
+ bi0 = from[0];
+ if (PREDICT_TRUE (n_left_from > 1))
+ {
+ pi0 = from[1];
+ vlib_prefetch_buffer_with_index (vm, pi0, LOAD);
+ }
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /* Mark that this buffer was just recycled */
+ b0->flags |= VLIB_BUFFER_IS_RECYCLED;
+
+#if (CLIB_DEBUG > 0)
+#if DPDK == 0
+ vlib_buffer_set_known_state (vm, bi0, VLIB_BUFFER_KNOWN_ALLOCATED);
+#endif
+#endif
+
+ /* If buffer is traced, mark frame as traced */
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ f->flags |= VLIB_FRAME_TRACE;
+
+ to_next[0] = bi0;
+
+ from++;
+ to_next++;
+ n_this_frame++;
+ n_left_to_next--;
+ n_left_from--;
+ }
+ }
+
+ vec_reset_length (fl->aligned_buffers);
+ vec_reset_length (fl->unaligned_buffers);
+
+ if (f)
+ {
+ ASSERT (n_this_frame);
+ f->n_vectors = n_this_frame;
+ vlib_put_frame_to_node (vm, feature_node_index, f);
+ }
+}
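+
+/* The VLIB_BUFFER_IS_RECYCLED flag set above is what
+   replication_is_recycled() in replication.h tests, letting the recycle
+   node distinguish fresh buffers from ones fished back off the
+   freelist. */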
+
+clib_error_t *
+replication_init (vlib_main_t * vm)
+{
+ replication_main_t *rm = &replication_main;
+ vlib_buffer_main_t *bm = vm->buffer_main;
+ vlib_buffer_free_list_t *fl;
+ __attribute__ ((unused)) replication_context_t *ctx;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+
+ rm->vlib_main = vm;
+ rm->vnet_main = vnet_get_main ();
+ rm->recycle_list_index =
+ vlib_buffer_create_free_list (vm, 1024 /* fictional */ ,
+ "replication-recycle");
+
+ fl = pool_elt_at_index (bm->buffer_free_list_pool, rm->recycle_list_index);
+
+ fl->buffers_added_to_freelist_function = replication_recycle_callback;
+
+ /* Verify the replication context is the expected size */
+ ASSERT (sizeof (replication_context_t) == 128); /* 2 cache lines */
+
+ vec_validate (rm->contexts, tm->n_vlib_mains - 1);
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (replication_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/replication.h b/src/vnet/replication.h
new file mode 100644
index 00000000000..5dc554c97b3
--- /dev/null
+++ b/src/vnet/replication.h
@@ -0,0 +1,136 @@
+/*
+ * replication.h : packet replication
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_replication_h
+#define included_replication_h
+
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+
+typedef struct
+{
+ /* The entire vnet buffer header restored for each replica */
+ u8 vnet_buffer[32]; /* 16B aligned to allow vector unit copy */
+ u8 reserved[32]; /* space for future expansion of vnet buffer header */
+
+ /* feature state used during this replication */
+ u64 feature_replicas; /* feature's id for its set of replicas */
+ u32 feature_counter; /* feature's current index into set of replicas */
+ u32 recycle_node_index; /* feature's recycle node index */
+
+ /*
+ * data saved from the start of replication and restored
+ * at the end of replication
+ */
+ u32 saved_free_list_index; /* from vlib buffer */
+
+ /* data saved from the original packet and restored for each replica */
+ u64 l2_header[3]; /* 24B (must be at least 22B for l2 packets) */
+ u16 ip_tos; /* v4 and v6 */
+ u16 ip4_checksum; /* needed for v4 only */
+
+ /* data saved from the vlib buffer header and restored for each replica */
+ i16 current_data; /* offset of first byte of packet in packet data */
+ u8 pad[6]; /* to 64B */
+ u8 l2_packet; /* flag for l2 vs l3 packet data */
+
+} replication_context_t; /* 128B */
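+
+/* The field sizes above sum to 128 bytes (two cache lines);
+   replication_init asserts this, so any new field must come out of
+   reserved[] or pad[]. */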
+
+
+typedef struct
+{
+
+ u32 recycle_list_index;
+
+ /* per-thread pools of replication contexts */
+ replication_context_t **contexts;
+
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+
+} replication_main_t;
+
+
+extern replication_main_t replication_main;
+
+
+/* Return 1 if this buffer just came from the replication recycle handler. */
+always_inline u32
+replication_is_recycled (vlib_buffer_t * b0)
+{
+ return b0->flags & VLIB_BUFFER_IS_RECYCLED;
+}
+
+/*
+ * Clear the recycle flag. If buffer came from the replication recycle
+ * handler, this flag must be cleared before the packet is transmitted again.
+ */
+always_inline void
+replication_clear_recycled (vlib_buffer_t * b0)
+{
+ b0->flags &= ~VLIB_BUFFER_IS_RECYCLED;
+}
+
+/*
+ * Return the active replication context if this buffer has
+ * been recycled, otherwise return 0. (Note that this essentially
+ * restricts access to the replication context to the replication
+ * feature's prep and recycle nodes.)
+ */
+always_inline replication_context_t *
+replication_get_ctx (vlib_buffer_t * b0)
+{
+ replication_main_t *rm = &replication_main;
+
+ return replication_is_recycled (b0) ?
+ pool_elt_at_index (rm->contexts[os_get_cpu_number ()],
+ b0->recycle_count) : 0;
+}
+
+/* Prefetch the replication context for this buffer, if it exists */
+always_inline void
+replication_prefetch_ctx (vlib_buffer_t * b0)
+{
+ replication_context_t *ctx = replication_get_ctx (b0);
+
+ if (ctx)
+ {
+ CLIB_PREFETCH (ctx, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+ }
+}
+
+replication_context_t *replication_prep (vlib_main_t * vm,
+ vlib_buffer_t * b0,
+ u32 recycle_node_index,
+ u32 l2_packet);
+
+replication_context_t *replication_recycle (vlib_main_t * vm,
+ vlib_buffer_t * b0, u32 is_last);
+
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/rewrite.c b/src/vnet/rewrite.c
new file mode 100644
index 00000000000..53d548bc8ae
--- /dev/null
+++ b/src/vnet/rewrite.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * rewrite.c: packet rewrite
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/ip/lookup.h>
+
+void
+vnet_rewrite_copy_slow_path (vnet_rewrite_data_t * p0,
+ vnet_rewrite_data_t * rw0,
+ word n_left, uword most_likely_size)
+{
+ uword n_done =
+ round_pow2 (most_likely_size, sizeof (rw0[0])) / sizeof (rw0[0]);
+
+ p0 -= n_done;
+ rw0 -= n_done;
+
+  /* As we enter the cleanup loop, p0 and rw0 point to the last chunk written
+     by the fast path.  Hence the constant 1, which the
+     vnet_rewrite_copy_one inline function renders as p0[-1] = rw0[-1]. */
+
+ while (n_left > 0)
+ {
+ vnet_rewrite_copy_one (p0, rw0, 1);
+ p0--;
+ rw0--;
+ n_left--;
+ }
+}
+
+u8 *
+format_vnet_rewrite (u8 * s, va_list * args)
+{
+ vlib_main_t *vm = va_arg (*args, vlib_main_t *);
+ vnet_rewrite_header_t *rw = va_arg (*args, vnet_rewrite_header_t *);
+ u32 max_data_bytes = va_arg (*args, u32);
+ CLIB_UNUSED (uword indent) = va_arg (*args, u32);
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_node_t *next;
+
+ next = vlib_get_next_node (vm, rw->node_index, rw->next_index);
+
+ if (rw->sw_if_index != ~0)
+ {
+ vnet_sw_interface_t *si;
+ si = vnet_get_sw_interface (vnm, rw->sw_if_index);
+ s = format (s, "%U: ", format_vnet_sw_interface_name, vnm, si);
+ }
+ else
+ s = format (s, "%v: ", next->name);
+
+ /* Format rewrite string. */
+ if (rw->data_bytes > 0)
+
+ s = format (s, "%U",
+ next->format_buffer ? next->format_buffer : format_hex_bytes,
+ rw->data + max_data_bytes - rw->data_bytes, rw->data_bytes);
+
+ return s;
+}
+
+u8 *
+format_vnet_rewrite_header (u8 * s, va_list * args)
+{
+ vlib_main_t *vm = va_arg (*args, vlib_main_t *);
+ vnet_rewrite_header_t *rw = va_arg (*args, vnet_rewrite_header_t *);
+ u8 *packet_data = va_arg (*args, u8 *);
+ u32 packet_data_bytes = va_arg (*args, u32);
+ vlib_node_t *next;
+
+ next = vlib_get_next_node (vm, rw->node_index, rw->next_index);
+
+ /* Format rewrite string. */
+ s = format (s, "%U",
+ next->format_buffer ? next->format_buffer : format_hex_bytes,
+ packet_data, packet_data_bytes);
+
+ return s;
+}
+
+uword
+unformat_vnet_rewrite (unformat_input_t * input, va_list * args)
+{
+ vlib_main_t *vm = va_arg (*args, vlib_main_t *);
+ vnet_rewrite_header_t *rw = va_arg (*args, vnet_rewrite_header_t *);
+ u32 max_data_bytes = va_arg (*args, u32);
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_node_t *next;
+ u32 next_index, sw_if_index, max_packet_bytes, error;
+ u8 *rw_data;
+
+ rw_data = 0;
+ sw_if_index = ~0;
+ max_packet_bytes = ~0;
+ error = 1;
+
+ /* Parse sw interface. */
+ if (unformat (input, "%U", unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ vnet_hw_interface_t *hi;
+
+ hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+
+ next_index = hi->output_node_index;
+ max_packet_bytes = hi->max_l3_packet_bytes[VLIB_RX];
+ }
+
+ else if (unformat (input, "%U", unformat_vlib_node, vm, &next_index))
+ ;
+
+ else
+ goto done;
+
+ next = vlib_get_node (vm, next_index);
+
+ if (next->unformat_buffer
+ && unformat_user (input, next->unformat_buffer, &rw_data))
+ ;
+
+ else if (unformat_user (input, unformat_hex_string, &rw_data)
+ || unformat (input, "0x%U", unformat_hex_string, &rw_data))
+ ;
+
+ else
+ goto done;
+
+  /* Rewrite string does not fit. */
+ if (vec_len (rw_data) >= max_data_bytes)
+ goto done;
+
+ {
+ u32 tmp;
+
+ if (unformat (input, "mtu %d", &tmp)
+ && tmp < (1 << BITS (rw->max_l3_packet_bytes)))
+ max_packet_bytes = tmp;
+ }
+
+ error = 0;
+ rw->sw_if_index = sw_if_index;
+ rw->max_l3_packet_bytes = max_packet_bytes;
+ rw->next_index = vlib_node_add_next (vm, rw->node_index, next_index);
+ vnet_rewrite_set_data_internal (rw, max_data_bytes, rw_data,
+ vec_len (rw_data));
+
+done:
+ vec_free (rw_data);
+ return error == 0;
+}
+
+u32
+vnet_tx_node_index_for_sw_interface (vnet_main_t * vnm, u32 sw_if_index)
+{
+ vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ return (hw->output_node_index);
+}
+
+void
+vnet_rewrite_init (vnet_main_t * vnm,
+ u32 sw_if_index,
+ u32 this_node, u32 next_node, vnet_rewrite_header_t * rw)
+{
+ rw->sw_if_index = sw_if_index;
+ rw->node_index = this_node;
+ rw->next_index = vlib_node_add_next (vnm->vlib_main, this_node, next_node);
+ rw->max_l3_packet_bytes =
+ vnet_sw_interface_get_mtu (vnm, sw_if_index, VLIB_TX);
+}
+
+void
+vnet_rewrite_for_sw_interface (vnet_main_t * vnm,
+ vnet_link_t link_type,
+ u32 sw_if_index,
+ u32 node_index,
+ void *dst_address,
+ vnet_rewrite_header_t * rw,
+ u32 max_rewrite_bytes)
+{
+
+ vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ vnet_hw_interface_class_t *hc =
+ vnet_get_hw_interface_class (vnm, hw->hw_class_index);
+ u8 *rewrite = NULL;
+
+ vnet_rewrite_init (vnm, sw_if_index, node_index,
+ vnet_tx_node_index_for_sw_interface (vnm, sw_if_index),
+ rw);
+
+ ASSERT (hc->build_rewrite);
+ rewrite = hc->build_rewrite (vnm, sw_if_index, link_type, dst_address);
+
+ ASSERT (vec_len (rewrite) < max_rewrite_bytes);
+ vnet_rewrite_set_data_internal (rw, max_rewrite_bytes, rewrite,
+ vec_len (rewrite));
+ vec_free (rewrite);
+}
+
+void
+vnet_rewrite_for_tunnel (vnet_main_t * vnm,
+ u32 tx_sw_if_index,
+ u32 rewrite_node_index,
+ u32 post_rewrite_node_index,
+ vnet_rewrite_header_t * rw,
+ u8 * rewrite_data, u32 rewrite_length)
+{
+ ip_adjacency_t *adj = 0;
+ /*
+ * Installed into vnet_buffer(b)->sw_if_index[VLIB_TX] e.g.
+ * by ip4_rewrite_inline. If the post-rewrite node injects into
+ * ipX-forward, this will be interpreted as a FIB number.
+ */
+ rw->sw_if_index = tx_sw_if_index;
+ rw->node_index = rewrite_node_index;
+ rw->next_index = vlib_node_add_next (vnm->vlib_main, rewrite_node_index,
+ post_rewrite_node_index);
+ rw->max_l3_packet_bytes = (u16) ~ 0; /* we can't know at this point */
+
+ ASSERT (rewrite_length < sizeof (adj->rewrite_data));
+ /* Leave room for ethernet + VLAN tag */
+ vnet_rewrite_set_data_internal (rw, sizeof (adj->rewrite_data),
+ rewrite_data, rewrite_length);
+}
+
+void
+serialize_vnet_rewrite (serialize_main_t * m, va_list * va)
+{
+ vnet_rewrite_header_t *rw = va_arg (*va, vnet_rewrite_header_t *);
+ u32 max_data_bytes = va_arg (*va, u32);
+ u8 *p;
+
+ serialize_integer (m, rw->sw_if_index, sizeof (rw->sw_if_index));
+ serialize_integer (m, rw->data_bytes, sizeof (rw->data_bytes));
+ serialize_integer (m, rw->max_l3_packet_bytes,
+ sizeof (rw->max_l3_packet_bytes));
+ p = serialize_get (m, rw->data_bytes);
+ clib_memcpy (p, vnet_rewrite_get_data_internal (rw, max_data_bytes),
+ rw->data_bytes);
+}
+
+void
+unserialize_vnet_rewrite (serialize_main_t * m, va_list * va)
+{
+ vnet_rewrite_header_t *rw = va_arg (*va, vnet_rewrite_header_t *);
+ u32 max_data_bytes = va_arg (*va, u32);
+ u8 *p;
+
+ /* It is up to user to fill these in. */
+ rw->node_index = ~0;
+ rw->next_index = ~0;
+
+ unserialize_integer (m, &rw->sw_if_index, sizeof (rw->sw_if_index));
+ unserialize_integer (m, &rw->data_bytes, sizeof (rw->data_bytes));
+ unserialize_integer (m, &rw->max_l3_packet_bytes,
+ sizeof (rw->max_l3_packet_bytes));
+ p = unserialize_get (m, rw->data_bytes);
+ clib_memcpy (vnet_rewrite_get_data_internal (rw, max_data_bytes), p,
+ rw->data_bytes);
+}
+
+u8 *
+vnet_build_rewrite_for_sw_interface (vnet_main_t * vnm,
+ u32 sw_if_index,
+ vnet_link_t link_type,
+ const void *dst_address)
+{
+ vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ vnet_hw_interface_class_t *hc =
+ vnet_get_hw_interface_class (vnm, hw->hw_class_index);
+
+ ASSERT (hc->build_rewrite);
+ return (hc->build_rewrite (vnm, sw_if_index, link_type, dst_address));
+}
+
+
+void
+vnet_update_adjacency_for_sw_interface (vnet_main_t * vnm,
+ u32 sw_if_index, u32 ai)
+{
+ vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ vnet_hw_interface_class_t *hc =
+ vnet_get_hw_interface_class (vnm, hw->hw_class_index);
+
+ ASSERT (hc->update_adjacency);
+ hc->update_adjacency (vnm, sw_if_index, ai);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/rewrite.h b/src/vnet/rewrite.h
new file mode 100644
index 00000000000..00c1efbdc5e
--- /dev/null
+++ b/src/vnet/rewrite.h
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * rewrite.h: packet rewrite
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_vnet_rewrite_h
+#define included_vnet_rewrite_h
+
+#include <vlib/vlib.h>
+#include <vnet/l3_types.h>
+
+/* Consider using vector types for speed? */
+typedef uword vnet_rewrite_data_t;
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ /* Interface to mark re-written packets with. */
+ u32 sw_if_index;
+
+ /* Packet processing node where rewrite happens. */
+ u32 node_index;
+
+ /* Next node to feed after packet rewrite is done. */
+ u16 next_index;
+
+ /* Number of bytes in rewrite data. */
+ u16 data_bytes;
+
+ /* Max packet size layer 3 (MTU) for output interface.
+ Used for MTU check after packet rewrite. */
+ u16 max_l3_packet_bytes;
+
+ /* Rewrite string starting at end and going backwards. */
+ u8 data[0];
+}) vnet_rewrite_header_t;
+/* *INDENT-ON* */
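+
+/*
+ * The rewrite string is right-aligned within data[]: for max_size bytes
+ * of space and data_bytes of content, the content lives at
+ * data[max_size - data_bytes] .. data[max_size - 1].  Storing it at the
+ * end lets the copy routines below write it immediately in front of
+ * vlib_buffer_get_current() without first computing its length.
+ */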
+
+/*
+  Helper macro for declaring a rewrite string with a given maximum size.
+
+  Typical usage:
+    typedef struct {
+      int a, b;
+
+      // Total adjacency is 64 bytes.
+      vnet_declare_rewrite (64 - 2*sizeof(int)) rw;
+ } my_adjacency_t;
+*/
+#define vnet_declare_rewrite(total_bytes) \
+struct { \
+ vnet_rewrite_header_t rewrite_header; \
+ \
+ u8 rewrite_data[(total_bytes) - sizeof (vnet_rewrite_header_t)]; \
+}
+
+always_inline void
+vnet_rewrite_clear_data_internal (vnet_rewrite_header_t * rw, int max_size)
+{
+ /* Sanity check values carefully for this memset operation */
+ ASSERT ((max_size > 0) && (max_size < VLIB_BUFFER_PRE_DATA_SIZE));
+
+ rw->data_bytes = 0;
+ memset (rw->data, 0xfe, max_size);
+}
+
+always_inline void
+vnet_rewrite_set_data_internal (vnet_rewrite_header_t * rw,
+ int max_size, void *data, int data_bytes)
+{
+ /* Sanity check values carefully for this memset operation */
+ ASSERT ((max_size > 0) && (max_size < VLIB_BUFFER_PRE_DATA_SIZE));
+ ASSERT ((data_bytes >= 0) && (data_bytes < max_size));
+
+ rw->data_bytes = data_bytes;
+ clib_memcpy (rw->data + max_size - data_bytes, data, data_bytes);
+ memset (rw->data, 0xfe, max_size - data_bytes);
+}
+
+#define vnet_rewrite_set_data(rw,data,data_bytes) \
+ vnet_rewrite_set_data_internal (&((rw).rewrite_header), \
+ sizeof ((rw).rewrite_data), \
+ (data), \
+ (data_bytes))
+
+always_inline void *
+vnet_rewrite_get_data_internal (vnet_rewrite_header_t * rw, int max_size)
+{
+ ASSERT (rw->data_bytes <= max_size);
+ return rw->data + max_size - rw->data_bytes;
+}
+
+#define vnet_rewrite_get_data(rw) \
+ vnet_rewrite_get_data_internal (&((rw).rewrite_header), sizeof ((rw).rewrite_data))
+
+always_inline void
+vnet_rewrite_copy_one (vnet_rewrite_data_t * p0, vnet_rewrite_data_t * rw0,
+ int i)
+{
+ p0[-i] = rw0[-i];
+}
+
+void vnet_rewrite_copy_slow_path (vnet_rewrite_data_t * p0,
+ vnet_rewrite_data_t * rw0,
+ word n_left, uword most_likely_size);
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u64 a;
+ u32 b;
+ u16 c;
+}) eh_copy_t;
+/* *INDENT-ON* */
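+
+/* eh_copy_t is 14 bytes (u64 + u32 + u16), the size of an untagged
+   ethernet header; the common case below is copied with fixed-size
+   loads/stores instead of the generic word-at-a-time loop. */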
+
+always_inline void
+_vnet_rewrite_one_header (vnet_rewrite_header_t * h0,
+ void *packet0, int max_size, int most_likely_size)
+{
+ vnet_rewrite_data_t *p0 = packet0;
+ vnet_rewrite_data_t *rw0 = (vnet_rewrite_data_t *) (h0->data + max_size);
+ word n_left0;
+
+ /* 0xfefe => poisoned adjacency => crash */
+ ASSERT (h0->data_bytes != 0xfefe);
+
+ if (PREDICT_TRUE (h0->data_bytes == sizeof (eh_copy_t)))
+ {
+ eh_copy_t *s, *d;
+ s = (eh_copy_t *) (h0->data + max_size - sizeof (eh_copy_t));
+ d = (eh_copy_t *) (((u8 *) packet0) - sizeof (eh_copy_t));
+ clib_memcpy (d, s, sizeof (eh_copy_t));
+ return;
+ }
+
+
+#define _(i) \
+ do { \
+ if (most_likely_size > ((i)-1)*sizeof (vnet_rewrite_data_t)) \
+ vnet_rewrite_copy_one (p0, rw0, (i)); \
+ } while (0)
+
+ _(4);
+ _(3);
+ _(2);
+ _(1);
+
+#undef _
+
+ n_left0 = (int)
+ (((int) h0->data_bytes - most_likely_size) + (sizeof (rw0[0]) - 1))
+ / (int) sizeof (rw0[0]);
+ if (PREDICT_FALSE (n_left0 > 0))
+ vnet_rewrite_copy_slow_path (p0, rw0, n_left0, most_likely_size);
+}
+
+always_inline void
+_vnet_rewrite_two_headers (vnet_rewrite_header_t * h0,
+ vnet_rewrite_header_t * h1,
+ void *packet0,
+ void *packet1, int max_size, int most_likely_size)
+{
+ vnet_rewrite_data_t *p0 = packet0;
+ vnet_rewrite_data_t *p1 = packet1;
+ vnet_rewrite_data_t *rw0 = (vnet_rewrite_data_t *) (h0->data + max_size);
+ vnet_rewrite_data_t *rw1 = (vnet_rewrite_data_t *) (h1->data + max_size);
+ word n_left0, n_left1;
+ int slow_path;
+
+ /* 0xfefe => poisoned adjacency => crash */
+ ASSERT (h0->data_bytes != 0xfefe);
+ ASSERT (h1->data_bytes != 0xfefe);
+
+  /* slow_path == 0 iff h0->data_bytes == h1->data_bytes == sizeof (eh_copy_t) */
+ slow_path = h0->data_bytes ^ h1->data_bytes;
+ slow_path += h0->data_bytes ^ sizeof (eh_copy_t);
+
+ if (PREDICT_TRUE (slow_path == 0))
+ {
+ eh_copy_t *s0, *d0, *s1, *d1;
+ s0 = (eh_copy_t *) (h0->data + max_size - sizeof (eh_copy_t));
+ d0 = (eh_copy_t *) (((u8 *) packet0) - sizeof (eh_copy_t));
+ clib_memcpy (d0, s0, sizeof (eh_copy_t));
+ s1 = (eh_copy_t *) (h1->data + max_size - sizeof (eh_copy_t));
+ d1 = (eh_copy_t *) (((u8 *) packet1) - sizeof (eh_copy_t));
+ clib_memcpy (d1, s1, sizeof (eh_copy_t));
+ return;
+ }
+
+#define _(i) \
+ do { \
+ if (most_likely_size > ((i)-1)*sizeof (vnet_rewrite_data_t)) \
+ { \
+ vnet_rewrite_copy_one (p0, rw0, (i)); \
+ vnet_rewrite_copy_one (p1, rw1, (i)); \
+ } \
+ } while (0)
+
+ _(4);
+ _(3);
+ _(2);
+ _(1);
+
+#undef _
+
+ n_left0 = (int)
+ (((int) h0->data_bytes - most_likely_size) + (sizeof (rw0[0]) - 1))
+ / (int) sizeof (rw0[0]);
+ n_left1 = (int)
+ (((int) h1->data_bytes - most_likely_size) + (sizeof (rw1[0]) - 1))
+ / (int) sizeof (rw1[0]);
+
+ if (PREDICT_FALSE (n_left0 > 0 || n_left1 > 0))
+ {
+ vnet_rewrite_copy_slow_path (p0, rw0, n_left0, most_likely_size);
+ vnet_rewrite_copy_slow_path (p1, rw1, n_left1, most_likely_size);
+ }
+}
+
+#define vnet_rewrite_one_header(rw0,p0,most_likely_size) \
+ _vnet_rewrite_one_header (&((rw0).rewrite_header), (p0), \
+ sizeof ((rw0).rewrite_data), \
+ (most_likely_size))
+
+#define vnet_rewrite_two_headers(rw0,rw1,p0,p1,most_likely_size) \
+ _vnet_rewrite_two_headers (&((rw0).rewrite_header), &((rw1).rewrite_header), \
+ (p0), (p1), \
+ sizeof ((rw0).rewrite_data), \
+ (most_likely_size))
+
+#define VNET_REWRITE_FOR_SW_INTERFACE_ADDRESS_BROADCAST ((void *) 0)
+/** Deprecated */
+void vnet_rewrite_for_sw_interface (struct vnet_main_t *vnm,
+ vnet_link_t packet_type,
+ u32 sw_if_index,
+ u32 node_index,
+ void *dst_address,
+ vnet_rewrite_header_t * rw,
+ u32 max_rewrite_bytes);
+
+u32 vnet_tx_node_index_for_sw_interface (struct vnet_main_t *vnm,
+ u32 sw_if_index);
+
+void vnet_rewrite_init (struct vnet_main_t *vnm,
+ u32 sw_if_index,
+ u32 this_node,
+ u32 next_node, vnet_rewrite_header_t * rw);
+
+u8 *vnet_build_rewrite_for_sw_interface (struct vnet_main_t *vnm,
+ u32 sw_if_index,
+ vnet_link_t packet_type,
+ const void *dst_address);
+void vnet_update_adjacency_for_sw_interface (struct vnet_main_t *vnm,
+ u32 sw_if_index, u32 ai);
+
+/* Parser for unformat header & rewrite string. */
+unformat_function_t unformat_vnet_rewrite;
+
+format_function_t format_vnet_rewrite;
+format_function_t format_vnet_rewrite_header;
+
+serialize_function_t serialize_vnet_rewrite, unserialize_vnet_rewrite;
+
+#endif /* included_vnet_rewrite_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/snap/node.c b/src/vnet/snap/node.c
new file mode 100644
index 00000000000..884ff32444b
--- /dev/null
+++ b/src/vnet/snap/node.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * snap_node.c: snap packet processing
+ *
+ * Copyright (c) 2010 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/llc/llc.h>
+#include <vnet/snap/snap.h>
+
+typedef enum
+{
+ SNAP_INPUT_NEXT_DROP,
+ SNAP_INPUT_NEXT_PUNT,
+ SNAP_INPUT_NEXT_ETHERNET_TYPE,
+ SNAP_INPUT_N_NEXT,
+} snap_input_next_t;
+
+typedef struct
+{
+ u8 packet_data[32];
+} snap_input_trace_t;
+
+static u8 *
+format_snap_input_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ snap_input_trace_t *t = va_arg (*va, snap_input_trace_t *);
+
+ s = format (s, "%U", format_snap_header, t->packet_data);
+
+ return s;
+}
+
+static uword
+snap_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ snap_main_t *sm = &snap_main;
+ u32 n_left_from, next_index, *from, *to_next;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node,
+ from,
+ n_left_from,
+ sizeof (from[0]),
+ sizeof (snap_input_trace_t));
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ snap_header_t *h0, *h1;
+ snap_protocol_info_t *pi0, *pi1;
+ u8 next0, next1, is_ethernet0, is_ethernet1, len0, len1,
+ enqueue_code;
+ u32 oui0, oui1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *b2, *b3;
+
+ b2 = vlib_get_buffer (vm, from[2]);
+ b3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (b2, LOAD);
+ vlib_prefetch_buffer_header (b3, LOAD);
+
+ CLIB_PREFETCH (b2->data, sizeof (h0[0]), LOAD);
+ CLIB_PREFETCH (b3->data, sizeof (h1[0]), LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ h0 = (void *) (b0->data + b0->current_data);
+ h1 = (void *) (b1->data + b1->current_data);
+
+ oui0 = snap_header_get_oui (h0);
+ oui1 = snap_header_get_oui (h1);
+
+ is_ethernet0 = oui0 == IEEE_OUI_ethernet;
+ is_ethernet1 = oui1 == IEEE_OUI_ethernet;
+
+ len0 = sizeof (h0[0]) - (is_ethernet0 ? sizeof (h0->protocol) : 0);
+ len1 = sizeof (h1[0]) - (is_ethernet1 ? sizeof (h1->protocol) : 0);
+
+ b0->current_data += len0;
+ b1->current_data += len1;
+
+ b0->current_length -= len0;
+ b1->current_length -= len1;
+
+ pi0 = snap_get_protocol_info (sm, h0);
+ pi1 = snap_get_protocol_info (sm, h1);
+
+ next0 = pi0 ? pi0->next_index : SNAP_INPUT_NEXT_DROP;
+ next1 = pi1 ? pi1->next_index : SNAP_INPUT_NEXT_DROP;
+
+ next0 = is_ethernet0 ? SNAP_INPUT_NEXT_ETHERNET_TYPE : next0;
+ next1 = is_ethernet1 ? SNAP_INPUT_NEXT_ETHERNET_TYPE : next1;
+
+ /* In case of error. */
+ b0->error = node->errors[SNAP_ERROR_UNKNOWN_PROTOCOL];
+ b1->error = node->errors[SNAP_ERROR_UNKNOWN_PROTOCOL];
+
+ enqueue_code = (next0 != next_index) + 2 * (next1 != next_index);
+
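+	  /* enqueue_code: bit 0 set if b0 needs a different next node,
+	     bit 1 set if b1 does; 0 means both can stay in the current
+	     frame. */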
+ if (PREDICT_FALSE (enqueue_code != 0))
+ {
+ switch (enqueue_code)
+ {
+ case 1:
+ /* A B A */
+ to_next[-2] = bi1;
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node, next0, bi0);
+ break;
+
+ case 2:
+ /* A A B */
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node, next1, bi1);
+ break;
+
+ case 3:
+ /* A B B or A B C */
+ to_next -= 2;
+ n_left_to_next += 2;
+ vlib_set_next_frame_buffer (vm, node, next0, bi0);
+ vlib_set_next_frame_buffer (vm, node, next1, bi1);
+ if (next0 == next1)
+ {
+ vlib_put_next_frame (vm, node, next_index,
+ n_left_to_next);
+ next_index = next1;
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+ }
+ }
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ snap_header_t *h0;
+ snap_protocol_info_t *pi0;
+ u8 next0, is_ethernet0, len0;
+ u32 oui0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ h0 = (void *) (b0->data + b0->current_data);
+
+ oui0 = snap_header_get_oui (h0);
+
+ is_ethernet0 = oui0 == IEEE_OUI_ethernet;
+
+ len0 = sizeof (h0[0]) - (is_ethernet0 ? sizeof (h0->protocol) : 0);
+
+ b0->current_data += len0;
+
+ b0->current_length -= len0;
+
+ pi0 = snap_get_protocol_info (sm, h0);
+
+ next0 = pi0 ? pi0->next_index : SNAP_INPUT_NEXT_DROP;
+
+ next0 = is_ethernet0 ? SNAP_INPUT_NEXT_ETHERNET_TYPE : next0;
+
+ /* In case of error. */
+ b0->error = node->errors[SNAP_ERROR_UNKNOWN_PROTOCOL];
+
+ /* Sent packet to wrong next? */
+ if (PREDICT_FALSE (next0 != next_index))
+ {
+ /* Return old frame; remove incorrectly enqueued packet. */
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1);
+
+ /* Send to correct next. */
+ next_index = next0;
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ to_next[0] = bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return from_frame->n_vectors;
+}
+
+static char *snap_error_strings[] = {
+#define _(f,s) s,
+ foreach_snap_error
+#undef _
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (snap_input_node) = {
+ .function = snap_input,
+ .name = "snap-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_errors = SNAP_N_ERROR,
+ .error_strings = snap_error_strings,
+
+ .n_next_nodes = SNAP_INPUT_N_NEXT,
+ .next_nodes = {
+ [SNAP_INPUT_NEXT_DROP] = "error-drop",
+ [SNAP_INPUT_NEXT_PUNT] = "error-punt",
+ [SNAP_INPUT_NEXT_ETHERNET_TYPE] = "ethernet-input-type",
+ },
+
+ .format_buffer = format_snap_header_with_length,
+ .format_trace = format_snap_input_trace,
+ .unformat_buffer = unformat_snap_header,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+snap_input_init (vlib_main_t * vm)
+{
+ {
+ clib_error_t *error = vlib_call_init_function (vm, snap_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ snap_setup_node (vm, snap_input_node.index);
+
+ llc_register_input_protocol (vm, LLC_PROTOCOL_snap, snap_input_node.index);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (snap_input_init);
+
+void
+snap_register_input_protocol (vlib_main_t * vm,
+ char *name,
+ u32 ieee_oui, u16 protocol, u32 node_index)
+{
+ snap_main_t *sm = &snap_main;
+ snap_protocol_info_t *pi;
+ snap_header_t h;
+ snap_oui_and_protocol_t key;
+
+ {
+ clib_error_t *error = vlib_call_init_function (vm, snap_input_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ h.protocol = clib_host_to_net_u16 (protocol);
+ h.oui[0] = (ieee_oui >> 16) & 0xff;
+ h.oui[1] = (ieee_oui >> 8) & 0xff;
+ h.oui[2] = (ieee_oui >> 0) & 0xff;
+ pi = snap_get_protocol_info (sm, &h);
+ if (pi)
+ return;
+
+ vec_add2 (sm->protocols, pi, 1);
+
+ pi->name = format (0, "%s", name);
+ pi->node_index = node_index;
+ pi->next_index = vlib_node_add_next (vm, snap_input_node.index, node_index);
+
+ key.oui = ieee_oui;
+ key.protocol = clib_host_to_net_u16 (protocol);
+
+ mhash_set (&sm->protocol_hash, &key, pi - sm->protocols, /* old_value */ 0);
+ hash_set_mem (sm->protocol_info_by_name, name, pi - sm->protocols);
+}
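+
+/* Usage sketch (illustrative, not part of this file): a node handling
+   SNAP-encapsulated CDP frames -- say one registered as cdp_input_node
+   -- could bind itself to snap-input with
+
+     snap_register_input_protocol (vm, "cdp-input",
+				   IEEE_OUI_cisco, SNAP_cisco_cdp,
+				   cdp_input_node.index);
+
+   after which snap-input dispatches packets carrying that OUI/protocol
+   pair straight to that node. */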
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/snap/pg.c b/src/vnet/snap/pg.c
new file mode 100644
index 00000000000..aad125f35b6
--- /dev/null
+++ b/src/vnet/snap/pg.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * snap_pg.c: packet generator snap interface
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/snap/snap.h>
+
+typedef struct
+{
+ pg_edit_t oui;
+ pg_edit_t protocol;
+} pg_snap_header_t;
+
+static inline void
+pg_snap_header_init (pg_snap_header_t * e)
+{
+ pg_edit_init (&e->oui, snap_header_t, oui);
+ pg_edit_init (&e->protocol, snap_header_t, protocol);
+}
+
+uword
+unformat_pg_snap_header (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t *s = va_arg (*args, pg_stream_t *);
+ pg_snap_header_t *h;
+ u32 group_index, error;
+
+ h = pg_create_edit_group (s, sizeof (h[0]), sizeof (snap_header_t),
+ &group_index);
+ pg_snap_header_init (h);
+
+ error = 1;
+ if (!unformat (input, "%U -> %U",
+ unformat_pg_edit,
+ unformat_snap_protocol, &h->oui, &h->protocol))
+ goto done;
+
+ {
+ snap_main_t *pm = &snap_main;
+ snap_protocol_info_t *pi = 0;
+ pg_node_t *pg_node = 0;
+
+ if (h->oui.type == PG_EDIT_FIXED && h->protocol.type == PG_EDIT_FIXED)
+ {
+ u8 *o = h->oui.values[PG_EDIT_LO];
+ u8 *p = h->protocol.values[PG_EDIT_LO];
+ snap_header_t h;
+
+ h.oui[0] = o[0];
+ h.oui[1] = o[1];
+ h.oui[2] = o[2];
+ h.protocol = *(u16 *) p;
+ pi = snap_get_protocol_info (pm, &h);
+ if (pi && pi->node_index != ~0)
+ pg_node = pg_get_node (pi->node_index);
+ }
+
+ if (pg_node && pg_node->unformat_edit
+ && unformat_user (input, pg_node->unformat_edit, s))
+ ;
+
+ else if (!unformat_user (input, unformat_pg_payload, s))
+ goto done;
+ }
+
+ error = 0;
+done:
+ if (error)
+ pg_free_edit_group (s);
+ return error == 0;
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/snap/snap.c b/src/vnet/snap/snap.c
new file mode 100644
index 00000000000..64482bfcfbb
--- /dev/null
+++ b/src/vnet/snap/snap.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * snap.c: snap support
+ *
+ * Copyright (c) 2010 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/snap/snap.h>
+#include <vnet/ethernet/ethernet.h>
+
+/* Global main structure. */
+snap_main_t snap_main;
+
+static u8 *
+format_cisco_snap_protocol (u8 * s, va_list * args)
+{
+ snap_header_t *h = va_arg (*args, snap_header_t *);
+ u16 protocol = clib_net_to_host_u16 (h->protocol);
+ char *t = 0;
+ switch (protocol)
+ {
+#define _(n,f) case n: t = #f; break;
+ foreach_snap_cisco_protocol;
+#undef _
+ default:
+ break;
+ }
+ if (t)
+ return format (s, "%s", t);
+ else
+ return format (s, "unknown 0x%x", protocol);
+}
+
+u8 *
+format_snap_protocol (u8 * s, va_list * args)
+{
+ snap_header_t *h = va_arg (*args, snap_header_t *);
+ u32 oui = snap_header_get_oui (h);
+ u16 protocol = clib_net_to_host_u16 (h->protocol);
+
+ switch (oui)
+ {
+ case IEEE_OUI_ethernet:
+ return format (s, "ethernet %U", format_ethernet_type, h->protocol);
+
+ case IEEE_OUI_cisco:
+ return format (s, "cisco %U", format_cisco_snap_protocol, h);
+
+ default:
+ return format (s, "oui 0x%06x 0x%04x", oui, protocol);
+ }
+}
+
+u8 *
+format_snap_header_with_length (u8 * s, va_list * args)
+{
+ snap_main_t *sm = &snap_main;
+ snap_header_t *h = va_arg (*args, snap_header_t *);
+ snap_protocol_info_t *pi = snap_get_protocol_info (sm, h);
+ u32 max_header_bytes = va_arg (*args, u32);
+ uword indent, header_bytes;
+
+ header_bytes = sizeof (h[0]);
+ if (max_header_bytes != 0 && header_bytes > max_header_bytes)
+ return format (s, "snap header truncated");
+
+ indent = format_get_indent (s);
+
+ s = format (s, "SNAP %U", format_snap_protocol, h);
+
+ if (max_header_bytes != 0 && header_bytes > max_header_bytes && pi != 0)
+ {
+ vlib_node_t *node = vlib_get_node (sm->vlib_main, pi->node_index);
+ if (node->format_buffer)
+ s = format (s, "\n%U%U",
+ format_white_space, indent,
+ node->format_buffer, (void *) (h + 1),
+ max_header_bytes - header_bytes);
+ }
+
+ return s;
+}
+
+u8 *
+format_snap_header (u8 * s, va_list * args)
+{
+ snap_header_t *h = va_arg (*args, snap_header_t *);
+ return format (s, "%U", format_snap_header_with_length, h, 0);
+}
+
+/* Parses an OUI/protocol pair, given either numerically or as a
+   registered protocol name, and writes it into the supplied header. */
+uword
+unformat_snap_protocol (unformat_input_t * input, va_list * args)
+{
+ snap_header_t *result = va_arg (*args, snap_header_t *);
+ snap_main_t *sm = &snap_main;
+ snap_oui_and_protocol_t p;
+ u32 i;
+
+ /* Numeric type. */
+ if (unformat (input, "0x%x 0x%x", &p.oui, &p.protocol))
+ {
+ if (p.oui >= (1 << 24))
+ return 0;
+ if (p.protocol >= (1 << 16))
+ return 0;
+ }
+
+ /* Named type. */
+ else if (unformat_user (input, unformat_vlib_number_by_name,
+ sm->protocol_info_by_name, &i))
+ {
+ snap_protocol_info_t *pi = vec_elt_at_index (sm->protocols, i);
+ p = pi->oui_and_protocol;
+ }
+
+ else
+ return 0;
+
+ snap_header_set_protocol (result, &p);
+ return 1;
+}
+
+uword
+unformat_snap_header (unformat_input_t * input, va_list * args)
+{
+ u8 **result = va_arg (*args, u8 **);
+ snap_header_t _h, *h = &_h;
+
+ if (!unformat (input, "%U", unformat_snap_protocol, h))
+ return 0;
+
+ /* Add header to result. */
+ {
+ void *p;
+ u32 n_bytes = sizeof (h[0]);
+
+ vec_add2 (*result, p, n_bytes);
+ clib_memcpy (p, h, n_bytes);
+ }
+
+ return 1;
+}
+
+static clib_error_t *
+snap_init (vlib_main_t * vm)
+{
+ snap_main_t *sm = &snap_main;
+
+ memset (sm, 0, sizeof (sm[0]));
+ sm->vlib_main = vm;
+
+ mhash_init (&sm->protocol_hash, sizeof (uword),
+ sizeof (snap_oui_and_protocol_t));
+
+ sm->protocol_info_by_name
+ = hash_create_string ( /* elts */ 0, sizeof (uword));
+
+ return vlib_call_init_function (vm, snap_input_init);
+}
+
+VLIB_INIT_FUNCTION (snap_init);
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/snap/snap.h b/src/vnet/snap/snap.h
new file mode 100644
index 00000000000..dbba15bf3f7
--- /dev/null
+++ b/src/vnet/snap/snap.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * snap.h: SNAP definitions
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_snap_h
+#define included_snap_h
+
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+
+#define foreach_ieee_oui \
+ _ (0x000000, ethernet) \
+ _ (0x00000c, cisco)
+
+typedef enum
+{
+#define _(n,f) IEEE_OUI_##f = n,
+ foreach_ieee_oui
+#undef _
+} ieee_oui_t;
+
+#define foreach_snap_cisco_protocol \
+ _ (0x0102, drip) \
+ _ (0x0104, port_aggregation_protocol) \
+ _ (0x0105, mls_hello) \
+ _ (0x010b, per_vlan_spanning_tree) \
+ _ (0x010c, vlan_bridge) \
+ _ (0x0111, unidirectional_link_detection) \
+ _ (0x2000, cdp) \
+ _ (0x2001, cgmp) \
+ _ (0x2003, vtp) \
+ _ (0x2004, dtp) \
+ _ (0x200a, stp_uplink_fast)
+
+typedef enum
+{
+#define _(n,f) SNAP_cisco_##f = n,
+ foreach_snap_cisco_protocol
+#undef _
+} snap_cisco_protocol_t;
+
+typedef union
+{
+ /* *INDENT-OFF* */
+ CLIB_PACKED (struct {
+ /* OUI: organization unique identifier. */
+ u8 oui[3];
+
+ /* Per-OUI protocol. */
+ u16 protocol;
+ });
+ /* *INDENT-ON* */
+
+ u8 as_u8[5];
+} snap_header_t;
+
+typedef struct
+{
+ u32 oui;
+ u32 protocol;
+} snap_oui_and_protocol_t;
+
+typedef struct
+{
+ /* Name vector string. */
+ u8 *name;
+
+ snap_oui_and_protocol_t oui_and_protocol;
+
+ /* Node which handles this type. */
+ u32 node_index;
+
+ /* snap-input next index for this type. */
+ u32 next_index;
+} snap_protocol_info_t;
+
+always_inline void
+snap_header_set_protocol (snap_header_t * h, snap_oui_and_protocol_t * p)
+{
+ u16 protocol = p->protocol;
+ u32 oui = p->oui;
+ h->protocol = clib_host_to_net_u16 (protocol);
+ h->oui[0] = (oui >> 16) & 0xff;
+ h->oui[1] = (oui >> 8) & 0xff;
+ h->oui[2] = (oui >> 0) & 0xff;
+}
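+
+/* For example, oui = IEEE_OUI_cisco (0x00000c) with protocol 0x2000
+   (SNAP_cisco_cdp) yields the on-wire bytes 00 00 0c 20 00: the OUI
+   most significant byte first, then the protocol in network order. */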
+
+#define foreach_snap_error \
+ _ (NONE, "no error") \
+ _ (UNKNOWN_PROTOCOL, "unknown oui/snap protocol")
+
+typedef enum
+{
+#define _(f,s) SNAP_ERROR_##f,
+ foreach_snap_error
+#undef _
+ SNAP_N_ERROR,
+} snap_error_t;
+
+typedef struct
+{
+ vlib_main_t *vlib_main;
+
+ /* Vector of known SNAP oui/protocol pairs. */
+ snap_protocol_info_t *protocols;
+
+ /* Hash table mapping oui/protocol to protocol index. */
+ mhash_t protocol_hash;
+
+ /* Hash table mapping protocol by name. */
+ uword *protocol_info_by_name;
+} snap_main_t;
+
+always_inline u32
+snap_header_get_oui (snap_header_t * h)
+{
+ return (h->oui[0] << 16) | (h->oui[1] << 8) | h->oui[2];
+}
+
+always_inline snap_protocol_info_t *
+snap_get_protocol_info (snap_main_t * sm, snap_header_t * h)
+{
+ snap_oui_and_protocol_t key;
+ uword *p;
+
+ key.oui = snap_header_get_oui (h);
+ key.protocol = h->protocol;
+
+ p = mhash_get (&sm->protocol_hash, &key);
+ return p ? vec_elt_at_index (sm->protocols, p[0]) : 0;
+}
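+
+/* For instance, a header carrying the zero (ethernet) OUI and protocol
+   0x0800 returns the entry for that pair if one was registered via
+   snap_register_input_protocol(); unknown pairs return 0. */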
+
+extern snap_main_t snap_main;
+
+/* Register given node index to take input for given snap type. */
+void
+snap_register_input_protocol (vlib_main_t * vm,
+ char *name,
+ u32 ieee_oui, u16 protocol, u32 node_index);
+
+void snap_set_adjacency (vnet_rewrite_header_t * rw,
+ uword max_data_bytes, u32 ieee_oui, u16 protocol);
+
+format_function_t format_snap_protocol;
+format_function_t format_snap_header;
+format_function_t format_snap_header_with_length;
+
+/* Parse snap protocol as 0xXXXX or protocol name. */
+unformat_function_t unformat_snap_protocol;
+
+/* Parse snap header. */
+unformat_function_t unformat_snap_header;
+unformat_function_t unformat_pg_snap_header;
+
+always_inline void
+snap_setup_node (vlib_main_t * vm, u32 node_index)
+{
+ vlib_node_t *n = vlib_get_node (vm, node_index);
+ pg_node_t *pn = pg_get_node (node_index);
+
+ n->format_buffer = format_snap_header_with_length;
+ n->unformat_buffer = unformat_snap_header;
+ pn->unformat_edit = unformat_pg_snap_header;
+}
+
+#endif /* included_snap_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/span/node.c b/src/vnet/span/node.c
new file mode 100644
index 00000000000..50d642c2f8b
--- /dev/null
+++ b/src/vnet/span/node.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+
+#include <vnet/span/span.h>
+
+#include <vppinfra/elog.h>
+
+/* packet trace format function */
+u8 *
+format_span_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ span_trace_t *t = va_arg (*args, span_trace_t *);
+
+ vnet_main_t *vnm = &vnet_main;
+ s = format (s, "SPAN: mirrored %U -> %U",
+ format_vnet_sw_if_index_name, vnm, t->src_sw_if_index,
+ format_vnet_sw_if_index_name, vnm, t->mirror_sw_if_index);
+
+ return s;
+}
+
+#define foreach_span_error \
+_(HITS, "SPAN mirrored packets processed")
+
+typedef enum
+{
+#define _(sym,str) SPAN_ERROR_##sym,
+ foreach_span_error
+#undef _
+ SPAN_N_ERROR,
+} span_error_t;
+
+static char *span_error_strings[] = {
+#define _(sym,string) string,
+ foreach_span_error
+#undef _
+};
+
+/* Make one copy of b0 per configured mirror port and enqueue it to the
+   corresponding interface output frame.  Returns the number of copies
+   made so the caller can account for them. */
+static_always_inline u32
+span_mirror (vlib_main_t * vm, span_interface_t * si0, vlib_buffer_t * b0,
+	     vlib_frame_t ** mirror_frames, int is_rx)
+{
+  vlib_buffer_t *c0;
+  vnet_main_t *vnm = &vnet_main;
+  u32 *to_mirror_next = 0;
+  u32 i;
+  u32 n_copies = 0;
+
+  if (is_rx != 0 && si0->num_rx_mirror_ports == 0)
+    return 0;
+
+  if (is_rx == 0 && si0->num_tx_mirror_ports == 0)
+    return 0;
+
+  /* Don't mirror a packet that is itself a SPAN copy. */
+  if (PREDICT_FALSE (b0->flags & VNET_BUFFER_SPAN_CLONE))
+    return 0;
+
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (i, is_rx ? si0->rx_mirror_ports : si0->tx_mirror_ports, (
+ {
+ if (mirror_frames[i] == 0)
+ mirror_frames[i] = vnet_get_frame_to_sw_interface (vnm, i);
+ to_mirror_next = vlib_frame_vector_args (mirror_frames[i]);
+ to_mirror_next += mirror_frames[i]->n_vectors;
+ c0 = vlib_buffer_copy (vm, b0);
+ vnet_buffer (c0)->sw_if_index[VLIB_TX] = i;
+ c0->flags |= VNET_BUFFER_SPAN_CLONE;
+ to_mirror_next[0] = vlib_get_buffer_index (vm, c0);
+    mirror_frames[i]->n_vectors++;
+    n_copies++;
+  }));
+  /* *INDENT-ON* */
+
+  return n_copies;
+}
+
+static_always_inline uword
+span_node_inline_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame, int is_rx)
+{
+ span_main_t *sm = &span_main;
+ vnet_main_t *vnm = &vnet_main;
+ u32 n_left_from, *from, *to_next;
+ u32 n_span_packets = 0;
+ u32 next_index;
+ u32 sw_if_index;
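+  /* Per-thread cache of frames destined to mirror interfaces, indexed
+     by sw_if_index; entries are allocated lazily in span_mirror() and
+     flushed to the interfaces once the main loop below completes. */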
+ static __thread vlib_frame_t **mirror_frames = 0;
+ vlib_rx_or_tx_t rxtx = is_rx ? VLIB_RX : VLIB_TX;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ vec_validate_aligned (mirror_frames, sm->max_sw_if_index,
+ CLIB_CACHE_LINE_BYTES);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0;
+ u32 bi1;
+ vlib_buffer_t *b0;
+ vlib_buffer_t *b1;
+ span_interface_t *si0, *si1;
+ u32 sw_if_index0;
+ u32 next0 = 0;
+ u32 sw_if_index1;
+ u32 next1 = 0;
+
+ /* speculatively enqueue b0, b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ to_next += 2;
+ n_left_to_next -= 2;
+ from += 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[rxtx];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[rxtx];
+ si0 = vec_elt_at_index (sm->interfaces, sw_if_index0);
+ si1 = vec_elt_at_index (sm->interfaces, sw_if_index1);
+
+	  n_span_packets += span_mirror (vm, si0, b0, mirror_frames, is_rx);
+	  n_span_packets += span_mirror (vm, si1, b1, mirror_frames, is_rx);
+
+ vnet_feature_next (sw_if_index0, &next0, b0);
+ vnet_feature_next (sw_if_index1, &next1, b1);
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ span_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->src_sw_if_index = sw_if_index0;
+ //t->mirror_sw_if_index = si0->mirror_sw_if_index;
+ }
+
+	  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ span_trace_t *t = vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->src_sw_if_index = sw_if_index1;
+ //t->mirror_sw_if_index = si1->mirror_sw_if_index;
+ }
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ span_interface_t *si0;
+ u32 sw_if_index0;
+ u32 next0 = 0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next += 1;
+ n_left_to_next -= 1;
+ from += 1;
+ n_left_from -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[rxtx];
+ si0 = vec_elt_at_index (sm->interfaces, sw_if_index0);
+	  n_span_packets += span_mirror (vm, si0, b0, mirror_frames, is_rx);
+
+ vnet_feature_next (sw_if_index0, &next0, b0);
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ span_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->src_sw_if_index = sw_if_index0;
+ }
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+
+ for (sw_if_index = 0; sw_if_index < vec_len (mirror_frames); sw_if_index++)
+ {
+ if (mirror_frames[sw_if_index] == 0)
+ continue;
+
+ vnet_put_frame_to_sw_interface (vnm, sw_if_index,
+ mirror_frames[sw_if_index]);
+ mirror_frames[sw_if_index] = 0;
+ }
+  vlib_node_increment_counter (vm, node->node_index, SPAN_ERROR_HITS,
+			       n_span_packets);
+
+ return frame->n_vectors;
+}
+
+static uword
+span_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return span_node_inline_fn (vm, node, frame, 1);
+}
+
+static uword
+span_output_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return span_node_inline_fn (vm, node, frame, 0);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (span_input_node) = {
+ .function = span_input_node_fn,
+ .name = "span-input",
+ .vector_size = sizeof (u32),
+ .format_trace = format_span_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(span_error_strings),
+ .error_strings = span_error_strings,
+
+ .n_next_nodes = 0,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [0] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (span_input_node, span_input_node_fn)
+
+VLIB_REGISTER_NODE (span_output_node) = {
+ .function = span_output_node_fn,
+ .name = "span-output",
+ .vector_size = sizeof (u32),
+ .format_trace = format_span_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(span_error_strings),
+ .error_strings = span_error_strings,
+
+ .n_next_nodes = 0,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [0] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (span_output_node, span_output_node_fn)
+
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/span/span.api b/src/vnet/span/span.api
new file mode 100644
index 00000000000..4babdd834ee
--- /dev/null
+++ b/src/vnet/span/span.api
@@ -0,0 +1,60 @@
+/* Hey Emacs use -*- mode: C -*- */
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \brief Enable/Disable span to mirror traffic from one interface to another
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context which was passed in the request
+    @param sw_if_index_from - interface to be mirrored
+    @param sw_if_index_to - interface where the traffic is mirrored
+    @param state - 0 = disabled, 1 = rx enabled, 2 = tx enabled, 3 = tx & rx enabled
+*/
+define sw_interface_span_enable_disable {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index_from;
+ u32 sw_if_index_to;
+ u8 state;
+};
+
+/** \brief Reply to SPAN enable/disable request
+ @param context - sender context which was passed in the request
+*/
+define sw_interface_span_enable_disable_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief SPAN dump request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define sw_interface_span_dump {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply to SPAN dump request
+ @param context - sender context which was passed in the request
+    @param sw_if_index_from - mirrored interface
+    @param sw_if_index_to - interface where the traffic is mirrored
+    @param state - 0 = disabled, 1 = rx enabled, 2 = tx enabled, 3 = tx & rx enabled
+*/
+define sw_interface_span_details {
+ u32 context;
+ u32 sw_if_index_from;
+ u32 sw_if_index_to;
+ u8 state;
+};
diff --git a/src/vnet/span/span.c b/src/vnet/span/span.c
new file mode 100644
index 00000000000..7b5816c79f2
--- /dev/null
+++ b/src/vnet/span/span.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vppinfra/error.h>
+#include <vnet/feature/feature.h>
+
+#include <vnet/span/span.h>
+
+/* Global SPAN state, declared in span.h. */
+span_main_t span_main;
+
+int
+span_add_delete_entry (vlib_main_t * vm,
+ u32 src_sw_if_index, u32 dst_sw_if_index, u8 state)
+{
+ span_main_t *sm = &span_main;
+ span_interface_t *si;
+ u32 new_num_rx_mirror_ports, new_num_tx_mirror_ports;
+
+ if (state > 3)
+ return VNET_API_ERROR_UNIMPLEMENTED;
+
+ if ((src_sw_if_index == ~0) || (dst_sw_if_index == ~0 && state > 0)
+ || (src_sw_if_index == dst_sw_if_index))
+ return VNET_API_ERROR_INVALID_INTERFACE;
+
+ vnet_sw_interface_t *sw_if;
+
+ sw_if = vnet_get_sw_interface (vnet_get_main (), src_sw_if_index);
+ if (sw_if->type == VNET_SW_INTERFACE_TYPE_SUB)
+ return VNET_API_ERROR_UNIMPLEMENTED;
+
+ vec_validate_aligned (sm->interfaces, src_sw_if_index,
+ CLIB_CACHE_LINE_BYTES);
+ si = vec_elt_at_index (sm->interfaces, src_sw_if_index);
+
+ si->rx_mirror_ports = clib_bitmap_set (si->rx_mirror_ports, dst_sw_if_index,
+ (state & 1) != 0);
+ si->tx_mirror_ports = clib_bitmap_set (si->tx_mirror_ports, dst_sw_if_index,
+ (state & 2) != 0);
+
+ new_num_rx_mirror_ports = clib_bitmap_count_set_bits (si->rx_mirror_ports);
+ new_num_tx_mirror_ports = clib_bitmap_count_set_bits (si->tx_mirror_ports);
+
+  if (new_num_rx_mirror_ports == 1 && si->num_rx_mirror_ports == 0)
+    vnet_feature_enable_disable ("device-input", "span-input",
+				 src_sw_if_index, 1, 0, 0);
+
+  if (new_num_rx_mirror_ports == 0 && si->num_rx_mirror_ports == 1)
+    vnet_feature_enable_disable ("device-input", "span-input",
+				 src_sw_if_index, 0, 0, 0);
+
+  if (new_num_tx_mirror_ports == 1 && si->num_tx_mirror_ports == 0)
+    vnet_feature_enable_disable ("interface-output", "span-output",
+				 src_sw_if_index, 1, 0, 0);
+
+  if (new_num_tx_mirror_ports == 0 && si->num_tx_mirror_ports == 1)
+    vnet_feature_enable_disable ("interface-output", "span-output",
+				 src_sw_if_index, 0, 0, 0);
+
+ si->num_rx_mirror_ports = new_num_rx_mirror_ports;
+ si->num_tx_mirror_ports = new_num_tx_mirror_ports;
+
+ if (dst_sw_if_index > sm->max_sw_if_index)
+ sm->max_sw_if_index = dst_sw_if_index;
+
+ return 0;
+}
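+
+/* Usage sketch: state is a bitmap (bit 0 = rx, bit 1 = tx), so
+
+     span_add_delete_entry (vm, 1, 2, 3);
+
+   mirrors both directions of sw_if_index 1 onto sw_if_index 2, and
+
+     span_add_delete_entry (vm, 1, 2, 0);
+
+   removes the mirror again. */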
+
+static clib_error_t *
+set_interface_span_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ span_main_t *sm = &span_main;
+ u32 src_sw_if_index = ~0;
+ u32 dst_sw_if_index = ~0;
+ u8 state = 3;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%U", unformat_vnet_sw_interface,
+ sm->vnet_main, &src_sw_if_index))
+ ;
+ else if (unformat (input, "destination %U", unformat_vnet_sw_interface,
+ sm->vnet_main, &dst_sw_if_index))
+ ;
+ else if (unformat (input, "disable"))
+ state = 0;
+ else if (unformat (input, "rx"))
+ state = 1;
+ else if (unformat (input, "tx"))
+ state = 2;
+ else if (unformat (input, "both"))
+ state = 3;
+ else
+ break;
+ }
+
+ int rv =
+ span_add_delete_entry (vm, src_sw_if_index, dst_sw_if_index, state);
+  if (rv == VNET_API_ERROR_INVALID_INTERFACE)
+    return clib_error_return (0, "Invalid interface");
+  else if (rv == VNET_API_ERROR_UNIMPLEMENTED)
+    return clib_error_return (0, "Operation not supported on this interface");
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_span_command, static) = {
+ .path = "set interface span",
+ .short_help = "set interface span <if-name> [disable | destination <if-name> [both|rx|tx]]",
+ .function = set_interface_span_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+show_interfaces_span_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ span_main_t *sm = &span_main;
+ span_interface_t *si;
+ vnet_main_t *vnm = &vnet_main;
+ u8 header = 1;
+ char *states[] = { "none", "rx", "tx", "both" };
+ u8 *s = 0;
+
+ /* *INDENT-OFF* */
+ vec_foreach (si, sm->interfaces)
+ if (si->num_rx_mirror_ports || si->num_tx_mirror_ports)
+ {
+ clib_bitmap_t *b;
+ u32 i;
+ b = clib_bitmap_dup_or (si->rx_mirror_ports, si->tx_mirror_ports);
+ if (header)
+ {
+ vlib_cli_output (vm, "%-40s %s", "Source interface",
+ "Mirror interface (direction)");
+ header = 0;
+ }
+ s = format (s, "%U", format_vnet_sw_if_index_name, vnm,
+ si - sm->interfaces);
+ clib_bitmap_foreach (i, b, (
+ {
+ int state;
+ state = (clib_bitmap_get (si->rx_mirror_ports, i) +
+ clib_bitmap_get (si->tx_mirror_ports, i) * 2);
+
+ vlib_cli_output (vm, "%-40v %U (%s)", s,
+ format_vnet_sw_if_index_name, vnm, i,
+ states[state]);
+ vec_reset_length (s);
+ }));
+ clib_bitmap_free (b);
+ }
+ /* *INDENT-ON* */
+ vec_free (s);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_interfaces_span_command, static) = {
+ .path = "show interfaces span",
+ .short_help = "Shows SPAN mirror table",
+ .function = show_interfaces_span_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+span_init (vlib_main_t * vm)
+{
+ span_main_t *sm = &span_main;
+
+ sm->vlib_main = vm;
+ sm->vnet_main = vnet_get_main ();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (span_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/span/span.h b/src/vnet/span/span.h
new file mode 100644
index 00000000000..a98b010bf61
--- /dev/null
+++ b/src/vnet/span/span.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __span_h__
+#define __span_h__
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+
+typedef struct
+{
+ clib_bitmap_t *rx_mirror_ports;
+ clib_bitmap_t *tx_mirror_ports;
+ u32 num_rx_mirror_ports;
+ u32 num_tx_mirror_ports;
+} span_interface_t;
+
+typedef struct
+{
+ /* per-interface vector of span instances */
+ span_interface_t *interfaces;
+
+ /* biggest sw_if_index used so far */
+ u32 max_sw_if_index;
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} span_main_t;
+
+extern span_main_t span_main;
+
+typedef struct
+{
+ u32 src_sw_if_index; /* mirrored interface index */
+ u32 mirror_sw_if_index; /* output interface index */
+} span_trace_t;
+
+int
+span_add_delete_entry (vlib_main_t * vm, u32 src_sw_if_index,
+		       u32 dst_sw_if_index, u8 state);
+
+#endif /* __span_h__ */
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/span/span.md b/src/vnet/span/span.md
new file mode 100644
index 00000000000..ee3f814f5c3
--- /dev/null
+++ b/src/vnet/span/span.md
@@ -0,0 +1,65 @@
+# VPP SPAN implementation
+
+This is a memo intended to contain documentation of the VPP SPAN implementation.
+Everything that is not directly obvious should come here.
+
+
+## Switched Port Analyzer (SPAN)
+Port mirroring is used on a network switch to send a copy of network packets seen on one switch port to a network monitoring connection on another switch port.
+It can be used by network engineers or administrators to measure performance, analyze and debug data, or diagnose errors on a network.
+
+### RX traffic node
+There is one static node to mirror incoming packets.
+* span-input: creates a copy of each incoming buffer, since incoming buffers can be reused internally.
+
+Chaining: dpdk-input -> span-input ->
+* original buffer is sent to ethernet-input for processing
+* buffer copy is sent to interface-output
+
+### Configuration
+SPAN supports the following CLI configuration commands:
+
+#### Enable/Disable SPAN (CLI)
+    set interface span <if-name> [disable | destination <if-name> [both|rx|tx]]
+
+<if-name>: mirrored interface name
+destination <if-name>: monitoring interface name
+both|rx|tx: mirror both directions (default), only received, or only transmitted traffic
+disable: delete mirroring
+
+#### Enable/Disable SPAN (API)
+SPAN supports the following API configuration commands:
+ sw_interface_span_enable_disable src GigabitEthernet0/8/0 dst GigabitEthernet0/9/0
+ sw_interface_span_enable_disable src_sw_if_index 1 dst_sw_if_index 2
+
+src/src_sw_if_index: mirrored interface (name or index)
+dst/dst_sw_if_index: monitoring interface (name or index)
+
+#### Remove SPAN entry (API)
+SPAN supports the following API configuration command:
+ sw_interface_span_enable_disable src_sw_if_index 1 dst_sw_if_index 2 disable
+
+src_sw_if_index: mirrored interface index
+dst_sw_if_index: monitoring interface index
+
+### Configuration example
+
+Mirror all packets on interface GigabitEthernet0/10/0 to interface GigabitEthernet0/11/0.
+
+Configure IPv4 addresses on the mirrored interface:
+    set interface ip address GigabitEthernet0/10/0 192.168.1.13/24
+    set interface state GigabitEthernet0/10/0 up
+
+Configure IPv4 addresses on the monitoring interface:
+    set interface ip address GigabitEthernet0/11/0 192.168.2.13/24
+    set interface state GigabitEthernet0/11/0 up
+
+Configure SPAN:
+    set interface span GigabitEthernet0/10/0 destination GigabitEthernet0/11/0
+
+### Operational data
+
+Active SPAN mirroring CLI show command:
+ show interfaces span
+
+Active SPAN mirroring API dump command:
+ sw_interface_span_dump
diff --git a/src/vnet/span/span_api.c b/src/vnet/span/span_api.c
new file mode 100644
index 00000000000..b4565663eb9
--- /dev/null
+++ b/src/vnet/span/span_api.c
@@ -0,0 +1,153 @@
+/*
+ *------------------------------------------------------------------
+ * span_api.c - span mirroring api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/span/span.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(SW_INTERFACE_SPAN_ENABLE_DISABLE, sw_interface_span_enable_disable) \
+_(SW_INTERFACE_SPAN_DUMP, sw_interface_span_dump) \
+
+static void
+ vl_api_sw_interface_span_enable_disable_t_handler
+ (vl_api_sw_interface_span_enable_disable_t * mp)
+{
+ vl_api_sw_interface_span_enable_disable_reply_t *rmp;
+ int rv;
+
+ vlib_main_t *vm = vlib_get_main ();
+
+ rv = span_add_delete_entry (vm, ntohl (mp->sw_if_index_from),
+ ntohl (mp->sw_if_index_to), mp->state);
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_SPAN_ENABLE_DISABLE_REPLY);
+}
+
+static void
+vl_api_sw_interface_span_dump_t_handler (vl_api_sw_interface_span_dump_t * mp)
+{
+
+ unix_shared_memory_queue_t *q;
+ span_interface_t *si;
+ vl_api_sw_interface_span_details_t *rmp;
+ span_main_t *sm = &span_main;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ /* *INDENT-OFF* */
+ vec_foreach (si, sm->interfaces)
+ if (si->num_rx_mirror_ports || si->num_tx_mirror_ports)
+ {
+ clib_bitmap_t *b;
+ u32 i;
+ b = clib_bitmap_dup_or (si->rx_mirror_ports, si->tx_mirror_ports);
+ clib_bitmap_foreach (i, b, (
+ {
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SPAN_DETAILS);
+ rmp->context = mp->context;
+
+ rmp->sw_if_index_from = htonl (si - sm->interfaces);
+ rmp->sw_if_index_to = htonl (i);
+ rmp->state = (u8) (clib_bitmap_get (si->rx_mirror_ports, i) +
+ clib_bitmap_get (si->tx_mirror_ports, i) * 2);
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ }));
+ clib_bitmap_free (b);
+ }
+ /* *INDENT-ON* */
+}
+
+/*
+ * vpe_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_span;
+#undef _
+}
+
+static clib_error_t *
+span_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (span_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/sr/dir.dox b/src/vnet/sr/dir.dox
new file mode 100644
index 00000000000..a98b202c93e
--- /dev/null
+++ b/src/vnet/sr/dir.dox
@@ -0,0 +1,25 @@
+/*
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ @dir
+ @brief Segment Routing code
+
+ An implementation of Segment Routing as per:
+ draft-previdi-6man-segment-routing-header-05
+
+ See file: rfc_draft_05.txt
+
+*/ \ No newline at end of file
diff --git a/src/vnet/sr/examples/sr_multicastmap.script b/src/vnet/sr/examples/sr_multicastmap.script
new file mode 100644
index 00000000000..20bf7dc0eb7
--- /dev/null
+++ b/src/vnet/sr/examples/sr_multicastmap.script
@@ -0,0 +1,4 @@
+sr_tunnel_add_del name sr2 src ::a:1:1:0:6 dst ff15::2/128 next ::a:1:1:0:f next ::a:1:1:0:1a next ff15::1 tag ::a:1:1:0:7 clean
+sr_tunnel_add_del name sr3 src ::b:1:1:0:6 dst ff16::2/128 next ::a:1:1:0:13 next ::a:1:1:0:1a next ff15::1 tag ::a:1:1:0:7 clean
+sr_policy_add_del name pol1 tunnel sr2 tunnel sr3
+sr_multicast_map_add_del address ff15::1 sr-policy pol1
diff --git a/src/vnet/sr/rfc_draft_05.txt b/src/vnet/sr/rfc_draft_05.txt
new file mode 100644
index 00000000000..bc41c181ea4
--- /dev/null
+++ b/src/vnet/sr/rfc_draft_05.txt
@@ -0,0 +1,1265 @@
+Network Working Group S. Previdi, Ed.
+Internet-Draft C. Filsfils
+Intended status: Standards Track Cisco Systems, Inc.
+Expires: June 12, 2015 B. Field
+ Comcast
+ I. Leung
+ Rogers Communications
+ December 9, 2014
+
+
+ IPv6 Segment Routing Header (SRH)
+ draft-previdi-6man-segment-routing-header-05
+
+Abstract
+
+ Segment Routing (SR) allows a node to steer a packet through a
+ controlled set of instructions, called segments, by prepending a SR
+ header to the packet. A segment can represent any instruction,
+ topological or service-based. SR allows to enforce a flow through
+ any path (topological, or application/service based) while
+ maintaining per-flow state only at the ingress node to the SR domain.
+
+ Segment Routing can be applied to the IPv6 data plane with the
+ addition of a new type of Routing Extension Header. This draft
+ describes the Segment Routing Extension Header Type and how it is
+ used by SR capable nodes.
+
+Requirements Language
+
+ The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
+ "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
+ document are to be interpreted as described in RFC 2119 [RFC2119].
+
+Status of This Memo
+
+ This Internet-Draft is submitted in full conformance with the
+ provisions of BCP 78 and BCP 79.
+
+ Internet-Drafts are working documents of the Internet Engineering
+ Task Force (IETF). Note that other groups may also distribute
+ working documents as Internet-Drafts. The list of current Internet-
+ Drafts is at http://datatracker.ietf.org/drafts/current/.
+
+ Internet-Drafts are draft documents valid for a maximum of six months
+ and may be updated, replaced, or obsoleted by other documents at any
+ time. It is inappropriate to use Internet-Drafts as reference
+ material or to cite them other than as "work in progress."
+
+
+
+
+Previdi, et al. Expires June 12, 2015 [Page 1]
+
+Internet-Draft IPv6 Segment Routing Header (SRH) December 2014
+
+
+ This Internet-Draft will expire on June 12, 2015.
+
+Copyright Notice
+
+ Copyright (c) 2014 IETF Trust and the persons identified as the
+ document authors. All rights reserved.
+
+ This document is subject to BCP 78 and the IETF Trust's Legal
+ Provisions Relating to IETF Documents
+ (http://trustee.ietf.org/license-info) in effect on the date of
+ publication of this document. Please review these documents
+ carefully, as they describe your rights and restrictions with respect
+ to this document. Code Components extracted from this document must
+ include Simplified BSD License text as described in Section 4.e of
+ the Trust Legal Provisions and are provided without warranty as
+ described in the Simplified BSD License.
+
+Table of Contents
+
+ 1. Structure of this document . . . . . . . . . . . . . . . . . 3
+ 2. Segment Routing Documents . . . . . . . . . . . . . . . . . . 3
+ 3. Introduction . . . . . . . . . . . . . . . . . . . . . . . . 3
+ 3.1. Data Planes supporting Segment Routing . . . . . . . . . 4
+ 3.2. Illustration . . . . . . . . . . . . . . . . . . . . . . 4
+ 4. Abstract Routing Model . . . . . . . . . . . . . . . . . . . 7
+ 4.1. Segment Routing Global Block (SRGB) . . . . . . . . . . . 8
+ 4.2. Traffic Engineering with SR . . . . . . . . . . . . . . . 9
+ 4.3. Segment Routing Database . . . . . . . . . . . . . . . . 10
+ 5. IPv6 Instantiation of Segment Routing . . . . . . . . . . . . 10
+ 5.1. Segment Identifiers (SIDs) and SRGB . . . . . . . . . . . 10
+ 5.1.1. Node-SID . . . . . . . . . . . . . . . . . . . . . . 11
+ 5.1.2. Adjacency-SID . . . . . . . . . . . . . . . . . . . . 11
+ 5.2. Segment Routing Extension Header (SRH) . . . . . . . . . 11
+ 5.2.1. SRH and RFC2460 behavior . . . . . . . . . . . . . . 15
+ 6. SRH Procedures . . . . . . . . . . . . . . . . . . . . . . . 15
+ 6.1. Segment Routing Operations . . . . . . . . . . . . . . . 15
+ 6.2. Segment Routing Node Functions . . . . . . . . . . . . . 16
+ 6.2.1. Ingress SR Node . . . . . . . . . . . . . . . . . . . 16
+ 6.2.2. Transit Non-SR Capable Node . . . . . . . . . . . . . 18
+ 6.2.3. SR Intra Segment Transit Node . . . . . . . . . . . . 18
+ 6.2.4. SR Segment Endpoint Node . . . . . . . . . . . . . . 18
+ 6.3. FRR Flag Settings . . . . . . . . . . . . . . . . . . . . 18
+ 7. SR and Tunneling . . . . . . . . . . . . . . . . . . . . . . 18
+ 8. Example Use Case . . . . . . . . . . . . . . . . . . . . . . 19
+ 9. IANA Considerations . . . . . . . . . . . . . . . . . . . . . 21
+ 10. Manageability Considerations . . . . . . . . . . . . . . . . 21
+ 11. Security Considerations . . . . . . . . . . . . . . . . . . . 21
+ 12. Contributors . . . . . . . . . . . . . . . . . . . . . . . . 21
+
+
+
+Previdi, et al. Expires June 12, 2015 [Page 2]
+
+Internet-Draft IPv6 Segment Routing Header (SRH) December 2014
+
+
+ 13. Acknowledgements . . . . . . . . . . . . . . . . . . . . . . 21
+ 14. References . . . . . . . . . . . . . . . . . . . . . . . . . 21
+ 14.1. Normative References . . . . . . . . . . . . . . . . . . 21
+ 14.2. Informative References . . . . . . . . . . . . . . . . . 21
+ Authors' Addresses . . . . . . . . . . . . . . . . . . . . . . . 22
+
+1. Structure of this document
+
+ Section 3 gives an introduction on SR for IPv6 networks.
+
+ Section 4 describes the Segment Routing abstract model.
+
+ Section 5 defines the Segment Routing Header (SRH) allowing
+ instantiation of SR over IPv6 dataplane.
+
+ Section 6 details the procedures of the Segment Routing Header.
+
+2. Segment Routing Documents
+
+ Segment Routing terminology is defined in
+ [I-D.filsfils-spring-segment-routing].
+
+ Segment Routing use cases are described in
+ [I-D.filsfils-spring-segment-routing-use-cases].
+
+ Segment Routing IPv6 use cases are described in
+ [I-D.ietf-spring-ipv6-use-cases].
+
+ Segment Routing protocol extensions are defined in
+ [I-D.ietf-isis-segment-routing-extensions], and
+ [I-D.psenak-ospf-segment-routing-ospfv3-extension].
+
+ The security mechanisms of the Segment Routing Header (SRH) are
+ described in [I-D.vyncke-6man-segment-routing-security].
+
+3. Introduction
+
+ Segment Routing (SR), defined in
+ [I-D.filsfils-spring-segment-routing], allows a node to steer a
+ packet through a controlled set of instructions, called segments, by
+ prepending a SR header to the packet. A segment can represent any
+ instruction, topological or service-based. SR allows to enforce a
+ flow through any path (topological or service/application based)
+ while maintaining per-flow state only at the ingress node to the SR
+ domain. Segments can be derived from different components: IGP, BGP,
+ Services, Contexts, Locators, etc. The list of segment forming the
+ path is called the Segment List and is encoded in the packet header.
+
+
+
+
+Previdi, et al. Expires June 12, 2015 [Page 3]
+
+Internet-Draft IPv6 Segment Routing Header (SRH) December 2014
+
+
+ SR allows the use of strict and loose source based routing paradigms
+ without requiring any additional signaling protocols in the
+ infrastructure hence delivering an excellent scalability property.
+
+ The source based routing model described in
+ [I-D.filsfils-spring-segment-routing] is inherited from the ones
+ proposed by [RFC1940] and [RFC2460]. The source based routing model
+ offers the support for explicit routing capability.
+
+3.1. Data Planes supporting Segment Routing
+
+ Segment Routing (SR), can be instantiated over MPLS
+ ([I-D.filsfils-spring-segment-routing-mpls]) and IPv6. This document
+ defines its instantiation over the IPv6 data-plane based on the use-
+ cases defined in [I-D.ietf-spring-ipv6-use-cases].
+
+ Segment Routing for IPv6 (SR-IPv6) is required in networks where MPLS
+ data-plane is not used or, when combined with SR-MPLS, in networks
+ where MPLS is used in the core and IPv6 is used at the edge (home
+ networks, datacenters).
+
+ This document defines a new type of Routing Header (originally
+ defined in [RFC2460]) called the Segment Routing Header (SRH) in
+ order to convey the Segment List in the packet header as defined in
+ [I-D.filsfils-spring-segment-routing]. Mechanisms through which
+ segment are known and advertised are outside the scope of this
+ document.
+
+3.2. Illustration
+
+ In the context of Figure 1 where all the links have the same IGP
+ cost, let us assume that a packet P enters the SR domain at an
+ ingress edge router I and that the operator requests the following
+ requirements for packet P:
+
+ The local service S offered by node B must be applied to packet P.
+
+ The links AB and CE cannot be used to transport the packet P.
+
+ Any node N along the journey of the packet should be able to
+ determine where the packet P entered the SR domain and where it
+ will exit. The intermediate node should be able to determine the
+ paths from the ingress edge router to itself, and from itself to
+ the egress edge router.
+
+ Per-flow State for packet P should only be created at the ingress
+ edge router.
+
+
+
+
+Previdi, et al. Expires June 12, 2015 [Page 4]
+
+Internet-Draft IPv6 Segment Routing Header (SRH) December 2014
+
+
+ The operator can forbid, for security reasons, anyone outside the
+ operator domain to exploit its intra-domain SR capabilities.
+
+ I---A---B---C---E
+ \ | / \ /
+ \ | / F
+ \|/
+ D
+
+ Figure 1: An illustration of SR properties
+
+ All these properties may be realized by instructing the ingress SR
+ edge router I to push the following abstract SR header on the packet
+ P.
+
+ +---------------------------------------------------------------+
+ | | |
+ | Abstract SR Header | |
+ | | |
+ | {SD, SB, SS, SF, SE}, Ptr, SI, SE | Transported |
+ | ^ | | Packet |
+ | | | | P |
+ | +---------------------+ | |
+ | | |
+ +---------------------------------------------------------------+
+
+ Figure 2: Packet P at node I
+
+ The abstract SR header contains a source route encoded as a list of
+ segments {SD, SB, SS, SF, SE}, a pointer (Ptr) and the identification
+ of the ingress and egress SR edge routers (segments SI and SE).
+
+ A segment identifies a topological instruction or a service
+ instruction. A segment can either be global or local. The
+ instruction associated with a global segment is recognized and
+ executed by any SR-capable node in the domain. The instruction
+ associated with a local segment is only supported by the specific
+ node that originates it.
+
+ Let us assume some IGP (i.e.: ISIS and OSPF) extensions to define a
+ "Node Segment" as a global instruction within the IGP domain to
+ forward a packet along the shortest path to the specified node. Let
+ us further assume that within the SR domain illustrated in Figure 1,
+ segments SI, SD, SB, SE and SF respectively identify IGP node
+ segments to I, D, B, E and F.
+
+ Let us assume that node B identifies its local service S with local
+ segment SS.
+
+
+
+Previdi, et al. Expires June 12, 2015 [Page 5]
+
+Internet-Draft IPv6 Segment Routing Header (SRH) December 2014
+
+
+ With all of this in mind, let us describe the journey of the packet
+ P.
+
+ The packet P reaches the ingress SR edge router. I pushes the SR
+ header illustrated in Figure 2 and sets the pointer to the first
+ segment of the list (SD).
+
+ SD is an instruction recognized by all the nodes in the SR domain
+ which causes the packet to be forwarded along the shortest path to D.
+
+ Once at D, the pointer is incremented and the next segment is
+ executed (SB).
+
+ SB is an instruction recognized by all the nodes in the SR domain
+ which causes the packet to be forwarded along the shortest path to B.
+
+ Once at B, the pointer is incremented and the next segment is
+ executed (SS).
+
+ SS is an instruction only recognized by node B which causes the
+ packet to receive service S.
+
+ Once the service applied, the next segment is executed (SF) which
+ causes the packet to be forwarded along the shortest path to F.
+
+ Once at F, the pointer is incremented and the next segment is
+ executed (SE).
+
+ SE is an instruction recognized by all the nodes in the SR domain
+ which causes the packet to be forwarded along the shortest path to E.
+
+ E then removes the SR header and the packet continues its journey
+ outside the SR domain.
+
+ All of the requirements are met.
+
+ First, the packet P has not used links AB and CE: the shortest-path
+ from I to D is I-A-D, the shortest-path from D to B is D-B, the
+ shortest-path from B to F is B-C-F and the shortest-path from F to E
+ is F-E, hence the packet path through the SR domain is I-A-D-B-C-F-E
+ and the links AB and CE have been avoided.
+
+ Second, the service S supported by B has been applied on packet P.
+
+ Third, any node along the packet path is able to identify the service
+ and topological journey of the packet within the SR domain. For
+ example, node C receives the packet illustrated in Figure 3 and hence
+ is able to infer where the packet entered the SR domain (SI), how it
+
+
+
+Previdi, et al. Expires June 12, 2015 [Page 6]
+
+Internet-Draft IPv6 Segment Routing Header (SRH) December 2014
+
+
+ got up to itself {SD, SB, SS, SE}, where it will exit the SR domain
+ (SE) and how it will do so {SF, SE}.
+
+ +---------------------------------------------------------------+
+ | | |
+ | SR Header | |
+ | | |
+ | {SD, SB, SS, SF, SE}, Ptr, SI, SE | Transported |
+ | ^ | | Packet |
+ | | | | P |
+ | +--------+ | |
+ | | |
+ +---------------------------------------------------------------+
+
+ Figure 3: Packet P at node C
+
+ Fourth, only node I maintains per-flow state for packet P. The
+ entire program of topological and service instructions to be executed
+ by the SR domain on packet P is encoded by the ingress edge router I
+ in the SR header in the form of a list of segments where each segment
+ identifies a specific instruction. No further per-flow state is
+ required along the packet path. The per-flow state is in the SR
+   header and travels with the packet.  Intermediate nodes only hold
+   state related to the IGP global node segments and the local IGP
+   adjacency segments.  These segments are not per-flow specific and
+   hence scale very well.  Typically, an intermediate node would
+   maintain on the order of hundreds to thousands of global node
+   segments and on the order of tens to hundreds of local adjacency
+   segments.  Typically, the SR IGP forwarding table is expected to
+   hold far fewer than 10000 entries.
+
+ Fifth, the SR header is inserted at the entrance to the domain and
+ removed at the exit of the operator domain. For security reasons,
+   the operator can forbid anyone outside its domain from using its
+   intra-domain SR capability.
+
+4. Abstract Routing Model
+
+ At the entrance of the SR domain, the ingress SR edge router pushes
+ the SR header on top of the packet. At the exit of the SR domain,
+ the egress SR edge router removes the SR header.
+
+ The abstract SR header contains an ordered list of segments, a
+ pointer identifying the next segment to process and the
+ identifications of the ingress and egress SR edge routers on the path
+ of this packet. The pointer identifies the segment that MUST be used
+ by the receiving router to process the packet. This segment is
+ called the active segment.
+
+   A property of SR is that the entire source route of the packet,
+   including the identity of the ingress and egress edge routers, is
+   always available with the packet.  This allows for interesting
+   accounting and service applications.
+
+ We define three SR-header operations:
+
+ "PUSH": an SR header is pushed on an IP packet, or additional
+ segments are added at the head of the segment list. The pointer
+ is moved to the first entry of the added segments.
+
+ "NEXT": the active segment is completed, the pointer is moved to
+ the next segment in the list.
+
+ "CONTINUE": the active segment is not completed, the pointer is
+ left unchanged.
+
+ In the future, other SR-header management operations may be defined.
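+
+   As an illustration only (the types and names below are hypothetical
+   and not defined by this document), the abstract header and the three
+   operations can be sketched in C:
+
+      #include <string.h>
+
+      typedef struct { unsigned char as_u8[16]; } sid_t;  /* one segment */
+
+      typedef struct
+      {
+        sid_t segments[16];     /* ordered list of segments */
+        int n_segments;         /* number of valid entries */
+        int ptr;                /* index of the active segment */
+        sid_t ingress, egress;  /* SR edge router identities */
+      } abstract_sr_header_t;
+
+      /* PUSH: add segments at the head of the list and move the
+         pointer to the first added entry (bounds checks omitted). */
+      static void
+      sr_push (abstract_sr_header_t * h, const sid_t * segs, int n)
+      {
+        memmove (&h->segments[n], &h->segments[0],
+                 h->n_segments * sizeof (sid_t));
+        memcpy (&h->segments[0], segs, n * sizeof (sid_t));
+        h->n_segments += n;
+        h->ptr = 0;
+      }
+
+      /* NEXT: the active segment is completed; advance the pointer. */
+      static void
+      sr_next (abstract_sr_header_t * h)
+      {
+        h->ptr++;
+      }
+
+      /* CONTINUE: the active segment is not completed; the pointer is
+         left unchanged, so there is nothing to do on the header. */
+      static void
+      sr_continue (abstract_sr_header_t * h)
+      {
+        (void) h;
+      }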
+
+ As the packet travels through the SR domain, the pointer is
+ incremented through the ordered list of segments and the source route
+ encoded by the SR ingress edge node is executed.
+
+ A node processes an incoming packet according to the instruction
+ associated with the active segment.
+
+ Any instruction might be associated with a segment: for example, an
+ intra-domain topological strict or loose forwarding instruction, a
+ service instruction, etc.
+
+ At minimum, a segment instruction must define two elements: the
+ identity of the next-hop to forward the packet to (this could be the
+ same node or a context within the node) and which SR-header
+ management operation to execute.
+
+ Each segment is known in the network through a Segment Identifier
+ (SID). The terms "segment" and "SID" are interchangeable.
+
+4.1. Segment Routing Global Block (SRGB)
+
+ In the SR abstract model, a segment is identified by a Segment
+ Routing Identifier (SID). The SR abstract model doesn't mandate a
+ specific format for the SID (IPv6 address or other formats).
+
+ In Segment Routing IPv6 the SID is an IPv6 address. Therefore, the
+ SRGB is materialized by the global IPv6 address space which
+ represents the set of IPv6 routable addresses in the SR domain. The
+ following rules apply:
+
+ o Each node of the SR domain MUST be configured with the Segment
+ Routing Global Block (SRGB).
+
+ o All global segments must be allocated from the SRGB. Any SR
+ capable node MUST be able to process any global segment advertised
+ by any other node within the SR domain.
+
+   o  Any segment outside the SRGB has local significance and is
+ called a "local segment". An SR-capable node MUST be able to
+ process the local segments it originates. An SR-capable node MUST
+ NOT support the instruction associated with a local segment
+ originated by a remote node.
+
+4.2. Traffic Engineering with SR
+
+   An SR Traffic Engineering policy is composed of two elements: a flow
+   classification and a segment-list to prepend to the packets of the
+   flow.
+
+ In SR, this per-flow state only exists at the ingress edge node where
+ the policy is defined and the SR header is pushed.
+
+   It is outside the scope of this document to define the process that
+   leads to the instantiation of an SR Traffic Engineering policy at a
+   node N.
+
+ [I-D.filsfils-spring-segment-routing-use-cases] illustrates various
+ alternatives:
+
+ N is deriving this policy automatically (e.g. FRR).
+
+ N is provisioned explicitly by the operator.
+
+ N is provisioned by a controller or server (e.g.: SDN Controller).
+
+ N is provisioned by the operator with a high-level policy which is
+ mapped into a path thanks to a local CSPF-based computation (e.g.
+ affinity/SRLG exclusion).
+
+ N could also be provisioned by other means.
+
+ [I-D.filsfils-spring-segment-routing-use-cases] explains why the
+ majority of use-cases require very short segment-lists, hence
+ minimizing the performance impact, if any, of inserting and
+ transporting the segment list.
+
+   An SDN controller that wishes to instantiate an SR Traffic
+   Engineering policy at node N collects the SR capabilities of node N
+   so as to ensure that the policy matches those capabilities.
+
+4.3. Segment Routing Database
+
+   The Segment Routing Database (SRDB) is a set of entries where each
+ entry is identified by a SID. The instruction associated with each
+ entry at least defines the identity of the next-hop to which the
+ packet should be forwarded and what operation should be performed on
+ the SR header (PUSH, CONTINUE, NEXT).
+
+ +---------+-----------+---------------------------------+
+ | Segment | Next-Hop | SR Header operation |
+ +---------+-----------+---------------------------------+
+ | Sk | M | CONTINUE |
+ | Sj | N | NEXT |
+ | Sl | NAT Srvc | NEXT |
+ | Sm | FW srvc | NEXT |
+ | Sn | Q | NEXT |
+ | etc. | etc. | etc. |
+ +---------+-----------+---------------------------------+
+
+ Figure 4: SR Database
+
+ Each SR-capable node maintains its local SRDB. SRDB entries can
+ either derive from local policy or from protocol segment
+ advertisement.
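+
+   As an illustration only (reusing the hypothetical sid_t type from
+   the sketch in Section 4), an SRDB entry along the lines of Figure 4
+   could be represented in C as:
+
+      typedef enum
+      {
+        SR_OP_PUSH,
+        SR_OP_CONTINUE,
+        SR_OP_NEXT,
+      } sr_hdr_op_t;
+
+      typedef struct
+      {
+        sid_t sid;          /* the segment identifier (lookup key) */
+        sid_t next_hop;     /* node, service or context to forward to */
+        sr_hdr_op_t sr_op;  /* SR header operation to perform */
+      } srdb_entry_t;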
+
+5. IPv6 Instantiation of Segment Routing
+
+5.1. Segment Identifiers (SIDs) and SRGB
+
+ Segment Routing, as described in
+ [I-D.filsfils-spring-segment-routing], defines Node-SID and
+ Adjacency-SID. When SR is used over IPv6 data-plane the following
+ applies.
+
+ The SRGB is the global IPv6 address space which represents the set of
+ IPv6 routable addresses in the SR domain.
+
+   Node-SIDs are IPv6 addresses that are part of the SRGB (i.e.:
+   routable addresses).  Adjacency-SIDs are IPv6 addresses which may
+   not be part of the global IPv6 address space.
+
+5.1.1. Node-SID
+
+ The Node-SID identifies a node. With SR-IPv6 the Node-SID is an IPv6
+ prefix that the operator configured on the node and that is used as
+ the node identifier. Typically, in case of a router, this is the
+ IPv6 address of the node loopback interface. Therefore, SR-IPv6 does
+ not require any additional SID advertisement for the Node Segment.
+ The Node-SID is in fact the IPv6 address of the node.
+
+5.1.2. Adjacency-SID
+
+ In the SR architecture defined in
+ [I-D.filsfils-spring-segment-routing] the Adjacency-SID (or Adj-SID)
+ identifies a given interface and may be local or global (depending on
+   how it is advertised).  A node may advertise one (or more) Adj-SIDs
+   allocated to a given interface so as to force the forwarding of the
+   packet (when received with that particular Adj-SID) out of that
+   interface regardless of the routing entry for the packet
+   destination.  The semantics of the Adj-SID are:
+
+ Send out the packet to the interface this prefix is allocated to.
+
+   When SR is applied to IPv6, any SID is a global IPv6 address and an
+   Adj-SID therefore has global significance (i.e.: the IPv6 address
+   representing the SID is a global address).  In other words, a node
+   that advertises an Adj-SID in the form of a global IPv6 address,
+   representing the link/adjacency the packet has to be forwarded to,
+   gives that Adj-SID global significance.
+
+ Advertisement of Adj-SID may be done using multiple mechanisms among
+ which the ones described in ISIS and OSPF protocol extensions:
+ [I-D.ietf-isis-segment-routing-extensions] and
+ [I-D.psenak-ospf-segment-routing-ospfv3-extension]. The distinction
+ between local and global significance of the Adj-SID is given in the
+ encoding of the Adj-SID advertisement.
+
+5.2. Segment Routing Extension Header (SRH)
+
+   A new type of Routing Header (the Routing Header is originally
+   defined in [RFC2460]) is defined: the Segment Routing Header (SRH),
+   which has a new Routing Type (suggested value 4) to be assigned by
+   IANA.
+
+ As an example, if an explicit path is to be constructed across a core
+ network running ISIS or OSPF, the segment list will contain SIDs
+ representing the nodes across the path (loose or strict) which,
+ usually, are the IPv6 loopback interface address of each node. If
+ the path is across service or application entities, the segment list
+ contains the IPv6 addresses of these services or application
+ instances.
+
+ The Segment Routing Header (SRH) is defined as follows:
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Next Header | Hdr Ext Len | Routing Type | Segments Left |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | First Segment | Flags | HMAC Key ID |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |
+ | Segment List[0] (128 bits ipv6 address) |
+ | |
+ | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |
+ | |
+ ...
+ | |
+ | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |
+ | Segment List[n] (128 bits ipv6 address) |
+ | |
+ | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |
+ | Policy List[0] (optional) |
+ | |
+ | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |
+ | Policy List[1] (optional) |
+ | |
+ | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |
+ | Policy List[2] (optional) |
+ | |
+ | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |
+ | |
+ | |
+ | HMAC (256 bits) |
+ | (optional) |
+ | |
+ | |
+ | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ where:
+
+ o Next Header: 8-bit selector. Identifies the type of header
+ immediately following the SRH.
+
+   o  Hdr Ext Len: 8-bit unsigned integer; the length of the SRH in
+      8-octet units, not including the first 8 octets.
+
+ o Routing Type: TBD, to be assigned by IANA (suggested value: 4).
+
+ o Segments Left. Defined in [RFC2460], it contains the index, in
+ the Segment List, of the next segment to inspect. Segments Left
+ is decremented at each segment and it is used as an index in the
+ segment list.
+
+ o First Segment: offset in the SRH, not including the first 8 octets
+ and expressed in 16-octet units, pointing to the last element of
+ the segment list, which is in fact the first segment of the
+ segment routing path.
+
+   o  Flags: 16 bits of flags.  The following flags are defined:
+
+ 1
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |C|P|R|R| Policy Flags |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+         C-flag: Clean-up flag.  Set when the SRH has to be removed
+         from the packet when the packet reaches the last segment.
+
+         P-flag: Protected flag.  Set when the packet has been rerouted
+         through an FRR mechanism by an SR endpoint node.  See Section
+         6.3 for more details.
+
+         R-flags.  Reserved for future use.
+
+ Policy Flags. Define the type of the IPv6 addresses encoded
+ into the Policy List (see below). The following have been
+ defined:
+
+
+ Bits 4-6: determine the type of the first element after the
+ segment list.
+
+ Bits 7-9: determine the type of the second element.
+
+ Bits 10-12: determine the type of the third element.
+
+ Bits 13-15: determine the type of the fourth element.
+
+ The following values are used for the type:
+
+            0x0: Not present.  If the value is set to 0x0, the element
+            represented by these bits is not present.
+
+ 0x1: SR Ingress.
+
+ 0x2: SR Egress.
+
+ 0x3: Original Source Address.
+
+ o HMAC Key ID and HMAC field, and their use are defined in
+ [I-D.vyncke-6man-segment-routing-security].
+
+   o  Segment List[n]: a 128-bit IPv6 address representing the nth
+      segment in the Segment List.  The Segment List is encoded
+      starting from the last segment of the path.  I.e., the first
+      element of the segment list (Segment List[0]) contains the last
+      segment of the path while the last element of the Segment List
+      (Segment List[n]) contains the first segment of the path.  The
+      index contained in "Segments Left" identifies the current active
+      segment.
+
+ o Policy List. Optional addresses representing specific nodes in
+ the SR path such as:
+
+         SR Ingress: a 128-bit generic identifier representing the
+         ingress in the SR domain (i.e.: it need not be a valid IPv6
+         address).
+
+         SR Egress: a 128-bit generic identifier representing the
+         egress in the SR domain (i.e.: it need not be a valid IPv6
+         address).
+
+ Original Source Address: IPv6 address originally present in the
+ SA field of the packet.
+
+ The segments in the Policy List are encoded after the segment list
+ and they are optional. If none are in the SRH, all bits of the
+ Policy List Flags MUST be set to 0x0.
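+
+   As an implementation-oriented illustration, a C sketch of the fixed
+   part of this header and of the policy-flags extraction follows; it
+   matches the ip6_sr_header_t layout assumed by the sr.c code added
+   later in this patch (using vnet's u8/u16/ip6_address_t types):
+
+      typedef struct
+      {
+        u8 protocol;        /* Next Header */
+        u8 length;          /* Hdr Ext Len: length in 8-octet units,
+                               not including the first 8 octets */
+        u8 type;            /* Routing Type (suggested value 4) */
+        u8 segments_left;   /* index of the next segment to inspect */
+        u8 first_segment;   /* index of the last element of the
+                               segment list, i.e. the first segment
+                               of the path */
+        u16 flags;          /* C | P | R | R | Policy Flags */
+        u8 hmac_key;        /* HMAC Key ID, 0 if no HMAC */
+        ip6_address_t segments[0];  /* segment list, then the optional
+                                       policy list and HMAC */
+      } __attribute__ ((packed)) ip6_sr_header_t;
+
+      /* Extract the 3-bit type of policy-list element pl_index (1..4)
+         from the flags field (in host byte order; bit 0 is the most
+         significant bit of the 16-bit field, so element 1 occupies
+         bits 4-6, element 4 bits 13-15). */
+      static inline int
+      ip6_sr_policy_list_flags (u16 flags_host_byte_order, int pl_index)
+      {
+        if (pl_index < 1 || pl_index > 4)
+          return 0;
+        return (flags_host_byte_order >> (12 - 3 * pl_index)) & 0x7;
+      }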
+
+5.2.1. SRH and RFC2460 behavior
+
+   Since the SRH is a new type of Routing Header, it has the same
+   properties:
+
+ SHOULD only appear once in the packet.
+
+ Only the router whose address is in the DA field of the packet
+ header MUST inspect the SRH.
+
+ Therefore, Segment Routing in IPv6 networks implies that the segment
+ identifier (i.e.: the IPv6 address of the segment) is moved into the
+ DA of the packet.
+
+ The DA of the packet changes at each segment termination/completion
+ and therefore the original DA of the packet MUST be encoded as the
+ last segment of the path.
+
+ As illustrated in Section 3.2, nodes that are within the path of a
+ segment will forward packets based on the DA of the packet without
+ inspecting the SRH. This ensures full interoperability between SR-
+ capable and non-SR-capable nodes.
+
+6. SRH Procedures
+
+ In this section we describe the different procedures on the SRH.
+
+6.1. Segment Routing Operations
+
+ When Segment Routing is instantiated over the IPv6 data plane the
+ following applies:
+
+ o The segment list is encoded in the SRH.
+
+ o The active segment is in the destination address of the packet.
+
+ o The Segment Routing CONTINUE operation (as described in
+ [I-D.filsfils-spring-segment-routing]) is implemented as a
+ regular/plain IPv6 operation consisting of DA based forwarding.
+
+   o  The NEXT operation is implemented through the update of the DA
+      with the value of the next segment in the Segment List of the
+      SRH.
+
+ o The PUSH operation is implemented through the insertion of the SRH
+ or the insertion of additional segments in the SRH segment list.
+
+
+6.2. Segment Routing Node Functions
+
+   SR packets are forwarded to segment endpoints (i.e.: nodes whose
+   address is in the DA field of the packet).  The segment endpoint,
+   when receiving an SR packet destined to itself, does the following:
+
+ o Inspect the SRH.
+
+ o Determine the next active segment.
+
+ o Update the Segments Left field (or, if requested, remove the SRH
+ from the packet).
+
+ o Update the DA.
+
+ o Send the packet to the next segment.
+
+   The procedures applied to the SRH depend on the node function.  The
+   following node functions are defined:
+
+ Ingress SR Node.
+
+ Transit Non-SR Node.
+
+ Transit SR Intra Segment Node.
+
+ SR Endpoint Node.
+
+6.2.1. Ingress SR Node
+
+   The ingress node can be a router at the edge of the SR domain or an
+   SR-capable host.  The ingress SR node may obtain the segment list by
+   either:
+
+ Local path computation.
+
+ Local configuration.
+
+ Interaction with an SDN controller delivering the path as a
+ complete SRH.
+
+ Any other mechanism (mechanisms through which the path is acquired
+ are outside the scope of this document).
+
+ When creating the SRH (either at ingress node or in the SDN
+ controller) the following is done:
+
+ Next Header and Hdr Ext Len fields are set according to [RFC2460].
+
+      Routing Type field is set to TBD (SRH).
+
+ The Segment List is built with the FIRST segment of the path
+ encoded in the LAST element of the Segment List. Subsequent
+ segments are encoded on top of the first segment. Finally, the
+ LAST segment of the path is encoded in the FIRST element of the
+ Segment List. In other words, the Segment List is encoded in the
+ reverse order of the path.
+
+ The original DA of the packet is encoded as the last segment of
+ the path (encoded in the first element of the Segment List).
+
+      The DA of the packet is set with the value of the first segment
+      (found in the last element of the segment list).
+
+      The Segments Left field is set to n-1, where n is the number of
+      elements in the Segment List.
+
+ The packet is sent out towards the first segment (i.e.:
+ represented in the packet DA).
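+
+   As a sketch only (using the ip6_sr_header_t layout from Section 5.2;
+   the helper name is hypothetical), the reverse encoding amounts to:
+
+      /* hops[0..k-1] is the explicit path in travel order.  The
+         Segment List gets n = k + 1 elements: the original DA becomes
+         the final segment, stored in Segment List[0]. */
+      static void
+      sr_ingress_encode (ip6_header_t * ip, ip6_sr_header_t * h,
+                         const ip6_address_t * hops, int k)
+      {
+        int i, n = k + 1;
+
+        h->segments[0] = ip->dst_address;  /* last segment of the path */
+        for (i = 0; i < k; i++)
+          h->segments[n - 1 - i] = hops[i];  /* reverse path order */
+
+        h->first_segment = n - 1;
+        h->segments_left = n - 1;  /* index of the first segment */
+        ip->dst_address = h->segments[n - 1];  /* DA = first segment */
+      }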
+
+6.2.1.1. Security at Ingress
+
+ The procedures related to the Segment Routing security are detailed
+ in [I-D.vyncke-6man-segment-routing-security].
+
+ In the case where the SR domain boundaries are not under control of
+ the network operator (e.g.: when the SR domain edge is in a home
+ network), it is important to authenticate and validate the content of
+   any SRH being received by the network operator.  In such a case, the
+ security procedure described in
+ [I-D.vyncke-6man-segment-routing-security] is to be used.
+
+ The ingress node (e.g.: the host in the home network) requests the
+ SRH from a control system (e.g.: an SDN controller) which delivers
+ the SRH with its HMAC signature on it.
+
+ Then, the home network host can send out SR packets (with an SRH on
+ it) that will be validated at the ingress of the network operator
+ infrastructure.
+
+   The ingress node of the network operator infrastructure is
+   configured to validate the incoming SRH HMACs so as to accept only
+   packets whose SRH is correct for their SA/DA addresses.
+
+6.2.2. Transit Non-SR Capable Node
+
+   SR is interoperable with plain IPv6 forwarding.  Any non-SR-capable
+   node will forward SR packets solely based on the DA; no SRH
+   inspection takes place.  This ensures full interoperability between
+   SR and non-SR nodes.
+
+6.2.3. SR Intra Segment Transit Node
+
+   Only the node whose address is in the DA inspects and processes the
+   SRH (according to [RFC2460]).  An intra-segment transit node's
+   address is not in the DA; it forwards the packet based on the DA
+   using its SR-IPv6 FIB.
+
+6.2.4. SR Segment Endpoint Node
+
+ The SR segment endpoint node is the node whose address is in the DA.
+ The segment endpoint node inspects the SRH and does:
+
+ 1. IF DA = myself (segment endpoint)
+ 2. IF Segments Left > 0 THEN
+ decrement Segments Left
+ update DA with Segment List[Segments Left]
+ 3. ELSE IF Segments List[Segments Left] <> DA THEN
+ update DA with Segments List[Segments Left]
+ IF Clean-up bit is set THEN remove the SRH
+ 4. ELSE give the packet to next PID (application)
+ End of processing.
+ 5. Forward the packet out
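+
+   The same logic, rendered as a C sketch (the forwarding step and
+   error handling are omitted; ip6_address_is_equal compares two IPv6
+   addresses):
+
+      static void
+      sr_endpoint_process (ip6_header_t * ip, ip6_sr_header_t * h,
+                           int cleanup_bit_set)
+      {
+        if (h->segments_left > 0)
+          {
+            h->segments_left -= 1;
+            ip->dst_address = h->segments[h->segments_left];
+          }
+        else if (!ip6_address_is_equal (&h->segments[h->segments_left],
+                                        &ip->dst_address))
+          {
+            ip->dst_address = h->segments[h->segments_left];
+            if (cleanup_bit_set)
+              {
+                /* remove the SRH from the packet here */
+              }
+          }
+        else
+          {
+            /* give the packet to the next PID (application) */
+          }
+        /* forward the packet out */
+      }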
+
+6.3. FRR Flag Settings
+
+   A node supporting SR and doing Fast Reroute (as described in
+   [I-D.filsfils-spring-segment-routing-use-cases]), when rerouting
+   packets through FRR mechanisms, SHOULD inspect the rerouted packet
+   header and look for the SRH.  If the SRH is present, the rerouting
+   node SHOULD set the Protected bit on all rerouted packets.
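+
+   In the sr.c implementation added by this patch, this corresponds to
+   setting the Protected flag in the (network byte order) SRH flags
+   field, e.g. (sketch, assuming sr points to the SRH):
+
+      sr->flags |= clib_host_to_net_u16 (IP6_SR_HEADER_FLAG_PROTECTED);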
+
+7. SR and Tunneling
+
+ Encapsulation can be realized in two different ways with SR-IPv6:
+
+ Outer encapsulation.
+
+ SRH with SA/DA original addresses.
+
+ Outer encapsulation tunneling is the traditional method where an
+ additional IPv6 header is prepended to the packet. The original IPv6
+ header being encapsulated, everything is preserved and the packet is
+ switched/routed according to the outer header (that could contain a
+ SRH).
+
+   SRH allows encoding both the original SA and DA, hence an operator
+   may decide to change the SA/DA at ingress and restore them at
+   egress.  This can be achieved without outer encapsulation, by
+   changing SA/DA and encoding the original SA in the Policy List and
+   the original DA in the Segment List.
+
+8. Example Use Case
+
+   A more detailed description of use cases is available in
+   [I-D.ietf-spring-ipv6-use-cases].  In this section, a simple SR-IPv6
+   example is illustrated.
+
+   The topology described in Figure 6 assumes an end-to-end SR
+   deployment: SR is supported by all nodes from A to J.
+
+ Home Network | Backbone | Datacenter
+ | |
+ | +---+ +---+ +---+ | +---+ |
+ +---|---| C |---| D |---| E |---|---| I |---|
+ | | +---+ +---+ +---+ | +---+ |
+ | | | | | | | | +---+
+ +---+ +---+ | | | | | | |--| X |
+ | A |---| B | | +---+ +---+ +---+ | +---+ | +---+
+ +---+ +---+ | | F |---| G |---| H |---|---| J |---|
+ | +---+ +---+ +---+ | +---+ |
+ | |
+ | +-----------+
+ | SDN |
+ | Orch/Ctlr |
+ +-----------+
+
+ Figure 6: Sample SR topology
+
+ The following workflow applies to packets sent by host A and destined
+ to server X.
+
+ . Host A sends a request for a path to server X to the SDN
+ controller or orchestration system.
+
+      .  The SDN controller/orchestrator builds an SRH with:
+ . Segment List: C, F, J, X
+ . HMAC
+ that satisfies the requirements expressed in the request
+ by host A and based on policies applicable to host A.
+
+      .  Host A receives the SRH and inserts it into the packet.
+ The packet has now:
+ . SA: A
+ . DA: C
+ . SRH with
+ . SL: X, J, F, C
+ . Segments Left: 3 (i.e.: Segment List size - 1)
+ . PL: C (ingress), J (egress)
+ Note that X is the last segment and C is the
+ first segment (i.e.: the SL is encoded in the reverse
+ path order).
+ . HMAC
+
+      .  When the packet arrives at C (first segment), C does:
+ . Validate the HMAC of the SRH.
+ . Decrement Segments Left by one: 2
+ . Update the DA with the next segment found in
+ Segment List[2]. DA is set to F.
+ . Forward the packet to F.
+
+      .  When the packet arrives at F (second segment), F does:
+ . Decrement Segments Left by one: 1
+ . Update the DA with the next segment found in
+ Segment List[1]. DA is set to J.
+ . Forward the packet to J.
+
+      .  The packet travels across nodes G and H, which do plain
+         IPv6 forwarding based on the DA.  No SRH inspection is
+         needed in these nodes.  However, any SR-capable node is
+         allowed to set the Protected bit in case of FRR
+         protection.
+
+      .  When the packet arrives at J (third segment), J does:
+ . Decrement Segments Left by one: 0
+ . Update the DA with the next segment found in
+ Segment List[0]. DA is set to X.
+ . If the cleanup bit is set, then node J will strip out
+ the SRH from the packet.
+ . Forward the packet to X.
+
+   The packet arrives at the server, which may or may not support SR.
+   The return traffic, from server to host, may be sent using the same
+   procedures.
+
+9. IANA Considerations
+
+ TBD
+
+10. Manageability Considerations
+
+ TBD
+
+11. Security Considerations
+
+ Security mechanisms applied to Segment Routing over IPv6 networks are
+ detailed in [I-D.vyncke-6man-segment-routing-security].
+
+12. Contributors
+
+ The authors would like to thank Dave Barach, John Leddy, John
+ Brzozowski, Pierre Francois, Nagendra Kumar, Mark Townsley, Christian
+ Martin, Roberta Maglione, Eric Vyncke, James Connolly, David Lebrun
+ and Fred Baker for their contribution to this document.
+
+13. Acknowledgements
+
+ TBD
+
+14. References
+
+14.1. Normative References
+
+ [RFC2119] Bradner, S., "Key words for use in RFCs to Indicate
+ Requirement Levels", BCP 14, RFC 2119, March 1997.
+
+ [RFC2460] Deering, S. and R. Hinden, "Internet Protocol, Version 6
+ (IPv6) Specification", RFC 2460, December 1998.
+
+14.2. Informative References
+
+ [I-D.filsfils-spring-segment-routing]
+ Filsfils, C., Previdi, S., Bashandy, A., Decraene, B.,
+ Litkowski, S., Horneffer, M., Milojevic, I., Shakir, R.,
+ Ytti, S., Henderickx, W., Tantsura, J., and E. Crabbe,
+ "Segment Routing Architecture", draft-filsfils-spring-
+ segment-routing-04 (work in progress), July 2014.
+
+ [I-D.filsfils-spring-segment-routing-mpls]
+ Filsfils, C., Previdi, S., Bashandy, A., Decraene, B.,
+ Litkowski, S., Horneffer, M., Milojevic, I., Shakir, R.,
+ Ytti, S., Henderickx, W., Tantsura, J., and E. Crabbe,
+ "Segment Routing with MPLS data plane", draft-filsfils-
+ spring-segment-routing-mpls-03 (work in progress), August
+ 2014.
+
+ [I-D.filsfils-spring-segment-routing-use-cases]
+ Filsfils, C., Francois, P., Previdi, S., Decraene, B.,
+ Litkowski, S., Horneffer, M., Milojevic, I., Shakir, R.,
+ Ytti, S., Henderickx, W., Tantsura, J., Kini, S., and E.
+ Crabbe, "Segment Routing Use Cases", draft-filsfils-
+ spring-segment-routing-use-cases-01 (work in progress),
+ October 2014.
+
+ [I-D.ietf-isis-segment-routing-extensions]
+ Previdi, S., Filsfils, C., Bashandy, A., Gredler, H.,
+ Litkowski, S., Decraene, B., and J. Tantsura, "IS-IS
+ Extensions for Segment Routing", draft-ietf-isis-segment-
+ routing-extensions-03 (work in progress), October 2014.
+
+ [I-D.ietf-spring-ipv6-use-cases]
+ Brzozowski, J., Leddy, J., Leung, I., Previdi, S.,
+ Townsley, W., Martin, C., Filsfils, C., and R. Maglione,
+ "IPv6 SPRING Use Cases", draft-ietf-spring-ipv6-use-
+ cases-03 (work in progress), November 2014.
+
+ [I-D.psenak-ospf-segment-routing-ospfv3-extension]
+ Psenak, P., Previdi, S., Filsfils, C., Gredler, H.,
+ Shakir, R., Henderickx, W., and J. Tantsura, "OSPFv3
+ Extensions for Segment Routing", draft-psenak-ospf-
+ segment-routing-ospfv3-extension-02 (work in progress),
+ July 2014.
+
+ [I-D.vyncke-6man-segment-routing-security]
+ Vyncke, E. and S. Previdi, "IPv6 Segment Routing Header
+ (SRH) Security Considerations", July 2014.
+
+ [RFC1940] Estrin, D., Li, T., Rekhter, Y., Varadhan, K., and D.
+ Zappala, "Source Demand Routing: Packet Format and
+ Forwarding Specification (Version 1)", RFC 1940, May 1996.
+
+Authors' Addresses
+
+ Stefano Previdi (editor)
+ Cisco Systems, Inc.
+ Via Del Serafico, 200
+ Rome 00142
+ Italy
+
+ Email: sprevidi@cisco.com
+
+
+ Clarence Filsfils
+ Cisco Systems, Inc.
+ Brussels
+ BE
+
+ Email: cfilsfil@cisco.com
+
+
+ Brian Field
+ Comcast
+ 4100 East Dry Creek Road
+ Centennial, CO 80122
+ US
+
+ Email: Brian_Field@cable.comcast.com
+
+
+ Ida Leung
+ Rogers Communications
+ 8200 Dixie Road
+ Brampton, ON L6T 0C1
+ CA
+
+ Email: Ida.Leung@rci.rogers.com
diff --git a/src/vnet/sr/sr.c b/src/vnet/sr/sr.c
new file mode 100644
index 00000000000..5d0275d992a
--- /dev/null
+++ b/src/vnet/sr/sr.c
@@ -0,0 +1,3333 @@
+/*
+ * sr.c: ipv6 segment routing
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ * @brief Segment Routing main functions
+ *
+ */
+#include <vnet/vnet.h>
+#include <vnet/sr/sr.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/dpo/dpo.h>
+
+#include <openssl/hmac.h>
+
+ip6_sr_main_t sr_main;
+static vlib_node_registration_t sr_local_node;
+
+/**
+ * @brief Dynamically added SR DPO type
+ */
+static dpo_type_t sr_dpo_type;
+
+/**
+ * @brief Use passed HMAC key in ip6_sr_header_t in OpenSSL HMAC routines
+ *
+ * @param sm ip6_sr_main_t *
+ * @param ip ip6_header_t *
+ * @param sr ip6_sr_header_t *
+ */
+void
+sr_fix_hmac (ip6_sr_main_t * sm, ip6_header_t * ip, ip6_sr_header_t * sr)
+{
+ u32 key_index;
+ static u8 *keybuf;
+ u8 *copy_target;
+ int first_segment;
+ ip6_address_t *addrp;
+ int i;
+ ip6_sr_hmac_key_t *hmac_key;
+ u32 sig_len;
+
+ key_index = sr->hmac_key;
+
+ /* No signature? Pass... */
+ if (key_index == 0)
+ return;
+
+ /* We don't know about this key? Fail... */
+ if (key_index >= vec_len (sm->hmac_keys))
+ return;
+
+ hmac_key = sm->hmac_keys + key_index;
+
+ vec_reset_length (keybuf);
+
+ /* pkt ip6 src address */
+ vec_add2 (keybuf, copy_target, sizeof (ip6_address_t));
+ clib_memcpy (copy_target, ip->src_address.as_u8, sizeof (ip6_address_t));
+
+ /* first segment */
+ vec_add2 (keybuf, copy_target, 1);
+ copy_target[0] = sr->first_segment;
+
+ /* octet w/ bit 0 = "clean" flag */
+ vec_add2 (keybuf, copy_target, 1);
+ copy_target[0]
+ = (sr->flags & clib_host_to_net_u16 (IP6_SR_HEADER_FLAG_CLEANUP))
+ ? 0x80 : 0;
+
+ /* hmac key id */
+ vec_add2 (keybuf, copy_target, 1);
+ copy_target[0] = sr->hmac_key;
+
+ first_segment = sr->first_segment;
+
+ addrp = sr->segments;
+
+ /* segments */
+ for (i = 0; i <= first_segment; i++)
+ {
+ vec_add2 (keybuf, copy_target, sizeof (ip6_address_t));
+ clib_memcpy (copy_target, addrp->as_u8, sizeof (ip6_address_t));
+ addrp++;
+ }
+
+ addrp++;
+
+ HMAC_CTX_init (sm->hmac_ctx);
+ if (!HMAC_Init (sm->hmac_ctx, hmac_key->shared_secret,
+ vec_len (hmac_key->shared_secret), sm->md))
+ clib_warning ("barf1");
+ if (!HMAC_Update (sm->hmac_ctx, keybuf, vec_len (keybuf)))
+ clib_warning ("barf2");
+ if (!HMAC_Final (sm->hmac_ctx, (unsigned char *) addrp, &sig_len))
+ clib_warning ("barf3");
+ HMAC_CTX_cleanup (sm->hmac_ctx);
+}
+
+/**
+ * @brief Format function for decoding various SR flags
+ *
+ * @param s u8 * - formatted string
+ * @param args va_list * - u16 flags
+ *
+ * @return formatted output string u8 *
+ */
+u8 *
+format_ip6_sr_header_flags (u8 * s, va_list * args)
+{
+ u16 flags = (u16) va_arg (*args, int);
+ u8 pl_flag;
+ int bswap_needed = va_arg (*args, int);
+ int i;
+
+ if (bswap_needed)
+ flags = clib_host_to_net_u16 (flags);
+
+ if (flags & IP6_SR_HEADER_FLAG_CLEANUP)
+ s = format (s, "cleanup ");
+
+ if (flags & IP6_SR_HEADER_FLAG_PROTECTED)
+ s = format (s, "reroute ");
+
+ s = format (s, "pl: ");
+ for (i = 1; i <= 4; i++)
+ {
+ pl_flag = ip6_sr_policy_list_flags (flags, i);
+ s = format (s, "[%d] ", i);
+
+ switch (pl_flag)
+ {
+ case IP6_SR_HEADER_FLAG_PL_ELT_NOT_PRESENT:
+ s = format (s, "NotPr ");
+ break;
+ case IP6_SR_HEADER_FLAG_PL_ELT_INGRESS_PE:
+ s = format (s, "InPE ");
+ break;
+ case IP6_SR_HEADER_FLAG_PL_ELT_EGRESS_PE:
+ s = format (s, "EgPE ");
+ break;
+
+ case IP6_SR_HEADER_FLAG_PL_ELT_ORIG_SRC_ADDR:
+ s = format (s, "OrgSrc ");
+ break;
+ }
+ }
+ return s;
+}
+
+/**
+ * @brief Format function for decoding ip6_sr_header_t
+ *
+ * @param s u8 * - formatted string
+ * @param args va_list * - ip6_sr_header_t
+ *
+ * @return formatted output string u8 *
+ */
+u8 *
+format_ip6_sr_header (u8 * s, va_list * args)
+{
+ ip6_sr_header_t *h = va_arg (*args, ip6_sr_header_t *);
+ ip6_address_t placeholder_addr =
+ { {254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254,
+ 254, 254}
+ };
+ int print_hmac = va_arg (*args, int);
+ int i, pl_index, max_segs;
+ int flags_host_byte_order = clib_net_to_host_u16 (h->flags);
+
+ s = format (s, "next proto %d, len %d, type %d",
+ h->protocol, (h->length << 3) + 8, h->type);
+ s = format (s, "\n segs left %d, first_segment %d, hmac key %d",
+ h->segments_left, h->first_segment, h->hmac_key);
+ s = format (s, "\n flags %U", format_ip6_sr_header_flags,
+ flags_host_byte_order, 0 /* bswap needed */ );
+
+ /*
+ * Header length is in 8-byte units (minus one), so
+ * divide by 2 to ascertain the number of ip6 addresses in the
+ * segment list
+ */
+ max_segs = (h->length >> 1);
+
+ if (!print_hmac && h->hmac_key)
+ max_segs -= 2;
+
+ s = format (s, "\n Segments (in processing order):");
+
+ for (i = h->first_segment; i >= 1; i--)
+ s = format (s, "\n %U", format_ip6_address, h->segments + i);
+ if (ip6_address_is_equal (&placeholder_addr, h->segments))
+ s = format (s, "\n (empty placeholder)");
+ else
+ s = format (s, "\n %U", format_ip6_address, h->segments);
+
+ s = format (s, "\n Policy List:");
+
+ pl_index = 1; /* to match the RFC text */
+ for (i = (h->first_segment + 1); i < max_segs; i++, pl_index++)
+ {
+ char *tag;
+ char *tags[] = { " ", "InPE: ", "EgPE: ", "OrgSrc: " };
+
+ tag = tags[0];
+ if (pl_index >= 1 && pl_index <= 4)
+ {
+ int this_pl_flag = ip6_sr_policy_list_flags
+ (flags_host_byte_order, pl_index);
+ tag = tags[this_pl_flag];
+ }
+
+ s = format (s, "\n %s%U", tag, format_ip6_address, h->segments + i);
+ }
+
+ return s;
+}
+
+/**
+ * @brief Format function for decoding ip6_sr_header_t with length
+ *
+ * @param s u8 * - formatted string
+ * @param args va_list * - ip6_header_t + ip6_sr_header_t
+ *
+ * @return formatted output string u8 *
+ */
+u8 *
+format_ip6_sr_header_with_length (u8 * s, va_list * args)
+{
+ ip6_header_t *h = va_arg (*args, ip6_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+ uword header_bytes;
+
+ header_bytes = sizeof (h[0]) + sizeof (ip6_sr_header_t);
+ if (max_header_bytes != 0 && header_bytes > max_header_bytes)
+ return format (s, "ip6_sr header truncated");
+
+ s = format (s, "IP6: %U\n", format_ip6_header, h, max_header_bytes);
+ s =
+ format (s, "SR: %U\n", format_ip6_sr_header, (ip6_sr_header_t *) (h + 1),
+ 0 /* print_hmac */ , max_header_bytes);
+ return s;
+}
+
+/**
+ * @brief Defined valid next nodes
+ * @note Cannot call replicate yet without DPDK
+*/
+#if DPDK > 0
+#define foreach_sr_rewrite_next \
+_(ERROR, "error-drop") \
+_(IP6_LOOKUP, "ip6-lookup") \
+_(SR_LOCAL, "sr-local") \
+_(SR_REPLICATE,"sr-replicate")
+#else
+#define foreach_sr_rewrite_next \
+_(ERROR, "error-drop") \
+_(IP6_LOOKUP, "ip6-lookup") \
+_(SR_LOCAL, "sr-local")
+#endif /* DPDK */
+
+/**
+ * @brief Struct for defined valid next nodes
+*/
+typedef enum
+{
+#define _(s,n) SR_REWRITE_NEXT_##s,
+ foreach_sr_rewrite_next
+#undef _
+ SR_REWRITE_N_NEXT,
+} sr_rewrite_next_t;
+
+/**
+ * @brief Struct for data for SR rewrite packet trace
+ */
+typedef struct
+{
+ ip6_address_t src, dst;
+ u16 length;
+ u32 next_index;
+ u32 tunnel_index;
+ u8 sr[256];
+} sr_rewrite_trace_t;
+
+/**
+ * @brief Error strings for SR rewrite
+ */
+static char *sr_rewrite_error_strings[] = {
+#define sr_error(n,s) s,
+#include "sr_error.def"
+#undef sr_error
+};
+
+/**
+ * @brief Struct for SR rewrite error strings
+ */
+typedef enum
+{
+#define sr_error(n,s) SR_REWRITE_ERROR_##n,
+#include "sr_error.def"
+#undef sr_error
+ SR_REWRITE_N_ERROR,
+} sr_rewrite_error_t;
+
+
+/**
+ * @brief Format function for SR rewrite trace.
+ */
+u8 *
+format_sr_rewrite_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ sr_rewrite_trace_t *t = va_arg (*args, sr_rewrite_trace_t *);
+ ip6_sr_main_t *sm = &sr_main;
+ ip6_sr_tunnel_t *tun = pool_elt_at_index (sm->tunnels, t->tunnel_index);
+ ip6_fib_t *rx_fib, *tx_fib;
+
+ rx_fib = ip6_fib_get (tun->rx_fib_index);
+ tx_fib = ip6_fib_get (tun->tx_fib_index);
+
+ s = format
+ (s, "SR-REWRITE: next %s ip6 src %U dst %U len %u\n"
+ " rx-fib-id %d tx-fib-id %d\n%U",
+ (t->next_index == SR_REWRITE_NEXT_SR_LOCAL)
+ ? "sr-local" : "ip6-lookup",
+ format_ip6_address, &t->src,
+ format_ip6_address, &t->dst, t->length,
+ rx_fib->table_id, tx_fib->table_id,
+ format_ip6_sr_header, t->sr, 0 /* print_hmac */ );
+ return s;
+}
+
+/**
+ * @brief Main processing dual-loop for Segment Routing Rewrite
+ * @node sr-rewrite
+ *
+ * @param vm vlib_main_t *
+ * @param node vlib_node_runtime_t *
+ * @param from_frame vlib_frame_t *
+ *
+ * @return from_frame->n_vectors uword
+ */
+static uword
+sr_rewrite (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, *from, *to_next;
+ ip6_sr_main_t *sm = &sr_main;
+ u32 (*sr_local_cb) (vlib_main_t *, vlib_node_runtime_t *,
+ vlib_buffer_t *, ip6_header_t *, ip6_sr_header_t *);
+ sr_local_cb = sm->sr_local_cb;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+	  /* Note: the 2x (dual-buffer) loop below is disabled by the
+	   * leading "0 &&" term; only the single-buffer loop further
+	   * down is currently used. */
+ while (0 && n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ ip6_header_t *ip0, *ip1;
+ ip6_sr_header_t *sr0, *sr1;
+ ip6_sr_tunnel_t *t0, *t1;
+ u32 next0 = SR_REWRITE_NEXT_IP6_LOOKUP;
+ u32 next1 = SR_REWRITE_NEXT_IP6_LOOKUP;
+ u16 new_l0 = 0;
+ u16 new_l1 = 0;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /*
+ * $$$ parse through header(s) to pick the point
+	   * where we punch in the SR extension header
+ */
+ t0 =
+ pool_elt_at_index (sm->tunnels,
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
+ t1 =
+ pool_elt_at_index (sm->tunnels,
+ vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
+
+ ASSERT (VLIB_BUFFER_PRE_DATA_SIZE
+ >= ((word) vec_len (t0->rewrite)) + b0->current_data);
+ ASSERT (VLIB_BUFFER_PRE_DATA_SIZE
+ >= ((word) vec_len (t1->rewrite)) + b1->current_data);
+
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->tx_fib_index;
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->tx_fib_index;
+
+ ip0 = vlib_buffer_get_current (b0);
+ ip1 = vlib_buffer_get_current (b1);
+#if DPDK > 0 /* Cannot call replication node yet without DPDK */
+ /* add a replication node */
+ if (PREDICT_FALSE (t0->policy_index != ~0))
+ {
+ vnet_buffer (b0)->ip.save_protocol = t0->policy_index;
+ next0 = SR_REWRITE_NEXT_SR_REPLICATE;
+ sr0 = (ip6_sr_header_t *) (t0->rewrite);
+ goto processnext;
+ }
+#endif /* DPDK */
+
+ /*
+ * SR-unaware service chaining case: pkt coming back from
+ * service has the original dst address, and will already
+ * have an SR header. If so, send it to sr-local
+ */
+ if (PREDICT_FALSE (ip0->protocol == IPPROTO_IPV6_ROUTE))
+ {
+ vlib_buffer_advance (b0, sizeof (ip0));
+ sr0 = (ip6_sr_header_t *) (ip0 + 1);
+ new_l0 = clib_net_to_host_u16 (ip0->payload_length);
+ next0 = SR_REWRITE_NEXT_SR_LOCAL;
+ }
+ else
+ {
+ u32 len_bytes = sizeof (ip6_header_t);
+ u8 next_hdr = ip0->protocol;
+
+ /* HBH must immediately follow ipv6 header */
+ if (PREDICT_FALSE
+ (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ ip6_hop_by_hop_ext_t *ext_hdr =
+ (ip6_hop_by_hop_ext_t *) ip6_next_header (ip0);
+ len_bytes +=
+ ip6_ext_header_len ((ip6_ext_header_t *) ext_hdr);
+ /* Ignoring the sr_local for now, if RH follows HBH here */
+ next_hdr = ext_hdr->next_hdr;
+ ext_hdr->next_hdr = IPPROTO_IPV6_ROUTE;
+ }
+ else
+ {
+ ip0->protocol = IPPROTO_IPV6_ROUTE; /* routing extension header */
+ }
+ /*
+ * Copy data before the punch-in point left by the
+ * required amount. Assume (for the moment) that only
+ * the main packet header needs to be copied.
+ */
+ clib_memcpy (((u8 *) ip0) - vec_len (t0->rewrite),
+ ip0, len_bytes);
+ vlib_buffer_advance (b0, -(word) vec_len (t0->rewrite));
+ ip0 = vlib_buffer_get_current (b0);
+ sr0 = (ip6_sr_header_t *) ((u8 *) ip0 + len_bytes);
+ /* $$$ tune */
+ clib_memcpy (sr0, t0->rewrite, vec_len (t0->rewrite));
+
+ /* Fix the next header chain */
+ sr0->protocol = next_hdr;
+
+ new_l0 = clib_net_to_host_u16 (ip0->payload_length) +
+ vec_len (t0->rewrite);
+ ip0->payload_length = clib_host_to_net_u16 (new_l0);
+
+ /* Copy dst address into the DA slot in the segment list */
+ clib_memcpy (sr0->segments, ip0->dst_address.as_u64,
+ sizeof (ip6_address_t));
+ /* Rewrite the ip6 dst address with the first hop */
+ clib_memcpy (ip0->dst_address.as_u64, t0->first_hop.as_u64,
+ sizeof (ip6_address_t));
+
+ sr_fix_hmac (sm, ip0, sr0);
+
+ next0 = sr_local_cb ? sr_local_cb (vm, node, b0, ip0, sr0) :
+ next0;
+
+ /*
+ * Ignore "do not rewrite" shtik in this path
+ */
+ if (PREDICT_FALSE (next0 & 0x80000000))
+ {
+ next0 ^= 0xFFFFFFFF;
+ if (PREDICT_FALSE (next0 == SR_REWRITE_NEXT_ERROR))
+ b0->error = node->errors[SR_REWRITE_ERROR_APP_CALLBACK];
+ }
+ }
+#if DPDK > 0 /* Cannot call replication node yet without DPDK */
+ processnext:
+ /* add a replication node */
+ if (PREDICT_FALSE (t1->policy_index != ~0))
+ {
+ vnet_buffer (b1)->ip.save_protocol = t1->policy_index;
+ next1 = SR_REWRITE_NEXT_SR_REPLICATE;
+ sr1 = (ip6_sr_header_t *) (t1->rewrite);
+ goto trace00;
+ }
+#endif /* DPDK */
+ if (PREDICT_FALSE (ip1->protocol == IPPROTO_IPV6_ROUTE))
+ {
+ vlib_buffer_advance (b1, sizeof (ip1));
+ sr1 = (ip6_sr_header_t *) (ip1 + 1);
+ new_l1 = clib_net_to_host_u16 (ip1->payload_length);
+ next1 = SR_REWRITE_NEXT_SR_LOCAL;
+ }
+ else
+ {
+ u32 len_bytes = sizeof (ip6_header_t);
+ u8 next_hdr = ip1->protocol;
+
+ /* HBH must immediately follow ipv6 header */
+ if (PREDICT_FALSE
+ (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ ip6_hop_by_hop_ext_t *ext_hdr =
+ (ip6_hop_by_hop_ext_t *) ip6_next_header (ip1);
+ len_bytes +=
+ ip6_ext_header_len ((ip6_ext_header_t *) ext_hdr);
+ /* Ignoring the sr_local for now, if RH follows HBH here */
+ next_hdr = ext_hdr->next_hdr;
+ ext_hdr->next_hdr = IPPROTO_IPV6_ROUTE;
+ }
+ else
+ {
+ ip1->protocol = IPPROTO_IPV6_ROUTE;
+ }
+ /*
+ * Copy data before the punch-in point left by the
+ * required amount. Assume (for the moment) that only
+ * the main packet header needs to be copied.
+ */
+ clib_memcpy (((u8 *) ip1) - vec_len (t1->rewrite),
+ ip1, len_bytes);
+ vlib_buffer_advance (b1, -(word) vec_len (t1->rewrite));
+ ip1 = vlib_buffer_get_current (b1);
+ sr1 = (ip6_sr_header_t *) ((u8 *) ip1 + len_bytes);
+ clib_memcpy (sr1, t1->rewrite, vec_len (t1->rewrite));
+
+ sr1->protocol = next_hdr;
+ new_l1 = clib_net_to_host_u16 (ip1->payload_length) +
+ vec_len (t1->rewrite);
+ ip1->payload_length = clib_host_to_net_u16 (new_l1);
+
+ /* Copy dst address into the DA slot in the segment list */
+ clib_memcpy (sr1->segments, ip1->dst_address.as_u64,
+ sizeof (ip6_address_t));
+ /* Rewrite the ip6 dst address with the first hop */
+ clib_memcpy (ip1->dst_address.as_u64, t1->first_hop.as_u64,
+ sizeof (ip6_address_t));
+
+ sr_fix_hmac (sm, ip1, sr1);
+
+ next1 = sr_local_cb ? sr_local_cb (vm, node, b1, ip1, sr1) :
+ next1;
+
+ /*
+ * Ignore "do not rewrite" shtik in this path
+ */
+ if (PREDICT_FALSE (next1 & 0x80000000))
+ {
+ next1 ^= 0xFFFFFFFF;
+ if (PREDICT_FALSE (next1 == SR_REWRITE_NEXT_ERROR))
+ b1->error = node->errors[SR_REWRITE_ERROR_APP_CALLBACK];
+ }
+ }
+#if DPDK > 0 /* Cannot run replicate without DPDK and only replicate uses this label */
+ trace00:
+#endif /* DPDK */
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ sr_rewrite_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->tunnel_index = t0 - sm->tunnels;
+ clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8,
+ sizeof (tr->src.as_u8));
+ clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8,
+ sizeof (tr->dst.as_u8));
+ tr->length = new_l0;
+ tr->next_index = next0;
+ if (sr0)
+ clib_memcpy (tr->sr, sr0, sizeof (tr->sr));
+ }
+ if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ sr_rewrite_trace_t *tr = vlib_add_trace (vm, node,
+ b1, sizeof (*tr));
+ tr->tunnel_index = t1 - sm->tunnels;
+ clib_memcpy (tr->src.as_u8, ip1->src_address.as_u8,
+ sizeof (tr->src.as_u8));
+ clib_memcpy (tr->dst.as_u8, ip1->dst_address.as_u8,
+ sizeof (tr->dst.as_u8));
+ tr->length = new_l1;
+ tr->next_index = next1;
+ if (sr1)
+ clib_memcpy (tr->sr, sr1, sizeof (tr->sr));
+ }
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ ip6_header_t *ip0 = 0;
+ ip6_sr_header_t *sr0 = 0;
+ ip6_sr_tunnel_t *t0;
+ u32 next0 = SR_REWRITE_NEXT_IP6_LOOKUP;
+ u16 new_l0 = 0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+
+ /*
+ * $$$ parse through header(s) to pick the point
+	   * where we punch in the SR extension header
+ */
+ t0 =
+ pool_elt_at_index (sm->tunnels,
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
+#if DPDK > 0 /* Cannot call replication node yet without DPDK */
+ /* add a replication node */
+ if (PREDICT_FALSE (t0->policy_index != ~0))
+ {
+ vnet_buffer (b0)->ip.save_protocol = t0->policy_index;
+ next0 = SR_REWRITE_NEXT_SR_REPLICATE;
+ sr0 = (ip6_sr_header_t *) (t0->rewrite);
+ goto trace0;
+ }
+#endif /* DPDK */
+
+ ASSERT (VLIB_BUFFER_PRE_DATA_SIZE
+ >= ((word) vec_len (t0->rewrite)) + b0->current_data);
+
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->tx_fib_index;
+
+ ip0 = vlib_buffer_get_current (b0);
+
+ /*
+ * SR-unaware service chaining case: pkt coming back from
+ * service has the original dst address, and will already
+ * have an SR header. If so, send it to sr-local
+ */
+ if (PREDICT_FALSE (ip0->protocol == IPPROTO_IPV6_ROUTE))
+ {
+ vlib_buffer_advance (b0, sizeof (ip0));
+ sr0 = (ip6_sr_header_t *) (ip0 + 1);
+ new_l0 = clib_net_to_host_u16 (ip0->payload_length);
+ next0 = SR_REWRITE_NEXT_SR_LOCAL;
+ }
+ else
+ {
+ u32 len_bytes = sizeof (ip6_header_t);
+ u8 next_hdr = ip0->protocol;
+
+ /* HBH must immediately follow ipv6 header */
+ if (PREDICT_FALSE
+ (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ ip6_hop_by_hop_ext_t *ext_hdr =
+ (ip6_hop_by_hop_ext_t *) ip6_next_header (ip0);
+ len_bytes +=
+ ip6_ext_header_len ((ip6_ext_header_t *) ext_hdr);
+ next_hdr = ext_hdr->next_hdr;
+ ext_hdr->next_hdr = IPPROTO_IPV6_ROUTE;
+ /* Ignoring the sr_local for now, if RH follows HBH here */
+ }
+ else
+ {
+ ip0->protocol = IPPROTO_IPV6_ROUTE; /* routing extension header */
+ }
+ /*
+ * Copy data before the punch-in point left by the
+ * required amount. Assume (for the moment) that only
+ * the main packet header needs to be copied.
+ */
+ clib_memcpy (((u8 *) ip0) - vec_len (t0->rewrite),
+ ip0, len_bytes);
+ vlib_buffer_advance (b0, -(word) vec_len (t0->rewrite));
+ ip0 = vlib_buffer_get_current (b0);
+ sr0 = (ip6_sr_header_t *) ((u8 *) ip0 + len_bytes);
+ /* $$$ tune */
+ clib_memcpy (sr0, t0->rewrite, vec_len (t0->rewrite));
+
+ /* Fix the next header chain */
+ sr0->protocol = next_hdr;
+ new_l0 = clib_net_to_host_u16 (ip0->payload_length) +
+ vec_len (t0->rewrite);
+ ip0->payload_length = clib_host_to_net_u16 (new_l0);
+
+ /* Copy dst address into the DA slot in the segment list */
+ clib_memcpy (sr0->segments, ip0->dst_address.as_u64,
+ sizeof (ip6_address_t));
+ /* Rewrite the ip6 dst address with the first hop */
+ clib_memcpy (ip0->dst_address.as_u64, t0->first_hop.as_u64,
+ sizeof (ip6_address_t));
+
+ sr_fix_hmac (sm, ip0, sr0);
+
+ next0 = sr_local_cb ? sr_local_cb (vm, node, b0, ip0, sr0) :
+ next0;
+
+ /*
+ * Ignore "do not rewrite" shtik in this path
+ */
+ if (PREDICT_FALSE (next0 & 0x80000000))
+ {
+ next0 ^= 0xFFFFFFFF;
+ if (PREDICT_FALSE (next0 == SR_REWRITE_NEXT_ERROR))
+ b0->error = node->errors[SR_REWRITE_ERROR_APP_CALLBACK];
+ }
+ }
+#if DPDK > 0 /* Cannot run replicate without DPDK and only replicate uses this label */
+ trace0:
+#endif /* DPDK */
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ sr_rewrite_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->tunnel_index = t0 - sm->tunnels;
+ if (ip0)
+ {
+ memcpy (tr->src.as_u8, ip0->src_address.as_u8,
+ sizeof (tr->src.as_u8));
+ memcpy (tr->dst.as_u8, ip0->dst_address.as_u8,
+ sizeof (tr->dst.as_u8));
+ }
+ tr->length = new_l0;
+ tr->next_index = next0;
+ if (sr0)
+ clib_memcpy (tr->sr, sr0, sizeof (tr->sr));
+ }
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return from_frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (sr_rewrite_node) = {
+ .function = sr_rewrite,
+ .name = "sr-rewrite",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .format_trace = format_sr_rewrite_trace,
+ .format_buffer = format_ip6_sr_header_with_length,
+
+ .n_errors = SR_REWRITE_N_ERROR,
+ .error_strings = sr_rewrite_error_strings,
+
+ .runtime_data_bytes = 0,
+
+ .n_next_nodes = SR_REWRITE_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [SR_REWRITE_NEXT_##s] = n,
+ foreach_sr_rewrite_next
+#undef _
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (sr_rewrite_node, sr_rewrite)
+/* *INDENT-ON* */
+
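+/**
+ * @brief Delete the SR-sourced FIB entry for a tunnel destination prefix
+ *
+ * @param dst_address_arg ip6_address_t * - tunnel destination prefix
+ * @param dst_address_length u32 - prefix length
+ * @param rx_table_id u32 - FIB table id to remove the entry from
+ *
+ * @return retval int
+ */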
+static int
+ip6_delete_route_no_next_hop (ip6_address_t * dst_address_arg,
+ u32 dst_address_length, u32 rx_table_id)
+{
+ fib_prefix_t pfx = {
+ .fp_len = dst_address_length,
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_addr = {
+ .ip6 = *dst_address_arg,
+ }
+ };
+
+ fib_table_entry_delete (fib_table_id_find_fib_index (FIB_PROTOCOL_IP6,
+ rx_table_id),
+ &pfx, FIB_SOURCE_SR);
+
+ return 0;
+}
+
+/**
+ * @brief Find or add if not found - HMAC shared secret
+ *
+ * @param sm ip6_sr_main_t *
+ * @param secret u8 *
+ * @param indexp u32 *
+ *
+ * @return ip6_sr_hmac_key_t *
+ */
+static ip6_sr_hmac_key_t *
+find_or_add_shared_secret (ip6_sr_main_t * sm, u8 * secret, u32 * indexp)
+{
+ uword *p;
+ ip6_sr_hmac_key_t *key = 0;
+ int i;
+
+ p = hash_get_mem (sm->hmac_key_by_shared_secret, secret);
+
+ if (p)
+ {
+ key = vec_elt_at_index (sm->hmac_keys, p[0]);
+ if (indexp)
+ *indexp = p[0];
+ return (key);
+ }
+
+ /* Specific key ID? */
+ if (indexp && *indexp)
+ {
+ vec_validate (sm->hmac_keys, *indexp);
+ key = sm->hmac_keys + *indexp;
+ }
+ else
+ {
+ for (i = 0; i < vec_len (sm->hmac_keys); i++)
+ {
+ if (sm->hmac_keys[i].shared_secret == 0)
+ {
+ key = sm->hmac_keys + i;
+ goto found;
+ }
+ }
+ vec_validate (sm->hmac_keys, i);
+ key = sm->hmac_keys + i;
+ found:
+ ;
+ }
+
+ key->shared_secret = vec_dup (secret);
+
+ hash_set_mem (sm->hmac_key_by_shared_secret, key->shared_secret,
+ key - sm->hmac_keys);
+
+ if (indexp)
+ *indexp = key - sm->hmac_keys;
+ return (key);
+}
+
+/**
+ * @brief Add or Delete a Segment Routing tunnel.
+ *
+ * @param a ip6_sr_add_del_tunnel_args_t *
+ *
+ * @return retval int
+ */
+int
+ip6_sr_add_del_tunnel (ip6_sr_add_del_tunnel_args_t * a)
+{
+ ip6_main_t *im = &ip6_main;
+ ip6_sr_tunnel_key_t key;
+ ip6_sr_tunnel_t *t;
+ uword *p, *n;
+ ip6_sr_header_t *h = 0;
+ u32 header_length;
+ ip6_address_t *addrp, *this_address;
+ ip6_sr_main_t *sm = &sr_main;
+ u8 *key_copy;
+ u32 rx_fib_index, tx_fib_index;
+ u32 hmac_key_index_u32;
+ u8 hmac_key_index = 0;
+ ip6_sr_policy_t *pt;
+ int i;
+ dpo_id_t dpo = DPO_INVALID;
+
+ /* Make sure that the rx FIB exists */
+ p = hash_get (im->fib_index_by_table_id, a->rx_table_id);
+
+ if (p == 0)
+ return -3;
+
+ /* remember the FIB index */
+ rx_fib_index = p[0];
+
+ /* Make sure that the supplied FIB exists */
+ p = hash_get (im->fib_index_by_table_id, a->tx_table_id);
+
+ if (p == 0)
+ return -4;
+
+ /* remember the FIB index */
+ tx_fib_index = p[0];
+
+ clib_memcpy (key.src.as_u8, a->src_address->as_u8, sizeof (key.src));
+ clib_memcpy (key.dst.as_u8, a->dst_address->as_u8, sizeof (key.dst));
+
+ /* When adding a tunnel:
+ * - If a "name" is given, it must not exist.
+ * - The "key" is always checked, and must not exist.
+ * When deleting a tunnel:
+ * - If the "name" is given, and it exists, then use it.
+ * - If the "name" is not given, use the "key".
+ * - If the "name" and the "key" are given, then both must point to the same
+ * thing.
+ */
+
+ /* Lookup the key */
+ p = hash_get_mem (sm->tunnel_index_by_key, &key);
+
+ /* If the name is given, look it up */
+ if (a->name)
+ n = hash_get_mem (sm->tunnel_index_by_name, a->name);
+ else
+ n = 0;
+
+ /* validate key/name parameters */
+ if (!a->is_del) /* adding a tunnel */
+ {
+ if (a->name && n) /* name given & exists already */
+ return -1;
+ if (p) /* key exists already */
+ return -1;
+ }
+ else /* deleting a tunnel */
+ {
+ if (!p) /* key doesn't exist */
+ return -2;
+ if (a->name && !n) /* name given & it doesn't exist */
+ return -2;
+
+ if (n) /* name given & found */
+ {
+ if (n[0] != p[0]) /* name and key do not point to the same thing */
+ return -2;
+ }
+ }
+
+
+ if (a->is_del) /* delete the tunnel */
+ {
+ hash_pair_t *hp;
+
+ /* Delete existing tunnel */
+ t = pool_elt_at_index (sm->tunnels, p[0]);
+
+ ip6_delete_route_no_next_hop (&t->key.dst, t->dst_mask_width,
+ a->rx_table_id);
+ vec_free (t->rewrite);
+ /* Remove tunnel from any policy if associated */
+ if (t->policy_index != ~0)
+ {
+ pt = pool_elt_at_index (sm->policies, t->policy_index);
+ for (i = 0; i < vec_len (pt->tunnel_indices); i++)
+ {
+ if (pt->tunnel_indices[i] == t - sm->tunnels)
+ {
+ vec_delete (pt->tunnel_indices, 1, i);
+ goto found;
+ }
+ }
+ clib_warning ("Tunnel index %d not found in policy_index %d",
+ t - sm->tunnels, pt - sm->policies);
+ found:
+ /* If this is last tunnel in the policy, clean up the policy too */
+ if (vec_len (pt->tunnel_indices) == 0)
+ {
+ hash_unset_mem (sm->policy_index_by_policy_name, pt->name);
+ vec_free (pt->name);
+ pool_put (sm->policies, pt);
+ }
+ }
+
+ /* Clean up the tunnel by name */
+ if (t->name)
+ {
+ hash_unset_mem (sm->tunnel_index_by_name, t->name);
+ vec_free (t->name);
+ }
+ pool_put (sm->tunnels, t);
+ hp = hash_get_pair (sm->tunnel_index_by_key, &key);
+ key_copy = (void *) (hp->key);
+ hash_unset_mem (sm->tunnel_index_by_key, &key);
+ vec_free (key_copy);
+ return 0;
+ }
+
+ /* create a new tunnel */
+ pool_get (sm->tunnels, t);
+ memset (t, 0, sizeof (*t));
+ t->policy_index = ~0;
+
+ clib_memcpy (&t->key, &key, sizeof (t->key));
+ t->dst_mask_width = a->dst_mask_width;
+ t->rx_fib_index = rx_fib_index;
+ t->tx_fib_index = tx_fib_index;
+
+ if (!vec_len (a->segments))
+ /* there must be at least one segment... */
+ return -4;
+
+ /* The first specified hop goes right into the dst address */
+ clib_memcpy (&t->first_hop, &a->segments[0], sizeof (ip6_address_t));
+
+ /*
+ * Create the sr header rewrite string
+ * The list of segments needs an extra slot for the ultimate destination
+ * which is taken from the packet we add the SRH to.
+ */
+ header_length = sizeof (*h) +
+ sizeof (ip6_address_t) * (vec_len (a->segments) + 1 + vec_len (a->tags));
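+
+  /*
+   * Worked example (illustrative, assuming the fixed portion of
+   * ip6_sr_header_t is 8 bytes): with 3 segments and no tags,
+   * header_length = 8 + 16 * (3 + 1) = 72 bytes, so h->length below
+   * becomes 72/8 - 1 = 8 (units of 8 octets, excluding the first 8).
+   */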
+
+ if (a->shared_secret)
+ {
+ /* Allocate a new key slot if we don't find the secret key */
+ hmac_key_index_u32 = 0;
+ (void) find_or_add_shared_secret (sm, a->shared_secret,
+ &hmac_key_index_u32);
+
+      /* HMAC key ids are 8 bits wide; fail if we have run out of key slots */
+ if (hmac_key_index_u32 >= 256)
+ return -5;
+ hmac_key_index = hmac_key_index_u32;
+ header_length += SHA256_DIGEST_LENGTH;
+ }
+
+ vec_validate (t->rewrite, header_length - 1);
+
+ h = (ip6_sr_header_t *) t->rewrite;
+
+ h->protocol = 0xFF; /* we don't know yet */
+
+ h->length = (header_length / 8) - 1;
+ h->type = ROUTING_HEADER_TYPE_SR;
+
+  /* first_segment and segments_left need to hold the index of the last
+   * element in the list; a->segments has one element fewer than what ends
+   * up in the header (it does not include the DA), so vec_len(a->segments)
+   * is the value we want.
+   */
+ h->first_segment = h->segments_left = vec_len (a->segments);
+
+ if (a->shared_secret)
+ h->hmac_key = hmac_key_index & 0xFF;
+
+ h->flags = a->flags_net_byte_order;
+
+ /* Paint on the segment list, in reverse.
+ * This is offset by one to leave room at the start for the ultimate
+ * destination.
+ */
+ addrp = h->segments + vec_len (a->segments);
+
+ vec_foreach (this_address, a->segments)
+ {
+ clib_memcpy (addrp->as_u8, this_address->as_u8, sizeof (ip6_address_t));
+ addrp--;
+ }
+
+ /*
+ * Since the ultimate destination address is not yet known, set that slot
+ * to a value we will instantly recognize as bogus.
+ */
+ memset (h->segments, 0xfe, sizeof (ip6_address_t));
+
+ /* Paint on the tag list, not reversed */
+ addrp = h->segments + vec_len (a->segments);
+
+ vec_foreach (this_address, a->tags)
+ {
+ clib_memcpy (addrp->as_u8, this_address->as_u8, sizeof (ip6_address_t));
+ addrp++;
+ }
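+
+  /*
+   * Illustrative resulting layout: for segments A, B, C and no tags,
+   * segments[0] = the 0xfe.. placeholder (ultimate dst, filled in per
+   * packet), segments[1] = C, segments[2] = B, segments[3] = A, with
+   * first_segment = segments_left = 3.
+   */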
+
+ key_copy = vec_new (ip6_sr_tunnel_key_t, 1);
+ clib_memcpy (key_copy, &key, sizeof (ip6_sr_tunnel_key_t));
+ hash_set_mem (sm->tunnel_index_by_key, key_copy, t - sm->tunnels);
+
+ /*
+ * Stick the tunnel index into the rewrite header.
+ *
+ * Unfortunately, inserting an SR header according to the various
+ * RFC's requires parsing through the ip6 header, perhaps consing a
+ * buffer onto the head of the vlib_buffer_t, etc. We don't use the
+ * normal reverse bcopy rewrite code.
+ *
+ * We don't handle ugly RFC-related cases yet, but I'm sure PL will complain
+ * at some point...
+ */
+ dpo_set (&dpo, sr_dpo_type, DPO_PROTO_IP6, t - sm->tunnels);
+
+ fib_prefix_t pfx = {
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_len = a->dst_mask_width,
+ .fp_addr = {
+ .ip6 = *a->dst_address,
+ }
+ };
+ fib_table_entry_special_dpo_add (rx_fib_index,
+ &pfx,
+ FIB_SOURCE_SR,
+ FIB_ENTRY_FLAG_EXCLUSIVE, &dpo);
+ dpo_reset (&dpo);
+
+ if (a->policy_name)
+ {
+ p = hash_get_mem (sm->policy_index_by_policy_name, a->policy_name);
+ if (p)
+ {
+ pt = pool_elt_at_index (sm->policies, p[0]);
+ }
+ else /* no policy, lets create one */
+ {
+ pool_get (sm->policies, pt);
+ memset (pt, 0, sizeof (*pt));
+ pt->name = format (0, "%s%c", a->policy_name, 0);
+ hash_set_mem (sm->policy_index_by_policy_name, pt->name,
+ pt - sm->policies);
+ p = hash_get_mem (sm->policy_index_by_policy_name, a->policy_name);
+ }
+ vec_add1 (pt->tunnel_indices, t - sm->tunnels);
+ if (p == 0)
+ clib_warning ("p is NULL!");
+ t->policy_index = p ? p[0] : ~0; /* equiv. to (pt - sm->policies) */
+ }
+
+ if (a->name)
+ {
+ t->name = format (0, "%s%c", a->name, 0);
+ hash_set_mem (sm->tunnel_index_by_name, t->name, t - sm->tunnels);
+ }
+
+ return 0;
+}
+
+/**
+ * @brief no-op lock function.
+ * The lifetime of the SR entry is managed by the control plane
+ */
+static void
+sr_dpo_lock (dpo_id_t * dpo)
+{
+}
+
+/**
+ * @brief no-op unlock function.
+ * The lifetime of the SR entry is managed by the control plane
+ */
+static void
+sr_dpo_unlock (dpo_id_t * dpo)
+{
+}
+
+u8 *
+format_sr_dpo (u8 * s, va_list * args)
+{
+ index_t index = va_arg (*args, index_t);
+ CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
+
+ return (format (s, "SR: tunnel:[%d]", index));
+}
+
+const static dpo_vft_t sr_vft = {
+ .dv_lock = sr_dpo_lock,
+ .dv_unlock = sr_dpo_unlock,
+ .dv_format = format_sr_dpo,
+};
+
+const static char *const sr_ip6_nodes[] = {
+ "sr-rewrite",
+ NULL,
+};
+
+const static char *const *const sr_nodes[DPO_PROTO_NUM] = {
+ [DPO_PROTO_IP6] = sr_ip6_nodes,
+};
+
+/**
+ * @brief CLI parser for Add or Delete a Segment Routing tunnel.
+ *
+ * @param vm vlib_main_t *
+ * @param input unformat_input_t *
+ * @param cmd vlib_cli_command_t *
+ *
+ * @return error clib_error_t *
+ */
+static clib_error_t *
+sr_add_del_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ int is_del = 0;
+ ip6_address_t src_address;
+ int src_address_set = 0;
+ ip6_address_t dst_address;
+ u32 dst_mask_width;
+ int dst_address_set = 0;
+ u16 flags = 0;
+ u8 *shared_secret = 0;
+ u8 *name = 0;
+ u8 *policy_name = 0;
+ u32 rx_table_id = 0;
+ u32 tx_table_id = 0;
+ ip6_address_t *segments = 0;
+ ip6_address_t *this_seg;
+ ip6_address_t *tags = 0;
+ ip6_address_t *this_tag;
+ ip6_sr_add_del_tunnel_args_t _a, *a = &_a;
+ ip6_address_t next_address, tag;
+ int pl_index;
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "del"))
+ is_del = 1;
+ else if (unformat (input, "rx-fib-id %d", &rx_table_id))
+ ;
+ else if (unformat (input, "tx-fib-id %d", &tx_table_id))
+ ;
+ else if (unformat (input, "src %U", unformat_ip6_address, &src_address))
+ src_address_set = 1;
+ else if (unformat (input, "name %s", &name))
+ ;
+ else if (unformat (input, "policy %s", &policy_name))
+ ;
+ else if (unformat (input, "dst %U/%d",
+ unformat_ip6_address, &dst_address, &dst_mask_width))
+ dst_address_set = 1;
+ else if (unformat (input, "next %U", unformat_ip6_address,
+ &next_address))
+ {
+ vec_add2 (segments, this_seg, 1);
+ clib_memcpy (this_seg->as_u8, next_address.as_u8,
+ sizeof (*this_seg));
+ }
+ else if (unformat (input, "tag %U", unformat_ip6_address, &tag))
+ {
+ vec_add2 (tags, this_tag, 1);
+ clib_memcpy (this_tag->as_u8, tag.as_u8, sizeof (*this_tag));
+ }
+ else if (unformat (input, "clean"))
+ flags |= IP6_SR_HEADER_FLAG_CLEANUP;
+ else if (unformat (input, "protected"))
+ flags |= IP6_SR_HEADER_FLAG_PROTECTED;
+ else if (unformat (input, "key %s", &shared_secret))
+ /* Do not include the trailing NULL byte. Guaranteed interop issue */
+ _vec_len (shared_secret) -= 1;
+ else if (unformat (input, "InPE %d", &pl_index))
+ {
+ if (pl_index <= 0 || pl_index > 4)
+ {
+ pl_index_range_error:
+ return clib_error_return
+ (0, "Policy List Element Index %d out of range (1-4)",
+ pl_index);
+
+ }
+ flags |= IP6_SR_HEADER_FLAG_PL_ELT_INGRESS_PE
+ << ip6_sr_policy_list_shift_from_index (pl_index);
+ }
+ else if (unformat (input, "EgPE %d", &pl_index))
+ {
+ if (pl_index <= 0 || pl_index > 4)
+ goto pl_index_range_error;
+ flags |= IP6_SR_HEADER_FLAG_PL_ELT_EGRESS_PE
+ << ip6_sr_policy_list_shift_from_index (pl_index);
+ }
+ else if (unformat (input, "OrgSrc %d", &pl_index))
+ {
+ if (pl_index <= 0 || pl_index > 4)
+ goto pl_index_range_error;
+ flags |= IP6_SR_HEADER_FLAG_PL_ELT_ORIG_SRC_ADDR
+ << ip6_sr_policy_list_shift_from_index (pl_index);
+ }
+ else
+ break;
+ }
+
+ if (!src_address_set)
+ return clib_error_return (0, "src address required");
+
+ if (!dst_address_set)
+ return clib_error_return (0, "dst address required");
+
+ if (!segments)
+ return clib_error_return (0, "at least one sr segment required");
+
+ memset (a, 0, sizeof (*a));
+ a->src_address = &src_address;
+ a->dst_address = &dst_address;
+ a->dst_mask_width = dst_mask_width;
+ a->segments = segments;
+ a->tags = tags;
+ a->flags_net_byte_order = clib_host_to_net_u16 (flags);
+ a->is_del = is_del;
+ a->rx_table_id = rx_table_id;
+ a->tx_table_id = tx_table_id;
+ a->shared_secret = shared_secret;
+
+ if (vec_len (name))
+ a->name = name;
+ else
+ a->name = 0;
+
+ if (vec_len (policy_name))
+ a->policy_name = policy_name;
+ else
+ a->policy_name = 0;
+
+ rv = ip6_sr_add_del_tunnel (a);
+
+ vec_free (segments);
+ vec_free (tags);
+ vec_free (shared_secret);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case -1:
+ return clib_error_return (0, "SR tunnel src %U dst %U already exists",
+ format_ip6_address, &src_address,
+ format_ip6_address, &dst_address);
+
+ case -2:
+ return clib_error_return (0, "SR tunnel src %U dst %U does not exist",
+ format_ip6_address, &src_address,
+ format_ip6_address, &dst_address);
+
+    case -3:
+      return clib_error_return (0, "rx FIB table %d does not exist",
+				rx_table_id);
+
+    case -4:
+      return clib_error_return (0, "tx FIB table %d does not exist",
+				tx_table_id);
+
+    case -6:
+      return clib_error_return (0, "at least one segment is required");
+
+ default:
+ return clib_error_return (0, "BUG: ip6_sr_add_del_tunnel returns %d",
+ rv);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (sr_tunnel_command, static) = {
+ .path = "sr tunnel",
+  .short_help =
+    "sr tunnel [del] [name <name>] src <addr> dst <addr>/<mask> [next <addr>] "
+    "[clean] [protected] [key <secret>] [policy <policy_name>] "
+    "[rx-fib-id <fib_id>] [tx-fib-id <fib_id>]",
+ .function = sr_add_del_tunnel_command_fn,
+};
+/* *INDENT-ON* */
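+
+/*
+ * Example (illustrative only; 2001:db8:: addresses are documentation
+ * placeholders):
+ *   sr tunnel name t1 src 2001:db8::1 dst 2001:db8::2/128
+ *             next 2001:db8::10 next 2001:db8::11 clean
+ */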
+
+/**
+ * @brief Display Segment Routing tunnel
+ *
+ * @param vm vlib_main_t *
+ * @param t ip6_sr_tunnel_t *
+ *
+ */
+void
+ip6_sr_tunnel_display (vlib_main_t * vm, ip6_sr_tunnel_t * t)
+{
+ ip6_sr_main_t *sm = &sr_main;
+ ip6_fib_t *rx_fib, *tx_fib;
+ ip6_sr_policy_t *pt;
+
+ rx_fib = ip6_fib_get (t->rx_fib_index);
+ tx_fib = ip6_fib_get (t->tx_fib_index);
+
+ if (t->name)
+ vlib_cli_output (vm, "sr tunnel name: %s", (char *) t->name);
+
+ vlib_cli_output (vm, "src %U dst %U first hop %U",
+ format_ip6_address, &t->key.src,
+ format_ip6_address, &t->key.dst,
+ format_ip6_address, &t->first_hop);
+ vlib_cli_output (vm, " rx-fib-id %d tx-fib-id %d",
+ rx_fib->table_id, tx_fib->table_id);
+ vlib_cli_output (vm, " sr: %U", format_ip6_sr_header, t->rewrite,
+ 0 /* print_hmac */ );
+
+ if (t->policy_index != ~0)
+ {
+ pt = pool_elt_at_index (sm->policies, t->policy_index);
+ vlib_cli_output (vm, "sr policy: %s", (char *) pt->name);
+ }
+ vlib_cli_output (vm, "-------");
+
+ return;
+}
+
+/**
+ * @brief CLI Parser for Display Segment Routing tunnel
+ *
+ * @param vm vlib_main_t *
+ * @param input unformat_input_t *
+ * @param cmd vlib_cli_command_t *
+ *
+ * @return error clib_error_t *
+ */
+static clib_error_t *
+show_sr_tunnel_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ static ip6_sr_tunnel_t **tunnels;
+ ip6_sr_tunnel_t *t;
+ ip6_sr_main_t *sm = &sr_main;
+ int i;
+ uword *p = 0;
+ u8 *name = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "name %s", &name))
+ {
+ p = hash_get_mem (sm->tunnel_index_by_name, name);
+ if (!p)
+ vlib_cli_output (vm, "No SR tunnel with name: %s. Showing all.",
+ name);
+ }
+ else
+ break;
+ }
+
+ vec_reset_length (tunnels);
+
+  if (!p)			/* Either name param not passed or no tunnel with that name found; show all */
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (t, sm->tunnels,
+ ({
+ vec_add1 (tunnels, t);
+ }));
+ /* *INDENT-ON* */
+ }
+ else /* Just show the one tunnel by name */
+ vec_add1 (tunnels, &sm->tunnels[p[0]]);
+
+ if (vec_len (tunnels) == 0)
+ vlib_cli_output (vm, "No SR tunnels configured");
+
+ for (i = 0; i < vec_len (tunnels); i++)
+ {
+ t = tunnels[i];
+ ip6_sr_tunnel_display (vm, t);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_sr_tunnel_command, static) = {
+ .path = "show sr tunnel",
+ .short_help = "show sr tunnel [name <sr-tunnel-name>]",
+ .function = show_sr_tunnel_fn,
+};
+/* *INDENT-ON* */
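+
+/* Example (illustrative): "show sr tunnel name t1"; omit the name to
+   list all configured tunnels. */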
+
+/**
+ * @brief Add or Delete a Segment Routing policy
+ *
+ * @param a ip6_sr_add_del_policy_args_t *
+ *
+ * @return retval int
+ */
+int
+ip6_sr_add_del_policy (ip6_sr_add_del_policy_args_t * a)
+{
+ ip6_sr_main_t *sm = &sr_main;
+ uword *p;
+ ip6_sr_tunnel_t *t = 0;
+ ip6_sr_policy_t *policy;
+ u32 *tunnel_indices = 0;
+ int i;
+
+
+
+ if (a->is_del)
+ {
+ p = hash_get_mem (sm->policy_index_by_policy_name, a->name);
+ if (!p)
+ return -6; /* policy name not found */
+
+ policy = pool_elt_at_index (sm->policies, p[0]);
+
+ vec_foreach_index (i, policy->tunnel_indices)
+ {
+ t = pool_elt_at_index (sm->tunnels, policy->tunnel_indices[i]);
+ t->policy_index = ~0;
+ }
+ hash_unset_mem (sm->policy_index_by_policy_name, a->name);
+ pool_put (sm->policies, policy);
+ return 0;
+ }
+
+
+ if (!vec_len (a->tunnel_names))
+    return -3;			/* at least one tunnel name is required */
+
+ vec_reset_length (tunnel_indices);
+ /* Check tunnel names, add tunnel_index to policy */
+ for (i = 0; i < vec_len (a->tunnel_names); i++)
+ {
+ p = hash_get_mem (sm->tunnel_index_by_name, a->tunnel_names[i]);
+      if (!p)
+	return -4;		/* tunnel name not found */
+
+      t = pool_elt_at_index (sm->tunnels, p[0]);
+      /*
+         No need to check t == 0; the -3 check above ensures a name was
+         given, and the lookup above succeeded.
+       */
+      if (t->policy_index != ~0)
+	return -5;		/* tunnel name already associated with a policy */
+
+      /* Add to tunnel indices */
+      vec_add1 (tunnel_indices, p[0]);
+ }
+
+ /* Add policy to ip6_sr_main_t */
+ pool_get (sm->policies, policy);
+ policy->name = a->name;
+ policy->tunnel_indices = tunnel_indices;
+ hash_set_mem (sm->policy_index_by_policy_name, policy->name,
+ policy - sm->policies);
+
+  /* Arguably overkill, but setting policy_index on each tunnel should be
+     the last step, after everything else in ip6_sr_main_t is in place.
+     If this is deemed overly cautious, it could be done in the
+     vec_len (tunnel_names) loop above.
+   */
+ for (i = 0; i < vec_len (policy->tunnel_indices); i++)
+ {
+ t = pool_elt_at_index (sm->tunnels, policy->tunnel_indices[i]);
+ t->policy_index = policy - sm->policies;
+ }
+
+ return 0;
+}
+
+/**
+ * @brief CLI Parser for Add or Delete a Segment Routing policy
+ *
+ * @param vm vlib_main_t *
+ * @param input unformat_input_t *
+ * @param cmd vlib_cli_command_t *
+ *
+ * @return error clib_error_t *
+ */
+static clib_error_t *
+sr_add_del_policy_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ int is_del = 0;
+ u8 **tunnel_names = 0;
+ u8 *tunnel_name = 0;
+ u8 *name = 0;
+ ip6_sr_add_del_policy_args_t _a, *a = &_a;
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "del"))
+ is_del = 1;
+ else if (unformat (input, "name %s", &name))
+ ;
+ else if (unformat (input, "tunnel %s", &tunnel_name))
+ {
+ if (tunnel_name)
+ {
+ vec_add1 (tunnel_names, tunnel_name);
+ tunnel_name = 0;
+ }
+ }
+ else
+ break;
+ }
+
+ if (!name)
+ return clib_error_return (0, "name of SR policy required");
+
+
+ memset (a, 0, sizeof (*a));
+
+ a->is_del = is_del;
+ a->name = name;
+ a->tunnel_names = tunnel_names;
+
+ rv = ip6_sr_add_del_policy (a);
+
+ vec_free (tunnel_names);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case -3:
+      return clib_error_return (0,
+				"tunnel name to associate with SR policy is required");
+
+ case -4:
+ return clib_error_return (0, "tunnel name not found");
+
+ case -5:
+ return clib_error_return (0, "tunnel already associated with policy");
+
+ case -6:
+ return clib_error_return (0, "policy name %s not found", name);
+
+ case -7:
+ return clib_error_return (0, "TODO: deleting policy name %s", name);
+
+ default:
+ return clib_error_return (0, "BUG: ip6_sr_add_del_policy returns %d",
+ rv);
+
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (sr_policy_command, static) = {
+ .path = "sr policy",
+ .short_help =
+ "sr policy [del] name <policy-name> tunnel <sr-tunnel-name> [tunnel <sr-tunnel-name>]*",
+ .function = sr_add_del_policy_command_fn,
+};
+/* *INDENT-ON* */
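+
+/*
+ * Example (illustrative): "sr policy name p1 tunnel t1 tunnel t2"
+ * associates previously created tunnels t1 and t2 with policy p1.
+ */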
+
+/**
+ * @brief CLI Parser for Displaying Segment Routing policy
+ *
+ * @param vm vlib_main_t *
+ * @param input unformat_input_t *
+ * @param cmd vlib_cli_command_t *
+ *
+ * @return error clib_error_t *
+ */
+static clib_error_t *
+show_sr_policy_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ static ip6_sr_policy_t **policies;
+ ip6_sr_policy_t *policy;
+ ip6_sr_tunnel_t *t;
+ ip6_sr_main_t *sm = &sr_main;
+ int i, j;
+ uword *p = 0;
+ u8 *name = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "name %s", &name))
+ {
+ p = hash_get_mem (sm->policy_index_by_policy_name, name);
+ if (!p)
+ vlib_cli_output (vm,
+ "policy with name %s not found. Showing all.",
+ name);
+ }
+ else
+ break;
+ }
+
+ vec_reset_length (policies);
+
+  if (!p)			/* Either name param not passed or no policy with that name found; show all */
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (policy, sm->policies,
+ ({
+ vec_add1 (policies, policy);
+ }));
+ /* *INDENT-ON* */
+ }
+ else /* Just show the one policy by name and a summary of tunnel names */
+ {
+ policy = pool_elt_at_index (sm->policies, p[0]);
+ vec_add1 (policies, policy);
+ }
+
+ if (vec_len (policies) == 0)
+ vlib_cli_output (vm, "No SR policies configured");
+
+ for (i = 0; i < vec_len (policies); i++)
+ {
+ policy = policies[i];
+
+ if (policy->name)
+ vlib_cli_output (vm, "SR policy name: %s", (char *) policy->name);
+ for (j = 0; j < vec_len (policy->tunnel_indices); j++)
+ {
+ t = pool_elt_at_index (sm->tunnels, policy->tunnel_indices[j]);
+ ip6_sr_tunnel_display (vm, t);
+ }
+ }
+
+ return 0;
+
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_sr_policy_command, static) = {
+ .path = "show sr policy",
+ .short_help = "show sr policy [name <sr-policy-name>]",
+ .function = show_sr_policy_fn,
+};
+/* *INDENT-ON* */
+
+/**
+ * @brief Add or Delete a mapping of IP6 multicast address
+ * to Segment Routing policy.
+ *
+ * @param a ip6_sr_add_del_multicastmap_args_t *
+ *
+ * @return retval int
+ */
+int
+ip6_sr_add_del_multicastmap (ip6_sr_add_del_multicastmap_args_t * a)
+{
+ uword *p;
+ ip6_sr_tunnel_t *t;
+ ip6_sr_main_t *sm = &sr_main;
+ ip6_sr_policy_t *pt;
+
+ if (a->is_del)
+ {
+ /* clean up the adjacency */
+ p =
+ hash_get_mem (sm->policy_index_by_multicast_address,
+ a->multicast_address);
+ }
+ else
+ {
+ /* Get our policy by policy_name */
+ p = hash_get_mem (sm->policy_index_by_policy_name, a->policy_name);
+
+ }
+ if (!p)
+ return -1;
+
+ pt = pool_elt_at_index (sm->policies, p[0]);
+
+  /*
+     Get the first tunnel associated with the policy and populate the fib
+     adjacency. Since this tunnel has policy_index != ~0, it is the
+     trigger in the dual loop to pull up the policy and make a
+     copy-rewrite for each tunnel in the policy.
+   */
+
+ t = pool_elt_at_index (sm->tunnels, pt->tunnel_indices[0]);
+
+ /*
+ * Stick the tunnel index into the rewrite header.
+ *
+ * Unfortunately, inserting an SR header according to the various
+ * RFC's requires parsing through the ip6 header, perhaps consing a
+ * buffer onto the head of the vlib_buffer_t, etc. We don't use the
+ * normal reverse bcopy rewrite code.
+ *
+ * We don't handle ugly RFC-related cases yet, but I'm sure PL will complain
+ * at some point...
+ */
+ dpo_id_t dpo = DPO_INVALID;
+
+ dpo_set (&dpo, sr_dpo_type, DPO_PROTO_IP6, t - sm->tunnels);
+
+ /* Construct a FIB entry for multicast using the rx/tx fib from the first tunnel */
+ fib_prefix_t pfx = {
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_len = 128,
+ .fp_addr = {
+ .ip6 = *a->multicast_address,
+ }
+ };
+ fib_table_entry_special_dpo_add (t->rx_fib_index,
+ &pfx,
+ FIB_SOURCE_SR,
+ FIB_ENTRY_FLAG_EXCLUSIVE, &dpo);
+ dpo_reset (&dpo);
+
+ u8 *mcast_copy = 0;
+ mcast_copy = vec_new (ip6_address_t, 1);
+ memcpy (mcast_copy, a->multicast_address, sizeof (ip6_address_t));
+
+ if (a->is_del)
+ {
+ hash_unset_mem (sm->policy_index_by_multicast_address, mcast_copy);
+ vec_free (mcast_copy);
+ return 0;
+ }
+ /* else */
+
+ hash_set_mem (sm->policy_index_by_multicast_address, mcast_copy,
+ pt - sm->policies);
+
+
+ return 0;
+}
+
+/**
+ * @brief CLI Parser for Adding or Delete a mapping of IP6 multicast address
+ * to Segment Routing policy.
+ *
+ * @param vm vlib_main_t *
+ * @param input unformat_input_t *
+ * @param cmd vlib_cli_command_t *
+ *
+ * @return error clib_error_t *
+ */
+static clib_error_t *
+sr_add_del_multicast_map_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ int is_del = 0;
+ ip6_address_t multicast_address;
+ u8 *policy_name = 0;
+ int multicast_address_set = 0;
+ ip6_sr_add_del_multicastmap_args_t _a, *a = &_a;
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "del"))
+ is_del = 1;
+ else
+ if (unformat
+ (input, "address %U", unformat_ip6_address, &multicast_address))
+ multicast_address_set = 1;
+ else if (unformat (input, "sr-policy %s", &policy_name))
+ ;
+ else
+ break;
+ }
+
+ if (!is_del && !policy_name)
+ return clib_error_return (0, "name of sr policy required");
+
+ if (!multicast_address_set)
+ return clib_error_return (0, "multicast address required");
+
+ memset (a, 0, sizeof (*a));
+
+ a->is_del = is_del;
+ a->multicast_address = &multicast_address;
+ a->policy_name = policy_name;
+
+#if DPDK > 0 /* Cannot call replicate or configure multicast map yet without DPDK */
+  rv = ip6_sr_add_del_multicastmap (a);
+#else
+  return clib_error_return (0,
+			    "cannot use multicast replicate (spray) without DPDK installed");
+#endif /* DPDK */
+
+ switch (rv)
+ {
+ case 0:
+ break;
+ case -1:
+ return clib_error_return (0, "no policy with name: %s", policy_name);
+
+ case -2:
+      return clib_error_return (0, "multicast map: unspecified error");
+
+ case -3:
+      return clib_error_return (0,
+				"tunnel name to associate with SR policy is required");
+
+ case -7:
+ return clib_error_return (0, "TODO: deleting policy name %s",
+ policy_name);
+
+ default:
+ return clib_error_return (0, "BUG: ip6_sr_add_del_policy returns %d",
+ rv);
+
+ }
+ return 0;
+
+}
+
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (sr_multicast_map_command, static) = {
+ .path = "sr multicast-map",
+ .short_help =
+ "sr multicast-map address <multicast-ip6-address> sr-policy <sr-policy-name> [del]",
+ .function = sr_add_del_multicast_map_command_fn,
+};
+/* *INDENT-ON* */
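+
+/*
+ * Example (illustrative; ff3e::1 is a placeholder group address):
+ *   sr multicast-map address ff3e::1 sr-policy p1
+ */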
+
+/**
+ * @brief CLI Parser for Displaying a mapping of IP6 multicast address
+ * to Segment Routing policy.
+ *
+ * @param vm vlib_main_t *
+ * @param input unformat_input_t *
+ * @param cmd vlib_cli_command_t *
+ *
+ * @return error clib_error_t *
+ */
+static clib_error_t *
+show_sr_multicast_map_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ ip6_sr_main_t *sm = &sr_main;
+ u8 *key = 0;
+ u32 value;
+ ip6_address_t multicast_address;
+ ip6_sr_policy_t *pt;
+
+  if (hash_elts (sm->policy_index_by_multicast_address) == 0)
+    {
+      vlib_cli_output (vm, "no multicast maps configured");
+      return 0;
+    }
+
+  /* walk the hash table and display each entry */
+
+  /* *INDENT-OFF* */
+  hash_foreach_mem (key, value, sm->policy_index_by_multicast_address,
+  ({
+    multicast_address = *((ip6_address_t *)key);
+    pt = pool_elt_at_index (sm->policies, value);
+    if (pt)
+      {
+        vlib_cli_output (vm, "address: %U policy: %s",
+                         format_ip6_address, &multicast_address,
+                         pt->name);
+      }
+    else
+      vlib_cli_output (vm, "BUG: policy not found for address: %U with policy index %d",
+                         format_ip6_address, &multicast_address,
+                         value);
+  }));
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_sr_multicast_map_command, static) = {
+ .path = "show sr multicast-map",
+ .short_help = "show sr multicast-map",
+ .function = show_sr_multicast_map_fn,
+};
+/* *INDENT-ON* */
+
+
+#define foreach_sr_fix_dst_addr_next \
+_(DROP, "error-drop")
+
+/**
+ * @brief Struct for valid next-nodes for SR fix destination address node
+ */
+typedef enum
+{
+#define _(s,n) SR_FIX_DST_ADDR_NEXT_##s,
+ foreach_sr_fix_dst_addr_next
+#undef _
+ SR_FIX_DST_ADDR_N_NEXT,
+} sr_fix_dst_addr_next_t;
+
+/**
+ * @brief Error strings for SR Fix Destination rewrite
+ */
+static char *sr_fix_dst_error_strings[] = {
+#define sr_fix_dst_error(n,s) s,
+#include "sr_fix_dst_error.def"
+#undef sr_fix_dst_error
+};
+
+/**
+ * @brief Struct for errors for SR Fix Destination rewrite
+ */
+typedef enum
+{
+#define sr_fix_dst_error(n,s) SR_FIX_DST_ERROR_##n,
+#include "sr_fix_dst_error.def"
+#undef sr_fix_dst_error
+ SR_FIX_DST_N_ERROR,
+} sr_fix_dst_error_t;
+
+/**
+ * @brief Information for fix address trace
+ */
+typedef struct
+{
+ ip6_address_t src, dst;
+ u32 next_index;
+ u32 adj_index;
+ u8 sr[256];
+} sr_fix_addr_trace_t;
+
+/**
+ * @brief Formatter for fix address trace
+ */
+u8 *
+format_sr_fix_addr_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ sr_fix_addr_trace_t *t = va_arg (*args, sr_fix_addr_trace_t *);
+ vnet_hw_interface_t *hi = 0;
+ ip_adjacency_t *adj;
+ ip6_main_t *im = &ip6_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+ vnet_main_t *vnm = vnet_get_main ();
+
+ if (t->adj_index != ~0)
+ {
+ adj = ip_get_adjacency (lm, t->adj_index);
+ hi = vnet_get_sup_hw_interface (vnm, adj->rewrite_header.sw_if_index);
+ }
+
+ s = format (s, "SR-FIX_ADDR: next %s ip6 src %U dst %U\n",
+ (t->next_index == SR_FIX_DST_ADDR_NEXT_DROP)
+ ? "drop" : "output",
+ format_ip6_address, &t->src, format_ip6_address, &t->dst);
+ if (t->next_index != SR_FIX_DST_ADDR_NEXT_DROP)
+ {
+ s =
+ format (s, "%U\n", format_ip6_sr_header, t->sr, 1 /* print_hmac */ );
+ s =
+ format (s, " output via %s",
+ hi ? (char *) (hi->name) : "Invalid adj");
+ }
+ return s;
+}
+
+/**
+ * @brief Fix SR destination address - dual-loop
+ *
+ * @node sr-fix-dst-addr
+ * @param vm vlib_main_t *
+ * @param node vlib_node_runtime_t *
+ * @param from_frame vlib_frame_t *
+ *
+ * @return from_frame->n_vectors uword
+ */
+static uword
+sr_fix_dst_addr (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, *from, *to_next;
+ ip6_main_t *im = &ip6_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+#if 0
+ while (0 && n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ __attribute__ ((unused)) vlib_buffer_t *b0, *b1;
+ u32 next0 = SR_FIX_DST_ADDR_NEXT_DROP;
+ u32 next1 = SR_FIX_DST_ADDR_NEXT_DROP;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+#endif
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ ip6_header_t *ip0;
+ ip_adjacency_t *adj0;
+ ip6_sr_header_t *sr0;
+ u32 next0 = SR_FIX_DST_ADDR_NEXT_DROP;
+ ip6_address_t *new_dst0;
+ ethernet_header_t *eh0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ adj0 =
+ ip_get_adjacency (lm, vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
+ next0 = adj0->mcast_group_index;
+
+ /* We should be pointing at an Ethernet header... */
+ eh0 = vlib_buffer_get_current (b0);
+ ip0 = (ip6_header_t *) (eh0 + 1);
+ sr0 = (ip6_sr_header_t *) (ip0 + 1);
+
+ /* We'd better find an SR header... */
+ if (PREDICT_FALSE (ip0->protocol != IPPROTO_IPV6_ROUTE))
+ {
+ b0->error = node->errors[SR_FIX_DST_ERROR_NO_SR_HEADER];
+ goto do_trace0;
+ }
+ else
+ {
+ /*
+ * We get here from sr_rewrite or sr_local, with
+ * sr->segments_left pointing at the (copy of the original) dst
+ * address. Use it, then increment sr0->segments_left.
+ */
+
+ /* Out of segments? Turf the packet */
+ if (PREDICT_FALSE (sr0->segments_left == 0))
+ {
+ b0->error = node->errors[SR_FIX_DST_ERROR_NO_MORE_SEGMENTS];
+ goto do_trace0;
+ }
+
+ /*
+ * Rewrite the packet with the original dst address
+ * We assume that the last segment (in processing order) contains
+ * the original dst address. The list is reversed, so sr0->segments
+ * contains the original dst address.
+ */
+ new_dst0 = sr0->segments;
+ ip0->dst_address.as_u64[0] = new_dst0->as_u64[0];
+ ip0->dst_address.as_u64[1] = new_dst0->as_u64[1];
+ }
+
+ do_trace0:
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ sr_fix_addr_trace_t *t = vlib_add_trace (vm, node,
+ b0, sizeof (*t));
+ t->next_index = next0;
+ t->adj_index = ~0;
+
+ if (next0 != SR_FIX_DST_ADDR_NEXT_DROP)
+ {
+ t->adj_index = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+ clib_memcpy (t->src.as_u8, ip0->src_address.as_u8,
+ sizeof (t->src.as_u8));
+ clib_memcpy (t->dst.as_u8, ip0->dst_address.as_u8,
+ sizeof (t->dst.as_u8));
+ clib_memcpy (t->sr, sr0, sizeof (t->sr));
+ }
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return from_frame->n_vectors;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (sr_fix_dst_addr_node) = {
+ .function = sr_fix_dst_addr,
+ .name = "sr-fix-dst-addr",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .format_trace = format_sr_fix_addr_trace,
+ .format_buffer = format_ip6_sr_header_with_length,
+
+ .runtime_data_bytes = 0,
+
+ .n_errors = SR_FIX_DST_N_ERROR,
+ .error_strings = sr_fix_dst_error_strings,
+
+ .n_next_nodes = SR_FIX_DST_ADDR_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [SR_FIX_DST_ADDR_NEXT_##s] = n,
+ foreach_sr_fix_dst_addr_next
+#undef _
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (sr_fix_dst_addr_node, sr_fix_dst_addr)
+/* *INDENT-ON* */
+
+static clib_error_t *
+sr_init (vlib_main_t * vm)
+{
+ ip6_sr_main_t *sm = &sr_main;
+ clib_error_t *error = 0;
+ vlib_node_t *ip6_lookup_node, *ip6_rewrite_node;
+
+ if ((error = vlib_call_init_function (vm, ip_main_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, ip6_lookup_init)))
+ return error;
+
+ sm->vlib_main = vm;
+ sm->vnet_main = vnet_get_main ();
+
+ vec_validate (sm->hmac_keys, 0);
+ sm->hmac_keys[0].shared_secret = (u8 *) 0xdeadbeef;
+
+ sm->tunnel_index_by_key =
+ hash_create_mem (0, sizeof (ip6_sr_tunnel_key_t), sizeof (uword));
+
+ sm->tunnel_index_by_name = hash_create_string (0, sizeof (uword));
+
+ sm->policy_index_by_policy_name = hash_create_string (0, sizeof (uword));
+
+ sm->policy_index_by_multicast_address =
+ hash_create_mem (0, sizeof (ip6_address_t), sizeof (uword));
+
+ sm->hmac_key_by_shared_secret = hash_create_string (0, sizeof (uword));
+
+ ip6_register_protocol (IPPROTO_IPV6_ROUTE, sr_local_node.index);
+
+ ip6_lookup_node = vlib_get_node_by_name (vm, (u8 *) "ip6-lookup");
+ ASSERT (ip6_lookup_node);
+
+ ip6_rewrite_node = vlib_get_node_by_name (vm, (u8 *) "ip6-rewrite");
+ ASSERT (ip6_rewrite_node);
+
+#if DPDK > 0 /* Cannot run replicate without DPDK */
+ /* Add a disposition to sr_replicate for the sr multicast replicate node */
+ sm->ip6_lookup_sr_replicate_index =
+ vlib_node_add_next (vm, ip6_lookup_node->index, sr_replicate_node.index);
+#endif /* DPDK */
+
+ /* Add a disposition to ip6_rewrite for the sr dst address hack node */
+ sm->ip6_rewrite_sr_next_index =
+ vlib_node_add_next (vm, ip6_rewrite_node->index,
+ sr_fix_dst_addr_node.index);
+
+ OpenSSL_add_all_digests ();
+
+ sm->md = (void *) EVP_get_digestbyname ("sha1");
+ sm->hmac_ctx = clib_mem_alloc (sizeof (HMAC_CTX));
+
+ sr_dpo_type = dpo_register_new_type (&sr_vft, sr_nodes);
+
+ return error;
+}
+
+VLIB_INIT_FUNCTION (sr_init);
+
+/**
+ * @brief Definition of next-nodes for SR local
+ */
+#define foreach_sr_local_next \
+ _ (ERROR, "error-drop") \
+ _ (IP6_LOOKUP, "ip6-lookup")
+
+/**
+ * @brief Struct for definition of next-nodes for SR local
+ */
+typedef enum
+{
+#define _(s,n) SR_LOCAL_NEXT_##s,
+ foreach_sr_local_next
+#undef _
+ SR_LOCAL_N_NEXT,
+} sr_local_next_t;
+
+/**
+ * @brief Struct for packet trace of SR local
+ */
+typedef struct
+{
+ u8 next_index;
+ u8 sr_valid;
+ ip6_address_t src, dst;
+ u16 length;
+ u8 sr[256];
+} sr_local_trace_t;
+
+/**
+ * @brief Definition of SR local error-strings
+ */
+static char *sr_local_error_strings[] = {
+#define sr_error(n,s) s,
+#include "sr_error.def"
+#undef sr_error
+};
+
+/**
+ * @brief Struct for definition of SR local error-strings
+ */
+typedef enum
+{
+#define sr_error(n,s) SR_LOCAL_ERROR_##n,
+#include "sr_error.def"
+#undef sr_error
+ SR_LOCAL_N_ERROR,
+} sr_local_error_t;
+
+/**
+ * @brief Format SR local trace
+ *
+ * @param s u8 *
+ * @param args va_list *
+ *
+ * @return s u8 *
+ */
+u8 *
+format_sr_local_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ sr_local_trace_t *t = va_arg (*args, sr_local_trace_t *);
+
+ s = format (s, "SR-LOCAL: src %U dst %U len %u next_index %d",
+ format_ip6_address, &t->src,
+ format_ip6_address, &t->dst, t->length, t->next_index);
+ if (t->sr_valid)
+ s =
+ format (s, "\n %U", format_ip6_sr_header, t->sr, 1 /* print_hmac */ );
+ else
+ s = format (s, "\n popped SR header");
+
+ return s;
+}
+
+
+/* $$$$ fixme: smp, don't copy data, cache input, output (maybe) */
+/**
+ * @brief Validate the SR HMAC
+ *
+ * @param sm ip6_sr_main_t *
+ * @param ip ip6_header_t *
+ * @param sr ip6_sr_header_t *
+ *
+ * @return retval int
+ */
+static int
+sr_validate_hmac (ip6_sr_main_t * sm, ip6_header_t * ip, ip6_sr_header_t * sr)
+{
+ u32 key_index;
+ static u8 *keybuf;
+ u8 *copy_target;
+ int first_segment;
+ ip6_address_t *addrp;
+ int i;
+ ip6_sr_hmac_key_t *hmac_key;
+ static u8 *signature;
+ u32 sig_len;
+
+ key_index = sr->hmac_key;
+
+ /* No signature? Pass... */
+ if (key_index == 0)
+ return 0;
+
+ /* We don't know about this key? Fail... */
+ if (key_index >= vec_len (sm->hmac_keys))
+ return 1;
+
+ vec_validate (signature, SHA256_DIGEST_LENGTH - 1);
+
+ hmac_key = sm->hmac_keys + key_index;
+
+ vec_reset_length (keybuf);
+
+ /* pkt ip6 src address */
+ vec_add2 (keybuf, copy_target, sizeof (ip6_address_t));
+ clib_memcpy (copy_target, ip->src_address.as_u8, sizeof (ip6_address_t));
+
+ /* last segment */
+ vec_add2 (keybuf, copy_target, 1);
+ copy_target[0] = sr->first_segment;
+
+ /* octet w/ bit 0 = "clean" flag */
+ vec_add2 (keybuf, copy_target, 1);
+ copy_target[0]
+ = (sr->flags & clib_host_to_net_u16 (IP6_SR_HEADER_FLAG_CLEANUP))
+ ? 0x80 : 0;
+
+ /* hmac key id */
+ vec_add2 (keybuf, copy_target, 1);
+ copy_target[0] = sr->hmac_key;
+
+ first_segment = sr->first_segment;
+
+ addrp = sr->segments;
+
+ /* segments */
+ for (i = 0; i <= first_segment; i++)
+ {
+ vec_add2 (keybuf, copy_target, sizeof (ip6_address_t));
+ clib_memcpy (copy_target, addrp->as_u8, sizeof (ip6_address_t));
+ addrp++;
+ }
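+
+  /*
+   * At this point keybuf holds, in order: the src address (16 bytes),
+   * first_segment (1 byte), the "clean" flag octet (1 byte), the hmac
+   * key id (1 byte), then (first_segment + 1) segment addresses of
+   * 16 bytes each.
+   */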
+
+ if (sm->is_debug)
+ clib_warning ("verify key index %d keybuf: %U", key_index,
+ format_hex_bytes, keybuf, vec_len (keybuf));
+
+ /* shared secret */
+
+ /* SHA1 is shorter than SHA-256 */
+ memset (signature, 0, vec_len (signature));
+
+ HMAC_CTX_init (sm->hmac_ctx);
+  if (!HMAC_Init (sm->hmac_ctx, hmac_key->shared_secret,
+		  vec_len (hmac_key->shared_secret), sm->md))
+    clib_warning ("HMAC_Init failed");
+  if (!HMAC_Update (sm->hmac_ctx, keybuf, vec_len (keybuf)))
+    clib_warning ("HMAC_Update failed");
+  if (!HMAC_Final (sm->hmac_ctx, signature, &sig_len))
+    clib_warning ("HMAC_Final failed");
+ HMAC_CTX_cleanup (sm->hmac_ctx);
+
+ if (sm->is_debug)
+ clib_warning ("computed signature len %d, value %U", sig_len,
+ format_hex_bytes, signature, vec_len (signature));
+
+ /* Point at the SHA signature in the packet */
+ addrp++;
+ if (sm->is_debug)
+ clib_warning ("read signature %U", format_hex_bytes, addrp,
+ SHA256_DIGEST_LENGTH);
+
+ return memcmp (signature, addrp, SHA256_DIGEST_LENGTH);
+}
+
+/**
+ * @brief SR local node
+ * @node sr-local
+ *
+ * @param vm vlib_main_t *
+ * @param node vlib_node_runtime_t *
+ * @param from_frame vlib_frame_t *
+ *
+ * @return from_frame->n_vectors uword
+ */
+static uword
+sr_local (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, *from, *to_next;
+ ip6_sr_main_t *sm = &sr_main;
+ u32 (*sr_local_cb) (vlib_main_t *, vlib_node_runtime_t *,
+ vlib_buffer_t *, ip6_header_t *, ip6_sr_header_t *);
+ sr_local_cb = sm->sr_local_cb;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ ip6_header_t *ip0, *ip1;
+ ip6_sr_header_t *sr0, *sr1;
+ ip6_address_t *new_dst0, *new_dst1;
+ u32 next0 = SR_LOCAL_NEXT_IP6_LOOKUP;
+ u32 next1 = SR_LOCAL_NEXT_IP6_LOOKUP;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+
+ b0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (b0);
+ sr0 = (ip6_sr_header_t *) (ip0 + 1);
+ if (PREDICT_FALSE
+ (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ ip6_hop_by_hop_ext_t *ext_hdr =
+ (ip6_hop_by_hop_ext_t *) ip6_next_header (ip0);
+ sr0 =
+ (ip6_sr_header_t *) ip6_ext_next_header ((ip6_ext_header_t *)
+ ext_hdr);
+ }
+
+ if (PREDICT_FALSE (sr0->type != ROUTING_HEADER_TYPE_SR))
+ {
+ next0 = SR_LOCAL_NEXT_ERROR;
+ b0->error =
+ node->errors[SR_LOCAL_ERROR_BAD_ROUTING_HEADER_TYPE];
+ goto do_trace0;
+ }
+
+ /* Out of segments? Turf the packet */
+ if (PREDICT_FALSE (sr0->segments_left == 0))
+ {
+ next0 = SR_LOCAL_NEXT_ERROR;
+ b0->error = node->errors[SR_LOCAL_ERROR_NO_MORE_SEGMENTS];
+ goto do_trace0;
+ }
+
+ if (PREDICT_FALSE (sm->validate_hmac))
+ {
+ if (sr_validate_hmac (sm, ip0, sr0))
+ {
+ next0 = SR_LOCAL_NEXT_ERROR;
+ b0->error = node->errors[SR_LOCAL_ERROR_HMAC_INVALID];
+ goto do_trace0;
+ }
+ }
+
+ next0 = sr_local_cb ? sr_local_cb (vm, node, b0, ip0, sr0) : next0;
+
+ /*
+ * To suppress rewrite, return ~SR_LOCAL_NEXT_xxx
+ */
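+	  /* e.g. a callback returning ~SR_LOCAL_NEXT_IP6_LOOKUP lands here;
+	     the XOR below recovers SR_LOCAL_NEXT_IP6_LOOKUP while the
+	     dst-address rewrite in the else-branch is skipped. */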
+ if (PREDICT_FALSE (next0 & 0x80000000))
+ {
+ next0 ^= 0xFFFFFFFF;
+ if (PREDICT_FALSE (next0 == SR_LOCAL_NEXT_ERROR))
+ b0->error = node->errors[SR_LOCAL_ERROR_APP_CALLBACK];
+ }
+ else
+ {
+ u32 segment_index0;
+
+ segment_index0 = sr0->segments_left - 1;
+
+ /* Rewrite the packet */
+ new_dst0 = (ip6_address_t *) (sr0->segments + segment_index0);
+ ip0->dst_address.as_u64[0] = new_dst0->as_u64[0];
+ ip0->dst_address.as_u64[1] = new_dst0->as_u64[1];
+
+ if (PREDICT_TRUE (sr0->segments_left > 0))
+ sr0->segments_left -= 1;
+ }
+
+ /* End of the path. Clean up the SR header, or not */
+ if (PREDICT_FALSE
+ (sr0->segments_left == 0 &&
+ (sr0->flags &
+ clib_host_to_net_u16 (IP6_SR_HEADER_FLAG_CLEANUP))))
+ {
+ u64 *copy_dst0, *copy_src0;
+ u16 new_l0;
+ u32 copy_len_u64s0 = 0;
+ int i;
+
+ /*
+ * Copy the ip6 header right by the (real) length of the
+ * sr header.
+ */
+ if (PREDICT_FALSE
+ (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ ip6_hop_by_hop_ext_t *ext_hdr =
+ (ip6_hop_by_hop_ext_t *) ip6_next_header (ip0);
+ copy_len_u64s0 =
+ (((ip6_ext_header_t *) ext_hdr)->n_data_u64s) + 1;
+ ext_hdr->next_hdr = sr0->protocol;
+ }
+ else
+ {
+ ip0->protocol = sr0->protocol;
+ }
+ vlib_buffer_advance (b0, (sr0->length + 1) * 8);
+
+ new_l0 = clib_net_to_host_u16 (ip0->payload_length) -
+ (sr0->length + 1) * 8;
+ ip0->payload_length = clib_host_to_net_u16 (new_l0);
+
+ copy_src0 = (u64 *) ip0;
+ copy_dst0 = copy_src0 + (sr0->length + 1);
+
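+	      /*
+	       * The 40-byte ip6 header is 5 u64s; dst overlaps src when
+	       * the SR header is shorter than the headers being moved,
+	       * so copy from the highest u64 index downward. Any
+	       * hop-by-hop u64s are accounted for via copy_len_u64s0.
+	       */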
+ copy_dst0[4 + copy_len_u64s0] = copy_src0[4 + copy_len_u64s0];
+ copy_dst0[3 + copy_len_u64s0] = copy_src0[3 + copy_len_u64s0];
+ copy_dst0[2 + copy_len_u64s0] = copy_src0[2 + copy_len_u64s0];
+ copy_dst0[1 + copy_len_u64s0] = copy_src0[1 + copy_len_u64s0];
+ copy_dst0[0 + copy_len_u64s0] = copy_src0[0 + copy_len_u64s0];
+
+ for (i = copy_len_u64s0 - 1; i >= 0; i--)
+ {
+ copy_dst0[i] = copy_src0[i];
+ }
+
+ sr0 = 0;
+ }
+
+ do_trace0:
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ sr_local_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8,
+ sizeof (tr->src.as_u8));
+ clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8,
+ sizeof (tr->dst.as_u8));
+ tr->length = vlib_buffer_length_in_chain (vm, b0);
+ tr->next_index = next0;
+ tr->sr_valid = sr0 != 0;
+ if (tr->sr_valid)
+ clib_memcpy (tr->sr, sr0, sizeof (tr->sr));
+ }
+
+ b1 = vlib_get_buffer (vm, bi1);
+ ip1 = vlib_buffer_get_current (b1);
+ sr1 = (ip6_sr_header_t *) (ip1 + 1);
+ if (PREDICT_FALSE
+ (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+
+ ip6_hop_by_hop_ext_t *ext_hdr =
+ (ip6_hop_by_hop_ext_t *) ip6_next_header (ip1);
+ sr1 =
+ (ip6_sr_header_t *) ip6_ext_next_header ((ip6_ext_header_t *)
+ ext_hdr);
+ }
+
+ if (PREDICT_FALSE (sr1->type != ROUTING_HEADER_TYPE_SR))
+ {
+ next1 = SR_LOCAL_NEXT_ERROR;
+ b1->error =
+ node->errors[SR_LOCAL_ERROR_BAD_ROUTING_HEADER_TYPE];
+ goto do_trace1;
+ }
+
+ /* Out of segments? Turf the packet */
+ if (PREDICT_FALSE (sr1->segments_left == 0))
+ {
+ next1 = SR_LOCAL_NEXT_ERROR;
+ b1->error = node->errors[SR_LOCAL_ERROR_NO_MORE_SEGMENTS];
+ goto do_trace1;
+ }
+
+ if (PREDICT_FALSE (sm->validate_hmac))
+ {
+ if (sr_validate_hmac (sm, ip1, sr1))
+ {
+ next1 = SR_LOCAL_NEXT_ERROR;
+ b1->error = node->errors[SR_LOCAL_ERROR_HMAC_INVALID];
+ goto do_trace1;
+ }
+ }
+
+ next1 = sr_local_cb ? sr_local_cb (vm, node, b1, ip1, sr1) : next1;
+
+ /*
+ * To suppress rewrite, return ~SR_LOCAL_NEXT_xxx
+ */
+ if (PREDICT_FALSE (next1 & 0x80000000))
+ {
+ next1 ^= 0xFFFFFFFF;
+ if (PREDICT_FALSE (next1 == SR_LOCAL_NEXT_ERROR))
+ b1->error = node->errors[SR_LOCAL_ERROR_APP_CALLBACK];
+ }
+ else
+ {
+ u32 segment_index1;
+
+ segment_index1 = sr1->segments_left - 1;
+
+ /* Rewrite the packet */
+ new_dst1 = (ip6_address_t *) (sr1->segments + segment_index1);
+ ip1->dst_address.as_u64[0] = new_dst1->as_u64[0];
+ ip1->dst_address.as_u64[1] = new_dst1->as_u64[1];
+
+ if (PREDICT_TRUE (sr1->segments_left > 0))
+ sr1->segments_left -= 1;
+ }
+
+ /* End of the path. Clean up the SR header, or not */
+ if (PREDICT_FALSE
+ (sr1->segments_left == 0 &&
+ (sr1->flags &
+ clib_host_to_net_u16 (IP6_SR_HEADER_FLAG_CLEANUP))))
+ {
+ u64 *copy_dst1, *copy_src1;
+ u16 new_l1;
+ u32 copy_len_u64s1 = 0;
+ int i;
+
+ /*
+ * Copy the ip6 header right by the (real) length of the
+ * sr header.
+ */
+ if (PREDICT_FALSE
+ (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ ip6_hop_by_hop_ext_t *ext_hdr =
+ (ip6_hop_by_hop_ext_t *) ip6_next_header (ip1);
+ copy_len_u64s1 =
+ (((ip6_ext_header_t *) ext_hdr)->n_data_u64s) + 1;
+ ext_hdr->next_hdr = sr1->protocol;
+ }
+ else
+ {
+ ip1->protocol = sr1->protocol;
+ }
+ vlib_buffer_advance (b1, (sr1->length + 1) * 8);
+
+ new_l1 = clib_net_to_host_u16 (ip1->payload_length) -
+ (sr1->length + 1) * 8;
+ ip1->payload_length = clib_host_to_net_u16 (new_l1);
+
+ copy_src1 = (u64 *) ip1;
+ copy_dst1 = copy_src1 + (sr1->length + 1);
+
+ copy_dst1[4 + copy_len_u64s1] = copy_src1[4 + copy_len_u64s1];
+ copy_dst1[3 + copy_len_u64s1] = copy_src1[3 + copy_len_u64s1];
+ copy_dst1[2 + copy_len_u64s1] = copy_src1[2 + copy_len_u64s1];
+ copy_dst1[1 + copy_len_u64s1] = copy_src1[1 + copy_len_u64s1];
+ copy_dst1[0 + copy_len_u64s1] = copy_src1[0 + copy_len_u64s1];
+
+ for (i = copy_len_u64s1 - 1; i >= 0; i--)
+ {
+ copy_dst1[i] = copy_src1[i];
+ }
+
+ sr1 = 0;
+ }
+
+ do_trace1:
+ if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ sr_local_trace_t *tr = vlib_add_trace (vm, node,
+ b1, sizeof (*tr));
+ clib_memcpy (tr->src.as_u8, ip1->src_address.as_u8,
+ sizeof (tr->src.as_u8));
+ clib_memcpy (tr->dst.as_u8, ip1->dst_address.as_u8,
+ sizeof (tr->dst.as_u8));
+ tr->length = vlib_buffer_length_in_chain (vm, b1);
+ tr->next_index = next1;
+ tr->sr_valid = sr1 != 0;
+ if (tr->sr_valid)
+ clib_memcpy (tr->sr, sr1, sizeof (tr->sr));
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ ip6_header_t *ip0 = 0;
+ ip6_sr_header_t *sr0;
+ ip6_address_t *new_dst0;
+ u32 next0 = SR_LOCAL_NEXT_IP6_LOOKUP;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (b0);
+ sr0 = (ip6_sr_header_t *) (ip0 + 1);
+
+ if (PREDICT_FALSE
+ (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ ip6_hop_by_hop_ext_t *ext_hdr =
+ (ip6_hop_by_hop_ext_t *) ip6_next_header (ip0);
+ sr0 =
+ (ip6_sr_header_t *) ip6_ext_next_header ((ip6_ext_header_t *)
+ ext_hdr);
+ }
+ if (PREDICT_FALSE (sr0->type != ROUTING_HEADER_TYPE_SR))
+ {
+ next0 = SR_LOCAL_NEXT_ERROR;
+ b0->error =
+ node->errors[SR_LOCAL_ERROR_BAD_ROUTING_HEADER_TYPE];
+ goto do_trace;
+ }
+
+ /* Out of segments? Turf the packet */
+ if (PREDICT_FALSE (sr0->segments_left == 0))
+ {
+ next0 = SR_LOCAL_NEXT_ERROR;
+ b0->error = node->errors[SR_LOCAL_ERROR_NO_MORE_SEGMENTS];
+ goto do_trace;
+ }
+
+ if (PREDICT_FALSE (sm->validate_hmac))
+ {
+ if (sr_validate_hmac (sm, ip0, sr0))
+ {
+ next0 = SR_LOCAL_NEXT_ERROR;
+ b0->error = node->errors[SR_LOCAL_ERROR_HMAC_INVALID];
+ goto do_trace;
+ }
+ }
+
+ next0 = sr_local_cb ? sr_local_cb (vm, node, b0, ip0, sr0) : next0;
+
+ /*
+ * To suppress rewrite, return ~SR_LOCAL_NEXT_xxx
+ */
+ if (PREDICT_FALSE (next0 & 0x80000000))
+ {
+ next0 ^= 0xFFFFFFFF;
+ if (PREDICT_FALSE (next0 == SR_LOCAL_NEXT_ERROR))
+ b0->error = node->errors[SR_LOCAL_ERROR_APP_CALLBACK];
+ }
+ else
+ {
+ u32 segment_index0;
+
+ segment_index0 = sr0->segments_left - 1;
+
+ /* Rewrite the packet */
+ new_dst0 = (ip6_address_t *) (sr0->segments + segment_index0);
+ ip0->dst_address.as_u64[0] = new_dst0->as_u64[0];
+ ip0->dst_address.as_u64[1] = new_dst0->as_u64[1];
+
+ if (PREDICT_TRUE (sr0->segments_left > 0))
+ sr0->segments_left -= 1;
+ }
+
+ /* End of the path. Clean up the SR header, or not */
+ if (PREDICT_FALSE
+ (sr0->segments_left == 0 &&
+ (sr0->flags &
+ clib_host_to_net_u16 (IP6_SR_HEADER_FLAG_CLEANUP))))
+ {
+ u64 *copy_dst0, *copy_src0;
+ u16 new_l0;
+ u32 copy_len_u64s0 = 0;
+ int i;
+
+ /*
+ * Copy the ip6 header right by the (real) length of the
+ * sr header.
+ */
+ if (PREDICT_FALSE
+ (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ ip6_hop_by_hop_ext_t *ext_hdr =
+ (ip6_hop_by_hop_ext_t *) ip6_next_header (ip0);
+ copy_len_u64s0 =
+ (((ip6_ext_header_t *) ext_hdr)->n_data_u64s) + 1;
+ ext_hdr->next_hdr = sr0->protocol;
+ }
+ else
+ {
+ ip0->protocol = sr0->protocol;
+ }
+
+ vlib_buffer_advance (b0, (sr0->length + 1) * 8);
+
+ new_l0 = clib_net_to_host_u16 (ip0->payload_length) -
+ (sr0->length + 1) * 8;
+ ip0->payload_length = clib_host_to_net_u16 (new_l0);
+
+ copy_src0 = (u64 *) ip0;
+ copy_dst0 = copy_src0 + (sr0->length + 1);
+ copy_dst0[4 + copy_len_u64s0] = copy_src0[4 + copy_len_u64s0];
+ copy_dst0[3 + copy_len_u64s0] = copy_src0[3 + copy_len_u64s0];
+ copy_dst0[2 + copy_len_u64s0] = copy_src0[2 + copy_len_u64s0];
+ copy_dst0[1 + copy_len_u64s0] = copy_src0[1 + copy_len_u64s0];
+ copy_dst0[0 + copy_len_u64s0] = copy_src0[0 + copy_len_u64s0];
+
+ for (i = copy_len_u64s0 - 1; i >= 0; i--)
+ {
+ copy_dst0[i] = copy_src0[i];
+ }
+
+ sr0 = 0;
+ }
+
+ do_trace:
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ sr_local_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8,
+ sizeof (tr->src.as_u8));
+ clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8,
+ sizeof (tr->dst.as_u8));
+ tr->length = vlib_buffer_length_in_chain (vm, b0);
+ tr->next_index = next0;
+ tr->sr_valid = sr0 != 0;
+ if (tr->sr_valid)
+ clib_memcpy (tr->sr, sr0, sizeof (tr->sr));
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, sr_local_node.index,
+ SR_LOCAL_ERROR_PKTS_PROCESSED,
+ from_frame->n_vectors);
+ return from_frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (sr_local_node, static) = {
+ .function = sr_local,
+ .name = "sr-local",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .format_trace = format_sr_local_trace,
+
+ .runtime_data_bytes = 0,
+
+ .n_errors = SR_LOCAL_N_ERROR,
+ .error_strings = sr_local_error_strings,
+
+ .n_next_nodes = SR_LOCAL_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [SR_LOCAL_NEXT_##s] = n,
+ foreach_sr_local_next
+#undef _
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (sr_local_node, sr_local)
+/* *INDENT-ON* */
+
+ip6_sr_main_t *
+sr_get_main (vlib_main_t * vm)
+{
+ vlib_call_init_function (vm, sr_init);
+ ASSERT (sr_local_node.index);
+ return &sr_main;
+}
+
+/**
+ * @brief CLI parser for SR fix destination rewrite node
+ *
+ * @param vm vlib_main_t *
+ * @param input unformat_input_t *
+ * @param cmd vlib_cli_command_t *
+ *
+ * @return error clib_error_t *
+ */
+static clib_error_t *
+set_ip6_sr_rewrite_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ fib_prefix_t pfx = {
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_len = 128,
+ };
+ u32 fib_index = 0;
+ u32 fib_id = 0;
+ u32 adj_index;
+ ip_adjacency_t *adj;
+ vnet_hw_interface_t *hi;
+ u32 sw_if_index;
+ ip6_sr_main_t *sm = &sr_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ fib_node_index_t fei;
+
+ if (!unformat (input, "%U", unformat_ip6_address, &pfx.fp_addr.ip6))
+ return clib_error_return (0, "ip6 address missing in '%U'",
+ format_unformat_error, input);
+
+ if (unformat (input, "rx-table-id %d", &fib_id))
+ {
+ fib_index = fib_table_id_find_fib_index (FIB_PROTOCOL_IP6, fib_id);
+ if (fib_index == ~0)
+ return clib_error_return (0, "fib-id %d not found", fib_id);
+ }
+
+ fei = fib_table_lookup_exact_match (fib_index, &pfx);
+
+ if (FIB_NODE_INDEX_INVALID == fei)
+ return clib_error_return (0, "no match for %U",
+ format_ip6_address, &pfx.fp_addr.ip6);
+
+ adj_index = fib_entry_get_adj_for_source (fei, FIB_SOURCE_SR);
+
+ if (ADJ_INDEX_INVALID == adj_index)
+ return clib_error_return (0, "%U not SR sourced",
+ format_ip6_address, &pfx.fp_addr.ip6);
+
+ adj = adj_get (adj_index);
+
+ if (adj->lookup_next_index != IP_LOOKUP_NEXT_REWRITE)
+ return clib_error_return (0, "%U unresolved (not a rewrite adj)",
+ format_ip6_address, &pfx.fp_addr.ip6);
+
+ adj->rewrite_header.next_index = sm->ip6_rewrite_sr_next_index;
+
+ sw_if_index = adj->rewrite_header.sw_if_index;
+ hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ adj->rewrite_header.node_index = sr_fix_dst_addr_node.index;
+
+ /* $$$$$ hack... steal the mcast group index */
+ adj->mcast_group_index =
+ vlib_node_add_next (vm, sr_fix_dst_addr_node.index,
+ hi->output_node_index);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_ip6_sr_rewrite, static) = {
+ .path = "set ip6 sr rewrite",
+  .short_help = "set ip6 sr rewrite <ip6-address> [rx-table-id <id>]",
+ .function = set_ip6_sr_rewrite_fn,
+};
+/* *INDENT-ON* */
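+
+/*
+ * Example (illustrative): "set ip6 sr rewrite 2001:db8::2 rx-table-id 0"
+ * targets an SR-sourced /128 previously created via
+ * "sr tunnel ... dst 2001:db8::2/128".
+ */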
+
+/**
+ * @brief Register a callback routine to set next0 in sr_local
+ *
+ * @param cb void *
+ */
+void
+vnet_register_sr_app_callback (void *cb)
+{
+ ip6_sr_main_t *sm = &sr_main;
+
+ sm->sr_local_cb = cb;
+}
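+
+/*
+ * Illustrative sketch only (not part of this commit): a minimal callback
+ * matching the sr_local_cb signature used by sr_local() above. The names
+ * below are hypothetical. Returning the one's complement of a next index
+ * suppresses the dst-address rewrite, per the convention in sr_local().
+ */
+#if 0
+static u32
+sample_sr_local_cb (vlib_main_t * vm, vlib_node_runtime_t * node,
+		    vlib_buffer_t * b0, ip6_header_t * ip0,
+		    ip6_sr_header_t * sr0)
+{
+  /* accept the packet, but skip the dst-address rewrite */
+  return ~((u32) SR_LOCAL_NEXT_IP6_LOOKUP);
+}
+
+/* ... at init time: vnet_register_sr_app_callback (sample_sr_local_cb); */
+#endif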
+
+/**
+ * @brief Test routine for validation of HMAC
+ */
+static clib_error_t *
+test_sr_hmac_validate_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ ip6_sr_main_t *sm = &sr_main;
+
+ if (unformat (input, "validate on"))
+ sm->validate_hmac = 1;
+  else if (unformat (input, "validate off"))
+ sm->validate_hmac = 0;
+ else
+ return clib_error_return (0, "expected validate on|off in '%U'",
+ format_unformat_error, input);
+
+ vlib_cli_output (vm, "hmac signature validation %s",
+ sm->validate_hmac ? "on" : "off");
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (test_sr_hmac_validate, static) = {
+ .path = "test sr hmac",
+ .short_help = "test sr hmac validate [on|off]",
+ .function = test_sr_hmac_validate_fn,
+};
+/* *INDENT-ON* */
+
+/**
+ * @brief Add or Delete HMAC key
+ *
+ * @param sm ip6_sr_main_t *
+ * @param key_id u32
+ * @param shared_secret u8 *
+ * @param is_del u8
+ *
+ * @return retval i32
+ */
+/* $$$ fixme: shouldn't return i32 */
+i32
+sr_hmac_add_del_key (ip6_sr_main_t * sm, u32 key_id, u8 * shared_secret,
+ u8 is_del)
+{
+ u32 index;
+ ip6_sr_hmac_key_t *key;
+
+ if (is_del == 0)
+ {
+ /* Specific key in use? Fail. */
+ if (key_id && vec_len (sm->hmac_keys) > key_id
+ && sm->hmac_keys[key_id].shared_secret)
+ return -2;
+
+ index = key_id;
+ key = find_or_add_shared_secret (sm, shared_secret, &index);
+ ASSERT (index == key_id);
+ return 0;
+ }
+
+ /* delete */
+
+ if (key_id) /* delete by key ID */
+ {
+ if (vec_len (sm->hmac_keys) <= key_id)
+ return -3;
+
+ key = sm->hmac_keys + key_id;
+
+ hash_unset_mem (sm->hmac_key_by_shared_secret, key->shared_secret);
+ vec_free (key->shared_secret);
+ return 0;
+ }
+
+ index = 0;
+ key = find_or_add_shared_secret (sm, shared_secret, &index);
+ hash_unset_mem (sm->hmac_key_by_shared_secret, key->shared_secret);
+ vec_free (key->shared_secret);
+ return 0;
+}
+
+
+static clib_error_t *
+sr_hmac_add_del_key_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ ip6_sr_main_t *sm = &sr_main;
+ u8 is_del = 0;
+ u32 key_id = 0;
+ u8 key_id_set = 0;
+ u8 *shared_secret = 0;
+ i32 rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "del"))
+ is_del = 1;
+ else if (unformat (input, "id %d", &key_id))
+ key_id_set = 1;
+ else if (unformat (input, "key %s", &shared_secret))
+ {
+ /* Do not include the trailing NULL byte. Guaranteed interop issue */
+ _vec_len (shared_secret) -= 1;
+ }
+ else
+ break;
+ }
+
+ if (is_del == 0 && shared_secret == 0)
+ return clib_error_return (0, "shared secret must be set to add a key");
+
+ if (shared_secret == 0 && key_id_set == 0)
+ return clib_error_return (0, "shared secret and key id both unset");
+
+ rv = sr_hmac_add_del_key (sm, key_id, shared_secret, is_del);
+
+ vec_free (shared_secret);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ default:
+ return clib_error_return (0, "sr_hmac_add_del_key returned %d", rv);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (sr_hmac, static) = {
+ .path = "sr hmac",
+ .short_help = "sr hmac [del] id <nn> key <str>",
+ .function = sr_hmac_add_del_key_fn,
+};
+/* *INDENT-ON* */
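+
+/* Example (illustrative): "sr hmac id 1 key my-shared-secret" adds key
+   id 1; "sr hmac del id 1" removes it. */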
+
+/**
+ * @brief CLI parser for show HMAC key shared secrets
+ *
+ * @param vm vlib_main_t *
+ * @param input unformat_input_t *
+ * @param cmd vlib_cli_command_t *
+ *
+ * @return error clib_error_t *
+ */
+static clib_error_t *
+show_sr_hmac_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ ip6_sr_main_t *sm = &sr_main;
+ int i;
+
+ for (i = 1; i < vec_len (sm->hmac_keys); i++)
+ {
+ if (sm->hmac_keys[i].shared_secret)
+ vlib_cli_output (vm, "[%d]: %v", i, sm->hmac_keys[i].shared_secret);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_sr_hmac, static) = {
+ .path = "show sr hmac",
+ .short_help = "show sr hmac",
+ .function = show_sr_hmac_fn,
+};
+/* *INDENT-ON* */
+
+/**
+ * @brief Test for SR debug flag
+ *
+ * @param vm vlib_main_t *
+ * @param input unformat_input_t *
+ * @param cmd vlib_cli_command_t *
+ *
+ * @return error clib_error_t *
+ */
+static clib_error_t *
+test_sr_debug_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ ip6_sr_main_t *sm = &sr_main;
+
+ if (unformat (input, "on"))
+ sm->is_debug = 1;
+ else if (unformat (input, "off"))
+ sm->is_debug = 0;
+ else
+ return clib_error_return (0, "expected on|off in '%U'",
+ format_unformat_error, input);
+
+ vlib_cli_output (vm, "debug trace now %s", sm->is_debug ? "on" : "off");
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (test_sr_debug, static) = {
+ .path = "test sr debug",
+ .short_help = "test sr debug on|off",
+ .function = test_sr_debug_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/sr/sr.h b/src/vnet/sr/sr.h
new file mode 100644
index 00000000000..610b36996f3
--- /dev/null
+++ b/src/vnet/sr/sr.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief Segment Routing header
+ *
+ * @note sr_replicate only works using DPDK today
+ */
+#ifndef included_vnet_sr_h
+#define included_vnet_sr_h
+
+#include <vnet/vnet.h>
+#include <vnet/sr/sr_packet.h>
+#include <vnet/ip/ip6_packet.h>
+
+#include <openssl/opensslconf.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <openssl/crypto.h>
+#include <openssl/sha.h>
+#include <openssl/opensslv.h>
+#include <openssl/hmac.h>
+
+/**
+ * @brief Segment Route tunnel key
+ */
+typedef struct
+{
+ ip6_address_t src;
+ ip6_address_t dst;
+} ip6_sr_tunnel_key_t;
+
+/**
+ * @brief Segment Route tunnel
+ */
+typedef struct
+{
+ /** src, dst address */
+ ip6_sr_tunnel_key_t key;
+
+  /** Optional tunnel name */
+ u8 *name;
+
+ /** Mask width for FIB entry */
+ u32 dst_mask_width;
+
+ /** First hop, to save 1 elt in the segment list */
+ ip6_address_t first_hop;
+
+ /** RX Fib index */
+ u32 rx_fib_index;
+ /** TX Fib index */
+ u32 tx_fib_index;
+
+ /** The actual ip6 SR header */
+ u8 *rewrite;
+
+  /** Indicates that this tunnel is part of a policy comprising
+     multiple tunnels. If == ~0, the tunnel is not part of a policy */
+ u32 policy_index;
+} ip6_sr_tunnel_t;
+
+/**
+ * @brief Shared secret for keyed-hash message authentication code (HMAC).
+ */
+typedef struct
+{
+ u8 *shared_secret;
+} ip6_sr_hmac_key_t;
+
+/**
+ * @brief Args required for add/del tunnel.
+ *
+ * Else we end up passing a LOT of parameters around.
+ */
+typedef struct
+{
+ /** Key (header imposition case) */
+ ip6_address_t *src_address;
+ ip6_address_t *dst_address;
+ u32 dst_mask_width;
+ u32 rx_table_id;
+ u32 tx_table_id;
+
+ /** optional name argument - for referencing SR tunnel/policy by name */
+ u8 *name;
+
+ /** optional policy name */
+ u8 *policy_name;
+
+ /** segment list, when inserting an ip6 SR header */
+ ip6_address_t *segments;
+
+ /**
+ * "Tag" list, aka segments inserted at the end of the list,
+ * past last_seg
+ */
+ ip6_address_t *tags;
+
+ /** Shared secret => generate SHA-256 HMAC security fields */
+ u8 *shared_secret;
+
+ /** Flags, e.g. cleanup, policy-list flags */
+ u16 flags_net_byte_order;
+
+  /** Delete the tunnel? */
+ u8 is_del;
+} ip6_sr_add_del_tunnel_args_t;
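+
+/*
+ * Minimal caller sketch (illustrative only; address parsing and error
+ * handling elided, and all field values below are example assumptions):
+ *
+ *    ip6_sr_add_del_tunnel_args_t a;
+ *    ip6_address_t src, dst, seg, *segments = 0;
+ *
+ *    memset (&a, 0, sizeof (a));
+ *    vec_add1 (segments, seg);
+ *    a.src_address = &src;
+ *    a.dst_address = &dst;
+ *    a.dst_mask_width = 128;
+ *    a.segments = segments;
+ *    a.is_del = 0;
+ *    rv = ip6_sr_add_del_tunnel (&a);
+ */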
+
+/**
+ * @brief Args for creating a policy.
+ *
+ * Typically used for multicast replication.
+ * i.e. a multicast address can be associated with a policy and
+ * packets sent to it replicated across a number of unicast SR tunnels.
+ */
+typedef struct
+{
+ /** policy name */
+ u8 *name;
+
+ /** tunnel names */
+ u8 **tunnel_names;
+
+ /** Delete the policy? */
+ u8 is_del;
+} ip6_sr_add_del_policy_args_t;
+
+/**
+ * @brief Segment Routing policy.
+ *
+ * Typically used for multicast replication.
+ * i.e. a multicast address can be associated with a policy and
+ * packets sent to it replicated across a number of unicast SR tunnels.
+ */
+typedef struct
+{
+ /** name of policy */
+ u8 *name;
+
+ /** vector to SR tunnel index */
+ u32 *tunnel_indices;
+
+} ip6_sr_policy_t;
+
+/**
+ * @brief Args for mapping of multicast address to policy name.
+ *
+ * Typically used for multicast replication.
+ * i.e. a multicast address can be associated with a policy and
+ * packets sent to it replicated across a number of unicast SR tunnels.
+ */
+typedef struct
+{
+ /** multicast IP6 address */
+ ip6_address_t *multicast_address;
+
+ /** name of policy to map to */
+ u8 *policy_name;
+
+ /** Delete the mapping */
+ u8 is_del;
+
+} ip6_sr_add_del_multicastmap_args_t;
+
+/**
+ * @brief Segment Routing state.
+ */
+typedef struct
+{
+ /** pool of tunnel instances, sr entry only */
+ ip6_sr_tunnel_t *tunnels;
+
+ /** find an sr "tunnel" by its outer-IP src/dst */
+ uword *tunnel_index_by_key;
+
+ /** find an sr "tunnel" by its name */
+ uword *tunnel_index_by_name;
+
+ /** policy pool */
+ ip6_sr_policy_t *policies;
+
+ /** find a policy by name */
+ uword *policy_index_by_policy_name;
+
+ /** multicast address to policy mapping */
+ uword *policy_index_by_multicast_address;
+
+ /** hmac key id by shared secret */
+ uword *hmac_key_by_shared_secret;
+
+ /** ip6-rewrite next index for reinstalling the original dst address */
+ u32 ip6_rewrite_sr_next_index;
+
+ /** ip6-replicate next index for multicast tunnel */
+ u32 ip6_lookup_sr_replicate_index;
+
+ /** application API callback */
+ void *sr_local_cb;
+
+ /** validate hmac keys */
+ u8 validate_hmac;
+
+ /** pool of hmac keys */
+ ip6_sr_hmac_key_t *hmac_keys;
+
+  /** OpenSSL message digest (SHA-256) used to compute HMAC fields */
+  EVP_MD *md;
+  /** OpenSSL HMAC context */
+ HMAC_CTX *hmac_ctx;
+
+ /** enable debug spew */
+ u8 is_debug;
+
+ /** convenience */
+ vlib_main_t *vlib_main;
+ /** convenience */
+ vnet_main_t *vnet_main;
+} ip6_sr_main_t;
+
+ip6_sr_main_t sr_main;
+
+format_function_t format_ip6_sr_header;
+format_function_t format_ip6_sr_header_with_length;
+
+vlib_node_registration_t ip6_sr_input_node;
+
+#if DPDK > 0
+extern vlib_node_registration_t sr_replicate_node;
+#endif /* DPDK */
+
+int ip6_sr_add_del_tunnel (ip6_sr_add_del_tunnel_args_t * a);
+int ip6_sr_add_del_policy (ip6_sr_add_del_policy_args_t * a);
+int ip6_sr_add_del_multicastmap (ip6_sr_add_del_multicastmap_args_t * a);
+
+void vnet_register_sr_app_callback (void *cb);
+
+void sr_fix_hmac (ip6_sr_main_t * sm, ip6_header_t * ip,
+ ip6_sr_header_t * sr);
+
+#endif /* included_vnet_sr_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/sr/sr_error.def b/src/vnet/sr/sr_error.def
new file mode 100644
index 00000000000..62d021fd47b
--- /dev/null
+++ b/src/vnet/sr/sr_error.def
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+sr_error (NONE, "no error")
+sr_error (BAD_ROUTING_HEADER_TYPE, "bad routing header type (not 4)")
+sr_error (NO_MORE_SEGMENTS, "out of SR segment drops")
+sr_error (PKTS_PROCESSED, "SR packets processed")
+sr_error (APP_CALLBACK, "SR application callback errors")
+sr_error (HMAC_INVALID, "SR packets with invalid HMAC signatures")
diff --git a/src/vnet/sr/sr_fix_dst_error.def b/src/vnet/sr/sr_fix_dst_error.def
new file mode 100644
index 00000000000..48fe7af6c98
--- /dev/null
+++ b/src/vnet/sr/sr_fix_dst_error.def
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+sr_fix_dst_error (NONE, "no error")
+sr_fix_dst_error (NO_SR_HEADER, "no SR header present")
+sr_fix_dst_error (NO_MORE_SEGMENTS, "no more SR segments")
diff --git a/src/vnet/sr/sr_packet.h b/src/vnet/sr/sr_packet.h
new file mode 100644
index 00000000000..179b94c2dc7
--- /dev/null
+++ b/src/vnet/sr/sr_packet.h
@@ -0,0 +1,251 @@
+#ifndef included_vnet_sr_packet_h
+#define included_vnet_sr_packet_h
+
+#include <vnet/ip/ip.h>
+
+/*
+ * ipv6 segment-routing header format
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ * @brief The Segment Routing Header (SRH).
+ *
+ * The Segment Routing Header (SRH) is defined in the diagram below.
+ *
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Next Header | Hdr Ext Len | Routing Type | Segments Left |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | First Segment | Flags | HMAC Key ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * | Segment List[0] (128 bits ipv6 address) |
+ * | |
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * | |
+ * ...
+ * | |
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * | Segment List[n] (128 bits ipv6 address) |
+ * | |
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * | Policy List[0] (optional) |
+ * | |
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * | Policy List[1] (optional) |
+ * | |
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * | Policy List[2] (optional) |
+ * | |
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * | |
+ * | |
+ * | HMAC (256 bits) |
+ * | (optional) |
+ * | |
+ * | |
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * where:
+ *
+ * o Next Header: 8-bit selector. Identifies the type of header
+ * immediately following the SRH.
+ *
+ * o Hdr Ext Len: 8-bit unsigned integer, is the length of the SRH
+ * header in 8-octet units, not including the first 8 octets.
+ *
+ * o Routing Type: TBD, to be assigned by IANA (suggested value: 4).
+ *
+ * o Segments Left. Defined in [RFC2460], it contains the index, in
+ * the Segment List, of the next segment to inspect. Segments Left
+ * is decremented at each segment and it is used as an index in the
+ * segment list.
+ *
+ * o First Segment: offset in the SRH, not including the first 8 octets
+ * and expressed in 16-octet units, pointing to the last element of
+ * the segment list, which is in fact the first segment of the
+ * segment routing path.
+ *
+ * o Flags: 16 bits of flags. Following flags are defined:
+ *
+ * 1
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |C|P|R|R| Policy Flags |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * C-flag: Clean-up flag. Set when the SRH has to be removed from
+ * the packet when packet reaches the last segment.
+ *
+ * P-flag: Protected flag. Set when the packet has been rerouted
+ * through FRR mechanism by a SR endpoint node. See Section 6.3
+ * for more details.
+ *
+ * R-flags. Reserved and for future use.
+ *
+ * Policy Flags. Define the type of the IPv6 addresses encoded
+ * into the Policy List (see below). The following have been
+ * defined:
+ *
+ * Bits 4-6: determine the type of the first element after the
+ * segment list.
+ *
+ * Bits 7-9: determine the type of the second element.
+ *
+ * Bits 10-12: determine the type of the third element.
+ *
+ * Bits 13-15: determine the type of the fourth element.
+ *
+ * The following values are used for the type:
+ *
+ * 0x0: Not present. If value is set to 0x0, it means the
+ * element represented by these bits is not present.
+ *
+ * 0x1: SR Ingress.
+ *
+ * 0x2: SR Egress.
+ *
+ * 0x3: Original Source Address.
+ *
+ * o HMAC Key ID and HMAC field, and their use are defined in
+ * [I-D.vyncke-6man-segment-routing-security].
+ *
+ * o Segment List[n]: 128 bit IPv6 addresses representing the nth
+ * segment in the Segment List. The Segment List is encoded starting
+ * from the last segment of the path. I.e., the first element of the
+ * segment list (Segment List [0]) contains the last segment of the
+ * path while the last segment of the Segment List (Segment List[n])
+ * contains the first segment of the path. The index contained in
+ * "Segments Left" identifies the current active segment.
+ *
+ * o Policy List. Optional addresses representing specific nodes in
+ * the SR path such as:
+ *
+ * SR Ingress: a 128 bit generic identifier representing the
+ * ingress in the SR domain (i.e. it need not be a valid IPv6
+ * address).
+ *
+ * SR Egress: a 128 bit generic identifier representing the egress
+ * in the SR domain (i.e. it need not be a valid IPv6
+ * address).
+ *
+ * Original Source Address: IPv6 address originally present in the
+ * SA field of the packet.
+ *
+ * The segments in the Policy List are encoded after the segment list
+ * and they are optional. If none are in the SRH, all bits of the
+ * Policy List Flags MUST be set to 0x0.
+ */
+
+#ifndef IPPROTO_IPV6_ROUTE
+#define IPPROTO_IPV6_ROUTE 43
+#endif
+
+#define ROUTING_HEADER_TYPE_SR 4
+/**
+ @brief SR header struct.
+*/
+typedef struct
+{
+ /** Protocol for next header. */
+ u8 protocol;
+
+ /**
+ * Length of routing header in 8 octet units,
+ * not including the first 8 octets
+ */
+ u8 length;
+
+  /** Type of routing header; type 4 = segment routing */
+ u8 type;
+
+ /** Next segment in the segment list */
+ u8 segments_left;
+
+ /**
+   * Offset in the SRH, in 16-octet units (not including the first
+   * 8 octets), of the last element of the segment list, i.e. the
+   * first segment of the SR path.
+ */
+ u8 first_segment;
+
+  /** Clean-up flag (C): strip the SRH when the packet reaches the last segment */
+#define IP6_SR_HEADER_FLAG_CLEANUP (0x8000)
+  /** Protected flag (P): packet was rerouted through FRR by an SR endpoint */
+#define IP6_SR_HEADER_FLAG_PROTECTED (0x4000)
+  /** Reserved flag bits */
+#define IP6_SR_HEADER_FLAG_RESERVED (0x3000)
+  /** Policy-list element type: not present */
+#define IP6_SR_HEADER_FLAG_PL_ELT_NOT_PRESENT (0x0)
+  /** Policy-list element type: SR ingress */
+#define IP6_SR_HEADER_FLAG_PL_ELT_INGRESS_PE (0x1)
+  /** Policy-list element type: SR egress */
+#define IP6_SR_HEADER_FLAG_PL_ELT_EGRESS_PE (0x2)
+  /** Policy-list element type: original source address */
+#define IP6_SR_HEADER_FLAG_PL_ELT_ORIG_SRC_ADDR (0x3)
+ /** values 0x4 - 0x7 are reserved */
+ u16 flags;
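+  /** HMAC Key ID; HMAC use is defined in
+      [I-D.vyncke-6man-segment-routing-security] */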
+ u8 hmac_key;
+
+ /** The segment + policy list elts */
+ ip6_address_t segments[0];
+} __attribute__ ((packed)) ip6_sr_header_t;
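+
+/*
+ * Size arithmetic example: an SRH carrying two segments and no policy
+ * list or HMAC occupies 8 fixed bytes + 2 * 16 segment bytes = 40 bytes,
+ * so its length field is (40 - 8) / 8 = 4.
+ *
+ * The flags field is carried in network byte order, so a clean-up flag
+ * test looks like (sketch):
+ *
+ *    if (clib_net_to_host_u16 (sr->flags) & IP6_SR_HEADER_FLAG_CLEANUP)
+ *      ... strip the SRH at the last segment ...
+ */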
+
+static inline int
+ip6_sr_policy_list_shift_from_index (int pl_index)
+{
+ return (-3 * pl_index) + 12;
+}
+
+/** pl_index is one-origined */
+static inline int
+ip6_sr_policy_list_flags (u16 flags_host_byte_order, int pl_index)
+{
+ int shift;
+
+ if (pl_index <= 0 || pl_index > 4)
+ return 0;
+
+ shift = (-3 * pl_index) + 12;
+ flags_host_byte_order >>= shift;
+
+ return (flags_host_byte_order & 7);
+}
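+
+/*
+ * Worked example: with flags_host_byte_order = 0x0200 and pl_index = 1,
+ * shift = (-3 * 1) + 12 = 9 and (0x0200 >> 9) & 7 = 0x1, i.e. the first
+ * policy-list element carries an SR Ingress identifier.
+ */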
+
+#endif /* included_vnet_sr_packet_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/sr/sr_replicate.c b/src/vnet/sr/sr_replicate.c
new file mode 100644
index 00000000000..5f9de5042af
--- /dev/null
+++ b/src/vnet/sr/sr_replicate.c
@@ -0,0 +1,490 @@
+/*
+ * sr_replicate.c: ipv6 segment routing replicator for multicast
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief Functions for replicating packets across SR tunnels.
+ *
+ * Leverages rte_pktmbuf_clone() so there is no memcpy for
+ * invariant parts of the packet.
+ *
+ * @note Currently requires DPDK
+*/
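+
+/*
+ * Per-packet outline (summarizing the node below): for each tunnel in
+ * the matched replication policy, clone the invariant payload with
+ * rte_pktmbuf_clone(), allocate a fresh header mbuf, copy the original
+ * ip6 (plus any hop-by-hop) header and the tunnel's SR rewrite into it,
+ * chain header -> clone, recompute the HMAC and enqueue to ip6-lookup.
+ */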
+
+#if DPDK > 0 /* Cannot run replicate without DPDK */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/sr/sr.h>
+#include <vnet/devices/dpdk/dpdk.h>
+#include <vnet/ip/ip.h>
+#include <vnet/fib/ip6_fib.h>
+
+#include <vppinfra/hash.h>
+#include <vppinfra/error.h>
+#include <vppinfra/elog.h>
+
+/**
+ * @brief sr_replicate state.
+ *
+*/
+typedef struct
+{
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} sr_replicate_main_t;
+
+sr_replicate_main_t sr_replicate_main;
+
+/**
+ * @brief Information to display in packet trace.
+ *
+*/
+typedef struct
+{
+ ip6_address_t src, dst;
+ u16 length;
+ u32 next_index;
+ u32 tunnel_index;
+ u8 sr[256];
+} sr_replicate_trace_t;
+
+/**
+ * @brief packet trace format function.
+ *
+ * @param *s u8 used for string output
+ * @param *args va_list structured input to va_arg to output @ref sr_replicate_trace_t
+ * @return *s u8 - formatted trace output
+*/
+static u8 *
+format_sr_replicate_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ sr_replicate_trace_t *t = va_arg (*args, sr_replicate_trace_t *);
+ ip6_sr_main_t *sm = &sr_main;
+ ip6_sr_tunnel_t *tun = pool_elt_at_index (sm->tunnels, t->tunnel_index);
+ ip6_fib_t *rx_fib, *tx_fib;
+
+ rx_fib = ip6_fib_get (tun->rx_fib_index);
+ tx_fib = ip6_fib_get (tun->tx_fib_index);
+
+ s = format
+ (s, "SR-REPLICATE: next %s ip6 src %U dst %U len %u\n"
+ " rx-fib-id %d tx-fib-id %d\n%U",
+ "ip6-lookup",
+ format_ip6_address, &t->src,
+ format_ip6_address, &t->dst, t->length,
+ rx_fib->table_id, tx_fib->table_id,
+ format_ip6_sr_header, t->sr, 0 /* print_hmac */ );
+ return s;
+
+}
+
+#define foreach_sr_replicate_error \
+_(REPLICATED, "sr packets replicated") \
+_(NO_BUFFERS, "error allocating buffers for replicas") \
+_(NO_REPLICAS, "no replicas were needed") \
+_(NO_BUFFER_DROPS, "sr no buffer drops")
+
+/**
+ * @brief SR replicate error codes
+ */
+typedef enum
+{
+#define _(sym,str) SR_REPLICATE_ERROR_##sym,
+ foreach_sr_replicate_error
+#undef _
+ SR_REPLICATE_N_ERROR,
+} sr_replicate_error_t;
+
+/**
+ * @brief Error strings for SR replicate
+ */
+static char *sr_replicate_error_strings[] = {
+#define _(sym,string) string,
+ foreach_sr_replicate_error
+#undef _
+};
+
+/**
+ * @brief Defines next-nodes for packet processing.
+ *
+*/
+typedef enum
+{
+ SR_REPLICATE_NEXT_IP6_LOOKUP,
+ SR_REPLICATE_N_NEXT,
+} sr_replicate_next_t;
+
+/**
+ * @brief Single loop packet replicator.
+ *
+ * @node sr-replicate
+ * @param vm vlib_main_t
+ * @return frame->n_vectors uword
+*/
+static uword
+sr_replicate_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ sr_replicate_next_t next_index;
+ int pkts_replicated = 0;
+ ip6_sr_main_t *sm = &sr_main;
+ int no_buffer_drops = 0;
+ vlib_buffer_free_list_t *fl;
+ unsigned socket_id = rte_socket_id ();
+ vlib_buffer_main_t *bm = vm->buffer_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, hdr_bi0;
+ vlib_buffer_t *b0, *orig_b0;
+ struct rte_mbuf *orig_mb0 = 0, *hdr_mb0 = 0, *clone0 = 0;
+ struct rte_mbuf **hdr_vec = 0, **rte_mbuf_vec = 0;
+ ip6_sr_policy_t *pol0 = 0;
+ ip6_sr_tunnel_t *t0 = 0;
+ ip6_sr_header_t *hdr_sr0 = 0;
+ ip6_header_t *ip0 = 0, *hdr_ip0 = 0;
+ int num_replicas = 0;
+ int i;
+ u32 len_bytes = sizeof (ip6_header_t);
+ u8 next_hdr, ip_next_hdr = IPPROTO_IPV6_ROUTE;
+
+ bi0 = from[0];
+
+ b0 = vlib_get_buffer (vm, bi0);
+ orig_b0 = b0;
+
+ pol0 = pool_elt_at_index (sm->policies,
+ vnet_buffer (b0)->ip.save_protocol);
+
+ ip0 = vlib_buffer_get_current (b0);
+ /* Skip forward to the punch-in point */
+ vlib_buffer_advance (b0, sizeof (*ip0));
+ next_hdr = ip0->protocol;
+
+ /* HBH must immediately follow ipv6 header */
+ if (PREDICT_FALSE
+ (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+ {
+ ip6_hop_by_hop_ext_t *ext_hdr =
+ (ip6_hop_by_hop_ext_t *) ip6_next_header (ip0);
+ u32 ext_hdr_len = 0;
+ ext_hdr_len = ip6_ext_header_len ((ip6_ext_header_t *) ext_hdr);
+ len_bytes += ext_hdr_len;
+ next_hdr = ext_hdr->next_hdr;
+ ext_hdr->next_hdr = IPPROTO_IPV6_ROUTE;
+ ip_next_hdr = IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS;
+ /* Skip forward to the punch-in point */
+ vlib_buffer_advance (b0, ext_hdr_len);
+
+ }
+
+ orig_mb0 = rte_mbuf_from_vlib_buffer (b0);
+
+ i16 delta0 = vlib_buffer_length_in_chain (vm, orig_b0)
+ - (i16) orig_mb0->pkt_len;
+
+ u16 new_data_len0 = (u16) ((i16) orig_mb0->data_len + delta0);
+ u16 new_pkt_len0 = (u16) ((i16) orig_mb0->pkt_len + delta0);
+
+ orig_mb0->data_len = new_data_len0;
+ orig_mb0->pkt_len = new_pkt_len0;
+ orig_mb0->data_off += (u16) (b0->current_data);
+
+ /*
+ Before entering loop determine if we can allocate:
+ - all the new HEADER RTE_MBUFs and assign them to a vector
+ - all the clones
+
+ if successful, then iterate over vectors of resources
+
+ */
+ num_replicas = vec_len (pol0->tunnel_indices);
+
+ if (PREDICT_FALSE (num_replicas == 0))
+ {
+ b0->error = node->errors[SR_REPLICATE_ERROR_NO_REPLICAS];
+ goto do_trace0;
+ }
+
+ vec_reset_length (hdr_vec);
+ vec_reset_length (rte_mbuf_vec);
+
+ for (i = 0; i < num_replicas; i++)
+ {
+ uint8_t nb_seg;
+ struct rte_mbuf *clone0i;
+ vlib_buffer_t *clone0_c, *clone_b0;
+
+ t0 = vec_elt_at_index (sm->tunnels, pol0->tunnel_indices[i]);
+ hdr_mb0 = rte_pktmbuf_alloc (bm->pktmbuf_pools[socket_id]);
+
+ if (i < (num_replicas - 1))
+ {
+ /* Not the last tunnel to process */
+ clone0 = rte_pktmbuf_clone
+ (orig_mb0, bm->pktmbuf_pools[socket_id]);
+ if (clone0 == 0)
+ goto clone_fail;
+ nb_seg = 0;
+ clone0i = clone0;
+ clone0_c = NULL;
+ while ((clone0->nb_segs >= 1) && (nb_seg < clone0->nb_segs))
+ {
+
+ clone_b0 = vlib_buffer_from_rte_mbuf (clone0i);
+ vlib_buffer_init_for_free_list (clone_b0, fl);
+
+ ASSERT ((clone_b0->flags & VLIB_BUFFER_NEXT_PRESENT) ==
+ 0);
+ ASSERT (clone_b0->current_data == 0);
+
+ clone_b0->current_data =
+ (clone0i->buf_addr + clone0i->data_off) -
+ (void *) clone_b0->data;
+
+ clone_b0->current_length = clone0i->data_len;
+ if (PREDICT_FALSE (clone0_c != NULL))
+ {
+ clone0_c->flags |= VLIB_BUFFER_NEXT_PRESENT;
+ clone0_c->next_buffer =
+ vlib_get_buffer_index (vm, clone_b0);
+ }
+ clone0_c = clone_b0;
+ clone0i = clone0i->next;
+ nb_seg++;
+ }
+ }
+ else
+		/* Last tunnel to process: reuse the original mbuf */
+ clone0 = orig_mb0;
+
+
+ if (PREDICT_FALSE (!clone0 || !hdr_mb0))
+ {
+ clone_fail:
+ b0->error = node->errors[SR_REPLICATE_ERROR_NO_BUFFERS];
+
+ vec_foreach_index (i, rte_mbuf_vec)
+ {
+ rte_pktmbuf_free (rte_mbuf_vec[i]);
+ }
+ vec_free (rte_mbuf_vec);
+
+ vec_foreach_index (i, hdr_vec)
+ {
+ rte_pktmbuf_free (hdr_vec[i]);
+ }
+ vec_free (hdr_vec);
+
+ goto do_trace0;
+ }
+
+ vec_add1 (hdr_vec, hdr_mb0);
+ vec_add1 (rte_mbuf_vec, clone0);
+
+ }
+
+ for (i = 0; i < num_replicas; i++)
+ {
+ vlib_buffer_t *hdr_b0;
+ u16 new_l0 = 0;
+
+ t0 = vec_elt_at_index (sm->tunnels, pol0->tunnel_indices[i]);
+ /* Our replicas */
+ hdr_mb0 = hdr_vec[i];
+ clone0 = rte_mbuf_vec[i];
+
+ hdr_mb0->data_len = len_bytes + vec_len (t0->rewrite);
+ hdr_mb0->pkt_len = hdr_mb0->data_len +
+ vlib_buffer_length_in_chain (vm, orig_b0);
+
+ hdr_b0 = vlib_buffer_from_rte_mbuf (hdr_mb0);
+
+ vlib_buffer_init_for_free_list (hdr_b0, fl);
+
+ memcpy (hdr_b0->data, ip0, len_bytes);
+ memcpy (hdr_b0->data + len_bytes, t0->rewrite,
+ vec_len (t0->rewrite));
+
+ hdr_b0->current_data = 0;
+ hdr_b0->current_length = len_bytes + vec_len (t0->rewrite);
+ hdr_b0->flags = orig_b0->flags | VLIB_BUFFER_NEXT_PRESENT;
+ hdr_b0->trace_index = orig_b0->trace_index;
+ vnet_buffer (hdr_b0)->l2_classify.opaque_index = 0;
+
+ hdr_b0->total_length_not_including_first_buffer =
+ hdr_mb0->pkt_len - hdr_b0->current_length;
+ vnet_buffer (hdr_b0)->sw_if_index[VLIB_TX] = t0->tx_fib_index;
+
+ hdr_ip0 = (ip6_header_t *) hdr_b0->data;
+ new_l0 = clib_net_to_host_u16 (ip0->payload_length) +
+ vec_len (t0->rewrite);
+ hdr_ip0->payload_length = clib_host_to_net_u16 (new_l0);
+ hdr_sr0 = (ip6_sr_header_t *) ((u8 *) hdr_ip0 + len_bytes);
+ /* $$$ tune */
+ clib_memcpy (hdr_sr0, t0->rewrite, vec_len (t0->rewrite));
+ hdr_sr0->protocol = next_hdr;
+ hdr_ip0->protocol = ip_next_hdr;
+
+ /* Copy dst address into the DA slot in the segment list */
+ clib_memcpy (hdr_sr0->segments, ip0->dst_address.as_u64,
+ sizeof (ip6_address_t));
+
+ /* Rewrite the ip6 dst address */
+ hdr_ip0->dst_address.as_u64[0] = t0->first_hop.as_u64[0];
+ hdr_ip0->dst_address.as_u64[1] = t0->first_hop.as_u64[1];
+
+ sr_fix_hmac (sm, hdr_ip0, hdr_sr0);
+
+ /* prepend new header to invariant piece */
+ hdr_mb0->next = clone0;
+ hdr_b0->next_buffer =
+ vlib_get_buffer_index (vm,
+ vlib_buffer_from_rte_mbuf (clone0));
+
+ /* update header's fields */
+ hdr_mb0->pkt_len =
+ (uint16_t) (hdr_mb0->data_len + clone0->pkt_len);
+ hdr_mb0->nb_segs = (uint8_t) (clone0->nb_segs + 1);
+
+ /* copy metadata from source packet */
+ hdr_mb0->port = clone0->port;
+ hdr_mb0->vlan_tci = clone0->vlan_tci;
+ hdr_mb0->vlan_tci_outer = clone0->vlan_tci_outer;
+ hdr_mb0->tx_offload = clone0->tx_offload;
+ hdr_mb0->hash = clone0->hash;
+
+ hdr_mb0->ol_flags = clone0->ol_flags & ~(IND_ATTACHED_MBUF);
+
+ __rte_mbuf_sanity_check (hdr_mb0, 1);
+
+ hdr_bi0 = vlib_get_buffer_index (vm, hdr_b0);
+
+ to_next[0] = hdr_bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ if (n_left_to_next == 0)
+ {
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ }
+ pkts_replicated++;
+ }
+
+ from += 1;
+ n_left_from -= 1;
+
+ do_trace0:
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ sr_replicate_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->tunnel_index = t0 - sm->tunnels;
+ tr->length = 0;
+ if (hdr_ip0)
+ {
+ memcpy (tr->src.as_u8, hdr_ip0->src_address.as_u8,
+ sizeof (tr->src.as_u8));
+ memcpy (tr->dst.as_u8, hdr_ip0->dst_address.as_u8,
+ sizeof (tr->dst.as_u8));
+ if (hdr_ip0->payload_length)
+ tr->length = clib_net_to_host_u16
+ (hdr_ip0->payload_length);
+ }
+ tr->next_index = next_index;
+ if (hdr_sr0)
+ memcpy (tr->sr, hdr_sr0, sizeof (tr->sr));
+ }
+
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, sr_replicate_node.index,
+ SR_REPLICATE_ERROR_REPLICATED,
+ pkts_replicated);
+
+ vlib_node_increment_counter (vm, sr_replicate_node.index,
+ SR_REPLICATE_ERROR_NO_BUFFER_DROPS,
+ no_buffer_drops);
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (sr_replicate_node) = {
+ .function = sr_replicate_node_fn,
+ .name = "sr-replicate",
+ .vector_size = sizeof (u32),
+ .format_trace = format_sr_replicate_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(sr_replicate_error_strings),
+ .error_strings = sr_replicate_error_strings,
+
+ .n_next_nodes = SR_REPLICATE_N_NEXT,
+
+ .next_nodes = {
+ [SR_REPLICATE_NEXT_IP6_LOOKUP] = "ip6-lookup",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (sr_replicate_node, sr_replicate_node_fn)
+/* *INDENT-ON* */
+
+clib_error_t *
+sr_replicate_init (vlib_main_t * vm)
+{
+ sr_replicate_main_t *msm = &sr_replicate_main;
+
+ msm->vlib_main = vm;
+ msm->vnet_main = vnet_get_main ();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (sr_replicate_init);
+
+#endif /* DPDK */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/srp/format.c b/src/vnet/srp/format.c
new file mode 100644
index 00000000000..a0250cc976f
--- /dev/null
+++ b/src/vnet/srp/format.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * srp_format.c: srp formatting/parsing.
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/srp/srp.h>
+#include <vnet/ethernet/ethernet.h>
+
+static u8 * format_srp_mode (u8 * s, va_list * args)
+{
+ u32 mode = va_arg (*args, u32);
+ char * t = 0;
+ switch (mode)
+ {
+#define _(f) case SRP_MODE_##f: t = #f; break;
+ foreach_srp_mode
+#undef _
+ default: t = 0; break;
+ }
+ if (t)
+ s = format (s, "%s", t);
+ else
+ s = format (s, "unknown 0x%x", mode);
+
+ return s;
+}
+
+u8 * format_srp_header_with_length (u8 * s, va_list * args)
+{
+ srp_and_ethernet_header_t * h = va_arg (*args, srp_and_ethernet_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+ ethernet_main_t * em = &ethernet_main;
+ uword indent, header_bytes;
+
+ header_bytes = sizeof (h[0]);
+ if (max_header_bytes != 0 && header_bytes > max_header_bytes)
+ return format (s, "srp header truncated");
+
+ indent = format_get_indent (s);
+
+ s = format (s, "mode %U, ring %s, priority %d, ttl %d",
+ format_srp_mode, h->srp.mode,
+ h->srp.is_inner_ring ? "inner" : "outer",
+ h->srp.priority, h->srp.ttl);
+
+ s = format (s, "\n%U%U: %U -> %U",
+ format_white_space, indent,
+ format_ethernet_type, clib_net_to_host_u16 (h->ethernet.type),
+ format_ethernet_address, h->ethernet.src_address,
+ format_ethernet_address, h->ethernet.dst_address);
+
+ if (max_header_bytes != 0 && header_bytes < max_header_bytes)
+ {
+ ethernet_type_info_t * ti;
+ vlib_node_t * node;
+
+ ti = ethernet_get_type_info (em, h->ethernet.type);
+ node = ti ? vlib_get_node (em->vlib_main, ti->node_index) : 0;
+ if (node && node->format_buffer)
+ s = format (s, "\n%U%U",
+ format_white_space, indent,
+ node->format_buffer, (void *) h + header_bytes,
+ max_header_bytes - header_bytes);
+ }
+
+ return s;
+}
+
+u8 * format_srp_header (u8 * s, va_list * args)
+{
+ srp_header_t * m = va_arg (*args, srp_header_t *);
+ return format (s, "%U", format_srp_header_with_length, m, 0);
+}
+
+uword
+unformat_srp_header (unformat_input_t * input, va_list * args)
+{
+ u8 ** result = va_arg (*args, u8 **);
+ srp_and_ethernet_header_t * h;
+
+ {
+ void * p;
+ vec_add2 (*result, p, sizeof (h[0]));
+ h = p;
+ }
+
+ if (! unformat (input, "%U: %U -> %U",
+ unformat_ethernet_type_net_byte_order, &h->ethernet.type,
+ unformat_ethernet_address, &h->ethernet.src_address,
+ unformat_ethernet_address, &h->ethernet.dst_address))
+ return 0;
+
+ h->srp.mode = SRP_MODE_data;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ u32 x;
+
+ if (unformat (input, "control"))
+ h->srp.mode = SRP_MODE_control_pass_to_host;
+
+ else if (unformat (input, "pri %d", &x))
+ h->srp.priority = x;
+
+ else if (unformat (input, "ttl %d", &x))
+ h->srp.ttl = x;
+
+ else
+ return 0;
+ }
+
+ return 1;
+}
diff --git a/src/vnet/srp/interface.c b/src/vnet/srp/interface.c
new file mode 100644
index 00000000000..d427cc3c523
--- /dev/null
+++ b/src/vnet/srp/interface.c
@@ -0,0 +1,458 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * srp_interface.c: srp interfaces
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/srp/srp.h>
+
+static u8*
+srp_build_rewrite (vnet_main_t * vnm,
+ u32 sw_if_index,
+ vnet_link_t link_type,
+ const void * dst_address)
+{
+ vnet_hw_interface_t * hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ srp_main_t * sm = &srp_main;
+ srp_and_ethernet_header_t * h;
+ u8* rewrite = NULL;
+ u16 type;
+ uword n_bytes = sizeof (h[0]);
+
+ switch (link_type) {
+#define _(a,b) case VNET_LINK_##a: type = ETHERNET_TYPE_##b; break
+ _ (IP4, IP4);
+ _ (IP6, IP6);
+ _ (MPLS, MPLS_UNICAST);
+ _ (ARP, ARP);
+#undef _
+ default:
+ return (NULL);
+ }
+
+ vec_validate(rewrite, n_bytes-1);
+ h = (srp_and_ethernet_header_t *)rewrite;
+
+ clib_memcpy (h->ethernet.src_address, hw->hw_address, sizeof (h->ethernet.src_address));
+ if (dst_address)
+ clib_memcpy (h->ethernet.dst_address, dst_address, sizeof (h->ethernet.dst_address));
+ else
+ memset (h->ethernet.dst_address, ~0, sizeof (h->ethernet.dst_address)); /* broadcast */
+
+ h->ethernet.type = clib_host_to_net_u16 (type);
+
+ h->srp.as_u16 = 0;
+ h->srp.mode = SRP_MODE_data;
+ h->srp.ttl = sm->default_data_ttl;
+ srp_header_compute_parity (&h->srp);
+
+ return (rewrite);
+}
+
+static void srp_register_interface_helper (u32 * hw_if_indices_by_side, u32 redistribute);
+
+void serialize_srp_main (serialize_main_t * m, va_list * va)
+{
+ srp_main_t * sm = &srp_main;
+ srp_interface_t * si;
+
+ serialize_integer (m, pool_elts (sm->interface_pool), sizeof (u32));
+ pool_foreach (si, sm->interface_pool, ({
+ serialize_integer (m, si->rings[SRP_RING_OUTER].hw_if_index, sizeof (u32));
+ serialize_integer (m, si->rings[SRP_RING_INNER].hw_if_index, sizeof (u32));
+ }));
+}
+
+void unserialize_srp_main (serialize_main_t * m, va_list * va)
+{
+ u32 i, n_ifs, hw_if_indices[SRP_N_RING];
+
+ unserialize_integer (m, &n_ifs, sizeof (u32));
+ for (i = 0; i < n_ifs; i++)
+ {
+ unserialize_integer (m, &hw_if_indices[SRP_RING_OUTER], sizeof (u32));
+ unserialize_integer (m, &hw_if_indices[SRP_RING_INNER], sizeof (u32));
+ srp_register_interface_helper (hw_if_indices, /* redistribute */ 0);
+ }
+}
+
+static void serialize_srp_register_interface_msg (serialize_main_t * m, va_list * va)
+{
+ u32 * hw_if_indices = va_arg (*va, u32 *);
+ serialize_integer (m, hw_if_indices[SRP_SIDE_A], sizeof (hw_if_indices[SRP_SIDE_A]));
+ serialize_integer (m, hw_if_indices[SRP_SIDE_B], sizeof (hw_if_indices[SRP_SIDE_B]));
+}
+
+static void unserialize_srp_register_interface_msg (serialize_main_t * m, va_list * va)
+{
+ CLIB_UNUSED (mc_main_t * mcm) = va_arg (*va, mc_main_t *);
+ u32 hw_if_indices[SRP_N_SIDE];
+ srp_main_t * sm = &srp_main;
+ uword * p;
+
+ unserialize_integer (m, &hw_if_indices[SRP_SIDE_A], sizeof (hw_if_indices[SRP_SIDE_A]));
+ unserialize_integer (m, &hw_if_indices[SRP_SIDE_B], sizeof (hw_if_indices[SRP_SIDE_B]));
+
+ p = hash_get (sm->srp_register_interface_waiting_process_pool_index_by_hw_if_index,
+ hw_if_indices[0]);
+ if (p)
+ {
+ vlib_one_time_waiting_process_t * wp = pool_elt_at_index (sm->srp_register_interface_waiting_process_pool, p[0]);
+ vlib_signal_one_time_waiting_process (mcm->vlib_main, wp);
+ pool_put (sm->srp_register_interface_waiting_process_pool, wp);
+ hash_unset (sm->srp_register_interface_waiting_process_pool_index_by_hw_if_index,
+ hw_if_indices[0]);
+ }
+ else
+ srp_register_interface_helper (hw_if_indices, /* redistribute */ 0);
+}
+
+MC_SERIALIZE_MSG (srp_register_interface_msg, static) = {
+ .name = "vnet_srp_register_interface",
+ .serialize = serialize_srp_register_interface_msg,
+ .unserialize = unserialize_srp_register_interface_msg,
+};
+
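+/* Register a (side A, side B) pair of hw interfaces as one SRP interface.
+   When redistribute is set and an mc_main exists, the registration is
+   first serialized to the rest of the group and this process waits for
+   the acknowledgement before programming local state. */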
+static void srp_register_interface_helper (u32 * hw_if_indices_by_side, u32 redistribute)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ srp_main_t * sm = &srp_main;
+ vlib_main_t * vm = sm->vlib_main;
+ srp_interface_t * si;
+ vnet_hw_interface_t * hws[SRP_N_RING];
+ uword s, * p;
+
+ if (vm->mc_main && redistribute)
+ {
+ vlib_one_time_waiting_process_t * wp;
+ mc_serialize (vm->mc_main, &srp_register_interface_msg, hw_if_indices_by_side);
+ pool_get (sm->srp_register_interface_waiting_process_pool, wp);
+ hash_set (sm->srp_register_interface_waiting_process_pool_index_by_hw_if_index,
+ hw_if_indices_by_side[0],
+ wp - sm->srp_register_interface_waiting_process_pool);
+ vlib_current_process_wait_for_one_time_event (vm, wp);
+ }
+
+ /* Check if interface has already been registered. */
+ p = hash_get (sm->interface_index_by_hw_if_index, hw_if_indices_by_side[0]);
+ if (p)
+ {
+ si = pool_elt_at_index (sm->interface_pool, p[0]);
+ }
+ else
+ {
+ pool_get (sm->interface_pool, si);
+ memset (si, 0, sizeof (si[0]));
+ }
+ for (s = 0; s < SRP_N_SIDE; s++)
+ {
+ hws[s] = vnet_get_hw_interface (vnm, hw_if_indices_by_side[s]);
+ si->rings[s].ring = s;
+ si->rings[s].hw_if_index = hw_if_indices_by_side[s];
+ si->rings[s].sw_if_index = hws[s]->sw_if_index;
+ hash_set (sm->interface_index_by_hw_if_index, hw_if_indices_by_side[s], si - sm->interface_pool);
+ }
+
+ /* Inherit MAC address from outer ring. */
+ clib_memcpy (si->my_address, hws[SRP_RING_OUTER]->hw_address,
+ vec_len (hws[SRP_RING_OUTER]->hw_address));
+
+ /* Default time to wait to restore signal. */
+ si->config.wait_to_restore_idle_delay = 60;
+ si->config.ips_tx_interval = 1;
+}
+
+void srp_register_interface (u32 * hw_if_indices_by_side)
+{
+ srp_register_interface_helper (hw_if_indices_by_side, /* redistribute */ 1);
+}
+
+void srp_interface_set_hw_wrap_function (u32 hw_if_index, srp_hw_wrap_function_t * f)
+{
+ srp_interface_t * si = srp_get_interface_from_vnet_hw_interface (hw_if_index);
+ si->hw_wrap_function = f;
+}
+
+void srp_interface_set_hw_enable_function (u32 hw_if_index, srp_hw_enable_function_t * f)
+{
+ srp_interface_t * si = srp_get_interface_from_vnet_hw_interface (hw_if_index);
+ si->hw_enable_function = f;
+}
+
+void srp_interface_enable_ips (u32 hw_if_index)
+{
+ srp_main_t * sm = &srp_main;
+ srp_interface_t * si = srp_get_interface_from_vnet_hw_interface (hw_if_index);
+
+ si->ips_process_enable = 1;
+
+ vlib_node_set_state (sm->vlib_main, srp_ips_process_node.index, VLIB_NODE_STATE_POLLING);
+}
+
+static uword
+srp_is_valid_class_for_interface (vnet_main_t * vnm, u32 hw_if_index, u32 hw_class_index)
+{
+ srp_interface_t * si = srp_get_interface_from_vnet_hw_interface (hw_if_index);
+
+ if (! si)
+ return 0;
+
+ /* Both sides must be admin down. */
+ if (vnet_sw_interface_is_admin_up (vnm, si->rings[SRP_RING_OUTER].sw_if_index))
+ return 0;
+ if (vnet_sw_interface_is_admin_up (vnm, si->rings[SRP_RING_INNER].sw_if_index))
+ return 0;
+
+ return 1;
+}
+
+static void
+srp_interface_hw_class_change (vnet_main_t * vnm, u32 hw_if_index,
+ u32 old_hw_class_index, u32 new_hw_class_index)
+{
+ srp_main_t * sm = &srp_main;
+ srp_interface_t * si = srp_get_interface_from_vnet_hw_interface (hw_if_index);
+ vnet_hw_interface_t * hi;
+ vnet_device_class_t * dc;
+ u32 r, to_srp;
+
+ if (!si) {
+    clib_warning ("srp interface not set, si = 0");
+ return;
+ }
+
+ to_srp = new_hw_class_index == srp_hw_interface_class.index;
+
+ /* Changing class on either outer or inner rings implies changing the class
+ of the other. */
+ for (r = 0; r < SRP_N_RING; r++)
+ {
+ srp_interface_ring_t * ir = &si->rings[r];
+
+ hi = vnet_get_hw_interface (vnm, ir->hw_if_index);
+ dc = vnet_get_device_class (vnm, hi->dev_class_index);
+
+ /* hw_if_index itself will be handled by caller. */
+ if (ir->hw_if_index != hw_if_index)
+ {
+ vnet_hw_interface_init_for_class (vnm, ir->hw_if_index,
+ new_hw_class_index,
+ to_srp ? si - sm->interface_pool : ~0);
+
+ if (dc->hw_class_change)
+ dc->hw_class_change (vnm, ir->hw_if_index, new_hw_class_index);
+ }
+ else
+ hi->hw_instance = to_srp ? si - sm->interface_pool : ~0;
+ }
+
+ if (si->hw_enable_function)
+ si->hw_enable_function (si, /* enable */ to_srp);
+}
+
+VNET_HW_INTERFACE_CLASS (srp_hw_interface_class) = {
+ .name = "SRP",
+ .format_address = format_ethernet_address,
+ .format_header = format_srp_header_with_length,
+ .format_device = format_srp_device,
+ .unformat_hw_address = unformat_ethernet_address,
+ .unformat_header = unformat_srp_header,
+ .build_rewrite = srp_build_rewrite,
+ .update_adjacency = ethernet_update_adjacency,
+ .is_valid_class_for_interface = srp_is_valid_class_for_interface,
+ .hw_class_change = srp_interface_hw_class_change,
+};
+
+static void serialize_srp_interface_config_msg (serialize_main_t * m, va_list * va)
+{
+ srp_interface_t * si = va_arg (*va, srp_interface_t *);
+ srp_main_t * sm = &srp_main;
+
+ ASSERT (! pool_is_free (sm->interface_pool, si));
+ serialize_integer (m, si - sm->interface_pool, sizeof (u32));
+ serialize (m, serialize_f64, si->config.wait_to_restore_idle_delay);
+ serialize (m, serialize_f64, si->config.ips_tx_interval);
+}
+
+static void unserialize_srp_interface_config_msg (serialize_main_t * m, va_list * va)
+{
+ CLIB_UNUSED (mc_main_t * mcm) = va_arg (*va, mc_main_t *);
+ srp_main_t * sm = &srp_main;
+ srp_interface_t * si;
+ u32 si_index;
+
+ unserialize_integer (m, &si_index, sizeof (u32));
+ si = pool_elt_at_index (sm->interface_pool, si_index);
+ unserialize (m, unserialize_f64, &si->config.wait_to_restore_idle_delay);
+ unserialize (m, unserialize_f64, &si->config.ips_tx_interval);
+}
+
+MC_SERIALIZE_MSG (srp_interface_config_msg, static) = {
+ .name = "vnet_srp_interface_config",
+ .serialize = serialize_srp_interface_config_msg,
+ .unserialize = unserialize_srp_interface_config_msg,
+};
+
+void srp_interface_get_interface_config (u32 hw_if_index, srp_interface_config_t * c)
+{
+ srp_interface_t * si = srp_get_interface_from_vnet_hw_interface (hw_if_index);
+ ASSERT (si != 0);
+ c[0] = si->config;
+}
+
+void srp_interface_set_interface_config (u32 hw_if_index, srp_interface_config_t * c)
+{
+ srp_main_t * sm = &srp_main;
+ vlib_main_t * vm = sm->vlib_main;
+ srp_interface_t * si = srp_get_interface_from_vnet_hw_interface (hw_if_index);
+ ASSERT (si != 0);
+ if (memcmp (&si->config, &c[0], sizeof (c[0])))
+ {
+ si->config = c[0];
+ if (vm->mc_main)
+ mc_serialize (vm->mc_main, &srp_interface_config_msg, si);
+ }
+}
+
+#if DEBUG > 0
+
+#define VNET_SIMULATED_SRP_TX_NEXT_SRP_INPUT VNET_INTERFACE_TX_N_NEXT
+
+/* Echo packets back to srp input. */
+static uword
+simulated_srp_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, n_left_to_next, n_copy, * from, * to_next;
+ u32 next_index = VNET_SIMULATED_SRP_TX_NEXT_SRP_INPUT;
+ u32 i;
+ vlib_buffer_t * b;
+
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_args (frame);
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ n_copy = clib_min (n_left_from, n_left_to_next);
+
+ clib_memcpy (to_next, from, n_copy * sizeof (from[0]));
+ n_left_to_next -= n_copy;
+ n_left_from -= n_copy;
+ for (i = 0; i < n_copy; i++)
+ {
+ b = vlib_get_buffer (vm, from[i]);
+	  /* TX interface will be fake srp; copy TX to RX for benefit of srp-input. */
+	  vnet_buffer (b)->sw_if_index[VLIB_RX] = vnet_buffer (b)->sw_if_index[VLIB_TX];
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return n_left_from;
+}
+
+static u8 * format_simulated_srp_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, "fake-srp%d", dev_instance);
+}
+
+VNET_DEVICE_CLASS (srp_simulated_device_class,static) = {
+ .name = "Simulated srp",
+ .format_device_name = format_simulated_srp_name,
+ .tx_function = simulated_srp_interface_tx,
+};
+
+static clib_error_t *
+create_simulated_srp_interfaces (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ u8 address[6];
+ u32 hw_if_index;
+ vnet_hw_interface_t * hi;
+ static u32 instance;
+
+ if (! unformat_user (input, unformat_ethernet_address, &address))
+ {
+ memset (address, 0, sizeof (address));
+ address[0] = 0xde;
+ address[1] = 0xad;
+ address[5] = instance;
+ }
+
+ hw_if_index = vnet_register_interface (vnm,
+ srp_simulated_device_class.index,
+ instance++,
+ srp_hw_interface_class.index, 0);
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ srp_setup_node (vm, hi->output_node_index);
+
+ hi->min_packet_bytes = 40 + 16;
+
+ /* Standard default ethernet MTU. */
+ hi->max_l3_packet_bytes[VLIB_RX] = hi->max_l3_packet_bytes[VLIB_TX] = 1500;
+
+ vec_free (hi->hw_address);
+ vec_add (hi->hw_address, address, sizeof (address));
+
+ {
+ uword slot;
+
+ slot = vlib_node_add_named_next_with_slot
+ (vm, hi->tx_node_index,
+ "srp-input",
+ VNET_SIMULATED_SRP_TX_NEXT_SRP_INPUT);
+ ASSERT (slot == VNET_SIMULATED_SRP_TX_NEXT_SRP_INPUT);
+ }
+
+ return /* no error */ 0;
+}
+
+static VLIB_CLI_COMMAND (create_simulated_srp_interface_command) = {
+ .path = "srp create-interfaces",
+ .short_help = "Create simulated srp interface",
+ .function = create_simulated_srp_interfaces,
+};
+#endif
diff --git a/src/vnet/srp/node.c b/src/vnet/srp/node.c
new file mode 100644
index 00000000000..897be254624
--- /dev/null
+++ b/src/vnet/srp/node.c
@@ -0,0 +1,932 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * node.c: srp packet processing
+ *
+ * Copyright (c) 2011 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/ip/ip_packet.h> /* for ip_csum_fold */
+#include <vnet/srp/srp.h>
+
+typedef struct {
+ u8 packet_data[32];
+} srp_input_trace_t;
+
+static u8 * format_srp_input_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ srp_input_trace_t * t = va_arg (*va, srp_input_trace_t *);
+
+ s = format (s, "%U", format_srp_header, t->packet_data);
+
+ return s;
+}
+
+typedef enum {
+ SRP_INPUT_NEXT_ERROR,
+ SRP_INPUT_NEXT_ETHERNET_INPUT,
+ SRP_INPUT_NEXT_CONTROL,
+ SRP_INPUT_N_NEXT,
+} srp_input_next_t;
+
+typedef struct {
+ u8 next_index;
+ u8 buffer_advance;
+ u16 error;
+} srp_input_disposition_t;
+
+static srp_input_disposition_t srp_input_disposition_by_mode[8] = {
+ [SRP_MODE_reserved0] = {
+ .next_index = SRP_INPUT_NEXT_ERROR,
+ .error = SRP_ERROR_UNKNOWN_MODE,
+ },
+ [SRP_MODE_reserved1] = {
+ .next_index = SRP_INPUT_NEXT_ERROR,
+ .error = SRP_ERROR_UNKNOWN_MODE,
+ },
+ [SRP_MODE_reserved2] = {
+ .next_index = SRP_INPUT_NEXT_ERROR,
+ .error = SRP_ERROR_UNKNOWN_MODE,
+ },
+ [SRP_MODE_reserved3] = {
+ .next_index = SRP_INPUT_NEXT_ERROR,
+ .error = SRP_ERROR_UNKNOWN_MODE,
+ },
+ [SRP_MODE_keep_alive] = {
+ .next_index = SRP_INPUT_NEXT_ERROR,
+ .error = SRP_ERROR_KEEP_ALIVE_DROPPED,
+ },
+ [SRP_MODE_data] = {
+ .next_index = SRP_INPUT_NEXT_ETHERNET_INPUT,
+ .buffer_advance = sizeof (srp_header_t),
+ },
+ [SRP_MODE_control_pass_to_host] = {
+ .next_index = SRP_INPUT_NEXT_CONTROL,
+ },
+ [SRP_MODE_control_locally_buffered_for_host] = {
+ .next_index = SRP_INPUT_NEXT_CONTROL,
+ },
+};
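+
+/*
+ * Example: a data packet (SRP_MODE_data) is advanced past the
+ * sizeof (srp_header_t) SRP header and handed to ethernet-input;
+ * control packets go to the srp-control node without a buffer advance.
+ */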
+
+static uword
+srp_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ srp_main_t * sm = &srp_main;
+ u32 n_left_from, next_index, * from, * to_next;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node,
+ from,
+ n_left_from,
+ sizeof (from[0]),
+ sizeof (srp_input_trace_t));
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1, sw_if_index0, sw_if_index1;
+ vlib_buffer_t * b0, * b1;
+ u8 next0, next1, error0, error1;
+ srp_header_t * s0, * s1;
+ srp_input_disposition_t * d0, * d1;
+ vnet_hw_interface_t * hi0, * hi1;
+ srp_interface_t * si0, * si1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * b2, * b3;
+
+ b2 = vlib_get_buffer (vm, from[2]);
+ b3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (b2, LOAD);
+ vlib_prefetch_buffer_header (b3, LOAD);
+
+ CLIB_PREFETCH (b2->data, sizeof (srp_header_t), LOAD);
+ CLIB_PREFETCH (b3->data, sizeof (srp_header_t), LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ s0 = (void *) (b0->data + b0->current_data);
+ s1 = (void *) (b1->data + b1->current_data);
+
+ /* Data packets are always assigned to side A (outer ring) interface. */
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+ hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
+ hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
+
+ si0 = pool_elt_at_index (sm->interface_pool, hi0->hw_instance);
+ si1 = pool_elt_at_index (sm->interface_pool, hi1->hw_instance);
+
+ sw_if_index0 = (s0->mode == SRP_MODE_data
+ ? si0->rings[SRP_RING_OUTER].sw_if_index
+ : sw_if_index0);
+ sw_if_index1 = (s1->mode == SRP_MODE_data
+ ? si1->rings[SRP_RING_OUTER].sw_if_index
+ : sw_if_index1);
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index0;
+ vnet_buffer (b1)->sw_if_index[VLIB_RX] = sw_if_index1;
+
+ d0 = srp_input_disposition_by_mode + s0->mode;
+ d1 = srp_input_disposition_by_mode + s1->mode;
+
+ next0 = d0->next_index;
+ next1 = d1->next_index;
+
+ error0 = d0->error;
+ error1 = d1->error;
+
+ vlib_buffer_advance (b0, d0->buffer_advance);
+ vlib_buffer_advance (b1, d1->buffer_advance);
+
+ b0->error = node->errors[error0];
+ b1->error = node->errors[error1];
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, sw_if_index0;
+ vlib_buffer_t * b0;
+ u8 next0, error0;
+ srp_header_t * s0;
+ srp_input_disposition_t * d0;
+ srp_interface_t * si0;
+ vnet_hw_interface_t * hi0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ n_left_from -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ s0 = (void *) (b0->data + b0->current_data);
+
+ /* Data packets are always assigned to side A (outer ring) interface. */
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
+
+ si0 = pool_elt_at_index (sm->interface_pool, hi0->hw_instance);
+
+ sw_if_index0 = (s0->mode == SRP_MODE_data
+ ? si0->rings[SRP_RING_OUTER].sw_if_index
+ : sw_if_index0);
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index0;
+
+ d0 = srp_input_disposition_by_mode + s0->mode;
+
+ next0 = d0->next_index;
+
+ error0 = d0->error;
+
+ vlib_buffer_advance (b0, d0->buffer_advance);
+
+ b0->error = node->errors[error0];
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return from_frame->n_vectors;
+}
+
+static char * srp_error_strings[] = {
+#define _(f,s) s,
+ foreach_srp_error
+#undef _
+};
+
+static vlib_node_registration_t srp_input_node = {
+ .function = srp_input,
+ .name = "srp-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_errors = SRP_N_ERROR,
+ .error_strings = srp_error_strings,
+
+ .n_next_nodes = SRP_INPUT_N_NEXT,
+ .next_nodes = {
+ [SRP_INPUT_NEXT_ERROR] = "error-drop",
+ [SRP_INPUT_NEXT_ETHERNET_INPUT] = "ethernet-input",
+ [SRP_INPUT_NEXT_CONTROL] = "srp-control",
+ },
+
+ .format_buffer = format_srp_header_with_length,
+ .format_trace = format_srp_input_trace,
+ .unformat_buffer = unformat_srp_header,
+};
+
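+/* Handle a received topology control packet: stamp our source MAC,
+   append our own MAC binding, refresh the control checksum and send the
+   updated packet back out of the receiving interface. */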
+static uword
+srp_topology_packet (vlib_main_t * vm, u32 sw_if_index, u8 ** contents)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ vnet_hw_interface_t * hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ srp_topology_header_t * t;
+ srp_topology_mac_binding_t * mb;
+ u32 nb, nmb;
+
+ t = (void *) *contents;
+
+ nb = clib_net_to_host_u16 (t->n_bytes_of_data_that_follows);
+ nmb = (nb - sizeof (t->originator_address)) / sizeof (mb[0]);
+ if (vec_len (*contents) < sizeof (t[0]) + nmb * sizeof (mb[0]))
+ return SRP_ERROR_TOPOLOGY_BAD_LENGTH;
+
+ /* Fill in our source MAC address. */
+ clib_memcpy (t->ethernet.src_address, hi->hw_address, vec_len (hi->hw_address));
+
+ /* Make space for our MAC binding. */
+ vec_resize (*contents, sizeof (srp_topology_mac_binding_t));
+ t = (void *) *contents;
+ t->n_bytes_of_data_that_follows = clib_host_to_net_u16 (nb + sizeof (mb[0]));
+
+ mb = t->bindings + nmb;
+
+ mb->flags =
+ ((t->srp.is_inner_ring ? SRP_TOPOLOGY_MAC_BINDING_FLAG_IS_INNER_RING : 0)
+ | (/* is wrapped FIXME */ 0));
+ clib_memcpy (mb->address, hi->hw_address, vec_len (hi->hw_address));
+
+ t->control.checksum
+ = ~ip_csum_fold (ip_incremental_checksum (0, &t->control,
+ vec_len (*contents) - STRUCT_OFFSET_OF (srp_generic_control_header_t, control)));
+
+ {
+ vlib_frame_t * f = vlib_get_frame_to_node (vm, hi->output_node_index);
+ vlib_buffer_t * b;
+ u32 * to_next = vlib_frame_vector_args (f);
+ u32 bi;
+
+ bi = vlib_buffer_add_data (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX,
+ /* buffer to append to */ 0,
+ *contents, vec_len (*contents));
+ b = vlib_get_buffer (vm, bi);
+ vnet_buffer (b)->sw_if_index[VLIB_RX] = vnet_buffer (b)->sw_if_index[VLIB_TX] = sw_if_index;
+ to_next[0] = bi;
+ f->n_vectors = 1;
+ vlib_put_frame_to_node (vm, hi->output_node_index, f);
+ }
+
+ return SRP_ERROR_CONTROL_PACKETS_PROCESSED;
+}
+
+typedef uword (srp_control_handler_function_t) (vlib_main_t * vm,
+ u32 sw_if_index,
+ u8 ** contents);
+
+static uword
+srp_control_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+ vlib_node_runtime_t * error_node;
+ static u8 * contents;
+
+ error_node = vlib_node_get_runtime (vm, srp_input_node.index);
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vlib_trace_frame_buffers_only (vm, node,
+ from,
+ n_left_from,
+ sizeof (from[0]),
+ sizeof (srp_input_trace_t));
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, l2_len0, l3_len0;
+ vlib_buffer_t * b0;
+ u8 next0, error0;
+ srp_generic_control_header_t * s0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ n_left_from -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ s0 = (void *) (b0->data + b0->current_data);
+ l2_len0 = vlib_buffer_length_in_chain (vm, b0);
+ l3_len0 = l2_len0 - STRUCT_OFFSET_OF (srp_generic_control_header_t, control);
+
+ error0 = SRP_ERROR_CONTROL_PACKETS_PROCESSED;
+
+ error0 = s0->control.version != 0 ? SRP_ERROR_CONTROL_VERSION_NON_ZERO : error0;
+
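+	  /* Validate the control checksum: save the transmitted value,
+	     zero the field, recompute the ones-complement checksum over
+	     the control header and payload, and compare. */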
+ {
+ u16 save0 = s0->control.checksum;
+ u16 computed0;
+ s0->control.checksum = 0;
+ computed0 = ~ip_csum_fold (ip_incremental_checksum (0, &s0->control, l3_len0));
+ error0 = save0 != computed0 ? SRP_ERROR_CONTROL_BAD_CHECKSUM : error0;
+ }
+
+ if (error0 == SRP_ERROR_CONTROL_PACKETS_PROCESSED)
+ {
+ static srp_control_handler_function_t * t[SRP_N_CONTROL_PACKET_TYPE] = {
+ [SRP_CONTROL_PACKET_TYPE_topology] = srp_topology_packet,
+ };
+ srp_control_handler_function_t * f;
+
+ f = 0;
+ if (s0->control.type < ARRAY_LEN (t))
+ f = t[s0->control.type];
+
+ if (f)
+ {
+ vec_validate (contents, l2_len0 - 1);
+ vlib_buffer_contents (vm, bi0, contents);
+ error0 = f (vm, vnet_buffer (b0)->sw_if_index[VLIB_RX], &contents);
+ }
+ else
+ error0 = SRP_ERROR_UNKNOWN_CONTROL;
+ }
+
+ b0->error = error_node->errors[error0];
+ next0 = 0;
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return from_frame->n_vectors;
+}
+
+static vlib_node_registration_t srp_control_input_node = {
+ .function = srp_control_input,
+ .name = "srp-control",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ },
+
+ .format_buffer = format_srp_header_with_length,
+ .format_trace = format_srp_input_trace,
+ .unformat_buffer = unformat_srp_header,
+};
+
+static u8 * format_srp_ips_request_type (u8 * s, va_list * args)
+{
+ u32 x = va_arg (*args, u32);
+ char * t = 0;
+ switch (x)
+ {
+#define _(f,n) case SRP_IPS_REQUEST_##f: t = #f; break;
+ foreach_srp_ips_request_type
+#undef _
+ default:
+ return format (s, "unknown 0x%x", x);
+ }
+ return format (s, "%U", format_c_identifier, t);
+}
+
+static u8 * format_srp_ips_status (u8 * s, va_list * args)
+{
+ u32 x = va_arg (*args, u32);
+ char * t = 0;
+ switch (x)
+ {
+#define _(f,n) case SRP_IPS_STATUS_##f: t = #f; break;
+ foreach_srp_ips_status
+#undef _
+ default:
+ return format (s, "unknown 0x%x", x);
+ }
+ return format (s, "%U", format_c_identifier, t);
+}
+
+static u8 * format_srp_ips_state (u8 * s, va_list * args)
+{
+ u32 x = va_arg (*args, u32);
+ char * t = 0;
+ switch (x)
+ {
+#define _(f) case SRP_IPS_STATE_##f: t = #f; break;
+ foreach_srp_ips_state
+#undef _
+ default:
+ return format (s, "unknown 0x%x", x);
+ }
+ return format (s, "%U", format_c_identifier, t);
+}
+
+static u8 * format_srp_ring (u8 * s, va_list * args)
+{
+ u32 ring = va_arg (*args, u32);
+ return format (s, "%s", ring == SRP_RING_INNER ? "inner" : "outer");
+}
+
+static u8 * format_srp_ips_header (u8 * s, va_list * args)
+{
+ srp_ips_header_t * h = va_arg (*args, srp_ips_header_t *);
+
+ s = format (s, "%U, %U, %U, %s-path",
+ format_srp_ips_request_type, h->request_type,
+ format_ethernet_address, h->originator_address,
+ format_srp_ips_status, h->status,
+ h->is_long_path ? "long" : "short");
+
+ return s;
+}
+
+static u8 * format_srp_interface (u8 * s, va_list * args)
+{
+ srp_interface_t * si = va_arg (*args, srp_interface_t *);
+ srp_interface_ring_t * ir;
+
+ s = format (s, "address %U, IPS state %U",
+ format_ethernet_address, si->my_address,
+ format_srp_ips_state, si->current_ips_state);
+ for (ir = si->rings; ir < si->rings + SRP_N_RING; ir++)
+ if (ir->rx_neighbor_address_valid)
+ s = format (s, ", %U neighbor %U",
+ format_srp_ring, ir->ring,
+ format_ethernet_address, ir->rx_neighbor_address);
+
+ return s;
+}
+
+u8 * format_srp_device (u8 * s, va_list * args)
+{
+ u32 hw_if_index = va_arg (*args, u32);
+ CLIB_UNUSED (int verbose) = va_arg (*args, int);
+ vnet_main_t * vnm = vnet_get_main();
+ srp_main_t * sm = &srp_main;
+ vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, hw_if_index);
+ srp_interface_t * si = pool_elt_at_index (sm->interface_pool, hi->hw_instance);
+ return format (s, "%U", format_srp_interface, si);
+}
+
+always_inline srp_interface_t *
+srp_get_interface (u32 sw_if_index, srp_ring_type_t * ring)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ srp_main_t * sm = &srp_main;
+ vnet_hw_interface_t * hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ srp_interface_t * si;
+
+ ASSERT (hi->hw_class_index == srp_hw_interface_class.index);
+ si = pool_elt_at_index (sm->interface_pool, hi->hw_instance);
+
+ ASSERT (si->rings[SRP_RING_INNER].hw_if_index == hi->hw_if_index
+ || si->rings[SRP_RING_OUTER].hw_if_index == hi->hw_if_index);
+ if (ring)
+ *ring =
+ (hi->hw_if_index == si->rings[SRP_RING_INNER].hw_if_index
+ ? SRP_RING_INNER
+ : SRP_RING_OUTER);
+
+ return si;
+}
+
+static void init_ips_packet (srp_interface_t * si,
+ srp_ring_type_t tx_ring,
+ srp_ips_header_t * i)
+{
+ memset (i, 0, sizeof (i[0]));
+
+ i->srp.ttl = 1;
+ i->srp.is_inner_ring = tx_ring;
+ i->srp.priority = 7;
+ i->srp.mode = SRP_MODE_control_locally_buffered_for_host;
+ srp_header_compute_parity (&i->srp);
+
+ clib_memcpy (&i->ethernet.src_address, &si->my_address, sizeof (si->my_address));
+ i->ethernet.type = clib_host_to_net_u16 (ETHERNET_TYPE_SRP_CONTROL);
+
+ /* Checksum will be filled in later. */
+ i->control.version = 0;
+ i->control.type = SRP_CONTROL_PACKET_TYPE_ips;
+ i->control.ttl = 255;
+
+ clib_memcpy (&i->originator_address, &si->my_address, sizeof (si->my_address));
+}
+
+static void tx_ips_packet (srp_interface_t * si,
+ srp_ring_type_t tx_ring,
+ srp_ips_header_t * i)
+{
+ srp_main_t * sm = &srp_main;
+ vnet_main_t * vnm = vnet_get_main();
+ vlib_main_t * vm = sm->vlib_main;
+ vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, si->rings[tx_ring].hw_if_index);
+ vlib_frame_t * f;
+ vlib_buffer_t * b;
+ u32 * to_next, bi;
+
+ if (! vnet_sw_interface_is_admin_up (vnm, hi->sw_if_index))
+ return;
+ if (hi->hw_class_index != srp_hw_interface_class.index)
+ return;
+
+ i->control.checksum
+ = ~ip_csum_fold (ip_incremental_checksum (0, &i->control,
+ sizeof (i[0]) - STRUCT_OFFSET_OF (srp_ips_header_t, control)));
+
+ bi = vlib_buffer_add_data (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX,
+ /* buffer to append to */ 0,
+ i, sizeof (i[0]));
+
+ /* FIXME trace. */
+ if (0)
+ clib_warning ("%U %U",
+ format_vnet_sw_if_index_name, vnm, hi->sw_if_index,
+ format_srp_ips_header, i);
+
+ b = vlib_get_buffer (vm, bi);
+ vnet_buffer (b)->sw_if_index[VLIB_RX] = vnet_buffer (b)->sw_if_index[VLIB_TX] = hi->sw_if_index;
+
+ f = vlib_get_frame_to_node (vm, hi->output_node_index);
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi;
+ f->n_vectors = 1;
+ vlib_put_frame_to_node (vm, hi->output_node_index, f);
+}
+
+static void serialize_srp_interface_state_msg (serialize_main_t * m, va_list * va)
+{
+ srp_interface_t * si = va_arg (*va, srp_interface_t *);
+ srp_main_t * sm = &srp_main;
+ int r;
+
+ ASSERT (! pool_is_free (sm->interface_pool, si));
+ serialize_integer (m, si - sm->interface_pool, sizeof (u32));
+ serialize_likely_small_unsigned_integer (m, si->current_ips_state);
+ for (r = 0; r < SRP_N_RING; r++)
+ {
+ srp_interface_ring_t * ir = &si->rings[r];
+ void * p;
+ serialize_likely_small_unsigned_integer (m, ir->rx_neighbor_address_valid);
+ if (ir->rx_neighbor_address_valid)
+ {
+ p = serialize_get (m, sizeof (ir->rx_neighbor_address));
+ clib_memcpy (p, ir->rx_neighbor_address, sizeof (ir->rx_neighbor_address));
+ }
+ serialize_likely_small_unsigned_integer (m, ir->waiting_to_restore);
+ if (ir->waiting_to_restore)
+ serialize (m, serialize_f64, ir->wait_to_restore_start_time);
+ }
+}
+
+static void unserialize_srp_interface_state_msg (serialize_main_t * m, va_list * va)
+{
+ CLIB_UNUSED (mc_main_t * mcm) = va_arg (*va, mc_main_t *);
+ srp_main_t * sm = &srp_main;
+ srp_interface_t * si;
+ u32 si_index, r;
+
+ unserialize_integer (m, &si_index, sizeof (u32));
+ si = pool_elt_at_index (sm->interface_pool, si_index);
+ si->current_ips_state = unserialize_likely_small_unsigned_integer (m);
+ for (r = 0; r < SRP_N_RING; r++)
+ {
+ srp_interface_ring_t * ir = &si->rings[r];
+ void * p;
+ ir->rx_neighbor_address_valid = unserialize_likely_small_unsigned_integer (m);
+ if (ir->rx_neighbor_address_valid)
+ {
+ p = unserialize_get (m, sizeof (ir->rx_neighbor_address));
+ clib_memcpy (ir->rx_neighbor_address, p, sizeof (ir->rx_neighbor_address));
+ }
+ ir->waiting_to_restore = unserialize_likely_small_unsigned_integer (m);
+ if (ir->waiting_to_restore)
+ unserialize (m, unserialize_f64, &ir->wait_to_restore_start_time);
+ }
+}
+
+MC_SERIALIZE_MSG (srp_interface_state_msg, static) = {
+ .name = "vnet_srp_interface_state",
+ .serialize = serialize_srp_interface_state_msg,
+ .unserialize = unserialize_srp_interface_state_msg,
+};
+
+static int requests_switch (srp_ips_request_type_t r)
+{
+ static u8 t[16] = {
+ [SRP_IPS_REQUEST_forced_switch] = 1,
+ [SRP_IPS_REQUEST_manual_switch] = 1,
+ [SRP_IPS_REQUEST_signal_fail] = 1,
+ [SRP_IPS_REQUEST_signal_degrade] = 1,
+ };
+ return (int) r < ARRAY_LEN (t) ? t[r] : 0;
+}
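+
+/* For example, requests_switch (SRP_IPS_REQUEST_signal_fail) is 1, while
+   SRP_IPS_REQUEST_idle and any out-of-range value map to 0. */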
+
+/* Called when an IPS control packet is received on given interface. */
+void srp_ips_rx_packet (u32 sw_if_index, srp_ips_header_t * h)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ vlib_main_t * vm = srp_main.vlib_main;
+ srp_ring_type_t rx_ring;
+ srp_interface_t * si = srp_get_interface (sw_if_index, &rx_ring);
+ srp_interface_ring_t * ir = &si->rings[rx_ring];
+ int si_needs_broadcast = 0;
+
+ /* FIXME trace. */
+ if (0)
+ clib_warning ("%U %U %U",
+ format_time_interval, "h:m:s:u", vlib_time_now (vm),
+ format_vnet_sw_if_index_name, vnm, sw_if_index,
+ format_srp_ips_header, h);
+
+ /* Ignore self-generated IPS packets. */
+ if (! memcmp (h->originator_address, si->my_address, sizeof (h->originator_address)))
+ goto done;
+
+ /* Learn neighbor address from short path messages. */
+ if (! h->is_long_path)
+ {
+ if (ir->rx_neighbor_address_valid
+ && memcmp (ir->rx_neighbor_address, h->originator_address, sizeof (ir->rx_neighbor_address)))
+ {
+ ASSERT (0);
+ }
+ ir->rx_neighbor_address_valid = 1;
+ clib_memcpy (ir->rx_neighbor_address, h->originator_address, sizeof (ir->rx_neighbor_address));
+ }
+
+ switch (si->current_ips_state)
+ {
+ case SRP_IPS_STATE_idle:
+ /* Received {REQ,NEIGHBOR,W,S} in idle state: wrap. */
+ if (requests_switch (h->request_type)
+ && ! h->is_long_path
+ && h->status == SRP_IPS_STATUS_wrapped)
+ {
+ srp_ips_header_t to_tx[2];
+
+ si_needs_broadcast = 1;
+ si->current_ips_state = SRP_IPS_STATE_wrapped;
+ si->hw_wrap_function (si->rings[SRP_SIDE_A].hw_if_index, /* enable_wrap */ 1);
+ si->hw_wrap_function (si->rings[SRP_SIDE_B].hw_if_index, /* enable_wrap */ 1);
+
+ init_ips_packet (si, rx_ring ^ 0, &to_tx[0]);
+ to_tx[0].request_type = SRP_IPS_REQUEST_idle;
+ to_tx[0].status = SRP_IPS_STATUS_wrapped;
+ to_tx[0].is_long_path = 0;
+ tx_ips_packet (si, rx_ring ^ 0, &to_tx[0]);
+
+ init_ips_packet (si, rx_ring ^ 1, &to_tx[1]);
+ to_tx[1].request_type = h->request_type;
+ to_tx[1].status = SRP_IPS_STATUS_wrapped;
+ to_tx[1].is_long_path = 1;
+ tx_ips_packet (si, rx_ring ^ 1, &to_tx[1]);
+ }
+ break;
+
+ case SRP_IPS_STATE_wrapped:
+ if (! h->is_long_path
+ && h->request_type == SRP_IPS_REQUEST_idle
+ && h->status == SRP_IPS_STATUS_idle)
+ {
+ si_needs_broadcast = 1;
+ si->current_ips_state = SRP_IPS_STATE_idle;
+ si->hw_wrap_function (si->rings[SRP_SIDE_A].hw_if_index, /* enable_wrap */ 0);
+ si->hw_wrap_function (si->rings[SRP_SIDE_B].hw_if_index, /* enable_wrap */ 0);
+ }
+ break;
+
+ case SRP_IPS_STATE_pass_thru:
+ /* FIXME */
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+
+ done:
+ if (vm->mc_main && si_needs_broadcast)
+ mc_serialize (vm->mc_main, &srp_interface_state_msg, si);
+}
+
+/* Perform local IPS request on given interface. */
+void srp_ips_local_request (u32 sw_if_index, srp_ips_request_type_t request)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ srp_main_t * sm = &srp_main;
+ vlib_main_t * vm = sm->vlib_main;
+ srp_ring_type_t rx_ring;
+ srp_interface_t * si = srp_get_interface (sw_if_index, &rx_ring);
+ srp_interface_ring_t * ir = &si->rings[rx_ring];
+ int si_needs_broadcast = 0;
+
+ if (request == SRP_IPS_REQUEST_wait_to_restore)
+ {
+ if (si->current_ips_state != SRP_IPS_STATE_wrapped)
+ return;
+ if (! ir->waiting_to_restore)
+ {
+ ir->wait_to_restore_start_time = vlib_time_now (sm->vlib_main);
+ ir->waiting_to_restore = 1;
+ si_needs_broadcast = 1;
+ }
+ }
+ else
+ {
+ /* FIXME handle local signal fail. */
+ si_needs_broadcast = ir->waiting_to_restore;
+ ir->wait_to_restore_start_time = 0;
+ ir->waiting_to_restore = 0;
+ }
+
+ /* FIXME trace. */
+ if (0)
+ clib_warning ("%U %U",
+ format_vnet_sw_if_index_name, vnm, sw_if_index,
+ format_srp_ips_request_type, request);
+
+ if (vm->mc_main && si_needs_broadcast)
+ mc_serialize (vm->mc_main, &srp_interface_state_msg, si);
+}
+
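+/* Runs from the IPS process below: once both rings have finished their
+   wait-to-restore delay, fall back from wrapped to idle; while idle,
+   keep advertising idle request/status on both rings. */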
+static void maybe_send_ips_message (srp_interface_t * si)
+{
+ srp_main_t * sm = &srp_main;
+ srp_ips_header_t to_tx[2];
+ srp_ring_type_t rx_ring = SRP_RING_OUTER;
+ srp_interface_ring_t * r0 = &si->rings[rx_ring ^ 0];
+ srp_interface_ring_t * r1 = &si->rings[rx_ring ^ 1];
+ f64 now = vlib_time_now (sm->vlib_main);
+
+ if (! si->ips_process_enable)
+ return;
+
+ if (si->current_ips_state == SRP_IPS_STATE_wrapped
+ && r0->waiting_to_restore
+ && r1->waiting_to_restore
+ && now >= r0->wait_to_restore_start_time + si->config.wait_to_restore_idle_delay
+ && now >= r1->wait_to_restore_start_time + si->config.wait_to_restore_idle_delay)
+ {
+ si->current_ips_state = SRP_IPS_STATE_idle;
+ r0->waiting_to_restore = r1->waiting_to_restore = 0;
+ r0->wait_to_restore_start_time = r1->wait_to_restore_start_time = 0;
+ }
+
+ if (si->current_ips_state != SRP_IPS_STATE_idle)
+ return;
+
+ init_ips_packet (si, rx_ring ^ 0, &to_tx[0]);
+ init_ips_packet (si, rx_ring ^ 1, &to_tx[1]);
+
+ if (si->current_ips_state == SRP_IPS_STATE_idle)
+ {
+ to_tx[0].request_type = to_tx[1].request_type = SRP_IPS_REQUEST_idle;
+ to_tx[0].status = to_tx[1].status = SRP_IPS_STATUS_idle;
+ to_tx[0].is_long_path = to_tx[1].is_long_path = 0;
+ }
+
+ else if (si->current_ips_state == SRP_IPS_STATE_wrapped)
+ {
+ to_tx[0].request_type =
+ (si->rings[rx_ring ^ 0].waiting_to_restore
+ ? SRP_IPS_REQUEST_wait_to_restore
+ : SRP_IPS_REQUEST_signal_fail);
+ to_tx[1].request_type =
+ (si->rings[rx_ring ^ 1].waiting_to_restore
+ ? SRP_IPS_REQUEST_wait_to_restore
+ : SRP_IPS_REQUEST_signal_fail);
+ to_tx[0].status = to_tx[1].status = SRP_IPS_STATUS_wrapped;
+ to_tx[0].is_long_path = 0;
+ to_tx[1].is_long_path = 1;
+ }
+
+ tx_ips_packet (si, rx_ring ^ 0, &to_tx[0]);
+ tx_ips_packet (si, rx_ring ^ 1, &to_tx[1]);
+}
+
+static uword
+srp_ips_process (vlib_main_t * vm,
+ vlib_node_runtime_t * rt,
+ vlib_frame_t * f)
+{
+ srp_main_t * sm = &srp_main;
+ srp_interface_t * si;
+
+ while (1)
+ {
+ pool_foreach (si, sm->interface_pool, ({
+ maybe_send_ips_message (si);
+ }));
+ vlib_process_suspend (vm, 1.0);
+ }
+
+ return 0;
+}
+
+vlib_node_registration_t srp_ips_process_node = {
+ .function = srp_ips_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "srp-ips-process",
+ .state = VLIB_NODE_STATE_DISABLED,
+};
+
+static clib_error_t * srp_init (vlib_main_t * vm)
+{
+ srp_main_t * sm = &srp_main;
+
+ sm->default_data_ttl = 255;
+ sm->vlib_main = vm;
+ vlib_register_node (vm, &srp_ips_process_node);
+ vlib_register_node (vm, &srp_input_node);
+ vlib_register_node (vm, &srp_control_input_node);
+ srp_setup_node (vm, srp_input_node.index);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (srp_init);
diff --git a/src/vnet/srp/packet.h b/src/vnet/srp/packet.h
new file mode 100644
index 00000000000..96dab648b32
--- /dev/null
+++ b/src/vnet/srp/packet.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * srp/packet.h: srp packet format.
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_srp_packet_h
+#define included_srp_packet_h
+
+#include <vppinfra/byte_order.h>
+#include <vppinfra/bitops.h>
+#include <vnet/ethernet/packet.h>
+
+/* SRP version 2. */
+
+#define foreach_srp_mode \
+ _ (reserved0) \
+ _ (reserved1) \
+ _ (reserved2) \
+ _ (reserved3) \
+ _ (control_pass_to_host) \
+ _ (control_locally_buffered_for_host) \
+ _ (keep_alive) \
+ _ (data)
+
+typedef enum {
+#define _(f) SRP_MODE_##f,
+ foreach_srp_mode
+#undef _
+ SRP_N_MODE,
+} srp_mode_t;
+
+typedef union {
+ /* For computing parity bit. */
+ u16 as_u16;
+
+ struct {
+ u8 ttl;
+
+#if CLIB_ARCH_IS_BIG_ENDIAN
+ u8 is_inner_ring : 1;
+ u8 mode : 3;
+ u8 priority : 3;
+ u8 parity : 1;
+#endif
+#if CLIB_ARCH_IS_LITTLE_ENDIAN
+ u8 parity : 1;
+ u8 priority : 3;
+ u8 mode : 3;
+ u8 is_inner_ring : 1;
+#endif
+ };
+} srp_header_t;
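+
+/* In the second header byte the fields occupy (most- to least-significant
+ * bit): is_inner_ring (bit 7), mode (bits 6-4), priority (bits 3-1) and
+ * parity (bit 0); the endian-specific bitfields above encode exactly this
+ * layout, and srp/pg.c uses the same bit positions for its edits. */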
+
+always_inline void
+srp_header_compute_parity (srp_header_t * h)
+{
+ h->parity = 0;
+ h->parity = count_set_bits (h->as_u16) ^ 1; /* odd parity */
+}
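+
+/* A minimal usage sketch (the field values are illustrative, not taken
+ * from this patch); after the call, the 16-bit header always carries an
+ * odd number of set bits:
+ *
+ *   srp_header_t h = { .as_u16 = 0 };
+ *   h.ttl = 255;
+ *   h.mode = SRP_MODE_data;
+ *   h.priority = 7;
+ *   srp_header_compute_parity (&h);
+ *   ASSERT (count_set_bits (h.as_u16) & 1);
+ */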
+
+typedef struct {
+ srp_header_t srp;
+ ethernet_header_t ethernet;
+} srp_and_ethernet_header_t;
+
+#define foreach_srp_control_packet_type \
+ _ (reserved) \
+ _ (topology) \
+ _ (ips)
+
+typedef enum {
+#define _(f) SRP_CONTROL_PACKET_TYPE_##f,
+ foreach_srp_control_packet_type
+#undef _
+ SRP_N_CONTROL_PACKET_TYPE,
+} srp_control_packet_type_t;
+
+typedef CLIB_PACKED (struct {
+ /* Set to 0. */
+ u8 version;
+
+ srp_control_packet_type_t type : 8;
+
+ /* IP4-like checksum of packet starting with start of control header. */
+ u16 checksum;
+
+ u16 ttl;
+}) srp_control_header_t;
+
+typedef struct {
+ srp_header_t srp;
+ ethernet_header_t ethernet;
+ srp_control_header_t control;
+} srp_generic_control_header_t;
+
+typedef struct {
+ u8 flags;
+#define SRP_TOPOLOGY_MAC_BINDING_FLAG_IS_INNER_RING (1 << 6)
+#define SRP_TOPOLOGY_MAC_BINDING_FLAG_IS_WRAPPED (1 << 5)
+
+ /* MAC address. */
+ u8 address[6];
+} srp_topology_mac_binding_t;
+
+typedef CLIB_PACKED (struct {
+ srp_header_t srp;
+ ethernet_header_t ethernet;
+ srp_control_header_t control;
+
+ /* Length in bytes of data that follows. */
+ u16 n_bytes_of_data_that_follows;
+
+ /* MAC address of originator of this topology request. */
+ u8 originator_address[6];
+
+ /* Bindings follow. */
+ srp_topology_mac_binding_t bindings[0];
+}) srp_topology_header_t;
+
+#define foreach_srp_ips_request_type \
+ _ (idle, 0x0) \
+ _ (wait_to_restore, 0x5) \
+ _ (manual_switch, 0x6) \
+ _ (signal_degrade, 0x8) \
+ _ (signal_fail, 0xb) \
+ _ (forced_switch, 0xd)
+
+typedef enum {
+#define _(f,n) SRP_IPS_REQUEST_##f = n,
+ foreach_srp_ips_request_type
+#undef _
+} srp_ips_request_type_t;
+
+#define foreach_srp_ips_status \
+ _ (idle, 0x0) \
+ _ (wrapped, 0x2)
+
+typedef enum {
+#define _(f,n) SRP_IPS_STATUS_##f = n,
+ foreach_srp_ips_status
+#undef _
+} srp_ips_status_t;
+
+typedef struct {
+ srp_header_t srp;
+ ethernet_header_t ethernet;
+ srp_control_header_t control;
+ u8 originator_address[6];
+
+ union {
+ u8 ips_octet;
+
+ struct {
+#if CLIB_ARCH_IS_BIG_ENDIAN
+ u8 request_type : 4;
+ u8 is_long_path : 1;
+ u8 status : 3;
+#endif
+#if CLIB_ARCH_IS_LITTLE_ENDIAN
+ u8 status : 3;
+ u8 is_long_path : 1;
+ u8 request_type : 4;
+#endif
+ };
+ };
+
+ u8 reserved;
+} srp_ips_header_t;
+
+#endif /* included_srp_packet_h */
diff --git a/src/vnet/srp/pg.c b/src/vnet/srp/pg.c
new file mode 100644
index 00000000000..54f1a3bba18
--- /dev/null
+++ b/src/vnet/srp/pg.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * srp/pg.c: packet generator srp interface
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/srp/srp.h>
+#include <vnet/ethernet/ethernet.h>
+
+typedef struct {
+ pg_edit_t ttl;
+ pg_edit_t is_inner_ring;
+ pg_edit_t mode;
+ pg_edit_t priority;
+ pg_edit_t parity;
+ pg_edit_t type;
+ pg_edit_t src_address;
+ pg_edit_t dst_address;
+} pg_srp_header_t;
+
+static inline void
+pg_srp_header_init (pg_srp_header_t * e)
+{
+ pg_edit_init (&e->ttl, srp_and_ethernet_header_t, srp.ttl);
+ pg_edit_init_bitfield (&e->is_inner_ring, srp_and_ethernet_header_t,
+ srp.as_u16,
+ 7, 1);
+ pg_edit_init_bitfield (&e->mode, srp_and_ethernet_header_t,
+ srp.as_u16,
+ 4, 3);
+ pg_edit_init_bitfield (&e->priority, srp_and_ethernet_header_t,
+ srp.as_u16,
+ 1, 3);
+ pg_edit_init_bitfield (&e->parity, srp_and_ethernet_header_t,
+ srp.as_u16,
+ 0, 1);
+ pg_edit_init (&e->type, srp_and_ethernet_header_t, ethernet.type);
+ pg_edit_init (&e->src_address, srp_and_ethernet_header_t, ethernet.src_address);
+ pg_edit_init (&e->dst_address, srp_and_ethernet_header_t, ethernet.dst_address);
+}
+
+uword
+unformat_pg_srp_header (unformat_input_t * input, va_list * args)
+{
+ pg_stream_t * s = va_arg (*args, pg_stream_t *);
+ pg_srp_header_t * e;
+ u32 error, group_index;
+
+ e = pg_create_edit_group (s, sizeof (e[0]), sizeof (srp_header_t),
+ &group_index);
+ pg_srp_header_init (e);
+
+ error = 1;
+ if (! unformat (input, "%U: %U -> %U",
+ unformat_pg_edit,
+ unformat_ethernet_type_net_byte_order, &e->type,
+ unformat_pg_edit,
+ unformat_ethernet_address, &e->src_address,
+ unformat_pg_edit,
+ unformat_ethernet_address, &e->dst_address))
+ goto done;
+
+ {
+ srp_header_t h;
+
+ h.as_u16 = 0;
+ h.mode = SRP_MODE_data;
+ h.ttl = 255;
+ h.parity = count_set_bits (h.as_u16) ^ 1;
+
+ pg_edit_set_fixed (&e->mode, h.mode);
+ pg_edit_set_fixed (&e->ttl, h.ttl);
+ pg_edit_set_fixed (&e->is_inner_ring, h.is_inner_ring);
+ pg_edit_set_fixed (&e->priority, h.priority);
+ pg_edit_set_fixed (&e->parity, h.parity);
+ }
+
+ error = 0;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "mode %U",
+ unformat_pg_edit,
+ unformat_pg_number, &e->mode))
+ ;
+ else if (unformat (input, "ttl %U",
+ unformat_pg_edit,
+ unformat_pg_number, &e->ttl))
+ ;
+ else if (unformat (input, "priority %U",
+ unformat_pg_edit,
+ unformat_pg_number, &e->priority))
+ ;
+ else
+ break;
+ }
+
+ {
+ ethernet_main_t * em = &ethernet_main;
+ ethernet_type_info_t * ti = 0;
+ pg_node_t * pg_node = 0;
+
+ if (e->type.type == PG_EDIT_FIXED)
+ {
+ u16 t = *(u16 *) e->type.values[PG_EDIT_LO];
+ ti = ethernet_get_type_info (em, clib_net_to_host_u16 (t));
+ if (ti && ti->node_index != ~0)
+ pg_node = pg_get_node (ti->node_index);
+ }
+
+ if (pg_node && pg_node->unformat_edit
+ && unformat_user (input, pg_node->unformat_edit, s))
+ ;
+ else if (! unformat_user (input, unformat_pg_payload, s))
+ goto done;
+ }
+
+ done:
+ if (error)
+ pg_free_edit_group (s);
+ return error == 0;
+}
+
diff --git a/src/vnet/srp/srp.h b/src/vnet/srp/srp.h
new file mode 100644
index 00000000000..5288ebe4759
--- /dev/null
+++ b/src/vnet/srp/srp.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * srp.h: types/functions for srp.
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_srp_h
+#define included_srp_h
+
+#include <vnet/vnet.h>
+#include <vnet/srp/packet.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/pg/pg.h>
+
+extern vnet_hw_interface_class_t srp_hw_interface_class;
+
+/* See RFC 2892. */
+#define foreach_srp_ips_state \
+ _ (idle) \
+ _ (pass_thru) \
+ _ (wrapped)
+
+typedef enum {
+#define _(f) SRP_IPS_STATE_##f,
+ foreach_srp_ips_state
+#undef _
+ SRP_N_IPS_STATE,
+} srp_ips_state_t;
+
+typedef enum {
+ SRP_RING_OUTER,
+ SRP_RING_INNER,
+ SRP_N_RING = 2,
+ SRP_SIDE_A = SRP_RING_OUTER, /* outer rx, inner tx */
+ SRP_SIDE_B = SRP_RING_INNER, /* inner rx, outer tx */
+ SRP_N_SIDE = 2,
+} srp_ring_type_t;
+
+typedef struct {
+ srp_ring_type_t ring;
+
+ /* Hardware interface for this ring/side. */
+ u32 hw_if_index;
+
+ /* Software interface corresponding to hardware interface. */
+ u32 sw_if_index;
+
+ /* Mac address of neighbor on RX fiber. */
+ u8 rx_neighbor_address[6];
+
+ u8 rx_neighbor_address_valid;
+
+ /* True if we are waiting to restore signal. */
+ u8 waiting_to_restore;
+
+ /* Time stamp when signal became valid. */
+ f64 wait_to_restore_start_time;
+} srp_interface_ring_t;
+
+struct srp_interface_t;
+typedef void (srp_hw_wrap_function_t) (u32 hw_if_index, u32 wrap_enable);
+typedef void (srp_hw_enable_function_t) (struct srp_interface_t * si, u32 wrap_enable);
+
+typedef struct {
+ /* Delay between wait to restore event and entering idle state in seconds. */
+ f64 wait_to_restore_idle_delay;
+
+ /* Number of seconds between sending ips messages to neighbors. */
+ f64 ips_tx_interval;
+} srp_interface_config_t;
+
+typedef struct srp_interface_t {
+ /* Current IPS state. */
+ srp_ips_state_t current_ips_state;
+
+ /* Address for this interface. */
+ u8 my_address[6];
+
+ /* Enable IPS process handling for this interface. */
+ u8 ips_process_enable;
+
+ srp_interface_ring_t rings[SRP_N_RING];
+
+ /* Configurable parameters. */
+ srp_interface_config_t config;
+
+ srp_hw_wrap_function_t * hw_wrap_function;
+
+ srp_hw_enable_function_t * hw_enable_function;
+} srp_interface_t;
+
+typedef struct {
+ vlib_main_t * vlib_main;
+
+ /* Pool of SRP interfaces. */
+ srp_interface_t * interface_pool;
+
+ uword * interface_index_by_hw_if_index;
+
+ /* TTL to use for outgoing data packets. */
+ u32 default_data_ttl;
+
+ vlib_one_time_waiting_process_t * srp_register_interface_waiting_process_pool;
+
+ uword * srp_register_interface_waiting_process_pool_index_by_hw_if_index;
+} srp_main_t;
+
+/* Registers the side A/B hardware interfaces as being SRP capable. */
+void srp_register_interface (u32 * hw_if_indices);
+
+/* Enable sending IPS messages for the SRP interface implied by the given vnet hardware interface. */
+void srp_interface_enable_ips (u32 hw_if_index);
+
+/* Set function to wrap hardware side of SRP interface. */
+void srp_interface_set_hw_wrap_function (u32 hw_if_index, srp_hw_wrap_function_t * f);
+
+void srp_interface_set_hw_enable_function (u32 hw_if_index, srp_hw_enable_function_t * f);
+
+extern vlib_node_registration_t srp_ips_process_node;
+
+/* Called when an IPS control packet is received on given interface. */
+void srp_ips_rx_packet (u32 sw_if_index, srp_ips_header_t * ips_packet);
+
+/* Perform local IPS request on given interface. */
+void srp_ips_local_request (u32 sw_if_index, srp_ips_request_type_t request);
+
+always_inline void
+srp_ips_link_change (u32 sw_if_index, u32 link_is_up)
+{
+ srp_ips_local_request (sw_if_index,
+ link_is_up
+ ? SRP_IPS_REQUEST_wait_to_restore
+ : SRP_IPS_REQUEST_signal_fail);
+}
+
+void srp_interface_get_interface_config (u32 hw_if_index, srp_interface_config_t * c);
+void srp_interface_set_interface_config (u32 hw_if_index, srp_interface_config_t * c);
+
+srp_main_t srp_main;
+
+always_inline srp_interface_t *
+srp_get_interface_from_vnet_hw_interface (u32 hw_if_index)
+{
+ srp_main_t * sm = &srp_main;
+ uword * p = hash_get (sm->interface_index_by_hw_if_index, hw_if_index);
+ return p ? pool_elt_at_index (sm->interface_pool, p[0]) : 0;
+}
+
+u8 * format_srp_header (u8 * s, va_list * args);
+u8 * format_srp_header_with_length (u8 * s, va_list * args);
+u8 * format_srp_device (u8 * s, va_list * args);
+
+/* Parse srp header. */
+uword
+unformat_srp_header (unformat_input_t * input, va_list * args);
+
+uword unformat_pg_srp_header (unformat_input_t * input, va_list * args);
+
+always_inline void
+srp_setup_node (vlib_main_t * vm, u32 node_index)
+{
+ vlib_node_t * n = vlib_get_node (vm, node_index);
+ pg_node_t * pn = pg_get_node (node_index);
+ n->format_buffer = format_srp_header_with_length;
+ n->unformat_buffer = unformat_srp_header;
+ pn->unformat_edit = unformat_pg_srp_header;
+}
+
+#define foreach_srp_error \
+ _ (NONE, "no error") \
+ _ (UNKNOWN_MODE, "unknown mode in SRP header") \
+ _ (KEEP_ALIVE_DROPPED, "v1 keep alive mode in SRP header") \
+ _ (CONTROL_PACKETS_PROCESSED, "control packets processed") \
+ _ (IPS_PACKETS_PROCESSED, "IPS packets processed") \
+ _ (UNKNOWN_CONTROL, "unknown control packet") \
+ _ (CONTROL_VERSION_NON_ZERO, "control packet with non-zero version") \
+ _ (CONTROL_BAD_CHECKSUM, "control packet with bad checksum") \
+ _ (TOPOLOGY_BAD_LENGTH, "topology packet with bad length")
+
+typedef enum {
+#define _(n,s) SRP_ERROR_##n,
+ foreach_srp_error
+#undef _
+ SRP_N_ERROR,
+} srp_error_t;
+
+serialize_function_t serialize_srp_main, unserialize_srp_main;
+
+#endif /* included_srp_h */
diff --git a/src/vnet/unix/gdb_funcs.c b/src/vnet/unix/gdb_funcs.c
new file mode 100644
index 00000000000..cfb4b247800
--- /dev/null
+++ b/src/vnet/unix/gdb_funcs.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief Host utility functions
+ */
+#include <vppinfra/format.h>
+#include <vlib/vlib.h>
+
+#include <vlib/threads.h>
+
+/**
+ * @brief GDB callable function: vl - Return vector length of vector
+ *
+ * @param *p - void - address of vector
+ *
+ * @return length - u32
+ *
+ */
+u32 vl(void *p)
+{
+ return vec_len (p);
+}
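+
+/* Example gdb session (the pointer value is illustrative):
+ *
+ *   (gdb) p vl ((void *) 0x7f2d9e08a680)
+ *   $1 = 18
+ */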
+
+/**
+ * @brief GDB callable function: pe - call pool_elts - number of elements in a pool
+ *
+ * @param *v - void - address of pool
+ *
+ * @return number - uword
+ *
+ */
+uword pe (void *v)
+{
+ return (pool_elts(v));
+}
+
+/**
+ * @brief GDB callable function: pifi - call pool_is_free_index - is passed index free?
+ *
+ * @param *p - void - address of pool
+ * @param *index - u32
+ *
+ * @return 0|1 - int
+ *
+ */
+int pifi (void *p, u32 index)
+{
+ return pool_is_free_index (p, index);
+}
+
+/**
+ * @brief GDB callable function: debug_hex_bytes - return formatted hex string
+ *
+ * @param *s - u8
+ * @param n - u32 - number of bytes to format
+ *
+ */
+void debug_hex_bytes (u8 *s, u32 n)
+{
+ fformat (stderr, "%U\n", format_hex_bytes, s, n);
+}
+
+/**
+ * @brief GDB callable function: vlib_dump_frame_ownership
+ *
+ */
+void vlib_dump_frame_ownership (void)
+{
+ vlib_main_t * vm = vlib_get_main();
+ vlib_node_main_t * nm = &vm->node_main;
+ vlib_node_runtime_t * this_node_runtime;
+ vlib_next_frame_t * nf;
+ u32 first_nf_index;
+ u32 index;
+
+ vec_foreach(this_node_runtime, nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
+ {
+ first_nf_index = this_node_runtime->next_frame_index;
+
+ for (index = first_nf_index; index < first_nf_index +
+ this_node_runtime->n_next_nodes; index++)
+ {
+ vlib_node_runtime_t * owned_runtime;
+ nf = vec_elt_at_index (vm->node_main.next_frames, index);
+ if (nf->flags & VLIB_FRAME_OWNER)
+ {
+ owned_runtime = vec_elt_at_index (nm->nodes_by_type[0],
+ nf->node_runtime_index);
+ fformat(stderr,
+ "%s next index %d owns enqueue rights to %s\n",
+ nm->nodes[this_node_runtime->node_index]->name,
+ index - first_nf_index,
+ nm->nodes[owned_runtime->node_index]->name);
+ fformat (stderr, " nf index %d nf->frame_index %d\n",
+ nf - vm->node_main.next_frames,
+ nf->frame_index);
+ }
+ }
+ }
+}
+
+/**
+ * @brief GDB callable function: vlib_runtime_index_to_node_name
+ *
+ * Takes a node index and prints the corresponding node name.
+ *
+ * @param index - u32
+ */
+void vlib_runtime_index_to_node_name (u32 index)
+{
+ vlib_main_t * vm = vlib_get_main();
+ vlib_node_main_t * nm = &vm->node_main;
+
+  if (index >= vec_len (nm->nodes))
+    {
+      fformat(stderr, "%d out of range, max %d\n", index, vec_len(nm->nodes));
+ return;
+ }
+
+ fformat(stderr, "node runtime index %d name %s\n", index, nm->nodes[index]->name);
+}
+
+
+/**
+ * @brief GDB callable function: show_gdb_command_fn - show gdb
+ *
+ * Shows list of functions for VPP available in GDB
+ *
+ * @return error - clib_error_t
+ */
+static clib_error_t *
+show_gdb_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vlib_cli_output (vm, "vl(p) returns vec_len(p)");
+ vlib_cli_output (vm, "pe(p) returns pool_elts(p)");
+ vlib_cli_output (vm, "pifi(p, i) returns pool_is_free_index(p, i)");
+ vlib_cli_output (vm, "debug_hex_bytes (ptr, n_bytes) dumps n_bytes in hex");
+ vlib_cli_output (vm, "vlib_dump_frame_ownership() does what it says");
+ vlib_cli_output (vm, "vlib_runtime_index_to_node_name (index) prints NN");
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (show_gdb_funcs_command, static) = {
+ .path = "show gdb",
+ .short_help = "Describe functions which can be called from gdb",
+ .function = show_gdb_command_fn,
+};
+
+/* Cafeteria plan, maybe you don't want these functions */
+clib_error_t *
+gdb_func_init (vlib_main_t * vm) { return 0; }
+
+VLIB_INIT_FUNCTION (gdb_func_init);
diff --git a/src/vnet/unix/pcap.c b/src/vnet/unix/pcap.c
new file mode 100644
index 00000000000..bba225f74ab
--- /dev/null
+++ b/src/vnet/unix/pcap.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * pcap.c: libpcap packet capture format
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <vnet/unix/pcap.h>
+#include <sys/fcntl.h>
+
+/**
+ * @file
+ * @brief PCAP read/write support.
+ *
+ * Usage:
+ *
+ * <code><pre>
+ * \#include <vnet/unix/pcap.h>
+ *
+ * static pcap_main_t pcap = {
+ * .file_name = "/tmp/ip4",
+ * .n_packets_to_capture = 2,
+ * .packet_type = PCAP_PACKET_TYPE_ip,
+ * };
+ * </pre></code>
+ *
+ * To add a buffer:
+ *
+ * <code><pre>pcap_add_buffer (&pcap, vm, pi0, 128);</pre></code>
+ *
+ * File will be written after @c n_packets_to_capture or call to pcap_write (&pcap).
+ *
+*/
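+
+/*
+ * Reading a capture back is symmetric (a minimal sketch; the file name
+ * and printing loop are illustrative, not part of this API):
+ *
+ * <code><pre>
+ * pcap_main_t pm = { .file_name = "/tmp/ip4" };
+ * clib_error_t * error = pcap_read (&pm);
+ * if (! error)
+ *   {
+ *     u8 ** p;
+ *     vec_foreach (p, pm.packets_read)
+ *       fformat (stdout, "%U\n", format_hex_bytes, *p, vec_len (*p));
+ *   }
+ * </pre></code>
+ */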
+
+/**
+ * @brief Close PCAP file
+ *
+ * @return rc - clib_error_t
+ *
+ */
+clib_error_t *
+pcap_close (pcap_main_t * pm)
+{
+ close (pm->file_descriptor);
+ pm->flags &= ~PCAP_MAIN_INIT_DONE;
+ pm->file_descriptor = -1;
+ return 0;
+}
+
+/**
+ * @brief Write PCAP file
+ *
+ * @return rc - clib_error_t
+ *
+ */
+clib_error_t *
+pcap_write (pcap_main_t * pm)
+{
+ clib_error_t * error = 0;
+
+ if (! (pm->flags & PCAP_MAIN_INIT_DONE))
+ {
+ pcap_file_header_t fh;
+ int n;
+
+ if (! pm->file_name)
+ pm->file_name = "/tmp/vnet.pcap";
+
+ pm->file_descriptor = open (pm->file_name, O_CREAT | O_TRUNC | O_WRONLY, 0664);
+ if (pm->file_descriptor < 0)
+ {
+ error = clib_error_return_unix (0, "failed to open `%s'", pm->file_name);
+ goto done;
+ }
+
+ pm->flags |= PCAP_MAIN_INIT_DONE;
+ pm->n_packets_captured = 0;
+ pm->n_pcap_data_written = 0;
+
+ /* Write file header. */
+ memset (&fh, 0, sizeof (fh));
+ fh.magic = 0xa1b2c3d4;
+ fh.major_version = 2;
+ fh.minor_version = 4;
+ fh.time_zone = 0;
+ fh.max_packet_size_in_bytes = 1 << 16;
+ fh.packet_type = pm->packet_type;
+ n = write (pm->file_descriptor, &fh, sizeof (fh));
+ if (n != sizeof (fh))
+ {
+ if (n < 0)
+ error = clib_error_return_unix (0, "write file header `%s'", pm->file_name);
+ else
+ error = clib_error_return (0, "short write of file header `%s'", pm->file_name);
+ goto done;
+ }
+ }
+
+ while (vec_len (pm->pcap_data) > pm->n_pcap_data_written)
+ {
+ int n = vec_len (pm->pcap_data) - pm->n_pcap_data_written;
+
+ n = write (pm->file_descriptor,
+ vec_elt_at_index (pm->pcap_data, pm->n_pcap_data_written), n);
+
+ if (n < 0 && unix_error_is_fatal (errno))
+ {
+ error = clib_error_return_unix (0, "write `%s'", pm->file_name);
+ goto done;
+ }
+ pm->n_pcap_data_written += n;
+ }
+
+ if (pm->n_pcap_data_written >= vec_len (pm->pcap_data))
+ {
+ vec_reset_length (pm->pcap_data);
+ pm->n_pcap_data_written = 0;
+ }
+
+ if (pm->n_packets_captured >= pm->n_packets_to_capture)
+ pcap_close(pm);
+
+ done:
+ if (error)
+ {
+ if (pm->file_descriptor >= 0)
+ close (pm->file_descriptor);
+ }
+ return error;
+}
+
+/**
+ * @brief Read PCAP file
+ *
+ * @return rc - clib_error_t
+ *
+ */
+clib_error_t * pcap_read (pcap_main_t * pm)
+{
+ clib_error_t * error = 0;
+ int fd, need_swap, n;
+ pcap_file_header_t fh;
+ pcap_packet_header_t ph;
+
+ fd = open (pm->file_name, O_RDONLY);
+ if (fd < 0)
+ {
+ error = clib_error_return_unix (0, "open `%s'", pm->file_name);
+ goto done;
+ }
+
+ if (read (fd, &fh, sizeof (fh)) != sizeof (fh))
+ {
+ error = clib_error_return_unix (0, "read file header `%s'", pm->file_name);
+ goto done;
+ }
+
+ need_swap = 0;
+ if (fh.magic == 0xd4c3b2a1)
+ {
+ need_swap = 1;
+#define _(t,f) fh.f = clib_byte_swap_##t (fh.f);
+ foreach_pcap_file_header;
+#undef _
+ }
+
+ if (fh.magic != 0xa1b2c3d4)
+ {
+ error = clib_error_return (0, "bad magic `%s'", pm->file_name);
+ goto done;
+ }
+
+ pm->min_packet_bytes = 0;
+ pm->max_packet_bytes = 0;
+ while ((n = read (fd, &ph, sizeof (ph))) != 0)
+ {
+ u8 * data;
+
+ if (need_swap)
+ {
+#define _(t,f) ph.f = clib_byte_swap_##t (ph.f);
+ foreach_pcap_packet_header;
+#undef _
+ }
+
+ data = vec_new (u8, ph.n_bytes_in_packet);
+ if (read (fd, data, ph.n_packet_bytes_stored_in_file) != ph.n_packet_bytes_stored_in_file)
+ {
+ error = clib_error_return (0, "short read `%s'", pm->file_name);
+ goto done;
+ }
+
+ if (vec_len (pm->packets_read) == 0)
+ pm->min_packet_bytes = pm->max_packet_bytes = ph.n_bytes_in_packet;
+ else
+ {
+ pm->min_packet_bytes = clib_min (pm->min_packet_bytes, ph.n_bytes_in_packet);
+ pm->max_packet_bytes = clib_max (pm->max_packet_bytes, ph.n_bytes_in_packet);
+ }
+
+ vec_add1 (pm->packets_read, data);
+ }
+
+ done:
+ if (fd >= 0)
+ close (fd);
+ return error;
+
+}
diff --git a/src/vnet/unix/pcap.h b/src/vnet/unix/pcap.h
new file mode 100644
index 00000000000..6aaf32bef7e
--- /dev/null
+++ b/src/vnet/unix/pcap.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * pcap.h: libpcap packet capture format
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+/**
+ * @file
+ * @brief PCAP utility definitions
+ */
+#ifndef included_vnet_pcap_h
+#define included_vnet_pcap_h
+
+#include <vlib/vlib.h>
+
+/**
+ * @brief Packet types supported by PCAP
+ *
+ * null 0
+ * ethernet 1
+ * ppp 9
+ * ip 12
+ * hdlc 104
+ */
+#define foreach_vnet_pcap_packet_type \
+ _ (null, 0) \
+ _ (ethernet, 1) \
+ _ (ppp, 9) \
+ _ (ip, 12) \
+ _ (hdlc, 104)
+
+typedef enum {
+#define _(f,n) PCAP_PACKET_TYPE_##f = (n),
+ foreach_vnet_pcap_packet_type
+#undef _
+} pcap_packet_type_t;
+
+#define foreach_pcap_file_header \
+ /** 0xa1b2c3d4 host byte order. \
+ 0xd4c3b2a1 => need to byte swap everything. */ \
+ _ (u32, magic) \
+ \
+ /** Currently major 2 minor 4. */ \
+ _ (u16, major_version) \
+ _ (u16, minor_version) \
+ \
+ /** 0 for GMT. */ \
+ _ (u32, time_zone) \
+ \
+ /** Accuracy of timestamps. Typically set to 0. */ \
+ _ (u32, sigfigs) \
+ \
+ /** Size of largest packet in file. */ \
+ _ (u32, max_packet_size_in_bytes) \
+ \
+ /** One of vnet_pcap_packet_type_t. */ \
+ _ (u32, packet_type)
+
+/** File header struct */
+typedef struct {
+#define _(t, f) t f;
+ foreach_pcap_file_header
+#undef _
+} pcap_file_header_t;
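+
+/* On disk this is the standard 24-byte libpcap global header
+   (magic 0xa1b2c3d4, version 2.4). */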
+
+#define foreach_pcap_packet_header \
+ /** Time stamp in seconds */ \
+ _ (u32, time_in_sec) \
+ /** Time stamp in microseconds. */ \
+ _ (u32, time_in_usec) \
+ \
+ /** Number of bytes stored in file. */ \
+ _ (u32, n_packet_bytes_stored_in_file) \
+ /** Number of bytes in actual packet. */ \
+ _ (u32, n_bytes_in_packet)
+
+/** Packet header. */
+typedef struct {
+#define _(t, f) t f;
+ foreach_pcap_packet_header
+#undef _
+
+ /** Packet data follows. */
+ u8 data[0];
+} pcap_packet_header_t;
+
+/**
+ * @brief PCAP main state data structure
+ */
+typedef struct {
+ /** File name of pcap output. */
+ char * file_name;
+
+ /** Number of packets to capture. */
+ u32 n_packets_to_capture;
+
+ /** Packet type */
+ pcap_packet_type_t packet_type;
+
+ /** Number of packets currently captured. */
+ u32 n_packets_captured;
+
+ /** flags */
+ u32 flags;
+#define PCAP_MAIN_INIT_DONE (1 << 0)
+
+ /** File descriptor for reading/writing. */
+ int file_descriptor;
+
+ /** Bytes written */
+ u32 n_pcap_data_written;
+
+ /** Vector of pcap data. */
+ u8 * pcap_data;
+
+ /** Packets read from file. */
+ u8 ** packets_read;
+
+ /** Min/Max Packet bytes */
+ u32 min_packet_bytes, max_packet_bytes;
+} pcap_main_t;
+
+/** Write out data to output file. */
+clib_error_t * pcap_write (pcap_main_t * pm);
+
+/** Read data from file. */
+clib_error_t * pcap_read (pcap_main_t * pm);
+
+/**
+ * @brief Add packet
+ *
+ * @param *pm - pcap_main_t
+ * @param time_now - f64
+ * @param n_bytes_in_trace - u32
+ * @param n_bytes_in_packet - u32
+ *
+ * @return Packet Data
+ *
+ */
+static inline void *
+pcap_add_packet (pcap_main_t * pm,
+ f64 time_now,
+ u32 n_bytes_in_trace,
+ u32 n_bytes_in_packet)
+{
+ pcap_packet_header_t * h;
+ u8 * d;
+
+ vec_add2 (pm->pcap_data, d, sizeof (h[0]) + n_bytes_in_trace);
+ h = (void *) (d);
+ h->time_in_sec = time_now;
+ h->time_in_usec = 1e6*(time_now - h->time_in_sec);
+ h->n_packet_bytes_stored_in_file = n_bytes_in_trace;
+ h->n_bytes_in_packet = n_bytes_in_packet;
+ pm->n_packets_captured++;
+ return h->data;
+}
+
+/**
+ * @brief Add buffer (vlib_buffer_t) to the trace
+ *
+ * @param *pm - pcap_main_t
+ * @param *vm - vlib_main_t
+ * @param buffer_index - u32
+ * @param n_bytes_in_trace - u32
+ *
+ */
+static inline void
+pcap_add_buffer (pcap_main_t * pm,
+ vlib_main_t * vm, u32 buffer_index,
+ u32 n_bytes_in_trace)
+{
+ vlib_buffer_t * b = vlib_get_buffer (vm, buffer_index);
+ u32 n = vlib_buffer_length_in_chain (vm, b);
+ i32 n_left = clib_min (n_bytes_in_trace, n);
+ f64 time_now = vlib_time_now (vm);
+ void * d;
+
+ d = pcap_add_packet (pm, time_now, n_left, n);
+ while (1)
+ {
+ u32 copy_length = clib_min ((u32) n_left, b->current_length);
+ clib_memcpy (d, b->data + b->current_data, copy_length);
+ n_left -= b->current_length;
+ if (n_left <= 0)
+ break;
+ d += b->current_length;
+ ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);
+ b = vlib_get_buffer (vm, b->next_buffer);
+ }
+
+ /** Flush output vector. */
+ if (vec_len (pm->pcap_data) >= 64*1024
+ || pm->n_packets_captured >= pm->n_packets_to_capture)
+ pcap_write (pm);
+}
+
+#endif /* included_vnet_pcap_h */
diff --git a/src/vnet/unix/pcap2pg.c b/src/vnet/unix/pcap2pg.c
new file mode 100644
index 00000000000..217a61f4cb4
--- /dev/null
+++ b/src/vnet/unix/pcap2pg.c
@@ -0,0 +1,182 @@
+/*
+ * pcap2pg.c: convert pcap input to pg input
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief Functions to convert PCAP file format to VPP PG (Packet Generator)
+ *
+ */
+#include <vnet/unix/pcap.h>
+#include <vnet/ethernet/packet.h>
+#include <stdio.h>
+
+pcap_main_t pcap_main;
+
+/**
+ * @brief char * to seed a PG file
+ */
+static char * pg_fmt =
+ "packet-generator new {\n"
+ " name s%d\n"
+ " limit 1\n"
+ " size %d-%d\n"
+ " node ethernet-input\n";
+
+
+/**
+ * @brief Packet Generator Stream boilerplate
+ *
+ * @param *ofp - FILE
+ * @param i - int
+ * @param *pkt - u8
+ */
+void stream_boilerplate (FILE *ofp, int i, u8 * pkt)
+{
+ fformat(ofp, pg_fmt, i, vec_len(pkt), vec_len(pkt));
+}
+
+/**
+ * @brief Conversion of PCAP file to PG file format
+ *
+ * @param *pm - pcap_main_t
+ * @param *ofp - FILE
+ *
+ * @return rc - int
+ *
+ */
+int pcap2pg (pcap_main_t * pm, FILE *ofp)
+{
+ int i, j;
+ u8 *pkt;
+
+ for (i = 0; i < vec_len (pm->packets_read); i++)
+ {
+ int offset;
+ ethernet_header_t * h;
+ u64 ethertype;
+
+ pkt = pm->packets_read[i];
+ h = (ethernet_header_t *)pkt;
+
+ stream_boilerplate (ofp, i, pkt);
+
+ fformat (ofp, " data {\n");
+
+ ethertype = clib_net_to_host_u16 (h->type);
+
+ /**
+ * In vnet terms, packet generator interfaces are not ethernets.
+ * They don't have vlan tables.
+ * This transforms captured 802.1q VLAN packets into
+ * regular Ethernet packets.
+ */
+ if (ethertype == 0x8100 /* 802.1q vlan */)
+ {
+ u16 * vlan_ethertype = (u16 *)(h+1);
+ ethertype = clib_net_to_host_u16(vlan_ethertype[0]);
+ offset = 18;
+ }
+ else
+ offset = 14;
+
+ fformat (ofp,
+ " 0x%04x: %02x%02x.%02x%02x.%02x%02x"
+ " -> %02x%02x.%02x%02x.%02x%02x\n",
+ ethertype,
+ h->src_address[0],
+ h->src_address[1],
+ h->src_address[2],
+ h->src_address[3],
+ h->src_address[4],
+ h->src_address[5],
+ h->dst_address[0],
+ h->dst_address[1],
+ h->dst_address[2],
+ h->dst_address[3],
+ h->dst_address[4],
+ h->dst_address[5]);
+
+ fformat (ofp, " hex 0x");
+
+ for (j = offset; j < vec_len (pkt); j++)
+ fformat (ofp, "%02x", pkt[j]);
+
+ fformat (ofp, " }\n");
+ fformat (ofp, "}\n\n");
+ }
+ return 0;
+}
+
+/**
+ * @brief pcap2pg.
+ * usage: pcap2pg -i <input-file> [-o <output-file>]
+ */
+int main (int argc, char **argv)
+{
+ unformat_input_t input;
+ pcap_main_t * pm = &pcap_main;
+ u8 * input_file = 0, * output_file = 0;
+ FILE * ofp;
+ clib_error_t * error;
+
+ unformat_init_command_line (&input, argv);
+
+ while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat(&input, "-i %s", &input_file)
+ || unformat (&input, "input %s", &input_file))
+ ;
+ else if (unformat (&input, "-o %s", &output_file)
+ || unformat (&input, "output %s", &output_file))
+ ;
+ else
+ {
+ usage:
+ fformat(stderr,
+ "usage: pcap2pg -i <input-file> [-o <output-file>]\n");
+ exit (1);
+ }
+ }
+
+ if (input_file == 0)
+ goto usage;
+
+ pm->file_name = (char *)input_file;
+ error = pcap_read (pm);
+
+ if (error)
+ {
+ clib_error_report (error);
+ exit (1);
+ }
+
+ if (output_file)
+ {
+      ofp = fopen ((char *)output_file, "w");
+      if (ofp == NULL)
+        {
+          clib_unix_warning ("Couldn't create '%s'", output_file);
+          exit (1);
+        }
+    }
+ else
+ {
+ ofp = stdout;
+ }
+
+ pcap2pg (pm, ofp);
+
+ fclose (ofp);
+ exit (0);
+}
diff --git a/src/vnet/unix/tap.api b/src/vnet/unix/tap.api
new file mode 100644
index 00000000000..9b16eadbf86
--- /dev/null
+++ b/src/vnet/unix/tap.api
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \file
+
+ This file defines vpe control-plane API messages for
+ the Linux kernel TAP device driver
+*/
+
+/** \brief Initialize a new tap interface with the given parameters
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param use_random_mac - let the system generate a unique mac address
+ @param tap_name - name to associate with the new interface
+    @param mac_address - mac addr to assign to the interface if use_random_mac not set
+    @param renumber - if non-zero, renumber the interface using custom_dev_instance
+    @param custom_dev_instance - custom device instance number, used when renumber is set
+    @param tag - opaque tag to attach to the new interface
+*/
+define tap_connect
+{
+ u32 client_index;
+ u32 context;
+ u8 use_random_mac;
+ u8 tap_name[64];
+ u8 mac_address[6];
+ u8 renumber;
+ u32 custom_dev_instance;
+ u8 tag[64];
+};
+
+/** \brief Reply for tap connect request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param sw_if_index - software index allocated for the new tap interface
+*/
+define tap_connect_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
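+
+/*
+ * Illustrative exchange (not part of the API definition): replies are
+ * matched to requests via the opaque context value, e.g.
+ *
+ *   -> tap_connect       { context = 42, tap_name = "tap0", use_random_mac = 1 }
+ *   <- tap_connect_reply { context = 42, retval = 0, sw_if_index = 5 }
+ */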
+
+/** \brief Modify a tap interface with the given parameters
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface index of existing tap interface
+ @param use_random_mac - let the system generate a unique mac address
+ @param tap_name - name to associate with the new interface
+    @param mac_address - mac addr to assign to the interface if use_random_mac not set
+    @param renumber - if non-zero, renumber the interface using custom_dev_instance
+    @param custom_dev_instance - custom device instance number, used when renumber is set
+*/
+define tap_modify
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 use_random_mac;
+ u8 tap_name[64];
+ u8 mac_address[6];
+ u8 renumber;
+ u32 custom_dev_instance;
+};
+
+/** \brief Reply for tap modify request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+    @param sw_if_index - software index of the modified tap interface
+*/
+define tap_modify_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief Delete tap interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface index of existing tap interface
+*/
+define tap_delete
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+/** \brief Reply for tap delete request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define tap_delete_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Dump tap interfaces request */
+define sw_interface_tap_dump
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply for tap dump request
+ @param sw_if_index - software index of tap interface
+ @param dev_name - Linux tap device name
+*/
+define sw_interface_tap_details
+{
+ u32 context;
+ u32 sw_if_index;
+ u8 dev_name[64];
+};
diff --git a/src/vnet/unix/tap_api.c b/src/vnet/unix/tap_api.c
new file mode 100644
index 00000000000..99b79ba2e70
--- /dev/null
+++ b/src/vnet/unix/tap_api.c
@@ -0,0 +1,257 @@
+/*
+ *------------------------------------------------------------------
+ * tap_api.c - vnet tap device driver API support
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/unix/tuntap.h>
+#include <vnet/unix/tapcli.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_tap_api_msg \
+_(TAP_CONNECT, tap_connect) \
+_(TAP_MODIFY, tap_modify) \
+_(TAP_DELETE, tap_delete) \
+_(SW_INTERFACE_TAP_DUMP, sw_interface_tap_dump)
+
+#define vl_msg_name_crc_list
+#include <vnet/unix/tap.api.h>
+#undef vl_msg_name_crc_list
+
+/*
+ * WARNING: replicated pending api refactor completion
+ */
+static void
+send_sw_interface_flags_deleted (vpe_api_main_t * am,
+ unix_shared_memory_queue_t * q,
+ u32 sw_if_index)
+{
+ vl_api_sw_interface_set_flags_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SET_FLAGS);
+ mp->sw_if_index = ntohl (sw_if_index);
+
+ mp->admin_up_down = 0;
+ mp->link_up_down = 0;
+ mp->deleted = 1;
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_tap_connect_t_handler (vl_api_tap_connect_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ int rv;
+ vl_api_tap_connect_reply_t *rmp;
+ vnet_main_t *vnm = vnet_get_main ();
+ unix_shared_memory_queue_t *q;
+ u32 sw_if_index = (u32) ~ 0;
+ u8 *tag;
+
+ rv = vnet_tap_connect_renumber (vm, mp->tap_name,
+ mp->use_random_mac ? 0 : mp->mac_address,
+ &sw_if_index, mp->renumber,
+ ntohl (mp->custom_dev_instance));
+
+ /* Add tag if supplied */
+ if (rv == 0 && mp->tag[0])
+ {
+ mp->tag[ARRAY_LEN (mp->tag) - 1] = 0;
+ tag = format (0, "%s%c", mp->tag, 0);
+ vnet_set_sw_interface_tag (vnm, tag, sw_if_index);
+ }
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_TAP_CONNECT_REPLY);
+ rmp->context = mp->context;
+ rmp->retval = ntohl (rv);
+ rmp->sw_if_index = ntohl (sw_if_index);
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+vl_api_tap_modify_t_handler (vl_api_tap_modify_t * mp)
+{
+ int rv;
+ vl_api_tap_modify_reply_t *rmp;
+ unix_shared_memory_queue_t *q;
+ u32 sw_if_index = (u32) ~ 0;
+ vlib_main_t *vm = vlib_get_main ();
+
+ rv = vnet_tap_modify (vm, ntohl (mp->sw_if_index), mp->tap_name,
+ mp->use_random_mac ? 0 : mp->mac_address,
+ &sw_if_index, mp->renumber,
+ ntohl (mp->custom_dev_instance));
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_TAP_MODIFY_REPLY);
+ rmp->context = mp->context;
+ rmp->retval = ntohl (rv);
+ rmp->sw_if_index = ntohl (sw_if_index);
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+vl_api_tap_delete_t_handler (vl_api_tap_delete_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ int rv;
+ vpe_api_main_t *vam = &vpe_api_main;
+ vl_api_tap_delete_reply_t *rmp;
+ unix_shared_memory_queue_t *q;
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+
+ rv = vnet_tap_delete (vm, sw_if_index);
+ if (!rv)
+ {
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_clear_sw_interface_tag (vnm, sw_if_index);
+ }
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_TAP_DELETE_REPLY);
+ rmp->context = mp->context;
+ rmp->retval = ntohl (rv);
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+
+ if (!rv)
+ send_sw_interface_flags_deleted (vam, q, sw_if_index);
+}
+
+static void
+send_sw_interface_tap_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t * q,
+ tapcli_interface_details_t * tap_if,
+ u32 context)
+{
+ vl_api_sw_interface_tap_details_t *mp;
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_TAP_DETAILS);
+ mp->sw_if_index = ntohl (tap_if->sw_if_index);
+ strncpy ((char *) mp->dev_name,
+ (char *) tap_if->dev_name, ARRAY_LEN (mp->dev_name) - 1);
+ mp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_sw_interface_tap_dump_t_handler (vl_api_sw_interface_tap_dump_t * mp)
+{
+ int rv = 0;
+ vpe_api_main_t *am = &vpe_api_main;
+ unix_shared_memory_queue_t *q;
+ tapcli_interface_details_t *tapifs = NULL;
+ tapcli_interface_details_t *tap_if = NULL;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ rv = vnet_tap_dump_ifs (&tapifs);
+ if (rv)
+ return;
+
+ vec_foreach (tap_if, tapifs)
+ {
+ send_sw_interface_tap_details (am, q, tap_if, mp->context);
+ }
+
+ vec_free (tapifs);
+}
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_tap;
+#undef _
+}
+
+static clib_error_t *
+tap_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_tap_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (tap_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/unix/tapcli.c b/src/vnet/unix/tapcli.c
new file mode 100644
index 00000000000..9862a2bda50
--- /dev/null
+++ b/src/vnet/unix/tapcli.c
@@ -0,0 +1,1328 @@
+/*
+ *------------------------------------------------------------------
+ * tapcli.c - dynamic tap interface hookup
+ *
+ * Copyright (c) 2009 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+/**
+ * @file
+ * @brief dynamic tap interface hookup
+ */
+
+#include <fcntl.h> /* for open */
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/uio.h> /* for iovec */
+#include <netinet/in.h>
+
+#include <linux/if_arp.h>
+#include <linux/if_tun.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+
+#include <vnet/ip/ip.h>
+
+#include <vnet/ethernet/ethernet.h>
+
+#include <vnet/feature/feature.h>
+#include <vnet/devices/devices.h>
+#include <vnet/unix/tapcli.h>
+
+static vnet_device_class_t tapcli_dev_class;
+static vnet_hw_interface_class_t tapcli_interface_class;
+static vlib_node_registration_t tapcli_rx_node;
+
+static void tapcli_nopunt_frame (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame);
+/**
+ * @brief Struct for the tapcli interface
+ */
+typedef struct {
+ u32 unix_fd;
+ u32 unix_file_index;
+ u32 provision_fd;
+ /** For counters */
+ u32 sw_if_index;
+ u32 hw_if_index;
+ u32 is_promisc;
+ struct ifreq ifr;
+ u32 per_interface_next_index;
+ /** for delete */
+ u8 active;
+} tapcli_interface_t;
+
+/**
+ * @brief Struct for RX trace
+ */
+typedef struct {
+ u16 sw_if_index;
+} tapcli_rx_trace_t;
+
+/**
+ * @brief Function to format TAP CLI trace
+ *
+ * @param *s - u8 - formatting string
+ * @param *va - va_list
+ *
+ * @return *s - u8 - formatted string
+ *
+ */
+u8 * format_tapcli_rx_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ vnet_main_t * vnm = vnet_get_main();
+ tapcli_rx_trace_t * t = va_arg (*va, tapcli_rx_trace_t *);
+ s = format (s, "%U", format_vnet_sw_if_index_name,
+ vnm, t->sw_if_index);
+ return s;
+}
+
+/**
+ * @brief TAPCLI main state struct
+ */
+typedef struct {
+ /** Vector of iovecs for readv/writev calls. */
+ struct iovec * iovecs;
+
+ /** Vector of VLIB rx buffers to use. We allocate them in blocks
+ of VLIB_FRAME_SIZE (256). */
+ u32 * rx_buffers;
+
+ /** tap device destination MAC address. Required, or Linux drops pkts */
+ u8 ether_dst_mac[6];
+
+ /** Interface MTU in bytes and # of default sized buffers. */
+ u32 mtu_bytes, mtu_buffers;
+
+ /** Vector of tap interfaces */
+ tapcli_interface_t * tapcli_interfaces;
+
+ /** Vector of deleted tap interfaces */
+ u32 * tapcli_inactive_interfaces;
+
+ /** Bitmap of tap interfaces with pending reads */
+ uword * pending_read_bitmap;
+
+  /** Hash table to find tapcli interface given sw_if_index */
+ uword * tapcli_interface_index_by_sw_if_index;
+
+ /** Hash table to find tapcli interface given unix fd */
+ uword * tapcli_interface_index_by_unix_fd;
+
+ /** renumbering table */
+ u32 * show_dev_instance_by_real_dev_instance;
+
+ /** 1 => disable CLI */
+ int is_disabled;
+
+ /** convenience - vlib_main_t */
+ vlib_main_t * vlib_main;
+ /** convenience - vnet_main_t */
+ vnet_main_t * vnet_main;
+ /** convenience - unix_main_t */
+ unix_main_t * unix_main;
+} tapcli_main_t;
+
+static tapcli_main_t tapcli_main;
+
+/**
+ * @brief tapcli TX node function
+ * @node tapcli-tx
+ *
+ * Output node, writes the buffers comprising the incoming frame
+ * to the tun/tap device, aka hands them to the Linux kernel stack.
+ *
+ * @param *vm - vlib_main_t
+ * @param *node - vlib_node_runtime_t
+ * @param *frame - vlib_frame_t
+ *
+ * @return n_packets - uword
+ *
+ */
+static uword
+tapcli_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 * buffers = vlib_frame_args (frame);
+ uword n_packets = frame->n_vectors;
+ tapcli_main_t * tm = &tapcli_main;
+ tapcli_interface_t * ti;
+ int i;
+
+ for (i = 0; i < n_packets; i++)
+ {
+ struct iovec * iov;
+ vlib_buffer_t * b;
+ uword l;
+ vnet_hw_interface_t * hw;
+ uword * p;
+ u32 tx_sw_if_index;
+
+ b = vlib_get_buffer (vm, buffers[i]);
+
+ tx_sw_if_index = vnet_buffer(b)->sw_if_index[VLIB_TX];
+ if (tx_sw_if_index == (u32)~0)
+ tx_sw_if_index = vnet_buffer(b)->sw_if_index[VLIB_RX];
+
+ ASSERT(tx_sw_if_index != (u32)~0);
+
+ /* Use the sup intfc to finesse vlan subifs */
+ hw = vnet_get_sup_hw_interface (tm->vnet_main, tx_sw_if_index);
+ tx_sw_if_index = hw->sw_if_index;
+
+ p = hash_get (tm->tapcli_interface_index_by_sw_if_index,
+ tx_sw_if_index);
+ if (p == 0)
+ {
+ clib_warning ("sw_if_index %d unknown", tx_sw_if_index);
+ /* $$$ leak, but this should never happen... */
+ continue;
+ }
+ else
+ ti = vec_elt_at_index (tm->tapcli_interfaces, p[0]);
+
+ /* Re-set iovecs if present. */
+ if (tm->iovecs)
+ _vec_len (tm->iovecs) = 0;
+
+ /* VLIB buffer chain -> Unix iovec(s). */
+ vec_add2 (tm->iovecs, iov, 1);
+ iov->iov_base = b->data + b->current_data;
+ iov->iov_len = l = b->current_length;
+
+ if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ do {
+ b = vlib_get_buffer (vm, b->next_buffer);
+
+ vec_add2 (tm->iovecs, iov, 1);
+
+ iov->iov_base = b->data + b->current_data;
+ iov->iov_len = b->current_length;
+ l += b->current_length;
+ } while (b->flags & VLIB_BUFFER_NEXT_PRESENT);
+ }
+
+ if (writev (ti->unix_fd, tm->iovecs, vec_len (tm->iovecs)) < l)
+ clib_unix_warning ("writev");
+ }
+
+ vlib_buffer_free(vm, vlib_frame_vector_args(frame), frame->n_vectors);
+
+ return n_packets;
+}
+
+VLIB_REGISTER_NODE (tapcli_tx_node,static) = {
+ .function = tapcli_tx,
+ .name = "tapcli-tx",
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .vector_size = 4,
+};
+
+/**
+ * @brief Per-interface RX function, dispatched from the tapcli-rx node
+ *
+ * @param *vm - vlib_main_t
+ * @param *node - vlib_node_runtime_t
+ * @param *ti - tapcli_interface_t
+ *
+ * @return n_packets - uword
+ *
+ */
+static uword tapcli_rx_iface(vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ tapcli_interface_t * ti)
+{
+ tapcli_main_t * tm = &tapcli_main;
+ const uword buffer_size = VLIB_BUFFER_DATA_SIZE;
+ u32 n_trace = vlib_get_trace_count (vm, node);
+ u8 set_trace = 0;
+
+ vnet_main_t *vnm;
+ vnet_sw_interface_t * si;
+ u8 admin_down;
+ u32 next = node->cached_next_index;
+ u32 n_left_to_next, next_index;
+ u32 *to_next;
+
+ vnm = vnet_get_main();
+ si = vnet_get_sw_interface (vnm, ti->sw_if_index);
+ admin_down = !(si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+
+ vlib_get_next_frame(vm, node, next, to_next, n_left_to_next);
+
+ while (n_left_to_next) { // Fill at most one vector
+ vlib_buffer_t *b_first, *b, *prev;
+ u32 bi_first, bi;
+ word n_bytes_in_packet;
+ int j, n_bytes_left;
+
+ if (PREDICT_FALSE(vec_len(tm->rx_buffers) < tm->mtu_buffers)) {
+ uword len = vec_len(tm->rx_buffers);
+ _vec_len(tm->rx_buffers) +=
+ vlib_buffer_alloc_from_free_list(vm, &tm->rx_buffers[len],
+ VLIB_FRAME_SIZE - len, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+ if (PREDICT_FALSE(vec_len(tm->rx_buffers) < tm->mtu_buffers)) {
+ vlib_node_increment_counter(vm, tapcli_rx_node.index,
+ TAPCLI_ERROR_BUFFER_ALLOC,
+ tm->mtu_buffers - vec_len(tm->rx_buffers));
+ break;
+ }
+ }
+
+ uword i_rx = vec_len (tm->rx_buffers) - 1;
+
+ /* Allocate RX buffers from end of rx_buffers.
+ Turn them into iovecs to pass to readv. */
+ vec_validate (tm->iovecs, tm->mtu_buffers - 1);
+ for (j = 0; j < tm->mtu_buffers; j++) {
+ b = vlib_get_buffer (vm, tm->rx_buffers[i_rx - j]);
+ tm->iovecs[j].iov_base = b->data;
+ tm->iovecs[j].iov_len = buffer_size;
+ }
+
+ n_bytes_left = readv (ti->unix_fd, tm->iovecs, tm->mtu_buffers);
+ n_bytes_in_packet = n_bytes_left;
+ if (n_bytes_left <= 0) {
+ if (errno != EAGAIN) {
+ vlib_node_increment_counter(vm, tapcli_rx_node.index,
+ TAPCLI_ERROR_READ, 1);
+ }
+ break;
+ }
+
+ bi_first = tm->rx_buffers[i_rx];
+ b = b_first = vlib_get_buffer (vm, tm->rx_buffers[i_rx]);
+ prev = NULL;
+
+ while (1) {
+ b->current_length = n_bytes_left < buffer_size ? n_bytes_left : buffer_size;
+ n_bytes_left -= buffer_size;
+
+ if (prev) {
+ prev->next_buffer = bi;
+ prev->flags |= VLIB_BUFFER_NEXT_PRESENT;
+ }
+ prev = b;
+
+ /* last segment */
+ if (n_bytes_left <= 0)
+ break;
+
+ i_rx--;
+ bi = tm->rx_buffers[i_rx];
+ b = vlib_get_buffer (vm, bi);
+ }
+
+ _vec_len (tm->rx_buffers) = i_rx;
+
+ b_first->total_length_not_including_first_buffer =
+ (n_bytes_in_packet > buffer_size) ? n_bytes_in_packet - buffer_size : 0;
+ b_first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
+
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b_first);
+
+ vnet_buffer (b_first)->sw_if_index[VLIB_RX] = ti->sw_if_index;
+ vnet_buffer (b_first)->sw_if_index[VLIB_TX] = (u32)~0;
+
+ b_first->error = node->errors[TAPCLI_ERROR_NONE];
+ next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ next_index = (ti->per_interface_next_index != ~0) ?
+ ti->per_interface_next_index : next_index;
+ next_index = admin_down ? VNET_DEVICE_INPUT_NEXT_DROP : next_index;
+
+ to_next[0] = bi_first;
+ to_next++;
+ n_left_to_next--;
+
+ vnet_feature_start_device_input_x1 (ti->sw_if_index, &next_index,
+ b_first, 0);
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next,
+ to_next, n_left_to_next,
+ bi_first, next_index);
+
+ /* Interface counters for tapcli interface. */
+ if (PREDICT_TRUE(!admin_down)) {
+ vlib_increment_combined_counter (
+ vnet_main.interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ os_get_cpu_number(), ti->sw_if_index,
+ 1, n_bytes_in_packet);
+
+ if (PREDICT_FALSE(n_trace > 0)) {
+ vlib_trace_buffer (vm, node, next_index,
+ b_first, /* follow_chain */ 1);
+ n_trace--;
+ set_trace = 1;
+ tapcli_rx_trace_t *t0 = vlib_add_trace (vm, node, b_first, sizeof (*t0));
+ t0->sw_if_index = si->sw_if_index;
+ }
+ }
+ }
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ if (set_trace)
+ vlib_set_trace_count (vm, node, n_trace);
+ return VLIB_FRAME_SIZE - n_left_to_next;
+}
+
+/**
+ * @brief tapcli RX node function
+ * @node tapcli-rx
+ *
+ * Input node from the Kernel tun/tap device
+ *
+ * @param *vm - vlib_main_t
+ * @param *node - vlib_node_runtime_t
+ * @param *frame - vlib_frame_t
+ *
+ * @return n_packets - uword
+ *
+ */
+static uword
+tapcli_rx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ tapcli_main_t * tm = &tapcli_main;
+ static u32 * ready_interface_indices;
+ tapcli_interface_t * ti;
+ int i;
+ u32 total_count = 0;
+
+ vec_reset_length (ready_interface_indices);
+ clib_bitmap_foreach (i, tm->pending_read_bitmap,
+ ({
+ vec_add1 (ready_interface_indices, i);
+ }));
+
+ if (vec_len (ready_interface_indices) == 0)
+ return 0;
+
+ for (i = 0; i < vec_len(ready_interface_indices); i++)
+ {
+ tm->pending_read_bitmap =
+ clib_bitmap_set (tm->pending_read_bitmap,
+ ready_interface_indices[i], 0);
+
+ ti = vec_elt_at_index (tm->tapcli_interfaces, ready_interface_indices[i]);
+ total_count += tapcli_rx_iface(vm, node, ti);
+ }
+  return total_count; /* may exceed VLIB_FRAME_SIZE: each ready interface can fill a frame */
+}
+
+/** TAPCLI error strings */
+static char * tapcli_rx_error_strings[] = {
+#define _(sym,string) string,
+ foreach_tapcli_error
+#undef _
+};
+
+VLIB_REGISTER_NODE (tapcli_rx_node, static) = {
+ .function = tapcli_rx,
+ .name = "tapcli-rx",
+ .sibling_of = "device-input",
+ .type = VLIB_NODE_TYPE_INPUT,
+ .state = VLIB_NODE_STATE_INTERRUPT,
+ .vector_size = 4,
+ .n_errors = TAPCLI_N_ERROR,
+ .error_strings = tapcli_rx_error_strings,
+ .format_trace = format_tapcli_rx_trace,
+};
+
+
+/**
+ * @brief Gets called when file descriptor is ready from epoll.
+ *
+ * @param *uf - unix_file_t
+ *
+ * @return error - clib_error_t
+ *
+ */
+static clib_error_t * tapcli_read_ready (unix_file_t * uf)
+{
+ vlib_main_t * vm = vlib_get_main();
+ tapcli_main_t * tm = &tapcli_main;
+ uword * p;
+
+ /** Schedule the rx node */
+ vlib_node_set_interrupt_pending (vm, tapcli_rx_node.index);
+
+ p = hash_get (tm->tapcli_interface_index_by_unix_fd, uf->file_descriptor);
+
+ /** Mark the specific tap interface ready-to-read */
+ if (p)
+ tm->pending_read_bitmap = clib_bitmap_set (tm->pending_read_bitmap,
+ p[0], 1);
+ else
+ clib_warning ("fd %d not in hash table", uf->file_descriptor);
+
+ return 0;
+}
+
+/**
+ * @brief CLI function for TAPCLI configuration
+ *
+ * @param *vm - vlib_main_t
+ * @param *input - unformat_input_t
+ *
+ * @return error - clib_error_t
+ *
+ */
+static clib_error_t *
+tapcli_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ tapcli_main_t *tm = &tapcli_main;
+ const uword buffer_size = VLIB_BUFFER_DATA_SIZE;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "mtu %d", &tm->mtu_bytes))
+ ;
+ else if (unformat (input, "disable"))
+ tm->is_disabled = 1;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ if (tm->is_disabled)
+ return 0;
+
+ if (geteuid())
+ {
+ clib_warning ("tapcli disabled: must be superuser");
+ tm->is_disabled = 1;
+ return 0;
+ }
+
+ tm->mtu_buffers = (tm->mtu_bytes + (buffer_size - 1)) / buffer_size;
+
+ return 0;
+}
+
+/**
+ * @brief Renumber TAPCLI interface
+ *
+ * @param *hi - vnet_hw_interface_t
+ * @param new_dev_instance - u32
+ *
+ * @return rc - int
+ *
+ */
+static int tap_name_renumber (vnet_hw_interface_t * hi,
+ u32 new_dev_instance)
+{
+ tapcli_main_t *tm = &tapcli_main;
+
+ vec_validate_init_empty (tm->show_dev_instance_by_real_dev_instance,
+ hi->dev_instance, ~0);
+
+ tm->show_dev_instance_by_real_dev_instance [hi->dev_instance] =
+ new_dev_instance;
+
+ return 0;
+}
+
+VLIB_CONFIG_FUNCTION (tapcli_config, "tapcli");
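+
+/*
+ * Example startup.conf stanzas accepted by tapcli_config() above
+ * (values illustrative):
+ *
+ *   tapcli {
+ *     mtu 1500
+ *   }
+ *
+ * or, to disable the feature entirely:
+ *
+ *   tapcli {
+ *     disable
+ *   }
+ */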
+
+/**
+ * @brief Free "no punt" frame
+ *
+ * @param *vm - vlib_main_t
+ * @param *node - vlib_node_runtime_t
+ * @param *frame - vlib_frame_t
+ *
+ */
+static void
+tapcli_nopunt_frame (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 * buffers = vlib_frame_args (frame);
+ uword n_packets = frame->n_vectors;
+ vlib_buffer_free (vm, buffers, n_packets);
+ vlib_frame_free (vm, node, frame);
+}
+
+VNET_HW_INTERFACE_CLASS (tapcli_interface_class,static) = {
+ .name = "tapcli",
+ .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
+};
+
+/**
+ * @brief Formatter for TAPCLI interface name
+ *
+ * @param *s - formatter string
+ * @param *args - va_list
+ *
+ * @return *s - formatted string
+ *
+ */
+static u8 * format_tapcli_interface_name (u8 * s, va_list * args)
+{
+ u32 i = va_arg (*args, u32);
+ u32 show_dev_instance = ~0;
+ tapcli_main_t * tm = &tapcli_main;
+
+ if (i < vec_len (tm->show_dev_instance_by_real_dev_instance))
+ show_dev_instance = tm->show_dev_instance_by_real_dev_instance[i];
+
+ if (show_dev_instance != ~0)
+ i = show_dev_instance;
+
+ s = format (s, "tap-%d", i);
+ return s;
+}
+
+/**
+ * @brief Modify interface flags for TAPCLI interface
+ *
+ * @param *vnm - vnet_main_t
+ * @param *hw - vnet_hw_interface_t
+ * @param flags - u32
+ *
+ * @return rc - u32
+ *
+ */
+static u32 tapcli_flag_change (vnet_main_t * vnm,
+ vnet_hw_interface_t * hw,
+ u32 flags)
+{
+ tapcli_main_t *tm = &tapcli_main;
+ tapcli_interface_t *ti;
+
+ ti = vec_elt_at_index (tm->tapcli_interfaces, hw->dev_instance);
+
+ if (flags & ETHERNET_INTERFACE_FLAG_MTU)
+ {
+ const uword buffer_size = VLIB_BUFFER_DATA_SIZE;
+ tm->mtu_bytes = hw->max_packet_bytes;
+ tm->mtu_buffers = (tm->mtu_bytes + (buffer_size - 1)) / buffer_size;
+ }
+ else
+ {
+ struct ifreq ifr;
+ u32 want_promisc;
+
+ memcpy (&ifr, &ti->ifr, sizeof (ifr));
+
+ /* get flags, modify to bring up interface... */
+ if (ioctl (ti->provision_fd, SIOCGIFFLAGS, &ifr) < 0)
+ {
+ clib_unix_warning ("Couldn't get interface flags for %s", hw->name);
+ return 0;
+ }
+
+ want_promisc = (flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL) != 0;
+
+ if (want_promisc == ti->is_promisc)
+ return 0;
+
+ if (flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
+ ifr.ifr_flags |= IFF_PROMISC;
+ else
+ ifr.ifr_flags &= ~(IFF_PROMISC);
+
+ /* get flags, modify to bring up interface... */
+ if (ioctl (ti->provision_fd, SIOCSIFFLAGS, &ifr) < 0)
+ {
+ clib_unix_warning ("Couldn't set interface flags for %s", hw->name);
+ return 0;
+ }
+
+ ti->is_promisc = want_promisc;
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Setting the TAP interface's next processing node
+ *
+ * @param *vnm - vnet_main_t
+ * @param hw_if_index - u32
+ * @param node_index - u32
+ *
+ */
+static void tapcli_set_interface_next_node (vnet_main_t *vnm,
+ u32 hw_if_index,
+ u32 node_index)
+{
+ tapcli_main_t *tm = &tapcli_main;
+ tapcli_interface_t *ti;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+
+ ti = vec_elt_at_index (tm->tapcli_interfaces, hw->dev_instance);
+
+ /** Shut off redirection */
+ if (node_index == ~0)
+ {
+ ti->per_interface_next_index = node_index;
+ return;
+ }
+
+ ti->per_interface_next_index =
+ vlib_node_add_next (tm->vlib_main, tapcli_rx_node.index, node_index);
+}
+
+/**
+ * @brief Set link_state to match admin_state; otherwise things like IPv6 neighbor discovery break
+ *
+ * @param *vnm - vnet_main_t
+ * @param hw_if_index - u32
+ * @param flags - u32
+ *
+ * @return error - clib_error_t
+ */
+static clib_error_t *
+tapcli_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+{
+ uword is_admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+ u32 hw_flags;
+ u32 speed_duplex = VNET_HW_INTERFACE_FLAG_FULL_DUPLEX
+ | VNET_HW_INTERFACE_FLAG_SPEED_1G;
+
+ if (is_admin_up)
+ hw_flags = VNET_HW_INTERFACE_FLAG_LINK_UP | speed_duplex;
+ else
+ hw_flags = speed_duplex;
+
+ vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags);
+ return 0;
+}
+
+VNET_DEVICE_CLASS (tapcli_dev_class,static) = {
+ .name = "tapcli",
+ .tx_function = tapcli_tx,
+ .format_device_name = format_tapcli_interface_name,
+ .rx_redirect_to_node = tapcli_set_interface_next_node,
+ .name_renumber = tap_name_renumber,
+ .admin_up_down_function = tapcli_interface_admin_up_down,
+};
+
+/**
+ * @brief Dump TAP interfaces
+ *
+ * @param **out_tapids - tapcli_interface_details_t
+ *
+ * @return rc - int
+ *
+ */
+int vnet_tap_dump_ifs (tapcli_interface_details_t **out_tapids)
+{
+ tapcli_main_t * tm = &tapcli_main;
+ tapcli_interface_t * ti;
+
+ tapcli_interface_details_t * r_tapids = NULL;
+ tapcli_interface_details_t * tapid = NULL;
+
+ vec_foreach (ti, tm->tapcli_interfaces) {
+ if (!ti->active)
+ continue;
+ vec_add2(r_tapids, tapid, 1);
+ tapid->sw_if_index = ti->sw_if_index;
+ strncpy((char *)tapid->dev_name, ti->ifr.ifr_name, sizeof (ti->ifr.ifr_name)-1);
+ }
+
+ *out_tapids = r_tapids;
+
+ return 0;
+}
+
+/**
+ * @brief Get tap interface from inactive interfaces or create new
+ *
+ * @return interface - tapcli_interface_t
+ *
+ */
+static tapcli_interface_t *tapcli_get_new_tapif()
+{
+ tapcli_main_t * tm = &tapcli_main;
+ tapcli_interface_t *ti = NULL;
+
+ int inactive_cnt = vec_len(tm->tapcli_inactive_interfaces);
+ // if there are any inactive ifaces
+ if (inactive_cnt > 0) {
+ // take last
+ u32 ti_idx = tm->tapcli_inactive_interfaces[inactive_cnt - 1];
+ if (vec_len(tm->tapcli_interfaces) > ti_idx) {
+ ti = vec_elt_at_index (tm->tapcli_interfaces, ti_idx);
+ clib_warning("reusing tap interface");
+ }
+ // "remove" from inactive list
+ _vec_len(tm->tapcli_inactive_interfaces) -= 1;
+ }
+
+ // ti was not retrieved from inactive ifaces - create new
+ if (!ti)
+ vec_add2 (tm->tapcli_interfaces, ti, 1);
+
+ return ti;
+}
+
+/**
+ * @brief Connect a TAP interface
+ *
+ * @param vm - vlib_main_t
+ * @param intfc_name - u8
+ * @param hwaddr_arg - u8
+ * @param sw_if_indexp - u32
+ *
+ * @return rc - int
+ *
+ */
+int vnet_tap_connect (vlib_main_t * vm, u8 * intfc_name, u8 *hwaddr_arg,
+ u32 * sw_if_indexp)
+{
+ tapcli_main_t * tm = &tapcli_main;
+ tapcli_interface_t * ti = NULL;
+ struct ifreq ifr;
+ int flags;
+ int dev_net_tun_fd;
+ int dev_tap_fd = -1;
+ clib_error_t * error;
+ u8 hwaddr [6];
+ int rv = 0;
+
+ if (tm->is_disabled)
+ {
+ return VNET_API_ERROR_FEATURE_DISABLED;
+ }
+
+ flags = IFF_TAP | IFF_NO_PI;
+
+ if ((dev_net_tun_fd = open ("/dev/net/tun", O_RDWR)) < 0)
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+
+ memset (&ifr, 0, sizeof (ifr));
+ strncpy(ifr.ifr_name, (char *) intfc_name, sizeof (ifr.ifr_name)-1);
+ ifr.ifr_flags = flags;
+ if (ioctl (dev_net_tun_fd, TUNSETIFF, (void *)&ifr) < 0)
+ {
+ rv = VNET_API_ERROR_SYSCALL_ERROR_2;
+ goto error;
+ }
+
+ /* Open a provisioning socket */
+ if ((dev_tap_fd = socket(PF_PACKET, SOCK_RAW,
+ htons(ETH_P_ALL))) < 0 )
+ {
+ rv = VNET_API_ERROR_SYSCALL_ERROR_3;
+ goto error;
+ }
+
+ /* Find the interface index. */
+ {
+ struct ifreq ifr;
+ struct sockaddr_ll sll;
+
+ memset (&ifr, 0, sizeof(ifr));
+ strncpy (ifr.ifr_name, (char *) intfc_name, sizeof (ifr.ifr_name)-1);
+ if (ioctl (dev_tap_fd, SIOCGIFINDEX, &ifr) < 0 )
+ {
+ rv = VNET_API_ERROR_SYSCALL_ERROR_4;
+ goto error;
+ }
+
+ /* Bind the provisioning socket to the interface. */
+ memset(&sll, 0, sizeof(sll));
+ sll.sll_family = AF_PACKET;
+ sll.sll_ifindex = ifr.ifr_ifindex;
+ sll.sll_protocol = htons(ETH_P_ALL);
+
+ if (bind(dev_tap_fd, (struct sockaddr*) &sll, sizeof(sll)) < 0)
+ {
+ rv = VNET_API_ERROR_SYSCALL_ERROR_5;
+ goto error;
+ }
+ }
+
+ /* non-blocking I/O on /dev/tapX */
+ {
+ int one = 1;
+ if (ioctl (dev_net_tun_fd, FIONBIO, &one) < 0)
+ {
+ rv = VNET_API_ERROR_SYSCALL_ERROR_6;
+ goto error;
+ }
+ }
+ ifr.ifr_mtu = tm->mtu_bytes;
+ if (ioctl (dev_tap_fd, SIOCSIFMTU, &ifr) < 0)
+ {
+ rv = VNET_API_ERROR_SYSCALL_ERROR_7;
+ goto error;
+ }
+
+ /* get flags, modify to bring up interface... */
+ if (ioctl (dev_tap_fd, SIOCGIFFLAGS, &ifr) < 0)
+ {
+ rv = VNET_API_ERROR_SYSCALL_ERROR_8;
+ goto error;
+ }
+
+ ifr.ifr_flags |= (IFF_UP | IFF_RUNNING);
+
+ if (ioctl (dev_tap_fd, SIOCSIFFLAGS, &ifr) < 0)
+ {
+ rv = VNET_API_ERROR_SYSCALL_ERROR_9;
+ goto error;
+ }
+
+ ti = tapcli_get_new_tapif();
+ ti->per_interface_next_index = ~0;
+
+ if (hwaddr_arg != 0)
+ clib_memcpy(hwaddr, hwaddr_arg, 6);
+ else
+ {
+ f64 now = vlib_time_now(vm);
+ u32 rnd;
+ rnd = (u32) (now * 1e6);
+ rnd = random_u32 (&rnd);
+
+ memcpy (hwaddr+2, &rnd, sizeof(rnd));
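+      /* 02:fe:... = locally administered, unicast MAC prefix */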
+ hwaddr[0] = 2;
+ hwaddr[1] = 0xfe;
+ }
+
+ error = ethernet_register_interface
+ (tm->vnet_main,
+ tapcli_dev_class.index,
+ ti - tm->tapcli_interfaces /* device instance */,
+ hwaddr /* ethernet address */,
+ &ti->hw_if_index,
+ tapcli_flag_change);
+
+ if (error)
+ {
+ clib_error_report (error);
+ rv = VNET_API_ERROR_INVALID_REGISTRATION;
+ goto error;
+ }
+
+ {
+ unix_file_t template = {0};
+ template.read_function = tapcli_read_ready;
+ template.file_descriptor = dev_net_tun_fd;
+ ti->unix_file_index = unix_file_add (&unix_main, &template);
+ ti->unix_fd = dev_net_tun_fd;
+ ti->provision_fd = dev_tap_fd;
+ clib_memcpy (&ti->ifr, &ifr, sizeof (ifr));
+ }
+
+ {
+ vnet_hw_interface_t * hw;
+ hw = vnet_get_hw_interface (tm->vnet_main, ti->hw_if_index);
+ hw->min_supported_packet_bytes = TAP_MTU_MIN;
+ hw->max_supported_packet_bytes = TAP_MTU_MAX;
+ hw->max_l3_packet_bytes[VLIB_RX] = hw->max_l3_packet_bytes[VLIB_TX] = hw->max_supported_packet_bytes - sizeof(ethernet_header_t);
+ ti->sw_if_index = hw->sw_if_index;
+ if (sw_if_indexp)
+ *sw_if_indexp = hw->sw_if_index;
+ }
+
+ ti->active = 1;
+
+ hash_set (tm->tapcli_interface_index_by_sw_if_index, ti->sw_if_index,
+ ti - tm->tapcli_interfaces);
+
+ hash_set (tm->tapcli_interface_index_by_unix_fd, ti->unix_fd,
+ ti - tm->tapcli_interfaces);
+
+ return rv;
+
+ error:
+ close (dev_net_tun_fd);
+ if (dev_tap_fd >= 0)
+ close (dev_tap_fd);
+
+ return rv;
+}
+
+/**
+ * @brief Renumber a TAP interface
+ *
+ * @param *vm - vlib_main_t
+ * @param *intfc_name - u8
+ * @param *hwaddr_arg - u8
+ * @param *sw_if_indexp - u32
+ * @param renumber - u8
+ * @param custom_dev_instance - u32
+ *
+ * @return rc - int
+ *
+ */
+int vnet_tap_connect_renumber (vlib_main_t * vm, u8 * intfc_name,
+ u8 *hwaddr_arg, u32 * sw_if_indexp,
+ u8 renumber, u32 custom_dev_instance)
+{
+ int rv = vnet_tap_connect(vm, intfc_name, hwaddr_arg, sw_if_indexp);
+
+ if (!rv && renumber)
+ vnet_interface_name_renumber (*sw_if_indexp, custom_dev_instance);
+
+ return rv;
+}
+
+/**
+ * @brief Disconnect TAP CLI interface
+ *
+ * @param *ti - tapcli_interface_t
+ *
+ * @return rc - int
+ *
+ */
+static int tapcli_tap_disconnect (tapcli_interface_t *ti)
+{
+ int rv = 0;
+ vnet_main_t * vnm = vnet_get_main();
+ tapcli_main_t * tm = &tapcli_main;
+ u32 sw_if_index = ti->sw_if_index;
+
+ // bring interface down
+ vnet_sw_interface_set_flags (vnm, sw_if_index, 0);
+
+ if (ti->unix_file_index != ~0) {
+ unix_file_del (&unix_main, unix_main.file_pool + ti->unix_file_index);
+ ti->unix_file_index = ~0;
+ }
+ else
+ close(ti->unix_fd);
+
+ hash_unset (tm->tapcli_interface_index_by_unix_fd, ti->unix_fd);
+ hash_unset (tm->tapcli_interface_index_by_sw_if_index, ti->sw_if_index);
+ close(ti->provision_fd);
+ ti->unix_fd = -1;
+ ti->provision_fd = -1;
+
+ return rv;
+}
+
+/**
+ * @brief Delete TAP interface
+ *
+ * @param *vm - vlib_main_t
+ * @param sw_if_index - u32
+ *
+ * @return rc - int
+ *
+ */
+int vnet_tap_delete(vlib_main_t *vm, u32 sw_if_index)
+{
+ int rv = 0;
+ tapcli_main_t * tm = &tapcli_main;
+ tapcli_interface_t *ti;
+ uword *p = NULL;
+
+ p = hash_get (tm->tapcli_interface_index_by_sw_if_index,
+ sw_if_index);
+ if (p == 0) {
+ clib_warning ("sw_if_index %d unknown", sw_if_index);
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ }
+ ti = vec_elt_at_index (tm->tapcli_interfaces, p[0]);
+
+ // inactive
+ ti->active = 0;
+ tapcli_tap_disconnect(ti);
+ // add to inactive list
+ vec_add1(tm->tapcli_inactive_interfaces, ti - tm->tapcli_interfaces);
+
+ // reset renumbered iface
+ if (p[0] < vec_len (tm->show_dev_instance_by_real_dev_instance))
+ tm->show_dev_instance_by_real_dev_instance[p[0]] = ~0;
+
+ ethernet_delete_interface (tm->vnet_main, ti->hw_if_index);
+ return rv;
+}
+
+/**
+ * @brief CLI function to delete TAP interface
+ *
+ * @param *vm - vlib_main_t
+ * @param *input - unformat_input_t
+ * @param *cmd - vlib_cli_command_t
+ *
+ * @return error - clib_error_t
+ *
+ */
+static clib_error_t *
+tap_delete_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ tapcli_main_t * tm = &tapcli_main;
+ u32 sw_if_index = ~0;
+
+ if (tm->is_disabled)
+ {
+ return clib_error_return (0, "device disabled...");
+ }
+
+ if (unformat (input, "%U", unformat_vnet_sw_interface, tm->vnet_main,
+ &sw_if_index))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+
+
+ int rc = vnet_tap_delete (vm, sw_if_index);
+
+ if (!rc) {
+ vlib_cli_output (vm, "Deleted.");
+ } else {
+ vlib_cli_output (vm, "Error during deletion of tap interface. (rc: %d)", rc);
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (tap_delete_command, static) = {
+ .path = "tap delete",
+ .short_help = "tap delete <vpp-tap-intfc-name>",
+ .function = tap_delete_command_fn,
+};
+
+/**
+ * @brief Modifies tap interface - can result in new interface being created
+ *
+ * @param *vm - vlib_main_t
+ * @param orig_sw_if_index - u32
+ * @param *intfc_name - u8
+ * @param *hwaddr_arg - u8
+ * @param *sw_if_indexp - u32
+ * @param renumber - u8
+ * @param custom_dev_instance - u32
+ *
+ * @return rc - int
+ *
+ */
+int vnet_tap_modify (vlib_main_t * vm, u32 orig_sw_if_index,
+ u8 * intfc_name, u8 *hwaddr_arg,
+ u32 * sw_if_indexp,
+ u8 renumber, u32 custom_dev_instance)
+{
+ int rv = vnet_tap_delete (vm, orig_sw_if_index);
+
+ if (rv)
+ return rv;
+
+ rv = vnet_tap_connect_renumber(vm, intfc_name, hwaddr_arg, sw_if_indexp,
+ renumber, custom_dev_instance);
+
+ return rv;
+}
+
+/**
+ * @brief CLI function to modify TAP interface
+ *
+ * @param *vm - vlib_main_t
+ * @param *input - unformat_input_t
+ * @param *cmd - vlib_cli_command_t
+ *
+ * @return error - clib_error_t
+ *
+ */
+static clib_error_t *
+tap_modify_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ u8 * intfc_name;
+ tapcli_main_t * tm = &tapcli_main;
+ u32 sw_if_index = ~0;
+ u32 new_sw_if_index = ~0;
+ int user_hwaddr = 0;
+ u8 hwaddr[6];
+
+ if (tm->is_disabled)
+ {
+ return clib_error_return (0, "device disabled...");
+ }
+
+ if (unformat (input, "%U", unformat_vnet_sw_interface, tm->vnet_main,
+ &sw_if_index))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+
+ if (unformat (input, "%s", &intfc_name))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+
+ if (unformat(input, "hwaddr %U", unformat_ethernet_address,
+ &hwaddr))
+ user_hwaddr = 1;
+
+
+ int rc = vnet_tap_modify (vm, sw_if_index, intfc_name,
+ (user_hwaddr == 1 ? hwaddr : 0),
+ &new_sw_if_index, 0, 0);
+
+ if (!rc) {
+ vlib_cli_output (vm, "Modified %U for Linux tap '%s'",
+ format_vnet_sw_if_index_name, tm->vnet_main,
+ new_sw_if_index, intfc_name);
+ } else {
+ vlib_cli_output (vm, "Error during modification of tap interface. (rc: %d)", rc);
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (tap_modify_command, static) = {
+ .path = "tap modify",
+ .short_help = "tap modify <vpp-tap-intfc-name> <linux-intfc-name> [hwaddr <addr>]",
+ .function = tap_modify_command_fn,
+};
+
+/**
+ * @brief CLI function to connect TAP interface
+ *
+ * @param *vm - vlib_main_t
+ * @param *input - unformat_input_t
+ * @param *cmd - vlib_cli_command_t
+ *
+ * @return error - clib_error_t
+ *
+ */
+static clib_error_t *
+tap_connect_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ u8 * intfc_name;
+ tapcli_main_t * tm = &tapcli_main;
+ u8 hwaddr[6];
+ u8 *hwaddr_arg = 0;
+ u32 sw_if_index;
+
+ if (tm->is_disabled)
+ {
+ return clib_error_return (0, "device disabled...");
+ }
+
+ if (unformat (input, "%s", &intfc_name))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+
+ if (unformat(input, "hwaddr %U", unformat_ethernet_address,
+ &hwaddr))
+ hwaddr_arg = hwaddr;
+
+ /* It is here for backward compatibility */
+ if (unformat(input, "hwaddr random"))
+ ;
+
+ int rv = vnet_tap_connect(vm, intfc_name, hwaddr_arg, &sw_if_index);
+ if (rv) {
+ switch (rv) {
+ case VNET_API_ERROR_SYSCALL_ERROR_1:
+ vlib_cli_output (vm, "Couldn't open /dev/net/tun");
+ break;
+
+ case VNET_API_ERROR_SYSCALL_ERROR_2:
+ vlib_cli_output (vm, "Error setting flags on '%s'", intfc_name);
+ break;
+
+ case VNET_API_ERROR_SYSCALL_ERROR_3:
+ vlib_cli_output (vm, "Couldn't open provisioning socket");
+ break;
+
+ case VNET_API_ERROR_SYSCALL_ERROR_4:
+ vlib_cli_output (vm, "Couldn't get if_index");
+ break;
+
+ case VNET_API_ERROR_SYSCALL_ERROR_5:
+ vlib_cli_output (vm, "Couldn't bind provisioning socket");
+ break;
+
+ case VNET_API_ERROR_SYSCALL_ERROR_6:
+        vlib_cli_output (vm, "Couldn't set device non-blocking flag");
+        break;
+
+      case VNET_API_ERROR_SYSCALL_ERROR_7:
+        vlib_cli_output (vm, "Couldn't set device MTU");
+        break;
+
+      case VNET_API_ERROR_SYSCALL_ERROR_8:
+        vlib_cli_output (vm, "Couldn't get interface flags");
+        break;
+
+      case VNET_API_ERROR_SYSCALL_ERROR_9:
+        vlib_cli_output (vm, "Couldn't set intfc admin state up");
+        break;
+
+      case VNET_API_ERROR_INVALID_REGISTRATION:
+        vlib_cli_output (vm, "Invalid registration");
+        break;
+      default:
+        vlib_cli_output (vm, "Unknown error: %d", rv);
+ break;
+ }
+ return 0;
+ }
+
+ vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main(), sw_if_index);
+ return 0;
+}
+
+VLIB_CLI_COMMAND (tap_connect_command, static) = {
+ .path = "tap connect",
+ .short_help = "tap connect <intfc-name> [hwaddr <addr>]",
+ .function = tap_connect_command_fn,
+};
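+
+/*
+ * Illustrative CLI session (interface numbering depends on prior state):
+ *
+ *   vpp# tap connect tap0
+ *   tap-0
+ *   vpp# tap delete tap-0
+ *   Deleted.
+ */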
+
+/**
+ * @brief TAPCLI main init
+ *
+ * @param *vm - vlib_main_t
+ *
+ * @return error - clib_error_t
+ *
+ */
+clib_error_t *
+tapcli_init (vlib_main_t * vm)
+{
+ tapcli_main_t * tm = &tapcli_main;
+
+ tm->vlib_main = vm;
+ tm->vnet_main = vnet_get_main();
+ tm->unix_main = &unix_main;
+ tm->mtu_bytes = TAP_MTU_DEFAULT;
+ tm->tapcli_interface_index_by_sw_if_index = hash_create (0, sizeof(uword));
+ tm->tapcli_interface_index_by_unix_fd = hash_create (0, sizeof (uword));
+ tm->rx_buffers = 0;
+ vec_alloc(tm->rx_buffers, VLIB_FRAME_SIZE);
+ vec_reset_length(tm->rx_buffers);
+ vm->os_punt_frame = tapcli_nopunt_frame;
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (tapcli_init);
diff --git a/src/vnet/unix/tapcli.h b/src/vnet/unix/tapcli.h
new file mode 100644
index 00000000000..fcd82dbf25d
--- /dev/null
+++ b/src/vnet/unix/tapcli.h
@@ -0,0 +1,52 @@
+/*
+ * tapcli.h : tap support
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief TAPCLI definitions
+ */
+
+#ifndef __included_tapcli_h__
+#define __included_tapcli_h__
+
+/** TAP CLI errors */
+#define foreach_tapcli_error \
+ /* Must be first. */ \
+ _(NONE, "no error") \
+ _(READ, "read error") \
+ _(BUFFER_ALLOC, "buffer allocation error") \
+ _(UNKNOWN, "unknown error")
+
+typedef enum {
+#define _(sym,str) TAPCLI_ERROR_##sym,
+ foreach_tapcli_error
+#undef _
+ TAPCLI_N_ERROR,
+ } tapcli_error_t;
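+
+/*
+ * For reference, the _() expansion above yields:
+ *   TAPCLI_ERROR_NONE, TAPCLI_ERROR_READ, TAPCLI_ERROR_BUFFER_ALLOC,
+ *   TAPCLI_ERROR_UNKNOWN, and the count TAPCLI_N_ERROR.
+ */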
+
+/** TAP CLI interface details struct */
+typedef struct {
+ u32 sw_if_index;
+ u8 dev_name[64];
+} tapcli_interface_details_t;
+
+int vnet_tap_dump_ifs (tapcli_interface_details_t **out_tapids);
+
+#define TAP_MTU_MIN 68
+#define TAP_MTU_MAX 65535
+#define TAP_MTU_DEFAULT 1500
+
+#endif /* __included_tapcli_h__ */
diff --git a/src/vnet/unix/tuntap.c b/src/vnet/unix/tuntap.c
new file mode 100644
index 00000000000..4a5dd676a68
--- /dev/null
+++ b/src/vnet/unix/tuntap.c
@@ -0,0 +1,1000 @@
+/*
+ *------------------------------------------------------------------
+ * tuntap.c - kernel stack (reverse) punt/inject path
+ *
+ * Copyright (c) 2009 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+/**
+ * @file
+ * @brief TunTap Kernel stack (reverse) punt/inject path.
+ *
+ * This driver runs in one of two distinct modes:
+ * - "punt/inject" mode, where we send pkts not otherwise processed
+ * by the forwarding to the Linux kernel stack, and
+ *
+ * - "normal interface" mode, where we treat the Linux kernel stack
+ * as a peer.
+ *
+ * By default, we select punt/inject mode.
+ */
+
+#include <fcntl.h> /* for open */
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/uio.h> /* for iovec */
+#include <netinet/in.h>
+
+#include <linux/if_arp.h>
+#include <linux/if_tun.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+
+#include <vnet/ip/ip.h>
+
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/devices.h>
+#include <vnet/feature/feature.h>
+
+static vnet_device_class_t tuntap_dev_class;
+static vnet_hw_interface_class_t tuntap_interface_class;
+
+static void tuntap_punt_frame (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame);
+static void tuntap_nopunt_frame (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame);
+
+typedef struct {
+ u32 sw_if_index;
+ u8 is_v6;
+ u8 addr[16];
+} subif_address_t;
+
+/**
+ * @brief TUNTAP node main state
+ */
+typedef struct {
+ /** Vector of iovecs for readv/writev calls. */
+ struct iovec * iovecs;
+
+ /** Vector of VLIB rx buffers to use. We allocate them in blocks
+ of VLIB_FRAME_SIZE (256). */
+ u32 * rx_buffers;
+
+ /** File descriptors for /dev/net/tun and provisioning socket. */
+ int dev_net_tun_fd, dev_tap_fd;
+
+ /** Create a "tap" [ethernet] encaps device */
+ int is_ether;
+
+  /** 1 if a "normal" routed intfc, 0 if a punt/inject interface */
+  int have_normal_interface;
+
+ /** tap device destination MAC address. Required, or Linux drops pkts */
+ u8 ether_dst_mac[6];
+
+ /** Interface MTU in bytes and # of default sized buffers. */
+ u32 mtu_bytes, mtu_buffers;
+
+ /** Linux interface name for tun device. */
+ char * tun_name;
+
+ /** Pool of subinterface addresses */
+ subif_address_t *subifs;
+
+ /** Hash for subif addresses */
+ mhash_t subif_mhash;
+
+ /** Unix file index */
+ u32 unix_file_index;
+
+ /** For the "normal" interface, if configured */
+ u32 hw_if_index, sw_if_index;
+
+} tuntap_main_t;
+
+static tuntap_main_t tuntap_main = {
+ .tun_name = "vnet",
+
+ /** Suitable defaults for an Ethernet-like tun/tap device */
+ .mtu_bytes = 4096 + 256,
+};
+
+/**
+ * @brief tuntap_tx
+ * @node tuntap-tx
+ *
+ * Output node, writes the buffers comprising the incoming frame
+ * to the tun/tap device, aka hands them to the Linux kernel stack.
+ *
+ * @param *vm - vlib_main_t
+ * @param *node - vlib_node_runtime_t
+ * @param *frame - vlib_frame_t
+ *
+ * @return rc - uword
+ *
+ */
+static uword
+tuntap_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 * buffers = vlib_frame_args (frame);
+ uword n_packets = frame->n_vectors;
+ tuntap_main_t * tm = &tuntap_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ u32 n_bytes = 0;
+ int i;
+
+ for (i = 0; i < n_packets; i++)
+ {
+ struct iovec * iov;
+ vlib_buffer_t * b;
+ uword l;
+
+ b = vlib_get_buffer (vm, buffers[i]);
+
+ if (tm->is_ether && (!tm->have_normal_interface))
+ {
+ vlib_buffer_reset(b);
+ clib_memcpy (vlib_buffer_get_current (b), tm->ether_dst_mac, 6);
+ }
+
+ /* Re-set iovecs if present. */
+ if (tm->iovecs)
+ _vec_len (tm->iovecs) = 0;
+
+ /** VLIB buffer chain -> Unix iovec(s). */
+ vec_add2 (tm->iovecs, iov, 1);
+ iov->iov_base = b->data + b->current_data;
+ iov->iov_len = l = b->current_length;
+
+ if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ do {
+ b = vlib_get_buffer (vm, b->next_buffer);
+
+ vec_add2 (tm->iovecs, iov, 1);
+
+ iov->iov_base = b->data + b->current_data;
+ iov->iov_len = b->current_length;
+ l += b->current_length;
+ } while (b->flags & VLIB_BUFFER_NEXT_PRESENT);
+ }
+
+ if (writev (tm->dev_net_tun_fd, tm->iovecs, vec_len (tm->iovecs)) < l)
+ clib_unix_warning ("writev");
+
+ n_bytes += l;
+ }
+
+ /* Update tuntap interface output stats. */
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_TX,
+ vm->cpu_index,
+ tm->sw_if_index, n_packets, n_bytes);
+
+
+ /** The normal interface path flattens the buffer chain */
+ if (tm->have_normal_interface)
+ vlib_buffer_free_no_next (vm, buffers, n_packets);
+ else
+ vlib_buffer_free (vm, buffers, n_packets);
+
+ return n_packets;
+}
+
+VLIB_REGISTER_NODE (tuntap_tx_node,static) = {
+ .function = tuntap_tx,
+ .name = "tuntap-tx",
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .vector_size = 4,
+};
+
+/**
+ * @brief TUNTAP receive node
+ * @node tuntap-rx
+ *
+ * @param *vm - vlib_main_t
+ * @param *node - vlib_node_runtime_t
+ * @param *frame - vlib_frame_t
+ *
+ * @return rc - uword
+ *
+ */
+static uword
+tuntap_rx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ tuntap_main_t * tm = &tuntap_main;
+ vlib_buffer_t * b;
+ u32 bi;
+ const uword buffer_size = VLIB_BUFFER_DATA_SIZE;
+
+ /** Make sure we have some RX buffers. */
+ {
+ uword n_left = vec_len (tm->rx_buffers);
+ uword n_alloc;
+
+ if (n_left < VLIB_FRAME_SIZE / 2)
+ {
+ if (! tm->rx_buffers)
+ vec_alloc (tm->rx_buffers, VLIB_FRAME_SIZE);
+
+ n_alloc = vlib_buffer_alloc (vm, tm->rx_buffers + n_left, VLIB_FRAME_SIZE - n_left);
+ _vec_len (tm->rx_buffers) = n_left + n_alloc;
+ }
+ }
+
+ /** Allocate RX buffers from end of rx_buffers.
+ Turn them into iovecs to pass to readv. */
+ {
+ uword i_rx = vec_len (tm->rx_buffers) - 1;
+ vlib_buffer_t * b;
+ word i, n_bytes_left, n_bytes_in_packet;
+
+ /** We should have enough buffers left for an MTU sized packet. */
+ ASSERT (vec_len (tm->rx_buffers) >= tm->mtu_buffers);
+
+ vec_validate (tm->iovecs, tm->mtu_buffers - 1);
+ for (i = 0; i < tm->mtu_buffers; i++)
+ {
+ b = vlib_get_buffer (vm, tm->rx_buffers[i_rx - i]);
+ tm->iovecs[i].iov_base = b->data;
+ tm->iovecs[i].iov_len = buffer_size;
+ }
+
+ n_bytes_left = readv (tm->dev_net_tun_fd, tm->iovecs, tm->mtu_buffers);
+ n_bytes_in_packet = n_bytes_left;
+ if (n_bytes_left <= 0)
+ {
+ if (errno != EAGAIN)
+ clib_unix_warning ("readv %d", n_bytes_left);
+ return 0;
+ }
+
+ bi = tm->rx_buffers[i_rx];
+
+ while (1)
+ {
+ b = vlib_get_buffer (vm, tm->rx_buffers[i_rx]);
+ b->flags = 0;
+ b->current_data = 0;
+ b->current_length = n_bytes_left < buffer_size ? n_bytes_left : buffer_size;
+
+ n_bytes_left -= buffer_size;
+
+ if (n_bytes_left <= 0)
+ {
+ break;
+ }
+
+ i_rx--;
+ b->flags |= VLIB_BUFFER_NEXT_PRESENT;
+ b->next_buffer = tm->rx_buffers[i_rx];
+ }
+
+ /** Interface counters for tuntap interface. */
+ vlib_increment_combined_counter
+ (vnet_main.interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ os_get_cpu_number(),
+ tm->sw_if_index,
+ 1, n_bytes_in_packet);
+
+ _vec_len (tm->rx_buffers) = i_rx;
+ }
+
+ b = vlib_get_buffer (vm, bi);
+
+ {
+ u32 next_index;
+ uword n_trace = vlib_get_trace_count (vm, node);
+
+ vnet_buffer (b)->sw_if_index[VLIB_RX] = tm->sw_if_index;
+ vnet_buffer (b)->sw_if_index[VLIB_TX] = (u32)~0;
+
+ /*
+ * Turn this on if you run into
+ * "bad monkey" contexts, and you want to know exactly
+ * which nodes they've visited...
+ */
+ if (VLIB_BUFFER_TRACE_TRAJECTORY)
+ b->pre_data[0] = 0;
+
+ b->error = node->errors[0];
+
+ if (tm->is_ether)
+ {
+ next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ }
+ else
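+        /* tun (L3) mode: no ethernet header present; dispatch on the IP version nibble */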
+ switch (b->data[0] & 0xf0)
+ {
+ case 0x40:
+ next_index = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
+ break;
+ case 0x60:
+ next_index = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
+ break;
+ default:
+ next_index = VNET_DEVICE_INPUT_NEXT_DROP;
+ break;
+ }
+
+ /* The linux kernel couldn't care less if our interface is up */
+ if (tm->have_normal_interface)
+ {
+ vnet_main_t *vnm = vnet_get_main();
+ vnet_sw_interface_t * si;
+ si = vnet_get_sw_interface (vnm, tm->sw_if_index);
+ if (!(si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP))
+ next_index = VNET_DEVICE_INPUT_NEXT_DROP;
+ }
+
+ vnet_feature_start_device_input_x1 (tm->sw_if_index, &next_index, b, 0);
+
+ vlib_set_next_frame_buffer (vm, node, next_index, bi);
+
+ if (n_trace > 0)
+ {
+ vlib_trace_buffer (vm, node, next_index,
+ b, /* follow_chain */ 1);
+ vlib_set_trace_count (vm, node, n_trace - 1);
+ }
+ }
+
+ return 1;
+}
+
+/**
+ * @brief TUNTAP_RX error strings
+ */
+static char * tuntap_rx_error_strings[] = {
+ "unknown packet type",
+};
+
+VLIB_REGISTER_NODE (tuntap_rx_node,static) = {
+ .function = tuntap_rx,
+ .name = "tuntap-rx",
+ .sibling_of = "device-input",
+ .type = VLIB_NODE_TYPE_INPUT,
+ .state = VLIB_NODE_STATE_INTERRUPT,
+ .vector_size = 4,
+ .n_errors = 1,
+ .error_strings = tuntap_rx_error_strings,
+};
+
+/**
+ * @brief Gets called when file descriptor is ready from epoll.
+ *
+ * @param *uf - unix_file_t
+ *
+ * @return error - clib_error_t
+ */
+static clib_error_t * tuntap_read_ready (unix_file_t * uf)
+{
+ vlib_main_t * vm = vlib_get_main();
+ vlib_node_set_interrupt_pending (vm, tuntap_rx_node.index);
+ return 0;
+}
+
+/**
+ * @brief Clean up the tun/tap device
+ *
+ * @param *vm - vlib_main_t
+ *
+ * @return error - clib_error_t
+ *
+ */
+static clib_error_t *
+tuntap_exit (vlib_main_t * vm)
+{
+ tuntap_main_t *tm = &tuntap_main;
+ struct ifreq ifr;
+ int sfd;
+
+ /* Not present. */
+ if (! tm->dev_net_tun_fd || tm->dev_net_tun_fd < 0)
+ return 0;
+
+ sfd = socket (AF_INET, SOCK_STREAM, 0);
+ if (sfd < 0)
+ clib_unix_warning("provisioning socket");
+
+ memset(&ifr, 0, sizeof (ifr));
+ strncpy (ifr.ifr_name, tm->tun_name, sizeof (ifr.ifr_name)-1);
+
+ /* get flags, modify to bring down interface... */
+ if (ioctl (sfd, SIOCGIFFLAGS, &ifr) < 0)
+ clib_unix_warning ("SIOCGIFFLAGS");
+
+ ifr.ifr_flags &= ~(IFF_UP | IFF_RUNNING);
+
+ if (ioctl (sfd, SIOCSIFFLAGS, &ifr) < 0)
+ clib_unix_warning ("SIOCSIFFLAGS");
+
+ /* Turn off persistence */
+ if (ioctl (tm->dev_net_tun_fd, TUNSETPERSIST, 0) < 0)
+ clib_unix_warning ("TUNSETPERSIST");
+ close(tm->dev_tap_fd);
+ if (tm->dev_net_tun_fd >= 0)
+ close(tm->dev_net_tun_fd);
+ if (sfd >= 0)
+ close (sfd);
+
+ return 0;
+}
+
+VLIB_MAIN_LOOP_EXIT_FUNCTION (tuntap_exit);
+
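+/*
+ * Example startup.conf stanza for "normal interface" mode (illustrative;
+ * assumes the function below is registered as the "tuntap" configuration
+ * section):
+ *
+ *   tuntap {
+ *     enable
+ *     ethernet
+ *     have-normal-interface
+ *     name vpp1
+ *     mtu 9000
+ *   }
+ */
+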
+/**
+ * @brief CLI function for tun/tap config
+ *
+ * @param *vm - vlib_main_t
+ * @param *input - unformat_input_t
+ *
+ * @return error - clib_error_t
+ *
+ */
+static clib_error_t *
+tuntap_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ tuntap_main_t *tm = &tuntap_main;
+ clib_error_t * error = 0;
+ struct ifreq ifr;
+ u8 * name;
+ int flags = IFF_TUN | IFF_NO_PI;
+ int is_enabled = 0, is_ether = 0, have_normal_interface = 0;
+ const uword buffer_size = VLIB_BUFFER_DATA_SIZE;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "mtu %d", &tm->mtu_bytes))
+ ;
+ else if (unformat (input, "enable"))
+ is_enabled = 1;
+ else if (unformat (input, "disable"))
+ is_enabled = 0;
+ else if (unformat (input, "ethernet") ||
+ unformat (input, "ether"))
+ is_ether = 1;
+ else if (unformat (input, "have-normal-interface") ||
+ unformat (input, "have-normal"))
+ have_normal_interface = 1;
+ else if (unformat (input, "name %s", &name))
+ tm->tun_name = (char *) name;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ tm->dev_net_tun_fd = -1;
+ tm->dev_tap_fd = -1;
+
+ if (is_enabled == 0)
+ return 0;
+
+ if (geteuid())
+ {
+ clib_warning ("tuntap disabled: must be superuser");
+ return 0;
+ }
+
+ tm->is_ether = is_ether;
+ tm->have_normal_interface = have_normal_interface;
+
+ if (is_ether)
+ flags = IFF_TAP | IFF_NO_PI;
+
+ if ((tm->dev_net_tun_fd = open ("/dev/net/tun", O_RDWR)) < 0)
+ {
+ error = clib_error_return_unix (0, "open /dev/net/tun");
+ goto done;
+ }
+
+ memset (&ifr, 0, sizeof (ifr));
+ strncpy(ifr.ifr_name, tm->tun_name, sizeof(ifr.ifr_name)-1);
+ ifr.ifr_flags = flags;
+ if (ioctl (tm->dev_net_tun_fd, TUNSETIFF, (void *)&ifr) < 0)
+ {
+ error = clib_error_return_unix (0, "ioctl TUNSETIFF");
+ goto done;
+ }
+
+ /* Make it persistent, at least until we split. */
+ if (ioctl (tm->dev_net_tun_fd, TUNSETPERSIST, 1) < 0)
+ {
+ error = clib_error_return_unix (0, "TUNSETPERSIST");
+ goto done;
+ }
+
+ /* Open a provisioning socket */
+ if ((tm->dev_tap_fd = socket(PF_PACKET, SOCK_RAW,
+ htons(ETH_P_ALL))) < 0 )
+ {
+ error = clib_error_return_unix (0, "socket");
+ goto done;
+ }
+
+ /* Find the interface index. */
+ {
+ struct ifreq ifr;
+ struct sockaddr_ll sll;
+
+ memset (&ifr, 0, sizeof(ifr));
+ strncpy (ifr.ifr_name, tm->tun_name, sizeof(ifr.ifr_name)-1);
+ if (ioctl (tm->dev_tap_fd, SIOCGIFINDEX, &ifr) < 0 )
+ {
+ error = clib_error_return_unix (0, "ioctl SIOCGIFINDEX");
+ goto done;
+ }
+
+ /* Bind the provisioning socket to the interface. */
+ memset(&sll, 0, sizeof(sll));
+ sll.sll_family = AF_PACKET;
+ sll.sll_ifindex = ifr.ifr_ifindex;
+ sll.sll_protocol = htons(ETH_P_ALL);
+
+ if (bind(tm->dev_tap_fd, (struct sockaddr*) &sll, sizeof(sll)) < 0)
+ {
+ error = clib_error_return_unix (0, "bind");
+ goto done;
+ }
+ }
+
+ /* non-blocking I/O on /dev/tapX */
+ {
+ int one = 1;
+ if (ioctl (tm->dev_net_tun_fd, FIONBIO, &one) < 0)
+ {
+ error = clib_error_return_unix (0, "ioctl FIONBIO");
+ goto done;
+ }
+ }
+
+ tm->mtu_buffers = (tm->mtu_bytes + (buffer_size - 1)) / buffer_size;
+
+ ifr.ifr_mtu = tm->mtu_bytes;
+ if (ioctl (tm->dev_tap_fd, SIOCSIFMTU, &ifr) < 0)
+ {
+ error = clib_error_return_unix (0, "ioctl SIOCSIFMTU");
+ goto done;
+ }
+
+ /* get flags, modify to bring up interface... */
+ if (ioctl (tm->dev_tap_fd, SIOCGIFFLAGS, &ifr) < 0)
+ {
+ error = clib_error_return_unix (0, "ioctl SIOCGIFFLAGS");
+ goto done;
+ }
+
+ ifr.ifr_flags |= (IFF_UP | IFF_RUNNING);
+
+ if (ioctl (tm->dev_tap_fd, SIOCSIFFLAGS, &ifr) < 0)
+ {
+ error = clib_error_return_unix (0, "ioctl SIOCSIFFLAGS");
+ goto done;
+ }
+
+ if (is_ether)
+ {
+ if (ioctl (tm->dev_tap_fd, SIOCGIFHWADDR, &ifr) < 0)
+ {
+ error = clib_error_return_unix (0, "ioctl SIOCGIFHWADDR");
+ goto done;
+ }
+ else
+ clib_memcpy (tm->ether_dst_mac, ifr.ifr_hwaddr.sa_data, 6);
+ }
+
+ if (have_normal_interface)
+ {
+ vnet_main_t *vnm = vnet_get_main();
+ error = ethernet_register_interface
+ (vnm,
+ tuntap_dev_class.index,
+ 0 /* device instance */,
+ tm->ether_dst_mac /* ethernet address */,
+ &tm->hw_if_index,
+ 0 /* flag change */);
+ if (error)
+ clib_error_report (error);
+ tm->sw_if_index = tm->hw_if_index;
+ vm->os_punt_frame = tuntap_nopunt_frame;
+ }
+ else
+ {
+ vnet_main_t *vnm = vnet_get_main();
+ vnet_hw_interface_t * hi;
+
+ vm->os_punt_frame = tuntap_punt_frame;
+
+ tm->hw_if_index = vnet_register_interface
+ (vnm,
+ tuntap_dev_class.index, 0 /* device instance */,
+ tuntap_interface_class.index, 0);
+ hi = vnet_get_hw_interface (vnm, tm->hw_if_index);
+ tm->sw_if_index = hi->sw_if_index;
+
+ /* Interface is always up. */
+ vnet_hw_interface_set_flags (vnm, tm->hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+ vnet_sw_interface_set_flags (vnm, tm->sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ }
+
+ {
+ unix_file_t template = {0};
+ template.read_function = tuntap_read_ready;
+ template.file_descriptor = tm->dev_net_tun_fd;
+ tm->unix_file_index = unix_file_add (&unix_main, &template);
+ }
+
+ done:
+ if (error)
+ {
+ if (tm->dev_net_tun_fd >= 0)
+ close (tm->dev_net_tun_fd);
+ if (tm->dev_tap_fd >= 0)
+ close (tm->dev_tap_fd);
+ }
+
+ return error;
+}
+
+VLIB_CONFIG_FUNCTION (tuntap_config, "tuntap");
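+
+/* Illustrative startup.conf stanza for the parser above (the option names
+ * come straight from the unformat calls in tuntap_config; the values are
+ * made up):
+ *
+ *   tuntap {
+ *     enable
+ *     ethernet
+ *     name vpp-tap
+ *     mtu 9000
+ *     have-normal-interface
+ *   }
+ */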
+
+/**
+ * @brief Add or delete an IP4 address on the tun/tap interface
+ *
+ * @param *im - ip4_main_t
+ * @param opaque - uword
+ * @param sw_if_index - u32
+ * @param *address - ip4_address_t
+ * @param address_length - u32
+ * @param if_address_index - u32
+ * @param is_delete - u32
+ *
+ */
+void
+tuntap_ip4_add_del_interface_address (ip4_main_t * im,
+ uword opaque,
+ u32 sw_if_index,
+ ip4_address_t * address,
+ u32 address_length,
+ u32 if_address_index,
+ u32 is_delete)
+{
+ tuntap_main_t * tm = &tuntap_main;
+ struct ifreq ifr;
+ subif_address_t subif_addr, * ap;
+ uword * p;
+
+ /* Tuntap disabled, or using a "normal" interface. */
+ if (tm->have_normal_interface || tm->dev_tap_fd < 0)
+ return;
+
+ /* See if we already know about this subif */
+ memset (&subif_addr, 0, sizeof (subif_addr));
+ subif_addr.sw_if_index = sw_if_index;
+ clib_memcpy (&subif_addr.addr, address, sizeof (*address));
+
+ p = mhash_get (&tm->subif_mhash, &subif_addr);
+
+ if (p)
+ ap = pool_elt_at_index (tm->subifs, p[0]);
+ else
+ {
+ pool_get (tm->subifs, ap);
+ *ap = subif_addr;
+ mhash_set (&tm->subif_mhash, ap, ap - tm->subifs, 0);
+ }
+
+ /* Use subif pool index to select alias device. */
+ memset (&ifr, 0, sizeof (ifr));
+ snprintf (ifr.ifr_name, sizeof(ifr.ifr_name),
+ "%s:%d", tm->tun_name, (int)(ap - tm->subifs));
+
+ /* the tuntap punt/inject is enabled for IPv4 RX so long as
+ * any vpp interface has an IPv4 address.
+ * this is also ref counted.
+ */
+ ip4_sw_interface_enable_disable (tm->sw_if_index, !is_delete);
+
+ if (! is_delete)
+ {
+ struct sockaddr_in * sin;
+
+ sin = (struct sockaddr_in *)&ifr.ifr_addr;
+
+ /* Set ipv4 address, netmask. */
+ sin->sin_family = AF_INET;
+ clib_memcpy (&sin->sin_addr.s_addr, address, 4);
+ if (ioctl (tm->dev_tap_fd, SIOCSIFADDR, &ifr) < 0)
+ clib_unix_warning ("ioctl SIOCSIFADDR");
+
+ sin->sin_addr.s_addr = im->fib_masks[address_length];
+ if (ioctl (tm->dev_tap_fd, SIOCSIFNETMASK, &ifr) < 0)
+ clib_unix_warning ("ioctl SIOCSIFNETMASK");
+ }
+ else
+ {
+ mhash_unset (&tm->subif_mhash, &subif_addr, 0 /* old value ptr */);
+ pool_put (tm->subifs, ap);
+ }
+
+ /* get flags, modify to bring up interface... */
+ if (ioctl (tm->dev_tap_fd, SIOCGIFFLAGS, &ifr) < 0)
+ clib_unix_warning ("ioctl SIOCGIFFLAGS");
+
+ if (is_delete)
+ ifr.ifr_flags &= ~(IFF_UP | IFF_RUNNING);
+ else
+ ifr.ifr_flags |= (IFF_UP | IFF_RUNNING);
+
+ if (ioctl (tm->dev_tap_fd, SIOCSIFFLAGS, &ifr) < 0)
+ clib_unix_warning ("ioctl SIOCSIFFLAGS");
+}
+
+/**
+ * @brief Workaround for a known include file bug:
+ * including @c <linux/ipv6.h> causes multiple definitions if
+ * @c <netinet/in.h> is also included.
+ */
+struct in6_ifreq {
+ struct in6_addr ifr6_addr;
+ u32 ifr6_prefixlen;
+ int ifr6_ifindex;
+};
+
+/**
+ * @brief Add or delete an IP6 address on the tun/tap interface.
+ *
+ * Both the v6 interface address API and the way ifconfig
+ * displays subinterfaces differ from their v4 counterparts.
+ * The code given here seems to work, but YMMV.
+ *
+ * @param *im - ip6_main_t
+ * @param opaque - uword
+ * @param sw_if_index - u32
+ * @param *address - ip6_address_t
+ * @param address_length - u32
+ * @param if_address_index - u32
+ * @param is_delete - u32
+ */
+void
+tuntap_ip6_add_del_interface_address (ip6_main_t * im,
+ uword opaque,
+ u32 sw_if_index,
+ ip6_address_t * address,
+ u32 address_length,
+ u32 if_address_index,
+ u32 is_delete)
+{
+ tuntap_main_t * tm = &tuntap_main;
+ struct ifreq ifr;
+ struct in6_ifreq ifr6;
+ subif_address_t subif_addr, * ap;
+ uword * p;
+
+ /* Tuntap disabled, or using a "normal" interface. */
+ if (tm->have_normal_interface || tm->dev_tap_fd < 0)
+ return;
+
+ /* See if we already know about this subif */
+ memset (&subif_addr, 0, sizeof (subif_addr));
+ subif_addr.sw_if_index = sw_if_index;
+ subif_addr.is_v6 = 1;
+ clib_memcpy (&subif_addr.addr, address, sizeof (*address));
+
+ p = mhash_get (&tm->subif_mhash, &subif_addr);
+
+ if (p)
+ ap = pool_elt_at_index (tm->subifs, p[0]);
+ else
+ {
+ pool_get (tm->subifs, ap);
+ *ap = subif_addr;
+ mhash_set (&tm->subif_mhash, ap, ap - tm->subifs, 0);
+ }
+
+ /* Use subif pool index to select alias device. */
+ memset (&ifr, 0, sizeof (ifr));
+ memset (&ifr6, 0, sizeof (ifr6));
+ snprintf (ifr.ifr_name, sizeof(ifr.ifr_name),
+ "%s:%d", tm->tun_name, (int)(ap - tm->subifs));
+
+ /* the tuntap punt/inject is enabled for IPv6 RX so long as
+ * any vpp interface has an IPv6 address.
+ * this is also ref counted.
+ */
+ ip6_sw_interface_enable_disable (tm->sw_if_index, !is_delete);
+
+ if (! is_delete)
+ {
+ int sockfd = socket (AF_INET6, SOCK_STREAM, 0);
+ if (sockfd < 0)
+ clib_unix_warning ("get ifindex socket");
+
+ if (ioctl (sockfd, SIOGIFINDEX, &ifr) < 0)
+ clib_unix_warning ("get ifindex");
+
+ ifr6.ifr6_ifindex = ifr.ifr_ifindex;
+ ifr6.ifr6_prefixlen = address_length;
+ clib_memcpy (&ifr6.ifr6_addr, address, 16);
+
+ if (ioctl (sockfd, SIOCSIFADDR, &ifr6) < 0)
+ clib_unix_warning ("set address");
+
+ if (sockfd >= 0)
+ close (sockfd);
+ }
+ else
+ {
+ int sockfd = socket (AF_INET6, SOCK_STREAM, 0);
+ if (sockfd < 0)
+ clib_unix_warning ("get ifindex socket");
+
+ if (ioctl (sockfd, SIOGIFINDEX, &ifr) < 0)
+ clib_unix_warning ("get ifindex");
+
+ ifr6.ifr6_ifindex = ifr.ifr_ifindex;
+ ifr6.ifr6_prefixlen = address_length;
+ clib_memcpy (&ifr6.ifr6_addr, address, 16);
+
+ if (ioctl (sockfd, SIOCDIFADDR, &ifr6) < 0)
+ clib_unix_warning ("del address");
+
+ if (sockfd >= 0)
+ close (sockfd);
+
+ mhash_unset (&tm->subif_mhash, &subif_addr, 0 /* old value ptr */);
+ pool_put (tm->subifs, ap);
+ }
+}
+
+/**
+ * @brief TX the tun/tap frame
+ *
+ * @param *vm - vlib_main_t
+ * @param *node - vlib_node_runtime_t
+ * @param *frame - vlib_frame_t
+ *
+ */
+static void
+tuntap_punt_frame (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ tuntap_tx (vm, node, frame);
+ vlib_frame_free (vm, node, frame);
+}
+
+/**
+ * @brief Free the tun/tap frame
+ *
+ * @param *vm - vlib_main_t
+ * @param *node - vlib_node_runtime_t
+ * @param *frame - vlib_frame_t
+ *
+ */
+static void
+tuntap_nopunt_frame (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 * buffers = vlib_frame_args (frame);
+ uword n_packets = frame->n_vectors;
+ vlib_buffer_free (vm, buffers, n_packets);
+ vlib_frame_free (vm, node, frame);
+}
+
+VNET_HW_INTERFACE_CLASS (tuntap_interface_class,static) = {
+ .name = "tuntap",
+ .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
+};
+
+/**
+ * @brief Format tun/tap interface name
+ *
+ * @param *s - u8 - formatter string
+ * @param *args - va_list
+ *
+ * @return *s - u8 - formatted string
+ *
+ */
+static u8 * format_tuntap_interface_name (u8 * s, va_list * args)
+{
+ u32 i = va_arg (*args, u32);
+
+ s = format (s, "tuntap-%d", i);
+ return s;
+}
+
+/**
+ * @brief TX packet out tun/tap
+ *
+ * @param *vm - vlib_main_t
+ * @param *node - vlib_node_runtime_t
+ * @param *frame - vlib_frame_t
+ *
+ * @return n_buffers - uword - Packets transmitted
+ *
+ */
+static uword
+tuntap_intfc_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ tuntap_main_t * tm = &tuntap_main;
+ u32 * buffers = vlib_frame_args (frame);
+ uword n_buffers = frame->n_vectors;
+
+ /* Normal interface transmit happens only on the normal interface... */
+ if (tm->have_normal_interface)
+ return tuntap_tx (vm, node, frame);
+
+ vlib_buffer_free (vm, buffers, n_buffers);
+ return n_buffers;
+}
+
+VNET_DEVICE_CLASS (tuntap_dev_class,static) = {
+ .name = "tuntap",
+ .tx_function = tuntap_intfc_tx,
+ .format_device_name = format_tuntap_interface_name,
+};
+
+/**
+ * @brief tun/tap node init
+ *
+ * @param *vm - vlib_main_t
+ *
+ * @return error - clib_error_t
+ *
+ */
+static clib_error_t *
+tuntap_init (vlib_main_t * vm)
+{
+ clib_error_t * error;
+ ip4_main_t * im4 = &ip4_main;
+ ip6_main_t * im6 = &ip6_main;
+ ip4_add_del_interface_address_callback_t cb4;
+ ip6_add_del_interface_address_callback_t cb6;
+ tuntap_main_t * tm = &tuntap_main;
+
+ error = vlib_call_init_function (vm, ip4_init);
+ if (error)
+ return error;
+
+ mhash_init (&tm->subif_mhash, sizeof (u32), sizeof(subif_address_t));
+
+ cb4.function = tuntap_ip4_add_del_interface_address;
+ cb4.function_opaque = 0;
+ vec_add1 (im4->add_del_interface_address_callbacks, cb4);
+
+ cb6.function = tuntap_ip6_add_del_interface_address;
+ cb6.function_opaque = 0;
+ vec_add1 (im6->add_del_interface_address_callbacks, cb6);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (tuntap_init);
diff --git a/src/vnet/unix/tuntap.h b/src/vnet/unix/tuntap.h
new file mode 100644
index 00000000000..d7f96caeaf0
--- /dev/null
+++ b/src/vnet/unix/tuntap.h
@@ -0,0 +1,36 @@
+/*
+ *------------------------------------------------------------------
+ * tuntap.h - kernel stack (reverse) punt/inject path
+ *
+ * Copyright (c) 2009 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+/**
+ * @file
+ * @brief Call from VLIB_INIT_FUNCTION to set the Linux kernel inject node name.
+ */
+void register_tuntap_inject_node_name (char *name);
+
+int vnet_tap_connect (vlib_main_t * vm, u8 * intfc_name,
+ u8 *hwaddr_arg, u32 * sw_if_indexp);
+int vnet_tap_connect_renumber (vlib_main_t * vm, u8 * intfc_name,
+ u8 *hwaddr_arg, u32 * sw_if_indexp,
+ u8 renumber, u32 custom_dev_instance);
+
+int vnet_tap_delete(vlib_main_t *vm, u32 sw_if_index);
+
+int vnet_tap_modify (vlib_main_t * vm, u32 orig_sw_if_index,
+ u8 * intfc_name, u8 *hwaddr_arg,
+ u32 * sw_if_indexp,
+ u8 renumber, u32 custom_dev_instance);
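+
+/* Minimal usage sketch (assumes an API/CLI handler context with a valid
+ * vlib_main_t; the interface name is illustrative, and the 0 hwaddr
+ * argument asks the code to pick a MAC):
+ *
+ *   u32 sw_if_index = ~0;
+ *   int rv = vnet_tap_connect (vm, (u8 *) "tap-0", 0, &sw_if_index);
+ *   if (rv == 0)
+ *     clib_warning ("created tap, sw_if_index %d", sw_if_index);
+ */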
diff --git a/src/vnet/vnet.h b/src/vnet/vnet.h
new file mode 100644
index 00000000000..5a8ae858678
--- /dev/null
+++ b/src/vnet/vnet.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * vnet.h: general networking definitions
+ *
+ * Copyright (c) 2011 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_vnet_vnet_h
+#define included_vnet_vnet_h
+
+#include <vppinfra/types.h>
+
+#include <vnet/unix/pcap.h>
+#include <vnet/buffer.h>
+#include <vnet/config.h>
+#include <vnet/interface.h>
+#include <vnet/rewrite.h>
+#include <vnet/api_errno.h>
+
+typedef struct vnet_main_t
+{
+ u32 local_interface_hw_if_index;
+ u32 local_interface_sw_if_index;
+
+ vnet_interface_main_t interface_main;
+
+ /* set up by constructors */
+ vnet_device_class_t *device_class_registrations;
+ vnet_hw_interface_class_t *hw_interface_class_registrations;
+ _vnet_interface_function_list_elt_t
+ * hw_interface_add_del_functions[VNET_ITF_FUNC_N_PRIO];
+ _vnet_interface_function_list_elt_t
+ * hw_interface_link_up_down_functions[VNET_ITF_FUNC_N_PRIO];
+ _vnet_interface_function_list_elt_t
+ * sw_interface_add_del_functions[VNET_ITF_FUNC_N_PRIO];
+ _vnet_interface_function_list_elt_t
+ * sw_interface_admin_up_down_functions[VNET_ITF_FUNC_N_PRIO];
+
+ uword *interface_tag_by_sw_if_index;
+
+ /*
+ * Last "api" error, preserved so we can issue reasonable diagnostics
+ * at or near the top of the food chain
+ */
+ vnet_api_error_t api_errno;
+
+ vlib_main_t *vlib_main;
+} vnet_main_t;
+
+vnet_main_t vnet_main;
+vnet_main_t **vnet_mains;
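+
+/* Typical access pattern (a sketch; vnet_get_main() is provided by the
+ * function headers included just below):
+ *
+ *   vnet_main_t *vnm = vnet_get_main ();
+ *   vnet_sw_interface_t *si = vnet_get_sw_interface (vnm, sw_if_index);
+ */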
+
+#include <vnet/interface_funcs.h>
+#include <vnet/global_funcs.h>
+
+#endif /* included_vnet_vnet_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/vnet_all_api_h.h b/src/vnet/vnet_all_api_h.h
new file mode 100644
index 00000000000..1b4d6c45ef2
--- /dev/null
+++ b/src/vnet/vnet_all_api_h.h
@@ -0,0 +1,57 @@
+/*
+ *------------------------------------------------------------------
+ * vnet_all_api_h.h - memory API headers, in a specific order.
+ *
+ * Copyright (c) 2009-2010 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+/*
+ * Add to the bottom of the #include list, or elves will steal your
+ * keyboard in the middle of the night!
+ *
+ * Include current layer (2) last, or an artistic disagreement
+ * about message numbering will occur
+ */
+
+#ifndef included_from_layer_3
+#include <vlibmemory/vl_memory_api_h.h>
+#endif /* included_from_layer_3 */
+
+#include <vnet/devices/af_packet/af_packet.api.h>
+#include <vnet/devices/netmap/netmap.api.h>
+#include <vnet/devices/virtio/vhost_user.api.h>
+#include <vnet/gre/gre.api.h>
+#include <vnet/interface.api.h>
+#include <vnet/map/map.api.h>
+#include <vnet/l2/l2.api.h>
+#include <vnet/l2tp/l2tp.api.h>
+#include <vnet/span/span.api.h>
+#include <vnet/ip/ip.api.h>
+#include <vnet/unix/tap.api.h>
+#include <vnet/vxlan/vxlan.api.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.api.h>
+#include <vnet/bfd/bfd.api.h>
+#include <vnet/ipsec/ipsec.api.h>
+#include <vnet/ipsec-gre/ipsec_gre.api.h>
+#include <vnet/lisp-cp/lisp.api.h>
+#include <vnet/lisp-gpe/lisp_gpe.api.h>
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/vnet_msg_enum.h b/src/vnet/vnet_msg_enum.h
new file mode 100644
index 00000000000..9899471ecd3
--- /dev/null
+++ b/src/vnet/vnet_msg_enum.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vnet_msg_enum_h
+#define included_vnet_msg_enum_h
+
+#include <vppinfra/byte_order.h>
+
+#define vl_msg_id(n,h) n,
+typedef enum
+{
+ VL_ILLEGAL_MESSAGE_ID = 0,
+#include <vnet/vnet_all_api_h.h>
+ VL_MSG_FIRST_AVAILABLE,
+} vl_msg_id_t;
+#undef vl_msg_id
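+
+/* How the x-macro expands (the message name here is illustrative): a line
+ * such as
+ *
+ *   vl_msg_id(VL_API_SW_INTERFACE_SET_FLAGS, vl_api_sw_interface_set_flags)
+ *
+ * in one of the included .api.h headers becomes a single enum member above,
+ * so message IDs are assigned strictly in #include order -- hence the
+ * ordering rules spelled out in vnet_all_api_h.h. */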
+
+#endif /* included_vnet_msg_enum_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/vxlan-gpe/decap.c b/src/vnet/vxlan-gpe/decap.c
new file mode 100644
index 00000000000..22ab4b62f66
--- /dev/null
+++ b/src/vnet/vxlan-gpe/decap.c
@@ -0,0 +1,733 @@
+/*
+ * decap.c - decapsulate VXLAN GPE
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief Functions for decapsulating VXLAN GPE tunnels
+ *
+*/
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+
+vlib_node_registration_t vxlan_gpe_input_node;
+
+/**
+ * @brief Struct for VXLAN GPE decap packet tracing
+ *
+ */
+typedef struct {
+ u32 next_index;
+ u32 tunnel_index;
+ u32 error;
+} vxlan_gpe_rx_trace_t;
+
+/**
+ * @brief Tracing function for VXLAN GPE packet decapsulation
+ *
+ * @param *s
+ * @param *args
+ *
+ * @return *s
+ *
+ */
+static u8 * format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ vxlan_gpe_rx_trace_t * t = va_arg (*args, vxlan_gpe_rx_trace_t *);
+
+ if (t->tunnel_index != ~0)
+ {
+ s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
+ t->next_index, t->error);
+ }
+ else
+ {
+ s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
+ t->error);
+ }
+ return s;
+}
+
+/**
+ * @brief Buffer formatter for VXLAN GPE packets including length (currently a stub)
+ *
+ * @param *s
+ * @param *args
+ *
+ * @return *s
+ *
+ */
+static u8 * format_vxlan_gpe_with_length (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+
+ return s;
+}
+
+/**
+ * @brief Common processing for IPv4 and IPv6 VXLAN GPE decap dispatch functions
+ *
+ * It is worth noting that other than trivial UDP forwarding (transit), VXLAN GPE
+ * tunnels are "terminate local". This means that there is no "TX" interface for this
+ * decap case, so that field in the buffer_metadata can be "used for something else".
+ * The something else in this case is, for the IPv4/IPv6 inner-packet type case, the
+ * FIB index used to look up the inner-packet's adjacency.
+ *
+ * vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
+ *
+ * @param *vm
+ * @param *node
+ * @param *from_frame
+ * @param is_ip4
+ *
+ * @return from_frame->n_vectors
+ *
+ */
+always_inline uword
+vxlan_gpe_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame,
+ u8 is_ip4)
+{
+ u32 n_left_from, next_index, *from, *to_next;
+ vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
+ vnet_main_t * vnm = ngm->vnet_main;
+ vnet_interface_main_t * im = &vnm->interface_main;
+ u32 last_tunnel_index = ~0;
+ vxlan4_gpe_tunnel_key_t last_key4;
+ vxlan6_gpe_tunnel_key_t last_key6;
+ u32 pkts_decapsulated = 0;
+ u32 cpu_index = os_get_cpu_number ();
+ u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
+
+ if (is_ip4)
+ memset (&last_key4, 0xff, sizeof(last_key4));
+ else
+ memset (&last_key6, 0xff, sizeof(last_key6));
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+ stats_sw_if_index = node->runtime_data[0];
+ stats_n_packets = stats_n_bytes = 0;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, *b1;
+ u32 next0, next1;
+ ip4_vxlan_gpe_header_t * iuvn4_0, *iuvn4_1;
+ ip6_vxlan_gpe_header_t * iuvn6_0, *iuvn6_1;
+ uword * p0, *p1;
+ u32 tunnel_index0, tunnel_index1;
+ vxlan_gpe_tunnel_t * t0, *t1;
+ vxlan4_gpe_tunnel_key_t key4_0, key4_1;
+ vxlan6_gpe_tunnel_key_t key6_0, key6_1;
+ u32 error0, error1;
+ u32 sw_if_index0, sw_if_index1, len0, len1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header(p2, LOAD);
+ vlib_prefetch_buffer_header(p3, LOAD);
+
+ CLIB_PREFETCH(p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH(p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ if (is_ip4)
+ {
+ /* udp leaves current_data pointing at the vxlan-gpe header */
+ vlib_buffer_advance (b0, -(word) (sizeof(udp_header_t) + sizeof(ip4_header_t)));
+ vlib_buffer_advance (b1, -(word) (sizeof(udp_header_t) + sizeof(ip4_header_t)));
+
+ iuvn4_0 = vlib_buffer_get_current (b0);
+ iuvn4_1 = vlib_buffer_get_current (b1);
+
+ /* pop (ip, udp, vxlan) */
+ vlib_buffer_advance (b0, sizeof(*iuvn4_0));
+ vlib_buffer_advance (b1, sizeof(*iuvn4_1));
+ }
+ else
+ {
+ /* udp leaves current_data pointing at the vxlan-gpe header */
+ vlib_buffer_advance (b0, -(word) (sizeof(udp_header_t) + sizeof(ip6_header_t)));
+ vlib_buffer_advance (b1, -(word) (sizeof(udp_header_t) + sizeof(ip6_header_t)));
+
+ iuvn6_0 = vlib_buffer_get_current (b0);
+ iuvn6_1 = vlib_buffer_get_current (b1);
+
+ /* pop (ip, udp, vxlan) */
+ vlib_buffer_advance (b0, sizeof(*iuvn6_0));
+ vlib_buffer_advance (b1, sizeof(*iuvn6_1));
+ }
+
+ tunnel_index0 = ~0;
+ tunnel_index1 = ~0;
+ error0 = 0;
+ error1 = 0;
+
+ if (is_ip4)
+ {
+ next0 =
+ (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX)?
+ ngm->decap_next_node_list[iuvn4_0->vxlan.protocol]:
+ VXLAN_GPE_INPUT_NEXT_DROP;
+ next1 =
+ (iuvn4_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX)?
+ ngm->decap_next_node_list[iuvn4_1->vxlan.protocol]:
+ VXLAN_GPE_INPUT_NEXT_DROP;
+
+ key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
+ key4_1.local = iuvn4_1->ip4.dst_address.as_u32;
+
+ key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
+ key4_1.remote = iuvn4_1->ip4.src_address.as_u32;
+
+ key4_0.vni = iuvn4_0->vxlan.vni_res;
+ key4_1.vni = iuvn4_1->vxlan.vni_res;
+
+ key4_0.pad = 0;
+ key4_1.pad = 0;
+ }
+ else /* is_ip6 */
+ {
+ next0 = (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
+ ngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
+ VXLAN_GPE_INPUT_NEXT_DROP;
+ next1 = (iuvn6_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
+ ngm->decap_next_node_list[iuvn6_1->vxlan.protocol] :
+ VXLAN_GPE_INPUT_NEXT_DROP;
+
+ key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
+ key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
+ key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
+ key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
+
+ key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
+ key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
+ key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
+ key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
+
+ key6_0.vni = iuvn6_0->vxlan.vni_res;
+ key6_1.vni = iuvn6_1->vxlan.vni_res;
+ }
+
+ /* Processing packet 0 */
+ if (is_ip4)
+ {
+ /* Processing for key4_0 */
+ if (PREDICT_FALSE((key4_0.as_u64[0] != last_key4.as_u64[0])
+ || (key4_0.as_u64[1] != last_key4.as_u64[1])))
+ {
+ p0 = hash_get_mem(ngm->vxlan4_gpe_tunnel_by_key, &key4_0);
+
+ if (p0 == 0)
+ {
+ error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+ goto trace0;
+ }
+
+ last_key4.as_u64[0] = key4_0.as_u64[0];
+ last_key4.as_u64[1] = key4_0.as_u64[1];
+ tunnel_index0 = last_tunnel_index = p0[0];
+ }
+ else
+ tunnel_index0 = last_tunnel_index;
+ }
+ else /* is_ip6 */
+ {
+ /* next0/next1 and the ip6 lookup keys were computed above */
+
+ /* Processing for key6_0 */
+ if (PREDICT_FALSE(memcmp (&key6_0, &last_key6, sizeof(last_key6)) != 0))
+ {
+ p0 = hash_get_mem(ngm->vxlan6_gpe_tunnel_by_key, &key6_0);
+
+ if (p0 == 0)
+ {
+ error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+ goto trace0;
+ }
+
+ memcpy (&last_key6, &key6_0, sizeof(key6_0));
+ tunnel_index0 = last_tunnel_index = p0[0];
+ }
+ else
+ tunnel_index0 = last_tunnel_index;
+ }
+
+ t0 = pool_elt_at_index(ngm->tunnels, tunnel_index0);
+
+
+ sw_if_index0 = t0->sw_if_index;
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b0);
+
+ /*
+ * ip[46] lookup in the configured FIB
+ */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
+
+ pkts_decapsulated++;
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
+
+ if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len0;
+ if (stats_n_packets)
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
+ cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len0;
+ stats_sw_if_index = sw_if_index0;
+ }
+
+ trace0: b0->error = error0 ? node->errors[error0] : 0;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof(*tr));
+ tr->next_index = next0;
+ tr->error = error0;
+ tr->tunnel_index = tunnel_index0;
+ }
+
+ /* Process packet 1 */
+ if (is_ip4)
+ {
+ /* Processing for key4_1 */
+ if (PREDICT_FALSE(
+ (key4_1.as_u64[0] != last_key4.as_u64[0])
+ || (key4_1.as_u64[1] != last_key4.as_u64[1])))
+ {
+ p1 = hash_get_mem(ngm->vxlan4_gpe_tunnel_by_key, &key4_1);
+
+ if (p1 == 0)
+ {
+ error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+ goto trace1;
+ }
+
+ last_key4.as_u64[0] = key4_1.as_u64[0];
+ last_key4.as_u64[1] = key4_1.as_u64[1];
+ tunnel_index1 = last_tunnel_index = p1[0];
+ }
+ else
+ tunnel_index1 = last_tunnel_index;
+ }
+ else /* is_ip6 */
+ {
+ /* Processing for key6_1 */
+ if (PREDICT_FALSE(memcmp (&key6_1, &last_key6, sizeof(last_key6)) != 0))
+ {
+ p1 = hash_get_mem(ngm->vxlan6_gpe_tunnel_by_key, &key6_1);
+
+ if (p1 == 0)
+ {
+ error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+ goto trace1;
+ }
+
+ memcpy (&last_key6, &key6_1, sizeof(key6_1));
+ tunnel_index1 = last_tunnel_index = p1[0];
+ }
+ else
+ tunnel_index1 = last_tunnel_index;
+ }
+
+ t1 = pool_elt_at_index(ngm->tunnels, tunnel_index1);
+
+ sw_if_index1 = t1->sw_if_index;
+ len1 = vlib_buffer_length_in_chain (vm, b1);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b1);
+
+ /*
+ * ip[46] lookup in the configured FIB
+ */
+ vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
+
+ pkts_decapsulated++;
+ stats_n_packets += 1;
+ stats_n_bytes += len1;
+
+ /* Batch stats increment on the same vxlan tunnel so counter
+ is not incremented per packet */
+ if (PREDICT_FALSE(sw_if_index1 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len1;
+ if (stats_n_packets)
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
+ cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len1;
+ stats_sw_if_index = sw_if_index1;
+ }
+
+ trace1: b1->error = error1 ? node->errors[error1] : 0;
+
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b1, sizeof(*tr));
+ tr->next_index = next1;
+ tr->error = error1;
+ tr->tunnel_index = tunnel_index1;
+ }
+
+ vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0;
+ ip4_vxlan_gpe_header_t * iuvn4_0;
+ ip6_vxlan_gpe_header_t * iuvn6_0;
+ uword * p0;
+ u32 tunnel_index0;
+ vxlan_gpe_tunnel_t * t0;
+ vxlan4_gpe_tunnel_key_t key4_0;
+ vxlan6_gpe_tunnel_key_t key6_0;
+ u32 error0;
+ u32 sw_if_index0, len0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ if (is_ip4)
+ {
+ /* udp leaves current_data pointing at the vxlan-gpe header */
+ vlib_buffer_advance (
+ b0, -(word) (sizeof(udp_header_t) + sizeof(ip4_header_t)));
+
+ iuvn4_0 = vlib_buffer_get_current (b0);
+
+ /* pop (ip, udp, vxlan) */
+ vlib_buffer_advance (b0, sizeof(*iuvn4_0));
+ }
+ else
+ {
+ /* udp leaves current_data pointing at the vxlan-gpe header */
+ vlib_buffer_advance (
+ b0, -(word) (sizeof(udp_header_t) + sizeof(ip6_header_t)));
+
+ iuvn6_0 = vlib_buffer_get_current (b0);
+
+ /* pop (ip, udp, vxlan) */
+ vlib_buffer_advance (b0, sizeof(*iuvn6_0));
+ }
+
+ tunnel_index0 = ~0;
+ error0 = 0;
+
+ if (is_ip4)
+ {
+ next0 =
+ (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX)?
+ ngm->decap_next_node_list[iuvn4_0->vxlan.protocol]:
+ VXLAN_GPE_INPUT_NEXT_DROP;
+
+ key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
+ key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
+ key4_0.vni = iuvn4_0->vxlan.vni_res;
+ key4_0.pad = 0;
+
+ /* Processing for key4_0 */
+ if (PREDICT_FALSE(
+ (key4_0.as_u64[0] != last_key4.as_u64[0])
+ || (key4_0.as_u64[1] != last_key4.as_u64[1])))
+ {
+ p0 = hash_get_mem(ngm->vxlan4_gpe_tunnel_by_key, &key4_0);
+
+ if (p0 == 0)
+ {
+ error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+ goto trace00;
+ }
+
+ last_key4.as_u64[0] = key4_0.as_u64[0];
+ last_key4.as_u64[1] = key4_0.as_u64[1];
+ tunnel_index0 = last_tunnel_index = p0[0];
+ }
+ else
+ tunnel_index0 = last_tunnel_index;
+ }
+ else /* is_ip6 */
+ {
+ next0 =
+ (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX)?
+ ngm->decap_next_node_list[iuvn6_0->vxlan.protocol]:
+ VXLAN_GPE_INPUT_NEXT_DROP;
+
+ key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
+ key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
+ key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
+ key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
+ key6_0.vni = iuvn6_0->vxlan.vni_res;
+
+ /* Processing for key6_0 */
+ if (PREDICT_FALSE(memcmp (&key6_0, &last_key6, sizeof(last_key6)) != 0))
+ {
+ p0 = hash_get_mem(ngm->vxlan6_gpe_tunnel_by_key, &key6_0);
+
+ if (p0 == 0)
+ {
+ error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+ goto trace00;
+ }
+
+ memcpy (&last_key6, &key6_0, sizeof(key6_0));
+ tunnel_index0 = last_tunnel_index = p0[0];
+ }
+ else
+ tunnel_index0 = last_tunnel_index;
+ }
+
+ t0 = pool_elt_at_index(ngm->tunnels, tunnel_index0);
+
+
+ sw_if_index0 = t0->sw_if_index;
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b0);
+
+ /*
+ * ip[46] lookup in the configured FIB
+ */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
+
+ pkts_decapsulated++;
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
+
+ /* Batch stats increment on the same vxlan-gpe tunnel so counter
+ is not incremented per packet */
+ if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len0;
+ if (stats_n_packets)
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
+ cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len0;
+ stats_sw_if_index = sw_if_index0;
+ }
+
+ trace00: b0->error = error0 ? node->errors[error0] : 0;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof(*tr));
+ tr->next_index = next0;
+ tr->error = error0;
+ tr->tunnel_index = tunnel_index0;
+ }
+ vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, node->node_index,
+ VXLAN_GPE_ERROR_DECAPSULATED, pkts_decapsulated);
+ /* Increment any remaining batch stats */
+ if (stats_n_packets)
+ {
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, cpu_index,
+ stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ node->runtime_data[0] = stats_sw_if_index;
+ }
+ return from_frame->n_vectors;
+}
+
+/**
+ * @brief Graph processing dispatch function for IPv4 VXLAN GPE
+ *
+ * @node vxlan4-gpe-input
+ * @param *vm
+ * @param *node
+ * @param *from_frame
+ *
+ * @return from_frame->n_vectors
+ *
+ */
+static uword
+vxlan4_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */1);
+}
+
+
+void
+vxlan_gpe_register_decap_protocol (u8 protocol_id, uword next_node_index)
+{
+ vxlan_gpe_main_t *hm = &vxlan_gpe_main;
+ hm->decap_next_node_list[protocol_id] = next_node_index;
+ return;
+}
+
+void
+vxlan_gpe_unregister_decap_protocol (u8 protocol_id, uword next_node_index)
+{
+ vxlan_gpe_main_t *hm = &vxlan_gpe_main;
+ hm->decap_next_node_list[protocol_id] = VXLAN_GPE_INPUT_NEXT_DROP;
+ return;
+}
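+
+/* Illustrative registration from another feature's init path (the protocol
+ * constant and node name are placeholders for whatever the caller
+ * terminates):
+ *
+ *   vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_IP4,
+ *                                      my_decap_node.index);
+ *
+ * Unregistering restores the drop next for that protocol slot. */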
+
+
+/**
+ * @brief Graph processing dispatch function for IPv6 VXLAN GPE
+ *
+ * @node vxlan6-gpe-input
+ * @param *vm
+ * @param *node
+ * @param *from_frame
+ *
+ * @return from_frame->n_vectors - uword
+ *
+ */
+static uword
+vxlan6_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */0);
+}
+
+/**
+ * @brief VXLAN GPE error strings
+ */
+static char * vxlan_gpe_error_strings[] = {
+#define vxlan_gpe_error(n,s) s,
+#include <vnet/vxlan-gpe/vxlan_gpe_error.def>
+#undef vxlan_gpe_error
+};
+
+VLIB_REGISTER_NODE (vxlan4_gpe_input_node) = {
+ .function = vxlan4_gpe_input,
+ .name = "vxlan4-gpe-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
+ .error_strings = vxlan_gpe_error_strings,
+
+ .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
+ foreach_vxlan_gpe_input_next
+#undef _
+ },
+
+ .format_buffer = format_vxlan_gpe_with_length,
+ .format_trace = format_vxlan_gpe_rx_trace,
+ // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_gpe_input_node, vxlan4_gpe_input);
+
+VLIB_REGISTER_NODE (vxlan6_gpe_input_node) = {
+ .function = vxlan6_gpe_input,
+ .name = "vxlan6-gpe-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
+ .error_strings = vxlan_gpe_error_strings,
+
+ .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
+ foreach_vxlan_gpe_input_next
+#undef _
+ },
+
+ .format_buffer = format_vxlan_gpe_with_length,
+ .format_trace = format_vxlan_gpe_rx_trace,
+ // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_gpe_input_node, vxlan6_gpe_input);
diff --git a/src/vnet/vxlan-gpe/dir.dox b/src/vnet/vxlan-gpe/dir.dox
new file mode 100644
index 00000000000..c154733b21f
--- /dev/null
+++ b/src/vnet/vxlan-gpe/dir.dox
@@ -0,0 +1,32 @@
+/*
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ @dir
+ @brief VXLAN GPE
+
+ Based on IETF: draft-quinn-vxlan-gpe-03.txt
+
+Abstract
+
+ This draft describes extending Virtual eXtensible Local Area Network
+ (VXLAN), via changes to the VXLAN header, with three new
+ capabilities: support for multi-protocol encapsulation, operations,
+ administration and management (OAM) signaling and explicit
+ versioning.
+
+ See file: vxlan-gpe-rfc.txt
+
+*/ \ No newline at end of file
diff --git a/src/vnet/vxlan-gpe/encap.c b/src/vnet/vxlan-gpe/encap.c
new file mode 100644
index 00000000000..3a486e5606e
--- /dev/null
+++ b/src/vnet/vxlan-gpe/encap.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief Functions for encapsulating VXLAN GPE tunnels
+ *
+*/
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+
+/** Statistics (not really errors) */
+#define foreach_vxlan_gpe_encap_error \
+_(ENCAPSULATED, "good packets encapsulated")
+
+/**
+ * @brief VXLAN GPE encap error strings
+ */
+static char * vxlan_gpe_encap_error_strings[] = {
+#define _(sym,string) string,
+ foreach_vxlan_gpe_encap_error
+#undef _
+};
+
+/**
+ * @brief Enum of VXLAN GPE encap error/counter codes
+ */
+typedef enum {
+#define _(sym,str) VXLAN_GPE_ENCAP_ERROR_##sym,
+ foreach_vxlan_gpe_encap_error
+#undef _
+ VXLAN_GPE_ENCAP_N_ERROR,
+} vxlan_gpe_encap_error_t;
+
+/**
+ * @brief Struct for tracing VXLAN GPE encapsulated packets
+ */
+typedef struct {
+ u32 tunnel_index;
+} vxlan_gpe_encap_trace_t;
+
+/**
+ * @brief Trace of packets encapsulated in VXLAN GPE
+ *
+ * @param *s
+ * @param *args
+ *
+ * @return *s
+ *
+ */
+u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ vxlan_gpe_encap_trace_t * t
+ = va_arg (*args, vxlan_gpe_encap_trace_t *);
+
+ s = format (s, "VXLAN-GPE-ENCAP: tunnel %d", t->tunnel_index);
+ return s;
+}
+
+/**
+ * @brief Writes the UDP + VXLAN-GPE header, then sets the next node to IP4|6 lookup
+ *
+ * @param *ngm
+ * @param *b0
+ * @param *t0 contains rewrite header
+ * @param *next0 relative index of next dispatch function (next node)
+ * @param is_v4 Is this IPv4? (or IPv6)
+ *
+ */
+always_inline void
+vxlan_gpe_encap_one_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
+ vxlan_gpe_tunnel_t * t0, u32 * next0,
+ u8 is_v4)
+{
+ ASSERT(sizeof(ip4_vxlan_gpe_header_t) == 36);
+ ASSERT(sizeof(ip6_vxlan_gpe_header_t) == 56);
+
+ ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, is_v4);
+ next0[0] = t0->encap_next_node;
+}
+
+/**
+ * @brief Writes the UDP + VXLAN-GPE headers, then sets the next node to IP4|6 lookup, for two packets
+ *
+ * @param *ngm
+ * @param *b0 Packet0
+ * @param *b1 Packet1
+ * @param *t0 contains rewrite header for Packet0
+ * @param *t1 contains rewrite header for Packet1
+ * @param *next0 relative index of next dispatch function (next node) for Packet0
+ * @param *next1 relative index of next dispatch function (next node) for Packet1
+ * @param is_v4 Is this IPv4? (or IPv6)
+ *
+ */
+always_inline void
+vxlan_gpe_encap_two_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
+ vlib_buffer_t * b1, vxlan_gpe_tunnel_t * t0,
+ vxlan_gpe_tunnel_t * t1, u32 * next0,
+ u32 * next1, u8 is_v4)
+{
+ ASSERT(sizeof(ip4_vxlan_gpe_header_t) == 36);
+ ASSERT(sizeof(ip6_vxlan_gpe_header_t) == 56);
+
+ ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, is_v4);
+ ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, t1->rewrite_size, is_v4);
+ next0[0] = next1[0] = t0->encap_next_node;
+}
+
+/**
+ * @brief Common processing for IPv4 and IPv6 VXLAN GPE encap dispatch functions
+ *
+ * It is worth noting that other than trivial UDP forwarding (transit), VXLAN GPE
+ * tunnels are "establish local". This means that we don't have a TX interface as yet
+ * as we need to look up where the outer-header dest is. By setting the TX index in the
+ * buffer metadata to the encap FIB, we can do a lookup to get the adjacency and real TX.
+ *
+ * vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
+ *
+ * @node vxlan-gpe-input
+ * @param *vm
+ * @param *node
+ * @param *from_frame
+ *
+ * @return from_frame->n_vectors
+ *
+ */
+static uword
+vxlan_gpe_encap (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, *from, *to_next;
+ vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
+ vnet_main_t * vnm = ngm->vnet_main;
+ vnet_interface_main_t * im = &vnm->interface_main;
+ u32 pkts_encapsulated = 0;
+ u32 cpu_index = os_get_cpu_number ();
+ u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+ stats_sw_if_index = node->runtime_data[0];
+ stats_n_packets = stats_n_bytes = 0;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, *b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1, len0, len1;
+ vnet_hw_interface_t * hi0, *hi1;
+ vxlan_gpe_tunnel_t * t0, *t1;
+ u8 is_ip4_0, is_ip4_1;
+
+ next0 = next1 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header(p2, LOAD);
+ vlib_prefetch_buffer_header(p3, LOAD);
+
+ CLIB_PREFETCH(p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH(p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* 1-wide cache? */
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
+ sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
+ hi0 = vnet_get_sup_hw_interface (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
+ hi1 = vnet_get_sup_hw_interface (vnm, vnet_buffer(b1)->sw_if_index[VLIB_TX]);
+
+ t0 = pool_elt_at_index(ngm->tunnels, hi0->dev_instance);
+ t1 = pool_elt_at_index(ngm->tunnels, hi1->dev_instance);
+
+ is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+ is_ip4_1 = (t1->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+
+ if (PREDICT_TRUE(is_ip4_0 == is_ip4_1))
+ {
+ vxlan_gpe_encap_two_inline (ngm, b0, b1, t0, t1, &next0, &next1,is_ip4_0);
+ }
+ else
+ {
+ vxlan_gpe_encap_one_inline (ngm, b0, t0, &next0, is_ip4_0);
+ vxlan_gpe_encap_one_inline (ngm, b1, t1, &next1, is_ip4_1);
+ }
+
+ /* Reset to look up tunnel partner in the configured FIB */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
+ vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
+ vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
+ pkts_encapsulated += 2;
+
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+ len1 = vlib_buffer_length_in_chain (vm, b1);
+ stats_n_packets += 2;
+ stats_n_bytes += len0 + len1;
+
+ /* Batch stats increment on the same vxlan tunnel so counter is not
+ incremented per packet. Note stats are still incremented for deleted
+ and admin-down tunnel where packets are dropped. It is not worthwhile
+ to check for this rare case and affect normal path performance. */
+ if (PREDICT_FALSE((sw_if_index0 != stats_sw_if_index)
+ || (sw_if_index1 != stats_sw_if_index)))
+ {
+ stats_n_packets -= 2;
+ stats_n_bytes -= len0 + len1;
+ if (sw_if_index0 == sw_if_index1)
+ {
+ if (stats_n_packets)
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ stats_sw_if_index = sw_if_index0;
+ stats_n_packets = 2;
+ stats_n_bytes = len0 + len1;
+ }
+ else
+ {
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, sw_if_index0, 1, len0);
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, sw_if_index1, 1, len1);
+ }
+ }
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof(*tr));
+ tr->tunnel_index = t0 - ngm->tunnels;
+ }
+
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b1,
+ sizeof(*tr));
+ tr->tunnel_index = t1 - ngm->tunnels;
+ }
+
+ vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
+ u32 sw_if_index0, len0;
+ vnet_hw_interface_t * hi0;
+ vxlan_gpe_tunnel_t * t0;
+ u8 is_ip4_0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /* 1-wide cache? */
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
+ hi0 = vnet_get_sup_hw_interface (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
+
+ t0 = pool_elt_at_index(ngm->tunnels, hi0->dev_instance);
+
+ is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+
+ vxlan_gpe_encap_one_inline (ngm, b0, t0, &next0, is_ip4_0);
+
+ /* Reset to look up tunnel partner in the configured FIB */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
+ pkts_encapsulated++;
+
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
+
+ /* Batch stats increment on the same vxlan tunnel so counter is not
+ * incremented per packet. Note stats are still incremented for deleted
+ * and admin-down tunnel where packets are dropped. It is not worthwhile
+ * to check for this rare case and affect normal path performance. */
+ if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len0;
+ if (stats_n_packets)
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len0;
+ stats_sw_if_index = sw_if_index0;
+ }
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b0,
+ sizeof(*tr));
+ tr->tunnel_index = t0 - ngm->tunnels;
+ }
+ vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, node->node_index,
+ VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED,
+ pkts_encapsulated);
+ /* Increment any remaining batch stats */
+ if (stats_n_packets)
+ {
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, cpu_index,
+ stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ node->runtime_data[0] = stats_sw_if_index;
+ }
+
+ return from_frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (vxlan_gpe_encap_node) = {
+ .function = vxlan_gpe_encap,
+ .name = "vxlan-gpe-encap",
+ .vector_size = sizeof (u32),
+ .format_trace = format_vxlan_gpe_encap_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(vxlan_gpe_encap_error_strings),
+ .error_strings = vxlan_gpe_encap_error_strings,
+
+ .n_next_nodes = VXLAN_GPE_ENCAP_N_NEXT,
+
+ .next_nodes = {
+ [VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
+ [VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
+ [VXLAN_GPE_ENCAP_NEXT_DROP] = "error-drop",
+ },
+};
+
diff --git a/src/vnet/vxlan-gpe/vxlan-gpe-rfc.txt b/src/vnet/vxlan-gpe/vxlan-gpe-rfc.txt
new file mode 100644
index 00000000000..35cee50f573
--- /dev/null
+++ b/src/vnet/vxlan-gpe/vxlan-gpe-rfc.txt
@@ -0,0 +1,868 @@
+Network Working Group P. Quinn
+Internet-Draft Cisco Systems, Inc.
+Intended status: Experimental P. Agarwal
+Expires: January 4, 2015 Broadcom
+ R. Fernando
+ L. Kreeger
+ D. Lewis
+ F. Maino
+ M. Smith
+ N. Yadav
+ Cisco Systems, Inc.
+ L. Yong
+ Huawei USA
+ X. Xu
+ Huawei Technologies
+ U. Elzur
+ Intel
+ P. Garg
+ Microsoft
+ July 3, 2014
+
+
+ Generic Protocol Extension for VXLAN
+ draft-quinn-vxlan-gpe-03.txt
+
+Abstract
+
+ This draft describes extending Virtual eXtensible Local Area Network
+ (VXLAN), via changes to the VXLAN header, with three new
+ capabilities: support for multi-protocol encapsulation, operations,
+ administration and management (OAM) signaling and explicit
+ versioning.
+
+Status of this Memo
+
+ This Internet-Draft is submitted in full conformance with the
+ provisions of BCP 78 and BCP 79.
+
+ Internet-Drafts are working documents of the Internet Engineering
+ Task Force (IETF). Note that other groups may also distribute
+ working documents as Internet-Drafts. The list of current Internet-
+ Drafts is at http://datatracker.ietf.org/drafts/current/.
+
+ Internet-Drafts are draft documents valid for a maximum of six months
+ and may be updated, replaced, or obsoleted by other documents at any
+ time. It is inappropriate to use Internet-Drafts as reference
+ material or to cite them other than as "work in progress."
+
+
+
+
+Quinn, et al. Expires January 4, 2015 [Page 1]
+
+Internet-Draft Generic Protocol Extension for VXLAN July 2014
+
+
+ This Internet-Draft will expire on January 4, 2015.
+
+Copyright Notice
+
+ Copyright (c) 2014 IETF Trust and the persons identified as the
+ document authors. All rights reserved.
+
+ This document is subject to BCP 78 and the IETF Trust's Legal
+ Provisions Relating to IETF Documents
+ (http://trustee.ietf.org/license-info) in effect on the date of
+ publication of this document. Please review these documents
+ carefully, as they describe your rights and restrictions with respect
+ to this document. Code Components extracted from this document must
+ include Simplified BSD License text as described in Section 4.e of
+ the Trust Legal Provisions and are provided without warranty as
+ described in the Simplified BSD License.
+
+Table of Contents
+
+ 1. Introduction . . . . . . . . . . . . . . . . . . . . . . . . . 4
+ 2. VXLAN Without Protocol Extension . . . . . . . . . . . . . . . 5
+ 3. Generic Protocol Extension VXLAN (VXLAN-gpe) . . . . . . . . . 6
+ 3.1. Multi Protocol Support . . . . . . . . . . . . . . . . . . 6
+ 3.2. OAM Support . . . . . . . . . . . . . . . . . . . . . . . 7
+ 3.3. Version Bits . . . . . . . . . . . . . . . . . . . . . . . 7
+ 4. Backward Compatibility . . . . . . . . . . . . . . . . . . . . 8
+ 4.1. VXLAN VTEP to VXLAN-gpe VTEP . . . . . . . . . . . . . . . 8
+ 4.2. VXLAN-gpe VTEP to VXLAN VTEP . . . . . . . . . . . . . . . 8
+ 4.3. VXLAN-gpe UDP Ports . . . . . . . . . . . . . . . . . . . 8
+ 4.4. VXLAN-gpe and Encapsulated IP Header Fields . . . . . . . 8
+ 5. VXLAN-gpe Examples . . . . . . . . . . . . . . . . . . . . . . 9
+ 6. Security Considerations . . . . . . . . . . . . . . . . . . . 11
+ 7. Acknowledgments . . . . . . . . . . . . . . . . . . . . . . . 12
+ 8. IANA Considerations . . . . . . . . . . . . . . . . . . . . . 13
+ 8.1. UDP Port . . . . . . . . . . . . . . . . . . . . . . . . . 13
+ 8.2. VXLAN-gpe Next Protocol . . . . . . . . . . . . . . . . . 13
+ 8.3. VXLAN-gpe Reserved Bits . . . . . . . . . . . . . . . . . 13
+ 9. References . . . . . . . . . . . . . . . . . . . . . . . . . . 14
+ 9.1. Normative References . . . . . . . . . . . . . . . . . . . 14
+ 9.2. Informative References . . . . . . . . . . . . . . . . . . 14
+ Authors' Addresses . . . . . . . . . . . . . . . . . . . . . . . . 15
+
+1. Introduction
+
+ Virtual eXtensible Local Area Network [VXLAN] defines an
+ encapsulation format that encapsulates Ethernet frames in an outer
+ UDP/IP transport. As data centers evolve, the need to carry other
+ protocols encapsulated in an IP packet is required, as well as the
+ need to provide increased visibility and diagnostic capabilities
+ within the overlay. The VXLAN header does not specify the protocol
+ being encapsulated and therefore is currently limited to
+ encapsulating only Ethernet frame payload, nor does it provide the
+ ability to define OAM protocols. Rather than defining yet another
+ encapsulation, VXLAN is extended to provide protocol typing and OAM
+ capabilities.
+
+ This document describes extending VXLAN via the following changes:
+
+ Next Protocol Bit (P bit): A reserved flag bit is allocated, and set
+ in the VXLAN-gpe header to indicate that a next protocol field is
+ present.
+
+ OAM Flag Bit (O bit): A reserved flag bit is allocated, and set in
+ the VXLAN-gpe header, to indicate that the packet is an OAM
+ packet.
+
+ Version: Two reserved bits are allocated, and set in the VXLAN-gpe
+ header, to indicate VXLAN-gpe protocol version.
+
+   Next Protocol: An 8 bit next protocol field is present in the
+      VXLAN-gpe header.
+
+2. VXLAN Without Protocol Extension
+
+ As described in the introduction, the VXLAN header has no protocol
+ identifier that indicates the type of payload being carried by VXLAN.
+ Because of this, VXLAN is limited to an Ethernet payload.
+ Furthermore, the VXLAN header has no mechanism to signal OAM packets.
+
+ The VXLAN header defines bits 0-7 as flags (some defined, some
+ reserved), the VXLAN network identifier (VNI) field and several
+ reserved bits. The flags provide flexibility to define how the
+ reserved bits can be used to change the definition of the VXLAN
+ header.
+
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R|R|R|R|I|R|R|R| Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | VXLAN Network Identifier (VNI) | Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+ Figure 1: VXLAN Header
+
+3. Generic Protocol Extension VXLAN (VXLAN-gpe)
+
+3.1. Multi Protocol Support
+
+ This draft defines the following two changes to the VXLAN header in
+ order to support multi-protocol encapsulation:
+
+ P Bit: Flag bit 5 is defined as the Next Protocol bit. The P bit
+ MUST be set to 1 to indicate the presence of the 8 bit next
+ protocol field.
+
+ P = 0 indicates that the payload MUST conform to VXLAN as defined
+ in [VXLAN].
+
+ Flag bit 5 was chosen as the P bit because this flag bit is
+ currently reserved in VXLAN.
+
+ Next Protocol Field: The lower 8 bits of the first word are used to
+ carry a next protocol. This next protocol field contains the
+ protocol of the encapsulated payload packet. A new protocol
+ registry will be requested from IANA.
+
+ This draft defines the following Next Protocol values:
+
+ 0x1 : IPv4
+ 0x2 : IPv6
+ 0x3 : Ethernet
+ 0x4 : Network Service Header [NSH]
+
+
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R|R|R|R|I|P|R|R| Reserved |Next Protocol |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | VXLAN Network Identifier (VNI) | Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+ Figure 2: VXLAN-gpe Next Protocol
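+
+   [Editor's illustrative sketch, not part of the draft: with the
+   packed vxlan_gpe_header_t added later in this patch, a decap node
+   can dispatch on this field roughly as follows, where gm points at
+   vxlan_gpe_main and its per-protocol next-node table:
+
+       vxlan_gpe_header_t *h = vlib_buffer_get_current (b0);
+       if ((h->flags & VXLAN_GPE_FLAGS_P)
+           && h->protocol < VXLAN_GPE_PROTOCOL_MAX)
+         next0 = gm->decap_next_node_list[h->protocol];
+       else
+         next0 = VXLAN_GPE_INPUT_NEXT_DROP;
+   ]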
+
+3.2. OAM Support
+
+ Flag bit 7 is defined as the O bit. When the O bit is set to 1, the
+ packet is an OAM packet and OAM processing MUST occur. The OAM
+ protocol details are out of scope for this document. As with the
+ P-bit, bit 7 is currently a reserved flag in VXLAN.
+
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R|R|R|R|I|P|R|O| Reserved |Next Protocol |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | VXLAN Network Identifier (VNI) | Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+ Figure 3: VXLAN-gpe OAM Bit
+
+3.3. Version Bits
+
+ VXLAN-gpe bits 8 and 9 are defined as version bits. These bits are
+ reserved in VXLAN. The version field is used to ensure backward
+ compatibility going forward with future VXLAN-gpe updates.
+
+ The initial version for VXLAN-gpe is 0.
+
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R|R|R|R|I|P|R|O|Ver| Reserved |Next Protocol |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | VXLAN Network Identifier (VNI) | Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+
+
+ Figure 4: VXLAN-gpe Version Bits
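+
+   [Editor's illustrative sketch, not part of the draft: bits 8-9 are
+   the two most significant bits of the header's second byte (ver_res
+   in vxlan_gpe_packet.h), so a receiver can check the version with:
+
+       if (((h->ver_res >> 6) & 0x3) != VXLAN_GPE_VERSION)
+         next0 = VXLAN_GPE_INPUT_NEXT_DROP;   /* unknown version */
+   ]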
+
+4. Backward Compatibility
+
+4.1. VXLAN VTEP to VXLAN-gpe VTEP
+
+ As per VXLAN, reserved bits 5 and 7, VXLAN-gpe P and O-bits
+ respectively must be set to zero. The remaining reserved bits must
+ be zero, including the VXLAN-gpe version field, bits 8 and 9. The
+ encapsulated payload MUST be Ethernet.
+
+4.2. VXLAN-gpe VTEP to VXLAN VTEP
+
+ A VXLAN-gpe VTEP MUST NOT encapsulate non-Ethernet frames to a VXLAN
+ VTEP. When encapsulating Ethernet frames to a VXLAN VTEP, the VXLAN-
+ gpe VTEP will set the P bit to 0, the Next Protocol to 0 and use UDP
+ destination port 4789. A VXLAN-gpe VTEP MUST also set O = 0 and Ver
+ = 0 when encapsulating Ethernet frames to VXLAN VTEP. The receiving
+   VXLAN VTEP will treat this packet as a VXLAN packet.
+
+ A method for determining the capabilities of a VXLAN VTEP (gpe or
+ non-gpe) is out of the scope of this draft.
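+
+   [Editor's illustrative sketch, not part of the draft: toward a
+   legacy VXLAN VTEP the gpe-specific fields are simply zeroed and the
+   standard VXLAN port is used:
+
+       h->flags = VXLAN_GPE_FLAGS_I;            /* P = 0, O = 0 */
+       h->ver_res = 0;                          /* Ver = 0 */
+       h->protocol = 0;                         /* Next Protocol = 0 */
+       udp0->dst_port = clib_host_to_net_u16 (4789);
+   ]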
+
+4.3. VXLAN-gpe UDP Ports
+
+ VXLAN-gpe uses a new UDP destination port (to be assigned by IANA)
+ when sending traffic to VXLAN-gpe VTEPs.
+
+4.4. VXLAN-gpe and Encapsulated IP Header Fields
+
+ When encapsulating and decapsulating IPv4 and IPv6 packets, certain
+ fields, such as IPv4 Time to Live (TTL) from the inner IP header need
+ to be considered. VXLAN-gpe IP encapsulation and decapsulation
+ utilizes the techniques described in [RFC6830], section 5.3.
+
+5. VXLAN-gpe Examples
+
+ This section provides three examples of protocols encapsulated using
+ the Generic Protocol Extension for VXLAN described in this document.
+
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R|R|R|R|I|1|R|0|0|0| Reserved | NP = IPv4 |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | VXLAN Network Identifier (VNI) | Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Original IPv4 Packet |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+ Figure 5: IPv4 and VXLAN-gpe
+
+
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R|R|R|R|I|1|R|0|0|0| Reserved | NP = IPv6 |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | VXLAN Network Identifier (VNI) | Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Original IPv6 Packet |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+ Figure 6: IPv6 and VXLAN-gpe
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R|R|R|R|I|1|R|0|0|0| Reserved |NP = Ethernet |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | VXLAN Network Identifier (VNI) | Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Original Ethernet Frame |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+ Figure 7: Ethernet and VXLAN-gpe
+
+6. Security Considerations
+
+ VXLAN's security is focused on issues around L2 encapsulation into
+ L3. With VXLAN-gpe, issues such as spoofing, flooding, and traffic
+ redirection are dependent on the particular protocol payload
+ encapsulated.
+
+7. Acknowledgments
+
+ A special thank you goes to Dino Farinacci for his guidance and
+ detailed review.
+
+8. IANA Considerations
+
+8.1. UDP Port
+
+ A new UDP port will be requested from IANA.
+
+8.2. VXLAN-gpe Next Protocol
+
+ IANA is requested to set up a registry of "Next Protocol". These are
+ 8-bit values. Next Protocol values 0, 1, 2, 3 and 4 are defined in
+ this draft. New values are assigned via Standards Action [RFC5226].
+
+ +---------------+-------------+---------------+
+ | Next Protocol | Description | Reference |
+ +---------------+-------------+---------------+
+ | 0 | Reserved | This document |
+ | | | |
+ | 1 | IPv4 | This document |
+ | | | |
+ | 2 | IPv6 | This document |
+ | | | |
+ | 3 | Ethernet | This document |
+ | | | |
+ | 4 | NSH | This document |
+ | | | |
+ | 5..253 | Unassigned | |
+ +---------------+-------------+---------------+
+
+ Table 1
+
+8.3. VXLAN-gpe Reserved Bits
+
+ There are ten bits at the beginning of the VXLAN-gpe header. New
+ bits are assigned via Standards Action [RFC5226].
+
+ Bits 0-3 - Reserved
+ Bit 4 - Instance ID (I bit)
+ Bit 5 - Next Protocol (P bit)
+ Bit 6 - Reserved
+ Bit 7 - OAM (O bit)
+ Bits 8-9 - Version
+
+9. References
+
+9.1. Normative References
+
+ [RFC0768] Postel, J., "User Datagram Protocol", STD 6, RFC 768,
+ August 1980.
+
+ [RFC0791] Postel, J., "Internet Protocol", STD 5, RFC 791,
+ September 1981.
+
+ [RFC2119] Bradner, S., "Key words for use in RFCs to Indicate
+ Requirement Levels", BCP 14, RFC 2119, March 1997.
+
+ [RFC5226] Narten, T. and H. Alvestrand, "Guidelines for Writing an
+ IANA Considerations Section in RFCs", BCP 26, RFC 5226,
+ May 2008.
+
+9.2. Informative References
+
+ [NSH] Quinn, P. and et al. , "Network Service Header", 2014.
+
+ [RFC1700] Reynolds, J. and J. Postel, "Assigned Numbers", RFC 1700,
+ October 1994.
+
+ [RFC6830] Farinacci, D., Fuller, V., Meyer, D., and D. Lewis, "The
+ Locator/ID Separation Protocol (LISP)", RFC 6830,
+ January 2013.
+
+ [VXLAN] Dutt, D., Mahalingam, M., Duda, K., Agarwal, P., Kreeger,
+ L., Sridhar, T., Bursell, M., and C. Wright, "VXLAN: A
+ Framework for Overlaying Virtualized Layer 2 Networks over
+ Layer 3 Networks", 2013.
+
+Authors' Addresses
+
+ Paul Quinn
+ Cisco Systems, Inc.
+
+ Email: paulq@cisco.com
+
+
+ Puneet Agarwal
+ Broadcom
+
+ Email: pagarwal@broadcom.com
+
+
+ Rex Fernando
+ Cisco Systems, Inc.
+
+ Email: rex@cisco.com
+
+
+ Larry Kreeger
+ Cisco Systems, Inc.
+
+ Email: kreeger@cisco.com
+
+
+ Darrel Lewis
+ Cisco Systems, Inc.
+
+ Email: darlewis@cisco.com
+
+
+ Fabio Maino
+ Cisco Systems, Inc.
+
+   Email: fmaino@cisco.com
+
+
+ Michael Smith
+ Cisco Systems, Inc.
+
+ Email: michsmit@cisco.com
+
+
+ Navindra Yadav
+ Cisco Systems, Inc.
+
+ Email: nyadav@cisco.com
+
+
+ Lucy Yong
+ Huawei USA
+
+ Email: lucy.yong@huawei.com
+
+
+ Xiaohu Xu
+ Huawei Technologies
+
+ Email: xuxiaohu@huawei.com
+
+
+ Uri Elzur
+ Intel
+
+ Email: uri.elzur@intel.com
+
+
+ Pankaj Garg
+ Microsoft
+
+ Email: Garg.Pankaj@microsoft.com
diff --git a/src/vnet/vxlan-gpe/vxlan_gpe.api b/src/vnet/vxlan-gpe/vxlan_gpe.api
new file mode 100644
index 00000000000..6c6973f8384
--- /dev/null
+++ b/src/vnet/vxlan-gpe/vxlan_gpe.api
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+define vxlan_gpe_add_del_tunnel
+{
+ u32 client_index;
+ u32 context;
+ u8 is_ipv6;
+ u8 local[16];
+ u8 remote[16];
+ u32 encap_vrf_id;
+ u32 decap_vrf_id;
+ u8 protocol;
+ u32 vni;
+ u8 is_add;
+};
+
+define vxlan_gpe_add_del_tunnel_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+define vxlan_gpe_tunnel_dump
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+define vxlan_gpe_tunnel_details
+{
+ u32 context;
+ u32 sw_if_index;
+ u8 local[16];
+ u8 remote[16];
+ u32 vni;
+ u8 protocol;
+ u32 encap_vrf_id;
+ u32 decap_vrf_id;
+ u8 is_ipv6;
+};
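+
+/*
+ * Editor's note: a minimal client-side sketch of the add/del request
+ * defined above, assuming the vl_msg_api_alloc()/vl_msg_api_send_shmem()
+ * conventions used by the handlers in vxlan_gpe_api.c; the queue q,
+ * the addresses, and the vni are environment-specific placeholders
+ * (illustrative only):
+ *
+ *   vl_api_vxlan_gpe_add_del_tunnel_t *mp = vl_msg_api_alloc (sizeof (*mp));
+ *   memset (mp, 0, sizeof (*mp));
+ *   mp->_vl_msg_id = ntohs (VL_API_VXLAN_GPE_ADD_DEL_TUNNEL);
+ *   mp->is_add = 1;
+ *   mp->is_ipv6 = 0;
+ *   clib_memcpy (mp->local, &local_ip4, 4);   [* network byte order *]
+ *   clib_memcpy (mp->remote, &remote_ip4, 4);
+ *   mp->encap_vrf_id = clib_host_to_net_u32 (0);
+ *   mp->decap_vrf_id = clib_host_to_net_u32 (0);
+ *   mp->protocol = VXLAN_GPE_PROTOCOL_IP4;
+ *   mp->vni = clib_host_to_net_u32 (13);
+ *   vl_msg_api_send_shmem (q, (u8 *) & mp);
+ */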
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */ \ No newline at end of file
diff --git a/src/vnet/vxlan-gpe/vxlan_gpe.c b/src/vnet/vxlan-gpe/vxlan_gpe.c
new file mode 100644
index 00000000000..b97510c4ee3
--- /dev/null
+++ b/src/vnet/vxlan-gpe/vxlan_gpe.c
@@ -0,0 +1,659 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief Common utility functions for IPv4 and IPv6 VXLAN GPE tunnels
+ *
+*/
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/fib/fib.h>
+#include <vnet/ip/format.h>
+
+vxlan_gpe_main_t vxlan_gpe_main;
+
+/**
+ * @brief Format function for VXLAN GPE tunnel state
+ *
+ * @param *s formatting string
+ * @param *args va_list carrying a vxlan_gpe_tunnel_t pointer
+ *
+ * @return *s formatted string
+ *
+ */
+u8 * format_vxlan_gpe_tunnel (u8 * s, va_list * args)
+{
+ vxlan_gpe_tunnel_t * t = va_arg (*args, vxlan_gpe_tunnel_t *);
+ vxlan_gpe_main_t * gm = &vxlan_gpe_main;
+
+ s = format (s, "[%d] local: %U remote: %U ",
+ t - gm->tunnels,
+ format_ip46_address, &t->local, IP46_TYPE_ANY,
+ format_ip46_address, &t->remote, IP46_TYPE_ANY);
+
+ s = format (s, " vxlan VNI %d ", t->vni);
+
+ switch (t->protocol)
+ {
+ case VXLAN_GPE_PROTOCOL_IP4:
+ s = format (s, "next-protocol ip4");
+ break;
+ case VXLAN_GPE_PROTOCOL_IP6:
+ s = format (s, "next-protocol ip6");
+ break;
+ case VXLAN_GPE_PROTOCOL_ETHERNET:
+ s = format (s, "next-protocol ethernet");
+ break;
+ case VXLAN_GPE_PROTOCOL_NSH:
+ s = format (s, "next-protocol nsh");
+ break;
+ default:
+ s = format (s, "next-protocol unknown %d", t->protocol);
+ }
+
+ s = format (s, " fibs: (encap %d, decap %d)",
+ t->encap_fib_index,
+ t->decap_fib_index);
+
+ return s;
+}
+
+/**
+ * @brief Naming for VXLAN GPE tunnel
+ *
+ * @param *s formatting string
+ * @param *args
+ *
+ * @return *s formatted string
+ *
+ */
+static u8 * format_vxlan_gpe_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, "vxlan_gpe_tunnel%d", dev_instance);
+}
+
+static uword dummy_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ clib_warning ("you shouldn't be here, leaking buffers...");
+ return frame->n_vectors;
+}
+
+/**
+ * @brief Admin up/down callback for VXLAN GPE interfaces
+ *
+ * @param *vnm
+ * @param hw_if_index
+ * @param flags
+ *
+ * @return error
+ *
+ */
+static clib_error_t *
+vxlan_gpe_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+{
+ if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ vnet_hw_interface_set_flags (vnm, hw_if_index, VNET_HW_INTERFACE_FLAG_LINK_UP);
+ else
+ vnet_hw_interface_set_flags (vnm, hw_if_index, 0);
+
+ return 0;
+}
+
+VNET_DEVICE_CLASS (vxlan_gpe_device_class,static) = {
+ .name = "VXLAN_GPE",
+ .format_device_name = format_vxlan_gpe_name,
+ .format_tx_trace = format_vxlan_gpe_encap_trace,
+ .tx_function = dummy_interface_tx,
+ .admin_up_down_function = vxlan_gpe_interface_admin_up_down,
+};
+
+
+/**
+ * @brief Formatting function for tracing VXLAN GPE with length
+ *
+ * @param *s
+ * @param *args
+ *
+ * @return *s
+ *
+ */
+static u8 * format_vxlan_gpe_header_with_length (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ s = format (s, "unimplemented dev %u", dev_instance);
+ return s;
+}
+
+VNET_HW_INTERFACE_CLASS (vxlan_gpe_hw_class) = {
+ .name = "VXLAN_GPE",
+ .format_header = format_vxlan_gpe_header_with_length,
+ .build_rewrite = default_build_rewrite,
+ .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
+};
+
+
+#define foreach_gpe_copy_field \
+_(vni) \
+_(protocol) \
+_(encap_fib_index) \
+_(decap_fib_index)
+
+#define foreach_copy_ipv4 { \
+ _(local.ip4.as_u32) \
+ _(remote.ip4.as_u32) \
+}
+
+#define foreach_copy_ipv6 { \
+ _(local.ip6.as_u64[0]) \
+ _(local.ip6.as_u64[1]) \
+ _(remote.ip6.as_u64[0]) \
+ _(remote.ip6.as_u64[1]) \
+}
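+
+/*
+ * Editor's note: the _() member lists above are expanded with a
+ * caller-supplied _ macro; for example, in the add path below
+ *
+ *   #define _(x) t->x = a->x;
+ *   foreach_gpe_copy_field;
+ *   #undef _
+ *
+ * expands to
+ *
+ *   t->vni = a->vni; t->protocol = a->protocol;
+ *   t->encap_fib_index = a->encap_fib_index;
+ *   t->decap_fib_index = a->decap_fib_index;
+ */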
+
+
+/**
+ * @brief Calculate IPv4 VXLAN GPE rewrite header
+ *
+ * @param *t tunnel to build the rewrite string for
+ * @param extension_size extra bytes reserved after the base encap header
+ * @param protocol_override next-protocol value used instead of t->protocol when non-zero
+ * @param encap_next_node node to follow vxlan-gpe-encap
+ *
+ * @return rc
+ *
+ */
+int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
+ u8 protocol_override, uword encap_next_node)
+{
+ u8 *rw = 0;
+ ip4_header_t * ip0;
+ ip4_vxlan_gpe_header_t * h0;
+ int len;
+
+ len = sizeof (*h0) + extension_size;
+
+ vec_free(t->rewrite);
+ vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES);
+
+ h0 = (ip4_vxlan_gpe_header_t *) rw;
+
+ /* Fixed portion of the (outer) ip4 header */
+ ip0 = &h0->ip4;
+ ip0->ip_version_and_header_length = 0x45;
+ ip0->ttl = 254;
+ ip0->protocol = IP_PROTOCOL_UDP;
+
+ /* we fix up the ip4 header length and checksum after-the-fact */
+ ip0->src_address.as_u32 = t->local.ip4.as_u32;
+ ip0->dst_address.as_u32 = t->remote.ip4.as_u32;
+ ip0->checksum = ip4_header_checksum (ip0);
+
+ /* UDP header, randomize src port on something, maybe? */
+ h0->udp.src_port = clib_host_to_net_u16 (4790);
+ h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gpe);
+
+ /* VXLAN header. Are we having fun yet? */
+ h0->vxlan.flags = VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P;
+ h0->vxlan.ver_res = VXLAN_GPE_VERSION;
+ if (protocol_override)
+ {
+ h0->vxlan.protocol = protocol_override;
+ }
+ else
+ {
+ h0->vxlan.protocol = t->protocol;
+ }
+ t->rewrite_size = sizeof(ip4_vxlan_gpe_header_t) + extension_size;
+ h0->vxlan.vni_res = clib_host_to_net_u32 (t->vni<<8);
+
+ t->rewrite = rw;
+ t->encap_next_node = encap_next_node;
+ return (0);
+}
+
+/**
+ * @brief Calculate IPv6 VXLAN GPE rewrite header
+ *
+ * @param *t tunnel to build the rewrite string for
+ * @param extension_size extra bytes reserved after the base encap header
+ * @param protocol_override next-protocol value used instead of t->protocol when non-zero
+ * @param encap_next_node node to follow vxlan-gpe-encap
+ *
+ * @return rc
+ *
+ */
+int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
+ u8 protocol_override, uword encap_next_node)
+{
+ u8 *rw = 0;
+ ip6_header_t * ip0;
+ ip6_vxlan_gpe_header_t * h0;
+ int len;
+
+ len = sizeof (*h0) + extension_size;
+
+ vec_free(t->rewrite);
+ vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES);
+
+ h0 = (ip6_vxlan_gpe_header_t *) rw;
+
+  /* Fixed portion of the (outer) ip6 header */
+ ip0 = &h0->ip6;
+ ip0->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32(6 << 28);
+ ip0->hop_limit = 255;
+ ip0->protocol = IP_PROTOCOL_UDP;
+
+ ip0->src_address.as_u64[0] = t->local.ip6.as_u64[0];
+ ip0->src_address.as_u64[1] = t->local.ip6.as_u64[1];
+ ip0->dst_address.as_u64[0] = t->remote.ip6.as_u64[0];
+ ip0->dst_address.as_u64[1] = t->remote.ip6.as_u64[1];
+
+ /* UDP header, randomize src port on something, maybe? */
+ h0->udp.src_port = clib_host_to_net_u16 (4790);
+ h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gpe);
+
+ /* VXLAN header. Are we having fun yet? */
+ h0->vxlan.flags = VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P;
+ h0->vxlan.ver_res = VXLAN_GPE_VERSION;
+  if (protocol_override)
+    {
+      h0->vxlan.protocol = protocol_override;
+    }
+  else
+    {
+      h0->vxlan.protocol = t->protocol;
+    }
+  t->rewrite_size = sizeof(ip6_vxlan_gpe_header_t) + extension_size;
+ h0->vxlan.vni_res = clib_host_to_net_u32 (t->vni<<8);
+
+ t->rewrite = rw;
+ t->encap_next_node = encap_next_node;
+ return (0);
+}
+
+/**
+ * @brief Add or Del a VXLAN GPE tunnel
+ *
+ * @param *a
+ * @param *sw_if_indexp
+ *
+ * @return rc
+ *
+ */
+int vnet_vxlan_gpe_add_del_tunnel
+(vnet_vxlan_gpe_add_del_tunnel_args_t *a, u32 * sw_if_indexp)
+{
+ vxlan_gpe_main_t * gm = &vxlan_gpe_main;
+ vxlan_gpe_tunnel_t *t = 0;
+ vnet_main_t * vnm = gm->vnet_main;
+ vnet_hw_interface_t * hi;
+ uword * p;
+ u32 hw_if_index = ~0;
+ u32 sw_if_index = ~0;
+ int rv;
+ vxlan4_gpe_tunnel_key_t key4, *key4_copy;
+ vxlan6_gpe_tunnel_key_t key6, *key6_copy;
+ hash_pair_t *hp;
+
+ if (!a->is_ip6)
+ {
+ key4.local = a->local.ip4.as_u32;
+ key4.remote = a->remote.ip4.as_u32;
+ key4.vni = clib_host_to_net_u32 (a->vni << 8);
+ key4.pad = 0;
+
+ p = hash_get_mem(gm->vxlan4_gpe_tunnel_by_key, &key4);
+ }
+ else
+ {
+ key6.local.as_u64[0] = a->local.ip6.as_u64[0];
+ key6.local.as_u64[1] = a->local.ip6.as_u64[1];
+ key6.remote.as_u64[0] = a->remote.ip6.as_u64[0];
+ key6.remote.as_u64[1] = a->remote.ip6.as_u64[1];
+ key6.vni = clib_host_to_net_u32 (a->vni << 8);
+
+ p = hash_get_mem(gm->vxlan6_gpe_tunnel_by_key, &key6);
+ }
+
+ if (a->is_add)
+ {
+ /* adding a tunnel: tunnel must not already exist */
+ if (p)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ pool_get_aligned (gm->tunnels, t, CLIB_CACHE_LINE_BYTES);
+ memset (t, 0, sizeof (*t));
+
+ /* copy from arg structure */
+#define _(x) t->x = a->x;
+ foreach_gpe_copy_field;
+ if (!a->is_ip6) foreach_copy_ipv4
+ else foreach_copy_ipv6
+#undef _
+
+ if (!a->is_ip6) t->flags |= VXLAN_GPE_TUNNEL_IS_IPV4;
+
+ if (!a->is_ip6) {
+ rv = vxlan4_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP);
+ } else {
+ rv = vxlan6_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
+ }
+
+ if (rv)
+ {
+ pool_put (gm->tunnels, t);
+ return rv;
+ }
+
+ if (!a->is_ip6)
+ {
+ key4_copy = clib_mem_alloc (sizeof (*key4_copy));
+ clib_memcpy (key4_copy, &key4, sizeof (*key4_copy));
+ hash_set_mem (gm->vxlan4_gpe_tunnel_by_key, key4_copy,
+ t - gm->tunnels);
+ }
+ else
+ {
+ key6_copy = clib_mem_alloc (sizeof (*key6_copy));
+ clib_memcpy (key6_copy, &key6, sizeof (*key6_copy));
+ hash_set_mem (gm->vxlan6_gpe_tunnel_by_key, key6_copy,
+ t - gm->tunnels);
+ }
+
+ if (vec_len (gm->free_vxlan_gpe_tunnel_hw_if_indices) > 0)
+ {
+ hw_if_index = gm->free_vxlan_gpe_tunnel_hw_if_indices
+ [vec_len (gm->free_vxlan_gpe_tunnel_hw_if_indices)-1];
+ _vec_len (gm->free_vxlan_gpe_tunnel_hw_if_indices) -= 1;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ hi->dev_instance = t - gm->tunnels;
+ hi->hw_instance = hi->dev_instance;
+ }
+ else
+ {
+ hw_if_index = vnet_register_interface
+ (vnm, vxlan_gpe_device_class.index, t - gm->tunnels,
+ vxlan_gpe_hw_class.index, t - gm->tunnels);
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ hi->output_node_index = vxlan_gpe_encap_node.index;
+ }
+
+ t->hw_if_index = hw_if_index;
+ t->sw_if_index = sw_if_index = hi->sw_if_index;
+ vec_validate_init_empty (gm->tunnel_index_by_sw_if_index, sw_if_index, ~0);
+ gm->tunnel_index_by_sw_if_index[sw_if_index] = t - gm->tunnels;
+
+ vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ }
+ else
+ {
+ /* deleting a tunnel: tunnel must exist */
+ if (!p)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ t = pool_elt_at_index (gm->tunnels, p[0]);
+
+ vnet_sw_interface_set_flags (vnm, t->sw_if_index, 0 /* down */);
+ vec_add1 (gm->free_vxlan_gpe_tunnel_hw_if_indices, t->hw_if_index);
+
+ gm->tunnel_index_by_sw_if_index[t->sw_if_index] = ~0;
+
+ if (!a->is_ip6)
+ {
+ hp = hash_get_pair (gm->vxlan4_gpe_tunnel_by_key, &key4);
+ key4_copy = (void *)(hp->key);
+ hash_unset_mem (gm->vxlan4_gpe_tunnel_by_key, &key4);
+ clib_mem_free (key4_copy);
+ }
+ else
+ {
+ hp = hash_get_pair (gm->vxlan6_gpe_tunnel_by_key, &key6);
+ key6_copy = (void *)(hp->key);
+      hash_unset_mem (gm->vxlan6_gpe_tunnel_by_key, &key6);
+ clib_mem_free (key6_copy);
+ }
+
+ vec_free (t->rewrite);
+ pool_put (gm->tunnels, t);
+ }
+
+ if (sw_if_indexp)
+ *sw_if_indexp = sw_if_index;
+
+ return 0;
+}
+
+static clib_error_t *
+vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, * line_input = &_line_input;
+ u8 is_add = 1;
+ ip46_address_t local, remote;
+ u8 local_set = 0;
+ u8 remote_set = 0;
+ u8 ipv4_set = 0;
+ u8 ipv6_set = 0;
+ u32 encap_fib_index = 0;
+ u32 decap_fib_index = 0;
+ u8 protocol = VXLAN_GPE_PROTOCOL_IP4;
+ u32 vni;
+ u8 vni_set = 0;
+ int rv;
+ u32 tmp;
+ vnet_vxlan_gpe_add_del_tunnel_args_t _a, * a = &_a;
+ u32 sw_if_index;
+
+ /* Get a line of input. */
+ if (! unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "local %U",
+ unformat_ip4_address, &local.ip4))
+ {
+ local_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "remote %U",
+ unformat_ip4_address, &remote.ip4))
+ {
+ remote_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "local %U",
+ unformat_ip6_address, &local.ip6))
+ {
+ local_set = 1;
+ ipv6_set = 1;
+ }
+ else if (unformat (line_input, "remote %U",
+ unformat_ip6_address, &remote.ip6))
+ {
+ remote_set = 1;
+ ipv6_set = 1;
+ }
+ else if (unformat (line_input, "encap-vrf-id %d", &tmp))
+ {
+ if (ipv6_set)
+ encap_fib_index = ip6_fib_index_from_table_id (tmp);
+ else
+ encap_fib_index = ip4_fib_index_from_table_id (tmp);
+
+ if (encap_fib_index == ~0)
+ return clib_error_return (0, "nonexistent encap fib id %d", tmp);
+ }
+ else if (unformat (line_input, "decap-vrf-id %d", &tmp))
+ {
+ if (ipv6_set)
+ decap_fib_index = ip6_fib_index_from_table_id (tmp);
+ else
+ decap_fib_index = ip4_fib_index_from_table_id (tmp);
+
+ if (decap_fib_index == ~0)
+ return clib_error_return (0, "nonexistent decap fib id %d", tmp);
+ }
+ else if (unformat (line_input, "vni %d", &vni))
+ vni_set = 1;
+ else if (unformat(line_input, "next-ip4"))
+ protocol = VXLAN_GPE_PROTOCOL_IP4;
+ else if (unformat(line_input, "next-ip6"))
+ protocol = VXLAN_GPE_PROTOCOL_IP6;
+ else if (unformat(line_input, "next-ethernet"))
+ protocol = VXLAN_GPE_PROTOCOL_ETHERNET;
+ else if (unformat(line_input, "next-nsh"))
+ protocol = VXLAN_GPE_PROTOCOL_NSH;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (local_set == 0)
+ return clib_error_return (0, "tunnel local address not specified");
+
+ if (remote_set == 0)
+ return clib_error_return (0, "tunnel remote address not specified");
+
+ if (ipv4_set && ipv6_set)
+ return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
+
+ if ((ipv4_set && memcmp(&local.ip4, &remote.ip4, sizeof(local.ip4)) == 0) ||
+ (ipv6_set && memcmp(&local.ip6, &remote.ip6, sizeof(local.ip6)) == 0))
+ return clib_error_return (0, "src and dst addresses are identical");
+
+ if (vni_set == 0)
+ return clib_error_return (0, "vni not specified");
+
+ memset (a, 0, sizeof (*a));
+
+ a->is_add = is_add;
+ a->is_ip6 = ipv6_set;
+
+#define _(x) a->x = x;
+ foreach_gpe_copy_field;
+ if (ipv4_set) foreach_copy_ipv4
+ else foreach_copy_ipv6
+#undef _
+
+ rv = vnet_vxlan_gpe_add_del_tunnel (a, &sw_if_index);
+
+ switch(rv)
+ {
+ case 0:
+ vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main(), sw_if_index);
+ break;
+ case VNET_API_ERROR_INVALID_DECAP_NEXT:
+ return clib_error_return (0, "invalid decap-next...");
+
+ case VNET_API_ERROR_TUNNEL_EXIST:
+ return clib_error_return (0, "tunnel already exists...");
+
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ return clib_error_return (0, "tunnel does not exist...");
+
+ default:
+ return clib_error_return
+ (0, "vnet_vxlan_gpe_add_del_tunnel returned %d", rv);
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (create_vxlan_gpe_tunnel_command, static) = {
+ .path = "create vxlan-gpe tunnel",
+ .short_help =
+ "create vxlan-gpe tunnel local <local-addr> remote <remote-addr>"
+ " vni <nn> [next-ip4][next-ip6][next-ethernet][next-nsh]"
+ " [encap-vrf-id <nn>] [decap-vrf-id <nn>]"
+ " [del]\n",
+ .function = vxlan_gpe_add_del_tunnel_command_fn,
+};
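+
+/*
+ * Editor's note: example invocations of the command registered above
+ * (addresses and vni are illustrative):
+ *
+ *   create vxlan-gpe tunnel local 10.0.3.1 remote 10.0.3.3 vni 13 next-ip4
+ *   create vxlan-gpe tunnel local 10.0.3.1 remote 10.0.3.3 vni 13 del
+ */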
+
+/**
+ * @brief CLI function for showing VXLAN GPE tunnels
+ *
+ * @param *vm
+ * @param *input
+ * @param *cmd
+ *
+ * @return error
+ *
+ */
+static clib_error_t *
+show_vxlan_gpe_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vxlan_gpe_main_t * gm = &vxlan_gpe_main;
+ vxlan_gpe_tunnel_t * t;
+
+ if (pool_elts (gm->tunnels) == 0)
+ vlib_cli_output (vm, "No vxlan-gpe tunnels configured.");
+
+ pool_foreach (t, gm->tunnels,
+ ({
+ vlib_cli_output (vm, "%U", format_vxlan_gpe_tunnel, t);
+ }));
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (show_vxlan_gpe_tunnel_command, static) = {
+ .path = "show vxlan-gpe",
+ .function = show_vxlan_gpe_tunnel_command_fn,
+};
+
+/**
+ * @brief Feature init function for VXLAN GPE
+ *
+ * @param *vm
+ *
+ * @return error
+ *
+ */
+clib_error_t *vxlan_gpe_init (vlib_main_t *vm)
+{
+ vxlan_gpe_main_t *gm = &vxlan_gpe_main;
+
+ gm->vnet_main = vnet_get_main();
+ gm->vlib_main = vm;
+
+ gm->vxlan4_gpe_tunnel_by_key
+ = hash_create_mem (0, sizeof(vxlan4_gpe_tunnel_key_t), sizeof (uword));
+
+ gm->vxlan6_gpe_tunnel_by_key
+ = hash_create_mem (0, sizeof(vxlan6_gpe_tunnel_key_t), sizeof (uword));
+
+
+ udp_register_dst_port (vm, UDP_DST_PORT_vxlan_gpe,
+ vxlan4_gpe_input_node.index, 1 /* is_ip4 */);
+ udp_register_dst_port (vm, UDP_DST_PORT_vxlan6_gpe,
+ vxlan6_gpe_input_node.index, 0 /* is_ip4 */);
+
+ /* Register the list of standard decap protocols supported */
+ vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_IP4,
+ VXLAN_GPE_INPUT_NEXT_IP4_INPUT);
+ vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_IP6,
+ VXLAN_GPE_INPUT_NEXT_IP6_INPUT);
+ vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_ETHERNET,
+ VXLAN_GPE_INPUT_NEXT_ETHERNET_INPUT);
+ return 0;
+}
+
+VLIB_INIT_FUNCTION(vxlan_gpe_init);
+
diff --git a/src/vnet/vxlan-gpe/vxlan_gpe.h b/src/vnet/vxlan-gpe/vxlan_gpe.h
new file mode 100644
index 00000000000..1b4bc44e7bb
--- /dev/null
+++ b/src/vnet/vxlan-gpe/vxlan_gpe.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief VXLAN GPE definitions
+ *
+*/
+#ifndef included_vnet_vxlan_gpe_h
+#define included_vnet_vxlan_gpe_h
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/udp.h>
+
+/**
+ * @brief VXLAN GPE header struct
+ *
+ */
+typedef CLIB_PACKED (struct {
+ /** 20 bytes */
+ ip4_header_t ip4;
+ /** 8 bytes */
+ udp_header_t udp;
+ /** 8 bytes */
+ vxlan_gpe_header_t vxlan;
+}) ip4_vxlan_gpe_header_t;
+
+typedef CLIB_PACKED (struct {
+ /** 40 bytes */
+ ip6_header_t ip6;
+ /** 8 bytes */
+ udp_header_t udp;
+ /** 8 bytes */
+ vxlan_gpe_header_t vxlan;
+}) ip6_vxlan_gpe_header_t;
+
+/**
+ * @brief Key struct for IPv4 VXLAN GPE tunnel.
+ * Key fields: local, remote, vni
+ * all fields in NET byte order
+ * VNI shifted 8 bits
+ */
+typedef CLIB_PACKED(struct {
+ union {
+ struct {
+ u32 local;
+ u32 remote;
+
+ u32 vni;
+ u32 pad;
+ };
+ u64 as_u64[2];
+ };
+}) vxlan4_gpe_tunnel_key_t;
+
+/**
+ * @brief Key struct for IPv6 VXLAN GPE tunnel.
+ * Key fields: local, remote, vni
+ * all fields in NET byte order
+ * VNI shifted 8 bits
+ */
+typedef CLIB_PACKED(struct {
+ ip6_address_t local;
+ ip6_address_t remote;
+ u32 vni;
+}) vxlan6_gpe_tunnel_key_t;
+
+/**
+ * @brief Struct for VXLAN GPE tunnel
+ */
+typedef struct {
+ /** Rewrite string. $$$$ embed vnet_rewrite header */
+ u8 * rewrite;
+
+ /** encapsulated protocol */
+ u8 protocol;
+
+ /** tunnel local address */
+ ip46_address_t local;
+ /** tunnel remote address */
+ ip46_address_t remote;
+
+ /** FIB indices - tunnel partner lookup here */
+ u32 encap_fib_index;
+ /** FIB indices - inner IP packet lookup here */
+ u32 decap_fib_index;
+
+ /** VXLAN GPE VNI in HOST byte order, shifted left 8 bits */
+ u32 vni;
+
+ /** vnet intfc hw_if_index */
+ u32 hw_if_index;
+ /** vnet intfc sw_if_index */
+ u32 sw_if_index;
+
+ /** flags */
+ u32 flags;
+
+ /** rewrite size for dynamic plugins like iOAM */
+ u8 rewrite_size;
+
+ /** Next node after VxLAN-GPE encap */
+ uword encap_next_node;
+} vxlan_gpe_tunnel_t;
+
+/** Flags for vxlan_gpe_tunnel_t */
+#define VXLAN_GPE_TUNNEL_IS_IPV4 1
+
+/** next nodes for VXLAN GPE input */
+#define foreach_vxlan_gpe_input_next \
+_(DROP, "error-drop") \
+_(IP4_INPUT, "ip4-input") \
+_(IP6_INPUT, "ip6-input") \
+_(ETHERNET_INPUT, "ethernet-input")
+
+/** struct for next nodes for VXLAN GPE input */
+typedef enum {
+#define _(s,n) VXLAN_GPE_INPUT_NEXT_##s,
+ foreach_vxlan_gpe_input_next
+#undef _
+ VXLAN_GPE_INPUT_N_NEXT,
+} vxlan_gpe_input_next_t;
+
+/** struct for VXLAN GPE errors */
+typedef enum {
+#define vxlan_gpe_error(n,s) VXLAN_GPE_ERROR_##n,
+#include <vnet/vxlan-gpe/vxlan_gpe_error.def>
+#undef vxlan_gpe_error
+ VXLAN_GPE_N_ERROR,
+} vxlan_gpe_input_error_t;
+
+/** Struct for VXLAN GPE node state */
+typedef struct {
+ /** vector of encap tunnel instances */
+ vxlan_gpe_tunnel_t *tunnels;
+
+ /** lookup IPv4 VXLAN GPE tunnel by key */
+ uword * vxlan4_gpe_tunnel_by_key;
+ /** lookup IPv6 VXLAN GPE tunnel by key */
+ uword * vxlan6_gpe_tunnel_by_key;
+
+ /** Free vlib hw_if_indices */
+ u32 * free_vxlan_gpe_tunnel_hw_if_indices;
+
+ /** Mapping from sw_if_index to tunnel index */
+ u32 * tunnel_index_by_sw_if_index;
+
+ /** State convenience vlib_main_t */
+ vlib_main_t * vlib_main;
+ /** State convenience vnet_main_t */
+ vnet_main_t * vnet_main;
+
+ /** List of next nodes for the decap indexed on protocol */
+ uword decap_next_node_list[VXLAN_GPE_PROTOCOL_MAX];
+} vxlan_gpe_main_t;
+
+vxlan_gpe_main_t vxlan_gpe_main;
+
+extern vlib_node_registration_t vxlan_gpe_encap_node;
+extern vlib_node_registration_t vxlan4_gpe_input_node;
+extern vlib_node_registration_t vxlan6_gpe_input_node;
+
+u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args);
+
+/** Struct for VXLAN GPE add/del args */
+typedef struct {
+ u8 is_add;
+ u8 is_ip6;
+ ip46_address_t local, remote;
+ u8 protocol;
+ u32 encap_fib_index;
+ u32 decap_fib_index;
+ u32 vni;
+} vnet_vxlan_gpe_add_del_tunnel_args_t;
+
+
+int vnet_vxlan_gpe_add_del_tunnel
+(vnet_vxlan_gpe_add_del_tunnel_args_t *a, u32 * sw_if_indexp);
+
+
+int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
+ u8 protocol_override, uword encap_next_node);
+int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
+ u8 protocol_override, uword encap_next_node);
+
+/**
+ * @brief Struct for defining VXLAN GPE next nodes
+ */
+typedef enum {
+ VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP,
+ VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP,
+ VXLAN_GPE_ENCAP_NEXT_DROP,
+ VXLAN_GPE_ENCAP_N_NEXT
+} vxlan_gpe_encap_next_t;
+
+
+void vxlan_gpe_unregister_decap_protocol (u8 protocol_id, uword next_node_index);
+
+void vxlan_gpe_register_decap_protocol (u8 protocol_id, uword next_node_index);
+
+
+#endif /* included_vnet_vxlan_gpe_h */
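+
+/*
+ * Editor's note: vxlan_gpe_register_decap_protocol() lets a plugin
+ * steer a next-protocol value to its own graph node; vxlan_gpe_init()
+ * in vxlan_gpe.c uses it for the standard protocols.  A hypothetical
+ * iOAM plugin would do the same with its own next index, e.g.:
+ *
+ *   vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_IOAM,
+ *                                      my_ioam_next_index);
+ *
+ * where my_ioam_next_index is a placeholder for the next-index the
+ * plugin added on the vxlan-gpe input nodes.
+ */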
diff --git a/src/vnet/vxlan-gpe/vxlan_gpe_api.c b/src/vnet/vxlan-gpe/vxlan_gpe_api.c
new file mode 100644
index 00000000000..012a41dabbb
--- /dev/null
+++ b/src/vnet/vxlan-gpe/vxlan_gpe_api.c
@@ -0,0 +1,249 @@
+/*
+ *------------------------------------------------------------------
+ * vxlan_gpe_api.c - vxlan_gpe api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/fib/fib_table.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(VXLAN_GPE_ADD_DEL_TUNNEL, vxlan_gpe_add_del_tunnel) \
+_(VXLAN_GPE_TUNNEL_DUMP, vxlan_gpe_tunnel_dump) \
+
+static void
+ vl_api_vxlan_gpe_add_del_tunnel_t_handler
+ (vl_api_vxlan_gpe_add_del_tunnel_t * mp)
+{
+ vl_api_vxlan_gpe_add_del_tunnel_reply_t *rmp;
+ int rv = 0;
+ vnet_vxlan_gpe_add_del_tunnel_args_t _a, *a = &_a;
+ u32 encap_fib_index, decap_fib_index;
+ u8 protocol;
+ uword *p;
+ ip4_main_t *im = &ip4_main;
+ u32 sw_if_index = ~0;
+
+
+ p = hash_get (im->fib_index_by_table_id, ntohl (mp->encap_vrf_id));
+ if (!p)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ goto out;
+ }
+ encap_fib_index = p[0];
+
+ protocol = mp->protocol;
+
+ /* Interpret decap_vrf_id as an opaque if sending to other-than-ip4-input */
+  if (protocol == VXLAN_GPE_PROTOCOL_IP4)
+ {
+ p = hash_get (im->fib_index_by_table_id, ntohl (mp->decap_vrf_id));
+ if (!p)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_INNER_FIB;
+ goto out;
+ }
+ decap_fib_index = p[0];
+ }
+ else
+ {
+ decap_fib_index = ntohl (mp->decap_vrf_id);
+ }
+
+ /* Check src & dst are different */
+ if ((mp->is_ipv6 && memcmp (mp->local, mp->remote, 16) == 0) ||
+ (!mp->is_ipv6 && memcmp (mp->local, mp->remote, 4) == 0))
+ {
+ rv = VNET_API_ERROR_SAME_SRC_DST;
+ goto out;
+ }
+ memset (a, 0, sizeof (*a));
+
+ a->is_add = mp->is_add;
+ a->is_ip6 = mp->is_ipv6;
+ /* ip addresses sent in network byte order */
+ if (a->is_ip6)
+ {
+ clib_memcpy (&(a->local.ip6), mp->local, 16);
+ clib_memcpy (&(a->remote.ip6), mp->remote, 16);
+ }
+ else
+ {
+ clib_memcpy (&(a->local.ip4), mp->local, 4);
+ clib_memcpy (&(a->remote.ip4), mp->remote, 4);
+ }
+ a->encap_fib_index = encap_fib_index;
+ a->decap_fib_index = decap_fib_index;
+ a->protocol = protocol;
+ a->vni = ntohl (mp->vni);
+ rv = vnet_vxlan_gpe_add_del_tunnel (a, &sw_if_index);
+
+out:
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_VXLAN_GPE_ADD_DEL_TUNNEL_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void send_vxlan_gpe_tunnel_details
+ (vxlan_gpe_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_vxlan_gpe_tunnel_details_t *rmp;
+ ip4_main_t *im4 = &ip4_main;
+ ip6_main_t *im6 = &ip6_main;
+ u8 is_ipv6 = !(t->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_VXLAN_GPE_TUNNEL_DETAILS);
+ if (is_ipv6)
+ {
+ memcpy (rmp->local, &(t->local.ip6), 16);
+ memcpy (rmp->remote, &(t->remote.ip6), 16);
+ rmp->encap_vrf_id = htonl (im6->fibs[t->encap_fib_index].ft_table_id);
+ rmp->decap_vrf_id = htonl (im6->fibs[t->decap_fib_index].ft_table_id);
+ }
+ else
+ {
+ memcpy (rmp->local, &(t->local.ip4), 4);
+ memcpy (rmp->remote, &(t->remote.ip4), 4);
+ rmp->encap_vrf_id = htonl (im4->fibs[t->encap_fib_index].ft_table_id);
+ rmp->decap_vrf_id = htonl (im4->fibs[t->decap_fib_index].ft_table_id);
+ }
+ rmp->vni = htonl (t->vni);
+ rmp->protocol = t->protocol;
+ rmp->sw_if_index = htonl (t->sw_if_index);
+ rmp->is_ipv6 = is_ipv6;
+ rmp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void vl_api_vxlan_gpe_tunnel_dump_t_handler
+ (vl_api_vxlan_gpe_tunnel_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+ vxlan_gpe_main_t *vgm = &vxlan_gpe_main;
+ vxlan_gpe_tunnel_t *t;
+ u32 sw_if_index;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ if (~0 == sw_if_index)
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (t, vgm->tunnels,
+ ({
+ send_vxlan_gpe_tunnel_details(t, q, mp->context);
+ }));
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ if ((sw_if_index >= vec_len (vgm->tunnel_index_by_sw_if_index)) ||
+ (~0 == vgm->tunnel_index_by_sw_if_index[sw_if_index]))
+ {
+ return;
+ }
+ t = &vgm->tunnels[vgm->tunnel_index_by_sw_if_index[sw_if_index]];
+ send_vxlan_gpe_tunnel_details (t, q, mp->context);
+ }
+}
+
+
+/*
+ * vpe_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+  foreach_vl_msg_name_crc_vxlan_gpe;
+#undef _
+}
+
+static clib_error_t *
+vxlan_gpe_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (vxlan_gpe_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/vxlan-gpe/vxlan_gpe_error.def b/src/vnet/vxlan-gpe/vxlan_gpe_error.def
new file mode 100644
index 00000000000..9cf1b1cb656
--- /dev/null
+++ b/src/vnet/vxlan-gpe/vxlan_gpe_error.def
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+vxlan_gpe_error (DECAPSULATED, "good packets decapsulated")
+vxlan_gpe_error (NO_SUCH_TUNNEL, "no such tunnel packets")
diff --git a/src/vnet/vxlan-gpe/vxlan_gpe_packet.h b/src/vnet/vxlan-gpe/vxlan_gpe_packet.h
new file mode 100644
index 00000000000..ec3c2e586e1
--- /dev/null
+++ b/src/vnet/vxlan-gpe/vxlan_gpe_packet.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief VXLAN GPE packet header structure
+ *
+*/
+#ifndef included_vxlan_gpe_packet_h
+#define included_vxlan_gpe_packet_h
+
+/**
+ * From draft-quinn-vxlan-gpe-03.txt
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|R|R|I|P|R|O|Ver| Reserved |Next Protocol |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VXLAN Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * I Bit: Flag bit 4 indicates that the VNI is valid.
+ *
+ * P Bit: Flag bit 5 is defined as the Next Protocol bit. The P bit
+ * MUST be set to 1 to indicate the presence of the 8 bit next
+ * protocol field.
+ *
+ * O Bit: Flag bit 7 is defined as the O bit. When the O bit is set to 1,
+ * the packet is an OAM packet and OAM processing MUST occur. The OAM
+ * protocol details are out of scope for this document. As with the
+ * P-bit, bit 7 is currently a reserved flag in VXLAN.
+ *
+ * VXLAN-gpe bits 8 and 9 are defined as version bits. These bits are
+ * reserved in VXLAN. The version field is used to ensure backward
+ * compatibility going forward with future VXLAN-gpe updates.
+ *
+ * The initial version for VXLAN-gpe is 0.
+ *
+ * This draft defines the following Next Protocol values:
+ *
+ * 0x1 : IPv4
+ * 0x2 : IPv6
+ * 0x3 : Ethernet
+ * 0x4 : Network Service Header [NSH]
+ */
+
+/**
+ * @brief Inner protocols supported by VXLAN GPE.
+ * 1 - IP4
+ * 2 - IP6
+ * 3 - ETHERNET
+ * 4 - NSH
+ * 5 - IOAM
+ */
+#define foreach_vxlan_gpe_protocol \
+_ (0x01, IP4) \
+_ (0x02, IP6) \
+_ (0x03, ETHERNET) \
+_ (0x04, NSH) \
+_ (0x05, IOAM)
+
+
+/**
+ * @brief Enum of inner protocols supported by VXLAN GPE.
+ * 1 - IP4
+ * 2 - IP6
+ * 3 - ETHERNET
+ * 4 - NSH
+ * 5 - IOAM
+ */
+typedef enum {
+#define _(n,f) VXLAN_GPE_PROTOCOL_##f = n,
+ foreach_vxlan_gpe_protocol
+#undef _
+ VXLAN_GPE_PROTOCOL_MAX,
+} vxlan_gpe_protocol_t;
+
+/**
+ * @brief VXLAN GPE Header definition
+ */
+typedef struct {
+ u8 flags;
+ /** Version and Reserved */
+ u8 ver_res;
+ /** Reserved */
+ u8 res;
+ /** see vxlan_gpe_protocol_t */
+ u8 protocol;
+ /** VNI and Reserved */
+ u32 vni_res;
+} vxlan_gpe_header_t;
+
+#define VXLAN_GPE_FLAGS_I 0x08
+#define VXLAN_GPE_FLAGS_P 0x04
+#define VXLAN_GPE_FLAGS_O 0x01
+#define VXLAN_GPE_VERSION 0x0
+
+#endif /* included_vxlan_gpe_packet_h */
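+
+/*
+ * Editor's note: a minimal validity check built from the flag and
+ * version definitions above (a sketch only; real decap fast paths may
+ * fold these tests into their loops differently):
+ */
+#if 0 /* illustrative only */
+static inline int
+vxlan_gpe_header_ok (vxlan_gpe_header_t * h)
+{
+  /* I and P bits must both be set */
+  if ((h->flags & (VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P))
+      != (VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P))
+    return 0;
+  /* version (top two bits of ver_res) must be 0 */
+  if (((h->ver_res >> 6) & 0x3) != VXLAN_GPE_VERSION)
+    return 0;
+  /* next protocol must be a value we can dispatch on */
+  return h->protocol < VXLAN_GPE_PROTOCOL_MAX;
+}
+#endif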
diff --git a/src/vnet/vxlan/decap.c b/src/vnet/vxlan/decap.c
new file mode 100644
index 00000000000..73e50ffe31f
--- /dev/null
+++ b/src/vnet/vxlan/decap.c
@@ -0,0 +1,1130 @@
+/*
+ * decap.c: vxlan tunnel decap packet processing
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/vxlan/vxlan.h>
+
+vlib_node_registration_t vxlan4_input_node;
+vlib_node_registration_t vxlan6_input_node;
+
+typedef struct {
+ u32 next_index;
+ u32 tunnel_index;
+ u32 error;
+ u32 vni;
+} vxlan_rx_trace_t;
+
+static u8 * format_vxlan_rx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ vxlan_rx_trace_t * t = va_arg (*args, vxlan_rx_trace_t *);
+
+ if (t->tunnel_index != ~0)
+ {
+ s = format (s, "VXLAN decap from vxlan_tunnel%d vni %d next %d error %d",
+ t->tunnel_index, t->vni, t->next_index, t->error);
+ }
+ else
+ {
+ s = format (s, "VXLAN decap error - tunnel for vni %d does not exist",
+ t->vni);
+ }
+ return s;
+}
+
+always_inline uword
+vxlan_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame,
+ u32 is_ip4)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+ vxlan_main_t * vxm = &vxlan_main;
+ vnet_main_t * vnm = vxm->vnet_main;
+ vnet_interface_main_t * im = &vnm->interface_main;
+ u32 last_tunnel_index = ~0;
+ vxlan4_tunnel_key_t last_key4;
+ vxlan6_tunnel_key_t last_key6;
+ u32 pkts_decapsulated = 0;
+ u32 cpu_index = os_get_cpu_number();
+ u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
+
+ if (is_ip4)
+ last_key4.as_u64 = ~0;
+ else
+ memset (&last_key6, 0xff, sizeof (last_key6));
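+
+  /* Editor's note: last_key4/last_key6 act as a one-entry lookup cache:
+   * consecutive packets from the same tunnel skip the hash lookup and
+   * reuse last_tunnel_index (see the key comparisons below).  They are
+   * initialized to all-ones, an impossible key, so the first packet
+   * always takes the hash path. */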
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+ stats_sw_if_index = node->runtime_data[0];
+ stats_n_packets = stats_n_bytes = 0;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+ u32 next0, next1;
+ ip4_header_t * ip4_0, * ip4_1;
+ ip6_header_t * ip6_0, * ip6_1;
+ vxlan_header_t * vxlan0, * vxlan1;
+ uword * p0, * p1;
+ u32 tunnel_index0, tunnel_index1;
+ vxlan_tunnel_t * t0, * t1, * mt0 = NULL, * mt1 = NULL;
+ vxlan4_tunnel_key_t key4_0, key4_1;
+ vxlan6_tunnel_key_t key6_0, key6_1;
+ u32 error0, error1;
+ u32 sw_if_index0, sw_if_index1, len0, len1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* udp leaves current_data pointing at the vxlan header */
+ vxlan0 = vlib_buffer_get_current (b0);
+ vxlan1 = vlib_buffer_get_current (b1);
+ if (is_ip4) {
+ vlib_buffer_advance
+ (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
+ vlib_buffer_advance
+ (b1, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
+ ip4_0 = vlib_buffer_get_current (b0);
+ ip4_1 = vlib_buffer_get_current (b1);
+ } else {
+ vlib_buffer_advance
+ (b0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
+ vlib_buffer_advance
+ (b1, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
+ ip6_0 = vlib_buffer_get_current (b0);
+ ip6_1 = vlib_buffer_get_current (b1);
+ }
+
+ /* pop (ip, udp, vxlan) */
+ if (is_ip4) {
+ vlib_buffer_advance
+ (b0, sizeof(*ip4_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
+ vlib_buffer_advance
+ (b1, sizeof(*ip4_1)+sizeof(udp_header_t)+sizeof(*vxlan1));
+ } else {
+ vlib_buffer_advance
+ (b0, sizeof(*ip6_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
+ vlib_buffer_advance
+ (b1, sizeof(*ip6_1)+sizeof(udp_header_t)+sizeof(*vxlan1));
+ }
+
+ tunnel_index0 = ~0;
+ error0 = 0;
+
+ tunnel_index1 = ~0;
+ error1 = 0;
+
+ if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
+ {
+ error0 = VXLAN_ERROR_BAD_FLAGS;
+ next0 = VXLAN_INPUT_NEXT_DROP;
+ goto trace0;
+ }
+
+ if (is_ip4) {
+ key4_0.src = ip4_0->src_address.as_u32;
+ key4_0.vni = vxlan0->vni_reserved;
+
+ /* Make sure VXLAN tunnel exist according to packet SIP and VNI */
+ if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
+ {
+ p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
+ if (PREDICT_FALSE (p0 == NULL))
+ {
+ error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_INPUT_NEXT_DROP;
+ goto trace0;
+ }
+ last_key4.as_u64 = key4_0.as_u64;
+ tunnel_index0 = last_tunnel_index = p0[0];
+ }
+ else
+ tunnel_index0 = last_tunnel_index;
+ t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
+
+ /* Validate VXLAN tunnel SIP against packet DIP */
+ if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
+ goto next0; /* valid packet */
+ if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
+ {
+ key4_0.src = ip4_0->dst_address.as_u32;
+ key4_0.vni = vxlan0->vni_reserved;
+ /* Make sure a mcast VXLAN tunnel exists for the packet DIP and VNI */
+ p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
+ if (PREDICT_TRUE (p0 != NULL))
+ {
+ mt0 = pool_elt_at_index (vxm->tunnels, p0[0]);
+ goto next0; /* valid packet */
+ }
+ }
+ error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_INPUT_NEXT_DROP;
+ goto trace0;
+
+ } else /* !is_ip4 */ {
+ key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
+ key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
+ key6_0.vni = vxlan0->vni_reserved;
+
+ /* Make sure a VXLAN tunnel exists for the packet SIP and VNI */
+ if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
+ {
+ p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
+ if (PREDICT_FALSE (p0 == NULL))
+ {
+ error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_INPUT_NEXT_DROP;
+ goto trace0;
+ }
+ clib_memcpy (&last_key6, &key6_0, sizeof(key6_0));
+ tunnel_index0 = last_tunnel_index = p0[0];
+ }
+ else
+ tunnel_index0 = last_tunnel_index;
+ t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
+
+ /* Validate VXLAN tunnel SIP against packet DIP */
+ if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
+ &t0->src.ip6)))
+ goto next0; /* valid packet */
+ if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
+ {
+ key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
+ key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
+ key6_0.vni = vxlan0->vni_reserved;
+ p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
+ if (PREDICT_TRUE (p0 != NULL))
+ {
+ mt0 = pool_elt_at_index (vxm->tunnels, p0[0]);
+ goto next0; /* valid packet */
+ }
+ }
+ error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_INPUT_NEXT_DROP;
+ goto trace0;
+ }
+
+ next0:
+ next0 = t0->decap_next_index;
+ sw_if_index0 = t0->sw_if_index;
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ if (PREDICT_TRUE(next0 == VXLAN_INPUT_NEXT_L2_INPUT))
+ vnet_update_l2_len (b0);
+
+ /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
+ sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;
+
+ pkts_decapsulated ++;
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
+
+ /* Batch stats increment on the same vxlan tunnel so counter
+ is not incremented per packet */
+ if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len0;
+ if (stats_n_packets)
+ vlib_increment_combined_counter
+ (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
+ cpu_index, stats_sw_if_index,
+ stats_n_packets, stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len0;
+ stats_sw_if_index = sw_if_index0;
+ }
+
+ trace0:
+ b0->error = error0 ? node->errors[error0] : 0;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_rx_trace_t *tr
+ = vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->error = error0;
+ tr->tunnel_index = tunnel_index0;
+ tr->vni = vnet_get_vni (vxlan0);
+ }
+
+ if (PREDICT_FALSE (vxlan1->flags != VXLAN_FLAGS_I))
+ {
+ error1 = VXLAN_ERROR_BAD_FLAGS;
+ next1 = VXLAN_INPUT_NEXT_DROP;
+ goto trace1;
+ }
+
+ if (is_ip4) {
+ key4_1.src = ip4_1->src_address.as_u32;
+ key4_1.vni = vxlan1->vni_reserved;
+
+ /* Make sure a unicast VXLAN tunnel exists for the packet SIP and VNI */
+ if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
+ {
+ p1 = hash_get (vxm->vxlan4_tunnel_by_key, key4_1.as_u64);
+ if (PREDICT_FALSE (p1 == NULL))
+ {
+ error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next1 = VXLAN_INPUT_NEXT_DROP;
+ goto trace1;
+ }
+ last_key4.as_u64 = key4_1.as_u64;
+ tunnel_index1 = last_tunnel_index = p1[0];
+ }
+ else
+ tunnel_index1 = last_tunnel_index;
+ t1 = pool_elt_at_index (vxm->tunnels, tunnel_index1);
+
+ /* Validate VXLAN tunnel SIP against packet DIP */
+ if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
+ goto next1; /* valid packet */
+ if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_1->dst_address)))
+ {
+ key4_1.src = ip4_1->dst_address.as_u32;
+ key4_1.vni = vxlan1->vni_reserved;
+ /* Make sure a mcast VXLAN tunnel exists for the packet DIP and VNI */
+ p1 = hash_get (vxm->vxlan4_tunnel_by_key, key4_1.as_u64);
+ if (PREDICT_TRUE (p1 != NULL))
+ {
+ mt1 = pool_elt_at_index (vxm->tunnels, p1[0]);
+ goto next1; /* valid packet */
+ }
+ }
+ error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next1 = VXLAN_INPUT_NEXT_DROP;
+ goto trace1;
+
+ } else /* !is_ip4 */ {
+ key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
+ key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
+ key6_1.vni = vxlan1->vni_reserved;
+
+ /* Make sure a VXLAN tunnel exists for the packet SIP and VNI */
+ if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
+ {
+ p1 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_1);
+
+ if (PREDICT_FALSE (p1 == NULL))
+ {
+ error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next1 = VXLAN_INPUT_NEXT_DROP;
+ goto trace1;
+ }
+
+ clib_memcpy (&last_key6, &key6_1, sizeof(key6_1));
+ tunnel_index1 = last_tunnel_index = p1[0];
+ }
+ else
+ tunnel_index1 = last_tunnel_index;
+ t1 = pool_elt_at_index (vxm->tunnels, tunnel_index1);
+
+ /* Validate VXLAN tunnel SIP against packet DIP */
+ if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address,
+ &t1->src.ip6)))
+ goto next1; /* valid packet */
+ if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_1->dst_address)))
+ {
+ key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
+ key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
+ key6_1.vni = vxlan1->vni_reserved;
+ p1 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_1);
+ if (PREDICT_TRUE (p1 != NULL))
+ {
+ mt1 = pool_elt_at_index (vxm->tunnels, p1[0]);
+ goto next1; /* valid packet */
+ }
+ }
+ error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next1 = VXLAN_INPUT_NEXT_DROP;
+ goto trace1;
+ }
+
+ next1:
+ next1 = t1->decap_next_index;
+ sw_if_index1 = t1->sw_if_index;
+ len1 = vlib_buffer_length_in_chain (vm, b1);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ if (PREDICT_TRUE(next1 == VXLAN_INPUT_NEXT_L2_INPUT))
+ vnet_update_l2_len (b1);
+
+ /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
+ vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
+ sw_if_index1 = (mt1) ? mt1->sw_if_index : sw_if_index1;
+
+ pkts_decapsulated ++;
+ stats_n_packets += 1;
+ stats_n_bytes += len1;
+
+ /* Batch stats increment on the same vxlan tunnel so counter
+ is not incremented per packet */
+ if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len1;
+ if (stats_n_packets)
+ vlib_increment_combined_counter
+ (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
+ cpu_index, stats_sw_if_index,
+ stats_n_packets, stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len1;
+ stats_sw_if_index = sw_if_index1;
+ }
+
+ trace1:
+ b1->error = error1 ? node->errors[error1] : 0;
+
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_rx_trace_t *tr
+ = vlib_add_trace (vm, node, b1, sizeof (*tr));
+ tr->next_index = next1;
+ tr->error = error1;
+ tr->tunnel_index = tunnel_index1;
+ tr->vni = vnet_get_vni (vxlan1);
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0;
+ ip4_header_t * ip4_0;
+ ip6_header_t * ip6_0;
+ vxlan_header_t * vxlan0;
+ uword * p0;
+ u32 tunnel_index0;
+ vxlan_tunnel_t * t0, * mt0 = NULL;
+ vxlan4_tunnel_key_t key4_0;
+ vxlan6_tunnel_key_t key6_0;
+ u32 error0;
+ u32 sw_if_index0, len0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /* udp leaves current_data pointing at the vxlan header */
+ vxlan0 = vlib_buffer_get_current (b0);
+ if (is_ip4) {
+ vlib_buffer_advance
+ (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
+ ip4_0 = vlib_buffer_get_current (b0);
+ } else {
+ vlib_buffer_advance
+ (b0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
+ ip6_0 = vlib_buffer_get_current (b0);
+ }
+
+ /* pop (ip, udp, vxlan) */
+ if (is_ip4) {
+ vlib_buffer_advance
+ (b0, sizeof(*ip4_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
+ } else {
+ vlib_buffer_advance
+ (b0, sizeof(*ip6_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
+ }
+
+ tunnel_index0 = ~0;
+ error0 = 0;
+
+ if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
+ {
+ error0 = VXLAN_ERROR_BAD_FLAGS;
+ next0 = VXLAN_INPUT_NEXT_DROP;
+ goto trace00;
+ }
+
+ if (is_ip4) {
+ key4_0.src = ip4_0->src_address.as_u32;
+ key4_0.vni = vxlan0->vni_reserved;
+
+ /* Make sure a unicast VXLAN tunnel exists for the packet SIP and VNI */
+ if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
+ {
+ p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
+ if (PREDICT_FALSE (p0 == NULL))
+ {
+ error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_INPUT_NEXT_DROP;
+ goto trace00;
+ }
+ last_key4.as_u64 = key4_0.as_u64;
+ tunnel_index0 = last_tunnel_index = p0[0];
+ }
+ else
+ tunnel_index0 = last_tunnel_index;
+ t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
+
+ /* Validate VXLAN tunnel SIP against packet DIP */
+ if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
+ goto next00; /* valid packet */
+ if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
+ {
+ key4_0.src = ip4_0->dst_address.as_u32;
+ key4_0.vni = vxlan0->vni_reserved;
+ /* Make sure a mcast VXLAN tunnel exists for the packet DIP and VNI */
+ p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
+ if (PREDICT_TRUE (p0 != NULL))
+ {
+ mt0 = pool_elt_at_index (vxm->tunnels, p0[0]);
+ goto next00; /* valid packet */
+ }
+ }
+ error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_INPUT_NEXT_DROP;
+ goto trace00;
+
+ } else /* !is_ip4 */ {
+ key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
+ key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
+ key6_0.vni = vxlan0->vni_reserved;
+
+ /* Make sure a VXLAN tunnel exists for the packet SIP and VNI */
+ if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
+ {
+ p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
+ if (PREDICT_FALSE (p0 == NULL))
+ {
+ error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_INPUT_NEXT_DROP;
+ goto trace00;
+ }
+ clib_memcpy (&last_key6, &key6_0, sizeof(key6_0));
+ tunnel_index0 = last_tunnel_index = p0[0];
+ }
+ else
+ tunnel_index0 = last_tunnel_index;
+ t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
+
+ /* Validate VXLAN tunnel SIP against packet DIP */
+ if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
+ &t0->src.ip6)))
+ goto next00; /* valid packet */
+ if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
+ {
+ key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
+ key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
+ key6_0.vni = vxlan0->vni_reserved;
+ p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
+ if (PREDICT_TRUE (p0 != NULL))
+ {
+ mt0 = pool_elt_at_index (vxm->tunnels, p0[0]);
+ goto next00; /* valid packet */
+ }
+ }
+ error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_INPUT_NEXT_DROP;
+ goto trace00;
+ }
+
+ next00:
+ next0 = t0->decap_next_index;
+ sw_if_index0 = t0->sw_if_index;
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ if (PREDICT_TRUE(next0 == VXLAN_INPUT_NEXT_L2_INPUT))
+ vnet_update_l2_len (b0);
+
+ /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
+ sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;
+
+ pkts_decapsulated ++;
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
+
+ /* Batch stats increment on the same vxlan tunnel so counter
+ is not incremented per packet */
+ if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len0;
+ if (stats_n_packets)
+ vlib_increment_combined_counter
+ (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
+ cpu_index, stats_sw_if_index,
+ stats_n_packets, stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len0;
+ stats_sw_if_index = sw_if_index0;
+ }
+
+ trace00:
+ b0->error = error0 ? node->errors[error0] : 0;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_rx_trace_t *tr
+ = vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->error = error0;
+ tr->tunnel_index = tunnel_index0;
+ tr->vni = vnet_get_vni (vxlan0);
+ }
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ /* Do we still need this now that tunnel rx stats is kept? */
+ vlib_node_increment_counter (vm, is_ip4?
+ vxlan4_input_node.index:vxlan6_input_node.index,
+ VXLAN_ERROR_DECAPSULATED,
+ pkts_decapsulated);
+
+ /* Increment any remaining batch stats */
+ if (stats_n_packets)
+ {
+ vlib_increment_combined_counter
+ (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
+ cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ node->runtime_data[0] = stats_sw_if_index;
+ }
+
+ return from_frame->n_vectors;
+}
+
+static uword
+vxlan4_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return vxlan_input(vm, node, from_frame, /* is_ip4 */ 1);
+}
+
+static uword
+vxlan6_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return vxlan_input(vm, node, from_frame, /* is_ip4 */ 0);
+}
+
+static char * vxlan_error_strings[] = {
+#define vxlan_error(n,s) s,
+#include <vnet/vxlan/vxlan_error.def>
+#undef vxlan_error
+};
+
+VLIB_REGISTER_NODE (vxlan4_input_node) = {
+ .function = vxlan4_input,
+ .name = "vxlan4-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_errors = VXLAN_N_ERROR,
+ .error_strings = vxlan_error_strings,
+
+ .n_next_nodes = VXLAN_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
+ foreach_vxlan_input_next
+#undef _
+ },
+
+//temp .format_buffer = format_vxlan_header,
+ .format_trace = format_vxlan_rx_trace,
+ // $$$$ .unformat_buffer = unformat_vxlan_header,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_input_node, vxlan4_input)
+
+VLIB_REGISTER_NODE (vxlan6_input_node) = {
+ .function = vxlan6_input,
+ .name = "vxlan6-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_errors = VXLAN_N_ERROR,
+ .error_strings = vxlan_error_strings,
+
+ .n_next_nodes = VXLAN_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
+ foreach_vxlan_input_next
+#undef _
+ },
+
+//temp .format_buffer = format_vxlan_header,
+ .format_trace = format_vxlan_rx_trace,
+ // $$$$ .unformat_buffer = unformat_vxlan_header,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_input_node, vxlan6_input)
+
+
+typedef enum {
+ IP_VXLAN_BYPASS_NEXT_DROP,
+ IP_VXLAN_BYPASS_NEXT_VXLAN,
+ IP_VXLAN_BYPASS_N_NEXT,
+} ip_vxlan_bypass_next_t;
+
+always_inline uword
+ip_vxlan_bypass_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ u32 is_ip4)
+{
+ vxlan_main_t * vxm = &vxlan_main;
+ u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
+ vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
+ ip4_address_t addr4; /* last IPv4 address matching a local VTEP address */
+ ip6_address_t addr6; /* last IPv6 address matching a local VTEP address */
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip4_forward_next_trace (vm, node, frame, VLIB_TX);
+
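+ /* Cache the last destination address that matched a local VTEP, so
+    back-to-back packets to the same VTEP skip the hash lookup. The seed
+    values should never match a configured VTEP, forcing a lookup on the
+    first packet. */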
+ if (is_ip4) addr4.data_u32 = ~0;
+ else ip6_address_set_zero (&addr6);
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ vlib_buffer_t * b0, * b1;
+ ip4_header_t * ip0, * ip1;
+ udp_header_t * udp0, * udp1;
+ u32 bi0, ip_len0, udp_len0, flags0, next0;
+ u32 bi1, ip_len1, udp_len1, flags1, next1;
+ i32 len_diff0, len_diff1;
+ u8 error0, good_udp0, proto0;
+ u8 error1, good_udp1, proto1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = to_next[0] = from[0];
+ bi1 = to_next[1] = from[1];
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ ip0 = vlib_buffer_get_current (b0);
+ ip1 = vlib_buffer_get_current (b1);
+
+ /* Setup packet for next IP feature */
+ vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);
+ vnet_feature_next(vnet_buffer(b1)->sw_if_index[VLIB_RX], &next1, b1);
+
+ /* Treat IP frag packets as "experimental" protocol for now
+ until support of IP frag reassembly is implemented */
+ proto0 = ip4_is_fragment(ip0) ? 0xfe : ip0->protocol;
+ proto1 = ip4_is_fragment(ip1) ? 0xfe : ip1->protocol;
+
+ /* Process packet 0 */
+ if (proto0 != IP_PROTOCOL_UDP)
+ goto exit0; /* not UDP packet */
+
+ udp0 = ip4_next_header (ip0);
+ if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
+ goto exit0; /* not VXLAN packet */
+
+ if (is_ip4)
+ {
+ if (addr4.as_u32 != ip0->dst_address.as_u32)
+ {
+ if (!hash_get (vxm->vtep4, ip0->dst_address.as_u32))
+ goto exit0; /* no local VTEP for VXLAN packet */
+ addr4 = ip0->dst_address;
+ }
+ }
+ else goto exit0; /* IP6 VXLAN bypass not yet supported */
+
+ /* vxlan-input node expects current_data to point at the VXLAN header */
+ vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
+
+ flags0 = b0->flags;
+ good_udp0 = (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+
+ /* Don't verify UDP checksum for packets with explicit zero checksum. */
+ good_udp0 |= udp0->checksum == 0;
+
+ /* Verify UDP length */
+ ip_len0 = clib_net_to_host_u16 (ip0->length);
+ udp_len0 = clib_net_to_host_u16 (udp0->length);
+
+ len_diff0 = ip_len0 - udp_len0;
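+ /* A negative difference means the UDP datagram claims to be longer
+    than the enclosing IP packet, i.e. it is truncated or malformed. */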
+
+ /* Verify UDP checksum */
+ if (PREDICT_FALSE (!good_udp0))
+ {
+ if (!(flags0 & IP_BUFFER_L4_CHECKSUM_COMPUTED))
+ flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
+ good_udp0 =
+ (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ }
+
+ error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
+ error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
+
+ next0 = error0 ?
+ IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
+ b0->error = error0 ? error_node->errors[error0] : 0;
+
+ exit0:
+ /* Process packet 1 */
+ if (proto1 != IP_PROTOCOL_UDP)
+ goto exit1; /* not UDP packet */
+
+ udp1 = ip4_next_header (ip1);
+ if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
+ goto exit1; /* not VXLAN packet */
+
+ if (is_ip4)
+ {
+ if (addr4.as_u32 != ip1->dst_address.as_u32)
+ {
+ if (!hash_get (vxm->vtep4, ip1->dst_address.as_u32))
+ goto exit1; /* no local VTEP for VXLAN packet */
+ addr4 = ip1->dst_address;
+ }
+ }
+ else goto exit1; /* IP6 VXLAN bypass not yet supported */
+
+ /* vxlan-input node expects current_data to point at the VXLAN header */
+ vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
+
+ flags1 = b1->flags;
+ good_udp1 = (flags1 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+
+ /* Don't verify UDP checksum for packets with explicit zero checksum. */
+ good_udp1 |= udp1->checksum == 0;
+
+ /* Verify UDP length */
+ ip_len1 = clib_net_to_host_u16 (ip1->length);
+ udp_len1 = clib_net_to_host_u16 (udp1->length);
+
+ len_diff1 = ip_len1 - udp_len1;
+
+ /* Verify UDP checksum */
+ if (PREDICT_FALSE (!good_udp1))
+ {
+ if (!(flags1 & IP_BUFFER_L4_CHECKSUM_COMPUTED))
+ flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
+ good_udp1 =
+ (flags1 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ }
+
+ error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
+ error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
+
+ next1 = error1 ?
+ IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
+ b1->error = error1 ? error_node->errors[error1] : 0;
+
+ exit1:
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t * b0;
+ ip4_header_t * ip0;
+ udp_header_t * udp0;
+ u32 bi0, ip_len0, udp_len0, flags0, next0;
+ i32 len_diff0;
+ u8 error0, good_udp0, proto0;
+
+ bi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* Setup packet for next IP feature */
+ vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);
+
+ /* Treat IP frag packets as "experimental" protocol for now
+ until support of IP frag reassembly is implemented */
+ proto0 = ip4_is_fragment(ip0) ? 0xfe : ip0->protocol;
+
+ if (proto0 != IP_PROTOCOL_UDP)
+ goto exit; /* not UDP packet */
+
+ udp0 = ip4_next_header (ip0);
+ if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
+ goto exit; /* not VXLAN packet */
+
+ if (is_ip4)
+ {
+ if (addr4.as_u32 != ip0->dst_address.as_u32)
+ {
+ if (!hash_get (vxm->vtep4, ip0->dst_address.as_u32))
+ goto exit; /* no local VTEP for VXLAN packet */
+ addr4 = ip0->dst_address;
+ }
+ }
+ else goto exit; /* IP6 VXLAN bypass not yet supported */
+
+ /* vxlan-input node expects current_data to point at the VXLAN header */
+ vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
+
+ flags0 = b0->flags;
+ good_udp0 = (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+
+ /* Don't verify UDP checksum for packets with explicit zero checksum. */
+ good_udp0 |= udp0->checksum == 0;
+
+ /* Verify UDP length */
+ ip_len0 = clib_net_to_host_u16 (ip0->length);
+ udp_len0 = clib_net_to_host_u16 (udp0->length);
+
+ len_diff0 = ip_len0 - udp_len0;
+
+ /* Verify UDP checksum */
+ if (PREDICT_FALSE (!good_udp0))
+ {
+ if (!(flags0 & IP_BUFFER_L4_CHECKSUM_COMPUTED))
+ flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
+ good_udp0 =
+ (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ }
+
+ error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
+ error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
+
+ next0 = error0 ?
+ IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
+ b0->error = error0 ? error_node->errors[error0] : 0;
+
+ exit:
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+static uword
+ip4_vxlan_bypass (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
+}
+
+VLIB_REGISTER_NODE (ip4_vxlan_bypass_node) = {
+ .function = ip4_vxlan_bypass,
+ .name = "ip4-vxlan-bypass",
+ .vector_size = sizeof (u32),
+
+ .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
+ .next_nodes = {
+ [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
+ [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-input",
+ },
+
+ .format_buffer = format_ip4_header,
+ .format_trace = format_ip4_forward_next_trace,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_bypass_node,ip4_vxlan_bypass)
+
+
+static clib_error_t *
+set_ip_vxlan_bypass (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, * line_input = &_line_input;
+ vnet_main_t * vnm = vnet_get_main();
+ clib_error_t * error = 0;
+ u32 sw_if_index, is_del;
+
+ sw_if_index = ~0;
+ is_del = 0;
+
+ if (! unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat_user (line_input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ ;
+ else if (unformat (line_input, "del"))
+ is_del = 1;
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+
+ if (~0 == sw_if_index)
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+
+ vnet_feature_enable_disable ("ip4-unicast", "ip4-vxlan-bypass", sw_if_index,
+ is_del == 0, 0, 0);
+ done:
+ return error;
+}
+
+/*?
+ * This command adds the 'ip4-vxlan-bypass' graph node for a given interface.
+ * By adding the IPv4 vxlan-bypass graph node to an interface, the node checks
+ * for and validates input VXLAN packets and bypasses the ip4-lookup,
+ * ip4-local and ip4-udp-lookup nodes, speeding up VXLAN packet forwarding.
+ * The node adds a small amount of overhead for non-VXLAN packets, which is
+ * kept to a minimum.
+ *
+ * @cliexpar
+ * @parblock
+ * Example of graph node before ip4-vxlan-bypass is enabled:
+ * @cliexstart{show vlib graph ip4-vxlan-bypass}
+ * Name Next Previous
+ * ip4-vxlan-bypass error-drop [0]
+ * vxlan4-input [1]
+ * ip4-lookup [2]
+ * @cliexend
+ *
+ * Example of how to enable ip4-vxlan-bypass on an interface:
+ * @cliexcmd{set interface ip vxlan-bypass GigabitEthernet2/0/0}
+ *
+ * Example of graph node after ip4-vxlan-bypass is enabled:
+ * @cliexstart{show vlib graph ip4-vxlan-bypass}
+ * Name Next Previous
+ * ip4-vxlan-bypass error-drop [0] ip4-input
+ * vxlan4-input [1] ip4-input-no-checksum
+ * ip4-lookup [2]
+ * @cliexend
+ *
+ * Example of how to display the feature enabled on an interface:
+ * @cliexstart{show ip interface features GigabitEthernet2/0/0}
+ * IP feature paths configured on GigabitEthernet2/0/0...
+ *
+ * ipv4 unicast:
+ * ip4-vxlan-bypass
+ * ip4-lookup
+ *
+ * ipv4 multicast:
+ * ip4-lookup-multicast
+ *
+ * ipv4 multicast:
+ * interface-output
+ *
+ * ipv6 unicast:
+ * ip6-lookup
+ *
+ * ipv6 multicast:
+ * ip6-lookup
+ *
+ * ipv6 multicast:
+ * interface-output
+ * @cliexend
+ *
+ * Example of how to disable ip4-vxlan-bypass on an interface:
+ * @cliexcmd{set interface ip vxlan-bypass GigabitEthernet2/0/0 del}
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_ip_vxlan_bypass_command, static) = {
+ .path = "set interface ip vxlan-bypass",
+ .function = set_ip_vxlan_bypass,
+ .short_help = "set interface ip vxlan-bypass <interface> [del]",
+};
+
+/* Dummy init function to get us linked in. */
+clib_error_t * ip4_vxlan_bypass_init (vlib_main_t * vm)
+{ return 0; }
+
+VLIB_INIT_FUNCTION (ip4_vxlan_bypass_init);
diff --git a/src/vnet/vxlan/dir.dox b/src/vnet/vxlan/dir.dox
new file mode 100644
index 00000000000..31a9e2b6112
--- /dev/null
+++ b/src/vnet/vxlan/dir.dox
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+@dir
+@brief VXLAN Code.
+
+This directory contains source code to support VXLAN.
+
+*/
+/*? %%clicmd:group_label VXLAN CLI %% ?*/
diff --git a/src/vnet/vxlan/encap.c b/src/vnet/vxlan/encap.c
new file mode 100644
index 00000000000..5b63064a848
--- /dev/null
+++ b/src/vnet/vxlan/encap.c
@@ -0,0 +1,553 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/vxlan/vxlan.h>
+
+/* Statistics (not all errors) */
+#define foreach_vxlan_encap_error \
+_(ENCAPSULATED, "good packets encapsulated")
+
+static char * vxlan_encap_error_strings[] = {
+#define _(sym,string) string,
+ foreach_vxlan_encap_error
+#undef _
+};
+
+typedef enum {
+#define _(sym,str) VXLAN_ENCAP_ERROR_##sym,
+ foreach_vxlan_encap_error
+#undef _
+ VXLAN_ENCAP_N_ERROR,
+} vxlan_encap_error_t;
+
+typedef enum {
+ VXLAN_ENCAP_NEXT_DROP,
+ VXLAN_ENCAP_N_NEXT,
+} vxlan_encap_next_t;
+
+typedef struct {
+ u32 tunnel_index;
+ u32 vni;
+} vxlan_encap_trace_t;
+
+u8 * format_vxlan_encap_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ vxlan_encap_trace_t * t
+ = va_arg (*args, vxlan_encap_trace_t *);
+
+ s = format (s, "VXLAN encap to vxlan_tunnel%d vni %d",
+ t->tunnel_index, t->vni);
+ return s;
+}
+
+
+#define foreach_fixed_header4_offset \
+ _(0) _(1) _(2) _(3)
+
+#define foreach_fixed_header6_offset \
+ _(0) _(1) _(2) _(3) _(4) _(5) _(6)
+
+always_inline uword
+vxlan_encap_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame,
+ u32 is_ip4)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+ vxlan_main_t * vxm = &vxlan_main;
+ vnet_main_t * vnm = vxm->vnet_main;
+ vnet_interface_main_t * im = &vnm->interface_main;
+ u32 pkts_encapsulated = 0;
+ u16 old_l0 = 0, old_l1 = 0;
+ u32 cpu_index = os_get_cpu_number();
+ u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
+ u32 sw_if_index0 = 0, sw_if_index1 = 0;
+ u32 next0 = 0, next1 = 0;
+ vnet_hw_interface_t * hi0, * hi1;
+ vxlan_tunnel_t * t0 = NULL, * t1 = NULL;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+ stats_sw_if_index = node->runtime_data[0];
+ stats_n_packets = stats_n_bytes = 0;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+ u32 flow_hash0, flow_hash1;
+ u32 len0, len1;
+ ip4_header_t * ip4_0, * ip4_1;
+ ip6_header_t * ip6_0, * ip6_1;
+ udp_header_t * udp0, * udp1;
+ u64 * copy_src0, * copy_dst0;
+ u64 * copy_src1, * copy_dst1;
+ u32 * copy_src_last0, * copy_dst_last0;
+ u32 * copy_src_last1, * copy_dst_last1;
+ u16 new_l0, new_l1;
+ ip_csum_t sum0, sum1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ flow_hash0 = vnet_l2_compute_flow_hash (b0);
+ flow_hash1 = vnet_l2_compute_flow_hash (b1);
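+ /* The flow hash becomes the outer UDP source port below, giving the
+    underlay per-flow entropy for ECMP/LAG load balancing. */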
+
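+ /* t0/t1 and next0/next1 persist across iterations; the interface
+    lookup is redone only when a packet's TX sw_if_index differs from
+    the previous one. */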
+ /* Get next node index and adj index from tunnel next_dpo */
+ if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
+ {
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
+ hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
+ t0 = &vxm->tunnels[hi0->dev_instance];
+ /* Note: change to always set next0 if it may be set to drop */
+ next0 = t0->next_dpo.dpoi_next_node;
+ }
+ vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;
+
+ /* Get next node index and adj index from tunnel next_dpo */
+ if (sw_if_index1 != vnet_buffer(b1)->sw_if_index[VLIB_TX])
+ {
+ sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
+ hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
+ t1 = &vxm->tunnels[hi1->dev_instance];
+ /* Note: change to always set next1 if it may be set to drop */
+ next1 = t1->next_dpo.dpoi_next_node;
+ }
+ vnet_buffer(b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;
+
+ /* Apply the rewrite string. $$$$ vnet_rewrite? */
+ vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
+ vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));
+
+ if (is_ip4)
+ {
+ /* IP4 VXLAN header should be 36 octets (20 IP4 + 8 UDP + 8 VXLAN) */
+ ASSERT(sizeof(ip4_vxlan_header_t) == 36);
+ ASSERT(vec_len(t0->rewrite) == sizeof(ip4_vxlan_header_t));
+ ASSERT(vec_len(t1->rewrite) == sizeof(ip4_vxlan_header_t));
+
+ ip4_0 = vlib_buffer_get_current(b0);
+ ip4_1 = vlib_buffer_get_current(b1);
+
+ /* Copy the fixed header */
+ copy_dst0 = (u64 *) ip4_0;
+ copy_src0 = (u64 *) t0->rewrite;
+ copy_dst1 = (u64 *) ip4_1;
+ copy_src1 = (u64 *) t1->rewrite;
+ /* Copy first 32 octets 8-bytes at a time */
+#define _(offs) copy_dst0[offs] = copy_src0[offs];
+ foreach_fixed_header4_offset;
+#undef _
+#define _(offs) copy_dst1[offs] = copy_src1[offs];
+ foreach_fixed_header4_offset;
+#undef _
+ /* Last 4 octets. Hopefully gcc will be our friend */
+ copy_dst_last0 = (u32 *)(&copy_dst0[4]);
+ copy_src_last0 = (u32 *)(&copy_src0[4]);
+ copy_dst_last0[0] = copy_src_last0[0];
+ copy_dst_last1 = (u32 *)(&copy_dst1[4]);
+ copy_src_last1 = (u32 *)(&copy_src1[4]);
+ copy_dst_last1[0] = copy_src_last1[0];
+
+ /* Fix the IP4 checksum and length */
+ sum0 = ip4_0->checksum;
+ new_l0 = /* old_l0 always 0, see the rewrite setup */
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+ sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
+ length /* changed member */);
+ ip4_0->checksum = ip_csum_fold (sum0);
+ ip4_0->length = new_l0;
+ sum1 = ip4_1->checksum;
+ new_l1 = /* old_l1 always 0, see the rewrite setup */
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
+ sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
+ length /* changed member */);
+ ip4_1->checksum = ip_csum_fold (sum1);
+ ip4_1->length = new_l1;
+
+ /* Fix UDP length and set source port */
+ udp0 = (udp_header_t *)(ip4_0+1);
+ new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
+ - sizeof (*ip4_0));
+ udp0->length = new_l0;
+ udp0->src_port = flow_hash0;
+ udp1 = (udp_header_t *)(ip4_1+1);
+ new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
+ - sizeof (*ip4_1));
+ udp1->length = new_l1;
+ udp1->src_port = flow_hash1;
+ }
+ else /* ipv6 */
+ {
+ int bogus = 0;
+
+ /* IP6 VXLAN header should be 56 octets (40 IP6 + 8 UDP + 8 VXLAN) */
+ ASSERT(sizeof(ip6_vxlan_header_t) == 56);
+ ASSERT(vec_len(t0->rewrite) == sizeof(ip6_vxlan_header_t));
+ ASSERT(vec_len(t1->rewrite) == sizeof(ip6_vxlan_header_t));
+ ip6_0 = vlib_buffer_get_current(b0);
+ ip6_1 = vlib_buffer_get_current(b1);
+
+ /* Copy the fixed header */
+ copy_dst0 = (u64 *) ip6_0;
+ copy_src0 = (u64 *) t0->rewrite;
+ copy_dst1 = (u64 *) ip6_1;
+ copy_src1 = (u64 *) t1->rewrite;
+ /* Copy first 56 (ip6) octets 8-bytes at a time */
+#define _(offs) copy_dst0[offs] = copy_src0[offs];
+ foreach_fixed_header6_offset;
+#undef _
+#define _(offs) copy_dst1[offs] = copy_src1[offs];
+ foreach_fixed_header6_offset;
+#undef _
+ /* Fix IP6 payload length */
+ new_l0 =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
+ - sizeof(*ip6_0));
+ ip6_0->payload_length = new_l0;
+ new_l1 =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
+ - sizeof(*ip6_1));
+ ip6_1->payload_length = new_l1;
+
+ /* Fix UDP length and set source port */
+ udp0 = (udp_header_t *)(ip6_0+1);
+ udp0->length = new_l0;
+ udp0->src_port = flow_hash0;
+ udp1 = (udp_header_t *)(ip6_1+1);
+ udp1->length = new_l1;
+ udp1->src_port = flow_hash1;
+
+ /* IPv6 UDP checksum is mandatory; a zero checksum means "none" on
+    the wire, so a computed value of 0 is transmitted as 0xffff */
+ udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
+ ip6_0, &bogus);
+ ASSERT(bogus == 0);
+ if (udp0->checksum == 0)
+ udp0->checksum = 0xffff;
+ udp1->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b1,
+ ip6_1, &bogus);
+ ASSERT(bogus == 0);
+ if (udp1->checksum == 0)
+ udp1->checksum = 0xffff;
+ }
+
+ pkts_encapsulated += 2;
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+ len1 = vlib_buffer_length_in_chain (vm, b1);
+ stats_n_packets += 2;
+ stats_n_bytes += len0 + len1;
+
+ /* Batch stats increment on the same vxlan tunnel so counter is not
+ incremented per packet. Note stats are still incremented for deleted
+ and admin-down tunnel where packets are dropped. It is not worthwhile
+ to check for this rare case and affect normal path performance. */
+ if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
+ (sw_if_index1 != stats_sw_if_index)))
+ {
+ stats_n_packets -= 2;
+ stats_n_bytes -= len0 + len1;
+ if (sw_if_index0 == sw_if_index1)
+ {
+ if (stats_n_packets)
+ vlib_increment_combined_counter
+ (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, stats_sw_if_index,
+ stats_n_packets, stats_n_bytes);
+ stats_sw_if_index = sw_if_index0;
+ stats_n_packets = 2;
+ stats_n_bytes = len0 + len1;
+ }
+ else
+ {
+ vlib_increment_combined_counter
+ (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, sw_if_index0, 1, len0);
+ vlib_increment_combined_counter
+ (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, sw_if_index1, 1, len1);
+ }
+ }
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->tunnel_index = t0 - vxm->tunnels;
+ tr->vni = t0->vni;
+ }
+
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b1, sizeof (*tr));
+ tr->tunnel_index = t1 - vxm->tunnels;
+ tr->vni = t1->vni;
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 flow_hash0;
+ u32 len0;
+ ip4_header_t * ip4_0;
+ ip6_header_t * ip6_0;
+ udp_header_t * udp0;
+ u64 * copy_src0, * copy_dst0;
+ u32 * copy_src_last0, * copy_dst_last0;
+ u16 new_l0;
+ ip_csum_t sum0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ flow_hash0 = vnet_l2_compute_flow_hash(b0);
+
+ /* Get next node index and adj index from tunnel next_dpo */
+ if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
+ {
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
+ hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
+ t0 = &vxm->tunnels[hi0->dev_instance];
+ /* Note: change to always set next0 if it may be set to drop */
+ next0 = t0->next_dpo.dpoi_next_node;
+ }
+ vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;
+
+ /* Apply the rewrite string. $$$$ vnet_rewrite? */
+ vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
+
+ if (is_ip4)
+ {
+ /* IP4 VXLAN header should be 36 octets (20 IP4 + 8 UDP + 8 VXLAN) */
+ ASSERT(sizeof(ip4_vxlan_header_t) == 36);
+ ASSERT(vec_len(t0->rewrite) == sizeof(ip4_vxlan_header_t));
+ ip4_0 = vlib_buffer_get_current(b0);
+
+ /* Copy the fixed header */
+ copy_dst0 = (u64 *) ip4_0;
+ copy_src0 = (u64 *) t0->rewrite;
+ /* Copy first 32 octets 8-bytes at a time */
+#define _(offs) copy_dst0[offs] = copy_src0[offs];
+ foreach_fixed_header4_offset;
+#undef _
+ /* Last 4 octets. Hopefully gcc will be our friend */
+ copy_dst_last0 = (u32 *)(&copy_dst0[4]);
+ copy_src_last0 = (u32 *)(&copy_src0[4]);
+ copy_dst_last0[0] = copy_src_last0[0];
+
+ /* Fix the IP4 checksum and length */
+ sum0 = ip4_0->checksum;
+ new_l0 = /* old_l0 always 0, see the rewrite setup */
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+ sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
+ length /* changed member */);
+ ip4_0->checksum = ip_csum_fold (sum0);
+ ip4_0->length = new_l0;
+
+ /* Fix UDP length and set source port */
+ udp0 = (udp_header_t *)(ip4_0+1);
+ new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
+ - sizeof (*ip4_0));
+ udp0->length = new_l0;
+ udp0->src_port = flow_hash0;
+ }
+
+ else /* ip6 path */
+ {
+ int bogus = 0;
+
+ /* IP6 VXLAN header should be 56 octets (40 IP6 + 8 UDP + 8 VXLAN) */
+ ASSERT(sizeof(ip6_vxlan_header_t) == 56);
+ ASSERT(vec_len(t0->rewrite) == sizeof(ip6_vxlan_header_t));
+ ip6_0 = vlib_buffer_get_current(b0);
+ /* Copy the fixed header */
+ copy_dst0 = (u64 *) ip6_0;
+ copy_src0 = (u64 *) t0->rewrite;
+ /* Copy first 56 (ip6) octets 8-bytes at a time */
+#define _(offs) copy_dst0[offs] = copy_src0[offs];
+ foreach_fixed_header6_offset;
+#undef _
+ /* Fix IP6 payload length */
+ new_l0 =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
+ - sizeof(*ip6_0));
+ ip6_0->payload_length = new_l0;
+
+ /* Fix UDP length and set source port */
+ udp0 = (udp_header_t *)(ip6_0+1);
+ udp0->length = new_l0;
+ udp0->src_port = flow_hash0;
+
+ /* IPv6 UDP checksum is mandatory; a zero checksum means "none" on
+    the wire, so a computed value of 0 is transmitted as 0xffff */
+ udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
+ ip6_0, &bogus);
+ ASSERT(bogus == 0);
+ if (udp0->checksum == 0)
+ udp0->checksum = 0xffff;
+ }
+
+ pkts_encapsulated ++;
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
+
+ /* Batch stats increment on the same vxlan tunnel so counter is not
+ incremented per packet. Note stats are still incremented for deleted
+ and admin-down tunnel where packets are dropped. It is not worthwhile
+ to check for this rare case and affect normal path performance. */
+ if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len0;
+ if (stats_n_packets)
+ vlib_increment_combined_counter
+ (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, stats_sw_if_index,
+ stats_n_packets, stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len0;
+ stats_sw_if_index = sw_if_index0;
+ }
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->tunnel_index = t0 - vxm->tunnels;
+ tr->vni = t0->vni;
+ }
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ /* Do we still need this now that tunnel tx stats is kept? */
+ vlib_node_increment_counter (vm, node->node_index,
+ VXLAN_ENCAP_ERROR_ENCAPSULATED,
+ pkts_encapsulated);
+
+ /* Increment any remaining batch stats */
+ if (stats_n_packets)
+ {
+ vlib_increment_combined_counter
+ (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ node->runtime_data[0] = stats_sw_if_index;
+ }
+
+ return from_frame->n_vectors;
+}
+
+static uword
+vxlan4_encap (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
+}
+
+static uword
+vxlan6_encap (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
+}
+
+VLIB_REGISTER_NODE (vxlan4_encap_node) = {
+ .function = vxlan4_encap,
+ .name = "vxlan4-encap",
+ .vector_size = sizeof (u32),
+ .format_trace = format_vxlan_encap_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
+ .error_strings = vxlan_encap_error_strings,
+ .n_next_nodes = VXLAN_ENCAP_N_NEXT,
+ .next_nodes = {
+ [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_encap_node, vxlan4_encap)
+
+VLIB_REGISTER_NODE (vxlan6_encap_node) = {
+ .function = vxlan6_encap,
+ .name = "vxlan6-encap",
+ .vector_size = sizeof (u32),
+ .format_trace = format_vxlan_encap_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
+ .error_strings = vxlan_encap_error_strings,
+ .n_next_nodes = VXLAN_ENCAP_N_NEXT,
+ .next_nodes = {
+ [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_encap_node, vxlan6_encap)
+
diff --git a/src/vnet/vxlan/vxlan.api b/src/vnet/vxlan/vxlan.api
new file mode 100644
index 00000000000..048220fba52
--- /dev/null
+++ b/src/vnet/vxlan/vxlan.api
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
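+/** \brief Create or delete a VXLAN tunnel
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param is_add - non-zero to add, zero to delete the tunnel
+    @param is_ipv6 - non-zero if the underlay addresses are IPv6
+    @param src_address - tunnel source address
+    @param dst_address - tunnel destination address, unicast or multicast
+    @param mcast_sw_if_index - interface used to reach the multicast group
+    @param encap_vrf_id - id of the VRF used to encapsulate packets
+    @param decap_next_index - graph node for decapsulated packets, ~0 selects l2
+    @param vni - 24-bit VXLAN network identifier
+*/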
+define vxlan_add_del_tunnel
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 is_ipv6;
+ u8 src_address[16];
+ u8 dst_address[16];
+ u32 mcast_sw_if_index;
+ u32 encap_vrf_id;
+ u32 decap_next_index;
+ u32 vni;
+};
+
+define vxlan_add_del_tunnel_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+define vxlan_tunnel_dump
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+define vxlan_tunnel_details
+{
+ u32 context;
+ u32 sw_if_index;
+ u8 src_address[16];
+ u8 dst_address[16];
+ u32 mcast_sw_if_index;
+ u32 encap_vrf_id;
+ u32 decap_next_index;
+ u32 vni;
+ u8 is_ipv6;
+};
+
+/** \brief Interface set vxlan-bypass request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface used to reach neighbor
+ @param is_ipv6 - if non-zero, enable ipv6-vxlan-bypass, else ipv4-vxlan-bypass
+ @param enable - if non-zero enable, else disable
+*/
+define sw_interface_set_vxlan_bypass
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 is_ipv6;
+ u8 enable;
+};
+
+/** \brief Interface set vxlan-bypass response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define sw_interface_set_vxlan_bypass_reply
+{
+ u32 context;
+ i32 retval;
+}; \ No newline at end of file
diff --git a/src/vnet/vxlan/vxlan.c b/src/vnet/vxlan/vxlan.c
new file mode 100644
index 00000000000..abf0182f91f
--- /dev/null
+++ b/src/vnet/vxlan/vxlan.c
@@ -0,0 +1,899 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/vxlan/vxlan.h>
+#include <vnet/ip/format.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/dpo/receive_dpo.h>
+#include <vlib/vlib.h>
+
+/**
+ * @file
+ * @brief VXLAN.
+ *
+ * VXLAN provides the features needed to allow L2 bridge domains (BDs)
+ * to span multiple servers. This is done by building an L2 overlay on
+ * top of an L3 network underlay using VXLAN tunnels.
+ *
+ * This makes it possible for servers to be co-located in the same data
+ * center or be separated geographically as long as they are reachable
+ * through the underlay L3 network.
+ *
+ * You can refer to this kind of L2 overlay bridge domain as a VXLAN
+ * (Virtual eXtensible LAN) segment.
+ */
+
+
+vxlan_main_t vxlan_main;
+
+static u8 * format_decap_next (u8 * s, va_list * args)
+{
+ u32 next_index = va_arg (*args, u32);
+
+ switch (next_index)
+ {
+ case VXLAN_INPUT_NEXT_DROP:
+ return format (s, "drop");
+ case VXLAN_INPUT_NEXT_L2_INPUT:
+ return format (s, "l2");
+ default:
+ return format (s, "index %d", next_index);
+ }
+ return s;
+}
+
+u8 * format_vxlan_tunnel (u8 * s, va_list * args)
+{
+ vxlan_tunnel_t * t = va_arg (*args, vxlan_tunnel_t *);
+ vxlan_main_t * ngm = &vxlan_main;
+
+ s = format (s, "[%d] src %U dst %U vni %d sw_if_index %d ",
+ t - ngm->tunnels,
+ format_ip46_address, &t->src, IP46_TYPE_ANY,
+ format_ip46_address, &t->dst, IP46_TYPE_ANY,
+ t->vni, t->sw_if_index);
+
+ if (ip46_address_is_multicast (&t->dst))
+ s = format (s, "mcast_sw_if_index %d ", t->mcast_sw_if_index);
+
+ s = format (s, "encap_fib_index %d fib_entry_index %d decap_next %U\n",
+ t->encap_fib_index, t->fib_entry_index,
+ format_decap_next, t->decap_next_index);
+ return s;
+}
+
+static u8 * format_vxlan_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, "vxlan_tunnel%d", dev_instance);
+}
+
+static uword dummy_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ clib_warning ("you shouldn't be here, leaking buffers...");
+ return frame->n_vectors;
+}
+
+static clib_error_t *
+vxlan_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+{
+ if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ vnet_hw_interface_set_flags (vnm, hw_if_index, VNET_HW_INTERFACE_FLAG_LINK_UP);
+ else
+ vnet_hw_interface_set_flags (vnm, hw_if_index, 0);
+
+ return /* no error */ 0;
+}
+
+VNET_DEVICE_CLASS (vxlan_device_class,static) = {
+ .name = "VXLAN",
+ .format_device_name = format_vxlan_name,
+ .format_tx_trace = format_vxlan_encap_trace,
+ .tx_function = dummy_interface_tx,
+ .admin_up_down_function = vxlan_interface_admin_up_down,
+};
+
+static u8 * format_vxlan_header_with_length (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ s = format (s, "unimplemented dev %u", dev_instance);
+ return s;
+}
+
+VNET_HW_INTERFACE_CLASS (vxlan_hw_class) = {
+ .name = "VXLAN",
+ .format_header = format_vxlan_header_with_length,
+ .build_rewrite = default_build_rewrite,
+};
+
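+/**
+ * Restack the tunnel's output DPO on the current forwarding contributed
+ * by the destination's FIB entry, so the encap node always has a valid
+ * underlay path to follow.
+ */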
+static void
+vxlan_tunnel_restack_dpo(vxlan_tunnel_t * t)
+{
+ dpo_id_t dpo = DPO_INVALID;
+ u32 encap_index = ip46_address_is_ip4(&t->dst) ?
+ vxlan4_encap_node.index : vxlan6_encap_node.index;
+ fib_forward_chain_type_t forw_type = ip46_address_is_ip4(&t->dst) ?
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4 : FIB_FORW_CHAIN_TYPE_UNICAST_IP6;
+
+ fib_entry_contribute_forwarding (t->fib_entry_index, forw_type, &dpo);
+ dpo_stack_from_node (encap_index, &t->next_dpo, &dpo);
+ dpo_reset(&dpo);
+}
+
+static vxlan_tunnel_t *
+vxlan_tunnel_from_fib_node (fib_node_t *node)
+{
+#if (CLIB_DEBUG > 0)
+ ASSERT(FIB_NODE_TYPE_VXLAN_TUNNEL == node->fn_type);
+#endif
+ return ((vxlan_tunnel_t*) (((char*)node) -
+ STRUCT_OFFSET_OF(vxlan_tunnel_t, node)));
+}
+
+/**
+ * Function definition to backwalk a FIB node -
+ * Here we will restack the new dpo of VXLAN DIP to encap node.
+ */
+static fib_node_back_walk_rc_t
+vxlan_tunnel_back_walk (fib_node_t *node,
+ fib_node_back_walk_ctx_t *ctx)
+{
+ vxlan_tunnel_restack_dpo(vxlan_tunnel_from_fib_node(node));
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/**
+ * Function definition to get a FIB node from its index
+ */
+static fib_node_t*
+vxlan_tunnel_fib_node_get (fib_node_index_t index)
+{
+ vxlan_tunnel_t * t;
+ vxlan_main_t * vxm = &vxlan_main;
+
+ t = pool_elt_at_index(vxm->tunnels, index);
+
+ return (&t->node);
+}
+
+/**
+ * Function definition to inform the FIB node that its last lock has gone.
+ */
+static void
+vxlan_tunnel_last_lock_gone (fib_node_t *node)
+{
+ /*
+ * The VXLAN tunnel is a root of the graph. As such
+ * it never has children and thus is never locked.
+ */
+ ASSERT(0);
+}
+
+/*
+ * Virtual function table registered by VXLAN tunnels
+ * for participation in the FIB object graph.
+ */
+const static fib_node_vft_t vxlan_vft = {
+ .fnv_get = vxlan_tunnel_fib_node_get,
+ .fnv_last_lock = vxlan_tunnel_last_lock_gone,
+ .fnv_back_walk = vxlan_tunnel_back_walk,
+};
+
+
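+/* Tunnel config fields copied verbatim from the add/del argument block,
+ * expanded below as "t->x = a->x". */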
+#define foreach_copy_field \
+_(vni) \
+_(mcast_sw_if_index) \
+_(encap_fib_index) \
+_(decap_next_index) \
+_(src) \
+_(dst)
+
+static int vxlan4_rewrite (vxlan_tunnel_t * t)
+{
+ u8 *rw = 0;
+ ip4_header_t * ip0;
+ ip4_vxlan_header_t * h0;
+ int len = sizeof (*h0);
+
+ vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES);
+
+ h0 = (ip4_vxlan_header_t *) rw;
+
+ /* Fixed portion of the (outer) ip4 header */
+ ip0 = &h0->ip4;
+ ip0->ip_version_and_header_length = 0x45;
+ ip0->ttl = 254;
+ ip0->protocol = IP_PROTOCOL_UDP;
+
+ /* we fix up the ip4 header length and checksum after-the-fact */
+ ip0->src_address.as_u32 = t->src.ip4.as_u32;
+ ip0->dst_address.as_u32 = t->dst.ip4.as_u32;
+ ip0->checksum = ip4_header_checksum (ip0);
+
+ /* UDP header, randomize src port on something, maybe? */
+ h0->udp.src_port = clib_host_to_net_u16 (4789);
+ h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_vxlan);
+
+ /* VXLAN header */
+ vnet_set_vni_and_flags(&h0->vxlan, t->vni);
+
+ t->rewrite = rw;
+ return (0);
+}
+
+static int vxlan6_rewrite (vxlan_tunnel_t * t)
+{
+ u8 *rw = 0;
+ ip6_header_t * ip0;
+ ip6_vxlan_header_t * h0;
+ int len = sizeof (*h0);
+
+ vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES);
+
+ h0 = (ip6_vxlan_header_t *) rw;
+
+ /* Fixed portion of the (outer) ip6 header */
+ ip0 = &h0->ip6;
+ ip0->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32(6 << 28);
+ ip0->hop_limit = 255;
+ ip0->protocol = IP_PROTOCOL_UDP;
+
+ ip0->src_address.as_u64[0] = t->src.ip6.as_u64[0];
+ ip0->src_address.as_u64[1] = t->src.ip6.as_u64[1];
+ ip0->dst_address.as_u64[0] = t->dst.ip6.as_u64[0];
+ ip0->dst_address.as_u64[1] = t->dst.ip6.as_u64[1];
+
+ /* UDP header, randomize src port on something, maybe? */
+ h0->udp.src_port = clib_host_to_net_u16 (4789);
+ h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_vxlan);
+
+ /* VXLAN header */
+ vnet_set_vni_and_flags(&h0->vxlan, t->vni);
+
+ t->rewrite = rw;
+ return (0);
+}
+
+static int vxlan_check_decap_next(vxlan_main_t * vxm, u32 is_ip6, u32 decap_next_index)
+{
+ vlib_main_t * vm = vxm->vlib_main;
+ vlib_node_runtime_t *r;
+
+ if(!is_ip6)
+ {
+ r = vlib_node_get_runtime (vm, vxlan4_input_node.index);
+ if(decap_next_index >= r->n_next_nodes)
+ return 1;
+ }
+ else
+ {
+ r = vlib_node_get_runtime (vm, vxlan6_input_node.index);
+ if(decap_next_index >= r->n_next_nodes)
+ return 1;
+ }
+
+ return 0;
+}
+
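+/* Reference counting of local VTEP (tunnel source) addresses: the vtep4
+ * hash consulted by the ip4-vxlan-bypass node keeps an entry for as long
+ * as at least one tunnel is sourced from that address. IP6 addresses are
+ * not tracked yet, so a ref always "creates" and an unref always
+ * "destroys". */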
+static uword
+vtep_addr_ref(ip46_address_t *ip)
+{
+ if (!ip46_address_is_ip4(ip))
+ return 1; /* always create */
+ uword *pvtep = hash_get (vxlan_main.vtep4, ip->ip4.as_u32);
+ if (pvtep)
+ return ++pvtep[0];
+ hash_set (vxlan_main.vtep4, ip->ip4.as_u32, 1);
+ return 1;
+}
+
+static uword
+vtep_addr_unref(ip46_address_t *ip)
+{
+ if (!ip46_address_is_ip4(ip))
+ return 0; /* always destroy */
+ uword *pvtep = hash_get (vxlan_main.vtep4, ip->ip4.as_u32);
+ ASSERT(pvtep);
+ if (!(--pvtep[0]))
+ hash_unset (vxlan_main.vtep4, ip->ip4.as_u32);
+ return pvtep[0];
+}
+
+static
+mcast_remote_t *
+mcast_ep_get(ip46_address_t * ip)
+{
+ ASSERT(ip46_address_is_multicast(ip));
+ uword * ep_idx = hash_get_mem (vxlan_main.mcast_ep_by_ip, ip);
+ ASSERT(ep_idx);
+ return pool_elt_at_index(vxlan_main.mcast_eps, *ep_idx);
+}
+
+static void
+mcast_ep_add(mcast_remote_t * new_ep)
+{
+ mcast_remote_t * ep;
+
+ pool_get_aligned (vxlan_main.mcast_eps, ep, CLIB_CACHE_LINE_BYTES);
+ *ep = *new_ep;
+ hash_set_mem (vxlan_main.mcast_ep_by_ip, &ep->ip, ep - vxlan_main.mcast_eps);
+}
+
+static void
+mcast_ep_remove(mcast_remote_t * ep)
+{
+ hash_unset_mem (vxlan_main.mcast_ep_by_ip, &ep->ip);
+ pool_put (vxlan_main.mcast_eps, ep);
+}
+
+static void
+ip46_multicast_ethernet_address(u8 * ethernet_address, ip46_address_t * ip) {
+ if (ip46_address_is_ip4(ip))
+ ip4_multicast_ethernet_address(ethernet_address, &ip->ip4);
+ else
+ ip6_multicast_ethernet_address(ethernet_address, ip->ip6.as_u32[0]);
+}
+
+int vnet_vxlan_add_del_tunnel
+(vnet_vxlan_add_del_tunnel_args_t *a, u32 * sw_if_indexp)
+{
+ vxlan_main_t * vxm = &vxlan_main;
+ vxlan_tunnel_t *t = 0;
+ vnet_main_t * vnm = vxm->vnet_main;
+ uword * p;
+ u32 hw_if_index = ~0;
+ u32 sw_if_index = ~0;
+ int rv;
+ vxlan4_tunnel_key_t key4;
+ vxlan6_tunnel_key_t key6;
+ u32 is_ip6 = a->is_ip6;
+
+ if (!is_ip6)
+ {
+ key4.src = a->dst.ip4.as_u32; /* decap src in key is encap dst in config */
+ key4.vni = clib_host_to_net_u32 (a->vni << 8);
+ p = hash_get (vxm->vxlan4_tunnel_by_key, key4.as_u64);
+ }
+ else
+ {
+ key6.src.as_u64[0] = a->dst.ip6.as_u64[0];
+ key6.src.as_u64[1] = a->dst.ip6.as_u64[1];
+ key6.vni = clib_host_to_net_u32 (a->vni << 8);
+ p = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6);
+ }
+
+ if (a->is_add)
+ {
+ l2input_main_t * l2im = &l2input_main;
+
+ /* adding a tunnel: tunnel must not already exist */
+ if (p)
+ return VNET_API_ERROR_TUNNEL_EXIST;
+
+ /* if not set explicitly, default to l2 */
+ if(a->decap_next_index == ~0)
+ a->decap_next_index = VXLAN_INPUT_NEXT_L2_INPUT;
+ if (vxlan_check_decap_next(vxm, is_ip6, a->decap_next_index))
+ return VNET_API_ERROR_INVALID_DECAP_NEXT;
+
+ pool_get_aligned (vxm->tunnels, t, CLIB_CACHE_LINE_BYTES);
+ memset (t, 0, sizeof (*t));
+
+ /* copy from arg structure */
+#define _(x) t->x = a->x;
+ foreach_copy_field;
+#undef _
+
+ /* copy the key */
+ if (is_ip6)
+ {
+ t->key6 = clib_mem_alloc (sizeof(vxlan6_tunnel_key_t));
+ clib_memcpy (t->key6, &key6, sizeof(key6));
+ }
+ else
+ {
+ t->key4 = 0; /* not yet used */
+ }
+
+ if (!is_ip6)
+ rv = vxlan4_rewrite (t);
+ else
+ rv = vxlan6_rewrite (t);
+
+ if (rv)
+ {
+ pool_put (vxm->tunnels, t);
+ return rv;
+ }
+
+ if (!is_ip6)
+ hash_set (vxm->vxlan4_tunnel_by_key, key4.as_u64, t - vxm->tunnels);
+ else
+ hash_set_mem (vxm->vxlan6_tunnel_by_key, t->key6, t - vxm->tunnels);
+
+ vnet_hw_interface_t * hi;
+ if (vec_len (vxm->free_vxlan_tunnel_hw_if_indices) > 0)
+ {
+ vnet_interface_main_t * im = &vnm->interface_main;
+ hw_if_index = vxm->free_vxlan_tunnel_hw_if_indices
+ [vec_len (vxm->free_vxlan_tunnel_hw_if_indices)-1];
+ _vec_len (vxm->free_vxlan_tunnel_hw_if_indices) -= 1;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ hi->dev_instance = t - vxm->tunnels;
+ hi->hw_instance = hi->dev_instance;
+
+ /* clear old stats of freed tunnel before reuse */
+ sw_if_index = hi->sw_if_index;
+ vnet_interface_counter_lock(im);
+ vlib_zero_combined_counter
+ (&im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_TX], sw_if_index);
+ vlib_zero_combined_counter
+ (&im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_RX], sw_if_index);
+ vlib_zero_simple_counter
+ (&im->sw_if_counters[VNET_INTERFACE_COUNTER_DROP], sw_if_index);
+ vnet_interface_counter_unlock(im);
+ }
+ else
+ {
+ hw_if_index = vnet_register_interface
+ (vnm, vxlan_device_class.index, t - vxm->tunnels,
+ vxlan_hw_class.index, t - vxm->tunnels);
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ }
+
+ t->hw_if_index = hw_if_index;
+ t->sw_if_index = sw_if_index = hi->sw_if_index;
+
+ vec_validate_init_empty (vxm->tunnel_index_by_sw_if_index, sw_if_index, ~0);
+ vxm->tunnel_index_by_sw_if_index[sw_if_index] = t - vxm->tunnels;
+
+ /* set up L2 input config with the drop feature and bd_index 0, so packets are dropped until the interface is placed in a BD or xconnect */
+ vec_validate (l2im->configs, sw_if_index);
+ l2im->configs[sw_if_index].feature_bitmap = L2INPUT_FEAT_DROP;
+ l2im->configs[sw_if_index].bd_index = 0;
+
+ vnet_sw_interface_set_flags (vnm, sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ fib_node_init(&t->node, FIB_NODE_TYPE_VXLAN_TUNNEL);
+ fib_prefix_t tun_dst_pfx;
+ u32 encap_index = !is_ip6 ?
+ vxlan4_encap_node.index : vxlan6_encap_node.index;
+ vnet_flood_class_t flood_class = VNET_FLOOD_CLASS_TUNNEL_NORMAL;
+
+ fib_prefix_from_ip46_addr(&t->dst, &tun_dst_pfx);
+ if (!ip46_address_is_multicast(&t->dst))
+ {
+ /* Unicast tunnel -
+ * source the FIB entry for the tunnel's destination
+ * and become a child thereof. The tunnel will then get poked
+ * when the forwarding for the entry updates, and the tunnel can
+ * re-stack accordingly
+ */
+ vtep_addr_ref(&t->src);
+ t->fib_entry_index = fib_table_entry_special_add
+ (t->encap_fib_index, &tun_dst_pfx, FIB_SOURCE_RR,
+ FIB_ENTRY_FLAG_NONE, ADJ_INDEX_INVALID);
+ t->sibling_index = fib_entry_child_add
+ (t->fib_entry_index, FIB_NODE_TYPE_VXLAN_TUNNEL, t - vxm->tunnels);
+ vxlan_tunnel_restack_dpo(t);
+ }
+ else
+ {
+ /* Multicast tunnel -
+ * as the same mcast group can be used for multiple mcast tunnels
+ * with different VNIs, create the output FIB adjacency only if
+ * it does not already exist
+ */
+ fib_protocol_t fp = (is_ip6) ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4;
+ dpo_id_t dpo = DPO_INVALID;
+ dpo_proto_t dproto = fib_proto_to_dpo(fp);
+
+ if (vtep_addr_ref(&t->dst) == 1)
+ {
+ u8 mcast_mac[6];
+
+ ip46_multicast_ethernet_address(mcast_mac, &t->dst);
+ receive_dpo_add_or_lock(dproto, ~0, NULL, &dpo);
+ mcast_remote_t new_ep = {
+ .ip = t->dst,
+ .mcast_adj_index = adj_rewrite_add_and_lock
+ (fp, fib_proto_to_link(fp), a->mcast_sw_if_index, mcast_mac),
+ /* Add VRF local mcast adj. */
+ .fib_entry_index = fib_table_entry_special_dpo_add
+ (t->encap_fib_index, &tun_dst_pfx,
+ FIB_SOURCE_SPECIAL, FIB_ENTRY_FLAG_NONE, &dpo)
+ };
+ mcast_ep_add(&new_ep);
+ dpo_reset(&dpo);
+ }
+ /* Stack shared mcast dst mac addr rewrite on encap */
+ dpo_set (&dpo, DPO_ADJACENCY, dproto,
+ mcast_ep_get(&t->dst)->mcast_adj_index);
+ dpo_stack_from_node (encap_index, &t->next_dpo, &dpo);
+ dpo_reset (&dpo);
+ flood_class = VNET_FLOOD_CLASS_TUNNEL_MASTER;
+ }
+
+ /* Set vxlan tunnel output node */
+ hi->output_node_index = encap_index;
+
+ vnet_get_sw_interface (vnet_get_main(), sw_if_index)->flood_class = flood_class;
+ }
+ else
+ {
+ /* deleting a tunnel: tunnel must exist */
+ if (!p)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ t = pool_elt_at_index (vxm->tunnels, p[0]);
+
+ vnet_sw_interface_set_flags (vnm, t->sw_if_index, 0 /* down */);
+ /* make sure tunnel is removed from l2 bd or xconnect */
+ set_int_l2_mode(vxm->vlib_main, vnm, MODE_L3, t->sw_if_index, 0, 0, 0, 0);
+ vec_add1 (vxm->free_vxlan_tunnel_hw_if_indices, t->hw_if_index);
+
+ vxm->tunnel_index_by_sw_if_index[t->sw_if_index] = ~0;
+
+ if (!is_ip6)
+ hash_unset (vxm->vxlan4_tunnel_by_key, key4.as_u64);
+ else
+ {
+ hash_unset_mem (vxm->vxlan6_tunnel_by_key, t->key6);
+ clib_mem_free (t->key6);
+ }
+
+ if (!ip46_address_is_multicast(&t->dst))
+ {
+ /* src is not part of the lookup key, so unref the address stored
+  * on the tunnel (the one ref'd at create time), not the caller's */
+ vtep_addr_unref(&t->src);
+ fib_entry_child_remove(t->fib_entry_index, t->sibling_index);
+ fib_table_entry_delete_index(t->fib_entry_index, FIB_SOURCE_RR);
+ }
+ else if (vtep_addr_unref(&t->dst) == 0)
+ {
+ mcast_remote_t* ep = mcast_ep_get(&t->dst);
+ adj_unlock(ep->mcast_adj_index);
+ fib_table_entry_delete_index(ep->fib_entry_index, FIB_SOURCE_SPECIAL);
+ mcast_ep_remove(ep);
+ }
+
+ fib_node_deinit(&t->node);
+ vec_free (t->rewrite);
+ pool_put (vxm->tunnels, t);
+ }
+
+ if (sw_if_indexp)
+ *sw_if_indexp = sw_if_index;
+
+ return 0;
+}
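[Editorial note: for callers that are neither the CLI nor the binary API, a minimal sketch of creating a unicast IPv4 tunnel programmatically; the addresses, VNI and FIB index below are placeholder values:

    vnet_vxlan_add_del_tunnel_args_t a;
    memset (&a, 0, sizeof a);
    a.is_add = 1;
    a.is_ip6 = 0;
    a.vni = 13;                                     /* placeholder VNI    */
    a.encap_fib_index = 0;                          /* default table      */
    a.decap_next_index = VXLAN_INPUT_NEXT_L2_INPUT;
    a.mcast_sw_if_index = ~0;                       /* unused for unicast */
    a.src.ip4.as_u32 = clib_host_to_net_u32 (0x0a000301); /* 10.0.3.1 */
    a.dst.ip4.as_u32 = clib_host_to_net_u32 (0x0a000303); /* 10.0.3.3 */

    u32 sw_if_index;
    int rv = vnet_vxlan_add_del_tunnel (&a, &sw_if_index);
    /* rv == 0 on success; VNET_API_ERROR_TUNNEL_EXIST if the
       (dst, vni) key is already in use */

Deletion reuses the same args with is_add = 0 and the same dst/vni key.]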
+
+static u32 fib4_index_from_fib_id (u32 fib_id)
+{
+ ip4_main_t * im = &ip4_main;
+ uword * p;
+
+ p = hash_get (im->fib_index_by_table_id, fib_id);
+ if (!p)
+ return ~0;
+
+ return p[0];
+}
+
+static u32 fib6_index_from_fib_id (u32 fib_id)
+{
+ ip6_main_t * im = &ip6_main;
+ uword * p;
+
+ p = hash_get (im->fib_index_by_table_id, fib_id);
+ if (!p)
+ return ~0;
+
+ return p[0];
+}
+
+static uword get_decap_next_for_node(u32 node_index, u32 ipv4_set)
+{
+ vxlan_main_t * vxm = &vxlan_main;
+ vlib_main_t * vm = vxm->vlib_main;
+ uword next_index = ~0;
+
+ if (ipv4_set)
+ {
+ next_index = vlib_node_add_next (vm, vxlan4_input_node.index, node_index);
+ }
+ else
+ {
+ next_index = vlib_node_add_next (vm, vxlan6_input_node.index, node_index);
+ }
+
+ return next_index;
+}
+
+static uword unformat_decap_next (unformat_input_t * input, va_list * args)
+{
+ u32 * result = va_arg (*args, u32 *);
+ u32 ipv4_set = va_arg (*args, int);
+ vxlan_main_t * vxm = &vxlan_main;
+ vlib_main_t * vm = vxm->vlib_main;
+ u32 node_index;
+ u32 tmp;
+
+ if (unformat (input, "l2"))
+ *result = VXLAN_INPUT_NEXT_L2_INPUT;
+ else if (unformat (input, "node %U", unformat_vlib_node, vm, &node_index))
+ *result = get_decap_next_for_node(node_index, ipv4_set);
+ else if (unformat (input, "%d", &tmp))
+ *result = tmp;
+ else
+ return 0;
+ return 1;
+}
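[Editorial note: the helper above accepts three spellings of the decap-next target; for illustration (the node name is just an example of an existing graph node):

    decap-next l2                  (symbolic shorthand for l2-input)
    decap-next node ip4-lookup     (any graph node, resolved by name)
    decap-next 3                   (raw next index; range-checked later by vxlan_check_decap_next)
]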
+
+static clib_error_t *
+vxlan_add_del_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, * line_input = &_line_input;
+ ip46_address_t src , dst;
+ u8 is_add = 1;
+ u8 src_set = 0;
+ u8 dst_set = 0;
+ u8 grp_set = 0;
+ u8 ipv4_set = 0;
+ u8 ipv6_set = 0;
+ u32 encap_fib_index = 0;
+ u32 mcast_sw_if_index = ~0;
+ u32 decap_next_index = VXLAN_INPUT_NEXT_L2_INPUT;
+ u32 vni = 0;
+ u32 tmp;
+ int rv;
+ vnet_vxlan_add_del_tunnel_args_t _a, * a = &_a;
+ u32 tunnel_sw_if_index;
+
+ /* Can't "universally zero init" (={0}) due to GCC bug 53119 */
+ memset(&src, 0, sizeof src);
+ memset(&dst, 0, sizeof dst);
+
+ /* Get a line of input. */
+ if (! unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (line_input, "del"))
+ {
+ is_add = 0;
+ }
+ else if (unformat (line_input, "src %U",
+ unformat_ip4_address, &src.ip4))
+ {
+ src_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "dst %U",
+ unformat_ip4_address, &dst.ip4))
+ {
+ dst_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "src %U",
+ unformat_ip6_address, &src.ip6))
+ {
+ src_set = 1;
+ ipv6_set = 1;
+ }
+ else if (unformat (line_input, "dst %U",
+ unformat_ip6_address, &dst.ip6))
+ {
+ dst_set = 1;
+ ipv6_set = 1;
+ }
+ else if (unformat (line_input, "group %U %U",
+ unformat_ip4_address, &dst.ip4,
+ unformat_vnet_sw_interface,
+ vnet_get_main(), &mcast_sw_if_index))
+ {
+ grp_set = dst_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "group %U %U",
+ unformat_ip6_address, &dst.ip6,
+ unformat_vnet_sw_interface,
+ vnet_get_main(), &mcast_sw_if_index))
+ {
+ grp_set = dst_set = 1;
+ ipv6_set = 1;
+ }
+ else if (unformat (line_input, "encap-vrf-id %d", &tmp))
+ {
+ if (ipv6_set)
+ encap_fib_index = fib6_index_from_fib_id (tmp);
+ else
+ encap_fib_index = fib4_index_from_fib_id (tmp);
+ if (encap_fib_index == ~0)
+ return clib_error_return (0, "nonexistent encap-vrf-id %d", tmp);
+ }
+ else if (unformat (line_input, "decap-next %U", unformat_decap_next,
+ &decap_next_index, ipv4_set))
+ ;
+ else if (unformat (line_input, "vni %d", &vni))
+ {
+ if (vni >> 24)
+ return clib_error_return (0, "vni %d out of range", vni);
+ }
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (src_set == 0)
+ return clib_error_return (0, "tunnel src address not specified");
+
+ if (dst_set == 0)
+ return clib_error_return (0, "tunnel dst address not specified");
+
+ if (grp_set && !ip46_address_is_multicast(&dst))
+ return clib_error_return (0, "tunnel group address not multicast");
+
+ if (grp_set == 0 && ip46_address_is_multicast(&dst))
+ return clib_error_return (0, "dst address must be unicast");
+
+ if (grp_set && mcast_sw_if_index == ~0)
+ return clib_error_return (0, "nonexistent multicast group interface");
+
+ if (ipv4_set && ipv6_set)
+ return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
+
+ if (ip46_address_cmp(&src, &dst) == 0)
+ return clib_error_return (0, "src and dst addresses are identical");
+
+ if (decap_next_index == ~0)
+ return clib_error_return (0, "next node not found");
+
+ if (vni == 0)
+ return clib_error_return (0, "vni not specified");
+
+ memset (a, 0, sizeof (*a));
+
+ a->is_add = is_add;
+ a->is_ip6 = ipv6_set;
+
+#define _(x) a->x = x;
+ foreach_copy_field;
+#undef _
+
+ rv = vnet_vxlan_add_del_tunnel (a, &tunnel_sw_if_index);
+
+ switch(rv)
+ {
+ case 0:
+ if (is_add)
+ vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name,
+ vnet_get_main(), tunnel_sw_if_index);
+ break;
+
+ case VNET_API_ERROR_TUNNEL_EXIST:
+ return clib_error_return (0, "tunnel already exists...");
+
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ return clib_error_return (0, "tunnel does not exist...");
+
+ default:
+ return clib_error_return
+ (0, "vnet_vxlan_add_del_tunnel returned %d", rv);
+ }
+
+ return 0;
+}
+
+/*?
+ * Add or delete a VXLAN Tunnel.
+ *
+ * VXLAN provides the features needed to allow L2 bridge domains (BDs)
+ * to span multiple servers. This is done by building an L2 overlay on
+ * top of an L3 network underlay using VXLAN tunnels.
+ *
+ * This makes it possible for servers to be co-located in the same data
+ * center or be separated geographically as long as they are reachable
+ * through the underlay L3 network.
+ *
+ * You can refer to this kind of L2 overlay bridge domain as a VXLAN
+ * (Virtual eXtensible LAN) segment.
+ *
+ * @cliexpar
+ * Example of how to create a VXLAN Tunnel:
+ * @cliexcmd{create vxlan tunnel src 10.0.3.1 dst 10.0.3.3 vni 13 encap-vrf-id 7}
+ * Example of how to delete a VXLAN Tunnel:
+ * @cliexcmd{create vxlan tunnel src 10.0.3.1 dst 10.0.3.3 vni 13 del}
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (create_vxlan_tunnel_command, static) = {
+ .path = "create vxlan tunnel",
+ .short_help =
+ "create vxlan tunnel src <local-vtep-addr>"
+ " {dst <remote-vtep-addr>|group <mcast-vtep-addr> <intf-name>} vni <nn>"
+ " [encap-vrf-id <nn>] [decap-next [l2|node <name>]] [del]",
+ .function = vxlan_add_del_tunnel_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+show_vxlan_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vxlan_main_t * vxm = &vxlan_main;
+ vxlan_tunnel_t * t;
+
+ if (pool_elts (vxm->tunnels) == 0)
+ vlib_cli_output (vm, "No vxlan tunnels configured...");
+
+ pool_foreach (t, vxm->tunnels,
+ ({
+ vlib_cli_output (vm, "%U", format_vxlan_tunnel, t);
+ }));
+
+ return 0;
+}
+
+/*?
+ * Display all the VXLAN Tunnel entries.
+ *
+ * @cliexpar
+ * Example of how to display the VXLAN Tunnel entries:
+ * @cliexstart{show vxlan tunnel}
+ * [0] src 10.0.3.1 dst 10.0.3.3 vni 13 encap_fib_index 0 sw_if_index 5 decap_next l2
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_vxlan_tunnel_command, static) = {
+ .path = "show vxlan tunnel",
+ .short_help = "show vxlan tunnel",
+ .function = show_vxlan_tunnel_command_fn,
+};
+/* *INDENT-ON* */
+
+
+clib_error_t *vxlan_init (vlib_main_t *vm)
+{
+ vxlan_main_t * vxm = &vxlan_main;
+
+ vxm->vnet_main = vnet_get_main();
+ vxm->vlib_main = vm;
+
+ /* initialize the ip6 hash */
+ vxm->vxlan6_tunnel_by_key = hash_create_mem(0,
+ sizeof(vxlan6_tunnel_key_t),
+ sizeof(uword));
+ vxm->mcast_ep_by_ip = hash_create_mem(0,
+ sizeof(ip46_address_t),
+ sizeof(uword));
+
+ udp_register_dst_port (vm, UDP_DST_PORT_vxlan,
+ vxlan4_input_node.index, /* is_ip4 */ 1);
+ udp_register_dst_port (vm, UDP_DST_PORT_vxlan6,
+ vxlan6_input_node.index, /* is_ip4 */ 0);
+
+ fib_node_register_type(FIB_NODE_TYPE_VXLAN_TUNNEL, &vxlan_vft);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION(vxlan_init);
diff --git a/src/vnet/vxlan/vxlan.h b/src/vnet/vxlan/vxlan.h
new file mode 100644
index 00000000000..7605d94aa98
--- /dev/null
+++ b/src/vnet/vxlan/vxlan.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vnet_vxlan_h
+#define included_vnet_vxlan_h
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_output.h>
+#include <vnet/l2/l2_bd.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/vxlan/vxlan_packet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/udp.h>
+#include <vnet/dpo/dpo.h>
+#include <vnet/adj/adj_types.h>
+
+typedef CLIB_PACKED (struct {
+ ip4_header_t ip4; /* 20 bytes */
+ udp_header_t udp; /* 8 bytes */
+ vxlan_header_t vxlan; /* 8 bytes */
+}) ip4_vxlan_header_t;
+
+typedef CLIB_PACKED (struct {
+ ip6_header_t ip6; /* 40 bytes */
+ udp_header_t udp; /* 8 bytes */
+ vxlan_header_t vxlan; /* 8 bytes */
+}) ip6_vxlan_header_t;
+
+typedef CLIB_PACKED(struct {
+ /*
+ * Key fields: ip src and vxlan vni on incoming VXLAN packet
+ * all fields in NET byte order
+ */
+ union {
+ struct {
+ u32 src;
+ u32 vni; /* shifted left 8 bits */
+ };
+ u64 as_u64;
+ };
+}) vxlan4_tunnel_key_t;
+
+typedef CLIB_PACKED(struct {
+ /*
+ * Key fields: ip src and vxlan vni on incoming VXLAN packet
+ * all fields in NET byte order
+ */
+ ip6_address_t src;
+ u32 vni; /* shifted left 8 bits */
+}) vxlan6_tunnel_key_t;
+
+typedef struct {
+ /* Rewrite string. $$$$ embed vnet_rewrite header */
+ u8 * rewrite;
+
+ /* FIB DPO for IP forwarding of VXLAN encap packet */
+ dpo_id_t next_dpo;
+
+ /* storage for the hash key */
+ union {
+ vxlan4_tunnel_key_t *key4; /* unused for now */
+ vxlan6_tunnel_key_t *key6;
+ };
+
+ /* vxlan VNI in HOST byte order */
+ u32 vni;
+
+ /* tunnel src and dst addresses */
+ ip46_address_t src;
+ ip46_address_t dst;
+
+ /* mcast packet output intfc index (used only if dst is mcast) */
+ u32 mcast_sw_if_index;
+
+ /* decap next index */
+ u32 decap_next_index;
+
+ /* The FIB index for src/dst addresses */
+ u32 encap_fib_index;
+
+ /* vnet intfc index */
+ u32 sw_if_index;
+ u32 hw_if_index;
+
+ /**
+ * Linkage into the FIB object graph
+ */
+ fib_node_t node;
+
+ /*
+ * The FIB entry (depending on whether the tunnel is unicast or mcast)
+ * for sending unicast VXLAN encap packets or receiving mcast VXLAN packets
+ */
+ fib_node_index_t fib_entry_index;
+ adj_index_t mcast_adj_index;
+
+ /**
+ * The tunnel is a child of the FIB entry for its destination. This is
+ * so it receives updates when the forwarding information for that entry
+ * changes.
+ * The tunnel's sibling index on the FIB entry's dependency list.
+ */
+ u32 sibling_index;
+} vxlan_tunnel_t;
+
+#define foreach_vxlan_input_next \
+_(DROP, "error-drop") \
+_(L2_INPUT, "l2-input")
+
+typedef enum {
+#define _(s,n) VXLAN_INPUT_NEXT_##s,
+ foreach_vxlan_input_next
+#undef _
+ VXLAN_INPUT_N_NEXT,
+} vxlan_input_next_t;
+
+typedef enum {
+#define vxlan_error(n,s) VXLAN_ERROR_##n,
+#include <vnet/vxlan/vxlan_error.def>
+#undef vxlan_error
+ VXLAN_N_ERROR,
+} vxlan_input_error_t;
+
+typedef struct {
+ ip46_address_t ip;
+ fib_node_index_t fib_entry_index;
+ adj_index_t mcast_adj_index;
+} mcast_remote_t;
+
+typedef struct {
+ /* vector of encap tunnel instances */
+ vxlan_tunnel_t * tunnels;
+
+ /* lookup tunnel by key */
+ uword * vxlan4_tunnel_by_key; /* keyed on ipv4.dst + vni */
+ uword * vxlan6_tunnel_by_key; /* keyed on ipv6.dst + vni */
+
+ /* local VTEP IPs ref count used by vxlan-bypass node to check if
+ received VXLAN packet DIP matches any local VTEP address */
+ uword * vtep4; /* local ip4 VTEPs keyed on their ip4 addr */
+ uword * vtep6; /* local ip6 VTEPs keyed on their ip6 addr */
+
+ /* set of active remote mcast VTEP */
+ mcast_remote_t * mcast_eps;
+ uword * mcast_ep_by_ip; /* mcast VTEPs keyed on their ip46 addr */
+
+ /* Free vlib hw_if_indices */
+ u32 * free_vxlan_tunnel_hw_if_indices;
+
+ /* Mapping from sw_if_index to tunnel index */
+ u32 * tunnel_index_by_sw_if_index;
+
+ /* convenience */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} vxlan_main_t;
+
+vxlan_main_t vxlan_main;
+
+extern vlib_node_registration_t vxlan4_input_node;
+extern vlib_node_registration_t vxlan6_input_node;
+extern vlib_node_registration_t vxlan4_encap_node;
+extern vlib_node_registration_t vxlan6_encap_node;
+
+u8 * format_vxlan_encap_trace (u8 * s, va_list * args);
+
+typedef struct {
+ u8 is_add;
+
+ /* we normally use is_ip4, but since this adds to the
+ * structure, this seems less of a breaking change */
+ u8 is_ip6;
+ ip46_address_t src, dst;
+ u32 mcast_sw_if_index;
+ u32 encap_fib_index;
+ u32 decap_next_index;
+ u32 vni;
+} vnet_vxlan_add_del_tunnel_args_t;
+
+int vnet_vxlan_add_del_tunnel
+(vnet_vxlan_add_del_tunnel_args_t *a, u32 * sw_if_indexp);
+
+#endif /* included_vnet_vxlan_h */
diff --git a/src/vnet/vxlan/vxlan_api.c b/src/vnet/vxlan/vxlan_api.c
new file mode 100644
index 00000000000..6c9cbd79764
--- /dev/null
+++ b/src/vnet/vxlan/vxlan_api.c
@@ -0,0 +1,253 @@
+/*
+ *------------------------------------------------------------------
+ * vxlan_api.c - vxlan api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/feature/feature.h>
+#include <vnet/vxlan/vxlan.h>
+#include <vnet/fib/fib_table.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(SW_INTERFACE_SET_VXLAN_BYPASS, sw_interface_set_vxlan_bypass) \
+_(VXLAN_ADD_DEL_TUNNEL, vxlan_add_del_tunnel) \
+_(VXLAN_TUNNEL_DUMP, vxlan_tunnel_dump)
+
+static void
+ vl_api_sw_interface_set_vxlan_bypass_t_handler
+ (vl_api_sw_interface_set_vxlan_bypass_t * mp)
+{
+ vl_api_sw_interface_set_vxlan_bypass_reply_t *rmp;
+ int rv = 0;
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ if (mp->is_ipv6)
+ {
+ /* not yet implemented */
+ }
+ else
+ vnet_feature_enable_disable ("ip4-unicast", "ip4-vxlan-bypass",
+ sw_if_index, mp->enable, 0, 0);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_VXLAN_BYPASS_REPLY);
+}
+
+static void vl_api_vxlan_add_del_tunnel_t_handler
+ (vl_api_vxlan_add_del_tunnel_t * mp)
+{
+ vl_api_vxlan_add_del_tunnel_reply_t *rmp;
+ int rv = 0;
+ vnet_vxlan_add_del_tunnel_args_t _a, *a = &_a;
+ u32 encap_fib_index;
+ uword *p;
+ ip4_main_t *im = &ip4_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 sw_if_index = ~0;
+
+ p = hash_get (im->fib_index_by_table_id, ntohl (mp->encap_vrf_id));
+ if (!p)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ goto out;
+ }
+ encap_fib_index = p[0];
+ memset (a, 0, sizeof (*a));
+
+ a->is_add = mp->is_add;
+ a->is_ip6 = mp->is_ipv6;
+
+ /* ip addresses sent in network byte order */
+ ip46_from_addr_buf (mp->is_ipv6, mp->dst_address, &a->dst);
+ ip46_from_addr_buf (mp->is_ipv6, mp->src_address, &a->src);
+
+ /* Check src & dst are different */
+ if (ip46_address_cmp (&a->dst, &a->src) == 0)
+ {
+ rv = VNET_API_ERROR_SAME_SRC_DST;
+ goto out;
+ }
+ a->mcast_sw_if_index = ntohl (mp->mcast_sw_if_index);
+ if (ip46_address_is_multicast (&a->dst) &&
+ pool_is_free_index (vnm->interface_main.sw_interfaces,
+ a->mcast_sw_if_index))
+ {
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ goto out;
+ }
+ a->encap_fib_index = encap_fib_index;
+ a->decap_next_index = ntohl (mp->decap_next_index);
+ a->vni = ntohl (mp->vni);
+ rv = vnet_vxlan_add_del_tunnel (a, &sw_if_index);
+
+out:
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_VXLAN_ADD_DEL_TUNNEL_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void send_vxlan_tunnel_details
+ (vxlan_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_vxlan_tunnel_details_t *rmp;
+ ip4_main_t *im4 = &ip4_main;
+ ip6_main_t *im6 = &ip6_main;
+ u8 is_ipv6 = !ip46_address_is_ip4 (&t->dst);
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_VXLAN_TUNNEL_DETAILS);
+ if (is_ipv6)
+ {
+ memcpy (rmp->src_address, t->src.ip6.as_u8, 16);
+ memcpy (rmp->dst_address, t->dst.ip6.as_u8, 16);
+ rmp->encap_vrf_id = htonl (im6->fibs[t->encap_fib_index].ft_table_id);
+ }
+ else
+ {
+ memcpy (rmp->src_address, t->src.ip4.as_u8, 4);
+ memcpy (rmp->dst_address, t->dst.ip4.as_u8, 4);
+ rmp->encap_vrf_id = htonl (im4->fibs[t->encap_fib_index].ft_table_id);
+ }
+ rmp->mcast_sw_if_index = htonl (t->mcast_sw_if_index);
+ rmp->vni = htonl (t->vni);
+ rmp->decap_next_index = htonl (t->decap_next_index);
+ rmp->sw_if_index = htonl (t->sw_if_index);
+ rmp->is_ipv6 = is_ipv6;
+ rmp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void vl_api_vxlan_tunnel_dump_t_handler
+ (vl_api_vxlan_tunnel_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+ vxlan_main_t *vxm = &vxlan_main;
+ vxlan_tunnel_t *t;
+ u32 sw_if_index;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ if (~0 == sw_if_index)
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (t, vxm->tunnels,
+ ({
+ send_vxlan_tunnel_details(t, q, mp->context);
+ }));
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ if ((sw_if_index >= vec_len (vxm->tunnel_index_by_sw_if_index)) ||
+ (~0 == vxm->tunnel_index_by_sw_if_index[sw_if_index]))
+ {
+ return;
+ }
+ t = &vxm->tunnels[vxm->tunnel_index_by_sw_if_index[sw_if_index]];
+ send_vxlan_tunnel_details (t, q, mp->context);
+ }
+}
+
+/*
+ * vxlan_api_hookup
+ * Add the vxlan API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_vxlan;
+#undef _
+}
+
+static clib_error_t *
+vxlan_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ am->api_trace_cfg[VL_API_VXLAN_ADD_DEL_TUNNEL].size += 16 * sizeof (u32);
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (vxlan_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/vxlan/vxlan_error.def b/src/vnet/vxlan/vxlan_error.def
new file mode 100644
index 00000000000..17f905950f5
--- /dev/null
+++ b/src/vnet/vxlan/vxlan_error.def
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+vxlan_error (DECAPSULATED, "good packets decapsulated")
+vxlan_error (NO_SUCH_TUNNEL, "no such tunnel packets")
+vxlan_error (BAD_FLAGS, "packets with bad flags field in vxlan header")
diff --git a/src/vnet/vxlan/vxlan_packet.h b/src/vnet/vxlan/vxlan_packet.h
new file mode 100644
index 00000000000..5f93a36fd8f
--- /dev/null
+++ b/src/vnet/vxlan/vxlan_packet.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_vxlan_packet_h__
+#define __included_vxlan_packet_h__ 1
+
+/*
+ * From RFC 7348
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|R|R|I|R|R|R| Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VXLAN Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * VXLAN Header: This is an 8-byte field that has:
+ *
+ * - Flags (8 bits): where the I flag MUST be set to 1 for a valid
+ * VXLAN Network ID (VNI). The other 7 bits (designated "R") are
+ * reserved fields and MUST be set to zero on transmission and
+ * ignored on receipt.
+ *
+ * - VXLAN Segment ID/VXLAN Network Identifier (VNI): this is a
+ * 24-bit value used to designate the individual VXLAN overlay
+ * network on which the communicating VMs are situated. VMs in
+ * different VXLAN overlay networks cannot communicate with each
+ * other.
+ *
+ * - Reserved fields (24 bits and 8 bits): MUST be set to zero on
+ * transmission and ignored on receipt.
+ *
+ */
+
+typedef struct {
+ u8 flags;
+ u8 res1;
+ u8 res2;
+ u8 res3;
+ u32 vni_reserved;
+} vxlan_header_t;
+
+#define VXLAN_FLAGS_I 0x08
+
+static inline u32 vnet_get_vni (vxlan_header_t * h)
+{
+ u32 vni_reserved_host_byte_order;
+
+ vni_reserved_host_byte_order = clib_net_to_host_u32 (h->vni_reserved);
+ return vni_reserved_host_byte_order >> 8;
+}
+
+static inline void vnet_set_vni_and_flags (vxlan_header_t * h, u32 vni)
+{
+  /* zero the flags and reserved bytes, then set the I flag and the VNI */
+  * (u32 *) h = 0;
+  h->flags = VXLAN_FLAGS_I;
+  h->vni_reserved = clib_host_to_net_u32 (vni << 8);
+}
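[Editorial note: a minimal round-trip sketch of the two accessors above; any 24-bit VNI value behaves the same way:

    vxlan_header_t h;
    vnet_set_vni_and_flags (&h, 13);
    ASSERT (h.flags == VXLAN_FLAGS_I);
    ASSERT (vnet_get_vni (&h) == 13);
]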
+
+#endif /* __included_vxlan_packet_h__ */