 -rw-r--r--  MAINTAINERS                                          |    5
 -rw-r--r--  src/plugins/srv6-ad-flow/CMakeLists.txt              |   21
 -rw-r--r--  src/plugins/srv6-ad-flow/FEATURE.yaml                |    8
 -rw-r--r--  src/plugins/srv6-ad-flow/ad-flow.c                   |  474
 -rw-r--r--  src/plugins/srv6-ad-flow/ad-flow.h                   |  141
 -rw-r--r--  src/plugins/srv6-ad-flow/ad_flow_plugin_doc.md       |   25
 -rw-r--r--  src/plugins/srv6-ad-flow/node.c                      | 1025
 -rw-r--r--  src/plugins/srv6-ad-flow/test/test_srv6_ad_flow.py   |  637
 8 files changed, 2336 insertions, 0 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index c70c31f96ae..4f0a36891c1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -489,6 +489,11 @@ I: srv6-ad
M: Francois Clad <fclad@cisco.com>
F: src/plugins/srv6-ad/
+Plugin - IPv6 Segment Routing Flow-Based Dynamic Proxy
+I: srv6-ad-flow
+M: Francois Clad <fclad@cisco.com>
+F: src/plugins/srv6-ad-flow/
+
Plugin - IPv6 Segment Routing Masquerading Proxy
I: srv6-am
M: Francois Clad <fclad@cisco.com>
diff --git a/src/plugins/srv6-ad-flow/CMakeLists.txt b/src/plugins/srv6-ad-flow/CMakeLists.txt
new file mode 100644
index 00000000000..63cb457e5ae
--- /dev/null
+++ b/src/plugins/srv6-ad-flow/CMakeLists.txt
@@ -0,0 +1,21 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_vpp_plugin(srv6adflow
+ SOURCES
+ ad-flow.c
+ node.c
+
+ INSTALL_HEADERS
+ ad-flow.h
+)
diff --git a/src/plugins/srv6-ad-flow/FEATURE.yaml b/src/plugins/srv6-ad-flow/FEATURE.yaml
new file mode 100644
index 00000000000..c5107cdffeb
--- /dev/null
+++ b/src/plugins/srv6-ad-flow/FEATURE.yaml
@@ -0,0 +1,8 @@
+---
+name: SRv6 - Service Chaining Flow-based Dynamic Proxy
+maintainer: Francois Clad <fclad@cisco.com>
+features:
+ - SRv6 - flow-based dynamic service chaining proxy (draft-ietf-spring-sr-service-programming-01)
+description: "SRv6 flow-based dynamic proxy"
+state: production
+properties: [CLI]
diff --git a/src/plugins/srv6-ad-flow/ad-flow.c b/src/plugins/srv6-ad-flow/ad-flow.c
new file mode 100644
index 00000000000..fd9706dabe1
--- /dev/null
+++ b/src/plugins/srv6-ad-flow/ad-flow.c
@@ -0,0 +1,474 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ *------------------------------------------------------------------
+ * ad-flow.c - SRv6 Flow-based Dynamic Proxy (AD.Flow) behavior
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/adj/adj.h>
+#include <vnet/plugin/plugin.h>
+#include <vpp/app/version.h>
+#include <srv6-ad-flow/ad-flow.h>
+
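+/* Instantiate the bihash template functions for the 40_8 flow table */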
+#include <vppinfra/bihash_template.c>
+
+#define SID_CREATE_IFACE_FEATURE_ERROR -1
+#define SID_CREATE_INVALID_IFACE_TYPE -3
+#define SID_CREATE_INVALID_IFACE_INDEX -4
+#define SID_CREATE_INVALID_ADJ_INDEX -5
+
+unsigned char function_name[] = "SRv6-AD-Flow-plugin";
+unsigned char keyword_str[] = "End.AD.Flow";
+unsigned char def_str[] =
+ "Endpoint with flow-based dynamic proxy to SR-unaware appliance";
+unsigned char params_str[] = "nh <next-hop> oif <iface-out> iif <iface-in>";
+
+srv6_ad_flow_main_t srv6_ad_flow_main;
+
+static u32
+ad_flow_calc_bihash_buckets (u32 n_elts)
+{
+ return 1 << (max_log2 (n_elts >> 1) + 1);
+}
+
+static u32
+ad_flow_calc_bihash_memory (u32 n_buckets, uword kv_size)
+{
+ return n_buckets * (8 + kv_size * 4);
+}
+
+/*****************************************/
+/* SRv6 LocalSID instantiation and removal functions */
+static int
+srv6_ad_flow_localsid_creation_fn (ip6_sr_localsid_t *localsid)
+{
+ ip6_sr_main_t *srm = &sr_main;
+ srv6_ad_flow_main_t *sm = &srv6_ad_flow_main;
+ srv6_ad_flow_localsid_t *ls_mem = localsid->plugin_mem;
+ u32 localsid_index = localsid - srm->localsids;
+
+ /* Step 1: Prepare xconnect adjacency for sending packets to the VNF */
+
+ /* Retrieve the adjacency corresponding to the (OIF, next_hop) */
+ adj_index_t nh_adj_index = ADJ_INDEX_INVALID;
+ if (ls_mem->inner_type == AD_TYPE_IP4)
+ nh_adj_index =
+ adj_nbr_add_or_lock (FIB_PROTOCOL_IP4, VNET_LINK_IP4, &ls_mem->nh_addr,
+ ls_mem->sw_if_index_out);
+ else if (ls_mem->inner_type == AD_TYPE_IP6)
+ nh_adj_index =
+ adj_nbr_add_or_lock (FIB_PROTOCOL_IP6, VNET_LINK_IP6, &ls_mem->nh_addr,
+ ls_mem->sw_if_index_out);
+
+ if (nh_adj_index == ADJ_INDEX_INVALID)
+ {
+ clib_mem_free (ls_mem);
+ return SID_CREATE_INVALID_ADJ_INDEX;
+ }
+
+ ls_mem->nh_adj = nh_adj_index;
+
+ /* Step 2: Prepare inbound policy for packets returning from the VNF */
+
+ /* Sanitise the SW_IF_INDEX */
+ if (pool_is_free_index (sm->vnet_main->interface_main.sw_interfaces,
+ ls_mem->sw_if_index_in))
+ {
+ adj_unlock (ls_mem->nh_adj);
+ clib_mem_free (ls_mem);
+ return SID_CREATE_INVALID_IFACE_INDEX;
+ }
+
+ vnet_sw_interface_t *sw =
+ vnet_get_sw_interface (sm->vnet_main, ls_mem->sw_if_index_in);
+ if (sw->type != VNET_SW_INTERFACE_TYPE_HARDWARE)
+ {
+ adj_unlock (ls_mem->nh_adj);
+ clib_mem_free (ls_mem);
+ return SID_CREATE_INVALID_IFACE_TYPE;
+ }
+
+ if (ls_mem->inner_type == AD_TYPE_IP4)
+ {
+ /* Enable End.AD4 rewrite node for this interface */
+ int ret =
+ vnet_feature_enable_disable ("ip4-unicast", "srv6-ad4-flow-rewrite",
+ ls_mem->sw_if_index_in, 1, 0, 0);
+ if (ret != 0)
+ {
+ adj_unlock (ls_mem->nh_adj);
+ clib_mem_free (ls_mem);
+ return SID_CREATE_IFACE_FEATURE_ERROR;
+ }
+
+ /* Associate local SID index to this interface (resize vector if needed)
+ */
+ if (ls_mem->sw_if_index_in >= vec_len (sm->sw_iface_localsid4))
+ {
+ vec_resize (sm->sw_iface_localsid4,
+ (pool_len (sm->vnet_main->interface_main.sw_interfaces) -
+ vec_len (sm->sw_iface_localsid4)));
+ }
+ sm->sw_iface_localsid4[ls_mem->sw_if_index_in] = localsid_index;
+ }
+ else if (ls_mem->inner_type == AD_TYPE_IP6)
+ {
+ /* Enable End.AD6 rewrite node for this interface */
+ int ret =
+ vnet_feature_enable_disable ("ip6-unicast", "srv6-ad6-flow-rewrite",
+ ls_mem->sw_if_index_in, 1, 0, 0);
+ if (ret != 0)
+ {
+ adj_unlock (ls_mem->nh_adj);
+ clib_mem_free (ls_mem);
+ return SID_CREATE_IFACE_FEATURE_ERROR;
+ }
+
+ /* Associate local SID index to this interface (resize vector if needed)
+ */
+ if (ls_mem->sw_if_index_in >= vec_len (sm->sw_iface_localsid6))
+ {
+ vec_resize (sm->sw_iface_localsid6,
+ (pool_len (sm->vnet_main->interface_main.sw_interfaces) -
+ vec_len (sm->sw_iface_localsid6)));
+ }
+ sm->sw_iface_localsid6[ls_mem->sw_if_index_in] = localsid_index;
+ }
+
+ /* Initialize flow and cache tables */
+ ls_mem->cache_size = SRV6_AD_FLOW_DEFAULT_CACHE_SIZE;
+ ls_mem->cache_buckets = ad_flow_calc_bihash_buckets (ls_mem->cache_size);
+ ls_mem->cache_memory_size = ad_flow_calc_bihash_memory (
+ ls_mem->cache_buckets, sizeof (clib_bihash_40_8_t));
+
+ pool_alloc (ls_mem->cache, ls_mem->cache_size);
+ pool_alloc (ls_mem->lru_pool, ls_mem->cache_size);
+
+ dlist_elt_t *head;
+ pool_get (ls_mem->lru_pool, head);
+ ls_mem->lru_head_index = head - ls_mem->lru_pool;
+ clib_dlist_init (ls_mem->lru_pool, ls_mem->lru_head_index);
+
+ clib_bihash_init_40_8 (&ls_mem->ftable, "ad-flow", ls_mem->cache_buckets,
+ ls_mem->cache_memory_size);
+
+ /* Step 3: Initialize rewrite counters */
+ srv6_ad_flow_localsid_t **ls_p;
+ pool_get (sm->sids, ls_p);
+ *ls_p = ls_mem;
+ ls_mem->index = ls_p - sm->sids;
+
+ vlib_validate_combined_counter (&(sm->sid_bypass_counters), ls_mem->index);
+ vlib_validate_combined_counter (&(sm->sid_punt_counters), ls_mem->index);
+ vlib_validate_combined_counter (&(sm->sid_cache_full_counters),
+ ls_mem->index);
+ vlib_validate_combined_counter (&(sm->rw_valid_counters), ls_mem->index);
+ vlib_validate_combined_counter (&(sm->rw_invalid_counters), ls_mem->index);
+
+ vlib_zero_combined_counter (&(sm->sid_bypass_counters), ls_mem->index);
+ vlib_zero_combined_counter (&(sm->sid_punt_counters), ls_mem->index);
+ vlib_zero_combined_counter (&(sm->sid_cache_full_counters), ls_mem->index);
+ vlib_zero_combined_counter (&(sm->rw_valid_counters), ls_mem->index);
+ vlib_zero_combined_counter (&(sm->rw_invalid_counters), ls_mem->index);
+
+ return 0;
+}
+
+static int
+srv6_ad_flow_localsid_removal_fn (ip6_sr_localsid_t *localsid)
+{
+ srv6_ad_flow_main_t *sm = &srv6_ad_flow_main;
+ srv6_ad_flow_localsid_t *ls_mem = localsid->plugin_mem;
+
+ if (ls_mem->inner_type == AD_TYPE_IP4)
+ {
+ /* Disable End.AD4 rewrite node for this interface */
+ int ret =
+ vnet_feature_enable_disable ("ip4-unicast", "srv6-ad4-flow-rewrite",
+ ls_mem->sw_if_index_in, 0, 0, 0);
+ if (ret != 0)
+ return -1;
+
+ /* Remove local SID pointer from interface table */
+ sm->sw_iface_localsid4[ls_mem->sw_if_index_in] = ~(u32) 0;
+ }
+ else if (ls_mem->inner_type == AD_TYPE_IP6)
+ {
+ /* Disable End.AD6 rewrite node for this interface */
+ int ret =
+ vnet_feature_enable_disable ("ip6-unicast", "srv6-ad6-flow-rewrite",
+ ls_mem->sw_if_index_in, 0, 0, 0);
+ if (ret != 0)
+ return -1;
+
+ /* Remove local SID pointer from interface table */
+ sm->sw_iface_localsid6[ls_mem->sw_if_index_in] = ~(u32) 0;
+ }
+
+ /* Unlock (OIF, NHOP) adjacency */
+ adj_unlock (ls_mem->nh_adj);
+
+ /* Delete SID entry */
+ pool_put (sm->sids, pool_elt_at_index (sm->sids, ls_mem->index));
+
+ /* Clean up local SID memory */
+ srv6_ad_flow_entry_t *e;
+ pool_foreach (e, ls_mem->cache)
+ {
+ vec_free (e->rw_data);
+ }
+ pool_free (ls_mem->cache);
+ pool_free (ls_mem->lru_pool);
+ clib_bihash_free_40_8 (&ls_mem->ftable);
+ clib_mem_free (localsid->plugin_mem);
+
+ return 0;
+}
+
+/**********************************/
+/* SRv6 LocalSID format functions */
+/*
+ * Prints the parameters of an End.AD.Flow localsid: next-hop, outgoing and
+ * incoming interfaces, and the per-SID proxy counters.
+ */
+u8 *
+format_srv6_ad_flow_localsid (u8 *s, va_list *args)
+{
+ srv6_ad_flow_localsid_t *ls_mem = va_arg (*args, void *);
+
+ vnet_main_t *vnm = vnet_get_main ();
+ srv6_ad_flow_main_t *sm = &srv6_ad_flow_main;
+
+ if (ls_mem->inner_type == AD_TYPE_IP4)
+ {
+ s = format (s, "Next-hop:\t%U\n\t", format_ip4_address,
+ &ls_mem->nh_addr.ip4);
+ }
+ else if (ls_mem->inner_type == AD_TYPE_IP6)
+ {
+ s = format (s, "Next-hop:\t%U\n\t", format_ip6_address,
+ &ls_mem->nh_addr.ip6);
+ }
+
+ s = format (s, "Outgoing iface:\t%U\n", format_vnet_sw_if_index_name, vnm,
+ ls_mem->sw_if_index_out);
+ s = format (s, "\tIncoming iface:\t%U\n", format_vnet_sw_if_index_name, vnm,
+ ls_mem->sw_if_index_in);
+
+ vlib_counter_t sid_bypass, sid_punt, sid_full, rw_valid, rw_invalid;
+ vlib_get_combined_counter (&(sm->sid_bypass_counters), ls_mem->index,
+ &sid_bypass);
+ vlib_get_combined_counter (&(sm->sid_punt_counters), ls_mem->index,
+ &sid_punt);
+ vlib_get_combined_counter (&(sm->sid_cache_full_counters), ls_mem->index,
+ &sid_full);
+ vlib_get_combined_counter (&(sm->rw_valid_counters), ls_mem->index,
+ &rw_valid);
+ vlib_get_combined_counter (&(sm->rw_invalid_counters), ls_mem->index,
+ &rw_invalid);
+
+ s =
+ format (s, "\tTraffic that bypassed the NF: \t[%Ld packets : %Ld bytes]\n",
+ sid_bypass.packets, sid_bypass.bytes);
+ s = format (s, "\tPunted traffic: \t[%Ld packets : %Ld bytes]\n",
+ sid_punt.packets, sid_punt.bytes);
+ s =
+ format (s, "\tDropped traffic (cache full): \t[%Ld packets : %Ld bytes]\n",
+ sid_full.packets, sid_full.bytes);
+ s = format (s, "\tGood rewrite traffic: \t[%Ld packets : %Ld bytes]\n",
+ rw_valid.packets, rw_valid.bytes);
+ s = format (s, "\tBad rewrite traffic: \t[%Ld packets : %Ld bytes]\n",
+ rw_invalid.packets, rw_invalid.bytes);
+
+ return s;
+}
+
+/*
+ * Parses the parameters of an End.AD.Flow localsid.
+ * Example: from
+ *   sr localsid address cafe::1 behavior end.ad.flow nh A1:: oif IFACE-OUT iif IFACE-IN
+ * everything from 'behavior' onwards is processed here.
+ * Notice that it MUST match the keyword_str and params_str defined above.
+ */
+uword
+unformat_srv6_ad_flow_localsid (unformat_input_t *input, va_list *args)
+{
+ void **plugin_mem_p = va_arg (*args, void **);
+ srv6_ad_flow_localsid_t *ls_mem;
+
+ vnet_main_t *vnm = vnet_get_main ();
+
+ u8 inner_type = AD_TYPE_IP4;
+ ip46_address_t nh_addr;
+ u32 sw_if_index_out;
+ u32 sw_if_index_in;
+
+ u8 params = 0;
+#define PARAM_AD_NH (1 << 0)
+#define PARAM_AD_OIF (1 << 1)
+#define PARAM_AD_IIF (1 << 2)
+
+ if (!unformat (input, "end.ad.flow"))
+ return 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (!(params & PARAM_AD_NH) &&
+ unformat (input, "nh %U", unformat_ip4_address, &nh_addr.ip4))
+ {
+ inner_type = AD_TYPE_IP4;
+ params |= PARAM_AD_NH;
+ }
+ if (!(params & PARAM_AD_NH) &&
+ unformat (input, "nh %U", unformat_ip6_address, &nh_addr.ip6))
+ {
+ inner_type = AD_TYPE_IP6;
+ params |= PARAM_AD_NH;
+ }
+ else if (!(params & PARAM_AD_OIF) &&
+ unformat (input, "oif %U", unformat_vnet_sw_interface, vnm,
+ &sw_if_index_out))
+ {
+ params |= PARAM_AD_OIF;
+ }
+ else if (!(params & PARAM_AD_IIF) &&
+ unformat (input, "iif %U", unformat_vnet_sw_interface, vnm,
+ &sw_if_index_in))
+ {
+ params |= PARAM_AD_IIF;
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ /* Make sure that all parameters are supplied */
+ u8 params_chk = (PARAM_AD_NH | PARAM_AD_OIF | PARAM_AD_IIF);
+ if ((params & params_chk) != params_chk)
+ {
+ return 0;
+ }
+
+ /* Allocate and initialize memory block for local SID parameters */
+ ls_mem = clib_mem_alloc_aligned_at_offset (sizeof *ls_mem, 0, 0, 1);
+ clib_memset (ls_mem, 0, sizeof *ls_mem);
+ *plugin_mem_p = ls_mem;
+
+ /* Set local SID parameters */
+ ls_mem->inner_type = inner_type;
+ if (inner_type == AD_TYPE_IP4)
+ ls_mem->nh_addr.ip4 = nh_addr.ip4;
+ else if (inner_type == AD_TYPE_IP6)
+ ls_mem->nh_addr.ip6 = nh_addr.ip6;
+ ls_mem->sw_if_index_out = sw_if_index_out;
+ ls_mem->sw_if_index_in = sw_if_index_in;
+
+ return 1;
+}
+
+/*************************/
+/* SRv6 LocalSID FIB DPO */
+static u8 *
+format_srv6_ad_flow_dpo (u8 *s, va_list *args)
+{
+ index_t index = va_arg (*args, index_t);
+ CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
+
+ return (format (s, "SR: dynamic_proxy_index:[%u]", index));
+}
+
+void
+srv6_ad_flow_dpo_lock (dpo_id_t *dpo)
+{
+}
+
+void
+srv6_ad_flow_dpo_unlock (dpo_id_t *dpo)
+{
+}
+
+const static dpo_vft_t srv6_ad_flow_vft = {
+ .dv_lock = srv6_ad_flow_dpo_lock,
+ .dv_unlock = srv6_ad_flow_dpo_unlock,
+ .dv_format = format_srv6_ad_flow_dpo,
+};
+
+const static char *const srv6_ad_flow_ip6_nodes[] = {
+ "srv6-ad-flow-localsid",
+ NULL,
+};
+
+const static char *const *const srv6_ad_flow_nodes[DPO_PROTO_NUM] = {
+ [DPO_PROTO_IP6] = srv6_ad_flow_ip6_nodes,
+};
+
+/**********************/
+static clib_error_t *
+srv6_ad_flow_init (vlib_main_t *vm)
+{
+ srv6_ad_flow_main_t *sm = &srv6_ad_flow_main;
+ int rv = 0;
+
+ sm->vlib_main = vm;
+ sm->vnet_main = vnet_get_main ();
+
+ /* Create DPO */
+ sm->srv6_ad_flow_dpo_type =
+ dpo_register_new_type (&srv6_ad_flow_vft, srv6_ad_flow_nodes);
+
+ /* Register SRv6 LocalSID */
+ rv = sr_localsid_register_function (
+ vm, function_name, keyword_str, def_str, params_str, 128,
+ &sm->srv6_ad_flow_dpo_type, format_srv6_ad_flow_localsid,
+ unformat_srv6_ad_flow_localsid, srv6_ad_flow_localsid_creation_fn,
+ srv6_ad_flow_localsid_removal_fn);
+  if (rv < 0)
+    return clib_error_return (
+      0, "SRv6 LocalSID function could not be registered.");
+ else
+ sm->srv6_localsid_behavior_id = rv;
+
+ return 0;
+}
+
+VNET_FEATURE_INIT (srv6_ad4_flow_rewrite, static) = {
+ .arc_name = "ip4-unicast",
+ .node_name = "srv6-ad4-flow-rewrite",
+ .runs_before = 0,
+};
+
+VNET_FEATURE_INIT (srv6_ad6_flow_rewrite, static) = {
+ .arc_name = "ip6-unicast",
+ .node_name = "srv6-ad6-flow-rewrite",
+ .runs_before = 0,
+};
+
+VLIB_INIT_FUNCTION (srv6_ad_flow_init);
+
+VLIB_PLUGIN_REGISTER () = {
+ .version = VPP_BUILD_VER,
+  .description = "Flow-based Dynamic Segment Routing for IPv6 (SRv6) Proxy",
+};
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/srv6-ad-flow/ad-flow.h b/src/plugins/srv6-ad-flow/ad-flow.h
new file mode 100644
index 00000000000..e92a21d93c5
--- /dev/null
+++ b/src/plugins/srv6-ad-flow/ad-flow.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_srv6_ad_flow_h__
+#define __included_srv6_ad_flow_h__
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/srv6/sr.h>
+#include <vnet/srv6/sr_packet.h>
+
+#include <vppinfra/error.h>
+#include <vppinfra/elog.h>
+
+#include <vppinfra/dlist.h>
+#include <vppinfra/bihash_40_8.h>
+
+#define AD_TYPE_IP4 IP_PROTOCOL_IP_IN_IP
+#define AD_TYPE_IP6 IP_PROTOCOL_IPV6
+
+#define SRV6_AD_FLOW_DEFAULT_CACHE_SIZE 100
+#define SRV6_AD_CACHE_TIMEOUT 300
+
+/*
+ * Cache entries
+ */
+typedef struct
+{
+ /* Cache key (5-tuple) */
+ struct
+ {
+ ip46_address_t s_addr;
+ ip46_address_t d_addr;
+ u8 proto;
+ u16 s_port;
+ u16 d_port;
+ } key;
+
+ /* Cached encapsulation headers */
+ u32 rw_len;
+ u8 *rw_data;
+
+ /* Index in global LRU list */
+ u32 lru_index;
+
+ /* Last update timestamp */
+ f64 last_lru_update;
+
+ /* Last heard timestamp */
+ f64 last_heard;
+} srv6_ad_flow_entry_t;
+
+/*
+ * This is the memory that will be stored per each localsid
+ * the user instantiates
+ */
+typedef struct
+{
+ ip46_address_t nh_addr; /**< Proxied device address */
+ u32 sw_if_index_out; /**< Outgoing iface to proxied dev. */
+ u32 nh_adj; /**< Adjacency index for out. iface */
+ u8 inner_type;
+
+ u32 sw_if_index_in; /**< Incoming iface from proxied dev. */
+
+ u32 cache_size;
+ u32 cache_buckets;
+ uword cache_memory_size;
+
+ clib_bihash_40_8_t ftable; /**< Flow table */
+ srv6_ad_flow_entry_t *cache; /**< Cache table */
+ dlist_elt_t *lru_pool;
+ u32 lru_head_index;
+
+ u32 index;
+} srv6_ad_flow_localsid_t;
+
+typedef struct
+{
+ u16 msg_id_base; /**< API message ID base */
+
+ vlib_main_t *vlib_main; /**< [convenience] vlib main */
+ vnet_main_t *vnet_main; /**< [convenience] vnet main */
+
+ dpo_type_t srv6_ad_flow_dpo_type; /**< DPO type */
+
+ u32 srv6_localsid_behavior_id; /**< SRv6 LocalSID behavior number */
+
+  u32 *sw_iface_localsid2; /**< Retrieve local SID from iface (L2) */
+  u32 *sw_iface_localsid4; /**< Retrieve local SID from iface (IPv4) */
+  u32 *sw_iface_localsid6; /**< Retrieve local SID from iface (IPv6) */
+
+ srv6_ad_flow_localsid_t **sids; /**< Pool of AD SID pointers */
+
+ vlib_combined_counter_main_t
+ sid_bypass_counters; /**< Packets/bytes bypassing NF */
+ vlib_combined_counter_main_t sid_punt_counters; /**< Packets/bytes punted */
+ vlib_combined_counter_main_t sid_cache_full_counters;
+
+ vlib_combined_counter_main_t
+ rw_valid_counters; /**< Valid rewrite counters */
+ vlib_combined_counter_main_t
+ rw_invalid_counters; /**< Invalid rewrite counters */
+} srv6_ad_flow_main_t;
+
+typedef struct
+{
+ srv6_ad_flow_localsid_t *ls;
+ f64 now;
+} srv6_ad_is_idle_entry_ctx_t;
+
+extern srv6_ad_flow_main_t srv6_ad_flow_main;
+
+format_function_t format_srv6_ad_flow_localsid;
+unformat_function_t unformat_srv6_ad_flow_localsid;
+
+void srv6_ad_flow_dpo_lock (dpo_id_t *dpo);
+void srv6_ad_flow_dpo_unlock (dpo_id_t *dpo);
+
+extern vlib_node_registration_t srv6_ad_flow_localsid_node;
+
+#endif /* __included_srv6_ad_flow_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/srv6-ad-flow/ad_flow_plugin_doc.md b/src/plugins/srv6-ad-flow/ad_flow_plugin_doc.md
new file mode 100644
index 00000000000..1f58fc2b663
--- /dev/null
+++ b/src/plugins/srv6-ad-flow/ad_flow_plugin_doc.md
@@ -0,0 +1,25 @@
+# SRv6 endpoint to SR-unaware appliance via per-flow dynamic proxy {#srv6_ad_flow_plugin_doc}
+
+## Overview
+
+The SRv6 per-flow dynamic proxy (End.AD.Flow) behavior, defined in
+draft-ietf-spring-sr-service-programming, allows an SRv6 service chain to
+include an appliance that is not SR-aware. For each new flow received on the
+local SID, the plugin caches the outer IPv6 header and SRH in a per-SID flow
+table, decapsulates the inner IPv4 or IPv6 packet and cross-connects it to the
+appliance. When the packet comes back from the appliance, the cached headers
+for that flow are re-applied and the packet continues along its SR policy.
+
+## CLI configuration
+
+The following command instantiates a new End.AD.Flow segment that sends the inner
+packets on interface `IFACE-OUT` towards an appliance at address `S-ADDR` and
+restores the encapsulation headers of the packets coming back on interface
+`IFACE-IN`.
+
+```
+sr localsid address SID behavior end.ad.flow nh S-ADDR oif IFACE-OUT iif IFACE-IN
+```
+
+For example, the below command configures the SID `1::A1` with an End.AD.Flow
+function for sending traffic on interface `GigabitEthernet0/8/0` to the
+appliance at address `A1::`, and receiving it back on interface
+`GigabitEthernet0/9/0`.
+
+```
+sr localsid address 1::A1 behavior end.ad.flow nh A1:: oif GigabitEthernet0/8/0 iif GigabitEthernet0/9/0
+```
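+
+Once configured, the local SID and its per-flow proxy counters can be
+inspected, and the behavior removed, with the standard SR CLI. The commands
+below are the ones exercised by the plugin test and are shown only as a usage
+sketch:
+
+```
+show sr localsid
+sr localsid del address 1::A1
+```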
diff --git a/src/plugins/srv6-ad-flow/node.c b/src/plugins/srv6-ad-flow/node.c
new file mode 100644
index 00000000000..66be2dc7972
--- /dev/null
+++ b/src/plugins/srv6-ad-flow/node.c
@@ -0,0 +1,1025 @@
+/*
+ * node.c
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <srv6-ad-flow/ad-flow.h>
+
+/****************************** Packet tracing ******************************/
+
+typedef struct
+{
+ u32 localsid_index;
+} srv6_ad_flow_localsid_trace_t;
+
+typedef struct
+{
+ u8 error;
+ ip6_address_t src, dst;
+} srv6_ad_flow_rewrite_trace_t;
+
+static u8 *
+format_srv6_ad_flow_localsid_trace (u8 *s, va_list *args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ srv6_ad_flow_localsid_trace_t *t =
+ va_arg (*args, srv6_ad_flow_localsid_trace_t *);
+
+ return format (s, "SRv6-AD-Flow-localsid: localsid_index %d",
+ t->localsid_index);
+}
+
+static u8 *
+format_srv6_ad_flow_rewrite_trace (u8 *s, va_list *args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ srv6_ad_flow_rewrite_trace_t *t =
+ va_arg (*args, srv6_ad_flow_rewrite_trace_t *);
+
+ if (PREDICT_FALSE (t->error != 0))
+ {
+ return format (s, "SRv6-AD-Flow-rewrite: cache is empty");
+ }
+
+ return format (s, "SRv6-AD-Flow-rewrite: src %U dst %U", format_ip6_address,
+ &t->src, format_ip6_address, &t->dst);
+}
+
+/**************************** Nodes registration *****************************/
+
+vlib_node_registration_t srv6_ad4_flow_rewrite_node;
+vlib_node_registration_t srv6_ad6_flow_rewrite_node;
+
+/****************************** Packet counters ******************************/
+
+#define foreach_srv6_ad_flow_rewrite_counter \
+ _ (PROCESSED, "srv6-ad-flow rewritten packets") \
+ _ (NO_RW, "(Error) No header for rewriting.")
+
+typedef enum
+{
+#define _(sym, str) SRV6_AD_FLOW_REWRITE_COUNTER_##sym,
+ foreach_srv6_ad_flow_rewrite_counter
+#undef _
+ SRV6_AD_FLOW_REWRITE_N_COUNTERS,
+} srv6_ad_flow_rewrite_counters;
+
+static char *srv6_ad_flow_rewrite_counter_strings[] = {
+#define _(sym, string) string,
+ foreach_srv6_ad_flow_rewrite_counter
+#undef _
+};
+
+/******************************** Next nodes *********************************/
+
+typedef enum
+{
+ SRV6_AD_FLOW_LOCALSID_NEXT_ERROR,
+ SRV6_AD_FLOW_LOCALSID_NEXT_REWRITE4,
+ SRV6_AD_FLOW_LOCALSID_NEXT_REWRITE6,
+ SRV6_AD_FLOW_LOCALSID_NEXT_BYPASS,
+ SRV6_AD_FLOW_LOCALSID_NEXT_PUNT,
+ SRV6_AD_FLOW_LOCALSID_N_NEXT,
+} srv6_ad_flow_localsid_next_t;
+
+typedef enum
+{
+ SRV6_AD_FLOW_REWRITE_NEXT_ERROR,
+ SRV6_AD_FLOW_REWRITE_NEXT_LOOKUP,
+ SRV6_AD_FLOW_REWRITE_N_NEXT,
+} srv6_ad_flow_rewrite_next_t;
+
+/***************************** Inline functions ******************************/
+
+static_always_inline int
+ad_flow_lru_insert (srv6_ad_flow_localsid_t *ls, srv6_ad_flow_entry_t *e,
+ f64 now)
+{
+ dlist_elt_t *lru_list_elt;
+ pool_get (ls->lru_pool, lru_list_elt);
+ e->lru_index = lru_list_elt - ls->lru_pool;
+ clib_dlist_addtail (ls->lru_pool, ls->lru_head_index, e->lru_index);
+ lru_list_elt->value = e - ls->cache;
+ e->last_lru_update = now;
+ return 1;
+}
+
+always_inline void
+ad_flow_entry_update_lru (srv6_ad_flow_localsid_t *ls, srv6_ad_flow_entry_t *e)
+{
+ /* don't update too often - timeout is in magnitude of seconds anyway */
+ if (e->last_heard > e->last_lru_update + 1)
+ {
+ clib_dlist_remove (ls->lru_pool, e->lru_index);
+ clib_dlist_addtail (ls->lru_pool, ls->lru_head_index, e->lru_index);
+ e->last_lru_update = e->last_heard;
+ }
+}
+
+always_inline void
+ad_flow_entry_delete (srv6_ad_flow_localsid_t *ls, srv6_ad_flow_entry_t *e,
+ int lru_delete)
+{
+ clib_bihash_kv_40_8_t kv;
+
+ if (ls->inner_type == AD_TYPE_IP4)
+ {
+ kv.key[0] = ((u64) e->key.s_addr.ip4.as_u32 << 32) |
+ (u64) e->key.d_addr.ip4.as_u32;
+ kv.key[1] = ((u64) e->key.s_port << 16) | ((u64) e->key.d_port);
+ kv.key[2] = 0;
+ kv.key[3] = 0;
+ kv.key[4] = 0;
+ }
+ else
+ {
+ kv.key[0] = e->key.s_addr.ip6.as_u64[0];
+ kv.key[1] = e->key.s_addr.ip6.as_u64[1];
+ kv.key[2] = e->key.d_addr.ip6.as_u64[0];
+ kv.key[3] = e->key.d_addr.ip6.as_u64[1];
+ kv.key[4] = ((u64) e->key.s_port << 16) | ((u64) e->key.d_port);
+ }
+
+ clib_bihash_add_del_40_8 (&ls->ftable, &kv, 0);
+
+ vec_free (e->rw_data);
+
+ if (lru_delete)
+ {
+ clib_dlist_remove (ls->lru_pool, e->lru_index);
+ }
+ pool_put_index (ls->lru_pool, e->lru_index);
+ pool_put (ls->cache, e);
+}
+
+static_always_inline int
+ad_flow_lru_free_one (srv6_ad_flow_localsid_t *ls, f64 now)
+{
+ srv6_ad_flow_entry_t *e = NULL;
+ dlist_elt_t *oldest_elt;
+ f64 entry_timeout_time;
+ u32 oldest_index;
+ oldest_index = clib_dlist_remove_head (ls->lru_pool, ls->lru_head_index);
+ if (~0 != oldest_index)
+ {
+ oldest_elt = pool_elt_at_index (ls->lru_pool, oldest_index);
+ e = pool_elt_at_index (ls->cache, oldest_elt->value);
+
+ entry_timeout_time = e->last_heard + (f64) SRV6_AD_CACHE_TIMEOUT;
+ if (now >= entry_timeout_time)
+ {
+ ad_flow_entry_delete (ls, e, 0);
+ return 1;
+ }
+ else
+ {
+ clib_dlist_addhead (ls->lru_pool, ls->lru_head_index, oldest_index);
+ }
+ }
+ return 0;
+}
+
+static_always_inline srv6_ad_flow_entry_t *
+ad_flow_entry_alloc (srv6_ad_flow_localsid_t *ls, f64 now)
+{
+ srv6_ad_flow_entry_t *e;
+
+ ad_flow_lru_free_one (ls, now);
+
+ pool_get (ls->cache, e);
+ clib_memset (e, 0, sizeof *e);
+
+ ad_flow_lru_insert (ls, e, now);
+
+ return e;
+}
+
+always_inline u32
+ad_flow_value_get_session_index (clib_bihash_kv_40_8_t *value)
+{
+ return value->value & ~(u32) 0;
+}
+
+int
+ad_flow_is_idle_entry_cb (clib_bihash_kv_40_8_t *kv, void *arg)
+{
+ srv6_ad_is_idle_entry_ctx_t *ctx = arg;
+ srv6_ad_flow_entry_t *e;
+  f64 entry_timeout_time;
+ srv6_ad_flow_localsid_t *ls = ctx->ls;
+
+ e = pool_elt_at_index (ls->cache, ad_flow_value_get_session_index (kv));
+ entry_timeout_time = e->last_heard + (f64) SRV6_AD_CACHE_TIMEOUT;
+ if (ctx->now >= entry_timeout_time)
+ {
+ ad_flow_entry_delete (ls, e, 1);
+ return 1;
+ }
+ return 0;
+}
+
+/****************************** Local SID node *******************************/
+
+/**
+ * @brief Walk the IPv6 extension header chain until a header of the expected
+ * type is found, accumulating the encapsulation length along the way.
+ */
+static_always_inline int
+end_ad_flow_walk_expect_first_hdr (vlib_main_t *vm, vlib_buffer_t *b,
+ ip6_ext_header_t *first_hdr,
+ u8 first_hdr_type, u8 expected_hdr_type,
+ u32 *encap_length, u8 **found_hdr)
+{
+ if (PREDICT_TRUE (first_hdr_type == expected_hdr_type))
+ {
+ *found_hdr = (void *) first_hdr;
+ }
+ else
+ {
+ u8 ext_hdr_type = first_hdr_type;
+ ip6_ext_header_t *ext_hdr = first_hdr;
+
+ if (!ip6_ext_hdr (ext_hdr_type))
+ {
+ *found_hdr = NULL;
+ return -1;
+ }
+
+ u32 ext_hdr_length = ip6_ext_header_len (ext_hdr);
+ if (!vlib_object_within_buffer_data (vm, b, ext_hdr, ext_hdr_length))
+ {
+ *found_hdr = NULL;
+ return -2;
+ }
+ *encap_length += ext_hdr_length;
+ ext_hdr_type = ext_hdr->next_hdr;
+
+ while (ext_hdr_type != expected_hdr_type && ip6_ext_hdr (ext_hdr_type))
+ {
+ ext_hdr = ip6_ext_next_header (ext_hdr);
+ ext_hdr_length = ip6_ext_header_len (ext_hdr);
+ if (!vlib_object_within_buffer_data (vm, b, ext_hdr, ext_hdr_length))
+ {
+ *found_hdr = NULL;
+ return -2;
+ }
+ *encap_length += ext_hdr_length;
+ ext_hdr_type = ext_hdr->next_hdr;
+ }
+
+ if (ext_hdr_type != expected_hdr_type)
+ {
+ *found_hdr = NULL;
+ return -1;
+ }
+
+ *found_hdr = ip6_ext_next_header (ext_hdr);
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Function doing SRH processing for per-flow AD behavior (IPv6 inner
+ * traffic)
+ */
+static_always_inline void
+end_ad_flow_processing_v6 (vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip,
+ srv6_ad_flow_localsid_t *ls_mem, u32 *next,
+ vlib_combined_counter_main_t **cnt, u32 *cnt_idx,
+ f64 now)
+{
+ ip6_sr_main_t *srm = &sr_main;
+ srv6_ad_flow_main_t *sm = &srv6_ad_flow_main;
+ ip6_address_t *new_dst;
+ u32 encap_length = sizeof (ip6_header_t);
+ ip6_sr_header_t *srh;
+ clib_bihash_40_8_t *h = &ls_mem->ftable;
+ ip6_header_t *ulh = NULL;
+ u16 src_port = 0, dst_port = 0;
+ srv6_ad_flow_entry_t *e = NULL;
+ clib_bihash_kv_40_8_t kv, value;
+ srv6_ad_is_idle_entry_ctx_t ctx;
+
+ /* Find SRH in the extension header chain */
+ end_ad_flow_walk_expect_first_hdr (vm, b, (void *) (ip + 1), ip->protocol,
+ IP_PROTOCOL_IPV6_ROUTE, &encap_length,
+ (u8 **) &srh);
+
+ /* Punt the packet if no SRH or SRH with SL = 0 */
+ if (PREDICT_FALSE (srh == NULL || srh->type != ROUTING_HEADER_TYPE_SR ||
+ srh->segments_left == 0))
+ {
+ *next = SRV6_AD_FLOW_LOCALSID_NEXT_PUNT;
+ *cnt = &(sm->sid_punt_counters);
+ *cnt_idx = ls_mem->index;
+ return;
+ }
+
+ /* Decrement Segments Left and update Destination Address */
+ srh->segments_left -= 1;
+ new_dst = (ip6_address_t *) (srh->segments) + srh->segments_left;
+ ip->dst_address.as_u64[0] = new_dst->as_u64[0];
+ ip->dst_address.as_u64[1] = new_dst->as_u64[1];
+
+  /* Add the SRH length to the total encapsulation size */
+ encap_length += ip6_ext_header_len ((ip6_ext_header_t *) srh);
+
+ /* Find the inner IPv6 header (ULH) */
+ int ret = end_ad_flow_walk_expect_first_hdr (
+ vm, b, ip6_ext_next_header ((ip6_ext_header_t *) srh), srh->protocol,
+ IP_PROTOCOL_IPV6, &encap_length, (u8 **) &ulh);
+
+ if (PREDICT_FALSE (ulh == NULL))
+ {
+ if (ret == -1) /* Bypass the NF if ULH is not of expected type */
+ {
+ *next = SRV6_AD_FLOW_LOCALSID_NEXT_BYPASS;
+ *cnt = &(sm->sid_bypass_counters);
+ *cnt_idx = ls_mem->index;
+ }
+ else
+ {
+ *next = SRV6_AD_FLOW_LOCALSID_NEXT_ERROR;
+ *cnt = &(srm->sr_ls_invalid_counters);
+ }
+ return;
+ }
+
+ /* Compute flow hash on ULH */
+ if (PREDICT_TRUE (ulh->protocol == IP_PROTOCOL_UDP ||
+ ulh->protocol == IP_PROTOCOL_TCP))
+ {
+ udp_header_t *ulh_l4_hdr = (udp_header_t *) (ulh + 1);
+ src_port = ulh_l4_hdr->src_port;
+ dst_port = ulh_l4_hdr->dst_port;
+ }
+
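+  /* Build the flow key from the inner packet addresses and ports */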
+ kv.key[0] = ulh->src_address.as_u64[0];
+ kv.key[1] = ulh->src_address.as_u64[1];
+ kv.key[2] = ulh->dst_address.as_u64[0];
+ kv.key[3] = ulh->dst_address.as_u64[1];
+ kv.key[4] = ((u64) src_port << 16) | ((u64) dst_port);
+
+ /* Lookup flow in hashtable */
+ if (!clib_bihash_search_40_8 (h, &kv, &value))
+ {
+ e = pool_elt_at_index (ls_mem->cache,
+ ad_flow_value_get_session_index (&value));
+ }
+
+ if (!e)
+ {
+ if (pool_elts (ls_mem->cache) >= ls_mem->cache_size)
+ {
+ if (!ad_flow_lru_free_one (ls_mem, now))
+ {
+ *next = SRV6_AD_FLOW_LOCALSID_NEXT_ERROR;
+ *cnt = &(sm->sid_cache_full_counters);
+ *cnt_idx = ls_mem->index;
+ return;
+ }
+ }
+
+ e = ad_flow_entry_alloc (ls_mem, now);
+ ASSERT (e);
+ e->key.s_addr.ip6.as_u64[0] = ulh->src_address.as_u64[0];
+ e->key.s_addr.ip6.as_u64[1] = ulh->src_address.as_u64[1];
+ e->key.d_addr.ip6.as_u64[0] = ulh->dst_address.as_u64[0];
+ e->key.d_addr.ip6.as_u64[1] = ulh->dst_address.as_u64[1];
+ e->key.s_port = src_port;
+ e->key.d_port = dst_port;
+ e->key.proto = ulh->protocol;
+
+ kv.value = (u64) (e - ls_mem->cache);
+
+ ctx.now = now;
+ ctx.ls = ls_mem;
+ clib_bihash_add_or_overwrite_stale_40_8 (h, &kv,
+ ad_flow_is_idle_entry_cb, &ctx);
+ }
+ e->last_heard = now;
+
+ /* Cache encapsulation headers */
+ if (PREDICT_FALSE (encap_length > e->rw_len))
+ {
+ vec_validate (e->rw_data, encap_length - 1);
+ }
+ clib_memcpy_fast (e->rw_data, ip, encap_length);
+ e->rw_len = encap_length;
+
+ /* Update LRU */
+ ad_flow_entry_update_lru (ls_mem, e);
+
+ /* Decapsulate the packet */
+ vlib_buffer_advance (b, encap_length);
+
+ /* Set next node */
+ *next = SRV6_AD_FLOW_LOCALSID_NEXT_REWRITE6;
+
+ /* Set Xconnect adjacency to VNF */
+ vnet_buffer (b)->ip.adj_index[VLIB_TX] = ls_mem->nh_adj;
+}
+
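+/**
+ * @brief Function doing SRH processing for per-flow AD behavior (IPv4 inner
+ * traffic)
+ */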
+static_always_inline void
+end_ad_flow_processing_v4 (vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip,
+ srv6_ad_flow_localsid_t *ls_mem, u32 *next,
+ vlib_combined_counter_main_t **cnt, u32 *cnt_idx,
+ f64 now)
+{
+ ip6_sr_main_t *srm = &sr_main;
+ srv6_ad_flow_main_t *sm = &srv6_ad_flow_main;
+ ip6_address_t *new_dst;
+ u32 encap_length = sizeof (ip6_header_t);
+ ip6_sr_header_t *srh;
+ clib_bihash_40_8_t *h = &ls_mem->ftable;
+ ip4_header_t *ulh = NULL;
+ u16 src_port = 0, dst_port = 0;
+ srv6_ad_flow_entry_t *e = NULL;
+ clib_bihash_kv_40_8_t kv, value;
+ srv6_ad_is_idle_entry_ctx_t ctx;
+
+ /* Find SRH in the extension header chain */
+ end_ad_flow_walk_expect_first_hdr (vm, b, (void *) (ip + 1), ip->protocol,
+ IP_PROTOCOL_IPV6_ROUTE, &encap_length,
+ (u8 **) &srh);
+
+ /* Punt the packet if no SRH or SRH with SL = 0 */
+ if (PREDICT_FALSE (srh == NULL || srh->type != ROUTING_HEADER_TYPE_SR ||
+ srh->segments_left == 0))
+ {
+ *next = SRV6_AD_FLOW_LOCALSID_NEXT_PUNT;
+ *cnt = &(sm->sid_punt_counters);
+ *cnt_idx = ls_mem->index;
+ return;
+ }
+
+ /* Decrement Segments Left and update Destination Address */
+ srh->segments_left -= 1;
+ new_dst = (ip6_address_t *) (srh->segments) + srh->segments_left;
+ ip->dst_address.as_u64[0] = new_dst->as_u64[0];
+ ip->dst_address.as_u64[1] = new_dst->as_u64[1];
+
+ /* Add SRH length to the total encapsulation size */
+ encap_length += ip6_ext_header_len ((ip6_ext_header_t *) srh);
+
+  /* Find the inner IPv4 header (ULH) */
+ int ret = end_ad_flow_walk_expect_first_hdr (
+ vm, b, ip6_ext_next_header ((ip6_ext_header_t *) srh), srh->protocol,
+ IP_PROTOCOL_IP_IN_IP, &encap_length, (u8 **) &ulh);
+
+ if (PREDICT_FALSE (ulh == NULL))
+ {
+ if (ret == -1) /* Bypass the NF if ULH is not of expected type */
+ {
+ *next = SRV6_AD_FLOW_LOCALSID_NEXT_BYPASS;
+ *cnt = &(sm->sid_bypass_counters);
+ *cnt_idx = ls_mem->index;
+ }
+ else
+ {
+ *next = SRV6_AD_FLOW_LOCALSID_NEXT_ERROR;
+ *cnt = &(srm->sr_ls_invalid_counters);
+ }
+ return;
+ }
+
+ /* Compute flow hash on ULH */
+ if (PREDICT_TRUE (ulh->protocol == IP_PROTOCOL_UDP ||
+ ulh->protocol == IP_PROTOCOL_TCP))
+ {
+ udp_header_t *ulh_l4_hdr = (udp_header_t *) (ulh + 1);
+ src_port = ulh_l4_hdr->src_port;
+ dst_port = ulh_l4_hdr->dst_port;
+ }
+
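+  /* Build the flow key from the inner packet addresses and ports */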
+ kv.key[0] = *((u64 *) &ulh->address_pair);
+ kv.key[1] = ((u64) src_port << 16) | ((u64) dst_port);
+ kv.key[2] = 0;
+ kv.key[3] = 0;
+ kv.key[4] = 0;
+
+ /* Lookup flow in hashtable */
+ if (!clib_bihash_search_40_8 (h, &kv, &value))
+ {
+ e = pool_elt_at_index (ls_mem->cache,
+ ad_flow_value_get_session_index (&value));
+ }
+
+ if (!e)
+ {
+ if (pool_elts (ls_mem->cache) >= ls_mem->cache_size)
+ {
+ if (!ad_flow_lru_free_one (ls_mem, now))
+ {
+ *next = SRV6_AD_FLOW_LOCALSID_NEXT_ERROR;
+ *cnt = &(sm->sid_cache_full_counters);
+ *cnt_idx = ls_mem->index;
+ return;
+ }
+ }
+
+ e = ad_flow_entry_alloc (ls_mem, now);
+ ASSERT (e);
+ e->key.s_addr.ip4 = ulh->src_address;
+ e->key.d_addr.ip4 = ulh->dst_address;
+ e->key.s_port = src_port;
+ e->key.d_port = dst_port;
+ e->key.proto = ulh->protocol;
+
+ kv.value = (u64) (e - ls_mem->cache);
+
+ ctx.now = now;
+ ctx.ls = ls_mem;
+ clib_bihash_add_or_overwrite_stale_40_8 (h, &kv,
+ ad_flow_is_idle_entry_cb, &ctx);
+ }
+ e->last_heard = now;
+
+ /* Cache encapsulation headers */
+ if (PREDICT_FALSE (encap_length > e->rw_len))
+ {
+ vec_validate (e->rw_data, encap_length - 1);
+ }
+ clib_memcpy_fast (e->rw_data, ip, encap_length);
+ e->rw_len = encap_length;
+
+ /* Update LRU */
+ ad_flow_entry_update_lru (ls_mem, e);
+
+ /* Decapsulate the packet */
+ vlib_buffer_advance (b, encap_length);
+
+ /* Set next node */
+ *next = SRV6_AD_FLOW_LOCALSID_NEXT_REWRITE4;
+
+ /* Set Xconnect adjacency to VNF */
+ vnet_buffer (b)->ip.adj_index[VLIB_TX] = ls_mem->nh_adj;
+}
+
+/**
+ * @brief SRv6 AD Localsid graph node
+ */
+static uword
+srv6_ad_flow_localsid_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
+{
+ ip6_sr_main_t *srm = &sr_main;
+ f64 now = vlib_time_now (vm);
+ u32 n_left_from, next_index, *from, *to_next, n_left_to_next;
+ u32 thread_index = vm->thread_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* TODO: Dual/quad loop */
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ ip6_header_t *ip0 = 0;
+ ip6_sr_localsid_t *ls0;
+ srv6_ad_flow_localsid_t *ls_mem0;
+ u32 next0;
+ vlib_combined_counter_main_t *cnt0 = &(srm->sr_ls_valid_counters);
+ u32 cnt_idx0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* Retrieve local SID context based on IP DA (adj) */
+ ls0 = pool_elt_at_index (srm->localsids,
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
+
+ cnt_idx0 = ls0 - srm->localsids;
+
+ /* Retrieve local SID's plugin memory */
+ ls_mem0 = ls0->plugin_mem;
+
+ /* SRH processing */
+ if (ls_mem0->inner_type == AD_TYPE_IP6)
+ end_ad_flow_processing_v6 (vm, b0, ip0, ls_mem0, &next0, &cnt0,
+ &cnt_idx0, now);
+ else
+ end_ad_flow_processing_v4 (vm, b0, ip0, ls_mem0, &next0, &cnt0,
+ &cnt_idx0, now);
+
+ /* Trace packet (if enabled) */
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ srv6_ad_flow_localsid_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof *tr);
+ tr->localsid_index = ls_mem0->index;
+ }
+
+ /* Increment the appropriate per-SID counter */
+ vlib_increment_combined_counter (
+ cnt0, thread_index, cnt_idx0, 1,
+ vlib_buffer_length_in_chain (vm, b0));
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (srv6_ad_flow_localsid_node) = {
+ .function = srv6_ad_flow_localsid_fn,
+ .name = "srv6-ad-flow-localsid",
+ .vector_size = sizeof (u32),
+ .format_trace = format_srv6_ad_flow_localsid_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_next_nodes = SRV6_AD_FLOW_LOCALSID_N_NEXT,
+ .next_nodes = {
+ [SRV6_AD_FLOW_LOCALSID_NEXT_PUNT] = "ip6-local",
+ [SRV6_AD_FLOW_LOCALSID_NEXT_BYPASS] = "ip6-lookup",
+ [SRV6_AD_FLOW_LOCALSID_NEXT_REWRITE4] = "ip4-rewrite",
+ [SRV6_AD_FLOW_LOCALSID_NEXT_REWRITE6] = "ip6-rewrite",
+ [SRV6_AD_FLOW_LOCALSID_NEXT_ERROR] = "error-drop",
+ },
+};
+
+/****************************** Rewriting node *******************************/
+
+/**
+ * @brief Graph node restoring the cached SRv6 encapsulation on IPv4 packets
+ * returning from the NF.
+ */
+static uword
+srv6_ad4_flow_rewrite_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
+{
+ ip6_sr_main_t *srm = &sr_main;
+ srv6_ad_flow_main_t *sm = &srv6_ad_flow_main;
+ u32 n_left_from, next_index, *from, *to_next;
+ u32 cnt_packets = 0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* TODO: Dual/quad loop */
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ ip4_header_t *ip0_encap = 0;
+ ip6_header_t *ip0 = 0;
+ ip6_sr_localsid_t *ls0;
+ srv6_ad_flow_localsid_t *ls0_mem;
+ srv6_ad_flow_entry_t *s0;
+ u32 next0 = SRV6_AD_FLOW_REWRITE_NEXT_LOOKUP;
+ u16 new_l0 = 0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ ip0_encap = vlib_buffer_get_current (b0);
+ ls0 = pool_elt_at_index (
+ srm->localsids,
+ sm->sw_iface_localsid4[vnet_buffer (b0)->sw_if_index[VLIB_RX]]);
+ ls0_mem = ls0->plugin_mem;
+
+ if (PREDICT_FALSE (ls0_mem == NULL))
+ {
+ next0 = SRV6_AD_FLOW_REWRITE_NEXT_ERROR;
+ b0->error = node->errors[SRV6_AD_FLOW_REWRITE_COUNTER_NO_RW];
+ }
+ else
+ {
+ clib_bihash_kv_40_8_t kv0, value0;
+
+ /* Compute flow hash */
+ u64 ports = 0;
+ if (PREDICT_TRUE (ip0_encap->protocol == IP_PROTOCOL_UDP ||
+ ip0_encap->protocol == IP_PROTOCOL_TCP))
+ {
+ udp_header_t *udp0 = (udp_header_t *) (ip0_encap + 1);
+ ports =
+ ((u64) udp0->src_port << 16) | ((u64) udp0->dst_port);
+ }
+
+ kv0.key[0] = *((u64 *) &ip0_encap->address_pair);
+ kv0.key[1] = ports;
+ kv0.key[2] = 0;
+ kv0.key[3] = 0;
+ kv0.key[4] = 0;
+
+ /* Lookup flow in hashtable */
+ if (clib_bihash_search_40_8 (&ls0_mem->ftable, &kv0, &value0) <
+ 0)
+ {
+ /* not found */
+ next0 = SRV6_AD_FLOW_REWRITE_NEXT_ERROR;
+ b0->error = node->errors[SRV6_AD_FLOW_REWRITE_COUNTER_NO_RW];
+ }
+ else
+ {
+ /* found */
+ s0 = pool_elt_at_index (
+ ls0_mem->cache, ad_flow_value_get_session_index (&value0));
+ ASSERT (s0);
+ ASSERT (VLIB_BUFFER_PRE_DATA_SIZE >=
+ (s0->rw_len + b0->current_data));
+
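+		  /* Prepend the cached encapsulation headers to the packet */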
+ clib_memcpy_fast (((u8 *) ip0_encap) - s0->rw_len,
+ s0->rw_data, s0->rw_len);
+ vlib_buffer_advance (b0, -(word) s0->rw_len);
+
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* Update inner IPv4 TTL and checksum */
+ u32 checksum0;
+ ip0_encap->ttl -= 1;
+ checksum0 =
+ ip0_encap->checksum + clib_host_to_net_u16 (0x0100);
+ checksum0 += checksum0 >= 0xffff;
+ ip0_encap->checksum = checksum0;
+
+ /* Update outer IPv6 length (in case it has changed) */
+ new_l0 = s0->rw_len - sizeof (ip6_header_t) +
+ clib_net_to_host_u16 (ip0_encap->length);
+ ip0->payload_length = clib_host_to_net_u16 (new_l0);
+ }
+ }
+
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
+ PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ srv6_ad_flow_rewrite_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof *tr);
+ tr->error = 0;
+
+ if (next0 == SRV6_AD_FLOW_REWRITE_NEXT_ERROR)
+ {
+ tr->error = 1;
+ }
+ else
+ {
+ clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
+ sizeof tr->src.as_u8);
+ clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
+ sizeof tr->dst.as_u8);
+ }
+ }
+
+ /* Increment per-SID AD rewrite counters */
+ vlib_increment_combined_counter (
+ ((next0 == SRV6_AD_FLOW_REWRITE_NEXT_ERROR) ?
+ &(sm->rw_invalid_counters) :
+ &(sm->rw_valid_counters)),
+ vm->thread_index, ls0_mem->index, 1,
+ vlib_buffer_length_in_chain (vm, b0));
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+
+ cnt_packets++;
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ /* Update counters */
+ vlib_node_increment_counter (vm, srv6_ad4_flow_rewrite_node.index,
+ SRV6_AD_FLOW_REWRITE_COUNTER_PROCESSED,
+ cnt_packets);
+
+ return frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (srv6_ad4_flow_rewrite_node) = {
+ .function = srv6_ad4_flow_rewrite_fn,
+ .name = "srv6-ad4-flow-rewrite",
+ .vector_size = sizeof (u32),
+ .format_trace = format_srv6_ad_flow_rewrite_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = SRV6_AD_FLOW_REWRITE_N_COUNTERS,
+ .error_strings = srv6_ad_flow_rewrite_counter_strings,
+ .n_next_nodes = SRV6_AD_FLOW_REWRITE_N_NEXT,
+ .next_nodes = {
+ [SRV6_AD_FLOW_REWRITE_NEXT_LOOKUP] = "ip6-lookup",
+ [SRV6_AD_FLOW_REWRITE_NEXT_ERROR] = "error-drop",
+ },
+};
+
+/**
+ * @brief Graph node restoring the cached SRv6 encapsulation on IPv6 packets
+ * returning from the NF.
+ */
+static uword
+srv6_ad6_flow_rewrite_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
+{
+ ip6_sr_main_t *srm = &sr_main;
+ srv6_ad_flow_main_t *sm = &srv6_ad_flow_main;
+ u32 n_left_from, next_index, *from, *to_next;
+ u32 cnt_packets = 0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* TODO: Dual/quad loop */
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ ip6_header_t *ip0 = 0, *ip0_encap = 0;
+ ip6_sr_localsid_t *ls0;
+ srv6_ad_flow_localsid_t *ls0_mem;
+ srv6_ad_flow_entry_t *s0;
+ u32 next0 = SRV6_AD_FLOW_REWRITE_NEXT_LOOKUP;
+ u16 new_l0 = 0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ ip0_encap = vlib_buffer_get_current (b0);
+ ls0 = pool_elt_at_index (
+ srm->localsids,
+ sm->sw_iface_localsid6[vnet_buffer (b0)->sw_if_index[VLIB_RX]]);
+ ls0_mem = ls0->plugin_mem;
+
+ if (PREDICT_FALSE (ls0_mem == NULL))
+ {
+ next0 = SRV6_AD_FLOW_REWRITE_NEXT_ERROR;
+ b0->error = node->errors[SRV6_AD_FLOW_REWRITE_COUNTER_NO_RW];
+ }
+ else
+ {
+ clib_bihash_kv_40_8_t kv0, value0;
+
+ /* Compute flow hash */
+ u64 ports = 0;
+ if (PREDICT_TRUE (ip0_encap->protocol == IP_PROTOCOL_UDP ||
+ ip0_encap->protocol == IP_PROTOCOL_TCP))
+ {
+ udp_header_t *udp0 = (udp_header_t *) (ip0_encap + 1);
+ ports =
+ ((u64) udp0->src_port << 16) | ((u64) udp0->dst_port);
+ }
+
+ kv0.key[0] = ip0_encap->src_address.as_u64[0];
+ kv0.key[1] = ip0_encap->src_address.as_u64[1];
+ kv0.key[2] = ip0_encap->dst_address.as_u64[0];
+ kv0.key[3] = ip0_encap->dst_address.as_u64[1];
+ kv0.key[4] = ports;
+
+ /* Lookup flow in hashtable */
+ if (clib_bihash_search_40_8 (&ls0_mem->ftable, &kv0, &value0))
+ {
+ /* not found */
+ next0 = SRV6_AD_FLOW_REWRITE_NEXT_ERROR;
+ b0->error = node->errors[SRV6_AD_FLOW_REWRITE_COUNTER_NO_RW];
+ }
+ else
+ {
+ /* found */
+ s0 = pool_elt_at_index (
+ ls0_mem->cache, ad_flow_value_get_session_index (&value0));
+ ASSERT (s0);
+
+ ASSERT (VLIB_BUFFER_PRE_DATA_SIZE >=
+ (s0->rw_len + b0->current_data));
+
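+		  /* Prepend the cached encapsulation headers to the packet */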
+ clib_memcpy_fast (((u8 *) ip0_encap) - s0->rw_len,
+ s0->rw_data, s0->rw_len);
+ vlib_buffer_advance (b0, -(word) s0->rw_len);
+
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* Update inner IPv6 hop limit */
+ ip0_encap->hop_limit -= 1;
+
+ /* Update outer IPv6 length (in case it has changed) */
+ new_l0 = s0->rw_len +
+ clib_net_to_host_u16 (ip0_encap->payload_length);
+ ip0->payload_length = clib_host_to_net_u16 (new_l0);
+ }
+ }
+
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
+ PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ srv6_ad_flow_rewrite_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof *tr);
+ tr->error = 0;
+
+ if (next0 == SRV6_AD_FLOW_REWRITE_NEXT_ERROR)
+ {
+ tr->error = 1;
+ }
+ else
+ {
+ clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
+ sizeof tr->src.as_u8);
+ clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
+ sizeof tr->dst.as_u8);
+ }
+ }
+
+ /* Increment per-SID AD rewrite counters */
+ vlib_increment_combined_counter (
+ ((next0 == SRV6_AD_FLOW_REWRITE_NEXT_ERROR) ?
+ &(sm->rw_invalid_counters) :
+ &(sm->rw_valid_counters)),
+ vm->thread_index, ls0_mem->index, 1,
+ vlib_buffer_length_in_chain (vm, b0));
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+
+ cnt_packets++;
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ /* Update counters */
+ vlib_node_increment_counter (vm, srv6_ad6_flow_rewrite_node.index,
+ SRV6_AD_FLOW_REWRITE_COUNTER_PROCESSED,
+ cnt_packets);
+
+ return frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (srv6_ad6_flow_rewrite_node) = {
+ .function = srv6_ad6_flow_rewrite_fn,
+ .name = "srv6-ad6-flow-rewrite",
+ .vector_size = sizeof (u32),
+ .format_trace = format_srv6_ad_flow_rewrite_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = SRV6_AD_FLOW_REWRITE_N_COUNTERS,
+ .error_strings = srv6_ad_flow_rewrite_counter_strings,
+ .n_next_nodes = SRV6_AD_FLOW_REWRITE_N_NEXT,
+ .next_nodes = {
+ [SRV6_AD_FLOW_REWRITE_NEXT_LOOKUP] = "ip6-lookup",
+ [SRV6_AD_FLOW_REWRITE_NEXT_ERROR] = "error-drop",
+ },
+};
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/srv6-ad-flow/test/test_srv6_ad_flow.py b/src/plugins/srv6-ad-flow/test/test_srv6_ad_flow.py
new file mode 100644
index 00000000000..f5452089a79
--- /dev/null
+++ b/src/plugins/srv6-ad-flow/test/test_srv6_ad_flow.py
@@ -0,0 +1,637 @@
+#!/usr/bin/env python3
+
+import unittest
+import binascii
+from socket import AF_INET6
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip import DpoProto
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable
+
+import scapy.compat
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether, Dot1Q
+from scapy.layers.inet6 import IPv6, IPv6ExtHdrSegmentRouting
+from scapy.layers.inet import IP, UDP
+
+from util import ppp
+
+
+class TestSRv6(VppTestCase):
+ """ SRv6 Flow-based Dynamic Proxy plugin Test Case """
+
+ @classmethod
+    def setUpClass(cls):
+        super(TestSRv6, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestSRv6, cls).tearDownClass()
+
+ def setUp(self):
+ """ Perform test setup before each test case.
+ """
+ super(TestSRv6, self).setUp()
+
+ # packet sizes, inclusive L2 overhead
+ self.pg_packet_sizes = [64, 512, 1518, 9018]
+
+ # reset packet_infos
+ self.reset_packet_infos()
+
+ def tearDown(self):
+ """ Clean up test setup after each test case.
+ """
+ self.teardown_interfaces()
+
+ super(TestSRv6, self).tearDown()
+
+ def configure_interface(self,
+ interface,
+ ipv6=False, ipv4=False,
+ ipv6_table_id=0, ipv4_table_id=0):
+ """ Configure interface.
+ :param ipv6: configure IPv6 on interface
+ :param ipv4: configure IPv4 on interface
+ :param ipv6_table_id: FIB table_id for IPv6
+ :param ipv4_table_id: FIB table_id for IPv4
+ """
+ self.logger.debug("Configuring interface %s" % (interface.name))
+ if ipv6:
+ self.logger.debug("Configuring IPv6")
+ interface.set_table_ip6(ipv6_table_id)
+ interface.config_ip6()
+ interface.resolve_ndp(timeout=5)
+ if ipv4:
+ self.logger.debug("Configuring IPv4")
+ interface.set_table_ip4(ipv4_table_id)
+ interface.config_ip4()
+ interface.resolve_arp()
+ interface.admin_up()
+
+ def setup_interfaces(self, ipv6=[], ipv4=[],
+ ipv6_table_id=[], ipv4_table_id=[]):
+ """ Create and configure interfaces.
+
+ :param ipv6: list of interface IPv6 capabilities
+ :param ipv4: list of interface IPv4 capabilities
+ :param ipv6_table_id: list of intf IPv6 FIB table_ids
+ :param ipv4_table_id: list of intf IPv4 FIB table_ids
+ :returns: List of created interfaces.
+ """
+ # how many interfaces?
+ if len(ipv6):
+ count = len(ipv6)
+ else:
+ count = len(ipv4)
+ self.logger.debug("Creating and configuring %d interfaces" % (count))
+
+ # fill up ipv6 and ipv4 lists if needed
+ # not enabled (False) is the default
+ if len(ipv6) < count:
+ ipv6 += (count - len(ipv6)) * [False]
+ if len(ipv4) < count:
+ ipv4 += (count - len(ipv4)) * [False]
+
+ # fill up table_id lists if needed
+ # table_id 0 (global) is the default
+ if len(ipv6_table_id) < count:
+ ipv6_table_id += (count - len(ipv6_table_id)) * [0]
+ if len(ipv4_table_id) < count:
+ ipv4_table_id += (count - len(ipv4_table_id)) * [0]
+
+ # create 'count' pg interfaces
+ self.create_pg_interfaces(range(count))
+
+ # setup all interfaces
+ for i in range(count):
+ intf = self.pg_interfaces[i]
+ self.configure_interface(intf,
+ ipv6[i], ipv4[i],
+ ipv6_table_id[i], ipv4_table_id[i])
+
+ if any(ipv6):
+ self.logger.debug(self.vapi.cli("show ip6 neighbors"))
+ if any(ipv4):
+ self.logger.debug(self.vapi.cli("show ip4 neighbors"))
+ self.logger.debug(self.vapi.cli("show interface"))
+ self.logger.debug(self.vapi.cli("show hardware"))
+
+ return self.pg_interfaces
+
+ def teardown_interfaces(self):
+ """ Unconfigure and bring down interface.
+ """
+ self.logger.debug("Tearing down interfaces")
+ # tear down all interfaces
+ # AFAIK they cannot be deleted
+ for i in self.pg_interfaces:
+ self.logger.debug("Tear down interface %s" % (i.name))
+ i.admin_down()
+ i.unconfig()
+ i.set_table_ip4(0)
+ i.set_table_ip6(0)
+
+ def test_SRv6_End_AD_IPv6(self):
+        """ Test SRv6 End.AD.Flow behavior with IPv6 traffic.
+ """
+ self.src_addr = 'a0::'
+ self.sid_list = ['a1::', 'a2::a6', 'a3::']
+ self.test_sid_index = 1
+
+ # send traffic to one destination interface
+ # source and destination interfaces are IPv6 only
+ self.setup_interfaces(ipv6=[True, True])
+
+ # configure route to next segment
+ route = VppIpRoute(self, self.sid_list[self.test_sid_index + 1], 128,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index,
+ proto=DpoProto.DPO_PROTO_IP6)])
+ route.add_vpp_config()
+
+ # configure SRv6 localSID behavior
+ cli_str = "sr localsid address " + \
+ self.sid_list[self.test_sid_index] + \
+ " behavior end.ad.flow" + \
+ " nh " + self.pg1.remote_ip6 + \
+ " oif " + self.pg1.name + \
+ " iif " + self.pg1.name
+ self.vapi.cli(cli_str)
+
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # send one packet per packet size
+ count = len(self.pg_packet_sizes)
+
+ # prepare IPv6 in SRv6 headers
+ packet_header1 = self.create_packet_header_IPv6_SRH_IPv6(
+ srcaddr=self.src_addr,
+ sidlist=self.sid_list[::-1],
+ segleft=len(self.sid_list) - self.test_sid_index - 1)
+
+ # generate packets (pg0->pg1)
+ pkts1 = self.create_stream(self.pg0, self.pg1, packet_header1,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts1, self.pg1,
+ self.compare_rx_tx_packet_End_AD_IPv6_out)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # prepare IPv6 header for returning packets
+ packet_header2 = self.create_packet_header_IPv6()
+
+ # generate returning packets (pg1->pg0)
+ pkts2 = self.create_stream(self.pg1, self.pg0, packet_header2,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg1, pkts2, self.pg0,
+ self.compare_rx_tx_packet_End_AD_IPv6_in)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ cli_str = "sr localsid del address " + \
+ self.sid_list[self.test_sid_index]
+ self.vapi.cli(cli_str)
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def compare_rx_tx_packet_End_AD_IPv6_out(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AD with IPv6
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+ tx_ip2 = tx_pkt.getlayer(IPv6, 2)
+
+ # verify if rx'ed packet has no SRH
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # the whole rx_ip pkt should be equal to tx_ip2
+ # except for the hlim field
+ # -> adjust tx'ed hlim to expected hlim
+ tx_ip2.hlim = tx_ip2.hlim - 1
+
+ self.assertEqual(rx_ip, tx_ip2)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End_AD_IPv6_in(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AD
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+ # received ip.src should be equal to SR Policy source
+ self.assertEqual(rx_ip.src, self.src_addr)
+ # received ip.dst should be equal to expected sidlist next segment
+ self.assertEqual(rx_ip.dst, self.sid_list[self.test_sid_index + 1])
+
+ # rx'ed packet should have SRH
+ self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # get SRH
+ rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+ # rx'ed seglist should be equal to SID-list in reversed order
+ self.assertEqual(rx_srh.addresses, self.sid_list[::-1])
+ # segleft should be equal to previous segleft value minus 1
+ self.assertEqual(rx_srh.segleft,
+ len(self.sid_list) - self.test_sid_index - 2)
+ # lastentry should be equal to the SID-list length minus 1
+ self.assertEqual(rx_srh.lastentry, len(self.sid_list) - 1)
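+ # together these checks verify that the proxy re-attached the
+ # original outer headers with segments-left decremented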
+
+ # the whole rx'ed pkt beyond SRH should be equal to tx'ed pkt
+ # except for the hop-limit field
+ tx_ip = tx_pkt.getlayer(IPv6)
+ # -> update tx'ed hlim to the expected hlim
+ tx_ip.hlim -= 1
+ # -> check payload
+ self.assertEqual(rx_srh.payload, tx_ip)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def test_SRv6_End_AD_IPv4(self):
+ """ Test SRv6 End.AD behavior with IPv4 traffic.
+ """
+ self.src_addr = 'a0::'
+ self.sid_list = ['a1::', 'a2::a4', 'a3::']
+ self.test_sid_index = 1
+
+ # send traffic to one destination interface
+ # source interface is IPv6 only, destination interface is IPv4 only
+ self.setup_interfaces(ipv6=[True, False], ipv4=[False, True])
+
+ # configure route to next segment
+ route = VppIpRoute(self, self.sid_list[self.test_sid_index + 1], 128,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index,
+ proto=DpoProto.DPO_PROTO_IP6)])
+ route.add_vpp_config()
+
+ # configure SRv6 localSID behavior
+ cli_str = "sr localsid address " + \
+ self.sid_list[self.test_sid_index] + \
+ " behavior end.ad.flow" + \
+ " nh " + self.pg1.remote_ip4 + \
+ " oif " + self.pg1.name + \
+ " iif " + self.pg1.name
+ self.vapi.cli(cli_str)
+
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # send one packet per packet size
+ count = len(self.pg_packet_sizes)
+
+ # prepare IPv4 in SRv6 headers
+ packet_header1 = self.create_packet_header_IPv6_SRH_IPv4(
+ srcaddr=self.src_addr,
+ sidlist=self.sid_list[::-1],
+ segleft=len(self.sid_list) - self.test_sid_index - 1)
+
+ # generate packets (pg0->pg1)
+ pkts1 = self.create_stream(self.pg0, self.pg1, packet_header1,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts1, self.pg1,
+ self.compare_rx_tx_packet_End_AD_IPv4_out)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # prepare IPv4 header for returning packets
+ packet_header2 = self.create_packet_header_IPv4()
+
+ # generate returning packets (pg1->pg0)
+ pkts2 = self.create_stream(self.pg1, self.pg0, packet_header2,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg1, pkts2, self.pg0,
+ self.compare_rx_tx_packet_End_AD_IPv4_in)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ cli_str = "sr localsid del address " + \
+ self.sid_list[self.test_sid_index]
+ self.vapi.cli(cli_str)
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def compare_rx_tx_packet_End_AD_IPv4_out(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AD with IPv4
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get IPv4 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IP)
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+ tx_ip2 = tx_pkt.getlayer(IP)
+
+ # verify that the rx'ed packet has no SRH
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # the whole rx_ip pkt should be equal to tx_ip2
+ # except for the ttl field and ip checksum
+ # -> adjust tx'ed ttl to expected ttl
+ tx_ip2.ttl = tx_ip2.ttl - 1
+ # -> set tx'ed ip checksum to None and let scapy recompute
+ tx_ip2.chksum = None
+ # rebuild the packet from its raw bytes so scapy recomputes
+ # the checksum and length fields
+ tx_ip2 = IP(scapy.compat.raw(tx_ip2))
+
+ self.assertEqual(rx_ip, tx_ip2)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End_AD_IPv4_in(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AD
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+ # received ip.src should be equal to SR Policy source
+ self.assertEqual(rx_ip.src, self.src_addr)
+ # received ip.dst should be equal to expected sidlist next segment
+ self.assertEqual(rx_ip.dst, self.sid_list[self.test_sid_index + 1])
+
+ # rx'ed packet should have SRH
+ self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # get SRH
+ rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+ # rx'ed seglist should be equal to SID-list in reversed order
+ self.assertEqual(rx_srh.addresses, self.sid_list[::-1])
+ # segleft should be equal to previous segleft value minus 1
+ self.assertEqual(rx_srh.segleft,
+ len(self.sid_list) - self.test_sid_index - 2)
+ # lastentry should be equal to the SID-list length minus 1
+ self.assertEqual(rx_srh.lastentry, len(self.sid_list) - 1)
+
+ # the whole rx'ed pkt beyond SRH should be equal to tx'ed pkt
+ # except for the ttl field and ip checksum
+ tx_ip = tx_pkt.getlayer(IP)
+ # -> adjust tx'ed ttl to expected ttl
+ tx_ip.ttl = tx_ip.ttl - 1
+ # -> set tx'ed ip checksum to None and let scapy recompute
+ tx_ip.chksum = None
+ # -> rebuild the packet from its raw bytes so scapy recomputes
+ # the checksum and length fields
+ self.assertEqual(rx_srh.payload, IP(scapy.compat.raw(tx_ip)))
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def create_stream(self, src_if, dst_if, packet_header, packet_sizes,
+ count):
+ """Create SRv6 input packet stream for defined interface.
+
+ :param VppInterface src_if: Interface to create packet stream for
+ :param VppInterface dst_if: destination interface of packet stream
+ :param packet_header: Layer3 scapy packet headers,
+ L2 is added when not provided,
+ Raw(payload) with packet_info is added
+ :param list packet_sizes: list of packet sizes, applied sequentially
+ to the packets in the stream
+ :param int count: number of packets in packet stream
+ :return: list of packets
+ """
+ self.logger.info("Creating packets")
+ pkts = []
+ for i in range(0, count):
+ payload_info = self.create_packet_info(src_if, dst_if)
+ self.logger.debug(
+ "Creating packet with index %d" % (payload_info.index))
+ payload = self.info_to_payload(payload_info)
+ # add L2 header if not yet provided in packet_header
+ if packet_header.getlayer(0).name == 'Ethernet':
+ p = packet_header / Raw(payload)
+ else:
+ p = Ether(dst=src_if.local_mac, src=src_if.remote_mac) / \
+ packet_header / Raw(payload)
+ size = packet_sizes[i % len(packet_sizes)]
+ self.logger.debug("Packet size %d" % (size))
+ self.extend_packet(p, size)
+ # store the packet with the automatic fields computed:
+ # rebuild it from its raw bytes so scapy fills in
+ # checksums and length fields
+ p = Ether(scapy.compat.raw(p))
+ payload_info.data = p.copy()
+ self.logger.debug(ppp("Created packet:", p))
+ pkts.append(p)
+ self.logger.info("Done creating packets")
+ return pkts
+
+ def send_and_verify_pkts(self, input, pkts, output, compare_func):
+ """Send packets and verify received packets using compare_func
+
+ :param input: ingress interface of DUT
+ :param pkts: list of packets to transmit
+ :param output: egress interface of DUT
+ :param compare_func: function to compare in and out packets
+ """
+ # add traffic stream to input interface
+ input.add_stream(pkts)
+
+ # enable capture on all interfaces
+ self.pg_enable_capture(self.pg_interfaces)
+
+ # start traffic
+ self.logger.info("Starting traffic")
+ self.pg_start()
+
+ # get output capture
+ self.logger.info("Getting packet capture")
+ capture = output.get_capture()
+
+ # assert nothing was captured on input interface
+ # input.assert_nothing_captured()
+
+ # verify captured packets
+ self.verify_captured_pkts(output, capture, compare_func)
+
+ def create_packet_header_IPv6(self, saddr='1234::1', daddr='4321::1',
+ sport=1234, dport=1234):
+ """Create packet header: IPv6 header, UDP header
+
+ :param saddr: IPv6 source address (default 1234::1)
+ :param daddr: IPv6 destination address (default 4321::1)
+ :param sport: UDP source port (default 1234)
+ :param dport: UDP destination port (default 1234)
+ """
+
+ p = IPv6(src=saddr, dst=daddr) / UDP(sport=sport, dport=dport)
+ return p
+
+ def create_packet_header_IPv6_SRH_IPv6(self, srcaddr, sidlist, segleft,
+ insrc='1234::1', indst='4321::1',
+ sport=1234, dport=1234):
+ """Create packet header: IPv6 encapsulated in SRv6:
+ IPv6 header with SRH, IPv6 header, UDP header
+
+ :param srcaddr: outer IPv6 source address
+ :param list sidlist: segment list of outer IPv6 SRH
+ :param int segleft: segments-left field of outer IPv6 SRH
+ :param insrc: inner IPv6 source address (default 1234::1)
+ :param indst: inner IPv6 destination address (default 4321::1)
+ :param sport: UDP source port (default 1234)
+ :param dport: UDP destination port (default 1234)
+
+ The outer IPv6 destination address is set to sidlist[segleft]
+ """
+
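+ # nh=41 marks the SRH payload as an IPv6 packet (IPv6-in-IPv6)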
+ p = IPv6(src=srcaddr, dst=sidlist[segleft]) / \
+ IPv6ExtHdrSegmentRouting(addresses=sidlist,
+ segleft=segleft, nh=41) / \
+ IPv6(src=insrc, dst=indst) / \
+ UDP(sport=sport, dport=dport)
+ return p
+
+ def create_packet_header_IPv4(self):
+ """Create packet header: IPv4 header, UDP header
+
+ IPv4 source address is 123.1.1.1
+ IPv4 destination address is 124.1.1.1
+ UDP source port and destination port are 1234
+ """
+
+ p = IP(src='123.1.1.1', dst='124.1.1.1') / UDP(sport=1234, dport=1234)
+ return p
+
+ def create_packet_header_IPv6_SRH_IPv4(self, srcaddr, sidlist, segleft):
+ """Create packet header: IPv4 encapsulated in SRv6:
+ IPv6 header with SRH, IPv4 header, UDP header
+
+ :param srcaddr: outer IPv6 source address
+ :param list sidlist: segment list of outer IPv6 SRH
+ :param int segleft: segments-left field of outer IPv6 SRH
+
+ Outer IPv6 source address is set to srcaddr
+ Outer IPv6 destination address is set to sidlist[segleft]
+ Inner IPv4 source address is 123.1.1.1
+ Inner IPv4 destination address is 124.1.1.1
+ UDP source port and destination port are 1234
+ """
+
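+ # nh=4 marks the SRH payload as an IPv4 packet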
+ p = IPv6(src=srcaddr, dst=sidlist[segleft]) / \
+ IPv6ExtHdrSegmentRouting(addresses=sidlist,
+ segleft=segleft, nh=4) / \
+ IP(src='123.1.1.1', dst='124.1.1.1') / \
+ UDP(sport=1234, dport=1234)
+ return p
+
+ def get_payload_info(self, packet):
+ """ Extract the payload_info from the packet
+ """
+ # in most cases, payload_info is in packet[Raw]
+ # but packet[Raw] gives the complete payload
+ # (incl L2 header) for the T.Encaps L2 case
+ try:
+ payload_info = self.payload_to_info(packet[Raw])
+
+ except:
+ # remove L2 header from packet[Raw]:
+ # take packet[Raw], convert it to an Ether layer
+ # and then extract Raw from it
+ payload_info = self.payload_to_info(
+ Ether(scapy.compat.raw(packet[Raw]))[Raw])
+
+ return payload_info
+
+ def verify_captured_pkts(self, dst_if, capture, compare_func):
+ """
+ Verify captured packet stream for specified interface.
+ Compare ingress with egress packets using the specified compare fn
+
+ :param dst_if: egress interface of DUT
+ :param capture: captured packets
+ :param compare_func: function to compare in and out packet
+ """
+ self.logger.info("Verifying capture on interface %s using function %s"
+ % (dst_if.name, compare_func.__name__))
+
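+ # last_info tracks, per source interface, the last matched tx
+ # packet info so each expected packet is matched exactly once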
+ last_info = dict()
+ for i in self.pg_interfaces:
+ last_info[i.sw_if_index] = None
+ dst_sw_if_index = dst_if.sw_if_index
+
+ for packet in capture:
+ try:
+ # extract payload_info from packet's payload
+ payload_info = self.get_payload_info(packet)
+ packet_index = payload_info.index
+
+ self.logger.debug("Verifying packet with index %d"
+ % (packet_index))
+ # packet should have arrived on the expected interface
+ self.assertEqual(payload_info.dst, dst_sw_if_index)
+ self.logger.debug(
+ "Got packet on interface %s: src=%u (idx=%u)" %
+ (dst_if.name, payload_info.src, packet_index))
+
+ # search for payload_info with same src and dst if_index
+ # this will give us the transmitted packet
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ # next_info should not be None
+ self.assertTrue(next_info is not None)
+ # index of tx and rx packets should be equal
+ self.assertEqual(packet_index, next_info.index)
+ # data field of next_info contains the tx packet
+ txed_packet = next_info.data
+
+ self.logger.debug(ppp("Transmitted packet:",
+ txed_packet)) # ppp=Pretty Print Packet
+
+ self.logger.debug(ppp("Received packet:", packet))
+
+ # compare rcvd packet with expected packet using compare_func
+ compare_func(txed_packet, packet)
+
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # have all expected packets arrived?
+ for i in self.pg_interfaces:
+ remaining_packet = self.get_next_packet_info_for_interface2(
+ i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index])
+ self.assertTrue(remaining_packet is None,
+ "Interface %s: Packet expected from interface %s "
+ "didn't arrive" % (dst_if.name, i.name))
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)