author	Chenmin Sun <chenmin.sun@intel.com>	2020-07-27 17:40:17 +0800
committer	Damjan Marion <dmarion@me.com>	2020-08-31 12:14:03 +0000
commit	34bfa50b61f8cf454ddce9f378e3d5d29f74a72b (patch)
tree	37b9e9256a959ce0dbc0bfc66c13814ff59526eb /src
parent	29f3c7d2ecac2f9d80bb33e91bd5d1f9d434768a (diff)
flow: code refactor
This is a code refactor of the vnet/flow infrastructure and of the dpdk plugin flow implementation. The main changes are:
1. Added two base flow types: VNET_FLOW_TYPE_IP4 and VNET_FLOW_TYPE_IP6.
2. All other flow types are now derived from these base types.
3. Removed flow types that are not currently supported by hardware and that VPP does not use: IP4_GTPU_IP4, IP4_GTPU_IP6, IP6_GTPC, IP6_GTPU, IP6_GTPU_IP4, IP6_GTPU_IP6.
4. Re-implemented the vnet/flow CLI as well as the dpdk plugin implementation.
5. Refined the CLI prompt/help text.
6. Refined the output of the "show flow entry" command.

Type: refactor
Signed-off-by: Chenmin Sun <chenmin.sun@intel.com>
Change-Id: Ica5e61c5881adc73b28335fd83e36ec1cb420c96
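The practical effect of items 1-2 is that a flow that only needs an L3/IP match no longer has to be expressed as an n-tuple with wildcarded ports. A minimal sketch of how a caller of the flow API builds such a flow after this patch (field names follow the foreach_flow_entry_ip4 / ip_prot_and_mask_t definitions below; the redirect action, queue number and hw_if_index are illustrative assumptions, not part of this change):

  /* sketch: match any IPv4/TCP packet and redirect it to rx queue 1 */
  vnet_flow_t flow = {
    .index = ~0,
    .actions = VNET_FLOW_ACTION_REDIRECT_TO_QUEUE,
    .redirect_queue = 1,
    .type = VNET_FLOW_TYPE_IP4,
    .ip4 = {
      .src_addr.mask.as_u32 = 0,        /* any source */
      .dst_addr.mask.as_u32 = 0,        /* any destination */
      .protocol.prot = IP_PROTOCOL_TCP,
      .protocol.mask = 0xFF,            /* exact-match the protocol */
    },
  };
  u32 flow_index;
  vnet_flow_add (vnet_get_main (), &flow, &flow_index);
  vnet_flow_enable (vnet_get_main (), flow_index, hw_if_index);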
Diffstat (limited to 'src')
-rw-r--r--	src/plugins/dpdk/device/flow.c	587
-rw-r--r--	src/plugins/gtpu/gtpu.c	2
-rw-r--r--	src/vnet/flow/flow.h	122
-rw-r--r--	src/vnet/flow/flow_api.c	26
-rw-r--r--	src/vnet/flow/flow_cli.c	506
-rw-r--r--	src/vnet/flow/flow_types.api	80
-rw-r--r--	src/vnet/vxlan/vxlan.c	10
7 files changed, 586 insertions, 747 deletions
diff --git a/src/plugins/dpdk/device/flow.c b/src/plugins/dpdk/device/flow.c
index f34050ac033..a090ec0e930 100644
--- a/src/plugins/dpdk/device/flow.c
+++ b/src/plugins/dpdk/device/flow.c
@@ -23,28 +23,52 @@
#include <vnet/ethernet/arp_packet.h>
#include <vnet/vxlan/vxlan.h>
#include <dpdk/device/dpdk.h>
-
#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
-/* check if flow is L2 flow */
-#define FLOW_IS_L2_LAYER(f) \
+#define FLOW_IS_ETHERNET_CLASS(f) \
(f->type == VNET_FLOW_TYPE_ETHERNET)
+#define FLOW_IS_IPV4_CLASS(f) \
+ ((f->type == VNET_FLOW_TYPE_IP4) || \
+ (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
+ (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
+ (f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
+ (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
+ (f->type == VNET_FLOW_TYPE_IP4_GTPU) || \
+ (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP) || \
+ (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP) || \
+ (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH))
+
+#define FLOW_IS_IPV6_CLASS(f) \
+ ((f->type == VNET_FLOW_TYPE_IP6) || \
+ (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
+ (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED) || \
+ (f->type == VNET_FLOW_TYPE_IP6_VXLAN))
+
/* check if flow is VLAN sensitive */
-#define FLOW_IS_VLAN_TAGGED(f) \
+#define FLOW_HAS_VLAN_TAG(f) \
((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
- (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))
+ (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))
+
+/* check if flow is L3 type */
+#define FLOW_IS_L3_TYPE(f) \
+ ((f->type == VNET_FLOW_TYPE_IP4) || \
+ (f->type == VNET_FLOW_TYPE_IP6))
/* check if flow is L4 type */
-#define FLOW_IS_L4_LAYER(f) \
+#define FLOW_IS_L4_TYPE(f) \
((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
- (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE))
+ (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
+ (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
+ (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))
/* check if flow is L4 tunnel type */
-#define FLOW_IS_L4_TUNNEL_LAYER(f) \
- ((f->type >= VNET_FLOW_TYPE_IP4_VXLAN) || \
- (f->type <= VNET_FLOW_TYPE_IP6_GTPU_IP6))
+#define FLOW_IS_L4_TUNNEL_TYPE(f) \
+ ((f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
+ (f->type == VNET_FLOW_TYPE_IP6_VXLAN) || \
+ (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
+ (f->type == VNET_FLOW_TYPE_IP4_GTPU))
/* constant structs */
static const struct rte_flow_attr ingress = {.ingress = 1 };
@@ -111,9 +135,7 @@ dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
struct rte_flow_item_eth eth[2] = { };
struct rte_flow_item_ipv4 ip4[2] = { };
- struct rte_flow_item_ipv4 inner_ip4[2] = { };
struct rte_flow_item_ipv6 ip6[2] = { };
- struct rte_flow_item_ipv6 inner_ip6[2] = { };
struct rte_flow_item_udp udp[2] = { };
struct rte_flow_item_tcp tcp[2] = { };
struct rte_flow_item_gtp gtp[2] = { };
@@ -143,14 +165,32 @@ dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
u8 protocol = IP_PROTOCOL_RESERVED;
int rv = 0;
+ enum
+ {
+ FLOW_UNKNOWN_CLASS,
+ FLOW_ETHERNET_CLASS,
+ FLOW_IPV4_CLASS,
+ FLOW_IPV6_CLASS,
+ } flow_class = FLOW_UNKNOWN_CLASS;
+
+ if (FLOW_IS_ETHERNET_CLASS (f))
+ flow_class = FLOW_ETHERNET_CLASS;
+ else if (FLOW_IS_IPV4_CLASS (f))
+ flow_class = FLOW_IPV4_CLASS;
+ else if (FLOW_IS_IPV6_CLASS (f))
+ flow_class = FLOW_IPV6_CLASS;
+ else
+ return VNET_FLOW_ERROR_NOT_SUPPORTED;
+
if (f->actions & (~xd->supported_flow_actions))
return VNET_FLOW_ERROR_NOT_SUPPORTED;
/* Match items */
- /* Ethernet */
+ /* Layer 2, Ethernet */
vec_add2 (items, item, 1);
item->type = RTE_FLOW_ITEM_TYPE_ETH;
- if (f->type == VNET_FLOW_TYPE_ETHERNET)
+
+ if (flow_class == FLOW_ETHERNET_CLASS)
{
vnet_flow_ethernet_t *te = &f->ethernet;
@@ -187,7 +227,8 @@ dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
item->mask = NULL;
}
- if (FLOW_IS_VLAN_TAGGED (f))
+ /* currently only a single empty vlan tag is supported */
+ if (FLOW_HAS_VLAN_TAG (f))
{
vec_add2 (items, item, 1);
item->type = RTE_FLOW_ITEM_TYPE_VLAN;
@@ -195,164 +236,143 @@ dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
item->mask = NULL;
}
- if (FLOW_IS_L2_LAYER (f))
+ if (FLOW_IS_ETHERNET_CLASS (f))
goto pattern_end;
- /* IP */
+ /* Layer 3, IP */
vec_add2 (items, item, 1);
- if (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP)
+ if (flow_class == FLOW_IPV4_CLASS)
{
- vnet_flow_ip4_l2tpv3oip_t *l2tp = &f->ip4_l2tpv3oip;
- item->type = RTE_FLOW_ITEM_TYPE_IPV4;
+ vnet_flow_ip4_t *ip4_ptr = &f->ip4;
- if (!l2tp->src_addr.mask.as_u32 && !l2tp->dst_addr.mask.as_u32)
- {
- item->spec = NULL;
- item->mask = NULL;
- }
- else
- {
- ip4[0].hdr.src_addr = l2tp->src_addr.addr.as_u32;
- ip4[1].hdr.src_addr = l2tp->src_addr.mask.as_u32;
- ip4[0].hdr.dst_addr = l2tp->dst_addr.addr.as_u32;
- ip4[1].hdr.dst_addr = l2tp->dst_addr.mask.as_u32;
- item->spec = ip4;
- item->mask = ip4 + 1;
- }
- protocol = l2tp->protocol;
- }
- else if (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP)
- {
- vnet_flow_ip4_ipsec_esp_t *tesp = &f->ip4_ipsec_esp;
item->type = RTE_FLOW_ITEM_TYPE_IPV4;
-
- if (!tesp->src_addr.mask.as_u32 && !tesp->dst_addr.mask.as_u32)
+ if ((!ip4_ptr->src_addr.mask.as_u32) &&
+ (!ip4_ptr->dst_addr.mask.as_u32) && (!ip4_ptr->protocol.mask))
{
item->spec = NULL;
item->mask = NULL;
}
else
{
- ip4[0].hdr.src_addr = tesp->src_addr.addr.as_u32;
- ip4[1].hdr.src_addr = tesp->src_addr.mask.as_u32;
- ip4[0].hdr.dst_addr = tesp->dst_addr.addr.as_u32;
- ip4[1].hdr.dst_addr = tesp->dst_addr.mask.as_u32;
+ ip4[0].hdr.src_addr = ip4_ptr->src_addr.addr.as_u32;
+ ip4[1].hdr.src_addr = ip4_ptr->src_addr.mask.as_u32;
+ ip4[0].hdr.dst_addr = ip4_ptr->dst_addr.addr.as_u32;
+ ip4[1].hdr.dst_addr = ip4_ptr->dst_addr.mask.as_u32;
+ ip4[0].hdr.next_proto_id = ip4_ptr->protocol.prot;
+ ip4[1].hdr.next_proto_id = ip4_ptr->protocol.mask;
+
item->spec = ip4;
item->mask = ip4 + 1;
}
- protocol = tesp->protocol;
- }
- else if (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH)
- {
- vnet_flow_ip4_ipsec_ah_t *tah = &f->ip4_ipsec_ah;
- item->type = RTE_FLOW_ITEM_TYPE_IPV4;
- if (!tah->src_addr.mask.as_u32 && !tah->dst_addr.mask.as_u32)
- {
- item->spec = NULL;
- item->mask = NULL;
- }
- else
+ if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
{
- ip4[0].hdr.src_addr = tah->src_addr.addr.as_u32;
- ip4[1].hdr.src_addr = tah->src_addr.mask.as_u32;
- ip4[0].hdr.dst_addr = tah->dst_addr.addr.as_u32;
- ip4[1].hdr.dst_addr = tah->dst_addr.mask.as_u32;
- item->spec = ip4;
- item->mask = ip4 + 1;
+ vnet_flow_ip4_n_tuple_t *ip4_n_ptr = &f->ip4_n_tuple;
+
+ src_port = ip4_n_ptr->src_port.port;
+ dst_port = ip4_n_ptr->dst_port.port;
+ src_port_mask = ip4_n_ptr->src_port.mask;
+ dst_port_mask = ip4_n_ptr->dst_port.mask;
}
- protocol = tah->protocol;
+
+ protocol = ip4_ptr->protocol.prot;
}
- else if ((f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||
- (f->type == VNET_FLOW_TYPE_IP6_GTPC) ||
- (f->type == VNET_FLOW_TYPE_IP6_GTPU) ||
- (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
- (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
+ else if (flow_class == FLOW_IPV6_CLASS)
{
- vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
+ vnet_flow_ip6_t *ip6_ptr = &f->ip6;
+
item->type = RTE_FLOW_ITEM_TYPE_IPV6;
- if (!clib_memcmp (&t6->src_addr.mask, &zero_addr, 16) &&
- !clib_memcmp (&t6->dst_addr.mask, &zero_addr, 16))
+ if ((ip6_ptr->src_addr.mask.as_u64[0] == 0) &&
+ (ip6_ptr->src_addr.mask.as_u64[1] == 0) &&
+ (!ip6_ptr->protocol.mask))
{
item->spec = NULL;
item->mask = NULL;
}
else
{
- clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
- clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
- clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
- clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
+ clib_memcpy (ip6[0].hdr.src_addr, &ip6_ptr->src_addr.addr,
+ ARRAY_LEN (ip6_ptr->src_addr.addr.as_u8));
+ clib_memcpy (ip6[1].hdr.src_addr, &ip6_ptr->src_addr.mask,
+ ARRAY_LEN (ip6_ptr->src_addr.mask.as_u8));
+ clib_memcpy (ip6[0].hdr.dst_addr, &ip6_ptr->dst_addr.addr,
+ ARRAY_LEN (ip6_ptr->dst_addr.addr.as_u8));
+ clib_memcpy (ip6[1].hdr.dst_addr, &ip6_ptr->dst_addr.mask,
+ ARRAY_LEN (ip6_ptr->dst_addr.mask.as_u8));
+ ip6[0].hdr.proto = ip6_ptr->protocol.prot;
+ ip6[1].hdr.proto = ip6_ptr->protocol.mask;
+
item->spec = ip6;
item->mask = ip6 + 1;
}
- src_port = t6->src_port.port;
- dst_port = t6->dst_port.port;
- src_port_mask = t6->src_port.mask;
- dst_port_mask = t6->dst_port.mask;
- protocol = t6->protocol;
+ if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
+ {
+ vnet_flow_ip6_n_tuple_t *ip6_n_ptr = &f->ip6_n_tuple;
+
+ src_port = ip6_n_ptr->src_port.port;
+ dst_port = ip6_n_ptr->dst_port.port;
+ src_port_mask = ip6_n_ptr->src_port.mask;
+ dst_port_mask = ip6_n_ptr->dst_port.mask;
+ }
+
+ protocol = ip6_ptr->protocol.prot;
}
- else if ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||
- (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||
- (f->type == VNET_FLOW_TYPE_IP4_GTPU) ||
- (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
- (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
+
+ if (FLOW_IS_L3_TYPE (f))
+ goto pattern_end;
+
+ /* Layer 4 / IP tunnel protocol */
+ vec_add2 (items, item, 1);
+ switch (protocol)
{
- vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
- item->type = RTE_FLOW_ITEM_TYPE_IPV4;
+ case IP_PROTOCOL_L2TP:
+ item->type = RTE_FLOW_ITEM_TYPE_L2TPV3OIP;
+ l2tp[0].session_id = clib_host_to_net_u32 (f->ip4_l2tpv3oip.session_id);
+ l2tp[1].session_id = ~0;
+
+ item->spec = l2tp;
+ item->mask = l2tp + 1;
+ break;
+
+ case IP_PROTOCOL_IPSEC_ESP:
+ item->type = RTE_FLOW_ITEM_TYPE_ESP;
+ esp[0].hdr.spi = clib_host_to_net_u32 (f->ip4_ipsec_esp.spi);
+ esp[1].hdr.spi = ~0;
+
+ item->spec = esp;
+ item->mask = esp + 1;
+ break;
- if (!t4->src_addr.mask.as_u32 && !t4->dst_addr.mask.as_u32)
+ case IP_PROTOCOL_IPSEC_AH:
+ item->type = RTE_FLOW_ITEM_TYPE_AH;
+ ah[0].spi = clib_host_to_net_u32 (f->ip4_ipsec_ah.spi);
+ ah[1].spi = ~0;
+
+ item->spec = ah;
+ item->mask = ah + 1;
+ break;
+ case IP_PROTOCOL_TCP:
+ item->type = RTE_FLOW_ITEM_TYPE_TCP;
+ if ((src_port_mask == 0) && (dst_port_mask == 0))
{
item->spec = NULL;
item->mask = NULL;
}
else
{
- ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
- ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
- ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
- ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
- item->spec = ip4;
- item->mask = ip4 + 1;
+ tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
+ tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
+ tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
+ tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
+ item->spec = tcp;
+ item->mask = tcp + 1;
}
+ break;
- src_port = t4->src_port.port;
- dst_port = t4->dst_port.port;
- src_port_mask = t4->src_port.mask;
- dst_port_mask = t4->dst_port.mask;
- protocol = t4->protocol;
- }
- else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
- {
- vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;
- ip4[0].hdr.src_addr = v4->src_addr.as_u32;
- ip4[1].hdr.src_addr = -1;
- ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
- ip4[1].hdr.dst_addr = -1;
- item->type = RTE_FLOW_ITEM_TYPE_IPV4;
- item->spec = ip4;
- item->mask = ip4 + 1;
-
- dst_port = v4->dst_port;
- dst_port_mask = -1;
- src_port = 0;
- src_port_mask = 0;
- protocol = IP_PROTOCOL_UDP;
- }
- else
- {
- rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
- goto done;
- }
-
- /* Layer 4 */
- if (protocol == IP_PROTOCOL_UDP)
- {
- vec_add2 (items, item, 1);
+ case IP_PROTOCOL_UDP:
item->type = RTE_FLOW_ITEM_TYPE_UDP;
-
if ((src_port_mask == 0) && (dst_port_mask == 0))
{
item->spec = NULL;
@@ -367,275 +387,60 @@ dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
item->spec = udp;
item->mask = udp + 1;
}
- }
- else if (protocol == IP_PROTOCOL_TCP)
- {
- vec_add2 (items, item, 1);
- item->type = RTE_FLOW_ITEM_TYPE_TCP;
- if ((src_port_mask == 0) && (dst_port_mask == 0))
- {
- item->spec = NULL;
- item->mask = NULL;
- }
- else
+ /* handle the UDP tunnels */
+ if (f->type == VNET_FLOW_TYPE_IP4_GTPC)
{
- tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
- tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
- tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
- tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
- item->spec = tcp;
- item->mask = tcp + 1;
- }
- }
- else if (protocol == IP_PROTOCOL_IPSEC_ESP)
- {
- vec_add2 (items, item, 1);
- item->type = RTE_FLOW_ITEM_TYPE_ESP;
-
- vnet_flow_ip4_ipsec_esp_t *tesp = &f->ip4_ipsec_esp;
- esp[0].hdr.spi = clib_host_to_net_u32 (tesp->spi);
- esp[1].hdr.spi = ~0;
-
- item->spec = esp;
- item->mask = esp + 1;
- }
- else if (protocol == IP_PROTOCOL_IPSEC_AH)
- {
- vec_add2 (items, item, 1);
- item->type = RTE_FLOW_ITEM_TYPE_AH;
-
- vnet_flow_ip4_ipsec_ah_t *tah = &f->ip4_ipsec_ah;
- ah[0].spi = clib_host_to_net_u32 (tah->spi);
- ah[1].spi = ~0;
-
- item->spec = ah;
- item->mask = ah + 1;
- }
- else if (protocol == IP_PROTOCOL_RESERVED)
- {
- rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
- goto done;
- }
-
- /* Tunnel header match */
- if (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP)
- {
- vec_add2 (items, item, 1);
- item->type = RTE_FLOW_ITEM_TYPE_L2TPV3OIP;
-
- vnet_flow_ip4_l2tpv3oip_t *tl2tp = &f->ip4_l2tpv3oip;
- l2tp[0].session_id = clib_host_to_net_u32 (tl2tp->session_id);
- l2tp[1].session_id = ~0;
-
- item->spec = l2tp;
- item->mask = l2tp + 1;
- }
+ gtp[0].teid = clib_host_to_net_u32 (f->ip4_gtpc.teid);
+ gtp[1].teid = ~0;
- if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
- {
- u32 vni = f->ip4_vxlan.vni;
- vxlan_header_t spec_hdr = {
- .flags = VXLAN_FLAGS_I,
- .vni_reserved = clib_host_to_net_u32 (vni << 8)
- };
- vxlan_header_t mask_hdr = {
- .flags = 0xff,
- .vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
- };
-
- clib_memset (raw, 0, sizeof raw);
- raw[0].item.relative = 1;
- raw[0].item.length = vxlan_hdr_sz;
-
- clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
- raw[0].item.pattern = raw[0].val + raw_sz;
- clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
- raw[1].item.pattern = raw[1].val + raw_sz;
-
- vec_add2 (items, item, 1);
- item->type = RTE_FLOW_ITEM_TYPE_RAW;
- item->spec = raw;
- item->mask = raw + 1;
- }
- else if (f->type == VNET_FLOW_TYPE_IP4_GTPC)
- {
- vnet_flow_ip4_gtpc_t *gc = &f->ip4_gtpc;
- gtp[0].teid = clib_host_to_net_u32 (gc->teid);
- gtp[1].teid = ~0;
-
- vec_add2 (items, item, 1);
- item->type = RTE_FLOW_ITEM_TYPE_GTPC;
- item->spec = gtp;
- item->mask = gtp + 1;
- }
- else if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
- {
- vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
- gtp[0].teid = clib_host_to_net_u32 (gu->teid);
- gtp[1].teid = ~0;
-
- vec_add2 (items, item, 1);
- item->type = RTE_FLOW_ITEM_TYPE_GTPU;
- item->spec = gtp;
- item->mask = gtp + 1;
- }
- else if ((f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
- (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
- {
- vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
- gtp[0].teid = clib_host_to_net_u32 (gu->teid);
- gtp[1].teid = ~0;
-
- vec_add2 (items, item, 1);
- item->type = RTE_FLOW_ITEM_TYPE_GTPU;
- item->spec = gtp;
- item->mask = gtp + 1;
-
- /* inner IP4 header */
- if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4)
- {
vec_add2 (items, item, 1);
- item->type = RTE_FLOW_ITEM_TYPE_IPV4;
-
- vnet_flow_ip4_gtpu_ip4_t *gu4 = &f->ip4_gtpu_ip4;
- if (!gu4->inner_src_addr.mask.as_u32 &&
- !gu4->inner_dst_addr.mask.as_u32)
- {
- item->spec = NULL;
- item->mask = NULL;
- }
- else
- {
- inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
- inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
- inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
- inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
- item->spec = inner_ip4;
- item->mask = inner_ip4 + 1;
- }
+ item->type = RTE_FLOW_ITEM_TYPE_GTPC;
+ item->spec = gtp;
+ item->mask = gtp + 1;
}
- else if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6)
+ else if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
{
- ip6_address_t zero_addr;
- vnet_flow_ip4_gtpu_ip6_t *gu6 = &f->ip4_gtpu_ip6;
-
- clib_memset (&zero_addr, 0, sizeof (ip6_address_t));
-
- vec_add2 (items, item, 1);
- item->type = RTE_FLOW_ITEM_TYPE_IPV6;
-
- if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
- !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
- {
- item->spec = NULL;
- item->mask = NULL;
- }
- else
- {
- clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
- &gu6->inner_src_addr.addr, 16);
- clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
- &gu6->inner_src_addr.mask, 16);
- clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
- &gu6->inner_dst_addr.addr, 16);
- clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
- &gu6->inner_dst_addr.mask, 16);
- item->spec = inner_ip6;
- item->mask = inner_ip6 + 1;
- }
- }
- }
- else if (f->type == VNET_FLOW_TYPE_IP6_GTPC)
- {
- vnet_flow_ip6_gtpc_t *gc = &f->ip6_gtpc;
- gtp[0].teid = clib_host_to_net_u32 (gc->teid);
- gtp[1].teid = ~0;
+ gtp[0].teid = clib_host_to_net_u32 (f->ip4_gtpu.teid);
+ gtp[1].teid = ~0;
- vec_add2 (items, item, 1);
- item->type = RTE_FLOW_ITEM_TYPE_GTPC;
- item->spec = gtp;
- item->mask = gtp + 1;
- }
- else if (f->type == VNET_FLOW_TYPE_IP6_GTPU)
- {
- vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
- gtp[0].teid = clib_host_to_net_u32 (gu->teid);
- gtp[1].teid = ~0;
-
- vec_add2 (items, item, 1);
- item->type = RTE_FLOW_ITEM_TYPE_GTPU;
- item->spec = gtp;
- item->mask = gtp + 1;
- }
- else if ((f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
- (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
- {
- vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
- gtp[0].teid = clib_host_to_net_u32 (gu->teid);
- gtp[1].teid = ~0;
-
- vec_add2 (items, item, 1);
- item->type = RTE_FLOW_ITEM_TYPE_GTPU;
- item->spec = gtp;
- item->mask = gtp + 1;
-
- /* inner IP4 header */
- if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4)
- {
vec_add2 (items, item, 1);
- item->type = RTE_FLOW_ITEM_TYPE_IPV4;
-
- vnet_flow_ip6_gtpu_ip4_t *gu4 = &f->ip6_gtpu_ip4;
-
- if (!gu4->inner_src_addr.mask.as_u32 &&
- !gu4->inner_dst_addr.mask.as_u32)
- {
- item->spec = NULL;
- item->mask = NULL;
- }
- else
- {
- inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
- inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
- inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
- inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
- item->spec = inner_ip4;
- item->mask = inner_ip4 + 1;
- }
+ item->type = RTE_FLOW_ITEM_TYPE_GTPU;
+ item->spec = gtp;
+ item->mask = gtp + 1;
}
-
- if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6)
+ else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
{
- ip6_address_t zero_addr;
- vnet_flow_ip6_gtpu_ip6_t *gu6 = &f->ip6_gtpu_ip6;
-
- clib_memset (&zero_addr, 0, sizeof (ip6_address_t));
+ u32 vni = f->ip4_vxlan.vni;
+
+ vxlan_header_t spec_hdr = {
+ .flags = VXLAN_FLAGS_I,
+ .vni_reserved = clib_host_to_net_u32 (vni << 8)
+ };
+ vxlan_header_t mask_hdr = {
+ .flags = 0xff,
+ .vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
+ };
+
+ clib_memset (raw, 0, sizeof raw);
+ raw[0].item.relative = 1;
+ raw[0].item.length = vxlan_hdr_sz;
+
+ clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
+ raw[0].item.pattern = raw[0].val + raw_sz;
+ clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
+ raw[1].item.pattern = raw[1].val + raw_sz;
vec_add2 (items, item, 1);
- item->type = RTE_FLOW_ITEM_TYPE_IPV6;
-
- if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
- !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
- {
- item->spec = NULL;
- item->mask = NULL;
- }
- else
- {
- clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
- &gu6->inner_src_addr.addr, 16);
- clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
- &gu6->inner_src_addr.mask, 16);
- clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
- &gu6->inner_dst_addr.addr, 16);
- clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
- &gu6->inner_dst_addr.mask, 16);
- item->spec = inner_ip6;
- item->mask = inner_ip6 + 1;
- }
-
+ item->type = RTE_FLOW_ITEM_TYPE_RAW;
+ item->spec = raw;
+ item->mask = raw + 1;
}
+ break;
+
+ default:
+ rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
+ goto done;
}
pattern_end:
@@ -652,6 +457,7 @@ pattern_end:
action->conf = &queue;
fate = true;
}
+
if (f->actions & VNET_FLOW_ACTION_DROP)
{
vec_add2 (actions, action, 1);
@@ -664,6 +470,7 @@ pattern_end:
else
fate = true;
}
+
if (f->actions & VNET_FLOW_ACTION_RSS)
{
u64 rss_type = 0;
@@ -691,6 +498,7 @@ pattern_end:
else
fate = true;
}
+
if (fate == false)
{
vec_add2 (actions, action, 1);
@@ -719,6 +527,7 @@ pattern_end:
rv = VNET_FLOW_ERROR_ALREADY_EXISTS;
else
rv = VNET_FLOW_ERROR_INTERNAL;
+
goto done;
}
@@ -825,17 +634,13 @@ dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
switch (flow->type)
{
case VNET_FLOW_TYPE_ETHERNET:
+ case VNET_FLOW_TYPE_IP4:
+ case VNET_FLOW_TYPE_IP6:
case VNET_FLOW_TYPE_IP4_N_TUPLE:
case VNET_FLOW_TYPE_IP6_N_TUPLE:
case VNET_FLOW_TYPE_IP4_VXLAN:
case VNET_FLOW_TYPE_IP4_GTPC:
case VNET_FLOW_TYPE_IP4_GTPU:
- case VNET_FLOW_TYPE_IP4_GTPU_IP4:
- case VNET_FLOW_TYPE_IP4_GTPU_IP6:
- case VNET_FLOW_TYPE_IP6_GTPC:
- case VNET_FLOW_TYPE_IP6_GTPU:
- case VNET_FLOW_TYPE_IP6_GTPU_IP4:
- case VNET_FLOW_TYPE_IP6_GTPU_IP6:
case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
case VNET_FLOW_TYPE_IP4_IPSEC_AH:
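To summarize the rewrite above: dpdk_flow_add() now classifies the flow once (ethernet / ipv4 / ipv6), emits the ETH and IP items from the shared base fields, and then selects the L4 or tunnel item from the IP protocol number instead of branching per flow type. The resulting rte_flow item sequences per supported flow type are roughly as follows (illustrative summary, not generated output):

/*
 * VNET_FLOW_TYPE_IP4            -> ETH / IPV4 (addr + protocol masks)
 * VNET_FLOW_TYPE_IP4_N_TUPLE    -> ETH / IPV4 / TCP or UDP
 * VNET_FLOW_TYPE_IP4_L2TPV3OIP  -> ETH / IPV4 / L2TPV3OIP (session id)
 * VNET_FLOW_TYPE_IP4_IPSEC_ESP  -> ETH / IPV4 / ESP (spi)
 * VNET_FLOW_TYPE_IP4_IPSEC_AH   -> ETH / IPV4 / AH (spi)
 * VNET_FLOW_TYPE_IP4_GTPC       -> ETH / IPV4 / UDP / GTPC (teid)
 * VNET_FLOW_TYPE_IP4_GTPU       -> ETH / IPV4 / UDP / GTPU (teid)
 * VNET_FLOW_TYPE_IP4_VXLAN      -> ETH / IPV4 / UDP / RAW (vxlan header)
 * VNET_FLOW_TYPE_IP6[_N_TUPLE]  -> ETH / IPV6 [/ TCP or UDP]
 */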
diff --git a/src/plugins/gtpu/gtpu.c b/src/plugins/gtpu/gtpu.c
index ab221f1f562..65c3053cd01 100644
--- a/src/plugins/gtpu/gtpu.c
+++ b/src/plugins/gtpu/gtpu.c
@@ -1130,7 +1130,7 @@ vnet_gtpu_add_del_rx_flow (u32 hw_if_index, u32 t_index, int is_add)
+ sizeof (ip4_header_t) + sizeof (udp_header_t),
.type = VNET_FLOW_TYPE_IP4_GTPU,
.ip4_gtpu = {
- .protocol = IP_PROTOCOL_UDP,
+ .protocol.prot = IP_PROTOCOL_UDP,
.src_addr.addr = t->dst.ip4,
.src_addr.mask.as_u32 = ~0,
.dst_addr.addr = t->src.ip4,
diff --git a/src/vnet/flow/flow.h b/src/vnet/flow/flow.h
index b5ec7ccd142..04305edf92a 100644
--- a/src/vnet/flow/flow.h
+++ b/src/vnet/flow/flow.h
@@ -26,86 +26,75 @@
#define foreach_flow_type \
/* l2 flow*/ \
_(ETHERNET, ethernet, "ethernet") \
+ /* l3 IP flow */ \
+ _(IP4, ip4, "ipv4") \
+ _(IP6, ip6, "ipv6") \
+ /* IP tunnel flow */ \
+ _(IP4_L2TPV3OIP, ip4_l2tpv3oip, "ipv4-l2tpv3oip") \
+ _(IP4_IPSEC_ESP, ip4_ipsec_esp, "ipv4-ipsec-esp") \
+ _(IP4_IPSEC_AH, ip4_ipsec_ah, "ipv4-ipsec-ah") \
/* l4 flow*/ \
_(IP4_N_TUPLE, ip4_n_tuple, "ipv4-n-tuple") \
_(IP6_N_TUPLE, ip6_n_tuple, "ipv6-n-tuple") \
_(IP4_N_TUPLE_TAGGED, ip4_n_tuple_tagged, "ipv4-n-tuple-tagged") \
_(IP6_N_TUPLE_TAGGED, ip6_n_tuple_tagged, "ipv6-n-tuple-tagged") \
- /* IP tunnel flow */ \
- _(IP4_L2TPV3OIP, ip4_l2tpv3oip, "ipv4-l2tpv3oip") \
- _(IP4_IPSEC_ESP, ip4_ipsec_esp, "ipv4-ipsec-esp") \
- _(IP4_IPSEC_AH, ip4_ipsec_ah, "ipv4-ipsec-ah") \
/* L4 tunnel flow*/ \
_(IP4_VXLAN, ip4_vxlan, "ipv4-vxlan") \
_(IP6_VXLAN, ip6_vxlan, "ipv6-vxlan") \
_(IP4_GTPC, ip4_gtpc, "ipv4-gtpc") \
- _(IP4_GTPU, ip4_gtpu, "ipv4-gtpu") \
- _(IP4_GTPU_IP4, ip4_gtpu_ip4, "ipv4-gtpu-ipv4") \
- _(IP4_GTPU_IP6, ip4_gtpu_ip6, "ipv4-gtpu-ipv6") \
- _(IP6_GTPC, ip6_gtpc, "ipv6-gtpc") \
- _(IP6_GTPU, ip6_gtpu, "ipv6-gtpu") \
- _(IP6_GTPU_IP4, ip6_gtpu_ip4, "ipv6-gtpu-ipv4") \
- _(IP6_GTPU_IP6, ip6_gtpu_ip6, "ipv6-gtpu-ipv6")
+ _(IP4_GTPU, ip4_gtpu, "ipv4-gtpu")
#define foreach_flow_entry_ethernet \
_fe(ethernet_header_t, eth_hdr)
-#define foreach_flow_entry_ip4_n_tuple \
- _fe(ip4_address_and_mask_t, src_addr) \
- _fe(ip4_address_and_mask_t, dst_addr) \
- _fe(ip_port_and_mask_t, src_port) \
- _fe(ip_port_and_mask_t, dst_port) \
- _fe(ip_protocol_t, protocol)
-
-#define foreach_flow_entry_ip6_n_tuple \
- _fe(ip6_address_and_mask_t, src_addr) \
- _fe(ip6_address_and_mask_t, dst_addr) \
- _fe(ip_port_and_mask_t, src_port) \
- _fe(ip_port_and_mask_t, dst_port) \
- _fe(ip_protocol_t, protocol)
-
-#define foreach_flow_entry_ip4_n_tuple_tagged \
+#define foreach_flow_entry_ip4 \
_fe(ip4_address_and_mask_t, src_addr) \
_fe(ip4_address_and_mask_t, dst_addr) \
- _fe(ip_port_and_mask_t, src_port) \
- _fe(ip_port_and_mask_t, dst_port) \
- _fe(ip_protocol_t, protocol)
+ _fe(ip_prot_and_mask_t, protocol)
-#define foreach_flow_entry_ip6_n_tuple_tagged \
+#define foreach_flow_entry_ip6 \
_fe(ip6_address_and_mask_t, src_addr) \
_fe(ip6_address_and_mask_t, dst_addr) \
- _fe(ip_port_and_mask_t, src_port) \
- _fe(ip_port_and_mask_t, dst_port) \
- _fe(ip_protocol_t, protocol)
+ _fe(ip_prot_and_mask_t, protocol)
#define foreach_flow_entry_ip4_l2tpv3oip \
- _fe(ip4_address_and_mask_t, src_addr) \
- _fe(ip4_address_and_mask_t, dst_addr) \
- _fe(ip_protocol_t, protocol) \
+ foreach_flow_entry_ip4 \
_fe(u32, session_id)
#define foreach_flow_entry_ip4_ipsec_esp \
- _fe(ip4_address_and_mask_t, src_addr) \
- _fe(ip4_address_and_mask_t, dst_addr) \
- _fe(ip_protocol_t, protocol) \
+ foreach_flow_entry_ip4 \
_fe(u32, spi)
#define foreach_flow_entry_ip4_ipsec_ah \
- _fe(ip4_address_and_mask_t, src_addr) \
- _fe(ip4_address_and_mask_t, dst_addr) \
- _fe(ip_protocol_t, protocol) \
+ foreach_flow_entry_ip4 \
_fe(u32, spi)
+#define foreach_flow_entry_ip4_n_tuple \
+ foreach_flow_entry_ip4 \
+ _fe(ip_port_and_mask_t, src_port) \
+ _fe(ip_port_and_mask_t, dst_port)
+
+#define foreach_flow_entry_ip6_n_tuple \
+ foreach_flow_entry_ip6 \
+ _fe(ip_port_and_mask_t, src_port) \
+ _fe(ip_port_and_mask_t, dst_port)
+
+#define foreach_flow_entry_ip4_n_tuple_tagged \
+ foreach_flow_entry_ip4 \
+ _fe(ip_port_and_mask_t, src_port) \
+ _fe(ip_port_and_mask_t, dst_port)
+
+#define foreach_flow_entry_ip6_n_tuple_tagged \
+ foreach_flow_entry_ip6 \
+ _fe(ip_port_and_mask_t, src_port) \
+ _fe(ip_port_and_mask_t, dst_port)
+
#define foreach_flow_entry_ip4_vxlan \
- _fe(ip4_address_t, src_addr) \
- _fe(ip4_address_t, dst_addr) \
- _fe(u16, dst_port) \
+ foreach_flow_entry_ip4_n_tuple \
_fe(u16, vni)
#define foreach_flow_entry_ip6_vxlan \
- _fe(ip6_address_t, src_addr) \
- _fe(ip6_address_t, dst_addr) \
- _fe(u16, dst_port) \
+ foreach_flow_entry_ip6_n_tuple \
_fe(u16, vni)
#define foreach_flow_entry_ip4_gtpc \
@@ -116,34 +105,6 @@
foreach_flow_entry_ip4_n_tuple \
_fe(u32, teid)
-#define foreach_flow_entry_ip4_gtpu_ip4 \
- foreach_flow_entry_ip4_gtpu \
- _fe(ip4_address_and_mask_t, inner_src_addr) \
- _fe(ip4_address_and_mask_t, inner_dst_addr)
-
-#define foreach_flow_entry_ip4_gtpu_ip6 \
- foreach_flow_entry_ip4_gtpu \
- _fe(ip6_address_and_mask_t, inner_src_addr) \
- _fe(ip6_address_and_mask_t, inner_dst_addr)
-
-#define foreach_flow_entry_ip6_gtpc \
- foreach_flow_entry_ip6_n_tuple \
- _fe(u32, teid)
-
-#define foreach_flow_entry_ip6_gtpu \
- foreach_flow_entry_ip6_n_tuple \
- _fe(u32, teid)
-
-#define foreach_flow_entry_ip6_gtpu_ip4 \
- foreach_flow_entry_ip6_gtpu \
- _fe(ip4_address_and_mask_t, inner_src_addr) \
- _fe(ip4_address_and_mask_t, inner_dst_addr)
-
-#define foreach_flow_entry_ip6_gtpu_ip6 \
- foreach_flow_entry_ip6_gtpu \
- _fe(ip6_address_and_mask_t, inner_src_addr) \
- _fe(ip6_address_and_mask_t, inner_dst_addr)
-
#define foreach_flow_action \
_(0, COUNT, "count") \
_(1, MARK, "mark") \
@@ -160,7 +121,6 @@ typedef enum
#undef _
} vnet_flow_action_t;
-
#define foreach_flow_error \
_( -1, NOT_SUPPORTED, "not supported") \
_( -2, ALREADY_DONE, "already done") \
@@ -215,6 +175,14 @@ typedef struct
u16 port, mask;
} ip_port_and_mask_t;
+typedef struct
+{
+ ip_protocol_t prot;
+ /* ip protocol mask should be either 0 or 0xFF */
+ /* other values are meaningless */
+ u8 mask;
+} ip_prot_and_mask_t;
+
typedef enum
{
VNET_FLOW_TYPE_UNKNOWN,
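Because each derived foreach_flow_entry_* macro now starts from the ip4/ip6 base, every generated flow struct shares the same leading fields. For example, foreach_flow_entry_ip4_gtpu expands, through the existing _fe()/typedef generator elsewhere in flow.h (unchanged by this patch), to roughly:

  typedef struct
  {
    ip4_address_and_mask_t src_addr;
    ip4_address_and_mask_t dst_addr;
    ip_prot_and_mask_t protocol;
    ip_port_and_mask_t src_port;
    ip_port_and_mask_t dst_port;
    u32 teid;
  } vnet_flow_ip4_gtpu_t;

This common prefix is what lets dpdk_flow_add() read f->ip4 for any IPv4-class flow and f->ip4_n_tuple for any L4 flow, regardless of the concrete flow type.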
diff --git a/src/vnet/flow/flow_api.c b/src/vnet/flow/flow_api.c
index bd077d79df8..9ae3802b6fe 100644
--- a/src/vnet/flow/flow_api.c
+++ b/src/vnet/flow/flow_api.c
@@ -66,6 +66,14 @@ ipv6_addr_and_mask_convert (vl_api_ip6_address_and_mask_t * vl_api_addr,
}
static inline void
+protocol_and_mask_convert (vl_api_ip_prot_and_mask_t * vl_api_protocol,
+ ip_prot_and_mask_t * vnet_protocol)
+{
+ vnet_protocol->prot = (ip_protocol_t) vl_api_protocol->prot;
+ vnet_protocol->mask = vl_api_protocol->mask;
+}
+
+static inline void
port_and_mask_convert (vl_api_ip_port_and_mask_t * vl_api_port,
ip_port_and_mask_t * vnet_port)
{
@@ -79,11 +87,10 @@ ipv4_n_tuple_flow_convert (vl_api_flow_ip4_n_tuple_t * vl_api_flow,
{
ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
+ protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
port_and_mask_convert (&vl_api_flow->src_port, &f->src_port);
port_and_mask_convert (&vl_api_flow->dst_port, &f->dst_port);
-
- f->protocol = (ip_protocol_t) vl_api_flow->protocol;
}
static void
@@ -92,11 +99,10 @@ ipv6_n_tuple_flow_convert (vl_api_flow_ip6_n_tuple_t * vl_api_flow,
{
ipv6_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
ipv6_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
+ protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
port_and_mask_convert (&vl_api_flow->src_port, &f->src_port);
port_and_mask_convert (&vl_api_flow->dst_port, &f->dst_port);
-
- f->protocol = (ip_protocol_t) vl_api_flow->protocol;
}
static inline void
@@ -124,7 +130,7 @@ ipv4_l2tpv3oip_flow_convert (vl_api_flow_ip4_l2tpv3oip_t * vl_api_flow,
ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
- f->protocol = (ip_protocol_t) vl_api_flow->protocol;
+ protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
f->session_id = ntohl (vl_api_flow->session_id);
}
@@ -135,7 +141,7 @@ ipv4_ipsec_esp_flow_convert (vl_api_flow_ip4_ipsec_esp_t * vl_api_flow,
ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
- f->protocol = (ip_protocol_t) vl_api_flow->protocol;
+ protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
f->spi = ntohl (vl_api_flow->spi);
}
@@ -146,7 +152,7 @@ ipv4_ipsec_ah_flow_convert (vl_api_flow_ip4_ipsec_ah_t * vl_api_flow,
ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
- f->protocol = (ip_protocol_t) vl_api_flow->protocol;
+ protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
f->spi = ntohl (vl_api_flow->spi);
}
@@ -160,7 +166,7 @@ ipv4_gtpu_flow_convert (vl_api_flow_ip4_gtpu_t * vl_api_flow,
port_and_mask_convert (&vl_api_flow->src_port, &f->src_port);
port_and_mask_convert (&vl_api_flow->dst_port, &f->dst_port);
- f->protocol = (ip_protocol_t) vl_api_flow->protocol;
+ protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
f->teid = ntohl (vl_api_flow->teid);
}
@@ -174,7 +180,7 @@ ipv4_gtpc_flow_convert (vl_api_flow_ip4_gtpc_t * vl_api_flow,
port_and_mask_convert (&vl_api_flow->src_port, &f->src_port);
port_and_mask_convert (&vl_api_flow->dst_port, &f->dst_port);
- f->protocol = (ip_protocol_t) vl_api_flow->protocol;
+ protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
f->teid = ntohl (vl_api_flow->teid);
}
@@ -235,12 +241,10 @@ vl_api_flow_add_t_handler (vl_api_flow_add_t * mp)
rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
goto out;
break;
-
}
rv = vnet_flow_add (vnm, &flow, &flow_index);
- goto out;
out:
/* *INDENT-OFF* */
REPLY_MACRO2(VL_API_FLOW_ADD_REPLY,
diff --git a/src/vnet/flow/flow_cli.c b/src/vnet/flow/flow_cli.c
index 98007a7723e..7dd68677c3b 100644
--- a/src/vnet/flow/flow_cli.c
+++ b/src/vnet/flow/flow_cli.c
@@ -12,11 +12,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#include <stddef.h>
#include <vnet/vnet.h>
#include <vnet/devices/devices.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
+#include <vnet/ethernet/packet.h>
#include <vnet/flow/flow.h>
static format_function_t format_flow;
@@ -60,6 +62,40 @@ format_ip_port_and_mask (u8 * s, va_list * args)
return format (s, "%u/0x%x", pm->port, pm->mask);
}
+uword
+unformat_ip_protocol_and_mask (unformat_input_t * input, va_list * args)
+{
+ ip_prot_and_mask_t *pm = va_arg (*args, ip_prot_and_mask_t *);
+ u32 prot = 0, mask = 0;
+
+ if (unformat (input, "any"))
+ ;
+ else if (unformat (input, "%U", unformat_ip_protocol, &prot))
+ mask = 0xFF;
+ else if (unformat (input, "%u", &prot))
+ mask = 0xFF;
+ else
+ return 0;
+
+ if (prot > 0XFF || mask > 0xFF)
+ return 0;
+
+ pm->prot = prot;
+ pm->mask = mask;
+ return 1;
+}
+
+u8 *
+format_ip_protocol_and_mask (u8 * s, va_list * args)
+{
+ ip_prot_and_mask_t *pm = va_arg (*args, ip_prot_and_mask_t *);
+
+ if (pm->prot == 0 && pm->mask == 0)
+ return format (s, "any");
+
+ return format (s, "%U", format_ip_protocol, pm->prot);
+}
+
u8 *
format_flow_error (u8 * s, va_list * args)
{
@@ -114,6 +150,38 @@ format_flow_enabled_hw (u8 * s, va_list * args)
return s;
}
+u8 *
+format_rss_function (u8 * s, va_list * args)
+{
+ vnet_rss_function_t func = va_arg (*args, vnet_rss_function_t);
+
+ if (0)
+ ;
+#undef _
+#define _(f, n) \
+ else if (func == VNET_RSS_FUNC_##f) \
+ return format (s, n);
+
+ foreach_rss_function
+#undef _
+ return format (s, "unknown");
+}
+
+u8 *
+format_rss_types (u8 * s, va_list * args)
+{
+ u64 type = va_arg (*args, u64);
+
+#undef _
+#define _(a,b,c) \
+ if (type & (1UL<<a)) \
+ s = format (s, "%s ", c);
+
+ foreach_flow_rss_types
+#undef _
+ return s;
+}
+
static const char *flow_type_strings[] = { 0,
#define _(a,b,c) c,
foreach_flow_type
@@ -274,37 +342,40 @@ test_flow (vlib_main_t * vm, unformat_input_t * input,
FLOW_ENABLE,
FLOW_DISABLE
} action = FLOW_UNKNOWN_ACTION;
+ enum
+ {
+ FLOW_UNKNOWN_CLASS,
+ FLOW_ETHERNET_CLASS,
+ FLOW_IPV4_CLASS,
+ FLOW_IPV6_CLASS,
+ } flow_class = FLOW_UNKNOWN_CLASS;
+
u32 hw_if_index = ~0, flow_index = ~0;
int rv;
- u32 prot = 0, teid = 0, session_id = 0, spi = 0;
- vnet_flow_type_t type = VNET_FLOW_TYPE_IP4_N_TUPLE;
- bool is_gtpc_set = false;
- bool is_gtpu_set = false;
- bool is_l2tpv3oip_set = false;
- bool is_ipsec_esp_set = false, is_ipsec_ah_set = false;
- vnet_flow_type_t outer_type = VNET_FLOW_TYPE_UNKNOWN;
- vnet_flow_type_t inner_type = VNET_FLOW_TYPE_UNKNOWN;
- bool outer_ip4_set = false, inner_ip4_set = false;
- bool outer_ip6_set = false, inner_ip6_set = false;
+ u32 teid = 0, session_id = 0, spi = 0;
+ u16 vni = 0;
+ vnet_flow_type_t type = VNET_FLOW_TYPE_UNKNOWN;
ip4_address_and_mask_t ip4s = { };
ip4_address_and_mask_t ip4d = { };
- ip4_address_and_mask_t inner_ip4s = { };
- ip4_address_and_mask_t inner_ip4d = { };
ip6_address_and_mask_t ip6s = { };
ip6_address_and_mask_t ip6d = { };
- ip6_address_and_mask_t inner_ip6s = { };
- ip6_address_and_mask_t inner_ip6d = { };
ip_port_and_mask_t sport = { };
ip_port_and_mask_t dport = { };
+ ip_prot_and_mask_t protocol = { };
u16 eth_type;
- bool ethernet_set = false;
+ bool tcp_udp_port_set = false;
+ bool gtpc_set = false;
+ bool gtpu_set = false;
+ bool vni_set = false;
+ bool l2tpv3oip_set = false;
+ bool ipsec_esp_set = false, ipsec_ah_set = false;
u8 *rss_type[3] = { };
u8 *type_str = NULL;
clib_memset (&flow, 0, sizeof (vnet_flow_t));
flow.index = ~0;
flow.actions = 0;
- flow.ip4_n_tuple.protocol = ~0;
+
if (!unformat_user (input, unformat_line_input, line_input))
return 0;
@@ -320,56 +391,47 @@ test_flow (vlib_main_t * vm, unformat_input_t * input,
action = FLOW_DISABLE;
else if (unformat (line_input, "eth-type %U",
unformat_ethernet_type_host_byte_order, &eth_type))
- ethernet_set = true;
+ flow_class = FLOW_ETHERNET_CLASS;
else if (unformat (line_input, "src-ip %U",
unformat_ip4_address_and_mask, &ip4s))
- outer_ip4_set = true;
+ flow_class = FLOW_IPV4_CLASS;
else if (unformat (line_input, "dst-ip %U",
unformat_ip4_address_and_mask, &ip4d))
- outer_ip4_set = true;
+ flow_class = FLOW_IPV4_CLASS;
else if (unformat (line_input, "ip6-src-ip %U",
unformat_ip6_address_and_mask, &ip6s))
- outer_ip6_set = true;
+ flow_class = FLOW_IPV6_CLASS;
else if (unformat (line_input, "ip6-dst-ip %U",
unformat_ip6_address_and_mask, &ip6d))
- outer_ip6_set = true;
- else if (unformat (line_input, "inner-src-ip %U",
- unformat_ip4_address_and_mask, &inner_ip4s))
- inner_ip4_set = true;
- else if (unformat (line_input, "inner-dst-ip %U",
- unformat_ip4_address_and_mask, &inner_ip4d))
- inner_ip4_set = true;
- else if (unformat (line_input, "inner-ip6-src-ip %U",
- unformat_ip6_address_and_mask, &inner_ip6s))
- inner_ip6_set = true;
- else if (unformat (line_input, "inner-ip6-dst-ip %U",
- unformat_ip6_address_and_mask, &inner_ip6d))
- inner_ip6_set = true;
+ flow_class = FLOW_IPV6_CLASS;
else if (unformat (line_input, "src-port %U", unformat_ip_port_and_mask,
&sport))
- ;
+ tcp_udp_port_set = true;
else if (unformat (line_input, "dst-port %U", unformat_ip_port_and_mask,
&dport))
- ;
- else if (unformat (line_input, "proto %U", unformat_ip_protocol, &prot))
- ;
- else if (unformat (line_input, "proto %u", &prot))
+ tcp_udp_port_set = true;
+ else
+ if (unformat
+ (line_input, "proto %U", unformat_ip_protocol_and_mask,
+ &protocol))
;
else if (unformat (line_input, "gtpc teid %u", &teid))
- is_gtpc_set = true;
+ gtpc_set = true;
else if (unformat (line_input, "gtpu teid %u", &teid))
- is_gtpu_set = true;
+ gtpu_set = true;
+ else if (unformat (line_input, "vxlan vni %u", &vni))
+ vni_set = true;
else if (unformat (line_input, "session id %u", &session_id))
{
- if (prot == IP_PROTOCOL_L2TP)
- is_l2tpv3oip_set = true;
+ if (protocol.prot == IP_PROTOCOL_L2TP)
+ l2tpv3oip_set = true;
}
else if (unformat (line_input, "spi %u", &spi))
{
- if (prot == IP_PROTOCOL_IPSEC_ESP)
- is_ipsec_esp_set = true;
- else if (prot == IP_PROTOCOL_IPSEC_AH)
- is_ipsec_ah_set = true;
+ if (protocol.prot == IP_PROTOCOL_IPSEC_ESP)
+ ipsec_esp_set = true;
+ else if (protocol.prot == IP_PROTOCOL_IPSEC_AH)
+ ipsec_ah_set = true;
}
else if (unformat (line_input, "index %u", &flow_index))
;
@@ -391,9 +453,9 @@ test_flow (vlib_main_t * vm, unformat_input_t * input,
if (0)
;
#undef _
-#define _(f, s) \
- else if (unformat (line_input, s)) \
- flow.rss_fun = VNET_RSS_FUNC_##f;
+#define _(f, s) \
+ else if (unformat (line_input, s)) \
+ flow.rss_fun = VNET_RSS_FUNC_##f;
foreach_rss_function
#undef _
@@ -415,30 +477,29 @@ test_flow (vlib_main_t * vm, unformat_input_t * input,
if (unformat (line_input, "%s use %s and %s",
&rss_type[0], &rss_type[1], &rss_type[2]))
;
- else
- if (unformat
- (line_input, "%s use %s", &rss_type[0], &rss_type[1]))
+ else if (unformat
+ (line_input, "%s use %s", &rss_type[0], &rss_type[1]))
;
else if (unformat (line_input, "%s", &rss_type[0]))
;
#undef _
#define _(a,b,c) \
- else if (!clib_strcmp(c, (const char *)type_str)) \
- flow.rss_types |= (1ULL<<a);
+ else if (!clib_strcmp(c, (const char *)type_str)) \
+ flow.rss_types |= (1ULL<<a);
#define check_rss_types(_str) \
- if (_str != NULL) {\
- type_str = _str;\
- if (0) \
- ; \
- foreach_flow_rss_types \
- else \
- { \
- return clib_error_return (0, "parse error: '%U'", \
- format_unformat_error, line_input); \
- } \
- }
+ if (_str != NULL) {\
+ type_str = _str;\
+ if (0) \
+ ; \
+ foreach_flow_rss_types \
+ else \
+ { \
+ return clib_error_return (0, "parse error: '%U'", \
+ format_unformat_error, line_input); \
+ } \
+ }
check_rss_types (rss_type[0])
check_rss_types (rss_type[1]) check_rss_types (rss_type[2])
@@ -469,200 +530,141 @@ test_flow (vlib_main_t * vm, unformat_input_t * input,
return clib_error_return (0, "Please specify at least one action");
/* Adjust the flow type */
- if (ethernet_set == true)
- outer_type = VNET_FLOW_TYPE_ETHERNET;
- if (outer_ip4_set == true)
- outer_type = VNET_FLOW_TYPE_IP4_N_TUPLE;
- else if (outer_ip6_set == true)
- outer_type = VNET_FLOW_TYPE_IP6_N_TUPLE;
- if (inner_ip4_set == true)
- inner_type = VNET_FLOW_TYPE_IP4_N_TUPLE;
- else if (inner_ip6_set == true)
- inner_type = VNET_FLOW_TYPE_IP6_N_TUPLE;
-
- if (outer_type == VNET_FLOW_TYPE_UNKNOWN)
- return clib_error_return (0, "Please specify a supported flow type");
-
- if (outer_type == VNET_FLOW_TYPE_ETHERNET)
- type = VNET_FLOW_TYPE_ETHERNET;
- else if (outer_type == VNET_FLOW_TYPE_IP4_N_TUPLE)
+ switch (flow_class)
{
- type = VNET_FLOW_TYPE_IP4_N_TUPLE;
+ case FLOW_ETHERNET_CLASS:
+ type = VNET_FLOW_TYPE_ETHERNET;
+ break;
- if (inner_type == VNET_FLOW_TYPE_UNKNOWN)
+ case FLOW_IPV4_CLASS:
+ if (gtpc_set)
{
- if (is_gtpc_set)
- type = VNET_FLOW_TYPE_IP4_GTPC;
- else if (is_gtpu_set)
- type = VNET_FLOW_TYPE_IP4_GTPU;
- else if (is_l2tpv3oip_set)
- type = VNET_FLOW_TYPE_IP4_L2TPV3OIP;
- else if (is_ipsec_esp_set)
- type = VNET_FLOW_TYPE_IP4_IPSEC_ESP;
- else if (is_ipsec_ah_set)
- type = VNET_FLOW_TYPE_IP4_IPSEC_AH;
+ type = VNET_FLOW_TYPE_IP4_GTPC;
+ protocol.prot = IP_PROTOCOL_UDP;
}
- else if (inner_type == VNET_FLOW_TYPE_IP4_N_TUPLE)
+ else if (gtpu_set)
{
- if (is_gtpu_set)
- type = VNET_FLOW_TYPE_IP4_GTPU_IP4;
+ type = VNET_FLOW_TYPE_IP4_GTPU;
+ protocol.prot = IP_PROTOCOL_UDP;
}
- else if (inner_type == VNET_FLOW_TYPE_IP6_N_TUPLE)
+ else if (vni_set)
{
- if (is_gtpu_set)
- type = VNET_FLOW_TYPE_IP4_GTPU_IP6;
+ type = VNET_FLOW_TYPE_IP4_VXLAN;
+ protocol.prot = IP_PROTOCOL_UDP;
}
- }
- else if (outer_type == VNET_FLOW_TYPE_IP6_N_TUPLE)
- {
- type = VNET_FLOW_TYPE_IP6_N_TUPLE;
+ else if (l2tpv3oip_set)
+ type = VNET_FLOW_TYPE_IP4_L2TPV3OIP;
+ else if (ipsec_esp_set)
+ type = VNET_FLOW_TYPE_IP4_IPSEC_ESP;
+ else if (ipsec_ah_set)
+ type = VNET_FLOW_TYPE_IP4_IPSEC_AH;
+ else if (tcp_udp_port_set)
+ type = VNET_FLOW_TYPE_IP4_N_TUPLE;
+ else
+ type = VNET_FLOW_TYPE_IP4;
+ break;
+ case FLOW_IPV6_CLASS:
+ if (tcp_udp_port_set)
+ type = VNET_FLOW_TYPE_IP6_N_TUPLE;
+ else if (vni_set)
+ type = VNET_FLOW_TYPE_IP6_VXLAN;
+ else
+ type = VNET_FLOW_TYPE_IP6;
+ break;
- if (inner_type == VNET_FLOW_TYPE_UNKNOWN)
- {
- if (is_gtpc_set)
- type = VNET_FLOW_TYPE_IP6_GTPC;
- else if (is_gtpu_set)
- type = VNET_FLOW_TYPE_IP6_GTPU;
- }
- else if (inner_type == VNET_FLOW_TYPE_IP4_N_TUPLE)
- {
- if (is_gtpu_set)
- type = VNET_FLOW_TYPE_IP6_GTPU_IP4;
- }
- else if (inner_type == VNET_FLOW_TYPE_IP6_N_TUPLE)
- {
- if (is_gtpu_set)
- type = VNET_FLOW_TYPE_IP6_GTPU_IP6;
- }
+ default:
+ return clib_error_return (0,
+ "Please specify a supported flow type");
}
- //assign specific field values per flow type
- switch (type)
+ /* Assign specific field values per flow type */
+ if (flow_class == FLOW_ETHERNET_CLASS)
{
- case VNET_FLOW_TYPE_ETHERNET:
- memset (&flow.ethernet, 0, sizeof (flow.ethernet));
flow.ethernet.eth_hdr.type = eth_type;
- break;
- case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
- clib_memcpy (&flow.ip4_l2tpv3oip.src_addr, &ip4s,
- sizeof (ip4_address_and_mask_t));
- clib_memcpy (&flow.ip4_l2tpv3oip.dst_addr, &ip4d,
- sizeof (ip4_address_and_mask_t));
- flow.ip4_l2tpv3oip.protocol = prot;
- flow.ip4_l2tpv3oip.session_id = session_id;
- break;
- case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
- clib_memcpy (&flow.ip4_ipsec_esp.src_addr, &ip4s,
- sizeof (ip4_address_and_mask_t));
- clib_memcpy (&flow.ip4_ipsec_esp.dst_addr, &ip4d,
- sizeof (ip4_address_and_mask_t));
- flow.ip4_ipsec_esp.protocol = prot;
- flow.ip4_ipsec_esp.spi = spi;
- break;
- case VNET_FLOW_TYPE_IP4_IPSEC_AH:
- clib_memcpy (&flow.ip4_ipsec_ah.src_addr, &ip4s,
- sizeof (ip4_address_and_mask_t));
- clib_memcpy (&flow.ip4_ipsec_ah.dst_addr, &ip4d,
- sizeof (ip4_address_and_mask_t));
- flow.ip4_ipsec_ah.protocol = prot;
- flow.ip4_ipsec_ah.spi = spi;
- break;
- case VNET_FLOW_TYPE_IP4_N_TUPLE:
- case VNET_FLOW_TYPE_IP4_GTPC:
- case VNET_FLOW_TYPE_IP4_GTPU:
- case VNET_FLOW_TYPE_IP4_GTPU_IP4:
- case VNET_FLOW_TYPE_IP4_GTPU_IP6:
- clib_memcpy (&flow.ip4_n_tuple.src_addr, &ip4s,
+ }
+ else if (flow_class == FLOW_IPV4_CLASS)
+ {
+ vnet_flow_ip4_t *ip4_ptr = &flow.ip4;
+
+ clib_memcpy (&ip4_ptr->src_addr, &ip4s,
sizeof (ip4_address_and_mask_t));
- clib_memcpy (&flow.ip4_n_tuple.dst_addr, &ip4d,
+ clib_memcpy (&ip4_ptr->dst_addr, &ip4d,
sizeof (ip4_address_and_mask_t));
- clib_memcpy (&flow.ip4_n_tuple.src_port, &sport,
- sizeof (ip_port_and_mask_t));
- clib_memcpy (&flow.ip4_n_tuple.dst_port, &dport,
- sizeof (ip_port_and_mask_t));
- flow.ip4_n_tuple.protocol = prot;
-
- if (type == VNET_FLOW_TYPE_IP4_GTPC)
- flow.ip4_gtpc.teid = teid;
- else if (type == VNET_FLOW_TYPE_IP4_GTPU)
- flow.ip4_gtpu.teid = teid;
- else if (type == VNET_FLOW_TYPE_IP4_GTPU_IP4)
- {
- flow.ip4_gtpu_ip4.teid = teid;
- clib_memcpy (&flow.ip4_gtpu_ip4.inner_src_addr, &inner_ip4s,
- sizeof (ip4_address_and_mask_t));
- clib_memcpy (&flow.ip4_gtpu_ip4.inner_dst_addr, &inner_ip4d,
- sizeof (ip4_address_and_mask_t));
- }
- else if (type == VNET_FLOW_TYPE_IP4_GTPU_IP6)
+ ip4_ptr->protocol.prot = protocol.prot;
+
+ /* In this cli, we use the protocol.mask only when the flow type is
+ * VNET_FLOW_TYPE_IP4/IP6. For other cases, the IP protocol is just
+ * used to identify the next layer type: e.g. UDP/TCP or IPSEC_ESP
+ */
+ if (type == VNET_FLOW_TYPE_IP4)
+ ip4_ptr->protocol.mask = protocol.mask;
+
+ switch (protocol.prot)
{
- flow.ip4_gtpu_ip6.teid = teid;
- clib_memcpy (&flow.ip4_gtpu_ip6.inner_src_addr, &inner_ip6s,
- sizeof (ip6_address_and_mask_t));
- clib_memcpy (&flow.ip4_gtpu_ip6.inner_dst_addr, &inner_ip6d,
- sizeof (ip6_address_and_mask_t));
+ /* ip4-n-tuple */
+ case IP_PROTOCOL_TCP:
+ case IP_PROTOCOL_UDP:
+ flow.ip4_n_tuple.src_port = sport;
+ flow.ip4_n_tuple.dst_port = dport;
+
+ if (type == VNET_FLOW_TYPE_IP4_GTPC)
+ flow.ip4_gtpc.teid = teid;
+ else if (type == VNET_FLOW_TYPE_IP4_GTPU)
+ flow.ip4_gtpu.teid = teid;
+ else if (type == VNET_FLOW_TYPE_IP4_VXLAN)
+ flow.ip4_vxlan.vni = vni;
+ break;
+ case IP_PROTOCOL_L2TP:
+ flow.ip4_l2tpv3oip.session_id = session_id;
+ break;
+ case IP_PROTOCOL_IPSEC_ESP:
+ flow.ip4_ipsec_esp.spi = spi;
+ break;
+ case IP_PROTOCOL_IPSEC_AH:
+	    flow.ip4_ipsec_ah.spi = spi;
+ break;
+ default:
+ break;
}
+ }
+ else if (flow_class == FLOW_IPV6_CLASS)
+ {
+ vnet_flow_ip6_t *ip6_ptr = &flow.ip6;
- if (flow.ip4_n_tuple.protocol == (ip_protocol_t) ~ 0)
- return clib_error_return (0, "Please specify ip protocol");
- if ((type != VNET_FLOW_TYPE_IP4_N_TUPLE) &&
- (flow.ip4_n_tuple.protocol != IP_PROTOCOL_UDP))
- return clib_error_return (0,
- "For GTP related flow, ip protocol must be UDP");
- break;
-
- case VNET_FLOW_TYPE_IP6_N_TUPLE:
- case VNET_FLOW_TYPE_IP6_GTPC:
- case VNET_FLOW_TYPE_IP6_GTPU:
- case VNET_FLOW_TYPE_IP6_GTPU_IP4:
- case VNET_FLOW_TYPE_IP6_GTPU_IP6:
clib_memcpy (&flow.ip6_n_tuple.src_addr, &ip6s,
sizeof (ip6_address_and_mask_t));
clib_memcpy (&flow.ip6_n_tuple.dst_addr, &ip6d,
sizeof (ip6_address_and_mask_t));
- clib_memcpy (&flow.ip6_n_tuple.src_port, &sport,
- sizeof (ip_port_and_mask_t));
- clib_memcpy (&flow.ip6_n_tuple.dst_port, &dport,
- sizeof (ip_port_and_mask_t));
- flow.ip6_n_tuple.protocol = prot;
-
- if (type == VNET_FLOW_TYPE_IP6_GTPC)
- flow.ip6_gtpc.teid = teid;
- else if (type == VNET_FLOW_TYPE_IP6_GTPU)
- flow.ip6_gtpu.teid = teid;
- else if (type == VNET_FLOW_TYPE_IP6_GTPU_IP4)
- {
- flow.ip6_gtpu_ip4.teid = teid;
- clib_memcpy (&flow.ip6_gtpu_ip4.inner_src_addr, &inner_ip4s,
- sizeof (ip4_address_and_mask_t));
- clib_memcpy (&flow.ip6_gtpu_ip4.inner_dst_addr, &inner_ip4d,
- sizeof (ip4_address_and_mask_t));
- }
- else if (type == VNET_FLOW_TYPE_IP6_GTPU_IP6)
- {
- flow.ip6_gtpu_ip6.teid = teid;
- clib_memcpy (&flow.ip6_gtpu_ip6.inner_src_addr, &inner_ip6s,
- sizeof (ip6_address_and_mask_t));
- clib_memcpy (&flow.ip6_gtpu_ip6.inner_dst_addr, &inner_ip6d,
- sizeof (ip6_address_and_mask_t));
- }
- if (flow.ip6_n_tuple.protocol == (ip_protocol_t) ~ 0)
- return clib_error_return (0, "Please specify ip protocol");
- if ((type != VNET_FLOW_TYPE_IP6_N_TUPLE) &&
- (flow.ip6_n_tuple.protocol != IP_PROTOCOL_UDP))
- return clib_error_return (0,
- "For GTP related flow, ip protocol must be UDP");
- break;
+ ip6_ptr->protocol.prot = protocol.prot;
- default:
- break;
+ /* In this cli, we use the protocol.mask only when the flow type is
+ * VNET_FLOW_TYPE_IP4/IP6. For other cases, the IP protocol is just
+ * used to identify the next layer type: e.g. UDP/TCP or IPSEC_ESP
+ */
+ if (type == VNET_FLOW_TYPE_IP6)
+ ip6_ptr->protocol.mask = protocol.mask;
+
+ switch (protocol.prot)
+ {
+ /* ip6-n-tuple */
+ case IP_PROTOCOL_TCP:
+ case IP_PROTOCOL_UDP:
+ flow.ip6_n_tuple.src_port = sport;
+ flow.ip6_n_tuple.dst_port = dport;
+
+ if (type == VNET_FLOW_TYPE_IP6_VXLAN)
+ flow.ip6_vxlan.vni = vni;
+ break;
+ default:
+ break;
+ }
}
flow.type = type;
rv = vnet_flow_add (vnm, &flow, &flow_index);
if (!rv)
- printf ("flow %u added\n", flow_index);
+ vlib_cli_output (vm, "flow %u added", flow_index);
break;
case FLOW_DEL:
@@ -681,20 +683,27 @@ test_flow (vlib_main_t * vm, unformat_input_t * input,
if (rv < 0)
return clib_error_return (0, "flow error: %U", format_flow_error, rv);
+
return 0;
}
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (test_flow_command, static) = {
.path = "test flow",
- .short_help = "test flow add [src-ip <ip-addr/mask>] [dst-ip "
- "<ip-addr/mask>] [src-port <port/mask>] [dst-port <port/mask>] "
- "[proto <ip-proto>",
+ .short_help = "test flow [add|del|enable|disable] [index <id>] "
+ "[src-ip <ip-addr/mask>] [dst-ip <ip-addr/mask>] "
+ "[ip6-src-ip <ip-addr/mask>] [ip6-dst-ip <ip-addr/mask>] "
+ "[src-port <port/mask>] [dst-port <port/mask>] "
+ "[proto <ip-proto>] "
+ "[gtpc teid <teid>] [gtpu teid <teid>] [vxlan <vni>] "
+    "[session id <session>] [spi <spi>] "
+ "[next-node <node>] [mark <id>] [buffer-advance <len>] "
+ "[redirect-to-queue <queue>] [drop] "
+ "[rss function <name>] [rss types <flow type>]",
.function = test_flow,
};
/* *INDENT-ON* */
-
static u8 *
format_flow_match_element (u8 * s, va_list * args)
{
@@ -710,6 +719,16 @@ format_flow_match_element (u8 * s, va_list * args)
if (strncmp (type, "u32", 3) == 0)
return format (s, "%d", *(u32 *) ptr);
+ if (strncmp (type, "ethernet_header_t", 13) == 0)
+ {
+ ethernet_max_header_t m;
+ memset (&m, 0, sizeof (m));
+ m.ethernet = *(ethernet_header_t *) ptr;
+ /* convert the ethernet type to net order */
+ m.ethernet.type = clib_host_to_net_u16 (m.ethernet.type);
+ return format (s, "%U", format_ethernet_header, &m);
+ }
+
if (strncmp (type, "ip4_address_t", 13) == 0)
return format (s, "%U", format_ip4_address, ptr);
@@ -722,8 +741,8 @@ format_flow_match_element (u8 * s, va_list * args)
if (strncmp (type, "ip6_address_and_mask_t", 13) == 0)
return format (s, "%U", format_ip6_address_and_mask, ptr);
- if (strncmp (type, "ip_protocol_t", 13) == 0)
- return format (s, "%U", format_ip_protocol, *(ip_protocol_t *) ptr);
+ if (strncmp (type, "ip_prot_and_mask_t", 13) == 0)
+ return format (s, "%U", format_ip_protocol_and_mask, ptr);
if (strncmp (type, "ip_port_and_mask_t", 18) == 0)
return format (s, "%U", format_ip_port_and_mask, ptr);
@@ -777,9 +796,16 @@ format_flow (u8 * s, va_list * args)
s = format (s, "\n%Uaction: %U", format_white_space, indent + 2,
format_flow_actions, f->actions);
+ if (f->actions & VNET_FLOW_ACTION_DROP)
+ t = format (t, "%sdrop", t ? ", " : "");
+
if (f->actions & VNET_FLOW_ACTION_MARK)
t = format (t, "%smark %u", t ? ", " : "", f->mark_flow_id);
+ if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
+ t =
+ format (t, "%sredirect-to-queue %u", t ? ", " : "", f->redirect_queue);
+
if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
t = format (t, "%snext-node %U", t ? ", " : "",
format_vlib_node_name, vm, f->redirect_node_index);
@@ -787,6 +813,14 @@ format_flow (u8 * s, va_list * args)
if (f->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
t = format (t, "%sbuffer-advance %d", t ? ", " : "", f->buffer_advance);
+ if (f->actions & VNET_FLOW_ACTION_RSS)
+ {
+ t = format (t, "%srss function %U", t ? ", " : "",
+ format_rss_function, f->rss_fun);
+ t = format (t, "%srss types %U", t ? ", " : "",
+ format_rss_types, f->rss_types);
+ }
+
if (t)
{
s = format (s, "\n%U%v", format_white_space, indent + 4, t);
diff --git a/src/vnet/flow/flow_types.api b/src/vnet/flow/flow_types.api
index d872d6eb264..7df46d2131d 100644
--- a/src/vnet/flow/flow_types.api
+++ b/src/vnet/flow/flow_types.api
@@ -14,20 +14,22 @@
* limitations under the License.
*/
-option version = "0.0.1";
+option version = "0.0.2";
import "vnet/ethernet/ethernet_types.api";
import "vnet/ip/ip_types.api";
enum flow_type
{
FLOW_TYPE_ETHERNET = 1,
+ FLOW_TYPE_IP4,
+ FLOW_TYPE_IP6,
+ FLOW_TYPE_IP4_L2TPV3OIP,
+ FLOW_TYPE_IP4_IPSEC_ESP,
+ FLOW_TYPE_IP4_IPSEC_AH,
FLOW_TYPE_IP4_N_TUPLE,
FLOW_TYPE_IP6_N_TUPLE,
FLOW_TYPE_IP4_N_TUPLE_TAGGED,
FLOW_TYPE_IP6_N_TUPLE_TAGGED,
- FLOW_TYPE_IP4_L2TPV3OIP,
- FLOW_TYPE_IP4_IPSEC_ESP,
- FLOW_TYPE_IP4_IPSEC_AH,
FLOW_TYPE_IP4_VXLAN,
FLOW_TYPE_IP6_VXLAN,
FLOW_TYPE_IP4_GTPC,
@@ -50,6 +52,12 @@ typedef ip_port_and_mask
u16 mask;
};
+typedef ip_prot_and_mask
+{
+ vl_api_ip_proto_t prot;
+ u8 mask;
+};
+
typedef flow_ethernet
{
i32 foo;
@@ -58,14 +66,30 @@ typedef flow_ethernet
u16 type;
};
+typedef flow_ip4
+{
+ i32 foo;
+ vl_api_ip4_address_and_mask_t src_addr;
+ vl_api_ip4_address_and_mask_t dst_addr;
+ vl_api_ip_prot_and_mask_t protocol;
+};
+
+typedef flow_ip6
+{
+ i32 foo;
+ vl_api_ip6_address_and_mask_t src_addr;
+ vl_api_ip6_address_and_mask_t dst_addr;
+ vl_api_ip_prot_and_mask_t protocol;
+};
+
typedef flow_ip4_n_tuple
{
i32 foo;
vl_api_ip4_address_and_mask_t src_addr;
vl_api_ip4_address_and_mask_t dst_addr;
+ vl_api_ip_prot_and_mask_t protocol;
vl_api_ip_port_and_mask_t src_port;
vl_api_ip_port_and_mask_t dst_port;
- vl_api_ip_proto_t protocol;
};
typedef flow_ip6_n_tuple
@@ -73,9 +97,9 @@ typedef flow_ip6_n_tuple
i32 foo;
vl_api_ip6_address_and_mask_t src_addr;
vl_api_ip6_address_and_mask_t dst_addr;
+ vl_api_ip_prot_and_mask_t protocol;
vl_api_ip_port_and_mask_t src_port;
vl_api_ip_port_and_mask_t dst_port;
- vl_api_ip_proto_t protocol;
};
typedef flow_ip4_n_tuple_tagged
@@ -83,9 +107,9 @@ typedef flow_ip4_n_tuple_tagged
i32 foo;
vl_api_ip4_address_and_mask_t src_addr;
vl_api_ip4_address_and_mask_t dst_addr;
+ vl_api_ip_prot_and_mask_t protocol;
vl_api_ip_port_and_mask_t src_port;
vl_api_ip_port_and_mask_t dst_port;
- vl_api_ip_proto_t protocol;
};
typedef flow_ip6_n_tuple_tagged
@@ -93,9 +117,9 @@ typedef flow_ip6_n_tuple_tagged
i32 foo;
vl_api_ip6_address_and_mask_t src_addr;
vl_api_ip6_address_and_mask_t dst_addr;
+ vl_api_ip_prot_and_mask_t protocol;
vl_api_ip_port_and_mask_t src_port;
vl_api_ip_port_and_mask_t dst_port;
- vl_api_ip_proto_t protocol;
};
typedef flow_ip4_l2tpv3oip
@@ -103,9 +127,7 @@ typedef flow_ip4_l2tpv3oip
i32 foo;
vl_api_ip4_address_and_mask_t src_addr;
vl_api_ip4_address_and_mask_t dst_addr;
- vl_api_ip_port_and_mask_t src_port;
- vl_api_ip_port_and_mask_t dst_port;
- vl_api_ip_proto_t protocol;
+ vl_api_ip_prot_and_mask_t protocol;
u32 session_id;
};
@@ -114,9 +136,7 @@ typedef flow_ip4_ipsec_esp
i32 foo;
vl_api_ip4_address_and_mask_t src_addr;
vl_api_ip4_address_and_mask_t dst_addr;
- vl_api_ip_port_and_mask_t src_port;
- vl_api_ip_port_and_mask_t dst_port;
- vl_api_ip_proto_t protocol;
+ vl_api_ip_prot_and_mask_t protocol;
u32 spi;
};
@@ -125,27 +145,29 @@ typedef flow_ip4_ipsec_ah
i32 foo;
vl_api_ip4_address_and_mask_t src_addr;
vl_api_ip4_address_and_mask_t dst_addr;
- vl_api_ip_port_and_mask_t src_port;
- vl_api_ip_port_and_mask_t dst_port;
- vl_api_ip_proto_t protocol;
+ vl_api_ip_prot_and_mask_t protocol;
u32 spi;
};
typedef flow_ip4_vxlan
{
i32 foo;
- vl_api_ip4_address_t src_addr;
- vl_api_ip4_address_t dst_addr;
- u16 dst_port;
+ vl_api_ip4_address_and_mask_t src_addr;
+ vl_api_ip4_address_and_mask_t dst_addr;
+ vl_api_ip_prot_and_mask_t protocol;
+ vl_api_ip_port_and_mask_t src_port;
+ vl_api_ip_port_and_mask_t dst_port;
u16 vni;
};
typedef flow_ip6_vxlan
{
i32 foo;
- vl_api_ip6_address_t src_addr;
- vl_api_ip6_address_t dst_addr;
- u16 dst_port;
+ vl_api_ip6_address_and_mask_t src_addr;
+ vl_api_ip6_address_and_mask_t dst_addr;
+ vl_api_ip_prot_and_mask_t protocol;
+ vl_api_ip_port_and_mask_t src_port;
+ vl_api_ip_port_and_mask_t dst_port;
u16 vni;
};
@@ -154,9 +176,9 @@ typedef flow_ip4_gtpc
i32 foo;
vl_api_ip4_address_and_mask_t src_addr;
vl_api_ip4_address_and_mask_t dst_addr;
+ vl_api_ip_prot_and_mask_t protocol;
vl_api_ip_port_and_mask_t src_port;
vl_api_ip_port_and_mask_t dst_port;
- vl_api_ip_proto_t protocol;
u32 teid;
};
@@ -165,22 +187,24 @@ typedef flow_ip4_gtpu
i32 foo;
vl_api_ip4_address_and_mask_t src_addr;
vl_api_ip4_address_and_mask_t dst_addr;
+ vl_api_ip_prot_and_mask_t protocol;
vl_api_ip_port_and_mask_t src_port;
vl_api_ip_port_and_mask_t dst_port;
- vl_api_ip_proto_t protocol;
u32 teid;
};
union flow
{
vl_api_flow_ethernet_t ethernet;
+ vl_api_flow_ip4_t ip4;
+ vl_api_flow_ip6_t ip6;
+ vl_api_flow_ip4_l2tpv3oip_t ip4_l2tpv3oip;
+ vl_api_flow_ip4_ipsec_esp_t ip4_ipsec_esp;
+ vl_api_flow_ip4_ipsec_ah_t ip4_ipsec_ah;
vl_api_flow_ip4_n_tuple_t ip4_n_tuple;
vl_api_flow_ip6_n_tuple_t ip6_n_tuple;
vl_api_flow_ip4_n_tuple_tagged_t ip4_n_tuple_tagged;
vl_api_flow_ip6_n_tuple_tagged_t ip6_n_tuple_tagged;
- vl_api_flow_ip4_l2tpv3oip_t ip4_l2tpv3oip;
- vl_api_flow_ip4_ipsec_esp_t ip4_ipsec_esp;
- vl_api_flow_ip4_ipsec_ah_t ip4_ipsec_ah;
vl_api_flow_ip4_vxlan_t ip4_vxlan;
vl_api_flow_ip6_vxlan_t ip6_vxlan;
vl_api_flow_ip4_gtpc_t ip4_gtpc;
diff --git a/src/vnet/vxlan/vxlan.c b/src/vnet/vxlan/vxlan.c
index ea1748ce4a8..b1e4aaf291f 100644
--- a/src/vnet/vxlan/vxlan.c
+++ b/src/vnet/vxlan/vxlan.c
@@ -1114,9 +1114,13 @@ vnet_vxlan_add_del_rx_flow (u32 hw_if_index, u32 t_index, int is_add)
.buffer_advance = sizeof (ethernet_header_t),
.type = VNET_FLOW_TYPE_IP4_VXLAN,
.ip4_vxlan = {
- .src_addr = t->dst.ip4,
- .dst_addr = t->src.ip4,
- .dst_port = UDP_DST_PORT_vxlan,
+ .protocol.prot = IP_PROTOCOL_UDP,
+ .src_addr.addr = t->dst.ip4,
+ .dst_addr.addr = t->src.ip4,
+ .src_addr.mask.as_u32 = ~0,
+ .dst_addr.mask.as_u32 = ~0,
+ .dst_port.port = UDP_DST_PORT_vxlan,
+ .dst_port.mask = 0xFF,
.vni = t->vni,
}
,