Diffstat (limited to 'src/plugins/dev_octeon/flow.c')
 -rw-r--r--  src/plugins/dev_octeon/flow.c | 476 ++++++++++++++++++++++++-----
 1 file changed, 439 insertions(+), 37 deletions(-)
diff --git a/src/plugins/dev_octeon/flow.c b/src/plugins/dev_octeon/flow.c
index 1c367a036ab..5bef25f5369 100644
--- a/src/plugins/dev_octeon/flow.c
+++ b/src/plugins/dev_octeon/flow.c
@@ -46,6 +46,8 @@ VLIB_REGISTER_LOG_CLASS (oct_log, static) = {
(f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
(f->type == VNET_FLOW_TYPE_IP4_GTPU))
+#define FLOW_IS_GENERIC_TYPE(f) (f->type == VNET_FLOW_TYPE_GENERIC)
+
#define OCT_FLOW_UNSUPPORTED_ACTIONS(f) \
((f->actions == VNET_FLOW_ACTION_BUFFER_ADVANCE) || \
(f->actions == VNET_FLOW_ACTION_REDIRECT_TO_NODE))
@@ -71,6 +73,9 @@ VLIB_REGISTER_LOG_CLASS (oct_log, static) = {
_ (62, FLOW_KEY_TYPE_L3_DST, "l3-dst-only") \
_ (63, FLOW_KEY_TYPE_L3_SRC, "l3-src-only")
+#define GTPU_PORT 2152
+#define VXLAN_PORT 4789
+
typedef struct
{
u16 src_port;
@@ -87,6 +92,27 @@ typedef struct
u32 teid;
} gtpu_header_t;
+typedef struct
+{
+  u8 layer;	 /* next free slot in items[] */
+  u16 nxt_proto; /* ethertype or IP protocol handed to the next parse stage */
+  vnet_dev_port_t *port;
+  struct roc_npc_item_info *items;
+  struct
+  {
+    u8 *spec;
+    u8 *mask;
+    u16 off;
+  } oct_drv; /* scratch buffers for driver-format (roc_npc_flow_item_*) copies */
+  struct
+  {
+    u8 *spec;
+    u8 *mask;
+    u16 off;
+    u16 len;
+  } generic; /* raw pattern bytes supplied with the flow */
+} oct_flow_parse_state;
+
static void
oct_flow_convert_rss_types (u64 *key, u64 rss_types)
{
@@ -163,6 +189,14 @@ oct_flow_rule_create (vnet_dev_port_t *port, struct roc_npc_action *actions,
npc = &oct_port->npc;
+ for (int i = 0; item_info[i].type != ROC_NPC_ITEM_TYPE_END; i++)
+ {
+ log_debug (port->dev, "Flow[%d] Item[%d] type %d spec 0x%U mask 0x%U",
+ flow->index, i, item_info[i].type, format_hex_bytes,
+ item_info[i].spec, item_info[i].size, format_hex_bytes,
+ item_info[i].mask, item_info[i].size);
+ }
+
npc_flow =
roc_npc_flow_create (npc, &attr, item_info, actions, npc->pf_func, &rv);
if (rv)
@@ -183,6 +217,320 @@ oct_flow_rule_create (vnet_dev_port_t *port, struct roc_npc_action *actions,
return VNET_DEV_OK;
}
+static int
+oct_parse_l2 (oct_flow_parse_state *pst)
+{
+ struct roc_npc_flow_item_eth *eth_spec =
+ (struct roc_npc_flow_item_eth *) &pst->oct_drv.spec[pst->oct_drv.off];
+ struct roc_npc_flow_item_eth *eth_mask =
+ (struct roc_npc_flow_item_eth *) &pst->oct_drv.mask[pst->oct_drv.off];
+ ethernet_header_t *eth_hdr_mask =
+ (ethernet_header_t *) &pst->generic.mask[pst->generic.off];
+ ethernet_header_t *eth_hdr =
+ (ethernet_header_t *) &pst->generic.spec[pst->generic.off];
+ u16 tpid, etype;
+
+ tpid = etype = clib_net_to_host_u16 (eth_hdr->type);
+ clib_memcpy_fast (eth_spec, eth_hdr, sizeof (ethernet_header_t));
+ clib_memcpy_fast (eth_mask, eth_hdr_mask, sizeof (ethernet_header_t));
+ eth_spec->has_vlan = 0;
+
+ pst->items[pst->layer].spec = (void *) eth_spec;
+ pst->items[pst->layer].mask = (void *) eth_mask;
+ pst->items[pst->layer].size = sizeof (ethernet_header_t);
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_ETH;
+ pst->generic.off += sizeof (ethernet_header_t);
+ pst->oct_drv.off += sizeof (struct roc_npc_flow_item_eth);
+ pst->layer++;
+
+ /* Parse VLAN Tags if any */
+ struct roc_npc_flow_item_vlan *vlan_spec =
+ (struct roc_npc_flow_item_vlan *) &pst->oct_drv.spec[pst->oct_drv.off];
+ struct roc_npc_flow_item_vlan *vlan_mask =
+ (struct roc_npc_flow_item_vlan *) &pst->oct_drv.mask[pst->oct_drv.off];
+ ethernet_vlan_header_t *vlan_hdr, *vlan_hdr_mask;
+ u8 vlan_cnt = 0;
+
+ while (tpid == ETHERNET_TYPE_DOT1AD || tpid == ETHERNET_TYPE_VLAN)
+ {
+ if (pst->generic.off >= pst->generic.len)
+ break;
+
+ vlan_hdr =
+ (ethernet_vlan_header_t *) &pst->generic.spec[pst->generic.off];
+ vlan_hdr_mask =
+ (ethernet_vlan_header_t *) &pst->generic.mask[pst->generic.off];
+ tpid = etype = clib_net_to_host_u16 (vlan_hdr->type);
+ clib_memcpy (&vlan_spec[vlan_cnt], vlan_hdr,
+ sizeof (ethernet_vlan_header_t));
+ clib_memcpy (&vlan_mask[vlan_cnt], vlan_hdr_mask,
+ sizeof (ethernet_vlan_header_t));
+ pst->items[pst->layer].spec = (void *) &vlan_spec[vlan_cnt];
+ pst->items[pst->layer].mask = (void *) &vlan_mask[vlan_cnt];
+ pst->items[pst->layer].size = sizeof (ethernet_vlan_header_t);
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_VLAN;
+ pst->generic.off += sizeof (ethernet_vlan_header_t);
+ pst->oct_drv.off += sizeof (struct roc_npc_flow_item_vlan);
+ pst->layer++;
+ vlan_cnt++;
+ }
+
+  /* Innermost VLAN tag */
+ if (vlan_cnt)
+ vlan_spec[vlan_cnt - 1].has_more_vlan = 0;
+
+ pst->nxt_proto = etype;
+ return 0;
+}
+
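
To make the walk above concrete, here is a hypothetical 18-byte spec for a
single 802.1Q-tagged frame and what oct_parse_l2 () would emit for it (all
values invented for this sketch):

  u8 spec[18] = {
    0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, /* dst MAC */
    0x11, 0x22, 0x33, 0x44, 0x55, 0x66, /* src MAC */
    0x81, 0x00,                         /* TPID: 802.1Q */
    0x00, 0x64,                         /* PCP/DEI/VID = 100 */
    0x08, 0x00                          /* inner ethertype: IPv4 */
  };
  /* Result: items[0] = ROC_NPC_ITEM_TYPE_ETH (14 bytes),
     items[1] = ROC_NPC_ITEM_TYPE_VLAN (4 bytes),
     pst->nxt_proto == ETHERNET_TYPE_IP4, pst->generic.off == 18. */
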
+static int
+oct_parse_l3 (oct_flow_parse_state *pst)
+{
+
+ if (pst->generic.off >= pst->generic.len || pst->nxt_proto == 0)
+ return 0;
+
+ if (pst->nxt_proto == ETHERNET_TYPE_MPLS)
+ {
+ int label_stack_bottom = 0;
+ do
+ {
+
+ u8 *mpls_spec = &pst->generic.spec[pst->generic.off];
+ u8 *mpls_mask = &pst->generic.mask[pst->generic.off];
+
+ label_stack_bottom = mpls_spec[2] & 1;
+ pst->items[pst->layer].spec = (void *) mpls_spec;
+ pst->items[pst->layer].mask = (void *) mpls_mask;
+ pst->items[pst->layer].size = sizeof (u32);
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_MPLS;
+ pst->generic.off += sizeof (u32);
+ pst->layer++;
+ }
+      while (!label_stack_bottom &&
+	     pst->generic.off < pst->generic.len); /* stop at bottom-of-stack */
+
+ pst->nxt_proto = 0;
+ return 0;
+ }
+ else if (pst->nxt_proto == ETHERNET_TYPE_IP4)
+ {
+ ip4_header_t *ip4_spec =
+ (ip4_header_t *) &pst->generic.spec[pst->generic.off];
+ ip4_header_t *ip4_mask =
+ (ip4_header_t *) &pst->generic.mask[pst->generic.off];
+ pst->items[pst->layer].spec = (void *) ip4_spec;
+ pst->items[pst->layer].mask = (void *) ip4_mask;
+ pst->items[pst->layer].size = sizeof (ip4_header_t);
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_IPV4;
+ pst->generic.off += sizeof (ip4_header_t);
+ pst->layer++;
+ pst->nxt_proto = ip4_spec->protocol;
+ }
+ else if (pst->nxt_proto == ETHERNET_TYPE_IP6)
+ {
+ struct roc_npc_flow_item_ipv6 *ip6_spec =
+ (struct roc_npc_flow_item_ipv6 *) &pst->oct_drv.spec[pst->oct_drv.off];
+ struct roc_npc_flow_item_ipv6 *ip6_mask =
+ (struct roc_npc_flow_item_ipv6 *) &pst->oct_drv.mask[pst->oct_drv.off];
+ ip6_header_t *ip6_hdr_mask =
+ (ip6_header_t *) &pst->generic.mask[pst->generic.off];
+ ip6_header_t *ip6_hdr =
+ (ip6_header_t *) &pst->generic.spec[pst->generic.off];
+ u8 nxt_hdr = ip6_hdr->protocol;
+
+ clib_memcpy (ip6_spec, ip6_hdr, sizeof (ip6_header_t));
+ clib_memcpy (ip6_mask, ip6_hdr_mask, sizeof (ip6_header_t));
+ pst->items[pst->layer].spec = (void *) ip6_spec;
+ pst->items[pst->layer].mask = (void *) ip6_mask;
+ pst->items[pst->layer].size = sizeof (ip6_header_t);
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_IPV6;
+ pst->generic.off += sizeof (ip6_header_t);
+ pst->oct_drv.off += sizeof (struct roc_npc_flow_item_ipv6);
+ pst->layer++;
+
+ while (nxt_hdr == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS ||
+ nxt_hdr == IP_PROTOCOL_IP6_DESTINATION_OPTIONS ||
+ nxt_hdr == IP_PROTOCOL_IPV6_ROUTE)
+ {
+ if (pst->generic.off >= pst->generic.len)
+ return 0;
+
+ ip6_ext_header_t *ip6_ext_spec =
+ (ip6_ext_header_t *) &pst->generic.spec[pst->generic.off];
+ ip6_ext_header_t *ip6_ext_mask =
+ (ip6_ext_header_t *) &pst->generic.mask[pst->generic.off];
+ nxt_hdr = ip6_ext_spec->next_hdr;
+
+ pst->items[pst->layer].spec = (void *) ip6_ext_spec;
+ pst->items[pst->layer].mask = (void *) ip6_ext_mask;
+	  pst->items[pst->layer].size = ip6_ext_header_len (ip6_ext_spec);
+	  pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_IPV6_EXT;
+ pst->generic.off += ip6_ext_header_len (ip6_ext_spec);
+ pst->layer++;
+ }
+
+ if (pst->generic.off >= pst->generic.len)
+ return 0;
+
+ if (nxt_hdr == IP_PROTOCOL_IPV6_FRAGMENTATION)
+ {
+ ip6_frag_hdr_t *ip6_ext_frag_spec =
+ (ip6_frag_hdr_t *) &pst->generic.spec[pst->generic.off];
+ ip6_frag_hdr_t *ip6_ext_frag_mask =
+ (ip6_frag_hdr_t *) &pst->generic.mask[pst->generic.off];
+
+ pst->items[pst->layer].spec = (void *) ip6_ext_frag_spec;
+ pst->items[pst->layer].mask = (void *) ip6_ext_frag_mask;
+ pst->items[pst->layer].size = sizeof (ip6_frag_hdr_t);
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_IPV6_FRAG_EXT;
+ pst->generic.off += sizeof (ip6_frag_hdr_t);
+ pst->layer++;
+ }
+
+ pst->nxt_proto = nxt_hdr;
+ }
+ /* Unsupported L3. */
+ else
+ return -1;
+
+ return 0;
+}
+
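
The extension-header loop above advances by ip6_ext_header_len (), which
derives the byte length from the header's n_data_u64s field (8-octet units
beyond the first). A minimal sanity check, assuming VPP's ip6_ext_header_t
layout:

  ip6_ext_header_t eh = { .next_hdr = IP_PROTOCOL_UDP, .n_data_u64s = 0 };
  /* The smallest extension header occupies exactly one 8-octet unit. */
  ASSERT (ip6_ext_header_len (&eh) == 8);
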
+static int
+oct_parse_l4 (oct_flow_parse_state *pst)
+{
+
+ if (pst->generic.off >= pst->generic.len || pst->nxt_proto == 0)
+ return 0;
+
+#define _(protocol_t, protocol_value, ltype)                                 \
+  if (pst->nxt_proto == protocol_value)                                      \
+    {                                                                        \
+      protocol_t *spec = (protocol_t *) &pst->generic.spec[pst->generic.off];\
+      protocol_t *mask = (protocol_t *) &pst->generic.mask[pst->generic.off];\
+      pst->items[pst->layer].spec = spec;                                    \
+      pst->items[pst->layer].mask = mask;                                    \
+      pst->items[pst->layer].size = sizeof (protocol_t);                     \
+      pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_##ltype;               \
+      pst->generic.off += sizeof (protocol_t);                               \
+      pst->layer++;                                                          \
+      return 0;                                                              \
+    }
+
+ _ (esp_header_t, IP_PROTOCOL_IPSEC_ESP, ESP)
+ _ (udp_header_t, IP_PROTOCOL_UDP, UDP)
+ _ (tcp_header_t, IP_PROTOCOL_TCP, TCP)
+ _ (sctp_header_t, IP_PROTOCOL_SCTP, SCTP)
+ _ (icmp46_header_t, IP_PROTOCOL_ICMP, ICMP)
+ _ (icmp46_header_t, IP_PROTOCOL_ICMP6, ICMP)
+ _ (igmp_header_t, IP_PROTOCOL_IGMP, IGMP)
+ _ (gre_header_t, IP_PROTOCOL_GRE, GRE)
+
+ /* Unsupported L4. */
+ return -1;
+}
+
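
Expanded by hand for the UDP entry, the `_' macro above generates the
following (shown purely as a readability aid; each listed protocol gets the
same shape):

  if (pst->nxt_proto == IP_PROTOCOL_UDP)
    {
      udp_header_t *spec = (udp_header_t *) &pst->generic.spec[pst->generic.off];
      udp_header_t *mask = (udp_header_t *) &pst->generic.mask[pst->generic.off];
      pst->items[pst->layer].spec = spec;
      pst->items[pst->layer].mask = mask;
      pst->items[pst->layer].size = sizeof (udp_header_t);
      pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_UDP;
      pst->generic.off += sizeof (udp_header_t);
      pst->layer++;
      return 0;
    }
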
+static int
+oct_parse_tunnel (oct_flow_parse_state *pst)
+{
+ if (pst->generic.off >= pst->generic.len)
+ return 0;
+
+ if (pst->items[pst->layer - 1].type == ROC_NPC_ITEM_TYPE_GRE)
+ {
+ gre_header_t *gre_hdr = (gre_header_t *) pst->items[pst->layer - 1].spec;
+ pst->nxt_proto = clib_net_to_host_u16 (gre_hdr->protocol);
+ goto parse_l3;
+ }
+
+ else if (pst->items[pst->layer - 1].type == ROC_NPC_ITEM_TYPE_UDP)
+ {
+ udp_header_t *udp_h = (udp_header_t *) pst->items[pst->layer - 1].spec;
+ u16 dport = clib_net_to_host_u16 (udp_h->dst_port);
+
+ if (dport == GTPU_PORT)
+ {
+ gtpu_header_t *gtpu_spec =
+ (gtpu_header_t *) &pst->generic.spec[pst->generic.off];
+ gtpu_header_t *gtpu_mask =
+ (gtpu_header_t *) &pst->generic.mask[pst->generic.off];
+ pst->items[pst->layer].spec = (void *) gtpu_spec;
+ pst->items[pst->layer].mask = (void *) gtpu_mask;
+ pst->items[pst->layer].size = sizeof (gtpu_header_t);
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_GTPU;
+ pst->generic.off += sizeof (gtpu_header_t);
+ pst->layer++;
+ pst->nxt_proto = 0;
+ return 0;
+ }
+ else if (dport == VXLAN_PORT)
+ {
+ vxlan_header_t *vxlan_spec =
+ (vxlan_header_t *) &pst->generic.spec[pst->generic.off];
+	  vxlan_header_t *vxlan_mask =
+	    (vxlan_header_t *) &pst->generic.mask[pst->generic.off];
+ pst->items[pst->layer].spec = (void *) vxlan_spec;
+ pst->items[pst->layer].mask = (void *) vxlan_mask;
+ pst->items[pst->layer].size = sizeof (vxlan_header_t);
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_VXLAN;
+ pst->generic.off += sizeof (vxlan_header_t);
+ pst->layer++;
+ pst->nxt_proto = 0;
+ goto parse_l2;
+ }
+ }
+  /* No supported tunnel detected. */
+  else
+    {
+      log_err (pst->port->dev,
+	       "Partially parsed up to offset %u, unable to parse further",
+	       pst->generic.off);
+      return 0;
+    }
+parse_l2:
+ if (oct_parse_l2 (pst))
+ return -1;
+parse_l3:
+ if (oct_parse_l3 (pst))
+ return -1;
+
+ return oct_parse_l4 (pst);
+}
+
+static vnet_dev_rv_t
+oct_flow_generic_pattern_parse (oct_flow_parse_state *pst)
+{
+
+ if (oct_parse_l2 (pst))
+ goto err;
+
+ if (oct_parse_l3 (pst))
+ goto err;
+
+ if (oct_parse_l4 (pst))
+ goto err;
+
+ if (oct_parse_tunnel (pst))
+ goto err;
+
+ if (pst->generic.off < pst->generic.len)
+ {
+      log_err (pst->port->dev,
+	       "Partially parsed up to offset %u, unable to parse further",
+	       pst->generic.off);
+ goto err;
+ }
+
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_END;
+ return VNET_DEV_OK;
+
+err:
+ return VNET_DEV_ERR_NOT_SUPPORTED;
+}
+
static vnet_dev_rv_t
oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
uword *private_data)
@@ -190,12 +538,22 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
struct roc_npc_item_info item_info[ROC_NPC_ITEM_TYPE_END] = {};
struct roc_npc_action actions[ROC_NPC_ITEM_TYPE_END] = {};
oct_port_t *oct_port = vnet_dev_get_port_data (port);
+ ethernet_header_t eth_spec = {}, eth_mask = {};
+ sctp_header_t sctp_spec = {}, sctp_mask = {};
+ gtpu_header_t gtpu_spec = {}, gtpu_mask = {};
+ ip4_header_t ip4_spec = {}, ip4_mask = {};
+ ip6_header_t ip6_spec = {}, ip6_mask = {};
+ udp_header_t udp_spec = {}, udp_mask = {};
+ tcp_header_t tcp_spec = {}, tcp_mask = {};
+ esp_header_t esp_spec = {}, esp_mask = {};
u16 l4_src_port = 0, l4_dst_port = 0;
u16 l4_src_mask = 0, l4_dst_mask = 0;
struct roc_npc_action_rss rss_conf = {};
struct roc_npc_action_queue conf = {};
struct roc_npc_action_mark mark = {};
struct roc_npc *npc = &oct_port->npc;
+ u8 *flow_spec = 0, *flow_mask = 0;
+ u8 *drv_spec = 0, *drv_mask = 0;
vnet_dev_rv_t rv = VNET_DEV_OK;
int layer = 0, index = 0;
u16 *queues = NULL;
@@ -203,11 +561,52 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
u8 proto = 0;
u16 action = 0;
+ if (FLOW_IS_GENERIC_TYPE (flow))
+ {
+ unformat_input_t input;
+ int rc;
+
+ unformat_init_string (
+ &input, (const char *) flow->generic.pattern.spec,
+ strlen ((const char *) flow->generic.pattern.spec));
+ unformat_user (&input, unformat_hex_string, &flow_spec);
+ unformat_free (&input);
+
+ unformat_init_string (
+ &input, (const char *) flow->generic.pattern.mask,
+ strlen ((const char *) flow->generic.pattern.mask));
+ unformat_user (&input, unformat_hex_string, &flow_mask);
+ unformat_free (&input);
+
+ vec_validate (drv_spec, 1024);
+ vec_validate (drv_mask, 1024);
+ oct_flow_parse_state pst = {
+ .nxt_proto = 0,
+ .port = port,
+ .items = item_info,
+ .oct_drv = { .spec = drv_spec, .mask = drv_mask },
+ .generic = { .spec = flow_spec,
+ .mask = flow_mask,
+ .len = vec_len (flow_spec) },
+ };
+
+ rc = oct_flow_generic_pattern_parse (&pst);
+ if (rc)
+ {
+ vec_free (flow_spec);
+ vec_free (flow_mask);
+ vec_free (drv_spec);
+ vec_free (drv_mask);
+ return VNET_DEV_ERR_NOT_SUPPORTED;
+ }
+
+ goto parse_flow_actions;
+ }
+
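
For context, flow->generic.pattern.spec and .mask arrive as ASCII hex strings;
unformat_hex_string converts each into a byte vector whose length drives the
parser. A standalone sketch of that convention (the pattern value is invented
for illustration):

  unformat_input_t in;
  u8 *bytes = 0;
  /* 12 wildcarded MAC bytes followed by ethertype 0x0800; paired with a
     mask ending in "ffff", this matches any Ethernet frame carrying IPv4. */
  const char *spec_hex = "0000000000000000000000000800";

  unformat_init_string (&in, spec_hex, strlen (spec_hex));
  unformat_user (&in, unformat_hex_string, &bytes); /* yields 14 raw bytes */
  unformat_free (&in);
  /* ... hand `bytes' to the parse state as generic.spec, then ... */
  vec_free (bytes);
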
if (FLOW_IS_ETHERNET_CLASS (flow))
{
- ethernet_header_t eth_spec = { .type = clib_host_to_net_u16 (
- flow->ethernet.eth_hdr.type) },
- eth_mask = { .type = 0xFFFF };
+ eth_spec.type = clib_host_to_net_u16 (flow->ethernet.eth_hdr.type);
+ eth_mask.type = 0xFFFF;
item_info[layer].spec = (void *) &eth_spec;
item_info[layer].mask = (void *) &eth_mask;
@@ -220,10 +619,11 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
{
vnet_flow_ip4_t *ip4_hdr = &flow->ip4;
proto = ip4_hdr->protocol.prot;
- ip4_header_t ip4_spec = { .src_address = ip4_hdr->src_addr.addr,
- .dst_address = ip4_hdr->dst_addr.addr },
- ip4_mask = { .src_address = ip4_hdr->src_addr.mask,
- .dst_address = ip4_hdr->dst_addr.mask };
+
+ ip4_spec.src_address = ip4_hdr->src_addr.addr;
+ ip4_spec.dst_address = ip4_hdr->dst_addr.addr;
+ ip4_mask.src_address = ip4_hdr->src_addr.mask;
+ ip4_mask.dst_address = ip4_hdr->dst_addr.mask;
item_info[layer].spec = (void *) &ip4_spec;
item_info[layer].mask = (void *) &ip4_mask;
@@ -245,10 +645,11 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
{
vnet_flow_ip6_t *ip6_hdr = &flow->ip6;
proto = ip6_hdr->protocol.prot;
- ip6_header_t ip6_spec = { .src_address = ip6_hdr->src_addr.addr,
- .dst_address = ip6_hdr->dst_addr.addr },
- ip6_mask = { .src_address = ip6_hdr->src_addr.mask,
- .dst_address = ip6_hdr->dst_addr.mask };
+
+ ip6_spec.src_address = ip6_hdr->src_addr.addr;
+ ip6_spec.dst_address = ip6_hdr->dst_addr.addr;
+ ip6_mask.src_address = ip6_hdr->src_addr.mask;
+ ip6_mask.dst_address = ip6_hdr->dst_addr.mask;
item_info[layer].spec = (void *) &ip6_spec;
item_info[layer].mask = (void *) &ip6_mask;
@@ -273,16 +674,15 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
switch (proto)
{
case IP_PROTOCOL_UDP:
- item_info[layer].type = ROC_NPC_ITEM_TYPE_UDP;
-
- udp_header_t udp_spec = { .src_port = l4_src_port,
- .dst_port = l4_dst_port },
- udp_mask = { .src_port = l4_src_mask,
- .dst_port = l4_dst_mask };
+ udp_spec.src_port = l4_src_port;
+ udp_spec.dst_port = l4_dst_port;
+ udp_mask.src_port = l4_src_mask;
+ udp_mask.dst_port = l4_dst_mask;
item_info[layer].spec = (void *) &udp_spec;
item_info[layer].mask = (void *) &udp_mask;
item_info[layer].size = sizeof (udp_header_t);
+ item_info[layer].type = ROC_NPC_ITEM_TYPE_UDP;
layer++;
if (FLOW_IS_L4_TUNNEL_TYPE (flow))
@@ -290,14 +690,13 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
switch (flow->type)
{
case VNET_FLOW_TYPE_IP4_GTPU:
- item_info[layer].type = ROC_NPC_ITEM_TYPE_GTPU;
- gtpu_header_t gtpu_spec = { .teid = clib_host_to_net_u32 (
- flow->ip4_gtpu.teid) },
- gtpu_mask = { .teid = 0XFFFFFFFF };
+ gtpu_spec.teid = clib_host_to_net_u32 (flow->ip4_gtpu.teid);
+	      gtpu_mask.teid = 0xFFFFFFFF;
item_info[layer].spec = (void *) &gtpu_spec;
item_info[layer].mask = (void *) &gtpu_mask;
item_info[layer].size = sizeof (gtpu_header_t);
+ item_info[layer].type = ROC_NPC_ITEM_TYPE_GTPU;
layer++;
break;
@@ -309,42 +708,39 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
break;
case IP_PROTOCOL_TCP:
- item_info[layer].type = ROC_NPC_ITEM_TYPE_TCP;
-
- tcp_header_t tcp_spec = { .src_port = l4_src_port,
- .dst_port = l4_dst_port },
- tcp_mask = { .src_port = l4_src_mask,
- .dst_port = l4_dst_mask };
+ tcp_spec.src_port = l4_src_port;
+ tcp_spec.dst_port = l4_dst_port;
+ tcp_mask.src_port = l4_src_mask;
+ tcp_mask.dst_port = l4_dst_mask;
item_info[layer].spec = (void *) &tcp_spec;
item_info[layer].mask = (void *) &tcp_mask;
item_info[layer].size = sizeof (tcp_header_t);
+ item_info[layer].type = ROC_NPC_ITEM_TYPE_TCP;
layer++;
break;
case IP_PROTOCOL_SCTP:
- item_info[layer].type = ROC_NPC_ITEM_TYPE_SCTP;
-
- sctp_header_t sctp_spec = { .src_port = l4_src_port,
- .dst_port = l4_dst_port },
- sctp_mask = { .src_port = l4_src_mask,
- .dst_port = l4_dst_mask };
+ sctp_spec.src_port = l4_src_port;
+ sctp_spec.dst_port = l4_dst_port;
+ sctp_mask.src_port = l4_src_mask;
+ sctp_mask.dst_port = l4_dst_mask;
item_info[layer].spec = (void *) &sctp_spec;
item_info[layer].mask = (void *) &sctp_mask;
item_info[layer].size = sizeof (sctp_header_t);
+ item_info[layer].type = ROC_NPC_ITEM_TYPE_SCTP;
layer++;
break;
case IP_PROTOCOL_IPSEC_ESP:
- item_info[layer].type = ROC_NPC_ITEM_TYPE_ESP;
- esp_header_t esp_spec = { .spi = clib_host_to_net_u32 (
- flow->ip4_ipsec_esp.spi) },
- esp_mask = { .spi = 0xFFFFFFFF };
+ esp_spec.spi = clib_host_to_net_u32 (flow->ip4_ipsec_esp.spi);
+ esp_mask.spi = 0xFFFFFFFF;
item_info[layer].spec = (void *) &esp_spec;
item_info[layer].mask = (void *) &esp_mask;
item_info[layer].size = sizeof (u32);
+ item_info[layer].type = ROC_NPC_ITEM_TYPE_ESP;
layer++;
break;
@@ -357,6 +753,7 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
end_item_info:
item_info[layer].type = ROC_NPC_ITEM_TYPE_END;
+parse_flow_actions:
if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
{
conf.index = flow->redirect_queue;
@@ -422,6 +819,11 @@ end_item_info:
if (queues)
clib_mem_free (queues);
+ vec_free (flow_spec);
+ vec_free (flow_mask);
+ vec_free (drv_spec);
+ vec_free (drv_mask);
+
return rv;
}