author    Sriram Vatala <svatala@marvell.com>  2024-03-12 04:22:00 +0000
committer Monendra Singh Kushwaha <kmonendra@marvell.com>  2024-06-10 18:51:40 +0000
commit    6e0f70a035e2d2d0db75e4bfa4ceb748a3c2259e (patch)
tree      65d657d760672ff19074ed41a012293ed4ad5dd9 /src
parent    d8d3ad99af90fccac88a83a77f8676097568fb71 (diff)
octeon: add support for vnet generic flow type
This patch adds vnet generic flow type support to the OCTEON plugin, extending the set of vnet flow types already supported. It allows users to configure additional match patterns such as 802.1q tag fields, 802.1ad tag fields, MPLS fields, and IP DSCP, where the underlying hardware supports them. On OCTEON, a wide range of match patterns, including user-defined custom protocol types, can be supported depending on the programmable classification profile.

Generic flows operate on hexadecimal strings representing packet data bytes and corresponding mask data bytes. The mask data bytes, with bits set to '1', selectively identify the data bytes used for hardware flow matching. To configure generic flow rules, the packetforge tool is recommended; it accepts input in a readable, user-friendly format. The tool is available in the VPP tree under `extras/packetforge`, and detailed instructions can be found in the documentation there. Alternatively, users can configure rules manually through the existing vnet flow CLI and binary API interfaces.

Type: feature
Change-Id: I8198536cf1fe0a4719542a8b54c599230c7852e9
Signed-off-by: Sriram Vatala <svatala@marvell.com>
(cherry picked from commit 282f2ecd8e7d68b9930d6f1e2c62205c9dfe8285)
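The match rule described above can be stated precisely: a packet byte takes part in the comparison only where the corresponding mask byte has '1' bits. As a reading aid, here is a minimal standalone sketch of that rule; it is written for this note and is not code from the plugin or from packetforge.

#include <stddef.h>
#include <stdint.h>

/* Return 1 if pkt matches spec wherever mask selects bits, else 0. */
static int
masked_match (const uint8_t *pkt, const uint8_t *spec,
              const uint8_t *mask, size_t len)
{
  for (size_t i = 0; i < len; i++)
    if ((pkt[i] & mask[i]) != (spec[i] & mask[i]))
      return 0; /* mismatch in a byte the mask cares about */
  return 1;     /* all masked bits match */
}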
Diffstat (limited to 'src')
-rw-r--r--  src/plugins/dev_octeon/flow.c    386
-rw-r--r--  src/plugins/dev_octeon/octeon.h    6
2 files changed, 392 insertions, 0 deletions
diff --git a/src/plugins/dev_octeon/flow.c b/src/plugins/dev_octeon/flow.c
index 1c367a036ab..35aabde76a7 100644
--- a/src/plugins/dev_octeon/flow.c
+++ b/src/plugins/dev_octeon/flow.c
@@ -46,6 +46,8 @@ VLIB_REGISTER_LOG_CLASS (oct_log, static) = {
(f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
(f->type == VNET_FLOW_TYPE_IP4_GTPU))
+#define FLOW_IS_GENERIC_TYPE(f) (f->type == VNET_FLOW_TYPE_GENERIC)
+
#define OCT_FLOW_UNSUPPORTED_ACTIONS(f) \
((f->actions == VNET_FLOW_ACTION_BUFFER_ADVANCE) || \
(f->actions == VNET_FLOW_ACTION_REDIRECT_TO_NODE))
@@ -71,6 +73,9 @@ VLIB_REGISTER_LOG_CLASS (oct_log, static) = {
_ (62, FLOW_KEY_TYPE_L3_DST, "l3-dst-only") \
_ (63, FLOW_KEY_TYPE_L3_SRC, "l3-src-only")
+#define GTPU_PORT 2152
+#define VXLAN_PORT 4789
+
typedef struct
{
u16 src_port;
@@ -87,6 +92,27 @@ typedef struct
u32 teid;
} gtpu_header_t;
+typedef struct
+{
+  u8 layer;      /* index of the next roc_npc item slot to fill */
+  u16 nxt_proto; /* ethertype/protocol handed to the next parser stage */
+  vnet_dev_port_t *port;
+  struct roc_npc_item_info *items;
+  /* driver-format spec/mask scratch buffers and write offset */
+  struct
+  {
+    u8 *spec;
+    u8 *mask;
+    u16 off;
+  } oct_drv;
+  /* raw pattern spec/mask bytes, parse offset and total length */
+  struct
+  {
+    u8 *spec;
+    u8 *mask;
+    u16 off;
+    u16 len;
+  } generic;
+} oct_flow_parse_state;
+
static void
oct_flow_convert_rss_types (u64 *key, u64 rss_types)
{
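A reading aid for the state added in the hunk above: the parser keeps two cursor pairs that advance in lock-step, one over the raw generic spec/mask bytes and one over the driver-format item buffers. The standalone sketch below shows only the cursor pattern; the names are illustrative, not the plugin's.

#include <stdint.h>

typedef struct
{
  uint8_t *spec, *mask; /* pattern bytes and their match mask */
  uint16_t off, len;    /* parse cursor and total pattern length */
} byte_stream_t;

/* Return a pointer to the next header and advance the cursor, or
   NULL once the pattern is exhausted. */
static uint8_t *
consume_header (byte_stream_t *s, uint16_t hdr_len)
{
  if ((uint32_t) s->off + hdr_len > s->len)
    return 0;
  uint8_t *hdr = s->spec + s->off;
  s->off += hdr_len;
  return hdr;
}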
@@ -183,6 +209,320 @@ oct_flow_rule_create (vnet_dev_port_t *port, struct roc_npc_action *actions,
return VNET_DEV_OK;
}
+static int
+oct_parse_l2 (oct_flow_parse_state *pst)
+{
+ struct roc_npc_flow_item_eth *eth_spec =
+ (struct roc_npc_flow_item_eth *) &pst->oct_drv.spec[pst->oct_drv.off];
+ struct roc_npc_flow_item_eth *eth_mask =
+ (struct roc_npc_flow_item_eth *) &pst->oct_drv.mask[pst->oct_drv.off];
+ ethernet_header_t *eth_hdr_mask =
+ (ethernet_header_t *) &pst->generic.mask[pst->generic.off];
+ ethernet_header_t *eth_hdr =
+ (ethernet_header_t *) &pst->generic.spec[pst->generic.off];
+ u16 tpid, etype;
+
+ tpid = etype = clib_net_to_host_u16 (eth_hdr->type);
+ clib_memcpy_fast (eth_spec, eth_hdr, sizeof (ethernet_header_t));
+ clib_memcpy_fast (eth_mask, eth_hdr_mask, sizeof (ethernet_header_t));
+ eth_spec->has_vlan = 0;
+
+ pst->items[pst->layer].spec = (void *) eth_spec;
+ pst->items[pst->layer].mask = (void *) eth_mask;
+ pst->items[pst->layer].size = sizeof (ethernet_header_t);
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_ETH;
+ pst->generic.off += sizeof (ethernet_header_t);
+ pst->oct_drv.off += sizeof (struct roc_npc_flow_item_eth);
+ pst->layer++;
+
+ /* Parse VLAN Tags if any */
+ struct roc_npc_flow_item_vlan *vlan_spec =
+ (struct roc_npc_flow_item_vlan *) &pst->oct_drv.spec[pst->oct_drv.off];
+ struct roc_npc_flow_item_vlan *vlan_mask =
+ (struct roc_npc_flow_item_vlan *) &pst->oct_drv.mask[pst->oct_drv.off];
+ ethernet_vlan_header_t *vlan_hdr, *vlan_hdr_mask;
+ u8 vlan_cnt = 0;
+
+ while (tpid == ETHERNET_TYPE_DOT1AD || tpid == ETHERNET_TYPE_VLAN)
+ {
+ if (pst->generic.off >= pst->generic.len)
+ break;
+
+ vlan_hdr =
+ (ethernet_vlan_header_t *) &pst->generic.spec[pst->generic.off];
+ vlan_hdr_mask =
+ (ethernet_vlan_header_t *) &pst->generic.mask[pst->generic.off];
+ tpid = etype = clib_net_to_host_u16 (vlan_hdr->type);
+ clib_memcpy (&vlan_spec[vlan_cnt], vlan_hdr,
+ sizeof (ethernet_vlan_header_t));
+ clib_memcpy (&vlan_mask[vlan_cnt], vlan_hdr_mask,
+ sizeof (ethernet_vlan_header_t));
+ pst->items[pst->layer].spec = (void *) &vlan_spec[vlan_cnt];
+ pst->items[pst->layer].mask = (void *) &vlan_mask[vlan_cnt];
+ pst->items[pst->layer].size = sizeof (ethernet_vlan_header_t);
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_VLAN;
+ pst->generic.off += sizeof (ethernet_vlan_header_t);
+ pst->oct_drv.off += sizeof (struct roc_npc_flow_item_vlan);
+ pst->layer++;
+ vlan_cnt++;
+ }
+
+  /* Innermost VLAN tag */
+ if (vlan_cnt)
+ vlan_spec[vlan_cnt - 1].has_more_vlan = 0;
+
+ pst->nxt_proto = etype;
+ return 0;
+}
+
+static int
+oct_parse_l3 (oct_flow_parse_state *pst)
+{
+ if (pst->generic.off >= pst->generic.len || pst->nxt_proto == 0)
+ return 0;
+
+ if (pst->nxt_proto == ETHERNET_TYPE_MPLS)
+ {
+ int label_stack_bottom = 0;
+ do
+ {
+
+ u8 *mpls_spec = &pst->generic.spec[pst->generic.off];
+ u8 *mpls_mask = &pst->generic.mask[pst->generic.off];
+
+ label_stack_bottom = mpls_spec[2] & 1;
+ pst->items[pst->layer].spec = (void *) mpls_spec;
+ pst->items[pst->layer].mask = (void *) mpls_mask;
+ pst->items[pst->layer].size = sizeof (u32);
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_MPLS;
+ pst->generic.off += sizeof (u32);
+ pst->layer++;
+ }
+ while (label_stack_bottom);
+
+ pst->nxt_proto = 0;
+ return 0;
+ }
+ else if (pst->nxt_proto == ETHERNET_TYPE_IP4)
+ {
+ ip4_header_t *ip4_spec =
+ (ip4_header_t *) &pst->generic.spec[pst->generic.off];
+ ip4_header_t *ip4_mask =
+ (ip4_header_t *) &pst->generic.mask[pst->generic.off];
+ pst->items[pst->layer].spec = (void *) ip4_spec;
+ pst->items[pst->layer].mask = (void *) ip4_mask;
+ pst->items[pst->layer].size = sizeof (ip4_header_t);
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_IPV4;
+ pst->generic.off += sizeof (ip4_header_t);
+ pst->layer++;
+ pst->nxt_proto = ip4_spec->protocol;
+ }
+ else if (pst->nxt_proto == ETHERNET_TYPE_IP6)
+ {
+ struct roc_npc_flow_item_ipv6 *ip6_spec =
+ (struct roc_npc_flow_item_ipv6 *) &pst->oct_drv.spec[pst->oct_drv.off];
+ struct roc_npc_flow_item_ipv6 *ip6_mask =
+ (struct roc_npc_flow_item_ipv6 *) &pst->oct_drv.mask[pst->oct_drv.off];
+ ip6_header_t *ip6_hdr_mask =
+ (ip6_header_t *) &pst->generic.mask[pst->generic.off];
+ ip6_header_t *ip6_hdr =
+ (ip6_header_t *) &pst->generic.spec[pst->generic.off];
+ u8 nxt_hdr = ip6_hdr->protocol;
+
+ clib_memcpy (ip6_spec, ip6_hdr, sizeof (ip6_header_t));
+ clib_memcpy (ip6_mask, ip6_hdr_mask, sizeof (ip6_header_t));
+ pst->items[pst->layer].spec = (void *) ip6_spec;
+ pst->items[pst->layer].mask = (void *) ip6_mask;
+ pst->items[pst->layer].size = sizeof (ip6_header_t);
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_IPV6;
+ pst->generic.off += sizeof (ip6_header_t);
+ pst->oct_drv.off += sizeof (struct roc_npc_flow_item_ipv6);
+ pst->layer++;
+
+ while (nxt_hdr == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS ||
+ nxt_hdr == IP_PROTOCOL_IP6_DESTINATION_OPTIONS ||
+ nxt_hdr == IP_PROTOCOL_IPV6_ROUTE)
+ {
+ if (pst->generic.off >= pst->generic.len)
+ return 0;
+
+ ip6_ext_header_t *ip6_ext_spec =
+ (ip6_ext_header_t *) &pst->generic.spec[pst->generic.off];
+ ip6_ext_header_t *ip6_ext_mask =
+ (ip6_ext_header_t *) &pst->generic.mask[pst->generic.off];
+ nxt_hdr = ip6_ext_spec->next_hdr;
+
+ pst->items[pst->layer].spec = (void *) ip6_ext_spec;
+ pst->items[pst->layer].mask = (void *) ip6_ext_mask;
+	  pst->items[pst->layer].size = ip6_ext_header_len (ip6_ext_spec);
+	  pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_IPV6_EXT;
+ pst->generic.off += ip6_ext_header_len (ip6_ext_spec);
+ pst->layer++;
+ }
+
+ if (pst->generic.off >= pst->generic.len)
+ return 0;
+
+ if (nxt_hdr == IP_PROTOCOL_IPV6_FRAGMENTATION)
+ {
+ ip6_frag_hdr_t *ip6_ext_frag_spec =
+ (ip6_frag_hdr_t *) &pst->generic.spec[pst->generic.off];
+ ip6_frag_hdr_t *ip6_ext_frag_mask =
+ (ip6_frag_hdr_t *) &pst->generic.mask[pst->generic.off];
+
+ pst->items[pst->layer].spec = (void *) ip6_ext_frag_spec;
+ pst->items[pst->layer].mask = (void *) ip6_ext_frag_mask;
+ pst->items[pst->layer].size = sizeof (ip6_frag_hdr_t);
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_IPV6_FRAG_EXT;
+ pst->generic.off += sizeof (ip6_frag_hdr_t);
+ pst->layer++;
+ }
+
+ pst->nxt_proto = nxt_hdr;
+ }
+ /* Unsupported L3. */
+ else
+ return -1;
+
+ return 0;
+}
+
+static int
+oct_parse_l4 (oct_flow_parse_state *pst)
+{
+ if (pst->generic.off >= pst->generic.len || pst->nxt_proto == 0)
+ return 0;
+
+#define _(protocol_t, protocol_value, ltype)                                 \
+  if (pst->nxt_proto == protocol_value)                                      \
+    {                                                                        \
+      protocol_t *spec = (protocol_t *) &pst->generic.spec[pst->generic.off];\
+      protocol_t *mask = (protocol_t *) &pst->generic.mask[pst->generic.off];\
+      pst->items[pst->layer].spec = spec;                                    \
+      pst->items[pst->layer].mask = mask;                                    \
+      pst->items[pst->layer].size = sizeof (protocol_t);                     \
+      pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_##ltype;               \
+      pst->generic.off += sizeof (protocol_t);                               \
+      pst->layer++;                                                          \
+      return 0;                                                              \
+    }
+
+ _ (esp_header_t, IP_PROTOCOL_IPSEC_ESP, ESP)
+ _ (udp_header_t, IP_PROTOCOL_UDP, UDP)
+ _ (tcp_header_t, IP_PROTOCOL_TCP, TCP)
+ _ (sctp_header_t, IP_PROTOCOL_SCTP, SCTP)
+ _ (icmp46_header_t, IP_PROTOCOL_ICMP, ICMP)
+ _ (icmp46_header_t, IP_PROTOCOL_ICMP6, ICMP)
+ _ (igmp_header_t, IP_PROTOCOL_IGMP, IGMP)
+ _ (gre_header_t, IP_PROTOCOL_GRE, GRE)
+
+ /* Unsupported L4. */
+ return -1;
+}
+
+static int
+oct_parse_tunnel (oct_flow_parse_state *pst)
+{
+ if (pst->generic.off >= pst->generic.len)
+ return 0;
+
+ if (pst->items[pst->layer - 1].type == ROC_NPC_ITEM_TYPE_GRE)
+ {
+ gre_header_t *gre_hdr = (gre_header_t *) pst->items[pst->layer - 1].spec;
+ pst->nxt_proto = clib_net_to_host_u16 (gre_hdr->protocol);
+ goto parse_l3;
+ }
+
+ else if (pst->items[pst->layer - 1].type == ROC_NPC_ITEM_TYPE_UDP)
+ {
+ udp_header_t *udp_h = (udp_header_t *) pst->items[pst->layer - 1].spec;
+ u16 dport = clib_net_to_host_u16 (udp_h->dst_port);
+
+ if (dport == GTPU_PORT)
+ {
+ gtpu_header_t *gtpu_spec =
+ (gtpu_header_t *) &pst->generic.spec[pst->generic.off];
+ gtpu_header_t *gtpu_mask =
+ (gtpu_header_t *) &pst->generic.mask[pst->generic.off];
+ pst->items[pst->layer].spec = (void *) gtpu_spec;
+ pst->items[pst->layer].mask = (void *) gtpu_mask;
+ pst->items[pst->layer].size = sizeof (gtpu_header_t);
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_GTPU;
+ pst->generic.off += sizeof (gtpu_header_t);
+ pst->layer++;
+ pst->nxt_proto = 0;
+ return 0;
+ }
+ else if (dport == VXLAN_PORT)
+ {
+ vxlan_header_t *vxlan_spec =
+ (vxlan_header_t *) &pst->generic.spec[pst->generic.off];
+ vxlan_header_t *vxlan_mask =
+	    (vxlan_header_t *) &pst->generic.mask[pst->generic.off];
+ pst->items[pst->layer].spec = (void *) vxlan_spec;
+ pst->items[pst->layer].mask = (void *) vxlan_mask;
+ pst->items[pst->layer].size = sizeof (vxlan_header_t);
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_VXLAN;
+ pst->generic.off += sizeof (vxlan_header_t);
+ pst->layer++;
+ pst->nxt_proto = 0;
+ goto parse_l2;
+ }
+ }
+ /* No supported Tunnel detected. */
+ else
+ {
+ log_err (pst->port->dev,
+	       "Partially parsed up to offset %u; unable to parse further",
+ pst->generic.off);
+ return 0;
+ }
+parse_l2:
+ if (oct_parse_l2 (pst))
+ return -1;
+parse_l3:
+ if (oct_parse_l3 (pst))
+ return -1;
+
+ return oct_parse_l4 (pst);
+}
+
+static vnet_dev_rv_t
+oct_flow_generic_pattern_parse (oct_flow_parse_state *pst)
+{
+ if (oct_parse_l2 (pst))
+ goto err;
+
+ if (oct_parse_l3 (pst))
+ goto err;
+
+ if (oct_parse_l4 (pst))
+ goto err;
+
+ if (oct_parse_tunnel (pst))
+ goto err;
+
+ if (pst->generic.off < pst->generic.len)
+ {
+ log_err (pst->port->dev,
+	       "Partially parsed up to offset %u; unable to parse further",
+ pst->generic.off);
+ goto err;
+ }
+
+ pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_END;
+ return VNET_DEV_OK;
+
+err:
+ return VNET_DEV_ERR_NOT_SUPPORTED;
+}
+
static vnet_dev_rv_t
oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
uword *private_data)
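To make the parsers' input concrete: a generic pattern is just contiguous header bytes with a byte-for-byte parallel mask. The hand-built example below (hypothetical values, not packetforge output) lays out an Ethernet/IPv4/UDP pattern that matches only the UDP destination port. Note that the parse functions above dispatch on spec bytes (the ethertype at offset 12, the IPv4 protocol at offset 23), so those must be populated even though the mask leaves them unmatched by hardware.

#include <stdint.h>
#include <string.h>

enum { ETH_LEN = 14, IP4_LEN = 20, UDP_LEN = 8, PAT_LEN = 42 };

static void
build_udp_dport_pattern (uint8_t spec[PAT_LEN], uint8_t mask[PAT_LEN])
{
  memset (spec, 0, PAT_LEN);
  memset (mask, 0, PAT_LEN);
  spec[12] = 0x08;	  /* ethertype 0x0800: oct_parse_l3 () takes IPv4 */
  spec[ETH_LEN + 9] = 17; /* IPv4 protocol 17: oct_parse_l4 () selects UDP */
  /* match UDP dst port 53 (bytes 2-3 of the UDP header), nothing else */
  spec[ETH_LEN + IP4_LEN + 3] = 53;
  mask[ETH_LEN + IP4_LEN + 2] = 0xff;
  mask[ETH_LEN + IP4_LEN + 3] = 0xff;
}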
@@ -196,6 +536,7 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
struct roc_npc_action_queue conf = {};
struct roc_npc_action_mark mark = {};
struct roc_npc *npc = &oct_port->npc;
+ u8 *flow_spec = 0, *flow_mask = 0;
vnet_dev_rv_t rv = VNET_DEV_OK;
int layer = 0, index = 0;
u16 *queues = NULL;
@@ -203,6 +544,45 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
u8 proto = 0;
u16 action = 0;
+ if (FLOW_IS_GENERIC_TYPE (flow))
+ {
+ u8 drv_item_spec[1024] = { 0 }, drv_item_mask[1024] = { 0 };
+ unformat_input_t input;
+ int rc;
+
+ unformat_init_string (
+ &input, (const char *) flow->generic.pattern.spec,
+ strlen ((const char *) flow->generic.pattern.spec));
+ unformat_user (&input, unformat_hex_string, &flow_spec);
+ unformat_free (&input);
+
+ unformat_init_string (
+ &input, (const char *) flow->generic.pattern.mask,
+ strlen ((const char *) flow->generic.pattern.mask));
+ unformat_user (&input, unformat_hex_string, &flow_mask);
+ unformat_free (&input);
+
+ oct_flow_parse_state pst = {
+ .nxt_proto = 0,
+ .port = port,
+ .items = item_info,
+ .oct_drv = { .spec = drv_item_spec, .mask = drv_item_mask },
+ .generic = { .spec = flow_spec,
+ .mask = flow_mask,
+ .len = vec_len (flow_spec) },
+ };
+
+ rc = oct_flow_generic_pattern_parse (&pst);
+ if (rc)
+ {
+ vec_free (flow_spec);
+ vec_free (flow_mask);
+ return VNET_DEV_ERR_NOT_SUPPORTED;
+ }
+
+ goto parse_flow_actions;
+ }
+
if (FLOW_IS_ETHERNET_CLASS (flow))
{
ethernet_header_t eth_spec = { .type = clib_host_to_net_u16 (
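The unformat_hex_string calls in the hunk above convert the pattern's hexadecimal strings into byte vectors before parsing begins. For readers without the VPP tree at hand, a rough standalone stand-in for that conversion is sketched below; it is illustrative only, not vppinfra's implementation.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Convert a string such as "08004500..." into bytes.  Returns the
   byte count, or -1 on malformed input. */
static int
hex_to_bytes (const char *hex, uint8_t *out, size_t out_len)
{
  size_t n = strlen (hex);
  if ((n & 1) || n / 2 > out_len) /* need whole byte pairs that fit */
    return -1;
  for (size_t i = 0; i < n / 2; i++)
    {
      char byte[3] = { hex[2 * i], hex[2 * i + 1], 0 };
      char *end;
      long v = strtol (byte, &end, 16);
      if (*end != 0) /* non-hex character */
	return -1;
      out[i] = (uint8_t) v;
    }
  return (int) (n / 2);
}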
@@ -357,6 +737,7 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
end_item_info:
item_info[layer].type = ROC_NPC_ITEM_TYPE_END;
+parse_flow_actions:
if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
{
conf.index = flow->redirect_queue;
@@ -422,6 +803,11 @@ end_item_info:
if (queues)
clib_mem_free (queues);
+ if (flow_spec)
+ vec_free (flow_spec);
+ if (flow_mask)
+ vec_free (flow_mask);
+
return rv;
}
diff --git a/src/plugins/dev_octeon/octeon.h b/src/plugins/dev_octeon/octeon.h
index c02b66e7cdb..4926f1afe79 100644
--- a/src/plugins/dev_octeon/octeon.h
+++ b/src/plugins/dev_octeon/octeon.h
@@ -12,6 +12,12 @@
#include <vnet/flow/flow.h>
#include <vnet/udp/udp.h>
#include <vnet/ipsec/esp.h>
+#include <vnet/ethernet/packet.h>
+#include <vnet/ip/ip_packet.h>
+#include <vnet/ip/icmp46_packet.h>
+#include <vnet/ip/igmp_packet.h>
+#include <vnet/gre/packet.h>
+#include <vxlan/vxlan.h>
#include <base/roc_api.h>
#include <dev_octeon/hw_defs.h>