Diffstat (limited to 'src/plugins')
-rw-r--r--  src/plugins/avf/CMakeLists.txt      |    2
-rw-r--r--  src/plugins/avf/avf.h               |   43
-rw-r--r--  src/plugins/avf/avf_advanced_flow.h | 1068
-rw-r--r--  src/plugins/avf/avf_fdir_lib.c      |  879
-rw-r--r--  src/plugins/avf/device.c            |   15
-rw-r--r--  src/plugins/avf/flow.c              |  421
-rw-r--r--  src/plugins/avf/virtchnl.h          |   36
7 files changed, 2430 insertions(+), 34 deletions(-)
diff --git a/src/plugins/avf/CMakeLists.txt b/src/plugins/avf/CMakeLists.txt
index 8547fb76c9c..95fde7ceff8 100644
--- a/src/plugins/avf/CMakeLists.txt
+++ b/src/plugins/avf/CMakeLists.txt
@@ -20,6 +20,8 @@ add_vpp_plugin(avf
output.c
plugin.c
avf_api.c
+ flow.c
+ avf_fdir_lib.c
MULTIARCH_SOURCES
input.c
diff --git a/src/plugins/avf/avf.h b/src/plugins/avf/avf.h
index 53e1275ea62..6c09084eb8c 100644
--- a/src/plugins/avf/avf.h
+++ b/src/plugins/avf/avf.h
@@ -29,6 +29,9 @@
#include <vnet/interface.h>
+#include <vnet/devices/devices.h>
+#include <vnet/flow/flow.h>
+
#define AVF_QUEUE_SZ_MAX 4096
#define AVF_QUEUE_SZ_MIN 64
@@ -97,16 +100,17 @@ extern vlib_log_class_registration_t avf_log;
format_vlib_pci_addr, &dev->pci_addr, \
## __VA_ARGS__)
-#define foreach_avf_device_flags \
- _(0, INITIALIZED, "initialized") \
- _(1, ERROR, "error") \
- _(2, ADMIN_UP, "admin-up") \
- _(3, VA_DMA, "vaddr-dma") \
- _(4, LINK_UP, "link-up") \
- _(5, SHARED_TXQ_LOCK, "shared-txq-lock") \
- _(6, ELOG, "elog") \
- _(7, PROMISC, "promisc") \
- _(8, RX_INT, "rx-interrupts")
+#define foreach_avf_device_flags \
+ _ (0, INITIALIZED, "initialized") \
+ _ (1, ERROR, "error") \
+ _ (2, ADMIN_UP, "admin-up") \
+ _ (3, VA_DMA, "vaddr-dma") \
+ _ (4, LINK_UP, "link-up") \
+ _ (5, SHARED_TXQ_LOCK, "shared-txq-lock") \
+ _ (6, ELOG, "elog") \
+ _ (7, PROMISC, "promisc") \
+ _ (8, RX_INT, "rx-interrupts") \
+ _ (9, RX_FLOW_OFFLOAD, "rx-flow-offload")
enum
{
@@ -183,6 +187,20 @@ typedef struct
typedef struct
{
+ u32 flow_index;
+ u32 mark;
+ struct avf_fdir_conf *rcfg;
+} avf_flow_entry_t;
+
+typedef struct
+{
+ u32 flow_id;
+ u16 next_index;
+ i16 buffer_advance;
+} avf_flow_lookup_entry_t;
+
+typedef struct
+{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 flags;
u32 per_interface_next_index;
@@ -224,6 +242,10 @@ typedef struct
virtchnl_link_speed_t link_speed;
vlib_pci_addr_t pci_addr;
+ /* flow */
+ avf_flow_entry_t *flow_entries; /* pool */
+ avf_flow_lookup_entry_t *flow_lookup_entries; /* pool */
+
/* stats */
virtchnl_eth_stats_t eth_stats;
virtchnl_eth_stats_t last_cleared_eth_stats;
@@ -319,6 +341,7 @@ clib_error_t *avf_program_flow (u32 dev_instance, int is_add, u8 *rule,
format_function_t format_avf_device;
format_function_t format_avf_device_name;
format_function_t format_avf_input_trace;
+vnet_flow_dev_ops_function_t avf_flow_ops_fn;
static_always_inline avf_device_t *
avf_get_device (u32 dev_instance)
diff --git a/src/plugins/avf/avf_advanced_flow.h b/src/plugins/avf/avf_advanced_flow.h
new file mode 100644
index 00000000000..42288b7163b
--- /dev/null
+++ b/src/plugins/avf/avf_advanced_flow.h
@@ -0,0 +1,1068 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2020 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef _AVF_ADVANCED_FLOW_H_
+#define _AVF_ADVANCED_FLOW_H_
+
+#define AVF_SUCCESS (0)
+#define AVF_FAILURE (-1)
+
+#define BIT(a) (1UL << (a))
+#define BIT_ULL(a) (1ULL << (a))
+
+/* These macros are used to generate compilation errors if a structure/union
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure/union is not of the correct size, otherwise it creates an enum
+ * that is never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) \
+ enum virtchnl_static_assert_enum_##X \
+ { \
+ virtchnl_static_assert_##X = (n) / ((sizeof (struct X) == (n)) ? 1 : 0) \
+ }
+#define VIRTCHNL_CHECK_UNION_LEN(n, X) \
+  enum virtchnl_static_assert_enum_##X \
+ { \
+ virtchnl_static_assert_##X = (n) / ((sizeof (union X) == (n)) ? 1 : 0) \
+ }
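+
+/* Illustrative sketch (not part of the virtchnl ABI): if a structure's size
+ * ever drifts from the expected value, the division by zero fails the build:
+ *
+ *   struct example { u32 a; u32 b; };        // sizeof == 8
+ *   VIRTCHNL_CHECK_STRUCT_LEN (8, example);  // compiles
+ *   // VIRTCHNL_CHECK_STRUCT_LEN (12, example) would fail: division by zero
+ */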
+
+/* AVF ethernet frame types */
+#define AVF_ETHER_TYPE_IPV4 0x0800 /**< IPv4 Protocol. */
+#define AVF_ETHER_TYPE_IPV6 0x86DD /**< IPv6 Protocol. */
+
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
+#define PROTO_HDR_SHIFT 5
+#define PROTO_HDR_FIELD_START(proto_hdr_type) \
+ (proto_hdr_type << PROTO_HDR_SHIFT)
+#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
+
+/* VFs use these macros to configure each protocol header.
+ * Specify which protocol headers and protocol header fields based on
+ * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
+ * @param hdr: a struct of virtchnl_proto_hdr
+ * @param hdr_type: ETH/IPV4/TCP, etc
+ * @param field: SRC/DST/TEID/SPI, etc
+ */
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
+ ((hdr)->field_selector |= BIT ((field) &PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
+ ((hdr)->field_selector &= ~BIT ((field) &PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
+ ((hdr)->field_selector & BIT ((val) &PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)
+
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+ (VIRTCHNL_ADD_PROTO_HDR_FIELD (hdr, VIRTCHNL_PROTO_HDR_##hdr_type##_##field))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+ (VIRTCHNL_DEL_PROTO_HDR_FIELD (hdr, VIRTCHNL_PROTO_HDR_##hdr_type##_##field))
+
+#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
+ ((hdr)->type = VIRTCHNL_PROTO_HDR_##hdr_type)
+#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) (((hdr)->type) >> PROTO_HDR_SHIFT)
+#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
+ ((hdr)->type == ((val) >> PROTO_HDR_SHIFT))
+#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
+ (VIRTCHNL_TEST_PROTO_HDR_TYPE (hdr, val) && \
+ VIRTCHNL_TEST_PROTO_HDR_FIELD (hdr, val))
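+
+/* Example use of the selectors above (illustrative only): mark a header
+ * descriptor as IPv4 and select its source-address field:
+ *
+ *   struct virtchnl_proto_hdr h = { 0 };
+ *   VIRTCHNL_SET_PROTO_HDR_TYPE (&h, IPV4);
+ *   VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (&h, IPV4, SRC);
+ */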
+
+/* protocol */
+
+#define AVF_PROT_MAC_INNER (1ULL << 1)
+#define AVF_PROT_MAC_OUTER (1ULL << 2)
+#define AVF_PROT_VLAN_INNER (1ULL << 3)
+#define AVF_PROT_VLAN_OUTER (1ULL << 4)
+#define AVF_PROT_IPV4_INNER (1ULL << 5)
+#define AVF_PROT_IPV4_OUTER (1ULL << 6)
+#define AVF_PROT_IPV6_INNER (1ULL << 7)
+#define AVF_PROT_IPV6_OUTER (1ULL << 8)
+#define AVF_PROT_TCP_INNER (1ULL << 9)
+#define AVF_PROT_TCP_OUTER (1ULL << 10)
+#define AVF_PROT_UDP_INNER (1ULL << 11)
+#define AVF_PROT_UDP_OUTER (1ULL << 12)
+#define AVF_PROT_SCTP_INNER (1ULL << 13)
+#define AVF_PROT_SCTP_OUTER (1ULL << 14)
+#define AVF_PROT_ICMP4_INNER (1ULL << 15)
+#define AVF_PROT_ICMP4_OUTER (1ULL << 16)
+#define AVF_PROT_ICMP6_INNER (1ULL << 17)
+#define AVF_PROT_ICMP6_OUTER (1ULL << 18)
+#define AVF_PROT_VXLAN (1ULL << 19)
+#define AVF_PROT_NVGRE (1ULL << 20)
+#define AVF_PROT_GTPU (1ULL << 21)
+#define AVF_PROT_ESP (1ULL << 22)
+#define AVF_PROT_AH (1ULL << 23)
+#define AVF_PROT_L2TPV3OIP (1ULL << 24)
+#define AVF_PROT_PFCP (1ULL << 25)
+
+/* field */
+
+#define AVF_SMAC (1ULL << 63)
+#define AVF_DMAC (1ULL << 62)
+#define AVF_ETHERTYPE (1ULL << 61)
+#define AVF_IP_SRC (1ULL << 60)
+#define AVF_IP_DST (1ULL << 59)
+#define AVF_IP_PROTO (1ULL << 58)
+#define AVF_IP_TTL (1ULL << 57)
+#define AVF_IP_TOS (1ULL << 56)
+#define AVF_SPORT (1ULL << 55)
+#define AVF_DPORT (1ULL << 54)
+#define AVF_ICMP_TYPE (1ULL << 53)
+#define AVF_ICMP_CODE (1ULL << 52)
+#define AVF_VXLAN_VNI (1ULL << 51)
+#define AVF_NVGRE_TNI (1ULL << 50)
+#define AVF_GTPU_TEID (1ULL << 49)
+#define AVF_GTPU_QFI (1ULL << 48)
+#define AVF_ESP_SPI (1ULL << 47)
+#define AVF_AH_SPI (1ULL << 46)
+#define AVF_L2TPV3OIP_SESSION_ID (1ULL << 45)
+#define AVF_PFCP_S_FIELD (1ULL << 44)
+#define AVF_PFCP_SEID (1ULL << 43)
+
+/* input set */
+
+#define AVF_INSET_NONE 0ULL
+
+/* non-tunnel */
+
+#define AVF_INSET_SMAC (AVF_PROT_MAC_OUTER | AVF_SMAC)
+#define AVF_INSET_DMAC (AVF_PROT_MAC_OUTER | AVF_DMAC)
+#define AVF_INSET_VLAN_INNER (AVF_PROT_VLAN_INNER)
+#define AVF_INSET_VLAN_OUTER (AVF_PROT_VLAN_OUTER)
+#define AVF_INSET_ETHERTYPE (AVF_ETHERTYPE)
+
+#define AVF_INSET_IPV4_SRC (AVF_PROT_IPV4_OUTER | AVF_IP_SRC)
+#define AVF_INSET_IPV4_DST (AVF_PROT_IPV4_OUTER | AVF_IP_DST)
+#define AVF_INSET_IPV4_TOS (AVF_PROT_IPV4_OUTER | AVF_IP_TOS)
+#define AVF_INSET_IPV4_PROTO (AVF_PROT_IPV4_OUTER | AVF_IP_PROTO)
+#define AVF_INSET_IPV4_TTL (AVF_PROT_IPV4_OUTER | AVF_IP_TTL)
+#define AVF_INSET_IPV6_SRC (AVF_PROT_IPV6_OUTER | AVF_IP_SRC)
+#define AVF_INSET_IPV6_DST (AVF_PROT_IPV6_OUTER | AVF_IP_DST)
+#define AVF_INSET_IPV6_NEXT_HDR (AVF_PROT_IPV6_OUTER | AVF_IP_PROTO)
+#define AVF_INSET_IPV6_HOP_LIMIT (AVF_PROT_IPV6_OUTER | AVF_IP_TTL)
+#define AVF_INSET_IPV6_TC (AVF_PROT_IPV6_OUTER | AVF_IP_TOS)
+
+#define AVF_INSET_TCP_SRC_PORT (AVF_PROT_TCP_OUTER | AVF_SPORT)
+#define AVF_INSET_TCP_DST_PORT (AVF_PROT_TCP_OUTER | AVF_DPORT)
+#define AVF_INSET_UDP_SRC_PORT (AVF_PROT_UDP_OUTER | AVF_SPORT)
+#define AVF_INSET_UDP_DST_PORT (AVF_PROT_UDP_OUTER | AVF_DPORT)
+#define AVF_INSET_SCTP_SRC_PORT (AVF_PROT_SCTP_OUTER | AVF_SPORT)
+#define AVF_INSET_SCTP_DST_PORT (AVF_PROT_SCTP_OUTER | AVF_DPORT)
+#define AVF_INSET_ICMP4_SRC_PORT (AVF_PROT_ICMP4_OUTER | AVF_SPORT)
+#define AVF_INSET_ICMP4_DST_PORT (AVF_PROT_ICMP4_OUTER | AVF_DPORT)
+#define AVF_INSET_ICMP6_SRC_PORT (AVF_PROT_ICMP6_OUTER | AVF_SPORT)
+#define AVF_INSET_ICMP6_DST_PORT (AVF_PROT_ICMP6_OUTER | AVF_DPORT)
+#define AVF_INSET_ICMP4_TYPE (AVF_PROT_ICMP4_OUTER | AVF_ICMP_TYPE)
+#define AVF_INSET_ICMP4_CODE (AVF_PROT_ICMP4_OUTER | AVF_ICMP_CODE)
+#define AVF_INSET_ICMP6_TYPE (AVF_PROT_ICMP6_OUTER | AVF_ICMP_TYPE)
+#define AVF_INSET_ICMP6_CODE (AVF_PROT_ICMP6_OUTER | AVF_ICMP_CODE)
+#define AVF_INSET_GTPU_TEID (AVF_PROT_GTPU | AVF_GTPU_TEID)
+#define AVF_INSET_GTPU_QFI (AVF_PROT_GTPU | AVF_GTPU_QFI)
+#define AVF_INSET_ESP_SPI (AVF_PROT_ESP | AVF_ESP_SPI)
+#define AVF_INSET_AH_SPI (AVF_PROT_AH | AVF_AH_SPI)
+#define AVF_INSET_L2TPV3OIP_SESSION_ID \
+ (AVF_PROT_L2TPV3OIP | AVF_L2TPV3OIP_SESSION_ID)
+#define AVF_INSET_PFCP_S_FIELD (AVF_PROT_PFCP | AVF_PFCP_S_FIELD)
+#define AVF_INSET_PFCP_SEID (AVF_PROT_PFCP | AVF_PFCP_S_FIELD | AVF_PFCP_SEID)
+
+/* Protocol header type within a packet segment. A segment consists of one or
+ * more protocol headers that make up a logical group of protocol headers. Each
+ * logical group of protocol headers encapsulates or is encapsulated using/by
+ * tunneling or encapsulation protocols for network virtualization.
+ */
+enum virtchnl_proto_hdr_type
+{
+ VIRTCHNL_PROTO_HDR_NONE,
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_S_VLAN,
+ VIRTCHNL_PROTO_HDR_C_VLAN,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_TCP,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_SCTP,
+ VIRTCHNL_PROTO_HDR_GTPU_IP,
+ VIRTCHNL_PROTO_HDR_GTPU_EH,
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+ VIRTCHNL_PROTO_HDR_PPPOE,
+ VIRTCHNL_PROTO_HDR_L2TPV3,
+ VIRTCHNL_PROTO_HDR_ESP,
+ VIRTCHNL_PROTO_HDR_AH,
+ VIRTCHNL_PROTO_HDR_PFCP,
+};
+
+/* Protocol header field within a protocol header. */
+enum virtchnl_proto_hdr_field
+{
+ /* ETHER */
+ VIRTCHNL_PROTO_HDR_ETH_SRC = PROTO_HDR_FIELD_START (VIRTCHNL_PROTO_HDR_ETH),
+ VIRTCHNL_PROTO_HDR_ETH_DST,
+ VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
+ /* S-VLAN */
+ VIRTCHNL_PROTO_HDR_S_VLAN_ID =
+ PROTO_HDR_FIELD_START (VIRTCHNL_PROTO_HDR_S_VLAN),
+ /* C-VLAN */
+ VIRTCHNL_PROTO_HDR_C_VLAN_ID =
+ PROTO_HDR_FIELD_START (VIRTCHNL_PROTO_HDR_C_VLAN),
+ /* IPV4 */
+ VIRTCHNL_PROTO_HDR_IPV4_SRC =
+ PROTO_HDR_FIELD_START (VIRTCHNL_PROTO_HDR_IPV4),
+ VIRTCHNL_PROTO_HDR_IPV4_DST,
+ VIRTCHNL_PROTO_HDR_IPV4_DSCP,
+ VIRTCHNL_PROTO_HDR_IPV4_TTL,
+ VIRTCHNL_PROTO_HDR_IPV4_PROT,
+ /* IPV6 */
+ VIRTCHNL_PROTO_HDR_IPV6_SRC =
+ PROTO_HDR_FIELD_START (VIRTCHNL_PROTO_HDR_IPV6),
+ VIRTCHNL_PROTO_HDR_IPV6_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_TC,
+ VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
+ VIRTCHNL_PROTO_HDR_IPV6_PROT,
+ /* TCP */
+ VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
+ PROTO_HDR_FIELD_START (VIRTCHNL_PROTO_HDR_TCP),
+ VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
+ /* UDP */
+ VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
+ PROTO_HDR_FIELD_START (VIRTCHNL_PROTO_HDR_UDP),
+ VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
+ /* SCTP */
+ VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
+ PROTO_HDR_FIELD_START (VIRTCHNL_PROTO_HDR_SCTP),
+ VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
+ /* GTPU_IP */
+ VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
+ PROTO_HDR_FIELD_START (VIRTCHNL_PROTO_HDR_GTPU_IP),
+ /* GTPU_EH */
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
+ PROTO_HDR_FIELD_START (VIRTCHNL_PROTO_HDR_GTPU_EH),
+ VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
+ /* PPPOE */
+ VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
+ PROTO_HDR_FIELD_START (VIRTCHNL_PROTO_HDR_PPPOE),
+ /* L2TPV3 */
+ VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
+ PROTO_HDR_FIELD_START (VIRTCHNL_PROTO_HDR_L2TPV3),
+ /* ESP */
+ VIRTCHNL_PROTO_HDR_ESP_SPI = PROTO_HDR_FIELD_START (VIRTCHNL_PROTO_HDR_ESP),
+ /* AH */
+ VIRTCHNL_PROTO_HDR_AH_SPI = PROTO_HDR_FIELD_START (VIRTCHNL_PROTO_HDR_AH),
+ /* PFCP */
+ VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
+ PROTO_HDR_FIELD_START (VIRTCHNL_PROTO_HDR_PFCP),
+ VIRTCHNL_PROTO_HDR_PFCP_SEID,
+};
+
+struct virtchnl_proto_hdr
+{
+ enum virtchnl_proto_hdr_type type;
+ u32 field_selector; /* a bit mask to select field for header type */
+ u8 buffer[64];
+ /**
+ * binary buffer in network order for specific header type.
+ * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, a IPv4
+ * header is expected to be copied into the buffer.
+ */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN (72, virtchnl_proto_hdr);
+
+struct virtchnl_proto_hdrs
+{
+ u8 tunnel_level;
+ /**
+   * specifies where the protocol headers start from.
+ * 0 - from the outer layer
+ * 1 - from the first inner layer
+ * 2 - from the second inner layer
+ * ....
+ **/
+  int count; /* the number of proto layers must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
+ struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN (2312, virtchnl_proto_hdrs);
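+
+/* For example (illustrative): a rule matching plain ETH/IPV4/UDP would use
+ * tunnel_level 0 and count 3, with proto_hdr[0..2] describing the layers in
+ * order. */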
+
+/* VIRTCHNL_OP_CONFIG_RSS_KEY
+ * VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct virtchnl_rss_key
+{
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN (6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut
+{
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN (6, virtchnl_rss_lut);
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena
+{
+ u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN (8, virtchnl_rss_hena);
+
+/* Type of RSS algorithm */
+enum virtchnl_rss_algorithm
+{
+ VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
+ VIRTCHNL_RSS_ALG_XOR_ASYMMETRIC = 1,
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
+ VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
+};
+
+struct virtchnl_rss_cfg
+{
+ struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
+ enum virtchnl_rss_algorithm rss_algorithm; /* rss algorithm type */
+ u8 reserved[128]; /* reserve for future */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN (2444, virtchnl_rss_cfg);
+
+enum virtchnl_action
+{
+ /* action types */
+ VIRTCHNL_ACTION_DROP = 0,
+ VIRTCHNL_ACTION_TC_REDIRECT,
+ VIRTCHNL_ACTION_PASSTHRU,
+ VIRTCHNL_ACTION_QUEUE,
+ VIRTCHNL_ACTION_Q_REGION,
+ VIRTCHNL_ACTION_MARK,
+ VIRTCHNL_ACTION_COUNT,
+ VIRTCHNL_ACTION_NONE,
+};
+
+/* action configuration for FDIR */
+struct virtchnl_filter_action
+{
+ enum virtchnl_action type;
+ union
+ {
+ /* used for queue and qgroup action */
+ struct
+ {
+ u16 index;
+ u8 region;
+ } queue;
+ /* used for count action */
+ struct
+ {
+ /* share counter ID with other flow rules */
+ u8 shared;
+ u32 id; /* counter ID */
+ } count;
+ /* used for mark action */
+ u32 mark_id;
+ u8 reserve[32];
+ } act_conf;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN (36, virtchnl_filter_action);
+
+#define VIRTCHNL_MAX_NUM_ACTIONS 8
+
+struct virtchnl_filter_action_set
+{
+  /* action count must be less than VIRTCHNL_MAX_NUM_ACTIONS */
+ int count;
+ struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN (292, virtchnl_filter_action_set);
+
+/* pattern and action for FDIR rule */
+struct virtchnl_fdir_rule
+{
+ struct virtchnl_proto_hdrs proto_hdrs;
+ struct virtchnl_filter_action_set action_set;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN (2604, virtchnl_fdir_rule);
+
+/* query information to retrieve fdir rule counters.
+ * PF will fill out this structure to reset counter.
+ */
+struct virtchnl_fdir_query_info
+{
+ u32 match_packets_valid : 1;
+ u32 match_bytes_valid : 1;
+ u32 reserved : 30; /* Reserved, must be zero. */
+ u32 pad;
+ u64 matched_packets; /* Number of packets for this rule. */
+ u64 matched_bytes; /* Number of bytes through this rule. */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN (24, virtchnl_fdir_query_info);
+
+/* Status returned to VF after VF requests FDIR commands
+ * VIRTCHNL_FDIR_SUCCESS
+ * VF FDIR related request was successfully done by PF
+ * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
+ * OP_ADD_FDIR_FILTER request failed due to no hardware resource.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
+ * OP_ADD_FDIR_FILTER request failed because the rule already exists.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
+ * OP_ADD_FDIR_FILTER request failed due to a conflict with an existing rule.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
+ * OP_DEL_FDIR_FILTER request failed because the rule doesn't exist.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
+ * OP_ADD_FDIR_FILTER request failed due to parameter validation
+ * or a feature the HW doesn't support.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
+ * OP_ADD/DEL_FDIR_FILTER request failed because programming timed out.
+ *
+ * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
+ * OP_QUERY_FDIR_FILTER request failed due to parameter validation,
+ * for example, the VF queried the counter of a rule that has no counter
+ * action.
+ */
+enum virtchnl_fdir_prgm_status
+{
+ VIRTCHNL_FDIR_SUCCESS = 0,
+ VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
+ VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
+ VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
+ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
+ VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
+ VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
+ VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
+ VIRTCHNL_FDIR_FAILURE_MAX,
+};
+
+/* VIRTCHNL_OP_ADD_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only and rule_cfg. PF will return flow_id
+ * if the request is successfully done and return the status to VF.
+ */
+struct virtchnl_fdir_add
+{
+ u16 vsi_id; /* INPUT */
+ /*
+ * 1 for validating a fdir rule, 0 for creating a fdir rule.
+   * Validate and create share one op: VIRTCHNL_OP_ADD_FDIR_FILTER.
+ */
+ u16 validate_only; /* INPUT */
+ u32 flow_id; /* OUTPUT */
+ struct virtchnl_fdir_rule rule_cfg; /* INPUT */
+ enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN (2616, virtchnl_fdir_add);
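+
+/* Sketch of a VF-side request (names illustrative; transport elided):
+ *
+ *   struct virtchnl_fdir_add req = { 0 };
+ *   req.vsi_id = vsi_id;
+ *   req.validate_only = 1;   // dry-run; set to 0 to actually program
+ *   // fill req.rule_cfg.proto_hdrs and req.rule_cfg.action_set here,
+ *   // then send via VIRTCHNL_OP_ADD_FDIR_FILTER; the PF reply carries
+ *   // flow_id and status
+ */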
+
+/* VIRTCHNL_OP_DEL_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id
+ * and flow_id. PF will return del_status to VF.
+ */
+struct virtchnl_fdir_del
+{
+ u16 vsi_id; /* INPUT */
+ u16 pad;
+ u32 flow_id; /* INPUT */
+ enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN (12, virtchnl_fdir_del);
+
+/* VIRTCHNL_OP_QUERY_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id,
+ * flow_id and reset_counter. PF will return query_info
+ * and query_status to VF.
+ */
+struct virtchnl_fdir_query
+{
+ u16 vsi_id; /* INPUT */
+ u16 pad1[3];
+ u32 flow_id; /* INPUT */
+ u32 reset_counter : 1; /* INPUT */
+ struct virtchnl_fdir_query_info query_info; /* OUTPUT */
+ enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+ u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN (48, virtchnl_fdir_query);
+
+/**
+ * These headers are used temporarily and may be replaced by OS packet
+ * definitions later. They add flow error, pattern and action definitions.
+ */
+
+/**
+ * Verbose error types.
+ *
+ * Most of them provide the type of the object referenced by struct
+ * avf_flow_error.cause.
+ */
+enum avf_flow_error_type
+{
+ AVF_FLOW_ERROR_TYPE_NONE, /**< No error. */
+ AVF_FLOW_ERROR_TYPE_UNSPECIFIED, /**< Cause unspecified. */
+ AVF_FLOW_ERROR_TYPE_HANDLE, /**< Flow rule (handle). */
+ AVF_FLOW_ERROR_TYPE_ATTR_GROUP, /**< Group field. */
+ AVF_FLOW_ERROR_TYPE_ATTR_PRIORITY, /**< Priority field. */
+ AVF_FLOW_ERROR_TYPE_ATTR_INGRESS, /**< Ingress field. */
+ AVF_FLOW_ERROR_TYPE_ATTR_EGRESS, /**< Egress field. */
+ AVF_FLOW_ERROR_TYPE_ATTR_TRANSFER, /**< Transfer field. */
+ AVF_FLOW_ERROR_TYPE_ATTR, /**< Attributes structure. */
+ AVF_FLOW_ERROR_TYPE_ITEM_NUM, /**< Pattern length. */
+ AVF_FLOW_ERROR_TYPE_ITEM_SPEC, /**< Item specification. */
+ AVF_FLOW_ERROR_TYPE_ITEM_LAST, /**< Item specification range. */
+ AVF_FLOW_ERROR_TYPE_ITEM_MASK, /**< Item specification mask. */
+ AVF_FLOW_ERROR_TYPE_ITEM, /**< Specific pattern item. */
+ AVF_FLOW_ERROR_TYPE_ACTION_NUM, /**< Number of actions. */
+ AVF_FLOW_ERROR_TYPE_ACTION_CONF, /**< Action configuration. */
+ AVF_FLOW_ERROR_TYPE_ACTION, /**< Specific action. */
+};
+
+/**
+ * Verbose error structure definition.
+ * Both cause and message may be NULL regardless of the error type.
+ */
+struct avf_flow_error
+{
+ enum avf_flow_error_type type; /**< Cause field and error types. */
+ const void *cause; /**< Object responsible for the error. */
+ const char *message; /**< Human-readable error message. */
+};
+
+#define AVF_ETHER_ADDR_LEN 6
+struct avf_ether_addr
+{
+ u8 addr_bytes[AVF_ETHER_ADDR_LEN]; /**< Addr bytes in tx order */
+} __attribute__ ((__aligned__ (2)));
+
+struct avf_flow_eth_hdr
+{
+ struct avf_ether_addr dst; /**< Destination MAC. */
+ struct avf_ether_addr src; /**< Source MAC. */
+ u16 type; /**< EtherType or TPID. */
+};
+
+/**
+ * IPv4 Header
+ */
+struct avf_ipv4_hdr
+{
+ u8 version_ihl; /**< version and header length */
+ u8 type_of_service; /**< type of service */
+ u16 total_length; /**< length of packet */
+ u16 packet_id; /**< packet ID */
+ u16 fragment_offset; /**< fragmentation offset */
+ u8 time_to_live; /**< time to live */
+ u8 next_proto_id; /**< protocol ID */
+ u16 hdr_checksum; /**< header checksum */
+ u32 src_addr; /**< source address */
+ u32 dst_addr; /**< destination address */
+} __attribute__ ((__packed__));
+
+/**
+ * IPv6 Header
+ */
+struct avf_ipv6_hdr
+{
+ u32 vtc_flow; /**< IP version, traffic class & flow label. */
+ u16 payload_len; /**< IP packet length - includes header size */
+ u8 proto; /**< Protocol, next header. */
+ u8 hop_limits; /**< Hop limits. */
+ u8 src_addr[16]; /**< IP address of source host. */
+ u8 dst_addr[16]; /**< IP address of destination host(s). */
+} __attribute__ ((__packed__));
+
+/**
+ * TCP Header
+ */
+struct avf_tcp_hdr
+{
+ u16 src_port; /**< TCP source port. */
+ u16 dst_port; /**< TCP destination port. */
+ u32 sent_seq; /**< TX data sequence number. */
+ u32 recv_ack; /**< RX data acknowledgment sequence number. */
+ u8 data_off; /**< Data offset. */
+ u8 tcp_flags; /**< TCP flags */
+ u16 rx_win; /**< RX flow control window. */
+ u16 cksum; /**< TCP checksum. */
+ u16 tcp_urp; /**< TCP urgent pointer, if any. */
+} __attribute__ ((__packed__));
+
+/**
+ * UDP Header
+ */
+struct avf_udp_hdr
+{
+ u16 src_port; /**< UDP source port. */
+ u16 dst_port; /**< UDP destination port. */
+ u16 dgram_len; /**< UDP datagram length */
+ u16 dgram_cksum; /**< UDP datagram checksum */
+} __attribute__ ((__packed__));
+
+/**
+ * Match IP Authentication Header (AH), RFC 4302
+ */
+struct avf_ah_hdr
+{
+ u32 next_hdr : 8;
+ u32 payload_len : 8;
+ u32 reserved : 16;
+ u32 spi;
+ u32 seq_num;
+};
+
+/**
+ * ESP Header
+ */
+struct avf_esp_hdr
+{
+ u32 spi; /**< Security Parameters Index */
+ u32 seq; /**< packet sequence number */
+} __attribute__ ((__packed__));
+
+/**
+ * Match PFCP Header
+ */
+struct avf_pfcp_hdr
+{
+ u8 s_field;
+ u8 msg_type;
+ u16 msg_len;
+ u64 seid;
+};
+
+/**
+ * Matches a L2TPv3 over IP header.
+ */
+struct avf_l2tpv3oip_hdr
+{
+ u32 session_id; /**< Session ID. */
+};
+
+/**
+ * Matches a GTP PDU extension header with type 0x85.
+ */
+struct avf_gtp_psc_hdr
+{
+ u8 pdu_type; /**< PDU type. */
+ u8 qfi; /**< QoS flow identifier. */
+};
+
+/**
+ * Matches a GTPv1 header.
+ */
+struct avf_gtp_hdr
+{
+ /**
+ * Version (3b), protocol type (1b), reserved (1b),
+ * Extension header flag (1b),
+ * Sequence number flag (1b),
+ * N-PDU number flag (1b).
+ */
+ u8 v_pt_rsv_flags;
+ u8 msg_type; /**< Message type. */
+ u16 msg_len; /**< Message length. */
+ u32 teid; /**< Tunnel endpoint identifier. */
+};
+
+/**
+ * SCTP Header
+ */
+struct avf_sctp_hdr
+{
+ u16 src_port; /**< Source port. */
+  u16 dst_port; /**< Destination port. */
+ u32 tag; /**< Validation tag. */
+ u32 cksum; /**< Checksum. */
+} __attribute__ ((__packed__));
+
+/**
+ * Hash function types.
+ */
+enum avf_eth_hash_function
+{
+ AVF_ETH_HASH_FUNCTION_DEFAULT = 0,
+ AVF_ETH_HASH_FUNCTION_TOEPLITZ, /**< Toeplitz */
+ AVF_ETH_HASH_FUNCTION_SIMPLE_XOR, /**< Simple XOR */
+ /**
+ * Symmetric Toeplitz: src, dst will be replaced by
+ * xor(src, dst). For the case with src/dst only,
+ * src or dst address will xor with zero pair.
+ */
+ AVF_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ,
+ AVF_ETH_HASH_FUNCTION_MAX,
+};
+
+struct avf_flow_action_rss
+{
+ enum avf_eth_hash_function func; /**< RSS hash function to apply. */
+
+ u32 level;
+ u64 types; /**< Specific RSS hash types (see ETH_RSS_*). */
+ u32 key_len; /**< Hash key length in bytes. */
+ u32 queue_num; /**< Number of entries in @p queue. */
+ const u8 *key; /**< Hash key. */
+ const u16 *queue; /**< Queue indices to use. */
+};
+
+struct avf_flow_action_queue
+{
+ u16 index; /**< Queue index to use. */
+};
+
+struct avf_flow_action_mark
+{
+ u32 id; /**< Integer value to return with packets. */
+};
+
+struct avf_flow_action
+{
+ enum virtchnl_action type; /**< Action type. */
+ const void *conf; /**< Pointer to action configuration object. */
+};
+
+struct avf_flow_item
+{
+ enum virtchnl_proto_hdr_type type; /**< Item type. */
+ const void *spec; /**< Pointer to item specification structure. */
+ const void *mask; /**< Bit-mask applied to spec and last. */
+};
+
+struct avf_fdir_conf
+{
+ struct virtchnl_fdir_add add_fltr;
+ struct virtchnl_fdir_del del_fltr;
+ u64 input_set;
+ u32 flow_id;
+ u32 mark_flag;
+ u32 vsi;
+ u32 nb_rx_queues;
+};
+
+enum virthnl_adv_ops
+{
+ VIRTCHNL_ADV_OP_ADD_FDIR_FILTER = 0,
+ VIRTCHNL_ADV_OP_DEL_FDIR_FILTER,
+ VIRTCHNL_ADV_OP_QUERY_FDIR_FILTER,
+ VIRTCHNL_ADV_OP_MAX
+};
+
+/* virtual channel op handler */
+typedef int (*avf_fdir_vc_op_t) (void *vc_hdl, enum virthnl_adv_ops vc_op,
+ void *in, u32 in_len, void *out, u32 out_len);
+
+/* virtual channel context object */
+struct avf_fdir_vc_ctx
+{
+ void *vc_hdl; /* virtual channel handler */
+ avf_fdir_vc_op_t vc_op;
+};
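+
+/* Example wiring (mirrors the use in flow.c): vc_hdl points at the device
+ * instance and vc_op forwards the request to the PF:
+ *
+ *   struct avf_fdir_vc_ctx ctx = {
+ *     .vc_hdl = &dev_instance,
+ *     .vc_op = avf_fdir_vc_op_callback,
+ *   };
+ */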
+
+/**
+ * Create a rule cfg object.
+ *
+ * @param rcfg
+ *   the created rule cfg object.
+ * @param tunnel
+ *   tunnel level where the protocol headers start from
+ *   0 - from the outermost layer.
+ *   1 - from the first inner layer.
+ *   2 - from the second inner layer.
+ *   ...
+ * @param vsi
+ * avf vsi id
+ *
+ * @param nrxq
+ * the rx queue number of the avf
+ *
+ * @return
+ * 0 = successful.
+ * < 0 = failure.
+ */
+int avf_fdir_rcfg_create (struct avf_fdir_conf **rcfg, int tunnel_level,
+ u16 vsi, u16 nrxq);
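+
+/* Typical call sequence (sketch; error handling and ctx setup elided):
+ *
+ *   struct avf_fdir_conf *rcfg;
+ *   avf_fdir_rcfg_create (&rcfg, 0, vsi, nrxq);
+ *   avf_fdir_parse_pattern (rcfg, items, &err);
+ *   avf_fdir_parse_action (actions, rcfg, &err);
+ *   avf_fdir_rcfg_validate (&ctx, rcfg);
+ *   avf_fdir_rule_create (&ctx, rcfg);
+ *   ...
+ *   avf_fdir_rule_destroy (&ctx, rcfg);
+ *   avf_fdir_rcfg_destroy (rcfg);
+ */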
+
+/**
+ * Destroy a rule cfg object.
+ *
+ * @param rcfg
+ * the cfg object to destroy.
+ *
+ * @return
+ * 0 = successful.
+ * < 0 = failure.
+ */
+int avf_fdir_rcfg_destroy (struct avf_fdir_conf *rcfg);
+
+/**
+ * Set the match protocol header on a specific layer; any header already set
+ * on that layer will be overwritten.
+ *
+ * @param rcfg
+ * the rule cfg object
+ * @param layer
+ * layer of the protocol header.
+ * @param hdr
+ * protocol header type.
+ *
+ * @return
+ * 0 = successful.
+ * < 0 = failure.
+ */
+int avf_fdir_rcfg_set_hdr (struct avf_fdir_conf *rcfg, int layer,
+ enum virtchnl_proto_hdr_type hdr);
+
+/**
+ * Set a match field on a specific protocol layer; if any match field is
+ * already set on this layer, it will be overwritten.
+ *
+ * @param rcfg
+ * the rule cfg object
+ * @param layer
+ * layer of the protocol header.
+ * @param item
+ * flow item
+ * @param error
+ * save error cause
+ *
+ * @return
+ * 0 = successful.
+ * < 0 = failure.
+ */
+int avf_fdir_rcfg_set_field (struct avf_fdir_conf *rcfg, int layer,
+ struct avf_flow_item *item,
+ struct avf_flow_error *error);
+
+/**
+ * Set the action to queue (group); conflicts with the drop action.
+ *
+ * @param rcfg
+ * rule cfg object
+ * @param queue
+ * queue id.
+ * @param size
+ *   queue group size; must be a power of 2 (2^n). 1 means a single queue.
+ * @param act_idx
+ * action index
+ *
+ * @return
+ * 0 = successful.
+ * < 0 = failure.
+ */
+int avf_fdir_rcfg_act_queue (struct avf_fdir_conf *rcfg, int queue, int size,
+ int act_idx);
+
+/**
+ * Set the action to a queue region (group); conflicts with the drop action.
+ *
+ * @param rcfg
+ * the rule cfg object
+ * @param act
+ * flow actions
+ * @param act_idx
+ * action index
+ * @param error
+ *   save error cause
+ *
+ * @return
+ * 0 = successful.
+ * < 0 = failure.
+ */
+int avf_fdir_parse_action_qregion (struct avf_fdir_conf *rcfg,
+ const struct avf_flow_action *act,
+ int act_idx, struct avf_flow_error *error);
+
+/**
+ * Set the action to drop; conflicts with the to-queue (group) action.
+ *
+ * @param rcfg
+ * the rule cfg object
+ * @param act_idx
+ * action index
+ *
+ * @return
+ * 0 = successful.
+ * < 0 = failure.
+ */
+int avf_fdir_rcfg_act_drop (struct avf_fdir_conf *rcfg, int act_idx);
+
+/**
+ * Set the action to mark; it can coexist with to-queue (group) or drop actions.
+ *
+ * @param rcfg
+ * the rule cfg object
+ * @param mark
+ * a 32 bit flow mark
+ * @param act_idx
+ * action index
+ *
+ * @return
+ * 0 = successful.
+ * < 0 = failure.
+ */
+int avf_fdir_rcfg_act_mark (struct avf_fdir_conf *rcfg, const u32 mark,
+ int act_idx);
+
+/**
+ * Validate a flow rule cfg; check with the PF driver whether the rule cfg is
+ * supported or not.
+ *
+ * @param ctx
+ * virtual channel context
+ * @param rcfg
+ * the rule cfg object.
+ *
+ * @return
+ * 0 = successful.
+ * < 0 = failure.
+ */
+int avf_fdir_rcfg_validate (struct avf_fdir_vc_ctx *ctx,
+ struct avf_fdir_conf *rcfg);
+
+/**
+ * Create a flow rule; an FDIR rule is expected to be programmed into hardware
+ * if this returns success.
+ *
+ * @param ctx
+ * virtual channel context
+ * @param rcfg
+ * rule cfg object.
+ *
+ * @return
+ *   0 = successful.
+ * < 0 = failure.
+ */
+int avf_fdir_rule_create (struct avf_fdir_vc_ctx *ctx,
+ struct avf_fdir_conf *rcfg);
+
+/**
+ * Destroy a flow rule.
+ *
+ * @param ctx
+ * virtual channel context
+ * @param rcfg
+ * the rule cfg object.
+ *
+ * @return
+ *   0 = successful.
+ * < 0 = failure.
+ */
+int avf_fdir_rule_destroy (struct avf_fdir_vc_ctx *ctx,
+ struct avf_fdir_conf *rcfg);
+
+/*
+ * Parse avf patterns and set pattern fields.
+ *
+ * @param rcfg
+ * flow config
+ * @param avf_items
+ * pattern items
+ * @param error
+ * save error cause
+ *
+ * @return
+ * 0 = successful.
+ * < 0 = failure
+ */
+int avf_fdir_parse_pattern (struct avf_fdir_conf *rcfg,
+ struct avf_flow_item avf_items[],
+ struct avf_flow_error *error);
+
+/*
+ * Parse flow actions, set actions.
+ *
+ * @param actions
+ * flow actions
+ * @param rcfg
+ * flow config
+ * @param error
+ * save error cause
+ *
+ * @return
+ * 0 = successful.
+ * < 0 = failure
+ */
+int avf_fdir_parse_action (const struct avf_flow_action actions[],
+ struct avf_fdir_conf *rcfg,
+ struct avf_flow_error *error);
+
+/**
+ * Initialize flow error structure.
+ *
+ * @param[out] error
+ * Pointer to flow error structure (may be NULL).
+ * @param code
+ * Related error code
+ * @param type
+ * Cause field and error types.
+ * @param cause
+ * Object responsible for the error.
+ * @param message
+ * Human-readable error message.
+ *
+ * @return
+ * Negative error code (errno value)
+ */
+int avf_flow_error_set (struct avf_flow_error *error, int code,
+ enum avf_flow_error_type type, const void *cause,
+ const char *message);
+
+/*
+ * Decode an error number into a verbose error string.
+ *
+ * @param err_no
+ * error number
+ *
+ * @return
+ * Verbose error string
+ */
+char *avf_fdir_prgm_error_decode (int err_no);
+
+#endif /* _AVF_ADVANCED_FLOW_H_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/avf/avf_fdir_lib.c b/src/plugins/avf/avf_fdir_lib.c
new file mode 100644
index 00000000000..44c10dc7388
--- /dev/null
+++ b/src/plugins/avf/avf_fdir_lib.c
@@ -0,0 +1,879 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2020 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vppinfra/mem.h>
+#include "avf_advanced_flow.h"
+
+#define AVF_FDIR_IPV6_TC_OFFSET 20
+#define AVF_IPV6_TC_MASK (0xFF << AVF_FDIR_IPV6_TC_OFFSET)
+#define AVF_FDIR_MAX_QREGION_SIZE 128
+
+/*
+ * Return the 1-based position of the last (most-significant) bit set,
+ * or 0 if x is 0; e.g. the intent is fls_u32 (8) == 4.
+ */
+static inline int
+fls_u32 (u32 x)
+{
+ return (x == 0) ? 0 : 32 - count_leading_zeros (x);
+}
+
+static inline int
+ether_addr_is_zero (const struct avf_ether_addr *ea)
+{
+ const u16 *w = (const u16 *) ea;
+
+ return (w[0] | w[1] | w[2]) == 0;
+}
+
+int
+avf_fdir_rcfg_create (struct avf_fdir_conf **rcfg, int tunnel_level, u16 vsi,
+ u16 nrxq)
+{
+ (*rcfg) = clib_mem_alloc (sizeof (**rcfg));
+ if ((*rcfg) == NULL)
+ {
+ return -1;
+ }
+
+ clib_memset (*rcfg, 0, sizeof (**rcfg));
+
+ (*rcfg)->add_fltr.rule_cfg.proto_hdrs.tunnel_level = tunnel_level;
+ (*rcfg)->vsi = vsi;
+ (*rcfg)->nb_rx_queues = nrxq;
+
+ return 0;
+}
+
+int
+avf_fdir_rcfg_destroy (struct avf_fdir_conf *rcfg)
+{
+ clib_mem_free (rcfg);
+
+ return 0;
+}
+
+int
+avf_fdir_rcfg_set_hdr (struct avf_fdir_conf *rcfg, int layer,
+ enum virtchnl_proto_hdr_type hdr)
+{
+ struct virtchnl_proto_hdrs *hdrs;
+
+ hdrs = &rcfg->add_fltr.rule_cfg.proto_hdrs;
+ if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS)
+ return -1;
+
+ hdrs->proto_hdr[layer].type = hdr;
+
+ return 0;
+}
+
+int
+avf_fdir_rcfg_set_field (struct avf_fdir_conf *rcfg, int layer,
+ struct avf_flow_item *item,
+ struct avf_flow_error *error)
+{
+ const struct avf_ipv4_hdr *ipv4_spec, *ipv4_mask;
+ const struct avf_ipv6_hdr *ipv6_spec, *ipv6_mask;
+ const struct avf_udp_hdr *udp_spec, *udp_mask;
+ const struct avf_tcp_hdr *tcp_spec, *tcp_mask;
+ const struct avf_sctp_hdr *sctp_spec, *sctp_mask;
+ const struct avf_gtp_hdr *gtp_spec, *gtp_mask;
+ const struct avf_gtp_psc_hdr *gtp_psc_spec, *gtp_psc_mask;
+ const struct avf_l2tpv3oip_hdr *l2tpv3oip_spec, *l2tpv3oip_mask;
+ const struct avf_esp_hdr *esp_spec, *esp_mask;
+ const struct avf_ah_hdr *ah_spec, *ah_mask;
+ const struct avf_pfcp_hdr *pfcp_spec, *pfcp_mask;
+ const struct avf_flow_eth_hdr *eth_spec, *eth_mask;
+
+ struct virtchnl_proto_hdr *hdr;
+ enum virtchnl_proto_hdr_type type;
+ u16 ether_type;
+ int ret = 0;
+
+ u8 ipv6_addr_mask[16] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+ hdr = &rcfg->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+ type = item->type;
+
+ switch (type)
+ {
+ case VIRTCHNL_PROTO_HDR_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ hdr->type = VIRTCHNL_PROTO_HDR_ETH;
+
+ if (eth_spec && eth_mask)
+ {
+ if (!ether_addr_is_zero (&eth_mask->src) ||
+ !ether_addr_is_zero (&eth_mask->dst))
+ {
+ ret = avf_flow_error_set (error, AVF_FAILURE,
+ AVF_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid MAC_addr mask.");
+ return ret;
+ }
+
+ if (eth_mask->type)
+ {
+ if (eth_mask->type != 0xffff)
+ {
+ ret = avf_flow_error_set (error, AVF_FAILURE,
+ AVF_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid type mask.");
+ return ret;
+ }
+ }
+ }
+
+ if (eth_spec && eth_mask && eth_mask->type)
+ {
+ ether_type = clib_net_to_host_u16 (eth_spec->type);
+ if (ether_type == AVF_ETHER_TYPE_IPV4 ||
+ ether_type == AVF_ETHER_TYPE_IPV6)
+ {
+ ret = avf_flow_error_set (error, AVF_FAILURE,
+ AVF_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported ether_type.");
+ return ret;
+ }
+
+ rcfg->input_set |= AVF_INSET_ETHERTYPE;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, ETH, ETHERTYPE);
+
+ clib_memcpy (hdr->buffer, eth_spec, sizeof (*eth_spec));
+ }
+ break;
+
+ case VIRTCHNL_PROTO_HDR_IPV4:
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
+ hdr->type = VIRTCHNL_PROTO_HDR_IPV4;
+
+ if (ipv4_spec && ipv4_mask)
+ {
+ if (ipv4_mask->version_ihl || ipv4_mask->total_length ||
+ ipv4_mask->packet_id || ipv4_mask->fragment_offset ||
+ ipv4_mask->hdr_checksum)
+ {
+ ret = avf_flow_error_set (error, AVF_FAILURE,
+ AVF_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid IPv4 mask.");
+ return ret;
+ }
+
+ if (ipv4_mask->type_of_service == 0xff)
+ {
+ rcfg->input_set |= AVF_INSET_IPV4_TOS;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, DSCP);
+ }
+
+ if (ipv4_mask->next_proto_id == 0xff)
+ {
+ rcfg->input_set |= AVF_INSET_IPV4_PROTO;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, PROT);
+ }
+
+ if (ipv4_mask->time_to_live == 0xff)
+ {
+ rcfg->input_set |= AVF_INSET_IPV4_TTL;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, TTL);
+ }
+
+ if (ipv4_mask->src_addr == 0xffffffff)
+ {
+ rcfg->input_set |= AVF_INSET_IPV4_SRC;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, SRC);
+ }
+
+ if (ipv4_mask->dst_addr == 0xffffffff)
+ {
+ rcfg->input_set |= AVF_INSET_IPV4_DST;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, DST);
+ }
+
+ clib_memcpy (hdr->buffer, ipv4_spec, sizeof (*ipv4_spec));
+ }
+ break;
+
+ case VIRTCHNL_PROTO_HDR_IPV6:
+ ipv6_spec = item->spec;
+ ipv6_mask = item->mask;
+ hdr->type = VIRTCHNL_PROTO_HDR_IPV6;
+
+ if (ipv6_spec && ipv6_mask)
+ {
+ if (ipv6_mask->payload_len)
+ {
+ ret = avf_flow_error_set (error, AVF_FAILURE,
+ AVF_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid IPv6 mask");
+ return ret;
+ }
+
+ if ((ipv6_mask->vtc_flow &
+ clib_host_to_net_u32 (AVF_IPV6_TC_MASK)) ==
+ (clib_host_to_net_u32 (AVF_IPV6_TC_MASK)))
+ {
+ rcfg->input_set |= AVF_INSET_IPV6_TC;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, TC);
+ }
+
+ if (ipv6_mask->proto == 0xff)
+ {
+ rcfg->input_set |= AVF_INSET_IPV6_NEXT_HDR;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, PROT);
+ }
+
+ if (ipv6_mask->hop_limits == 0xff)
+ {
+ rcfg->input_set |= AVF_INSET_IPV6_HOP_LIMIT;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, HOP_LIMIT);
+ }
+
+ if (!clib_memcmp (ipv6_mask->src_addr, ipv6_addr_mask,
+ sizeof (ipv6_mask->src_addr)))
+ {
+ rcfg->input_set |= AVF_INSET_IPV6_SRC;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, SRC);
+ }
+ if (!clib_memcmp (ipv6_mask->dst_addr, ipv6_addr_mask,
+ sizeof (ipv6_mask->dst_addr)))
+ {
+ rcfg->input_set |= AVF_INSET_IPV6_DST;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, DST);
+	    }
+
+	  clib_memcpy (hdr->buffer, ipv6_spec, sizeof (*ipv6_spec));
+	}
+
+ break;
+
+ case VIRTCHNL_PROTO_HDR_UDP:
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+ hdr->type = VIRTCHNL_PROTO_HDR_UDP;
+
+ if (udp_spec && udp_mask)
+ {
+ if (udp_mask->dgram_len || udp_mask->dgram_cksum)
+ {
+ ret = avf_flow_error_set (error, AVF_FAILURE,
+ AVF_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid UDP mask");
+ return ret;
+	    }
+
+ if (udp_mask->src_port == 0xffff)
+ {
+ rcfg->input_set |= AVF_INSET_UDP_SRC_PORT;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, UDP, SRC_PORT);
+ }
+
+ if (udp_mask->dst_port == 0xffff)
+ {
+ rcfg->input_set |= AVF_INSET_UDP_DST_PORT;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, UDP, DST_PORT);
+ }
+
+ clib_memcpy (hdr->buffer, udp_spec, sizeof (*udp_spec));
+ }
+ break;
+
+ case VIRTCHNL_PROTO_HDR_TCP:
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+ hdr->type = VIRTCHNL_PROTO_HDR_TCP;
+
+ if (tcp_spec && tcp_mask)
+ {
+ if (tcp_mask->sent_seq || tcp_mask->recv_ack || tcp_mask->data_off ||
+ tcp_mask->tcp_flags || tcp_mask->rx_win || tcp_mask->cksum ||
+ tcp_mask->tcp_urp)
+ {
+ ret = avf_flow_error_set (error, AVF_FAILURE,
+ AVF_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid TCP mask");
+ return ret;
+ }
+
+ if (tcp_mask->src_port == 0xffff)
+ {
+ rcfg->input_set |= AVF_INSET_TCP_SRC_PORT;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, TCP, SRC_PORT);
+ }
+
+ if (tcp_mask->dst_port == 0xffff)
+ {
+ rcfg->input_set |= AVF_INSET_TCP_DST_PORT;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, TCP, DST_PORT);
+ }
+
+ clib_memcpy (hdr->buffer, tcp_spec, sizeof (*tcp_spec));
+ }
+
+ break;
+
+ case VIRTCHNL_PROTO_HDR_SCTP:
+ sctp_spec = item->spec;
+ sctp_mask = item->mask;
+ hdr->type = VIRTCHNL_PROTO_HDR_SCTP;
+
+ if (sctp_spec && sctp_mask)
+ {
+ if (sctp_mask->cksum)
+ {
+ ret = avf_flow_error_set (error, AVF_FAILURE,
+ AVF_FLOW_ERROR_TYPE_ITEM, item,
+					"Invalid SCTP mask");
+ return ret;
+ }
+
+ if (sctp_mask->src_port == 0xffff)
+ {
+ rcfg->input_set |= AVF_INSET_SCTP_SRC_PORT;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, SCTP, SRC_PORT);
+ }
+
+ if (sctp_mask->dst_port == 0xffff)
+ {
+ rcfg->input_set |= AVF_INSET_SCTP_DST_PORT;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, SCTP, DST_PORT);
+ }
+
+ clib_memcpy (hdr->buffer, sctp_spec, sizeof (*sctp_spec));
+ }
+ break;
+
+ case VIRTCHNL_PROTO_HDR_GTPU_IP:
+ gtp_spec = item->spec;
+ gtp_mask = item->mask;
+ hdr->type = VIRTCHNL_PROTO_HDR_GTPU_IP;
+
+ if (gtp_spec && gtp_mask)
+ {
+ if (gtp_mask->v_pt_rsv_flags || gtp_mask->msg_type ||
+ gtp_mask->msg_len)
+ {
+ ret = avf_flow_error_set (error, AVF_FAILURE,
+ AVF_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid GTP mask");
+ return ret;
+ }
+
+ if (gtp_mask->teid == 0xffffffff)
+ {
+ rcfg->input_set |= AVF_INSET_GTPU_TEID;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, GTPU_IP, TEID);
+ }
+
+ clib_memcpy (hdr->buffer, gtp_spec, sizeof (*gtp_spec));
+ }
+
+ break;
+
+ case VIRTCHNL_PROTO_HDR_GTPU_EH:
+ gtp_psc_spec = item->spec;
+ gtp_psc_mask = item->mask;
+ hdr->type = VIRTCHNL_PROTO_HDR_GTPU_EH;
+
+ if (gtp_psc_spec && gtp_psc_mask)
+ {
+ if (gtp_psc_mask->qfi == 0xff)
+ {
+ rcfg->input_set |= AVF_INSET_GTPU_QFI;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, GTPU_EH, QFI);
+ }
+
+ clib_memcpy (hdr->buffer, gtp_psc_spec, sizeof (*gtp_psc_spec));
+ }
+
+ break;
+
+ case VIRTCHNL_PROTO_HDR_L2TPV3:
+ l2tpv3oip_spec = item->spec;
+ l2tpv3oip_mask = item->mask;
+ hdr->type = VIRTCHNL_PROTO_HDR_L2TPV3;
+
+ if (l2tpv3oip_spec && l2tpv3oip_mask)
+ {
+ if (l2tpv3oip_mask->session_id == 0xffffffff)
+ {
+	      rcfg->input_set |= AVF_INSET_L2TPV3OIP_SESSION_ID;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, L2TPV3, SESS_ID);
+ }
+
+ clib_memcpy (hdr->buffer, l2tpv3oip_spec, sizeof (*l2tpv3oip_spec));
+ }
+ break;
+
+ case VIRTCHNL_PROTO_HDR_ESP:
+ esp_spec = item->spec;
+ esp_mask = item->mask;
+ hdr->type = VIRTCHNL_PROTO_HDR_ESP;
+
+ if (esp_spec && esp_mask)
+ {
+ if (esp_mask->spi == 0xffffffff)
+ {
+ rcfg->input_set |= AVF_INSET_ESP_SPI;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, ESP, SPI);
+ }
+
+ clib_memcpy (hdr->buffer, esp_spec, sizeof (*esp_spec));
+ }
+ break;
+
+ case VIRTCHNL_PROTO_HDR_AH:
+ ah_spec = item->spec;
+ ah_mask = item->mask;
+ hdr->type = VIRTCHNL_PROTO_HDR_AH;
+
+ if (ah_spec && ah_mask)
+ {
+ if (ah_mask->spi == 0xffffffff)
+ {
+ rcfg->input_set |= AVF_INSET_AH_SPI;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, AH, SPI);
+ }
+
+ clib_memcpy (hdr->buffer, ah_spec, sizeof (*ah_spec));
+ }
+ break;
+
+ case VIRTCHNL_PROTO_HDR_PFCP:
+ pfcp_spec = item->spec;
+ pfcp_mask = item->mask;
+ hdr->type = VIRTCHNL_PROTO_HDR_PFCP;
+
+ if (pfcp_spec && pfcp_mask)
+ {
+ if (pfcp_mask->s_field == 0xff)
+ {
+ rcfg->input_set |= AVF_INSET_PFCP_S_FIELD;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, PFCP, S_FIELD);
+ }
+
+ clib_memcpy (hdr->buffer, pfcp_spec, sizeof (*pfcp_spec));
+ }
+ break;
+
+ default:
+ ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid pattern item.");
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+avf_fdir_rcfg_act_queue (struct avf_fdir_conf *rcfg, int queue, int size,
+ int act_idx)
+{
+  if (act_idx >= VIRTCHNL_MAX_NUM_ACTIONS)
+ return -AVF_FAILURE;
+
+ struct virtchnl_filter_action *filter_action;
+
+ filter_action = rcfg->add_fltr.rule_cfg.action_set.actions + act_idx;
+ filter_action->type = VIRTCHNL_ACTION_QUEUE;
+ filter_action->act_conf.queue.index = queue;
+
+ if (size == 1)
+ return 0;
+ else if (is_pow2 (size))
+ filter_action->act_conf.queue.region = fls_u32 (size) - 1;
+
+ return 0;
+}
+
+int
+avf_fdir_parse_action_qregion (struct avf_fdir_conf *rcfg,
+ const struct avf_flow_action *act, int act_idx,
+ struct avf_flow_error *error)
+{
+ const struct avf_flow_action_rss *rss = act->conf;
+ struct virtchnl_filter_action *filter_action;
+ u32 i;
+ int ret;
+
+ filter_action = rcfg->add_fltr.rule_cfg.action_set.actions + act_idx;
+
+ if (rss->queue_num <= 1)
+ {
+ ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
+ act, "Queue region size can't be 0 or 1.");
+ return ret;
+ }
+
+ /* check if queue index for queue region is continuous */
+ for (i = 0; i < rss->queue_num - 1; i++)
+ {
+ if (rss->queue[i + 1] != rss->queue[i] + 1)
+ {
+ ret =
+ avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
+ act, "Discontinuous queue region");
+ return ret;
+ }
+ }
+
+ if (rss->queue[rss->queue_num - 1] >= rcfg->nb_rx_queues)
+ {
+ ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue region indexes.");
+ return ret;
+ }
+
+ if (!(is_pow2 (rss->queue_num) &&
+ rss->queue_num <= AVF_FDIR_MAX_QREGION_SIZE))
+ {
+ ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
+ act,
+				"The region size should be any of the "
+				"following values: 1, 2, 4, 8, 16, 32, "
+				"64, 128, as long as the total number of "
+				"queues does not exceed the VSI allocation");
+ return ret;
+ }
+
+ filter_action->type = VIRTCHNL_ACTION_Q_REGION;
+ filter_action->act_conf.queue.index = rss->queue[0];
+ filter_action->act_conf.queue.region = fls_u32 (rss->queue_num) - 1;
+
+ return 0;
+}
+
+int
+avf_fdir_rcfg_act_drop (struct avf_fdir_conf *rcfg, int act_idx)
+{
+ struct virtchnl_filter_action *filter_action;
+
+  if (act_idx >= VIRTCHNL_MAX_NUM_ACTIONS)
+ return -AVF_FAILURE;
+
+ filter_action = rcfg->add_fltr.rule_cfg.action_set.actions + act_idx;
+ filter_action->type = VIRTCHNL_ACTION_DROP;
+
+ return 0;
+}
+
+int
+avf_fdir_rcfg_act_mark (struct avf_fdir_conf *rcfg, const u32 mark,
+ int act_idx)
+{
+ struct virtchnl_filter_action *filter_action;
+  if (act_idx >= VIRTCHNL_MAX_NUM_ACTIONS)
+ return -AVF_FAILURE;
+
+ filter_action = rcfg->add_fltr.rule_cfg.action_set.actions + act_idx;
+
+ filter_action->type = VIRTCHNL_ACTION_MARK;
+ filter_action->act_conf.mark_id = mark;
+
+ return 0;
+}
+
+int
+avf_fdir_rcfg_validate (struct avf_fdir_vc_ctx *ctx,
+ struct avf_fdir_conf *rcfg)
+{
+ int ret;
+ rcfg->add_fltr.vsi_id = rcfg->vsi;
+ rcfg->add_fltr.validate_only = 1;
+ struct virtchnl_fdir_add fdir_ret;
+
+ ret =
+ ctx->vc_op (ctx->vc_hdl, VIRTCHNL_ADV_OP_ADD_FDIR_FILTER, &rcfg->add_fltr,
+ sizeof (rcfg->add_fltr), &fdir_ret, sizeof (fdir_ret));
+
+ if (ret != 0)
+ {
+ return ret;
+ }
+
+ if (fdir_ret.status != VIRTCHNL_FDIR_SUCCESS)
+ {
+ ret = -fdir_ret.status;
+ }
+
+ return ret;
+}
+
+int
+avf_fdir_rule_create (struct avf_fdir_vc_ctx *ctx, struct avf_fdir_conf *rcfg)
+{
+ int ret;
+ rcfg->add_fltr.vsi_id = rcfg->vsi;
+ rcfg->add_fltr.validate_only = 0;
+ struct virtchnl_fdir_add fdir_ret;
+
+ ret =
+ ctx->vc_op (ctx->vc_hdl, VIRTCHNL_ADV_OP_ADD_FDIR_FILTER, &rcfg->add_fltr,
+ sizeof (rcfg->add_fltr), &fdir_ret, sizeof (fdir_ret));
+
+ if (ret != 0)
+ {
+ return ret;
+ }
+
+ rcfg->flow_id = fdir_ret.flow_id;
+
+ if (fdir_ret.status != VIRTCHNL_FDIR_SUCCESS)
+ {
+ ret = -fdir_ret.status;
+ }
+
+ return ret;
+}
+
+int
+avf_fdir_rule_destroy (struct avf_fdir_vc_ctx *ctx, struct avf_fdir_conf *rcfg)
+{
+ int ret;
+ struct virtchnl_fdir_del fdir_ret;
+ rcfg->del_fltr.vsi_id = rcfg->vsi;
+ rcfg->del_fltr.flow_id = rcfg->flow_id;
+
+ ret =
+ ctx->vc_op (ctx->vc_hdl, VIRTCHNL_ADV_OP_DEL_FDIR_FILTER, &rcfg->del_fltr,
+ sizeof (rcfg->del_fltr), &fdir_ret, sizeof (fdir_ret));
+
+ if (ret != 0)
+ {
+ return ret;
+ }
+
+ if (fdir_ret.status != VIRTCHNL_FDIR_SUCCESS)
+ {
+ ret = -fdir_ret.status;
+ }
+
+ return ret;
+}
+
+int
+avf_fdir_parse_action (const struct avf_flow_action actions[],
+ struct avf_fdir_conf *rcfg,
+ struct avf_flow_error *error)
+{
+ int act_idx = 0, ret = 0;
+ u32 dest_num = 0;
+ u32 mark_num = 0;
+ u32 act_num;
+ struct virtchnl_filter_action *filter_action;
+ const struct avf_flow_action_queue *act_q;
+ const struct avf_flow_action_mark *act_msk;
+
+ struct virtchnl_fdir_rule *rule_cfg = &rcfg->add_fltr.rule_cfg;
+
+ for (; actions->type != VIRTCHNL_ACTION_NONE; actions++, act_idx++)
+ {
+ switch (actions->type)
+ {
+ case VIRTCHNL_ACTION_PASSTHRU:
+ dest_num++;
+ filter_action = &rule_cfg->action_set.actions[act_idx];
+ filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
+ rule_cfg->action_set.count++;
+ break;
+
+ case VIRTCHNL_ACTION_DROP:
+ dest_num++;
+ ret = avf_fdir_rcfg_act_drop (rcfg, act_idx);
+ if (ret)
+ return ret;
+
+ rule_cfg->action_set.count++;
+ break;
+
+ case VIRTCHNL_ACTION_QUEUE:
+ dest_num++;
+ act_q = actions->conf;
+
+ if (act_q->index >= rcfg->nb_rx_queues)
+ {
+ ret = avf_flow_error_set (error, AVF_FAILURE,
+ AVF_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid queue for FDIR.");
+	      return ret;
+ }
+
+ ret = avf_fdir_rcfg_act_queue (rcfg, act_q->index, 1, act_idx);
+ if (ret)
+ return ret;
+
+ rule_cfg->action_set.count++;
+ break;
+
+ case VIRTCHNL_ACTION_Q_REGION:
+ dest_num++;
+ filter_action = &rule_cfg->action_set.actions[act_idx];
+ ret = avf_fdir_parse_action_qregion (rcfg, actions, act_idx, error);
+ if (ret)
+ return ret;
+
+ rule_cfg->action_set.count++;
+ break;
+
+ case VIRTCHNL_ACTION_MARK:
+ mark_num++;
+ act_msk = actions->conf;
+ rcfg->mark_flag = 1;
+
+ ret = avf_fdir_rcfg_act_mark (rcfg, act_msk->id, act_idx);
+ if (ret)
+ return ret;
+
+ rule_cfg->action_set.count++;
+ break;
+
+ default:
+ ret =
+ avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid action.");
+ return ret;
+ }
+ }
+
+ if (dest_num >= 2)
+ {
+ ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
+ actions, "Unsupported action combination");
+ return ret;
+ }
+
+ if (mark_num >= 2)
+ {
+ ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
+ actions, "Too many mark actions");
+ return ret;
+ }
+
+ if (dest_num + mark_num == 0)
+ {
+ ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
+ actions, "Empty action");
+ return ret;
+ }
+
+  /* A mark-only rule is treated as mark + passthru. */
+ act_num = rule_cfg->action_set.count;
+ if (dest_num == 0)
+ {
+ filter_action = &rule_cfg->action_set.actions[act_num];
+ filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
+ rule_cfg->action_set.count = ++act_num;
+ }
+
+ return ret;
+}
+
+int
+avf_fdir_parse_pattern (struct avf_fdir_conf *rcfg,
+ struct avf_flow_item avf_items[],
+ struct avf_flow_error *error)
+{
+ int layer = 0;
+ int ret = 0;
+ struct avf_flow_item *item;
+
+ for (item = avf_items; item->type != VIRTCHNL_PROTO_HDR_NONE; item++)
+ {
+ ret = avf_fdir_rcfg_set_field (rcfg, layer, item, error);
+ if (ret)
+ return ret;
+
+ rcfg->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+ }
+
+ return ret;
+}
+
+int
+avf_flow_error_set (struct avf_flow_error *error, int code,
+ enum avf_flow_error_type type, const void *cause,
+ const char *message)
+{
+ if (error)
+ {
+ *error = (struct avf_flow_error){
+ .type = type,
+ .cause = cause,
+ .message = message,
+ };
+ }
+
+ return code;
+}
+
+char *
+avf_fdir_prgm_error_decode (int err_no)
+{
+ enum virtchnl_fdir_prgm_status status;
+ char *s = NULL;
+
+ err_no = -err_no;
+
+ if (err_no >= VIRTCHNL_FDIR_FAILURE_MAX)
+ return "Failed to program the rule due to other reasons";
+
+ status = (enum virtchnl_fdir_prgm_status) err_no;
+ switch (status)
+ {
+ case VIRTCHNL_FDIR_SUCCESS:
+      s = "Succeeded in programming rule request by PF";
+ break;
+ case VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE:
+ s = "Failed to add rule request due to no hardware resource";
+ break;
+ case VIRTCHNL_FDIR_FAILURE_RULE_EXIST:
+      s = "Failed to add rule request because the rule already exists";
+ break;
+ case VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT:
+      s = "Failed to add rule request because the rule conflicts with an "
+	  "existing rule";
+ break;
+ case VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST:
+      s = "Failed to delete rule request because this rule doesn't exist";
+ break;
+ case VIRTCHNL_FDIR_FAILURE_RULE_INVALID:
+      s = "Failed to add rule request because the hardware doesn't support it";
+ break;
+ case VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT:
+      s = "Failed to add rule request because programming timed out";
+ break;
+ case VIRTCHNL_FDIR_FAILURE_QUERY_INVALID:
+      s = "Failed to query the rule due to invalid query parameters";
+ break;
+ default:
+ s = "Failed to program the rule due to other reasons";
+ break;
+ }
+
+ return s;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/avf/device.c b/src/plugins/avf/device.c
index e4305251b23..def8a799ecb 100644
--- a/src/plugins/avf/device.c
+++ b/src/plugins/avf/device.c
@@ -25,7 +25,7 @@
#include <avf/avf.h>
#define AVF_MBOX_LEN 64
-#define AVF_MBOX_BUF_SZ 512
+#define AVF_MBOX_BUF_SZ 4096
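+/* FDIR requests (struct virtchnl_fdir_add is 2616 bytes) do not fit in the
+   previous 512-byte mailbox buffers, hence the larger size */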
#define AVF_RXQ_SZ 512
#define AVF_TXQ_SZ 512
#define AVF_ITR_INT 250
@@ -556,10 +556,11 @@ avf_op_get_vf_resources (vlib_main_t * vm, avf_device_t * ad,
virtchnl_vf_resource_t * res)
{
clib_error_t *err = 0;
- u32 bitmap = (VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF |
- VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_VLAN |
- VIRTCHNL_VF_OFFLOAD_RX_POLLING |
- VIRTCHNL_VF_CAP_ADV_LINK_SPEED);
+ u32 bitmap =
+ (VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF |
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_VLAN |
+ VIRTCHNL_VF_OFFLOAD_RX_POLLING | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
+ VIRTCHNL_VF_OFFLOAD_FDIR_PF | VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
avf_log_debug (ad, "get_vf_reqources: bitmap 0x%x", bitmap);
err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_VF_RESOURCES, &bitmap,
@@ -1830,8 +1831,7 @@ avf_program_flow (u32 dev_instance, int is_add, u8 *rule, u32 rule_len,
}
/* *INDENT-OFF* */
-VNET_DEVICE_CLASS (avf_device_class,) =
-{
+VNET_DEVICE_CLASS (avf_device_class, ) = {
.name = "Adaptive Virtual Function (AVF) interface",
.clear_counters = avf_clear_hw_interface_counters,
.format_device = format_avf_device,
@@ -1842,6 +1842,7 @@ VNET_DEVICE_CLASS (avf_device_class,) =
.mac_addr_add_del_function = avf_add_del_mac_address,
.tx_function_n_errors = AVF_TX_N_ERROR,
.tx_function_error_strings = avf_tx_func_error_strings,
+ .flow_ops_function = avf_flow_ops_fn,
};
/* *INDENT-ON* */
diff --git a/src/plugins/avf/flow.c b/src/plugins/avf/flow.c
new file mode 100644
index 00000000000..bdb07ba849b
--- /dev/null
+++ b/src/plugins/avf/flow.c
@@ -0,0 +1,421 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2020 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdbool.h>
+#include <vlib/vlib.h>
+#include <vppinfra/ring.h>
+#include <vlib/unix/unix.h>
+#include <vlib/pci/pci.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <avf/avf.h>
+#include <avf/avf_advanced_flow.h>
+
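+/*
+ * Virtual channel callback used by the fdir library: it forwards
+ * ADD/DEL filter requests to the PF through avf_program_flow ().
+ */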
+int
+avf_fdir_vc_op_callback (void *vc_hdl, enum virthnl_adv_ops vc_op, void *in,
+ u32 in_len, void *out, u32 out_len)
+{
+ u32 dev_instance = *(u32 *) vc_hdl;
+ avf_device_t *ad = avf_get_device (dev_instance);
+ clib_error_t *err = 0;
+ int is_add;
+
+ if (vc_op >= VIRTCHNL_ADV_OP_MAX)
+ {
+ return -1;
+ }
+
+ switch (vc_op)
+ {
+ case VIRTCHNL_ADV_OP_ADD_FDIR_FILTER:
+ is_add = 1;
+ break;
+ case VIRTCHNL_ADV_OP_DEL_FDIR_FILTER:
+ is_add = 0;
+ break;
+ default:
+ avf_log_err (ad, "unsupported avf virtual channel opcode %u\n",
+ (u32) vc_op);
+ return -1;
+ }
+
+ err = avf_program_flow (dev_instance, is_add, in, in_len, out, out_len);
+ if (err != 0)
+ {
+ avf_log_err (ad, "avf fdir program failed: %U", format_clib_error, err);
+ clib_error_free (err);
+ return -1;
+ }
+
+ avf_log_debug (ad, "avf fdir program success");
+ return 0;
+}
+
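+/*
+ * Translate a vnet_flow_t into an avf_fdir_conf and program it through
+ * the virtual channel. Handles IP4 n-tuple and IP4 GTPU flow types with
+ * TCP or UDP as the next protocol.
+ */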
+static int
+avf_flow_add (u32 dev_instance, vnet_flow_t *f, avf_flow_entry_t *fe)
+{
+ avf_device_t *ad = avf_get_device (dev_instance);
+ int rv = 0;
+ int ret = 0;
+ u16 src_port = 0, dst_port = 0;
+ u16 src_port_mask = 0, dst_port_mask = 0;
+ u8 protocol = IP_PROTOCOL_RESERVED;
+ bool fate = false;
+ struct avf_flow_error error;
+
+ int layer = 0;
+ int action_count = 0;
+
+ struct avf_fdir_vc_ctx vc_ctx;
+ struct avf_fdir_conf *filter;
+ struct avf_flow_item avf_items[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+ struct avf_flow_action avf_actions[VIRTCHNL_MAX_NUM_ACTIONS];
+
+ struct avf_ipv4_hdr ip4_spec, ip4_mask;
+ struct avf_tcp_hdr tcp_spec, tcp_mask;
+ struct avf_udp_hdr udp_spec, udp_mask;
+ struct avf_gtp_hdr gtp_spec, gtp_mask;
+
+ struct avf_flow_action_queue act_q;
+ struct avf_flow_action_mark act_msk;
+
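+ /* allocate a rule config bound to this VF's VSI and its rx queue count */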
+ ret = avf_fdir_rcfg_create (&filter, 0, ad->vsi_id, ad->n_rx_queues);
+ if (ret)
+ {
+ rv = VNET_FLOW_ERROR_INTERNAL;
+ goto done;
+ }
+
+ /* init a virtual channel context */
+ vc_ctx.vc_hdl = &dev_instance;
+ vc_ctx.vc_op = avf_fdir_vc_op_callback;
+
+ clib_memset (avf_items, 0, sizeof (avf_items));
+ clib_memset (avf_actions, 0, sizeof (avf_actions));
+
+ /* Ethernet Layer */
+ avf_items[layer].type = VIRTCHNL_PROTO_HDR_ETH;
+ avf_items[layer].spec = NULL;
+ layer++;
+
+ /* IPv4 Layer */
+ if ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||
+ (f->type == VNET_FLOW_TYPE_IP4_GTPU))
+ {
+ vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
+ memset (&ip4_spec, 0, sizeof (ip4_spec));
+ memset (&ip4_mask, 0, sizeof (ip4_mask));
+
+ /* IPv4 Layer */
+ avf_items[layer].type = VIRTCHNL_PROTO_HDR_IPV4;
+ avf_items[layer].spec = &ip4_spec;
+ avf_items[layer].mask = &ip4_mask;
+ layer++;
+
+ src_port = t4->src_port.port;
+ dst_port = t4->dst_port.port;
+ src_port_mask = t4->src_port.mask;
+ dst_port_mask = t4->dst_port.mask;
+ protocol = t4->protocol.prot;
+
+ if (t4->src_addr.mask.as_u32)
+ {
+ ip4_spec.src_addr = t4->src_addr.addr.as_u32;
+ ip4_mask.src_addr = t4->src_addr.mask.as_u32;
+ }
+ if (t4->dst_addr.mask.as_u32)
+ {
+ ip4_spec.dst_addr = t4->dst_addr.addr.as_u32;
+ ip4_mask.dst_addr = t4->dst_addr.mask.as_u32;
+ }
+ }
+
+ if (protocol == IP_PROTOCOL_TCP)
+ {
+ memset (&tcp_spec, 0, sizeof (tcp_spec));
+ memset (&tcp_mask, 0, sizeof (tcp_mask));
+
+ avf_items[layer].type = VIRTCHNL_PROTO_HDR_TCP;
+ avf_items[layer].spec = &tcp_spec;
+ avf_items[layer].mask = &tcp_mask;
+ layer++;
+
+ if (src_port_mask)
+ {
+ tcp_spec.src_port = clib_host_to_net_u16 (src_port);
+ tcp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
+ }
+ if (dst_port_mask)
+ {
+ tcp_spec.dst_port = clib_host_to_net_u16 (dst_port);
+ tcp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
+ }
+ }
+ else if (protocol == IP_PROTOCOL_UDP)
+ {
+ memset (&udp_spec, 0, sizeof (udp_spec));
+ memset (&udp_mask, 0, sizeof (udp_mask));
+
+ avf_items[layer].type = VIRTCHNL_PROTO_HDR_UDP;
+ avf_items[layer].spec = &udp_spec;
+ avf_items[layer].mask = &udp_mask;
+ layer++;
+
+ if (src_port_mask)
+ {
+ udp_spec.src_port = clib_host_to_net_u16 (src_port);
+ udp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
+ }
+ if (dst_port_mask)
+ {
+ udp_spec.dst_port = clib_host_to_net_u16 (dst_port);
+ udp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
+ }
+ }
+ else
+ {
+ rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
+ goto done;
+ }
+
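+ /* GTP-U rides on UDP; the TEID is matched exactly (all-ones mask) */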
+ if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
+ {
+ memset (&gtp_spec, 0, sizeof (gtp_spec));
+ memset (&gtp_mask, 0, sizeof (gtp_mask));
+
+ vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
+ gtp_spec.teid = clib_host_to_net_u32 (gu->teid);
+ gtp_mask.teid = ~0;
+
+ avf_items[layer].type = VIRTCHNL_PROTO_HDR_GTPU_IP;
+ avf_items[layer].spec = &gtp_spec;
+ avf_items[layer].mask = &gtp_mask;
+ layer++;
+ }
+
+ /* pattern end flag */
+ avf_items[layer].type = VIRTCHNL_PROTO_HDR_NONE;
+ ret = avf_fdir_parse_pattern (filter, avf_items, &error);
+ if (ret)
+ {
+ avf_log_err (ad, "avf fdir parse pattern failed: %s", error.message);
+ rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
+ goto done;
+ }
+
+ /* Action */
+ /* Only one 'fate' can be assigned */
+ if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
+ {
+ avf_actions[action_count].type = VIRTCHNL_ACTION_QUEUE;
+ avf_actions[action_count].conf = &act_q;
+
+ act_q.index = f->redirect_queue;
+ fate = true;
+ action_count++;
+ }
+
+ if (f->actions & VNET_FLOW_ACTION_DROP)
+ {
+ avf_actions[action_count].type = VIRTCHNL_ACTION_DROP;
+ avf_actions[action_count].conf = NULL;
+
+ if (fate == true)
+ {
+ rv = VNET_FLOW_ERROR_INTERNAL;
+ goto done;
+ }
+ else
+ fate = true;
+
+ action_count++;
+ }
+
+ if (fate == false)
+ {
+ avf_actions[action_count].type = VIRTCHNL_ACTION_PASSTHRU;
+ avf_actions[action_count].conf = NULL;
+
+ fate = true;
+ action_count++;
+ }
+
+ if (f->actions & VNET_FLOW_ACTION_MARK)
+ {
+ avf_actions[action_count].type = VIRTCHNL_ACTION_MARK;
+ avf_actions[action_count].conf = &act_msk;
+ action_count++;
+
+ act_msk.id = fe->mark;
+ }
+
+ /* action end flag */
+ avf_actions[action_count].type = VIRTCHNL_ACTION_NONE;
+
+ /* parse action */
+ ret = avf_fdir_parse_action (avf_actions, filter, &error);
+ if (ret)
+ {
+ avf_log_err (ad, "avf fdir parse action failed: %s", error.message);
+ rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
+ goto done;
+ }
+
+ /* create flow rule, save rule */
+ ret = avf_fdir_rule_create (&vc_ctx, filter);
+
+ if (ret)
+ {
+ avf_log_err (ad, "avf fdir rule create failed: %s",
+ avf_fdir_prgm_error_decode (ret));
+ rv = VNET_FLOW_ERROR_INTERNAL;
+ goto done;
+ }
+ else
+ {
+ fe->rcfg = filter;
+ }
+done:
+
+ return rv;
+}
+
+int
+avf_flow_ops_fn (vnet_main_t *vm, vnet_flow_dev_op_t op, u32 dev_instance,
+ u32 flow_index, uword *private_data)
+{
+ vnet_flow_t *flow = vnet_get_flow (flow_index);
+ avf_device_t *ad = avf_get_device (dev_instance);
+ avf_flow_entry_t *fe = NULL;
+ avf_flow_lookup_entry_t *fle = NULL;
+ int rv = 0;
+
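+ /* flow offload requires the FDIR capability negotiated with the PF */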
+ if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_FDIR_PF) == 0)
+ {
+ rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
+ goto done;
+ }
+
+ if (op == VNET_FLOW_DEV_OP_ADD_FLOW)
+ {
+ pool_get (ad->flow_entries, fe);
+ fe->flow_index = flow->index;
+
+ /* if we need to mark packets, assign one mark */
+ if (flow->actions &
+ (VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE |
+ VNET_FLOW_ACTION_BUFFER_ADVANCE))
+ {
+ /* reserve slot 0; mark 0 means "no mark", so a valid mark is never 0 */
+ if (ad->flow_lookup_entries == 0)
+ pool_get_aligned (ad->flow_lookup_entries, fle,
+ CLIB_CACHE_LINE_BYTES);
+ pool_get_aligned (ad->flow_lookup_entries, fle,
+ CLIB_CACHE_LINE_BYTES);
+ fe->mark = fle - ad->flow_lookup_entries;
+
+ /* install entry in the lookup table */
+ clib_memset (fle, -1, sizeof (*fle));
+ if (flow->actions & VNET_FLOW_ACTION_MARK)
+ fle->flow_id = flow->mark_flow_id;
+ if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
+ fle->next_index = flow->redirect_device_input_next_index;
+ if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
+ fle->buffer_advance = flow->buffer_advance;
+
+ if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) == 0)
+ {
+ ad->flags |= AVF_DEVICE_F_RX_FLOW_OFFLOAD;
+ }
+ }
+ else
+ fe->mark = 0;
+
+ switch (flow->type)
+ {
+ case VNET_FLOW_TYPE_IP4_N_TUPLE:
+ case VNET_FLOW_TYPE_IP4_GTPU:
+ if ((rv = avf_flow_add (dev_instance, flow, fe)))
+ goto done;
+ break;
+ default:
+ rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
+ goto done;
+ }
+
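+ /* hand the pool index back to vnet as opaque private data for deletion */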
+ *private_data = fe - ad->flow_entries;
+ }
+ else if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
+ {
+ fe = vec_elt_at_index (ad->flow_entries, *private_data);
+
+ struct avf_fdir_vc_ctx ctx;
+ ctx.vc_hdl = &dev_instance;
+ ctx.vc_op = avf_fdir_vc_op_callback;
+
+ rv = avf_fdir_rule_destroy (&ctx, fe->rcfg);
+ if (rv)
+ return VNET_FLOW_ERROR_INTERNAL;
+
+ if (fe->mark)
+ {
+ fle = pool_elt_at_index (ad->flow_lookup_entries, fe->mark);
+ clib_memset (fle, -1, sizeof (*fle));
+ pool_put_index (ad->flow_lookup_entries, fe->mark);
+ }
+
+ (void) avf_fdir_rcfg_destroy (fe->rcfg);
+ clib_memset (fe, 0, sizeof (*fe));
+ pool_put (ad->flow_entries, fe);
+ goto disable_rx_offload;
+ }
+ else
+ return VNET_FLOW_ERROR_NOT_SUPPORTED;
+
+done:
+ if (rv)
+ {
+ if (fe)
+ {
+ clib_memset (fe, 0, sizeof (*fe));
+ pool_put (ad->flow_entries, fe);
+ }
+
+ if (fle)
+ {
+ clib_memset (fle, -1, sizeof (*fle));
+ pool_put (ad->flow_lookup_entries, fle);
+ }
+ }
+disable_rx_offload:
+ if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) != 0 &&
+ pool_elts (ad->flow_entries) == 0)
+ {
+ ad->flags &= ~AVF_DEVICE_F_RX_FLOW_OFFLOAD;
+ }
+
+ return rv;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/avf/virtchnl.h b/src/plugins/avf/virtchnl.h
index 32d2ea00a6f..48ecab07059 100644
--- a/src/plugins/avf/virtchnl.h
+++ b/src/plugins/avf/virtchnl.h
@@ -121,23 +121,25 @@ typedef enum
VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
} virtchnl_status_code_t;
-#define foreach_avf_vf_cap_flag \
- _( 0, OFFLOAD_L2, "l2") \
- _( 1, OFFLOAD_IWARP, "iwarp") \
- _( 2, OFFLOAD_RSVD, "rsvd") \
- _( 3, OFFLOAD_RSS_AQ, "rss-aq") \
- _( 4, OFFLOAD_RSS_REG, "rss-reg") \
- _( 5, OFFLOAD_WB_ON_ITR, "wb-on-itr") \
- _( 6, OFFLOAD_REQ_QUEUES, "req-queues") \
- _( 7, CAP_ADV_LINK_SPEED, "adv-link-speed") \
- _(16, OFFLOAD_VLAN, "vlan") \
- _(17, OFFLOAD_RX_POLLING, "rx-polling") \
- _(18, OFFLOAD_RSS_PCTYPE_V2, "rss-pctype-v2") \
- _(19, OFFLOAD_RSS_PF, "rss-pf") \
- _(20, OFFLOAD_ENCAP, "encap") \
- _(21, OFFLOAD_ENCAP_CSUM, "encap-csum") \
- _(22, OFFLOAD_RX_ENCAP_CSUM, "rx-encap-csum") \
- _(23, OFFLOAD_ADQ, "offload-adq")
+#define foreach_avf_vf_cap_flag \
+ _ (0, OFFLOAD_L2, "l2") \
+ _ (1, OFFLOAD_IWARP, "iwarp") \
+ _ (2, OFFLOAD_RSVD, "rsvd") \
+ _ (3, OFFLOAD_RSS_AQ, "rss-aq") \
+ _ (4, OFFLOAD_RSS_REG, "rss-reg") \
+ _ (5, OFFLOAD_WB_ON_ITR, "wb-on-itr") \
+ _ (6, OFFLOAD_REQ_QUEUES, "req-queues") \
+ _ (7, CAP_ADV_LINK_SPEED, "adv-link-speed") \
+ _ (16, OFFLOAD_VLAN, "vlan") \
+ _ (17, OFFLOAD_RX_POLLING, "rx-polling") \
+ _ (18, OFFLOAD_RSS_PCTYPE_V2, "rss-pctype-v2") \
+ _ (19, OFFLOAD_RSS_PF, "rss-pf") \
+ _ (20, OFFLOAD_ENCAP, "encap") \
+ _ (21, OFFLOAD_ENCAP_CSUM, "encap-csum") \
+ _ (22, OFFLOAD_RX_ENCAP_CSUM, "rx-encap-csum") \
+ _ (23, OFFLOAD_ADQ, "offload-adq") \
+ _ (27, OFFLOAD_ADV_RSS_PF, "offload-adv-rss-pf") \
+ _ (28, OFFLOAD_FDIR_PF, "offload-fdir-pf")
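+/* bits 27 and 28 match VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF and
+ * VIRTCHNL_VF_OFFLOAD_FDIR_PF in the upstream virtchnl definitions */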
typedef enum
{